2 * Copyright (c) 2009-2011 Apple Inc. All rights reserved.
4 * @APPLE_APACHE_LICENSE_HEADER_START@
6 * Licensed under the Apache License, Version 2.0 (the "License");
7 * you may not use this file except in compliance with the License.
8 * You may obtain a copy of the License at
10 * http://www.apache.org/licenses/LICENSE-2.0
12 * Unless required by applicable law or agreed to in writing, software
13 * distributed under the License is distributed on an "AS IS" BASIS,
14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 * See the License for the specific language governing permissions and
16 * limitations under the License.
18 * @APPLE_APACHE_LICENSE_HEADER_END@
23 // Dispatch data objects are dispatch objects with standard retain/release
24 // memory management. A dispatch data object either points to a number of other
25 // dispatch data objects or is a leaf data object. A leaf data object contains
26 // a pointer to represented memory. A composite data object specifies the total
27 // size of data it represents and list of constituent records.
29 // A leaf data object has a single entry in records[], the object size is the
30 // same as records[0].length and records[0].from is always 0. In other words, a
31 // leaf data object always points to a full represented buffer, so a composite
32 // dispatch data object is needed to represent a subrange of a memory region.
// Internal retain/release shims used for the dispatch data sub-objects held
// in records[]; currently just ordinary dispatch object refcounting.
34 #define _dispatch_data_retain(x) dispatch_retain(x)
35 #define _dispatch_data_release(x) dispatch_release(x)
37 #if DISPATCH_DATA_MOVABLE
// Movable-buffer support requires the resolved per-platform build variant.
38 #if DISPATCH_USE_RESOLVERS && !defined(DISPATCH_RESOLVED_VARIANT)
39 #error Resolved variant required for movable
// Sentinel destructor compared against by pointer identity only: it marks
// wrapper objects that must unlock (unpin) an underlying leaf on disposal
// (see _dispatch_data_dispose). The block body crashes because the sentinel
// must never actually be invoked as a destructor.
// NOTE(review): the closing "};" / "#endif" lines are elided in this excerpt.
41 static const dispatch_block_t _dispatch_data_destructor_unlock = ^{
42 DISPATCH_CRASH("unlock destructor called");
44 #define DISPATCH_DATA_DESTRUCTOR_UNLOCK (_dispatch_data_destructor_unlock)
// Public sentinel (DISPATCH_DATA_DESTRUCTOR_FREE): recognized by pointer
// identity in _dispatch_data_destroy_buffer; crashes if ever invoked directly.
47 const dispatch_block_t _dispatch_data_destructor_free = ^{
48 DISPATCH_CRASH("free destructor called");
// Public sentinel (DISPATCH_DATA_DESTRUCTOR_NONE): buffer is owned elsewhere,
// nothing is done on disposal; crashes if ever invoked directly.
51 const dispatch_block_t _dispatch_data_destructor_none = ^{
52 DISPATCH_CRASH("none destructor called");
// Public sentinel (DISPATCH_DATA_DESTRUCTOR_VM_DEALLOCATE): buffer is released
// with vm_deallocate() on disposal; crashes if ever invoked directly.
55 const dispatch_block_t _dispatch_data_destructor_vm_deallocate = ^{
56 DISPATCH_CRASH("vmdeallocate destructor called");
// Singleton returned for zero-length data. The global refcount sentinels make
// retain/release no-ops, so this object is never deallocated.
59 struct dispatch_data_s _dispatch_data_empty = {
60 .do_vtable = DISPATCH_VTABLE(data),
61 .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
62 .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
63 .do_next = DISPATCH_OBJECT_LISTLESS,
// Allocate a dispatch data object with room for n trailing range_records and
// initialize the common fields: record count, default-priority global target
// queue, and list linkage. Callers fill in records[], size/leaf/destructor.
// NOTE(review): the "return data;" and braces are elided in this excerpt.
66 static dispatch_data_t
67 _dispatch_data_init(size_t n)
69 dispatch_data_t data = _dispatch_alloc(DISPATCH_VTABLE(data),
70 sizeof(struct dispatch_data_s) + n * sizeof(range_record));
71 data->num_records = n;
72 data->do_targetq = dispatch_get_global_queue(
73 DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);
74 data->do_next = DISPATCH_OBJECT_LISTLESS;
// Dispose of a leaf buffer according to its destructor. The three known
// sentinel destructors are matched by pointer identity and handled inline
// (FREE / NONE / VM_DEALLOCATE; the FREE and NONE branch bodies are elided
// in this excerpt). Any other destructor is a caller-supplied Block: it is
// dispatched asynchronously onto `queue` — defaulting to the default-priority
// global queue when none was given — and released after it runs
// (_dispatch_call_block_and_release).
79 _dispatch_data_destroy_buffer(const void* buffer, size_t size,
80 dispatch_queue_t queue, dispatch_block_t destructor)
82 if (destructor == DISPATCH_DATA_DESTRUCTOR_FREE) {
84 } else if (destructor == DISPATCH_DATA_DESTRUCTOR_NONE) {
86 } else if (destructor == DISPATCH_DATA_DESTRUCTOR_VM_DEALLOCATE) {
// Return the buffer's pages to the kernel.
87 vm_deallocate(mach_task_self(), (vm_address_t)buffer, size);
90 queue = dispatch_get_global_queue(
91 DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);
93 dispatch_async_f(queue, destructor, _dispatch_call_block_and_release);
// Public constructor: create a leaf data object representing `buffer`/`size`.
// With the default destructor, the bytes are copied into a malloc'd buffer
// the object owns (freed on disposal); with any other destructor, the object
// references the caller's buffer directly and runs the destructor on `queue`
// when disposed.
98 dispatch_data_create(const void* buffer, size_t size, dispatch_queue_t queue,
99 dispatch_block_t destructor)
101 dispatch_data_t data;
102 if (!buffer || !size) {
103 // Empty data requested so return the singleton empty object. Call
104 // destructor immediately in this case to ensure any unused associated
105 // storage is released.
107 _dispatch_data_destroy_buffer(buffer, size, queue,
108 _dispatch_Block_copy(destructor));
110 return dispatch_data_empty;
// One record: a leaf always covers its whole buffer (from = 0).
112 data = _dispatch_data_init(1);
113 // Leaf objects always point to the entirety of the memory region
116 data->records[0].from = 0;
117 data->records[0].length = size;
118 if (destructor == DISPATCH_DATA_DESTRUCTOR_DEFAULT) {
119 // The default destructor was provided, indicating the data should be
// copied into internal storage owned by the object (freed on disposal).
// NOTE(review): the malloc-failure handling lines are elided in this excerpt.
121 void *data_buf = malloc(size);
122 if (slowpath(!data_buf)) {
126 buffer = memcpy(data_buf, buffer, size);
127 data->destructor = DISPATCH_DATA_DESTRUCTOR_FREE;
// Non-default destructor: copy the Block so it survives until disposal.
129 data->destructor = _dispatch_Block_copy(destructor);
130 #if DISPATCH_DATA_MOVABLE
131 // A non-default destructor was provided, indicating the system does not
132 // own the buffer. Mark the object as locked since the application has
133 // direct access to the buffer and it cannot be reallocated/moved.
137 data->records[0].data_object = (void*)buffer;
// Retain the queue the custom destructor will eventually run on.
139 _dispatch_retain(queue);
140 data->do_targetq = queue;
// Vtable dispose hook. Composite objects (destructor == NULL) release each
// constituent sub-object; UNLOCK-sentinel wrappers (movable builds) unpin and
// release the underlying leaf; all other leaves hand their single buffer to
// _dispatch_data_destroy_buffer with the stored destructor.
146 _dispatch_data_dispose(dispatch_data_t dd)
148 dispatch_block_t destructor = dd->destructor;
149 if (destructor == NULL) {
// Composite: drop our references on every constituent data object.
151 for (i = 0; i < dd->num_records; ++i) {
152 _dispatch_data_release(dd->records[i].data_object);
154 #if DISPATCH_DATA_MOVABLE
155 } else if (destructor == DISPATCH_DATA_DESTRUCTOR_UNLOCK) {
156 dispatch_data_t data = (dispatch_data_t)dd->records[0].data_object;
// Undo the pin taken when the wrapper was created (map/apply).
157 (void)dispatch_atomic_dec2o(data, locked);
158 _dispatch_data_release(data);
// Leaf: run the destructor for the represented buffer on the target queue.
161 _dispatch_data_destroy_buffer(dd->records[0].data_object,
162 dd->records[0].length, dd->do_targetq, destructor);
// Vtable debug hook: format a human-readable description of dd into buf.
// Leaves print their buffer pointer; composites print the record count and
// then each record's from/length/data_object.
167 _dispatch_data_debug(dispatch_data_t dd, char* buf, size_t bufsiz)
171 offset += snprintf(&buf[offset], bufsiz - offset,
172 "leaf: %d, size: %zd, data: %p", dd->leaf, dd->size,
173 dd->records[0].data_object);
175 offset += snprintf(&buf[offset], bufsiz - offset,
176 "leaf: %d, size: %zd, num_records: %zd", dd->leaf,
177 dd->size, dd->num_records);
179 for (i = 0; i < dd->num_records; ++i) {
180 range_record r = dd->records[i];
181 offset += snprintf(&buf[offset], bufsiz - offset,
182 "records[%zd] from: %zd, length %zd, data_object: %p", i,
183 r.from, r.length, r.data_object);
// Public accessor for the total number of bytes represented by dd.
// NOTE(review): the function body is elided in this excerpt.
190 dispatch_data_get_size(dispatch_data_t dd)
// Public constructor: concatenate dd1 and dd2 into a new composite object.
// The constituent records of both operands are copied flat into the new
// object's records[] (so the result's depth does not grow), and every
// referenced sub-object is retained. When an operand is itself a leaf, its
// single record is rewritten to reference the operand object directly.
// NOTE(review): the empty-operand early returns are elided in this excerpt.
196 dispatch_data_create_concat(dispatch_data_t dd1, dispatch_data_t dd2)
198 dispatch_data_t data;
200 _dispatch_data_retain(dd2);
204 _dispatch_data_retain(dd1);
207 data = _dispatch_data_init(dd1->num_records + dd2->num_records);
208 data->size = dd1->size + dd2->size;
209 // Copy the constituent records into the newly created data object
210 memcpy(data->records, dd1->records, dd1->num_records *
211 sizeof(range_record));
212 memcpy(data->records + dd1->num_records, dd2->records, dd2->num_records *
213 sizeof(range_record));
214 // Reference leaf objects as sub-objects
216 data->records[0].data_object = dd1;
219 data->records[dd1->num_records].data_object = dd2;
// Take a reference on every constituent for the new composite.
222 for (i = 0; i < data->num_records; ++i) {
223 _dispatch_data_retain(data->records[i].data_object);
// Public constructor: create an object representing length bytes of dd
// starting at offset. Out-of-range offset or zero length yields the empty
// singleton; a range past the end is clamped; the full range just retains dd.
// For a leaf, a one-record composite referencing the leaf is built; for a
// composite, the covered records are walked and stitched together with
// recursive subrange + concat calls.
229 dispatch_data_create_subrange(dispatch_data_t dd, size_t offset,
232 dispatch_data_t data;
233 if (offset >= dd->size || !length) {
234 return dispatch_data_empty;
235 } else if ((offset + length) > dd->size) {
// Clamp requests extending past the end of dd.
236 length = dd->size - offset;
237 } else if (length == dd->size) {
// Entire object requested: share it instead of wrapping it.
238 _dispatch_data_retain(dd);
// Leaf case: single record pointing into dd at [offset, offset+length).
242 data = _dispatch_data_init(1);
244 data->records[0].from = offset;
245 data->records[0].length = length;
246 data->records[0].data_object = dd;
247 _dispatch_data_retain(dd);
250 // Subrange of a composite dispatch data object: find the record containing
251 // the specified offset
252 data = dispatch_data_empty;
253 size_t i = 0, bytes_left = length;
// Skip whole records that lie entirely before the requested offset.
254 while (i < dd->num_records && offset >= dd->records[i].length) {
255 offset -= dd->records[i++].length;
// Accumulate subranges of successive records until length is satisfied.
257 while (i < dd->num_records) {
258 size_t record_len = dd->records[i].length - offset;
259 if (record_len > bytes_left) {
260 record_len = bytes_left;
262 dispatch_data_t subrange = dispatch_data_create_subrange(
263 dd->records[i].data_object, dd->records[i].from + offset,
// Concat grows the running result; drop intermediates immediately.
265 dispatch_data_t concat = dispatch_data_create_concat(data, subrange);
266 _dispatch_data_release(data);
267 _dispatch_data_release(subrange);
269 bytes_left -= record_len;
276 // Crashing here indicates memory corruption of passed in data object
277 DISPATCH_CRASH("dispatch_data_create_subrange out of bounds");
281 // When mapping a leaf object or a subrange of a leaf object, return a direct
282 // pointer to the represented buffer. For all other data objects, copy the
283 // represented buffers into a contiguous area. In the future it might
284 // be possible to relocate the buffers instead (if not marked as locked).
// Public: produce a contiguous view of dd, returning the new object and
// reporting the buffer (and, presumably via an elided size_ptr parameter,
// its size — the full signature is cut off in this excerpt) to the caller.
286 dispatch_data_create_map(dispatch_data_t dd, const void **buffer_ptr,
289 dispatch_data_t data = dd;
291 size_t size = dd->size, offset = 0;
// Empty input maps to the empty singleton.
293 data = dispatch_data_empty;
// A single-record composite over a leaf is treated as the leaf itself,
// carrying the record's offset into the leaf's buffer.
296 if (!dd->leaf && dd->num_records == 1 &&
297 ((dispatch_data_t)dd->records[0].data_object)->leaf) {
298 offset = dd->records[0].from;
299 dd = (dispatch_data_t)(dd->records[0].data_object);
302 #if DISPATCH_DATA_MOVABLE
// Movable builds wrap the leaf in an UNLOCK-sentinel object so the backing
// buffer stays pinned for the lifetime of the map.
303 data = _dispatch_data_init(1);
304 // Make sure the underlying leaf object does not move the backing buffer
305 (void)dispatch_atomic_inc2o(dd, locked);
307 data->destructor = DISPATCH_DATA_DESTRUCTOR_UNLOCK;
308 data->records[0].data_object = dd;
309 data->records[0].from = offset;
310 data->records[0].length = size;
311 _dispatch_data_retain(dd);
// Non-movable builds return the (retained) leaf with a direct pointer.
313 _dispatch_data_retain(data);
315 buffer = dd->records[0].data_object + offset;
318 // Composite data object, copy the represented buffers
319 buffer = malloc(size);
// Flatten all leaves into the freshly allocated contiguous buffer,
// then wrap it in a new leaf object that owns (frees) it.
325 dispatch_data_apply(dd, ^(dispatch_data_t region DISPATCH_UNUSED,
326 size_t off, const void* buf, size_t len) {
327 memcpy(buffer + off, buf, len);
330 data = dispatch_data_create(buffer, size, NULL,
331 DISPATCH_DATA_DESTRUCTOR_FREE);
334 *buffer_ptr = buffer;
// Recursive worker for dispatch_data_apply: invoke applier once per leaf
// buffer, in logical order. `offset` is the running logical position within
// the outermost object; `from`/`size` select the window of the current leaf.
// Returns the applier's bool, stopping the traversal on false.
343 _dispatch_data_apply(dispatch_data_t dd, size_t offset, size_t from,
344 size_t size, dispatch_data_applier_t applier)
347 dispatch_data_t data = dd;
349 dispatch_assert(dd->size);
350 #if DISPATCH_DATA_MOVABLE
// Movable builds: pin the leaf via an UNLOCK-sentinel wrapper for the
// duration of the applier call so the backing buffer cannot move.
352 data = _dispatch_data_init(1);
353 // Make sure the underlying leaf object does not move the backing buffer
354 (void)dispatch_atomic_inc2o(dd, locked);
356 data->destructor = DISPATCH_DATA_DESTRUCTOR_UNLOCK;
357 data->records[0].data_object = dd;
358 data->records[0].from = from;
359 data->records[0].length = size;
360 _dispatch_data_retain(dd);
361 buffer = dd->records[0].data_object + from;
362 result = applier(data, offset, buffer, size);
363 _dispatch_data_release(data);
// Collapse a single-record composite over a leaf to the leaf itself.
367 if (!dd->leaf && dd->num_records == 1 &&
368 ((dispatch_data_t)dd->records[0].data_object)->leaf) {
369 from = dd->records[0].from;
370 dd = (dispatch_data_t)(dd->records[0].data_object);
// Leaf: hand the applier a direct pointer into the represented buffer.
373 buffer = dd->records[0].data_object + from;
374 return applier(data, offset, buffer, size);
// Composite: recurse into each record, advancing the logical offset;
// stop early if the applier returns false.
378 for (i = 0; i < dd->num_records && result; ++i) {
379 result = _dispatch_data_apply(dd->records[i].data_object,
380 offset, dd->records[i].from, dd->records[i].length,
382 offset += dd->records[i].length;
// Public entry point: traverse every leaf buffer of dd in order, invoking
// applier with each contiguous region and its logical offset.
388 dispatch_data_apply(dispatch_data_t dd, dispatch_data_applier_t applier)
393 return _dispatch_data_apply(dd, 0, 0, dd->size, applier);
396 // Returns either a leaf object or an object composed of a single leaf object
398 dispatch_data_copy_region(dispatch_data_t dd, size_t location,
401 if (location >= dd->size) {
403 return dispatch_data_empty;
405 dispatch_data_t data;
406 size_t size = dd->size, offset = 0, from = 0;
409 _dispatch_data_retain(dd);
410 *offset_ptr = offset;
411 if (size == dd->size) {
414 // Create a new object for the requested subrange of the leaf
415 data = _dispatch_data_init(1);
417 data->records[0].from = from;
418 data->records[0].length = size;
419 data->records[0].data_object = dd;
423 // Find record at the specified location
425 for (i = 0; i < dd->num_records; ++i) {
426 pos = offset + dd->records[i].length;
427 if (location < pos) {
428 size = dd->records[i].length;
429 from = dd->records[i].from;
430 data = (dispatch_data_t)(dd->records[i].data_object);
431 if (dd->num_records == 1 && data->leaf) {
432 // Return objects composed of a single leaf node
433 *offset_ptr = offset;
434 _dispatch_data_retain(dd);
437 // Drill down into other objects