1 /*******************************************************************************
2 * Copyright (c) 2008-2015 The Khronos Group Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and/or associated documentation files (the
6 * "Materials"), to deal in the Materials without restriction, including
7 * without limitation the rights to use, copy, modify, merge, publish,
8 * distribute, sublicense, and/or sell copies of the Materials, and to
9 * permit persons to whom the Materials are furnished to do so, subject to
10 * the following conditions:
12 * The above copyright notice and this permission notice shall be included
13 * in all copies or substantial portions of the Materials.
15 * MODIFICATIONS TO THIS FILE MAY MEAN IT NO LONGER ACCURATELY REFLECTS
16 * KHRONOS STANDARDS. THE UNMODIFIED, NORMATIVE VERSIONS OF KHRONOS
17 * SPECIFICATIONS AND HEADER INFORMATION ARE LOCATED AT
18 * https://www.khronos.org/registry/
20 * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
21 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
22 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
23 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
24 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
25 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
26 * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
27 ******************************************************************************/
31 * \brief C++ bindings for OpenCL 1.0 (rev 48), OpenCL 1.1 (rev 33),
32 * OpenCL 1.2 (rev 15) and OpenCL 2.0 (rev 29)
33 * \author Lee Howes and Bruce Merry
35 * Derived from the OpenCL 1.x C++ bindings written by
36 * Benedict R. Gaster, Laurent Morichetti and Lee Howes
37 * With additions and fixes from:
38 * Brian Cole, March 3rd 2010 and April 2012
39 * Matt Gruenke, April 2012.
40 * Bruce Merry, February 2013.
41 * Tom Deakin and Simon McIntosh-Smith, July 2013
42 * James Price, June-November 2015
47 * Optional extension support
49 * cl_ext_device_fission
50 * #define CL_HPP_USE_CL_DEVICE_FISSION
51 * cl_khr_d3d10_sharing
52 * #define CL_HPP_USE_DX_INTEROP
54 * #define CL_HPP_USE_CL_SUB_GROUPS_KHR
58 * \section intro Introduction
59 * For many large applications C++ is the language of choice and so it seems
60 * reasonable to define C++ bindings for OpenCL.
62 * The interface is contained with a single C++ header file \em cl2.hpp and all
63 * definitions are contained within the namespace \em cl. There is no additional
64 * requirement to include \em cl.h and to use either the C++ or original C
65 * bindings; it is enough to simply include \em cl2.hpp.
67 * The bindings themselves are lightweight and correspond closely to the
68 * underlying C API. Using the C++ bindings introduces no additional execution
71 * There are numerous compatibility, portability and memory management
72 * fixes in the new header as well as additional OpenCL 2.0 features.
73 * As a result the header is not directly backward compatible and for this
74 * reason we release it as cl2.hpp rather than a new version of cl.hpp.
77 * \section compatibility Compatibility
78 * Due to the evolution of the underlying OpenCL API the 2.0 C++ bindings
79 * include an updated approach to defining supported feature versions
80 * and the range of valid underlying OpenCL runtime versions supported.
82 * The combination of preprocessor macros CL_HPP_TARGET_OPENCL_VERSION and
83 * CL_HPP_MINIMUM_OPENCL_VERSION control this range. These are three digit
84 * decimal values representing OpenCL runtime versions. The default for
85 * the target is 200, representing OpenCL 2.0 and the minimum is also
86 * defined as 200. These settings would use 2.0 API calls only.
87 * If backward compatibility with a 1.2 runtime is required, the minimum
88 * version may be set to 120.
90 * Note that this is a compile-time setting, and so affects linking against
91 * a particular SDK version rather than the versioning of the loaded runtime.
93 * The earlier versions of the header included basic vector and string
94 * classes based loosely on STL versions. These were difficult to
95 * maintain and very rarely used. For the 2.0 header we now assume
96 * the presence of the standard library unless requested otherwise.
97 * We use std::array, std::vector, std::shared_ptr and std::string
98 * throughout to safely manage memory and reduce the chance of a
99 * recurrence of earlier memory management bugs.
101 * These classes are used through typedefs in the cl namespace:
102 * cl::array, cl::vector, cl::pointer and cl::string.
103 * In addition cl::allocate_pointer forwards to std::allocate_shared
105 * In all cases these standard library classes can be replaced with
106 * custom interface-compatible versions using the CL_HPP_NO_STD_ARRAY,
107 * CL_HPP_NO_STD_VECTOR, CL_HPP_NO_STD_UNIQUE_PTR and
108 * CL_HPP_NO_STD_STRING macros.
110 * The OpenCL 1.x versions of the C++ bindings included a size_t wrapper
111 * class to interface with kernel enqueue. This caused unpleasant interactions
112 * with the standard size_t declaration and led to namespacing bugs.
113 * In the 2.0 version we have replaced this with a std::array-based interface.
114 * However, the old behaviour can be regained for backward compatibility
115 * using the CL_HPP_ENABLE_SIZE_T_COMPATIBILITY macro.
117 * Finally, the program construction interface used a clumsy vector-of-pairs
118 * design in the earlier versions. We have replaced that with a cleaner
119 * vector-of-vectors and vector-of-strings design. However, for backward
120 * compatibility old behaviour can be regained with the
121 * CL_HPP_ENABLE_PROGRAM_CONSTRUCTION_FROM_ARRAY_COMPATIBILITY macro.
123 * In OpenCL 2.0 OpenCL C is not entirely backward compatible with
124 * earlier versions. As a result a flag must be passed to the OpenCL C
125 * compiler to request OpenCL 2.0 compilation of kernels with 1.2 as
126 * the default in the absence of the flag.
127 * In some cases the C++ bindings automatically compile code for ease.
128 * For those cases the compilation defaults to OpenCL C 2.0.
129 * If this is not wanted, the CL_HPP_CL_1_2_DEFAULT_BUILD macro may
130 * be specified to assume 1.2 compilation.
131 * If more fine-grained decisions on a per-kernel basis are required
132 * then explicit build operations that take the flag should be used.
135 * \section parameterization Parameters
136 * This header may be parameterized by a set of preprocessor macros.
137 * CL_HPP_TARGET_OPENCL_VERSION
138 * - Defines the target OpenCL runtime version to build the header against.
139 * Defaults to 200, representing OpenCL 2.0.
140 * CL_HPP_NO_STD_STRING
141 * - Do not use the standard library string class.
142 * cl::string is not defined and may be defined by the user before
143 * cl2.hpp is included.
144 * CL_HPP_NO_STD_VECTOR
145 * - Do not use the standard library vector class.
146 * cl::vector is not defined and may be defined by the user before
147 * cl2.hpp is included.
148 * CL_HPP_NO_STD_ARRAY
149 * - Do not use the standard library array class.
150 * cl::array is not defined and may be defined by the user before
151 * cl2.hpp is included.
152 * CL_HPP_NO_STD_UNIQUE_PTR
153 * - Do not use the standard library unique_ptr class.
154 * cl::pointer and the cl::allocate_pointer function are not defined
155 * and may be defined by the user before cl2.hpp is included.
156 * CL_HPP_ENABLE_DEVICE_FISSION
157 * - Enables device fission for OpenCL 1.2 platforms
158 * CL_HPP_ENABLE_EXCEPTIONS
159 * - Enable exceptions for use in the C++ bindings header.
160 * This is the preferred error handling mechanism but is not required.
161 * CL_HPP_ENABLE_SIZE_T_COMPATIBILITY
162 * - Backward compatibility option to support cl.hpp-style size_t class.
163 * Replaces the updated std::array derived version and removal of size_t
164 * from the namespace. Note that in this case the new size_t class
165 * is placed in the cl::compatibility namespace and thus requires
166 * an additional using declaration for direct backward compatibility.
167 * CL_HPP_ENABLE_PROGRAM_CONSTRUCTION_FROM_ARRAY_COMPATIBILITY
168 * - Enable older vector of pairs interface for construction of programs.
169 * CL_HPP_CL_1_2_DEFAULT_BUILD
170 * - Default to OpenCL C 1.2 compilation rather than OpenCL C 2.0
171 * - applies to use of cl::Program construction and other program build variants.
174 * \section example Example
176 * The following example shows a general use case for the C++
177 * bindings, including support for the optional exception feature and
178 * also the supplied vector and string classes, see following sections for
179 * descriptions of these features.
182 #define CL_HPP_ENABLE_EXCEPTIONS
183 #define CL_HPP_TARGET_OPENCL_VERSION 200
185 #include <CL/cl2.hpp>
191 const int numElements = 32;
195 // Filter for a 2.0 platform and set it as the default
196 std::vector<cl::Platform> platforms;
197 cl::Platform::get(&platforms);
199 for (auto &p : platforms) {
200 std::string platver = p.getInfo<CL_PLATFORM_VERSION>();
201 if (platver.find("OpenCL 2.") != std::string::npos) {
206 std::cout << "No OpenCL 2.0 platform found.";
210 cl::Platform newP = cl::Platform::setDefault(plat);
212 std::cout << "Error setting default platform.";
217 "global int globalA;"
218 "kernel void updateGlobal(){"
222 "typedef struct { global int *bar; } Foo; kernel void vectorAdd(global const Foo* aNum, global const int *inputA, global const int *inputB, global int *output, int val, write_only pipe int outPipe, queue_t childQueue){"
223 " output[get_global_id(0)] = inputA[get_global_id(0)] + inputB[get_global_id(0)] + val + *(aNum->bar);"
224 " write_pipe(outPipe, &val);"
225 " queue_t default_queue = get_default_queue(); "
226 " ndrange_t ndrange = ndrange_1D(get_global_size(0)/2, get_global_size(0)/2); "
227 // Have a child kernel write into third quarter of output
228 " enqueue_kernel(default_queue, CLK_ENQUEUE_FLAGS_WAIT_KERNEL, ndrange, "
230 " output[get_global_size(0)*2 + get_global_id(0)] = inputA[get_global_size(0)*2+get_global_id(0)] + inputB[get_global_size(0)*2+get_global_id(0)] + globalA;"
232 // Have a child kernel write into last quarter of output
233 " enqueue_kernel(childQueue, CLK_ENQUEUE_FLAGS_WAIT_KERNEL, ndrange, "
235 " output[get_global_size(0)*3 + get_global_id(0)] = inputA[get_global_size(0)*3 + get_global_id(0)] + inputB[get_global_size(0)*3 + get_global_id(0)] + globalA + 2;"
239 // New simpler string interface style
240 std::vector<std::string> programStrings {kernel1, kernel2};
242 cl::Program vectorAddProgram(
245 vectorAddProgram.build("-cl-std=CL2.0");
248 // Print build info for all devices
249 cl_int buildErr = CL_SUCCESS;
250 auto buildInfo = vectorAddProgram.getBuildInfo<CL_PROGRAM_BUILD_LOG>(&buildErr);
251 for (auto &pair : buildInfo) {
252 std::cerr << pair.second << std::endl << std::endl;
258 typedef struct { int *bar; } Foo;
260 // Get and run kernel that initializes the program-scope global
261 // A test for kernels that take no arguments
262 auto program2Kernel =
263 cl::KernelFunctor<>(vectorAddProgram, "updateGlobal");
271 cl::pointer<int> anSVMInt = cl::allocate_svm<int, cl::SVMTraitCoarse<>>();
273 cl::SVMAllocator<int, cl::SVMTraitCoarse<cl::SVMTraitReadOnly<>>> svmAllocReadOnly;
274 auto fooPointer = cl::allocate_pointer<Foo>(svmAllocReadOnly);
275 fooPointer->bar = anSVMInt.get();
276 cl::SVMAllocator<int, cl::SVMTraitCoarse<>> svmAlloc;
277 std::vector<int, cl::SVMAllocator<int, cl::SVMTraitCoarse<>>> inputA(numElements, 1, svmAlloc);
278 cl::coarse_svm_vector<int> inputB(numElements, 2, svmAlloc);
283 // Traditional cl_mem allocations
284 std::vector<int> output(numElements, 0xdeadbeef);
285 cl::Buffer outputBuffer(begin(output), end(output), false);
286 cl::Pipe aPipe(sizeof(cl_int), numElements / 2);
288 // Default command queue, also passed in as a parameter
289 cl::DeviceCommandQueue defaultDeviceQueue = cl::DeviceCommandQueue::makeDefault(
290 cl::Context::getDefault(), cl::Device::getDefault());
292 auto vectorAddKernel =
294 decltype(fooPointer)&,
296 cl::coarse_svm_vector<int>&,
300 cl::DeviceCommandQueue
301 >(vectorAddProgram, "vectorAdd");
303 // Ensure that the additional SVM pointer is available to the kernel
304 // This one was not passed as a parameter
305 vectorAddKernel.setSVMPointers(anSVMInt);
307 // Hand control of coarse allocations to runtime
308 cl::enqueueUnmapSVM(anSVMInt);
309 cl::enqueueUnmapSVM(fooPointer);
310 cl::unmapSVM(inputB);
311 cl::unmapSVM(output2);
316 cl::NDRange(numElements/2),
317 cl::NDRange(numElements/2)),
328 cl::copy(outputBuffer, begin(output), end(output));
329 // Grab the SVM output vector using a map
332 cl::Device d = cl::Device::getDefault();
334 std::cout << "Output:\n";
335 for (int i = 1; i < numElements; ++i) {
336 std::cout << "\t" << output[i] << "\n";
349 /* Handle deprecated preprocessor definitions. In each case, we only check for
350 * the old name if the new name is not defined, so that user code can define
351 * both and hence work with either version of the bindings.
353 #if !defined(CL_HPP_USE_DX_INTEROP) && defined(USE_DX_INTEROP)
354 # pragma message("cl2.hpp: USE_DX_INTEROP is deprecated. Define CL_HPP_USE_DX_INTEROP instead")
355 # define CL_HPP_USE_DX_INTEROP
357 #if !defined(CL_HPP_USE_CL_DEVICE_FISSION) && defined(USE_CL_DEVICE_FISSION)
358 # pragma message("cl2.hpp: USE_CL_DEVICE_FISSION is deprecated. Define CL_HPP_USE_CL_DEVICE_FISSION instead")
359 # define CL_HPP_USE_CL_DEVICE_FISSION
361 #if !defined(CL_HPP_ENABLE_EXCEPTIONS) && defined(__CL_ENABLE_EXCEPTIONS)
362 # pragma message("cl2.hpp: __CL_ENABLE_EXCEPTIONS is deprecated. Define CL_HPP_ENABLE_EXCEPTIONS instead")
363 # define CL_HPP_ENABLE_EXCEPTIONS
365 #if !defined(CL_HPP_NO_STD_VECTOR) && defined(__NO_STD_VECTOR)
366 # pragma message("cl2.hpp: __NO_STD_VECTOR is deprecated. Define CL_HPP_NO_STD_VECTOR instead")
367 # define CL_HPP_NO_STD_VECTOR
369 #if !defined(CL_HPP_NO_STD_STRING) && defined(__NO_STD_STRING)
370 # pragma message("cl2.hpp: __NO_STD_STRING is deprecated. Define CL_HPP_NO_STD_STRING instead")
371 # define CL_HPP_NO_STD_STRING
373 #if defined(VECTOR_CLASS)
374 # pragma message("cl2.hpp: VECTOR_CLASS is deprecated. Alias cl::vector instead")
376 #if defined(STRING_CLASS)
377 # pragma message("cl2.hpp: STRING_CLASS is deprecated. Alias cl::string instead.")
379 #if !defined(CL_HPP_USER_OVERRIDE_ERROR_STRINGS) && defined(__CL_USER_OVERRIDE_ERROR_STRINGS)
380 # pragma message("cl2.hpp: __CL_USER_OVERRIDE_ERROR_STRINGS is deprecated. Define CL_HPP_USER_OVERRIDE_ERROR_STRINGS instead")
381 # define CL_HPP_USER_OVERRIDE_ERROR_STRINGS
384 /* Warn about features that are no longer supported
386 #if defined(__USE_DEV_VECTOR)
387 # pragma message("cl2.hpp: __USE_DEV_VECTOR is no longer supported. Expect compilation errors")
389 #if defined(__USE_DEV_STRING)
390 # pragma message("cl2.hpp: __USE_DEV_STRING is no longer supported. Expect compilation errors")
393 /* Detect which version to target */
394 #if !defined(CL_HPP_TARGET_OPENCL_VERSION)
395 # pragma message("cl2.hpp: CL_HPP_TARGET_OPENCL_VERSION is not defined. It will default to 200 (OpenCL 2.0)")
396 # define CL_HPP_TARGET_OPENCL_VERSION 200
398 #if CL_HPP_TARGET_OPENCL_VERSION != 100 && CL_HPP_TARGET_OPENCL_VERSION != 110 && CL_HPP_TARGET_OPENCL_VERSION != 120 && CL_HPP_TARGET_OPENCL_VERSION != 200
399 # pragma message("cl2.hpp: CL_HPP_TARGET_OPENCL_VERSION is not a valid value (100, 110, 120 or 200). It will be set to 200")
400 # undef CL_HPP_TARGET_OPENCL_VERSION
401 # define CL_HPP_TARGET_OPENCL_VERSION 200
404 #if !defined(CL_HPP_MINIMUM_OPENCL_VERSION)
405 # define CL_HPP_MINIMUM_OPENCL_VERSION 200
407 #if CL_HPP_MINIMUM_OPENCL_VERSION != 100 && CL_HPP_MINIMUM_OPENCL_VERSION != 110 && CL_HPP_MINIMUM_OPENCL_VERSION != 120 && CL_HPP_MINIMUM_OPENCL_VERSION != 200
408 # pragma message("cl2.hpp: CL_HPP_MINIMUM_OPENCL_VERSION is not a valid value (100, 110, 120 or 200). It will be set to 100")
409 # undef CL_HPP_MINIMUM_OPENCL_VERSION
410 # define CL_HPP_MINIMUM_OPENCL_VERSION 100
412 #if CL_HPP_MINIMUM_OPENCL_VERSION > CL_HPP_TARGET_OPENCL_VERSION
413 # error "CL_HPP_MINIMUM_OPENCL_VERSION must not be greater than CL_HPP_TARGET_OPENCL_VERSION"
416 #if CL_HPP_MINIMUM_OPENCL_VERSION <= 100 && !defined(CL_USE_DEPRECATED_OPENCL_1_0_APIS)
417 # define CL_USE_DEPRECATED_OPENCL_1_0_APIS
419 #if CL_HPP_MINIMUM_OPENCL_VERSION <= 110 && !defined(CL_USE_DEPRECATED_OPENCL_1_1_APIS)
420 # define CL_USE_DEPRECATED_OPENCL_1_1_APIS
422 #if CL_HPP_MINIMUM_OPENCL_VERSION <= 120 && !defined(CL_USE_DEPRECATED_OPENCL_1_2_APIS)
423 # define CL_USE_DEPRECATED_OPENCL_1_2_APIS
425 #if CL_HPP_MINIMUM_OPENCL_VERSION <= 200 && !defined(CL_USE_DEPRECATED_OPENCL_2_0_APIS)
426 # define CL_USE_DEPRECATED_OPENCL_2_0_APIS
433 #if defined(CL_HPP_USE_DX_INTEROP)
434 #include <CL/cl_d3d10.h>
435 #include <CL/cl_dx9_media_sharing.h>
439 #if defined(_MSC_VER)
443 // Check for a valid C++ version
445 // Need to do both tests here because for some reason __cplusplus is not
446 // updated in visual studio
447 #if (!defined(_MSC_VER) && __cplusplus < 201103L) || (defined(_MSC_VER) && _MSC_VER < 1700)
448 #error Visual studio 2013 or another C++11-supporting compiler required
452 #if defined(CL_HPP_USE_CL_DEVICE_FISSION) || defined(CL_HPP_USE_CL_SUB_GROUPS_KHR)
453 #include <CL/cl_ext.h>
456 #if defined(__APPLE__) || defined(__MACOSX)
457 #include <OpenCL/opencl.h>
459 #include <CL/opencl.h>
462 #if (__cplusplus >= 201103L)
463 #define CL_HPP_NOEXCEPT_ noexcept
465 #define CL_HPP_NOEXCEPT_
468 #if defined(_MSC_VER)
469 # define CL_HPP_DEFINE_STATIC_MEMBER_ __declspec(selectany)
471 # define CL_HPP_DEFINE_STATIC_MEMBER_ __attribute__((weak))
474 // Define deprecated prefixes and suffixes to ensure compilation
475 // in case they are not pre-defined
476 #if !defined(CL_EXT_PREFIX__VERSION_1_1_DEPRECATED)
477 #define CL_EXT_PREFIX__VERSION_1_1_DEPRECATED
478 #endif // #if !defined(CL_EXT_PREFIX__VERSION_1_1_DEPRECATED)
479 #if !defined(CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED)
480 #define CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED
481 #endif // #if !defined(CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED)
483 #if !defined(CL_EXT_PREFIX__VERSION_1_2_DEPRECATED)
484 #define CL_EXT_PREFIX__VERSION_1_2_DEPRECATED
485 #endif // #if !defined(CL_EXT_PREFIX__VERSION_1_2_DEPRECATED)
486 #if !defined(CL_EXT_SUFFIX__VERSION_1_2_DEPRECATED)
487 #define CL_EXT_SUFFIX__VERSION_1_2_DEPRECATED
488 #endif // #if !defined(CL_EXT_SUFFIX__VERSION_1_2_DEPRECATED)
490 #if !defined(CL_CALLBACK)
499 #include <functional>
502 // Define a size_type to represent a correctly resolved size_t
503 #if defined(CL_HPP_ENABLE_SIZE_T_COMPATIBILITY)
505 using size_type = ::size_t;
507 #else // #if defined(CL_HPP_ENABLE_SIZE_T_COMPATIBILITY)
509 using size_type = size_t;
511 #endif // #if defined(CL_HPP_ENABLE_SIZE_T_COMPATIBILITY)
514 #if defined(CL_HPP_ENABLE_EXCEPTIONS)
516 #endif // #if defined(CL_HPP_ENABLE_EXCEPTIONS)
518 #if !defined(CL_HPP_NO_STD_VECTOR)
521 template < class T, class Alloc = std::allocator<T> >
522 using vector = std::vector<T, Alloc>;
524 #endif // #if !defined(CL_HPP_NO_STD_VECTOR)
526 #if !defined(CL_HPP_NO_STD_STRING)
529 using string = std::string;
531 #endif // #if !defined(CL_HPP_NO_STD_STRING)
533 #if CL_HPP_TARGET_OPENCL_VERSION >= 200
535 #if !defined(CL_HPP_NO_STD_UNIQUE_PTR)
538 // Replace unique_ptr and allocate_pointer for internal use
539 // to allow user to replace them
540 template<class T, class D>
541 using pointer = std::unique_ptr<T, D>;
544 #endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 200
545 #if !defined(CL_HPP_NO_STD_ARRAY)
548 template < class T, size_type N >
549 using array = std::array<T, N>;
551 #endif // #if !defined(CL_HPP_NO_STD_ARRAY)
553 // Define size_type appropriately to allow backward-compatibility
554 // use of the old size_t interface class
555 #if defined(CL_HPP_ENABLE_SIZE_T_COMPATIBILITY)
557 namespace compatibility {
558 /*! \brief class used to interface between C++ and
559 * OpenCL C calls that require arrays of size_t values, whose
560 * size is known statically.
569 //! \brief Initialize size_t to all 0s
572 for (int i = 0; i < N; ++i) {
577 size_t(const array<size_type, N> &rhs)
579 for (int i = 0; i < N; ++i) {
584 size_type& operator[](int index)
589 const size_type& operator[](int index) const
594 //! \brief Conversion operator to T*.
595 operator size_type* () { return data_; }
597 //! \brief Conversion operator to const T*.
598 operator const size_type* () const { return data_; }
600 operator array<size_type, N>() const
602 array<size_type, N> ret;
604 for (int i = 0; i < N; ++i) {
610 } // namespace compatibility
613 using size_t = compatibility::size_t<N>;
615 #endif // #if defined(CL_HPP_ENABLE_SIZE_T_COMPATIBILITY)
617 // Helper alias to avoid confusing the macros
620 using size_t_array = array<size_type, 3>;
621 } // namespace detail
627 * \brief The OpenCL C++ bindings are defined within this namespace.
633 #define CL_HPP_INIT_CL_EXT_FCN_PTR_(name) \
635 pfn_##name = (PFN_##name) \
636 clGetExtensionFunctionAddress(#name); \
641 #define CL_HPP_INIT_CL_EXT_FCN_PTR_PLATFORM_(platform, name) \
643 pfn_##name = (PFN_##name) \
644 clGetExtensionFunctionAddressForPlatform(platform, #name); \
653 class DeviceCommandQueue;
658 #if defined(CL_HPP_ENABLE_EXCEPTIONS)
659 /*! \brief Exception class
661 * This may be thrown by API functions when CL_HPP_ENABLE_EXCEPTIONS is defined.
663 class Error : public std::exception
667 const char * errStr_;
669 /*! \brief Create a new CL error exception for a given error code
670 * and corresponding message.
672 * \param err error code value.
674 * \param errStr a descriptive string that must remain in scope until
675 * handling of the exception has concluded. If set, it
676 * will be returned by what().
678 Error(cl_int err, const char * errStr = NULL) : err_(err), errStr_(errStr)
683 /*! \brief Get error string associated with exception
685 * \return A memory pointer to the error message string.
687 virtual const char * what() const throw ()
689 if (errStr_ == NULL) {
697 /*! \brief Get error code associated with exception
699 * \return The error code.
701 cl_int err(void) const { return err_; }
703 #define CL_HPP_ERR_STR_(x) #x
705 #define CL_HPP_ERR_STR_(x) NULL
706 #endif // CL_HPP_ENABLE_EXCEPTIONS
711 #if defined(CL_HPP_ENABLE_EXCEPTIONS)
712 static inline cl_int errHandler (
714 const char * errStr = NULL)
716 if (err != CL_SUCCESS) {
717 throw Error(err, errStr);
722 static inline cl_int errHandler (cl_int err, const char * errStr = NULL)
724 (void) errStr; // suppress unused variable warning
727 #endif // CL_HPP_ENABLE_EXCEPTIONS
732 //! \cond DOXYGEN_DETAIL
733 #if !defined(CL_HPP_USER_OVERRIDE_ERROR_STRINGS)
734 #define __GET_DEVICE_INFO_ERR CL_HPP_ERR_STR_(clGetDeviceInfo)
735 #define __GET_PLATFORM_INFO_ERR CL_HPP_ERR_STR_(clGetPlatformInfo)
736 #define __GET_DEVICE_IDS_ERR CL_HPP_ERR_STR_(clGetDeviceIDs)
737 #define __GET_PLATFORM_IDS_ERR CL_HPP_ERR_STR_(clGetPlatformIDs)
738 #define __GET_CONTEXT_INFO_ERR CL_HPP_ERR_STR_(clGetContextInfo)
739 #define __GET_EVENT_INFO_ERR CL_HPP_ERR_STR_(clGetEventInfo)
740 #define __GET_EVENT_PROFILE_INFO_ERR CL_HPP_ERR_STR_(clGetEventProfileInfo)
741 #define __GET_MEM_OBJECT_INFO_ERR CL_HPP_ERR_STR_(clGetMemObjectInfo)
742 #define __GET_IMAGE_INFO_ERR CL_HPP_ERR_STR_(clGetImageInfo)
743 #define __GET_SAMPLER_INFO_ERR CL_HPP_ERR_STR_(clGetSamplerInfo)
744 #define __GET_KERNEL_INFO_ERR CL_HPP_ERR_STR_(clGetKernelInfo)
745 #if CL_HPP_TARGET_OPENCL_VERSION >= 120
746 #define __GET_KERNEL_ARG_INFO_ERR CL_HPP_ERR_STR_(clGetKernelArgInfo)
747 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
748 #define __GET_KERNEL_WORK_GROUP_INFO_ERR CL_HPP_ERR_STR_(clGetKernelWorkGroupInfo)
749 #define __GET_PROGRAM_INFO_ERR CL_HPP_ERR_STR_(clGetProgramInfo)
750 #define __GET_PROGRAM_BUILD_INFO_ERR CL_HPP_ERR_STR_(clGetProgramBuildInfo)
751 #define __GET_COMMAND_QUEUE_INFO_ERR CL_HPP_ERR_STR_(clGetCommandQueueInfo)
753 #define __CREATE_CONTEXT_ERR CL_HPP_ERR_STR_(clCreateContext)
754 #define __CREATE_CONTEXT_FROM_TYPE_ERR CL_HPP_ERR_STR_(clCreateContextFromType)
755 #define __GET_SUPPORTED_IMAGE_FORMATS_ERR CL_HPP_ERR_STR_(clGetSupportedImageFormats)
757 #define __CREATE_BUFFER_ERR CL_HPP_ERR_STR_(clCreateBuffer)
758 #define __COPY_ERR CL_HPP_ERR_STR_(cl::copy)
759 #define __CREATE_SUBBUFFER_ERR CL_HPP_ERR_STR_(clCreateSubBuffer)
760 #define __CREATE_GL_BUFFER_ERR CL_HPP_ERR_STR_(clCreateFromGLBuffer)
761 #define __CREATE_GL_RENDER_BUFFER_ERR CL_HPP_ERR_STR_(clCreateFromGLBuffer)
762 #define __GET_GL_OBJECT_INFO_ERR CL_HPP_ERR_STR_(clGetGLObjectInfo)
763 #if CL_HPP_TARGET_OPENCL_VERSION >= 120
764 #define __CREATE_IMAGE_ERR CL_HPP_ERR_STR_(clCreateImage)
765 #define __CREATE_GL_TEXTURE_ERR CL_HPP_ERR_STR_(clCreateFromGLTexture)
766 #define __IMAGE_DIMENSION_ERR CL_HPP_ERR_STR_(Incorrect image dimensions)
767 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
768 #define __SET_MEM_OBJECT_DESTRUCTOR_CALLBACK_ERR CL_HPP_ERR_STR_(clSetMemObjectDestructorCallback)
770 #define __CREATE_USER_EVENT_ERR CL_HPP_ERR_STR_(clCreateUserEvent)
771 #define __SET_USER_EVENT_STATUS_ERR CL_HPP_ERR_STR_(clSetUserEventStatus)
772 #define __SET_EVENT_CALLBACK_ERR CL_HPP_ERR_STR_(clSetEventCallback)
773 #define __WAIT_FOR_EVENTS_ERR CL_HPP_ERR_STR_(clWaitForEvents)
775 #define __CREATE_KERNEL_ERR CL_HPP_ERR_STR_(clCreateKernel)
776 #define __SET_KERNEL_ARGS_ERR CL_HPP_ERR_STR_(clSetKernelArg)
777 #define __CREATE_PROGRAM_WITH_SOURCE_ERR CL_HPP_ERR_STR_(clCreateProgramWithSource)
778 #define __CREATE_PROGRAM_WITH_BINARY_ERR CL_HPP_ERR_STR_(clCreateProgramWithBinary)
779 #if CL_HPP_TARGET_OPENCL_VERSION >= 120
780 #define __CREATE_PROGRAM_WITH_BUILT_IN_KERNELS_ERR CL_HPP_ERR_STR_(clCreateProgramWithBuiltInKernels)
781 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
782 #define __BUILD_PROGRAM_ERR CL_HPP_ERR_STR_(clBuildProgram)
783 #if CL_HPP_TARGET_OPENCL_VERSION >= 120
784 #define __COMPILE_PROGRAM_ERR CL_HPP_ERR_STR_(clCompileProgram)
785 #define __LINK_PROGRAM_ERR CL_HPP_ERR_STR_(clLinkProgram)
786 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
787 #define __CREATE_KERNELS_IN_PROGRAM_ERR CL_HPP_ERR_STR_(clCreateKernelsInProgram)
789 #if CL_HPP_TARGET_OPENCL_VERSION >= 200
790 #define __CREATE_COMMAND_QUEUE_WITH_PROPERTIES_ERR CL_HPP_ERR_STR_(clCreateCommandQueueWithProperties)
791 #define __CREATE_SAMPLER_WITH_PROPERTIES_ERR CL_HPP_ERR_STR_(clCreateSamplerWithProperties)
792 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 200
793 #define __SET_COMMAND_QUEUE_PROPERTY_ERR CL_HPP_ERR_STR_(clSetCommandQueueProperty)
794 #define __ENQUEUE_READ_BUFFER_ERR CL_HPP_ERR_STR_(clEnqueueReadBuffer)
795 #define __ENQUEUE_READ_BUFFER_RECT_ERR CL_HPP_ERR_STR_(clEnqueueReadBufferRect)
796 #define __ENQUEUE_WRITE_BUFFER_ERR CL_HPP_ERR_STR_(clEnqueueWriteBuffer)
797 #define __ENQUEUE_WRITE_BUFFER_RECT_ERR CL_HPP_ERR_STR_(clEnqueueWriteBufferRect)
798 #define __ENQEUE_COPY_BUFFER_ERR CL_HPP_ERR_STR_(clEnqueueCopyBuffer)
799 #define __ENQEUE_COPY_BUFFER_RECT_ERR CL_HPP_ERR_STR_(clEnqueueCopyBufferRect)
800 #define __ENQUEUE_FILL_BUFFER_ERR CL_HPP_ERR_STR_(clEnqueueFillBuffer)
801 #define __ENQUEUE_READ_IMAGE_ERR CL_HPP_ERR_STR_(clEnqueueReadImage)
802 #define __ENQUEUE_WRITE_IMAGE_ERR CL_HPP_ERR_STR_(clEnqueueWriteImage)
803 #define __ENQUEUE_COPY_IMAGE_ERR CL_HPP_ERR_STR_(clEnqueueCopyImage)
804 #define __ENQUEUE_FILL_IMAGE_ERR CL_HPP_ERR_STR_(clEnqueueFillImage)
805 #define __ENQUEUE_COPY_IMAGE_TO_BUFFER_ERR CL_HPP_ERR_STR_(clEnqueueCopyImageToBuffer)
806 #define __ENQUEUE_COPY_BUFFER_TO_IMAGE_ERR CL_HPP_ERR_STR_(clEnqueueCopyBufferToImage)
807 #define __ENQUEUE_MAP_BUFFER_ERR CL_HPP_ERR_STR_(clEnqueueMapBuffer)
808 #define __ENQUEUE_MAP_IMAGE_ERR CL_HPP_ERR_STR_(clEnqueueMapImage)
809 #define __ENQUEUE_UNMAP_MEM_OBJECT_ERR CL_HPP_ERR_STR_(clEnqueueUnMapMemObject)
810 #define __ENQUEUE_NDRANGE_KERNEL_ERR CL_HPP_ERR_STR_(clEnqueueNDRangeKernel)
811 #define __ENQUEUE_NATIVE_KERNEL CL_HPP_ERR_STR_(clEnqueueNativeKernel)
812 #if CL_HPP_TARGET_OPENCL_VERSION >= 120
813 #define __ENQUEUE_MIGRATE_MEM_OBJECTS_ERR CL_HPP_ERR_STR_(clEnqueueMigrateMemObjects)
814 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
816 #define __ENQUEUE_ACQUIRE_GL_ERR CL_HPP_ERR_STR_(clEnqueueAcquireGLObjects)
817 #define __ENQUEUE_RELEASE_GL_ERR CL_HPP_ERR_STR_(clEnqueueReleaseGLObjects)
819 #define __CREATE_PIPE_ERR CL_HPP_ERR_STR_(clCreatePipe)
820 #define __GET_PIPE_INFO_ERR CL_HPP_ERR_STR_(clGetPipeInfo)
823 #define __RETAIN_ERR CL_HPP_ERR_STR_(Retain Object)
824 #define __RELEASE_ERR CL_HPP_ERR_STR_(Release Object)
825 #define __FLUSH_ERR CL_HPP_ERR_STR_(clFlush)
826 #define __FINISH_ERR CL_HPP_ERR_STR_(clFinish)
827 #define __VECTOR_CAPACITY_ERR CL_HPP_ERR_STR_(Vector capacity error)
830 * CL 1.2 version that uses device fission.
832 #if CL_HPP_TARGET_OPENCL_VERSION >= 120
833 #define __CREATE_SUB_DEVICES_ERR CL_HPP_ERR_STR_(clCreateSubDevices)
835 #define __CREATE_SUB_DEVICES_ERR CL_HPP_ERR_STR_(clCreateSubDevicesEXT)
836 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
839 * Deprecated APIs for 1.2
841 #if defined(CL_USE_DEPRECATED_OPENCL_1_1_APIS)
842 #define __ENQUEUE_MARKER_ERR CL_HPP_ERR_STR_(clEnqueueMarker)
843 #define __ENQUEUE_WAIT_FOR_EVENTS_ERR CL_HPP_ERR_STR_(clEnqueueWaitForEvents)
844 #define __ENQUEUE_BARRIER_ERR CL_HPP_ERR_STR_(clEnqueueBarrier)
845 #define __UNLOAD_COMPILER_ERR CL_HPP_ERR_STR_(clUnloadCompiler)
846 #define __CREATE_GL_TEXTURE_2D_ERR CL_HPP_ERR_STR_(clCreateFromGLTexture2D)
847 #define __CREATE_GL_TEXTURE_3D_ERR CL_HPP_ERR_STR_(clCreateFromGLTexture3D)
848 #define __CREATE_IMAGE2D_ERR CL_HPP_ERR_STR_(clCreateImage2D)
849 #define __CREATE_IMAGE3D_ERR CL_HPP_ERR_STR_(clCreateImage3D)
850 #endif // #if defined(CL_USE_DEPRECATED_OPENCL_1_1_APIS)
853 * Deprecated APIs for 2.0
855 #if defined(CL_USE_DEPRECATED_OPENCL_1_2_APIS)
856 #define __CREATE_COMMAND_QUEUE_ERR CL_HPP_ERR_STR_(clCreateCommandQueue)
857 #define __ENQUEUE_TASK_ERR CL_HPP_ERR_STR_(clEnqueueTask)
858 #define __CREATE_SAMPLER_ERR CL_HPP_ERR_STR_(clCreateSampler)
859 #endif // #if defined(CL_USE_DEPRECATED_OPENCL_1_1_APIS)
862 * CL 1.2 marker and barrier commands
864 #if CL_HPP_TARGET_OPENCL_VERSION >= 120
865 #define __ENQUEUE_MARKER_WAIT_LIST_ERR CL_HPP_ERR_STR_(clEnqueueMarkerWithWaitList)
866 #define __ENQUEUE_BARRIER_WAIT_LIST_ERR CL_HPP_ERR_STR_(clEnqueueBarrierWithWaitList)
867 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
869 #endif // CL_HPP_USER_OVERRIDE_ERROR_STRINGS
875 // Generic getInfoHelper. The final parameter is used to guide overload
876 // resolution: the actual parameter passed is an int, which makes this
877 // a worse conversion sequence than a specialization that declares the
878 // parameter as an int.
879 template<typename Functor, typename T>
880 inline cl_int getInfoHelper(Functor f, cl_uint name, T* param, long)
882 return f(name, sizeof(T), param, NULL);
885 // Specialized for getInfo<CL_PROGRAM_BINARIES>
886 // Assumes that the output vector was correctly resized on the way in
887 template <typename Func>
888 inline cl_int getInfoHelper(Func f, cl_uint name, vector<vector<unsigned char>>* param, int)
890 if (name != CL_PROGRAM_BINARIES) {
891 return CL_INVALID_VALUE;
894 // Create array of pointers, calculate total size and pass pointer array in
895 size_type numBinaries = param->size();
896 vector<unsigned char*> binariesPointers(numBinaries);
898 size_type totalSize = 0;
899 for (size_type i = 0; i < numBinaries; ++i)
901 binariesPointers[i] = (*param)[i].data();
902 totalSize += (*param)[i].size();
905 cl_int err = f(name, totalSize, binariesPointers.data(), NULL);
907 if (err != CL_SUCCESS) {
916 // Specialized getInfoHelper for vector params
917 template <typename Func, typename T>
918 inline cl_int getInfoHelper(Func f, cl_uint name, vector<T>* param, long)
921 cl_int err = f(name, 0, NULL, &required);
922 if (err != CL_SUCCESS) {
925 const size_type elements = required / sizeof(T);
927 // Temporary to avoid changing param on an error
928 vector<T> localData(elements);
929 err = f(name, required, localData.data(), NULL);
930 if (err != CL_SUCCESS) {
934 *param = std::move(localData);
940 /* Specialization for reference-counted types. This depends on the
941 * existence of Wrapper<T>::cl_type, and none of the other types having the
942 * cl_type member. Note that simplify specifying the parameter as Wrapper<T>
943 * does not work, because when using a derived type (e.g. Context) the generic
944 * template will provide a better match.
946 template <typename Func, typename T>
947 inline cl_int getInfoHelper(
948 Func f, cl_uint name, vector<T>* param, int, typename T::cl_type = 0)
951 cl_int err = f(name, 0, NULL, &required);
952 if (err != CL_SUCCESS) {
956 const size_type elements = required / sizeof(typename T::cl_type);
958 vector<typename T::cl_type> value(elements);
959 err = f(name, required, value.data(), NULL);
960 if (err != CL_SUCCESS) {
965 // Assign to convert CL type to T for each element
966 param->resize(elements);
968 // Assign to param, constructing with retain behaviour
969 // to correctly capture each underlying CL object
970 for (size_type i = 0; i < elements; i++) {
971 (*param)[i] = T(value[i], true);
977 // Specialized GetInfoHelper for string params
978 template <typename Func>
979 inline cl_int getInfoHelper(Func f, cl_uint name, string* param, long)
982 cl_int err = f(name, 0, NULL, &required);
983 if (err != CL_SUCCESS) {
987 // std::string has a constant data member
988 // a char vector does not
990 vector<char> value(required);
991 err = f(name, required, value.data(), NULL);
992 if (err != CL_SUCCESS) {
996 param->assign(begin(value), prev(end(value)));
1005 // Specialized GetInfoHelper for clsize_t params
1006 template <typename Func, size_type N>
1007 inline cl_int getInfoHelper(Func f, cl_uint name, array<size_type, N>* param, long)
1010 cl_int err = f(name, 0, NULL, &required);
1011 if (err != CL_SUCCESS) {
1015 size_type elements = required / sizeof(size_type);
1016 vector<size_type> value(elements, 0);
1018 err = f(name, required, value.data(), NULL);
1019 if (err != CL_SUCCESS) {
1023 // Bound the copy with N to prevent overruns
1024 // if passed N > than the amount copied
1028 for (size_type i = 0; i < elements; ++i) {
1029 (*param)[i] = value[i];
// Forward declaration: specialized per OpenCL handle type below to supply
// the matching clRetain*/clRelease* calls used by detail::Wrapper.
template<typename T> struct ReferenceHandler;
1037 /* Specialization for reference-counted types. This depends on the
1038 * existence of Wrapper<T>::cl_type, and none of the other types having the
1039 * cl_type member. Note that simplify specifying the parameter as Wrapper<T>
1040 * does not work, because when using a derived type (e.g. Context) the generic
1041 * template will provide a better match.
1043 template<typename Func, typename T>
1044 inline cl_int getInfoHelper(Func f, cl_uint name, T* param, int, typename T::cl_type = 0)
1046 typename T::cl_type value;
1047 cl_int err = f(name, sizeof(value), &value, NULL);
1048 if (err != CL_SUCCESS) {
1054 err = param->retain();
1055 if (err != CL_SUCCESS) {
// X-macro table for OpenCL 1.0 queries: invoked with a functor-macro
// F(enum_type, param_name, T) to declare one param_traits<> specialization
// per query token, mapping it to the C++ type getInfo<>() should return.
#define CL_HPP_PARAM_NAME_INFO_1_0_(F) \
    F(cl_platform_info, CL_PLATFORM_PROFILE, string) \
    F(cl_platform_info, CL_PLATFORM_VERSION, string) \
    F(cl_platform_info, CL_PLATFORM_NAME, string) \
    F(cl_platform_info, CL_PLATFORM_VENDOR, string) \
    F(cl_platform_info, CL_PLATFORM_EXTENSIONS, string) \
    F(cl_device_info, CL_DEVICE_TYPE, cl_device_type) \
    F(cl_device_info, CL_DEVICE_VENDOR_ID, cl_uint) \
    F(cl_device_info, CL_DEVICE_MAX_COMPUTE_UNITS, cl_uint) \
    F(cl_device_info, CL_DEVICE_MAX_WORK_ITEM_DIMENSIONS, cl_uint) \
    F(cl_device_info, CL_DEVICE_MAX_WORK_GROUP_SIZE, size_type) \
    F(cl_device_info, CL_DEVICE_MAX_WORK_ITEM_SIZES, cl::vector<size_type>) \
    F(cl_device_info, CL_DEVICE_PREFERRED_VECTOR_WIDTH_CHAR, cl_uint) \
    F(cl_device_info, CL_DEVICE_PREFERRED_VECTOR_WIDTH_SHORT, cl_uint) \
    F(cl_device_info, CL_DEVICE_PREFERRED_VECTOR_WIDTH_INT, cl_uint) \
    F(cl_device_info, CL_DEVICE_PREFERRED_VECTOR_WIDTH_LONG, cl_uint) \
    F(cl_device_info, CL_DEVICE_PREFERRED_VECTOR_WIDTH_FLOAT, cl_uint) \
    F(cl_device_info, CL_DEVICE_PREFERRED_VECTOR_WIDTH_DOUBLE, cl_uint) \
    F(cl_device_info, CL_DEVICE_MAX_CLOCK_FREQUENCY, cl_uint) \
    F(cl_device_info, CL_DEVICE_ADDRESS_BITS, cl_uint) \
    F(cl_device_info, CL_DEVICE_MAX_READ_IMAGE_ARGS, cl_uint) \
    F(cl_device_info, CL_DEVICE_MAX_WRITE_IMAGE_ARGS, cl_uint) \
    F(cl_device_info, CL_DEVICE_MAX_MEM_ALLOC_SIZE, cl_ulong) \
    F(cl_device_info, CL_DEVICE_IMAGE2D_MAX_WIDTH, size_type) \
    F(cl_device_info, CL_DEVICE_IMAGE2D_MAX_HEIGHT, size_type) \
    F(cl_device_info, CL_DEVICE_IMAGE3D_MAX_WIDTH, size_type) \
    F(cl_device_info, CL_DEVICE_IMAGE3D_MAX_HEIGHT, size_type) \
    F(cl_device_info, CL_DEVICE_IMAGE3D_MAX_DEPTH, size_type) \
    F(cl_device_info, CL_DEVICE_IMAGE_SUPPORT, cl_bool) \
    F(cl_device_info, CL_DEVICE_MAX_PARAMETER_SIZE, size_type) \
    F(cl_device_info, CL_DEVICE_MAX_SAMPLERS, cl_uint) \
    F(cl_device_info, CL_DEVICE_MEM_BASE_ADDR_ALIGN, cl_uint) \
    F(cl_device_info, CL_DEVICE_MIN_DATA_TYPE_ALIGN_SIZE, cl_uint) \
    F(cl_device_info, CL_DEVICE_SINGLE_FP_CONFIG, cl_device_fp_config) \
    F(cl_device_info, CL_DEVICE_GLOBAL_MEM_CACHE_TYPE, cl_device_mem_cache_type) \
    F(cl_device_info, CL_DEVICE_GLOBAL_MEM_CACHELINE_SIZE, cl_uint)\
    F(cl_device_info, CL_DEVICE_GLOBAL_MEM_CACHE_SIZE, cl_ulong) \
    F(cl_device_info, CL_DEVICE_GLOBAL_MEM_SIZE, cl_ulong) \
    F(cl_device_info, CL_DEVICE_MAX_CONSTANT_BUFFER_SIZE, cl_ulong) \
    F(cl_device_info, CL_DEVICE_MAX_CONSTANT_ARGS, cl_uint) \
    F(cl_device_info, CL_DEVICE_LOCAL_MEM_TYPE, cl_device_local_mem_type) \
    F(cl_device_info, CL_DEVICE_LOCAL_MEM_SIZE, cl_ulong) \
    F(cl_device_info, CL_DEVICE_ERROR_CORRECTION_SUPPORT, cl_bool) \
    F(cl_device_info, CL_DEVICE_PROFILING_TIMER_RESOLUTION, size_type) \
    F(cl_device_info, CL_DEVICE_ENDIAN_LITTLE, cl_bool) \
    F(cl_device_info, CL_DEVICE_AVAILABLE, cl_bool) \
    F(cl_device_info, CL_DEVICE_COMPILER_AVAILABLE, cl_bool) \
    F(cl_device_info, CL_DEVICE_EXECUTION_CAPABILITIES, cl_device_exec_capabilities) \
    F(cl_device_info, CL_DEVICE_PLATFORM, cl_platform_id) \
    F(cl_device_info, CL_DEVICE_NAME, string) \
    F(cl_device_info, CL_DEVICE_VENDOR, string) \
    F(cl_device_info, CL_DRIVER_VERSION, string) \
    F(cl_device_info, CL_DEVICE_PROFILE, string) \
    F(cl_device_info, CL_DEVICE_VERSION, string) \
    F(cl_device_info, CL_DEVICE_EXTENSIONS, string) \
    F(cl_context_info, CL_CONTEXT_REFERENCE_COUNT, cl_uint) \
    F(cl_context_info, CL_CONTEXT_DEVICES, cl::vector<Device>) \
    F(cl_context_info, CL_CONTEXT_PROPERTIES, cl::vector<cl_context_properties>) \
    F(cl_event_info, CL_EVENT_COMMAND_QUEUE, cl::CommandQueue) \
    F(cl_event_info, CL_EVENT_COMMAND_TYPE, cl_command_type) \
    F(cl_event_info, CL_EVENT_REFERENCE_COUNT, cl_uint) \
    F(cl_event_info, CL_EVENT_COMMAND_EXECUTION_STATUS, cl_int) \
    F(cl_profiling_info, CL_PROFILING_COMMAND_QUEUED, cl_ulong) \
    F(cl_profiling_info, CL_PROFILING_COMMAND_SUBMIT, cl_ulong) \
    F(cl_profiling_info, CL_PROFILING_COMMAND_START, cl_ulong) \
    F(cl_profiling_info, CL_PROFILING_COMMAND_END, cl_ulong) \
    F(cl_mem_info, CL_MEM_TYPE, cl_mem_object_type) \
    F(cl_mem_info, CL_MEM_FLAGS, cl_mem_flags) \
    F(cl_mem_info, CL_MEM_SIZE, size_type) \
    F(cl_mem_info, CL_MEM_HOST_PTR, void*) \
    F(cl_mem_info, CL_MEM_MAP_COUNT, cl_uint) \
    F(cl_mem_info, CL_MEM_REFERENCE_COUNT, cl_uint) \
    F(cl_mem_info, CL_MEM_CONTEXT, cl::Context) \
    F(cl_image_info, CL_IMAGE_FORMAT, cl_image_format) \
    F(cl_image_info, CL_IMAGE_ELEMENT_SIZE, size_type) \
    F(cl_image_info, CL_IMAGE_ROW_PITCH, size_type) \
    F(cl_image_info, CL_IMAGE_SLICE_PITCH, size_type) \
    F(cl_image_info, CL_IMAGE_WIDTH, size_type) \
    F(cl_image_info, CL_IMAGE_HEIGHT, size_type) \
    F(cl_image_info, CL_IMAGE_DEPTH, size_type) \
    F(cl_sampler_info, CL_SAMPLER_REFERENCE_COUNT, cl_uint) \
    F(cl_sampler_info, CL_SAMPLER_CONTEXT, cl::Context) \
    F(cl_sampler_info, CL_SAMPLER_NORMALIZED_COORDS, cl_bool) \
    F(cl_sampler_info, CL_SAMPLER_ADDRESSING_MODE, cl_addressing_mode) \
    F(cl_sampler_info, CL_SAMPLER_FILTER_MODE, cl_filter_mode) \
    F(cl_program_info, CL_PROGRAM_REFERENCE_COUNT, cl_uint) \
    F(cl_program_info, CL_PROGRAM_CONTEXT, cl::Context) \
    F(cl_program_info, CL_PROGRAM_NUM_DEVICES, cl_uint) \
    F(cl_program_info, CL_PROGRAM_DEVICES, cl::vector<Device>) \
    F(cl_program_info, CL_PROGRAM_SOURCE, string) \
    F(cl_program_info, CL_PROGRAM_BINARY_SIZES, cl::vector<size_type>) \
    F(cl_program_info, CL_PROGRAM_BINARIES, cl::vector<cl::vector<unsigned char>>) \
    F(cl_program_build_info, CL_PROGRAM_BUILD_STATUS, cl_build_status) \
    F(cl_program_build_info, CL_PROGRAM_BUILD_OPTIONS, string) \
    F(cl_program_build_info, CL_PROGRAM_BUILD_LOG, string) \
    F(cl_kernel_info, CL_KERNEL_FUNCTION_NAME, string) \
    F(cl_kernel_info, CL_KERNEL_NUM_ARGS, cl_uint) \
    F(cl_kernel_info, CL_KERNEL_REFERENCE_COUNT, cl_uint) \
    F(cl_kernel_info, CL_KERNEL_CONTEXT, cl::Context) \
    F(cl_kernel_info, CL_KERNEL_PROGRAM, cl::Program) \
    F(cl_kernel_work_group_info, CL_KERNEL_WORK_GROUP_SIZE, size_type) \
    F(cl_kernel_work_group_info, CL_KERNEL_COMPILE_WORK_GROUP_SIZE, cl::detail::size_t_array) \
    F(cl_kernel_work_group_info, CL_KERNEL_LOCAL_MEM_SIZE, cl_ulong) \
    F(cl_command_queue_info, CL_QUEUE_CONTEXT, cl::Context) \
    F(cl_command_queue_info, CL_QUEUE_DEVICE, cl::Device) \
    F(cl_command_queue_info, CL_QUEUE_REFERENCE_COUNT, cl_uint) \
    F(cl_command_queue_info, CL_QUEUE_PROPERTIES, cl_command_queue_properties)
// X-macro table for queries added in OpenCL 1.1; same F(enum_type,
// param_name, T) protocol as CL_HPP_PARAM_NAME_INFO_1_0_.
#define CL_HPP_PARAM_NAME_INFO_1_1_(F) \
    F(cl_context_info, CL_CONTEXT_NUM_DEVICES, cl_uint)\
    F(cl_device_info, CL_DEVICE_PREFERRED_VECTOR_WIDTH_HALF, cl_uint) \
    F(cl_device_info, CL_DEVICE_NATIVE_VECTOR_WIDTH_CHAR, cl_uint) \
    F(cl_device_info, CL_DEVICE_NATIVE_VECTOR_WIDTH_SHORT, cl_uint) \
    F(cl_device_info, CL_DEVICE_NATIVE_VECTOR_WIDTH_INT, cl_uint) \
    F(cl_device_info, CL_DEVICE_NATIVE_VECTOR_WIDTH_LONG, cl_uint) \
    F(cl_device_info, CL_DEVICE_NATIVE_VECTOR_WIDTH_FLOAT, cl_uint) \
    F(cl_device_info, CL_DEVICE_NATIVE_VECTOR_WIDTH_DOUBLE, cl_uint) \
    F(cl_device_info, CL_DEVICE_NATIVE_VECTOR_WIDTH_HALF, cl_uint) \
    F(cl_device_info, CL_DEVICE_DOUBLE_FP_CONFIG, cl_device_fp_config) \
    F(cl_device_info, CL_DEVICE_HALF_FP_CONFIG, cl_device_fp_config) \
    F(cl_device_info, CL_DEVICE_OPENCL_C_VERSION, string) \
    F(cl_mem_info, CL_MEM_ASSOCIATED_MEMOBJECT, cl::Memory) \
    F(cl_mem_info, CL_MEM_OFFSET, size_type) \
    F(cl_kernel_work_group_info, CL_KERNEL_PREFERRED_WORK_GROUP_SIZE_MULTIPLE, size_type) \
    F(cl_kernel_work_group_info, CL_KERNEL_PRIVATE_MEM_SIZE, cl_ulong) \
    F(cl_event_info, CL_EVENT_CONTEXT, cl::Context)
// X-macro table for queries added in OpenCL 1.2; same F(enum_type,
// param_name, T) protocol as CL_HPP_PARAM_NAME_INFO_1_0_.
// NOTE: CL_DEVICE_PREFERRED_INTEROP_USER_SYNC is cl_bool per the OpenCL 1.2
// specification; it was previously declared size_type here, which read a
// 4-byte result into an uninitialized 8-byte value (garbage high bytes).
#define CL_HPP_PARAM_NAME_INFO_1_2_(F) \
    F(cl_program_info, CL_PROGRAM_NUM_KERNELS, size_type) \
    F(cl_program_info, CL_PROGRAM_KERNEL_NAMES, string) \
    F(cl_program_build_info, CL_PROGRAM_BINARY_TYPE, cl_program_binary_type) \
    F(cl_kernel_info, CL_KERNEL_ATTRIBUTES, string) \
    F(cl_kernel_arg_info, CL_KERNEL_ARG_ADDRESS_QUALIFIER, cl_kernel_arg_address_qualifier) \
    F(cl_kernel_arg_info, CL_KERNEL_ARG_ACCESS_QUALIFIER, cl_kernel_arg_access_qualifier) \
    F(cl_kernel_arg_info, CL_KERNEL_ARG_TYPE_NAME, string) \
    F(cl_kernel_arg_info, CL_KERNEL_ARG_NAME, string) \
    F(cl_kernel_arg_info, CL_KERNEL_ARG_TYPE_QUALIFIER, cl_kernel_arg_type_qualifier) \
    F(cl_device_info, CL_DEVICE_PARENT_DEVICE, cl::Device) \
    F(cl_device_info, CL_DEVICE_PARTITION_PROPERTIES, cl::vector<cl_device_partition_property>) \
    F(cl_device_info, CL_DEVICE_PARTITION_TYPE, cl::vector<cl_device_partition_property>) \
    F(cl_device_info, CL_DEVICE_REFERENCE_COUNT, cl_uint) \
    F(cl_device_info, CL_DEVICE_PREFERRED_INTEROP_USER_SYNC, cl_bool) \
    F(cl_device_info, CL_DEVICE_PARTITION_AFFINITY_DOMAIN, cl_device_affinity_domain) \
    F(cl_device_info, CL_DEVICE_BUILT_IN_KERNELS, string) \
    F(cl_image_info, CL_IMAGE_ARRAY_SIZE, size_type) \
    F(cl_image_info, CL_IMAGE_NUM_MIP_LEVELS, cl_uint) \
    F(cl_image_info, CL_IMAGE_NUM_SAMPLES, cl_uint)
// X-macro table for queries added in OpenCL 2.0; same F(enum_type,
// param_name, T) protocol as CL_HPP_PARAM_NAME_INFO_1_0_.
#define CL_HPP_PARAM_NAME_INFO_2_0_(F) \
    F(cl_device_info, CL_DEVICE_QUEUE_ON_HOST_PROPERTIES, cl_command_queue_properties) \
    F(cl_device_info, CL_DEVICE_QUEUE_ON_DEVICE_PROPERTIES, cl_command_queue_properties) \
    F(cl_device_info, CL_DEVICE_QUEUE_ON_DEVICE_PREFERRED_SIZE, cl_uint) \
    F(cl_device_info, CL_DEVICE_QUEUE_ON_DEVICE_MAX_SIZE, cl_uint) \
    F(cl_device_info, CL_DEVICE_MAX_ON_DEVICE_QUEUES, cl_uint) \
    F(cl_device_info, CL_DEVICE_MAX_ON_DEVICE_EVENTS, cl_uint) \
    F(cl_device_info, CL_DEVICE_MAX_PIPE_ARGS, cl_uint) \
    F(cl_device_info, CL_DEVICE_PIPE_MAX_ACTIVE_RESERVATIONS, cl_uint) \
    F(cl_device_info, CL_DEVICE_PIPE_MAX_PACKET_SIZE, cl_uint) \
    F(cl_device_info, CL_DEVICE_SVM_CAPABILITIES, cl_device_svm_capabilities) \
    F(cl_device_info, CL_DEVICE_PREFERRED_PLATFORM_ATOMIC_ALIGNMENT, cl_uint) \
    F(cl_device_info, CL_DEVICE_PREFERRED_GLOBAL_ATOMIC_ALIGNMENT, cl_uint) \
    F(cl_device_info, CL_DEVICE_PREFERRED_LOCAL_ATOMIC_ALIGNMENT, cl_uint) \
    F(cl_command_queue_info, CL_QUEUE_SIZE, cl_uint) \
    F(cl_mem_info, CL_MEM_USES_SVM_POINTER, cl_bool) \
    F(cl_program_build_info, CL_PROGRAM_BUILD_GLOBAL_VARIABLE_TOTAL_SIZE, size_type) \
    F(cl_pipe_info, CL_PIPE_PACKET_SIZE, cl_uint) \
    F(cl_pipe_info, CL_PIPE_MAX_PACKETS, cl_uint)
// X-macro table for the cl_ext_device_fission extension queries; only
// instantiated when CL_HPP_USE_CL_DEVICE_FISSION is defined.
#define CL_HPP_PARAM_NAME_DEVICE_FISSION_(F) \
    F(cl_device_info, CL_DEVICE_PARENT_DEVICE_EXT, cl_device_id) \
    F(cl_device_info, CL_DEVICE_PARTITION_TYPES_EXT, cl::vector<cl_device_partition_property_ext>) \
    F(cl_device_info, CL_DEVICE_AFFINITY_DOMAINS_EXT, cl::vector<cl_device_partition_property_ext>) \
    F(cl_device_info, CL_DEVICE_REFERENCE_COUNT_EXT , cl_uint) \
    F(cl_device_info, CL_DEVICE_PARTITION_STYLE_EXT, cl::vector<cl_device_partition_property_ext>)
1258 template <typename enum_type, cl_int Name>
1259 struct param_traits {};
1261 #define CL_HPP_DECLARE_PARAM_TRAITS_(token, param_name, T) \
1264 struct param_traits<detail:: token,param_name> \
1266 enum { value = param_name }; \
1267 typedef T param_type; \
// Instantiate param_traits for every query supported by the targeted
// OpenCL version. (endif trailer comments corrected to match their #if.)
CL_HPP_PARAM_NAME_INFO_1_0_(CL_HPP_DECLARE_PARAM_TRAITS_)
#if CL_HPP_TARGET_OPENCL_VERSION >= 110
CL_HPP_PARAM_NAME_INFO_1_1_(CL_HPP_DECLARE_PARAM_TRAITS_)
#endif // CL_HPP_TARGET_OPENCL_VERSION >= 110
#if CL_HPP_TARGET_OPENCL_VERSION >= 120
CL_HPP_PARAM_NAME_INFO_1_2_(CL_HPP_DECLARE_PARAM_TRAITS_)
#endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
#if CL_HPP_TARGET_OPENCL_VERSION >= 200
CL_HPP_PARAM_NAME_INFO_2_0_(CL_HPP_DECLARE_PARAM_TRAITS_)
#endif // CL_HPP_TARGET_OPENCL_VERSION >= 200
// Flags deprecated in OpenCL 2.0
// These tables are kept separate so they can be instantiated only when a
// pre-2.0 target still needs them (see the version-gated includes below).
#define CL_HPP_PARAM_NAME_INFO_1_0_DEPRECATED_IN_2_0_(F) \
    F(cl_device_info, CL_DEVICE_QUEUE_PROPERTIES, cl_command_queue_properties)

#define CL_HPP_PARAM_NAME_INFO_1_1_DEPRECATED_IN_2_0_(F) \
    F(cl_device_info, CL_DEVICE_HOST_UNIFIED_MEMORY, cl_bool)

#define CL_HPP_PARAM_NAME_INFO_1_2_DEPRECATED_IN_2_0_(F) \
    F(cl_image_info, CL_IMAGE_BUFFER, cl::Buffer)
// Include deprecated query flags based on versions
// Only include deprecated 1.0 flags if 2.0 not active as there is an enum clash
// (endif trailer comments corrected to match their #if conditions.)
#if CL_HPP_TARGET_OPENCL_VERSION > 100 && CL_HPP_MINIMUM_OPENCL_VERSION < 200 && CL_HPP_TARGET_OPENCL_VERSION < 200
CL_HPP_PARAM_NAME_INFO_1_0_DEPRECATED_IN_2_0_(CL_HPP_DECLARE_PARAM_TRAITS_)
#endif // CL_HPP_TARGET_OPENCL_VERSION > 100 && CL_HPP_MINIMUM_OPENCL_VERSION < 200 && CL_HPP_TARGET_OPENCL_VERSION < 200
#if CL_HPP_TARGET_OPENCL_VERSION > 110 && CL_HPP_MINIMUM_OPENCL_VERSION < 200
CL_HPP_PARAM_NAME_INFO_1_1_DEPRECATED_IN_2_0_(CL_HPP_DECLARE_PARAM_TRAITS_)
#endif // CL_HPP_TARGET_OPENCL_VERSION > 110 && CL_HPP_MINIMUM_OPENCL_VERSION < 200
#if CL_HPP_TARGET_OPENCL_VERSION > 120 && CL_HPP_MINIMUM_OPENCL_VERSION < 200
CL_HPP_PARAM_NAME_INFO_1_2_DEPRECATED_IN_2_0_(CL_HPP_DECLARE_PARAM_TRAITS_)
#endif // CL_HPP_TARGET_OPENCL_VERSION > 120 && CL_HPP_MINIMUM_OPENCL_VERSION < 200

// Optional cl_ext_device_fission extension queries.
#if defined(CL_HPP_USE_CL_DEVICE_FISSION)
CL_HPP_PARAM_NAME_DEVICE_FISSION_(CL_HPP_DECLARE_PARAM_TRAITS_);
#endif // CL_HPP_USE_CL_DEVICE_FISSION
// Vendor/extension query tokens: each trait is declared only when the
// corresponding token is exposed by the CL headers in use. Every #ifdef
// is closed by its matching #endif (the closers were missing here).
#ifdef CL_PLATFORM_ICD_SUFFIX_KHR
CL_HPP_DECLARE_PARAM_TRAITS_(cl_platform_info, CL_PLATFORM_ICD_SUFFIX_KHR, string)
#endif

#ifdef CL_DEVICE_PROFILING_TIMER_OFFSET_AMD
CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_PROFILING_TIMER_OFFSET_AMD, cl_ulong)
#endif

#ifdef CL_DEVICE_GLOBAL_FREE_MEMORY_AMD
CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_GLOBAL_FREE_MEMORY_AMD, vector<size_type>)
#endif
#ifdef CL_DEVICE_SIMD_PER_COMPUTE_UNIT_AMD
CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_SIMD_PER_COMPUTE_UNIT_AMD, cl_uint)
#endif
#ifdef CL_DEVICE_SIMD_WIDTH_AMD
CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_SIMD_WIDTH_AMD, cl_uint)
#endif
#ifdef CL_DEVICE_SIMD_INSTRUCTION_WIDTH_AMD
CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_SIMD_INSTRUCTION_WIDTH_AMD, cl_uint)
#endif
#ifdef CL_DEVICE_WAVEFRONT_WIDTH_AMD
CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_WAVEFRONT_WIDTH_AMD, cl_uint)
#endif
#ifdef CL_DEVICE_GLOBAL_MEM_CHANNELS_AMD
CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_GLOBAL_MEM_CHANNELS_AMD, cl_uint)
#endif
#ifdef CL_DEVICE_GLOBAL_MEM_CHANNEL_BANKS_AMD
CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_GLOBAL_MEM_CHANNEL_BANKS_AMD, cl_uint)
#endif
#ifdef CL_DEVICE_GLOBAL_MEM_CHANNEL_BANK_WIDTH_AMD
CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_GLOBAL_MEM_CHANNEL_BANK_WIDTH_AMD, cl_uint)
#endif
#ifdef CL_DEVICE_LOCAL_MEM_SIZE_PER_COMPUTE_UNIT_AMD
CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_LOCAL_MEM_SIZE_PER_COMPUTE_UNIT_AMD, cl_uint)
#endif
#ifdef CL_DEVICE_LOCAL_MEM_BANKS_AMD
CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_LOCAL_MEM_BANKS_AMD, cl_uint)
#endif

#ifdef CL_DEVICE_COMPUTE_CAPABILITY_MAJOR_NV
CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_COMPUTE_CAPABILITY_MAJOR_NV, cl_uint)
#endif
#ifdef CL_DEVICE_COMPUTE_CAPABILITY_MINOR_NV
CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_COMPUTE_CAPABILITY_MINOR_NV, cl_uint)
#endif
#ifdef CL_DEVICE_REGISTERS_PER_BLOCK_NV
CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_REGISTERS_PER_BLOCK_NV, cl_uint)
#endif
#ifdef CL_DEVICE_WARP_SIZE_NV
CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_WARP_SIZE_NV, cl_uint)
#endif
#ifdef CL_DEVICE_GPU_OVERLAP_NV
CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_GPU_OVERLAP_NV, cl_bool)
#endif
#ifdef CL_DEVICE_KERNEL_EXEC_TIMEOUT_NV
CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_KERNEL_EXEC_TIMEOUT_NV, cl_bool)
#endif
#ifdef CL_DEVICE_INTEGRATED_MEMORY_NV
CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_INTEGRATED_MEMORY_NV, cl_bool)
#endif
1369 // Convenience functions
1371 template <typename Func, typename T>
1373 getInfo(Func f, cl_uint name, T* param)
1375 return getInfoHelper(f, name, param, 0);
1378 template <typename Func, typename Arg0>
1379 struct GetInfoFunctor0
1381 Func f_; const Arg0& arg0_;
1383 cl_uint param, size_type size, void* value, size_type* size_ret)
1384 { return f_(arg0_, param, size, value, size_ret); }
1387 template <typename Func, typename Arg0, typename Arg1>
1388 struct GetInfoFunctor1
1390 Func f_; const Arg0& arg0_; const Arg1& arg1_;
1392 cl_uint param, size_type size, void* value, size_type* size_ret)
1393 { return f_(arg0_, arg1_, param, size, value, size_ret); }
1396 template <typename Func, typename Arg0, typename T>
1398 getInfo(Func f, const Arg0& arg0, cl_uint name, T* param)
1400 GetInfoFunctor0<Func, Arg0> f0 = { f, arg0 };
1401 return getInfoHelper(f0, name, param, 0);
1404 template <typename Func, typename Arg0, typename Arg1, typename T>
1406 getInfo(Func f, const Arg0& arg0, const Arg1& arg1, cl_uint name, T* param)
1408 GetInfoFunctor1<Func, Arg0, Arg1> f0 = { f, arg0, arg1 };
1409 return getInfoHelper(f0, name, param, 0);
// Primary template is intentionally empty: using Wrapper with a handle
// type that has no ReferenceHandler specialization fails to compile.
template<typename T>
struct ReferenceHandler
{ };
1417 #if CL_HPP_TARGET_OPENCL_VERSION >= 120
1419 * OpenCL 1.2 devices do have retain/release.
1422 struct ReferenceHandler<cl_device_id>
1425 * Retain the device.
1426 * \param device A valid device created using createSubDevices
1428 * CL_SUCCESS if the function executed successfully.
1429 * CL_INVALID_DEVICE if device was not a valid subdevice
1430 * CL_OUT_OF_RESOURCES
1431 * CL_OUT_OF_HOST_MEMORY
1433 static cl_int retain(cl_device_id device)
1434 { return ::clRetainDevice(device); }
1436 * Retain the device.
1437 * \param device A valid device created using createSubDevices
1439 * CL_SUCCESS if the function executed successfully.
1440 * CL_INVALID_DEVICE if device was not a valid subdevice
1441 * CL_OUT_OF_RESOURCES
1442 * CL_OUT_OF_HOST_MEMORY
1444 static cl_int release(cl_device_id device)
1445 { return ::clReleaseDevice(device); }
1447 #else // CL_HPP_TARGET_OPENCL_VERSION >= 120
1449 * OpenCL 1.1 devices do not have retain/release.
1452 struct ReferenceHandler<cl_device_id>
1454 // cl_device_id does not have retain().
1455 static cl_int retain(cl_device_id)
1456 { return CL_SUCCESS; }
1457 // cl_device_id does not have release().
1458 static cl_int release(cl_device_id)
1459 { return CL_SUCCESS; }
1461 #endif // ! (CL_HPP_TARGET_OPENCL_VERSION >= 120)
1464 struct ReferenceHandler<cl_platform_id>
1466 // cl_platform_id does not have retain().
1467 static cl_int retain(cl_platform_id)
1468 { return CL_SUCCESS; }
1469 // cl_platform_id does not have release().
1470 static cl_int release(cl_platform_id)
1471 { return CL_SUCCESS; }
1475 struct ReferenceHandler<cl_context>
1477 static cl_int retain(cl_context context)
1478 { return ::clRetainContext(context); }
1479 static cl_int release(cl_context context)
1480 { return ::clReleaseContext(context); }
1484 struct ReferenceHandler<cl_command_queue>
1486 static cl_int retain(cl_command_queue queue)
1487 { return ::clRetainCommandQueue(queue); }
1488 static cl_int release(cl_command_queue queue)
1489 { return ::clReleaseCommandQueue(queue); }
1493 struct ReferenceHandler<cl_mem>
1495 static cl_int retain(cl_mem memory)
1496 { return ::clRetainMemObject(memory); }
1497 static cl_int release(cl_mem memory)
1498 { return ::clReleaseMemObject(memory); }
1502 struct ReferenceHandler<cl_sampler>
1504 static cl_int retain(cl_sampler sampler)
1505 { return ::clRetainSampler(sampler); }
1506 static cl_int release(cl_sampler sampler)
1507 { return ::clReleaseSampler(sampler); }
1511 struct ReferenceHandler<cl_program>
1513 static cl_int retain(cl_program program)
1514 { return ::clRetainProgram(program); }
1515 static cl_int release(cl_program program)
1516 { return ::clReleaseProgram(program); }
1520 struct ReferenceHandler<cl_kernel>
1522 static cl_int retain(cl_kernel kernel)
1523 { return ::clRetainKernel(kernel); }
1524 static cl_int release(cl_kernel kernel)
1525 { return ::clReleaseKernel(kernel); }
1529 struct ReferenceHandler<cl_event>
1531 static cl_int retain(cl_event event)
1532 { return ::clRetainEvent(event); }
1533 static cl_int release(cl_event event)
1534 { return ::clReleaseEvent(event); }
1537 #if CL_HPP_TARGET_OPENCL_VERSION >= 120 && CL_HPP_MINIMUM_OPENCL_VERSION < 120
1539 // Extracts version number with major in the upper 16 bits, minor in the lower 16
1540 static cl_uint getVersion(const vector<char> &versionInfo)
1542 int highVersion = 0;
1545 while(versionInfo[index] != '.' ) {
1547 highVersion += versionInfo[index]-'0';
1551 while(versionInfo[index] != ' ' && versionInfo[index] != '\0') {
1553 lowVersion += versionInfo[index]-'0';
1556 return (highVersion << 16) | lowVersion;
1560 static cl_uint getPlatformVersion(cl_platform_id platform)
1563 clGetPlatformInfo(platform, CL_PLATFORM_VERSION, 0, NULL, &size);
1565 vector<char> versionInfo(size);
1566 clGetPlatformInfo(platform, CL_PLATFORM_VERSION, size, versionInfo.data(), &size);
1567 return getVersion(versionInfo);
1570 static cl_uint getDevicePlatformVersion(cl_device_id device)
1572 cl_platform_id platform;
1573 clGetDeviceInfo(device, CL_DEVICE_PLATFORM, sizeof(platform), &platform, NULL);
1574 return getPlatformVersion(platform);
1577 static cl_uint getContextPlatformVersion(cl_context context)
1579 // The platform cannot be queried directly, so we first have to grab a
1580 // device and obtain its context
1582 clGetContextInfo(context, CL_CONTEXT_DEVICES, 0, NULL, &size);
1585 vector<cl_device_id> devices(size/sizeof(cl_device_id));
1586 clGetContextInfo(context, CL_CONTEXT_DEVICES, size, devices.data(), NULL);
1587 return getDevicePlatformVersion(devices[0]);
1589 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 120 && CL_HPP_MINIMUM_OPENCL_VERSION < 120
1591 template <typename T>
1601 Wrapper() : object_(NULL) { }
1603 Wrapper(const cl_type &obj, bool retainObject) : object_(obj)
1606 detail::errHandler(retain(), __RETAIN_ERR);
1612 if (object_ != NULL) { release(); }
1615 Wrapper(const Wrapper<cl_type>& rhs)
1617 object_ = rhs.object_;
1618 detail::errHandler(retain(), __RETAIN_ERR);
1621 Wrapper(Wrapper<cl_type>&& rhs) CL_HPP_NOEXCEPT_
1623 object_ = rhs.object_;
1627 Wrapper<cl_type>& operator = (const Wrapper<cl_type>& rhs)
1630 detail::errHandler(release(), __RELEASE_ERR);
1631 object_ = rhs.object_;
1632 detail::errHandler(retain(), __RETAIN_ERR);
1637 Wrapper<cl_type>& operator = (Wrapper<cl_type>&& rhs)
1640 detail::errHandler(release(), __RELEASE_ERR);
1641 object_ = rhs.object_;
1647 Wrapper<cl_type>& operator = (const cl_type &rhs)
1649 detail::errHandler(release(), __RELEASE_ERR);
1654 const cl_type& operator ()() const { return object_; }
1656 cl_type& operator ()() { return object_; }
1658 const cl_type get() const { return object_; }
1660 cl_type get() { return object_; }
1664 template<typename Func, typename U>
1665 friend inline cl_int getInfoHelper(Func, cl_uint, U*, int, typename U::cl_type);
1667 cl_int retain() const
1669 if (object_ != nullptr) {
1670 return ReferenceHandler<cl_type>::retain(object_);
1677 cl_int release() const
1679 if (object_ != nullptr) {
1680 return ReferenceHandler<cl_type>::release(object_);
1689 class Wrapper<cl_device_id>
1692 typedef cl_device_id cl_type;
1696 bool referenceCountable_;
1698 static bool isReferenceCountable(cl_device_id device)
1700 bool retVal = false;
1701 #if CL_HPP_TARGET_OPENCL_VERSION >= 120
1702 #if CL_HPP_MINIMUM_OPENCL_VERSION < 120
1703 if (device != NULL) {
1704 int version = getDevicePlatformVersion(device);
1705 if(version > ((1 << 16) + 1)) {
1709 #else // CL_HPP_MINIMUM_OPENCL_VERSION < 120
1711 #endif // CL_HPP_MINIMUM_OPENCL_VERSION < 120
1712 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
1717 Wrapper() : object_(NULL), referenceCountable_(false)
1721 Wrapper(const cl_type &obj, bool retainObject) :
1723 referenceCountable_(false)
1725 referenceCountable_ = isReferenceCountable(obj);
1728 detail::errHandler(retain(), __RETAIN_ERR);
1737 Wrapper(const Wrapper<cl_type>& rhs)
1739 object_ = rhs.object_;
1740 referenceCountable_ = isReferenceCountable(object_);
1741 detail::errHandler(retain(), __RETAIN_ERR);
1744 Wrapper(Wrapper<cl_type>&& rhs) CL_HPP_NOEXCEPT_
1746 object_ = rhs.object_;
1747 referenceCountable_ = rhs.referenceCountable_;
1749 rhs.referenceCountable_ = false;
1752 Wrapper<cl_type>& operator = (const Wrapper<cl_type>& rhs)
1755 detail::errHandler(release(), __RELEASE_ERR);
1756 object_ = rhs.object_;
1757 referenceCountable_ = rhs.referenceCountable_;
1758 detail::errHandler(retain(), __RETAIN_ERR);
1763 Wrapper<cl_type>& operator = (Wrapper<cl_type>&& rhs)
1766 detail::errHandler(release(), __RELEASE_ERR);
1767 object_ = rhs.object_;
1768 referenceCountable_ = rhs.referenceCountable_;
1770 rhs.referenceCountable_ = false;
1775 Wrapper<cl_type>& operator = (const cl_type &rhs)
1777 detail::errHandler(release(), __RELEASE_ERR);
1779 referenceCountable_ = isReferenceCountable(object_);
1783 const cl_type& operator ()() const { return object_; }
1785 cl_type& operator ()() { return object_; }
1787 const cl_type get() const { return object_; }
1789 cl_type get() { return object_; }
1792 template<typename Func, typename U>
1793 friend inline cl_int getInfoHelper(Func, cl_uint, U*, int, typename U::cl_type);
1795 template<typename Func, typename U>
1796 friend inline cl_int getInfoHelper(Func, cl_uint, vector<U>*, int, typename U::cl_type);
1798 cl_int retain() const
1800 if( object_ != nullptr && referenceCountable_ ) {
1801 return ReferenceHandler<cl_type>::retain(object_);
1808 cl_int release() const
1810 if (object_ != nullptr && referenceCountable_) {
1811 return ReferenceHandler<cl_type>::release(object_);
1819 template <typename T>
1820 inline bool operator==(const Wrapper<T> &lhs, const Wrapper<T> &rhs)
1822 return lhs() == rhs();
1825 template <typename T>
1826 inline bool operator!=(const Wrapper<T> &lhs, const Wrapper<T> &rhs)
1828 return !operator==(lhs, rhs);
1831 } // namespace detail
// One (device, build-log string) pair per device in a program build;
// the string type is whatever CL_PROGRAM_BUILD_LOG's param_traits maps to.
using BuildLogType = vector<std::pair<cl::Device, typename detail::param_traits<detail::cl_program_build_info, CL_PROGRAM_BUILD_LOG>::param_type>>;
1836 #if defined(CL_HPP_ENABLE_EXCEPTIONS)
1838 * Exception class for build errors to carry build info
1840 class BuildError : public Error
1843 BuildLogType buildLogs;
1845 BuildError(cl_int err, const char * errStr, const BuildLogType &vec) : Error(err, errStr), buildLogs(vec)
1849 BuildLogType getBuildLog() const
1855 static inline cl_int buildErrHandler(
1857 const char * errStr,
1858 const BuildLogType &buildLogs)
1860 if (err != CL_SUCCESS) {
1861 throw BuildError(err, errStr, buildLogs);
1865 } // namespace detail
1869 static inline cl_int buildErrHandler(
1871 const char * errStr,
1872 const BuildLogType &buildLogs)
1874 (void)buildLogs; // suppress unused variable warning
1878 } // namespace detail
1879 #endif // #if defined(CL_HPP_ENABLE_EXCEPTIONS)
1882 /*! \stuct ImageFormat
1883 * \brief Adds constructors and member functions for cl_image_format.
1885 * \see cl_image_format
1887 struct ImageFormat : public cl_image_format
1889 //! \brief Default constructor - performs no initialization.
1892 //! \brief Initializing constructor.
1893 ImageFormat(cl_channel_order order, cl_channel_type type)
1895 image_channel_order = order;
1896 image_channel_data_type = type;
1899 //! \brief Assignment operator.
1900 ImageFormat& operator = (const ImageFormat& rhs)
1903 this->image_channel_data_type = rhs.image_channel_data_type;
1904 this->image_channel_order = rhs.image_channel_order;
1910 /*! \brief Class interface for cl_device_id.
1912 * \note Copies of these objects are inexpensive, since they don't 'own'
1913 * any underlying resources or data structures.
1917 class Device : public detail::Wrapper<cl_device_id>
1920 static std::once_flag default_initialized_;
1921 static Device default_;
1922 static cl_int default_error_;
1924 /*! \brief Create the default context.
1926 * This sets @c default_ and @c default_error_. It does not throw
1929 static void makeDefault();
1931 /*! \brief Create the default platform from a provided platform.
1933 * This sets @c default_. It does not throw
1936 static void makeDefaultProvided(const Device &p) {
1941 #ifdef CL_HPP_UNIT_TEST_ENABLE
1942 /*! \brief Reset the default.
1944 * This sets @c default_ to an empty value to support cleanup in
1945 * the unit test framework.
1946 * This function is not thread safe.
1948 static void unitTestClearDefault() {
1949 default_ = Device();
1951 #endif // #ifdef CL_HPP_UNIT_TEST_ENABLE
1953 //! \brief Default constructor - initializes to NULL.
1954 Device() : detail::Wrapper<cl_type>() { }
1956 /*! \brief Constructor from cl_device_id.
1958 * This simply copies the device ID value, which is an inexpensive operation.
1960 explicit Device(const cl_device_id &device, bool retainObject = false) :
1961 detail::Wrapper<cl_type>(device, retainObject) { }
1963 /*! \brief Returns the first device on the default context.
1965 * \see Context::getDefault()
1967 static Device getDefault(
1968 cl_int *errResult = NULL)
1970 std::call_once(default_initialized_, makeDefault);
1971 detail::errHandler(default_error_);
1972 if (errResult != NULL) {
1973 *errResult = default_error_;
1979 * Modify the default device to be used by
1980 * subsequent operations.
1981 * Will only set the default if no default was previously created.
1982 * @return updated default device.
1983 * Should be compared to the passed value to ensure that it was updated.
1985 static Device setDefault(const Device &default_device)
1987 std::call_once(default_initialized_, makeDefaultProvided, std::cref(default_device));
1988 detail::errHandler(default_error_);
1992 /*! \brief Assignment operator from cl_device_id.
1994 * This simply copies the device ID value, which is an inexpensive operation.
1996 Device& operator = (const cl_device_id& rhs)
1998 detail::Wrapper<cl_type>::operator=(rhs);
2002 /*! \brief Copy constructor to forward copy to the superclass correctly.
2003 * Required for MSVC.
2005 Device(const Device& dev) : detail::Wrapper<cl_type>(dev) {}
2007 /*! \brief Copy assignment to forward copy to the superclass correctly.
2008 * Required for MSVC.
2010 Device& operator = (const Device &dev)
2012 detail::Wrapper<cl_type>::operator=(dev);
2016 /*! \brief Move constructor to forward move to the superclass correctly.
2017 * Required for MSVC.
2019 Device(Device&& dev) CL_HPP_NOEXCEPT_ : detail::Wrapper<cl_type>(std::move(dev)) {}
2021 /*! \brief Move assignment to forward move to the superclass correctly.
2022 * Required for MSVC.
2024 Device& operator = (Device &&dev)
2026 detail::Wrapper<cl_type>::operator=(std::move(dev));
2030 //! \brief Wrapper for clGetDeviceInfo().
2031 template <typename T>
2032 cl_int getInfo(cl_device_info name, T* param) const
2034 return detail::errHandler(
2035 detail::getInfo(&::clGetDeviceInfo, object_, name, param),
2036 __GET_DEVICE_INFO_ERR);
2039 //! \brief Wrapper for clGetDeviceInfo() that returns by value.
2040 template <cl_int name> typename
2041 detail::param_traits<detail::cl_device_info, name>::param_type
2042 getInfo(cl_int* err = NULL) const
2044 typename detail::param_traits<
2045 detail::cl_device_info, name>::param_type param;
2046 cl_int result = getInfo(name, ¶m);
2056 #if CL_HPP_TARGET_OPENCL_VERSION >= 120
2057 //! \brief Wrapper for clCreateSubDevices().
2058 cl_int createSubDevices(
2059 const cl_device_partition_property * properties,
2060 vector<Device>* devices)
2063 cl_int err = clCreateSubDevices(object_, properties, 0, NULL, &n);
2064 if (err != CL_SUCCESS) {
2065 return detail::errHandler(err, __CREATE_SUB_DEVICES_ERR);
2068 vector<cl_device_id> ids(n);
2069 err = clCreateSubDevices(object_, properties, n, ids.data(), NULL);
2070 if (err != CL_SUCCESS) {
2071 return detail::errHandler(err, __CREATE_SUB_DEVICES_ERR);
2074 // Cannot trivially assign because we need to capture intermediates
2075 // with safe construction
2077 devices->resize(ids.size());
2079 // Assign to param, constructing with retain behaviour
2080 // to correctly capture each underlying CL object
2081 for (size_type i = 0; i < ids.size(); i++) {
2082 // We do not need to retain because this device is being created
2084 (*devices)[i] = Device(ids[i], false);
2090 #elif defined(CL_HPP_USE_CL_DEVICE_FISSION)
2093 * CL 1.1 version that uses device fission extension.
2095 cl_int createSubDevices(
2096 const cl_device_partition_property_ext * properties,
2097 vector<Device>* devices)
2099 typedef CL_API_ENTRY cl_int
2100 ( CL_API_CALL * PFN_clCreateSubDevicesEXT)(
2101 cl_device_id /*in_device*/,
2102 const cl_device_partition_property_ext * /* properties */,
2103 cl_uint /*num_entries*/,
2104 cl_device_id * /*out_devices*/,
2105 cl_uint * /*num_devices*/ ) CL_EXT_SUFFIX__VERSION_1_1;
2107 static PFN_clCreateSubDevicesEXT pfn_clCreateSubDevicesEXT = NULL;
2108 CL_HPP_INIT_CL_EXT_FCN_PTR_(clCreateSubDevicesEXT);
2111 cl_int err = pfn_clCreateSubDevicesEXT(object_, properties, 0, NULL, &n);
2112 if (err != CL_SUCCESS) {
2113 return detail::errHandler(err, __CREATE_SUB_DEVICES_ERR);
2116 vector<cl_device_id> ids(n);
2117 err = pfn_clCreateSubDevicesEXT(object_, properties, n, ids.data(), NULL);
2118 if (err != CL_SUCCESS) {
2119 return detail::errHandler(err, __CREATE_SUB_DEVICES_ERR);
2121 // Cannot trivially assign because we need to capture intermediates
2122 // with safe construction
2124 devices->resize(ids.size());
2126 // Assign to param, constructing with retain behaviour
2127 // to correctly capture each underlying CL object
2128 for (size_type i = 0; i < ids.size(); i++) {
2129 // We do not need to retain because this device is being created
2131 (*devices)[i] = Device(ids[i], false);
2136 #endif // defined(CL_HPP_USE_CL_DEVICE_FISSION)
2139 CL_HPP_DEFINE_STATIC_MEMBER_ std::once_flag Device::default_initialized_;
2140 CL_HPP_DEFINE_STATIC_MEMBER_ Device Device::default_;
2141 CL_HPP_DEFINE_STATIC_MEMBER_ cl_int Device::default_error_ = CL_SUCCESS;
2143 /*! \brief Class interface for cl_platform_id.
2145 * \note Copies of these objects are inexpensive, since they don't 'own'
2146 * any underlying resources or data structures.
2148 * \see cl_platform_id
2150 class Platform : public detail::Wrapper<cl_platform_id>
2153 static std::once_flag default_initialized_;
2154 static Platform default_;
2155 static cl_int default_error_;
2157 /*! \brief Create the default context.
2159 * This sets @c default_ and @c default_error_. It does not throw
2162 static void makeDefault() {
2163 /* Throwing an exception from a call_once invocation does not do
2164 * what we wish, so we catch it and save the error.
2166 #if defined(CL_HPP_ENABLE_EXCEPTIONS)
2170 // If default wasn't passed ,generate one
2174 cl_int err = ::clGetPlatformIDs(0, NULL, &n);
2175 if (err != CL_SUCCESS) {
2176 default_error_ = err;
2180 default_error_ = CL_INVALID_PLATFORM;
2184 vector<cl_platform_id> ids(n);
2185 err = ::clGetPlatformIDs(n, ids.data(), NULL);
2186 if (err != CL_SUCCESS) {
2187 default_error_ = err;
2191 default_ = Platform(ids[0]);
2193 #if defined(CL_HPP_ENABLE_EXCEPTIONS)
2194 catch (cl::Error &e) {
2195 default_error_ = e.err();
2200 /*! \brief Create the default platform from a provided platform.
2202 * This sets @c default_. It does not throw
2205 static void makeDefaultProvided(const Platform &p) {
2210 #ifdef CL_HPP_UNIT_TEST_ENABLE
2211 /*! \brief Reset the default.
2213 * This sets @c default_ to an empty value to support cleanup in
2214 * the unit test framework.
2215 * This function is not thread safe.
2217 static void unitTestClearDefault() {
2218 default_ = Platform();
2220 #endif // #ifdef CL_HPP_UNIT_TEST_ENABLE
2222 //! \brief Default constructor - initializes to NULL.
2223 Platform() : detail::Wrapper<cl_type>() { }
2225 /*! \brief Constructor from cl_platform_id.
2227 * \param retainObject will cause the constructor to retain its cl object.
2228 * Defaults to false to maintain compatibility with
2230 * This simply copies the platform ID value, which is an inexpensive operation.
2232 explicit Platform(const cl_platform_id &platform, bool retainObject = false) :
2233 detail::Wrapper<cl_type>(platform, retainObject) { }
2235 /*! \brief Assignment operator from cl_platform_id.
2237 * This simply copies the platform ID value, which is an inexpensive operation.
2239 Platform& operator = (const cl_platform_id& rhs)
2241 detail::Wrapper<cl_type>::operator=(rhs);
2245 static Platform getDefault(
2246 cl_int *errResult = NULL)
2248 std::call_once(default_initialized_, makeDefault);
2249 detail::errHandler(default_error_);
2250 if (errResult != NULL) {
2251 *errResult = default_error_;
2257 * Modify the default platform to be used by
2258 * subsequent operations.
2259 * Will only set the default if no default was previously created.
2260 * @return updated default platform.
2261 * Should be compared to the passed value to ensure that it was updated.
2263 static Platform setDefault(const Platform &default_platform)
2265 std::call_once(default_initialized_, makeDefaultProvided, std::cref(default_platform));
2266 detail::errHandler(default_error_);
2270 //! \brief Wrapper for clGetPlatformInfo().
2271 cl_int getInfo(cl_platform_info name, string* param) const
2273 return detail::errHandler(
2274 detail::getInfo(&::clGetPlatformInfo, object_, name, param),
2275 __GET_PLATFORM_INFO_ERR);
2278 //! \brief Wrapper for clGetPlatformInfo() that returns by value.
2279 template <cl_int name> typename
2280 detail::param_traits<detail::cl_platform_info, name>::param_type
2281 getInfo(cl_int* err = NULL) const
2283 typename detail::param_traits<
2284 detail::cl_platform_info, name>::param_type param;
2285 cl_int result = getInfo(name, ¶m);
2292 /*! \brief Gets a list of devices for this platform.
2294 * Wraps clGetDeviceIDs().
2297 cl_device_type type,
2298 vector<Device>* devices) const
2301 if( devices == NULL ) {
2302 return detail::errHandler(CL_INVALID_ARG_VALUE, __GET_DEVICE_IDS_ERR);
2304 cl_int err = ::clGetDeviceIDs(object_, type, 0, NULL, &n);
2305 if (err != CL_SUCCESS) {
2306 return detail::errHandler(err, __GET_DEVICE_IDS_ERR);
2309 vector<cl_device_id> ids(n);
2310 err = ::clGetDeviceIDs(object_, type, n, ids.data(), NULL);
2311 if (err != CL_SUCCESS) {
2312 return detail::errHandler(err, __GET_DEVICE_IDS_ERR);
2315 // Cannot trivially assign because we need to capture intermediates
2316 // with safe construction
2317 // We must retain things we obtain from the API to avoid releasing
2318 // API-owned objects.
2320 devices->resize(ids.size());
2322 // Assign to param, constructing with retain behaviour
2323 // to correctly capture each underlying CL object
2324 for (size_type i = 0; i < ids.size(); i++) {
2325 (*devices)[i] = Device(ids[i], true);
2331 #if defined(CL_HPP_USE_DX_INTEROP)
2332 /*! \brief Get the list of available D3D10 devices.
2334 * \param d3d_device_source.
2336 * \param d3d_object.
2338 * \param d3d_device_set.
2340 * \param devices returns a vector of OpenCL D3D10 devices found. The cl::Device
2341 * values returned in devices can be used to identify a specific OpenCL
2342 * device. If \a devices argument is NULL, this argument is ignored.
2344 * \return One of the following values:
2345 * - CL_SUCCESS if the function is executed successfully.
2347 * The application can query specific capabilities of the OpenCL device(s)
2348 * returned by cl::getDevices. This can be used by the application to
2349 * determine which device(s) to use.
2351 * \note In the case that exceptions are enabled and a return value
2352 * other than CL_SUCCESS is generated, then cl::Error exception is
2356 cl_d3d10_device_source_khr d3d_device_source,
2358 cl_d3d10_device_set_khr d3d_device_set,
2359 vector<Device>* devices) const
2361 typedef CL_API_ENTRY cl_int (CL_API_CALL *PFN_clGetDeviceIDsFromD3D10KHR)(
2362 cl_platform_id platform,
2363 cl_d3d10_device_source_khr d3d_device_source,
2365 cl_d3d10_device_set_khr d3d_device_set,
2366 cl_uint num_entries,
2367 cl_device_id * devices,
2368 cl_uint* num_devices);
2370 if( devices == NULL ) {
2371 return detail::errHandler(CL_INVALID_ARG_VALUE, __GET_DEVICE_IDS_ERR);
2374 static PFN_clGetDeviceIDsFromD3D10KHR pfn_clGetDeviceIDsFromD3D10KHR = NULL;
2375 CL_HPP_INIT_CL_EXT_FCN_PTR_PLATFORM_(object_, clGetDeviceIDsFromD3D10KHR);
2378 cl_int err = pfn_clGetDeviceIDsFromD3D10KHR(
2386 if (err != CL_SUCCESS) {
2387 return detail::errHandler(err, __GET_DEVICE_IDS_ERR);
2390 vector<cl_device_id> ids(n);
2391 err = pfn_clGetDeviceIDsFromD3D10KHR(
2399 if (err != CL_SUCCESS) {
2400 return detail::errHandler(err, __GET_DEVICE_IDS_ERR);
2403 // Cannot trivially assign because we need to capture intermediates
2404 // with safe construction
2405 // We must retain things we obtain from the API to avoid releasing
2406 // API-owned objects.
2408 devices->resize(ids.size());
2410 // Assign to param, constructing with retain behaviour
2411 // to correctly capture each underlying CL object
2412 for (size_type i = 0; i < ids.size(); i++) {
2413 (*devices)[i] = Device(ids[i], true);
2420 /*! \brief Gets a list of available platforms.
2422 * Wraps clGetPlatformIDs().
2425 vector<Platform>* platforms)
2429 if( platforms == NULL ) {
2430 return detail::errHandler(CL_INVALID_ARG_VALUE, __GET_PLATFORM_IDS_ERR);
2433 cl_int err = ::clGetPlatformIDs(0, NULL, &n);
2434 if (err != CL_SUCCESS) {
2435 return detail::errHandler(err, __GET_PLATFORM_IDS_ERR);
2438 vector<cl_platform_id> ids(n);
2439 err = ::clGetPlatformIDs(n, ids.data(), NULL);
2440 if (err != CL_SUCCESS) {
2441 return detail::errHandler(err, __GET_PLATFORM_IDS_ERR);
2445 platforms->resize(ids.size());
2447 // Platforms don't reference count
2448 for (size_type i = 0; i < ids.size(); i++) {
2449 (*platforms)[i] = Platform(ids[i]);
2455 /*! \brief Gets the first available platform.
2457 * Wraps clGetPlatformIDs(), returning the first result.
2460 Platform * platform)
2463 Platform default_platform = Platform::getDefault(&err);
2465 *platform = default_platform;
2470 /*! \brief Gets the first available platform, returning it by value.
2472 * \return Returns a valid platform if one is available.
2473 * If no platform is available will return a null platform.
2474 * Throws an exception if no platforms are available
2475 * or an error condition occurs.
2476 * Wraps clGetPlatformIDs(), returning the first result.
2478 static Platform get(
2479 cl_int * errResult = NULL)
2482 Platform default_platform = Platform::getDefault(&err);
2486 return default_platform;
2489 #if CL_HPP_TARGET_OPENCL_VERSION >= 120
2490 //! \brief Wrapper for clUnloadCompiler().
2494 return ::clUnloadPlatformCompiler(object_);
2496 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
2497 }; // class Platform
2499 CL_HPP_DEFINE_STATIC_MEMBER_ std::once_flag Platform::default_initialized_;
2500 CL_HPP_DEFINE_STATIC_MEMBER_ Platform Platform::default_;
2501 CL_HPP_DEFINE_STATIC_MEMBER_ cl_int Platform::default_error_ = CL_SUCCESS;
/**
 * Deprecated APIs for 1.2
 */
#if defined(CL_USE_DEPRECATED_OPENCL_1_1_APIS)
/**
 * Unload the OpenCL compiler.
 * \note Deprecated for OpenCL 1.2. Use Platform::unloadCompiler instead.
 */
inline CL_EXT_PREFIX__VERSION_1_1_DEPRECATED cl_int
UnloadCompiler() CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED;
inline cl_int
UnloadCompiler()
{
    return ::clUnloadCompiler();
}
#endif // #if defined(CL_USE_DEPRECATED_OPENCL_1_1_APIS)
2521 /*! \brief Class interface for cl_context.
2523 * \note Copies of these objects are shallow, meaning that the copy will refer
2524 * to the same underlying cl_context as the original. For details, see
2525 * clRetainContext() and clReleaseContext().
2530 : public detail::Wrapper<cl_context>
2533 static std::once_flag default_initialized_;
2534 static Context default_;
2535 static cl_int default_error_;
2537 /*! \brief Create the default context from the default device type in the default platform.
2539 * This sets @c default_ and @c default_error_. It does not throw
2542 static void makeDefault() {
2543 /* Throwing an exception from a call_once invocation does not do
2544 * what we wish, so we catch it and save the error.
2546 #if defined(CL_HPP_ENABLE_EXCEPTIONS)
2550 #if !defined(__APPLE__) && !defined(__MACOS)
2551 const Platform &p = Platform::getDefault();
2552 cl_platform_id defaultPlatform = p();
2553 cl_context_properties properties[3] = {
2554 CL_CONTEXT_PLATFORM, (cl_context_properties)defaultPlatform, 0
2556 #else // #if !defined(__APPLE__) && !defined(__MACOS)
2557 cl_context_properties *properties = nullptr;
2558 #endif // #if !defined(__APPLE__) && !defined(__MACOS)
2561 CL_DEVICE_TYPE_DEFAULT,
2567 #if defined(CL_HPP_ENABLE_EXCEPTIONS)
2568 catch (cl::Error &e) {
2569 default_error_ = e.err();
2575 /*! \brief Create the default context from a provided Context.
2577 * This sets @c default_. It does not throw
2580 static void makeDefaultProvided(const Context &c) {
2585 #ifdef CL_HPP_UNIT_TEST_ENABLE
2586 /*! \brief Reset the default.
2588 * This sets @c default_ to an empty value to support cleanup in
2589 * the unit test framework.
2590 * This function is not thread safe.
2592 static void unitTestClearDefault() {
2593 default_ = Context();
2595 #endif // #ifdef CL_HPP_UNIT_TEST_ENABLE
2597 /*! \brief Constructs a context including a list of specified devices.
2599 * Wraps clCreateContext().
2602 const vector<Device>& devices,
2603 cl_context_properties* properties = NULL,
2604 void (CL_CALLBACK * notifyFptr)(
2614 size_type numDevices = devices.size();
2615 vector<cl_device_id> deviceIDs(numDevices);
2617 for( size_type deviceIndex = 0; deviceIndex < numDevices; ++deviceIndex ) {
2618 deviceIDs[deviceIndex] = (devices[deviceIndex])();
2621 object_ = ::clCreateContext(
2622 properties, (cl_uint) numDevices,
2624 notifyFptr, data, &error);
2626 detail::errHandler(error, __CREATE_CONTEXT_ERR);
2633 const Device& device,
2634 cl_context_properties* properties = NULL,
2635 void (CL_CALLBACK * notifyFptr)(
2645 cl_device_id deviceID = device();
2647 object_ = ::clCreateContext(
2650 notifyFptr, data, &error);
2652 detail::errHandler(error, __CREATE_CONTEXT_ERR);
2658 /*! \brief Constructs a context including all or a subset of devices of a specified type.
2660 * Wraps clCreateContextFromType().
2663 cl_device_type type,
2664 cl_context_properties* properties = NULL,
2665 void (CL_CALLBACK * notifyFptr)(
2675 #if !defined(__APPLE__) && !defined(__MACOS)
2676 cl_context_properties prop[4] = {CL_CONTEXT_PLATFORM, 0, 0, 0 };
2678 if (properties == NULL) {
2679 // Get a valid platform ID as we cannot send in a blank one
2680 vector<Platform> platforms;
2681 error = Platform::get(&platforms);
2682 if (error != CL_SUCCESS) {
2683 detail::errHandler(error, __CREATE_CONTEXT_FROM_TYPE_ERR);
2690 // Check the platforms we found for a device of our specified type
2691 cl_context_properties platform_id = 0;
2692 for (unsigned int i = 0; i < platforms.size(); i++) {
2694 vector<Device> devices;
2696 #if defined(CL_HPP_ENABLE_EXCEPTIONS)
2700 error = platforms[i].getDevices(type, &devices);
2702 #if defined(CL_HPP_ENABLE_EXCEPTIONS)
2704 // Catch if exceptions are enabled as we don't want to exit if first platform has no devices of type
2705 // We do error checking next anyway, and can throw there if needed
2708 // Only squash CL_SUCCESS and CL_DEVICE_NOT_FOUND
2709 if (error != CL_SUCCESS && error != CL_DEVICE_NOT_FOUND) {
2710 detail::errHandler(error, __CREATE_CONTEXT_FROM_TYPE_ERR);
2716 if (devices.size() > 0) {
2717 platform_id = (cl_context_properties)platforms[i]();
2722 if (platform_id == 0) {
2723 detail::errHandler(CL_DEVICE_NOT_FOUND, __CREATE_CONTEXT_FROM_TYPE_ERR);
2725 *err = CL_DEVICE_NOT_FOUND;
2730 prop[1] = platform_id;
2731 properties = &prop[0];
2734 object_ = ::clCreateContextFromType(
2735 properties, type, notifyFptr, data, &error);
2737 detail::errHandler(error, __CREATE_CONTEXT_FROM_TYPE_ERR);
2743 /*! \brief Copy constructor to forward copy to the superclass correctly.
2744 * Required for MSVC.
2746 Context(const Context& ctx) : detail::Wrapper<cl_type>(ctx) {}
2748 /*! \brief Copy assignment to forward copy to the superclass correctly.
2749 * Required for MSVC.
2751 Context& operator = (const Context &ctx)
2753 detail::Wrapper<cl_type>::operator=(ctx);
2757 /*! \brief Move constructor to forward move to the superclass correctly.
2758 * Required for MSVC.
2760 Context(Context&& ctx) CL_HPP_NOEXCEPT_ : detail::Wrapper<cl_type>(std::move(ctx)) {}
2762 /*! \brief Move assignment to forward move to the superclass correctly.
2763 * Required for MSVC.
2765 Context& operator = (Context &&ctx)
2767 detail::Wrapper<cl_type>::operator=(std::move(ctx));
2772 /*! \brief Returns a singleton context including all devices of CL_DEVICE_TYPE_DEFAULT.
2774 * \note All calls to this function return the same cl_context as the first.
2776 static Context getDefault(cl_int * err = NULL)
2778 std::call_once(default_initialized_, makeDefault);
2779 detail::errHandler(default_error_);
2781 *err = default_error_;
2787 * Modify the default context to be used by
2788 * subsequent operations.
2789 * Will only set the default if no default was previously created.
2790 * @return updated default context.
2791 * Should be compared to the passed value to ensure that it was updated.
2793 static Context setDefault(const Context &default_context)
2795 std::call_once(default_initialized_, makeDefaultProvided, std::cref(default_context));
2796 detail::errHandler(default_error_);
2800 //! \brief Default constructor - initializes to NULL.
2801 Context() : detail::Wrapper<cl_type>() { }
2803 /*! \brief Constructor from cl_context - takes ownership.
2805 * This effectively transfers ownership of a refcount on the cl_context
2806 * into the new Context object.
2808 explicit Context(const cl_context& context, bool retainObject = false) :
2809 detail::Wrapper<cl_type>(context, retainObject) { }
2811 /*! \brief Assignment operator from cl_context - takes ownership.
2813 * This effectively transfers ownership of a refcount on the rhs and calls
2814 * clReleaseContext() on the value previously held by this instance.
2816 Context& operator = (const cl_context& rhs)
2818 detail::Wrapper<cl_type>::operator=(rhs);
2822 //! \brief Wrapper for clGetContextInfo().
2823 template <typename T>
2824 cl_int getInfo(cl_context_info name, T* param) const
2826 return detail::errHandler(
2827 detail::getInfo(&::clGetContextInfo, object_, name, param),
2828 __GET_CONTEXT_INFO_ERR);
2831 //! \brief Wrapper for clGetContextInfo() that returns by value.
2832 template <cl_int name> typename
2833 detail::param_traits<detail::cl_context_info, name>::param_type
2834 getInfo(cl_int* err = NULL) const
2836 typename detail::param_traits<
2837 detail::cl_context_info, name>::param_type param;
2838 cl_int result = getInfo(name, ¶m);
2845 /*! \brief Gets a list of supported image formats.
2847 * Wraps clGetSupportedImageFormats().
2849 cl_int getSupportedImageFormats(
2851 cl_mem_object_type type,
2852 vector<ImageFormat>* formats) const
2860 cl_int err = ::clGetSupportedImageFormats(
2867 if (err != CL_SUCCESS) {
2868 return detail::errHandler(err, __GET_SUPPORTED_IMAGE_FORMATS_ERR);
2871 if (numEntries > 0) {
2872 vector<ImageFormat> value(numEntries);
2873 err = ::clGetSupportedImageFormats(
2878 (cl_image_format*)value.data(),
2880 if (err != CL_SUCCESS) {
2881 return detail::errHandler(err, __GET_SUPPORTED_IMAGE_FORMATS_ERR);
2884 formats->assign(begin(value), end(value));
2887 // If no values are being returned, ensure an empty vector comes back
2895 inline void Device::makeDefault()
2897 /* Throwing an exception from a call_once invocation does not do
2898 * what we wish, so we catch it and save the error.
2900 #if defined(CL_HPP_ENABLE_EXCEPTIONS)
2906 Context context = Context::getDefault(&error);
2907 detail::errHandler(error, __CREATE_CONTEXT_ERR);
2909 if (error != CL_SUCCESS) {
2910 default_error_ = error;
2913 default_ = context.getInfo<CL_CONTEXT_DEVICES>()[0];
2914 default_error_ = CL_SUCCESS;
2917 #if defined(CL_HPP_ENABLE_EXCEPTIONS)
2918 catch (cl::Error &e) {
2919 default_error_ = e.err();
2924 CL_HPP_DEFINE_STATIC_MEMBER_ std::once_flag Context::default_initialized_;
2925 CL_HPP_DEFINE_STATIC_MEMBER_ Context Context::default_;
2926 CL_HPP_DEFINE_STATIC_MEMBER_ cl_int Context::default_error_ = CL_SUCCESS;
2928 /*! \brief Class interface for cl_event.
2930 * \note Copies of these objects are shallow, meaning that the copy will refer
2931 * to the same underlying cl_event as the original. For details, see
2932 * clRetainEvent() and clReleaseEvent().
2936 class Event : public detail::Wrapper<cl_event>
2939 //! \brief Default constructor - initializes to NULL.
2940 Event() : detail::Wrapper<cl_type>() { }
2942 /*! \brief Constructor from cl_event - takes ownership.
2944 * \param retainObject will cause the constructor to retain its cl object.
2945 * Defaults to false to maintain compatibility with
2947 * This effectively transfers ownership of a refcount on the cl_event
2948 * into the new Event object.
2950 explicit Event(const cl_event& event, bool retainObject = false) :
2951 detail::Wrapper<cl_type>(event, retainObject) { }
2953 /*! \brief Assignment operator from cl_event - takes ownership.
2955 * This effectively transfers ownership of a refcount on the rhs and calls
2956 * clReleaseEvent() on the value previously held by this instance.
2958 Event& operator = (const cl_event& rhs)
2960 detail::Wrapper<cl_type>::operator=(rhs);
2964 //! \brief Wrapper for clGetEventInfo().
2965 template <typename T>
2966 cl_int getInfo(cl_event_info name, T* param) const
2968 return detail::errHandler(
2969 detail::getInfo(&::clGetEventInfo, object_, name, param),
2970 __GET_EVENT_INFO_ERR);
2973 //! \brief Wrapper for clGetEventInfo() that returns by value.
2974 template <cl_int name> typename
2975 detail::param_traits<detail::cl_event_info, name>::param_type
2976 getInfo(cl_int* err = NULL) const
2978 typename detail::param_traits<
2979 detail::cl_event_info, name>::param_type param;
2980 cl_int result = getInfo(name, ¶m);
2987 //! \brief Wrapper for clGetEventProfilingInfo().
2988 template <typename T>
2989 cl_int getProfilingInfo(cl_profiling_info name, T* param) const
2991 return detail::errHandler(detail::getInfo(
2992 &::clGetEventProfilingInfo, object_, name, param),
2993 __GET_EVENT_PROFILE_INFO_ERR);
2996 //! \brief Wrapper for clGetEventProfilingInfo() that returns by value.
2997 template <cl_int name> typename
2998 detail::param_traits<detail::cl_profiling_info, name>::param_type
2999 getProfilingInfo(cl_int* err = NULL) const
3001 typename detail::param_traits<
3002 detail::cl_profiling_info, name>::param_type param;
3003 cl_int result = getProfilingInfo(name, ¶m);
3010 /*! \brief Blocks the calling thread until this event completes.
3012 * Wraps clWaitForEvents().
3016 return detail::errHandler(
3017 ::clWaitForEvents(1, &object_),
3018 __WAIT_FOR_EVENTS_ERR);
3021 #if CL_HPP_TARGET_OPENCL_VERSION >= 110
3022 /*! \brief Registers a user callback function for a specific command execution status.
3024 * Wraps clSetEventCallback().
3028 void (CL_CALLBACK * pfn_notify)(cl_event, cl_int, void *),
3029 void * user_data = NULL)
3031 return detail::errHandler(
3032 ::clSetEventCallback(
3037 __SET_EVENT_CALLBACK_ERR);
3039 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 110
3041 /*! \brief Blocks the calling thread until every event specified is complete.
3043 * Wraps clWaitForEvents().
3046 waitForEvents(const vector<Event>& events)
3048 return detail::errHandler(
3050 (cl_uint) events.size(), (events.size() > 0) ? (cl_event*)&events.front() : NULL),
3051 __WAIT_FOR_EVENTS_ERR);
#if CL_HPP_TARGET_OPENCL_VERSION >= 110
/*! \brief Class interface for user events (a subset of cl_event's).
 *
 *  See Event for details about copy semantics, etc.
 */
class UserEvent : public Event
{
public:
    /*! \brief Constructs a user event on a given context.
     *
     *  Wraps clCreateUserEvent().
     */
    UserEvent(
        const Context& context,
        cl_int * err = NULL)
    {
        cl_int error;
        object_ = ::clCreateUserEvent(
            context(),
            &error);

        detail::errHandler(error, __CREATE_USER_EVENT_ERR);
        if (err != NULL) {
            *err = error;
        }
    }

    //! \brief Default constructor - initializes to NULL.
    UserEvent() : Event() { }

    /*! \brief Sets the execution status of a user event object.
     *
     *  Wraps clSetUserEventStatus().
     */
    cl_int setStatus(cl_int status)
    {
        return detail::errHandler(
            ::clSetUserEventStatus(object_,status),
            __SET_USER_EVENT_STATUS_ERR);
    }
};
#endif // CL_HPP_TARGET_OPENCL_VERSION >= 110
3098 /*! \brief Blocks the calling thread until every event specified is complete.
3100 * Wraps clWaitForEvents().
3102 inline static cl_int
3103 WaitForEvents(const vector<Event>& events)
3105 return detail::errHandler(
3107 (cl_uint) events.size(), (events.size() > 0) ? (cl_event*)&events.front() : NULL),
3108 __WAIT_FOR_EVENTS_ERR);
3111 /*! \brief Class interface for cl_mem.
3113 * \note Copies of these objects are shallow, meaning that the copy will refer
3114 * to the same underlying cl_mem as the original. For details, see
3115 * clRetainMemObject() and clReleaseMemObject().
3119 class Memory : public detail::Wrapper<cl_mem>
3122 //! \brief Default constructor - initializes to NULL.
3123 Memory() : detail::Wrapper<cl_type>() { }
3125 /*! \brief Constructor from cl_mem - takes ownership.
3127 * Optionally transfer ownership of a refcount on the cl_mem
3128 * into the new Memory object.
3130 * \param retainObject will cause the constructor to retain its cl object.
3131 * Defaults to false to maintain compatibility with
3134 * See Memory for further details.
3136 explicit Memory(const cl_mem& memory, bool retainObject) :
3137 detail::Wrapper<cl_type>(memory, retainObject) { }
3139 /*! \brief Assignment operator from cl_mem - takes ownership.
3141 * This effectively transfers ownership of a refcount on the rhs and calls
3142 * clReleaseMemObject() on the value previously held by this instance.
3144 Memory& operator = (const cl_mem& rhs)
3146 detail::Wrapper<cl_type>::operator=(rhs);
3150 /*! \brief Copy constructor to forward copy to the superclass correctly.
3151 * Required for MSVC.
3153 Memory(const Memory& mem) : detail::Wrapper<cl_type>(mem) {}
3155 /*! \brief Copy assignment to forward copy to the superclass correctly.
3156 * Required for MSVC.
3158 Memory& operator = (const Memory &mem)
3160 detail::Wrapper<cl_type>::operator=(mem);
3164 /*! \brief Move constructor to forward move to the superclass correctly.
3165 * Required for MSVC.
3167 Memory(Memory&& mem) CL_HPP_NOEXCEPT_ : detail::Wrapper<cl_type>(std::move(mem)) {}
3169 /*! \brief Move assignment to forward move to the superclass correctly.
3170 * Required for MSVC.
3172 Memory& operator = (Memory &&mem)
3174 detail::Wrapper<cl_type>::operator=(std::move(mem));
3179 //! \brief Wrapper for clGetMemObjectInfo().
3180 template <typename T>
3181 cl_int getInfo(cl_mem_info name, T* param) const
3183 return detail::errHandler(
3184 detail::getInfo(&::clGetMemObjectInfo, object_, name, param),
3185 __GET_MEM_OBJECT_INFO_ERR);
3188 //! \brief Wrapper for clGetMemObjectInfo() that returns by value.
3189 template <cl_int name> typename
3190 detail::param_traits<detail::cl_mem_info, name>::param_type
3191 getInfo(cl_int* err = NULL) const
3193 typename detail::param_traits<
3194 detail::cl_mem_info, name>::param_type param;
3195 cl_int result = getInfo(name, ¶m);
3202 #if CL_HPP_TARGET_OPENCL_VERSION >= 110
3203 /*! \brief Registers a callback function to be called when the memory object
3204 * is no longer needed.
3206 * Wraps clSetMemObjectDestructorCallback().
3208 * Repeated calls to this function, for a given cl_mem value, will append
3209 * to the list of functions called (in reverse order) when memory object's
3210 * resources are freed and the memory object is deleted.
3213 * The registered callbacks are associated with the underlying cl_mem
3214 * value - not the Memory class instance.
3216 cl_int setDestructorCallback(
3217 void (CL_CALLBACK * pfn_notify)(cl_mem, void *),
3218 void * user_data = NULL)
3220 return detail::errHandler(
3221 ::clSetMemObjectDestructorCallback(
3225 __SET_MEM_OBJECT_DESTRUCTOR_CALLBACK_ERR);
3227 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 110
3231 // Pre-declare copy functions
3233 template< typename IteratorType >
3234 cl_int copy( IteratorType startIterator, IteratorType endIterator, cl::Buffer &buffer );
3235 template< typename IteratorType >
3236 cl_int copy( const cl::Buffer &buffer, IteratorType startIterator, IteratorType endIterator );
3237 template< typename IteratorType >
3238 cl_int copy( const CommandQueue &queue, IteratorType startIterator, IteratorType endIterator, cl::Buffer &buffer );
3239 template< typename IteratorType >
3240 cl_int copy( const CommandQueue &queue, const cl::Buffer &buffer, IteratorType startIterator, IteratorType endIterator );
3243 #if CL_HPP_TARGET_OPENCL_VERSION >= 200
3249 static cl_svm_mem_flags getSVMMemFlags()
3254 } // namespace detail
3256 template<class Trait = detail::SVMTraitNull>
3257 class SVMTraitReadWrite
3260 static cl_svm_mem_flags getSVMMemFlags()
3262 return CL_MEM_READ_WRITE |
3263 Trait::getSVMMemFlags();
3267 template<class Trait = detail::SVMTraitNull>
3268 class SVMTraitReadOnly
3271 static cl_svm_mem_flags getSVMMemFlags()
3273 return CL_MEM_READ_ONLY |
3274 Trait::getSVMMemFlags();
3278 template<class Trait = detail::SVMTraitNull>
3279 class SVMTraitWriteOnly
3282 static cl_svm_mem_flags getSVMMemFlags()
3284 return CL_MEM_WRITE_ONLY |
3285 Trait::getSVMMemFlags();
3289 template<class Trait = SVMTraitReadWrite<>>
3290 class SVMTraitCoarse
3293 static cl_svm_mem_flags getSVMMemFlags()
3295 return Trait::getSVMMemFlags();
3299 template<class Trait = SVMTraitReadWrite<>>
3303 static cl_svm_mem_flags getSVMMemFlags()
3305 return CL_MEM_SVM_FINE_GRAIN_BUFFER |
3306 Trait::getSVMMemFlags();
3310 template<class Trait = SVMTraitReadWrite<>>
3311 class SVMTraitAtomic
3314 static cl_svm_mem_flags getSVMMemFlags()
3317 CL_MEM_SVM_FINE_GRAIN_BUFFER |
3318 CL_MEM_SVM_ATOMICS |
3319 Trait::getSVMMemFlags();
3323 // Pre-declare SVM map function
3324 template<typename T>
3325 inline cl_int enqueueMapSVM(
3330 const vector<Event>* events = NULL,
3331 Event* event = NULL);
3334 * STL-like allocator class for managing SVM objects provided for convenience.
3336 * Note that while this behaves like an allocator for the purposes of constructing vectors and similar objects,
3337 * care must be taken when using with smart pointers.
3338 * The allocator should not be used to construct a unique_ptr if we are using coarse-grained SVM mode because
3339 * the coarse-grained management behaviour would behave incorrectly with respect to reference counting.
3341 * Instead the allocator embeds a Deleter which may be used with unique_ptr and is used
3342 * with the allocate_shared and allocate_ptr supplied operations.
3344 template<typename T, class SVMTrait>
3345 class SVMAllocator {
3350 typedef T value_type;
3351 typedef value_type* pointer;
3352 typedef const value_type* const_pointer;
3353 typedef value_type& reference;
3354 typedef const value_type& const_reference;
3355 typedef std::size_t size_type;
3356 typedef std::ptrdiff_t difference_type;
3358 template<typename U>
3361 typedef SVMAllocator<U, SVMTrait> other;
3364 template<typename U, typename V>
3365 friend class SVMAllocator;
3368 context_(Context::getDefault())
3372 explicit SVMAllocator(cl::Context context) :
3378 SVMAllocator(const SVMAllocator &other) :
3379 context_(other.context_)
3383 template<typename U>
3384 SVMAllocator(const SVMAllocator<U, SVMTrait> &other) :
3385 context_(other.context_)
3393 pointer address(reference r) CL_HPP_NOEXCEPT_
3395 return std::addressof(r);
3398 const_pointer address(const_reference r) CL_HPP_NOEXCEPT_
3400 return std::addressof(r);
3404 * Allocate an SVM pointer.
3406 * If the allocator is coarse-grained, this will take ownership to allow
3407 * containers to correctly construct data in place.
3411 typename cl::SVMAllocator<void, SVMTrait>::const_pointer = 0)
3413 // Allocate memory with default alignment matching the size of the type
3417 SVMTrait::getSVMMemFlags(),
3420 pointer retValue = reinterpret_cast<pointer>(
3422 #if defined(CL_HPP_ENABLE_EXCEPTIONS)
3424 std::bad_alloc excep;
3427 #endif // #if defined(CL_HPP_ENABLE_EXCEPTIONS)
3429 // If allocation was coarse-grained then map it
3430 if (!(SVMTrait::getSVMMemFlags() & CL_MEM_SVM_FINE_GRAIN_BUFFER)) {
3431 cl_int err = enqueueMapSVM(retValue, CL_TRUE, CL_MAP_READ | CL_MAP_WRITE, size*sizeof(T));
3432 if (err != CL_SUCCESS) {
3433 std::bad_alloc excep;
3438 // If exceptions disabled, return null pointer from allocator
3442 void deallocate(pointer p, size_type)
3444 clSVMFree(context_(), p);
3448 * Return the maximum possible allocation size.
3449 * This is the minimum of the maximum sizes of all devices in the context.
3451 size_type max_size() const CL_HPP_NOEXCEPT_
3453 size_type maxSize = std::numeric_limits<size_type>::max() / sizeof(T);
3455 for (Device &d : context_.getInfo<CL_CONTEXT_DEVICES>()) {
3458 static_cast<size_type>(d.getInfo<CL_DEVICE_MAX_MEM_ALLOC_SIZE>()));
3464 template< class U, class... Args >
3465 void construct(U* p, Args&&... args)
3477 * Returns true if the contexts match.
3479 inline bool operator==(SVMAllocator const& rhs)
3481 return (context_==rhs.context_);
3484 inline bool operator!=(SVMAllocator const& a)
3486 return !operator==(a);
3488 }; // class SVMAllocator return cl::pointer<T>(tmp, detail::Deleter<T, Alloc>{alloc, copies});
3491 template<class SVMTrait>
3492 class SVMAllocator<void, SVMTrait> {
3494 typedef void value_type;
3495 typedef value_type* pointer;
3496 typedef const value_type* const_pointer;
3498 template<typename U>
3501 typedef SVMAllocator<U, SVMTrait> other;
3504 template<typename U, typename V>
3505 friend class SVMAllocator;
3508 #if !defined(CL_HPP_NO_STD_UNIQUE_PTR)
3511 template<class Alloc>
3518 typedef typename std::allocator_traits<Alloc>::pointer pointer;
3520 Deleter(const Alloc &alloc, size_type copies) : alloc_{ alloc }, copies_{ copies }
3524 void operator()(pointer ptr) const {
3525 Alloc tmpAlloc{ alloc_ };
3526 std::allocator_traits<Alloc>::destroy(tmpAlloc, std::addressof(*ptr));
3527 std::allocator_traits<Alloc>::deallocate(tmpAlloc, ptr, copies_);
3530 } // namespace detail
3533 * Allocation operation compatible with std::allocate_ptr.
3534 * Creates a unique_ptr<T> by default.
3535 * This requirement is to ensure that the control block is not
3536 * allocated in memory inaccessible to the host.
3538 template <class T, class Alloc, class... Args>
3539 cl::pointer<T, detail::Deleter<Alloc>> allocate_pointer(const Alloc &alloc_, Args&&... args)
3541 Alloc alloc(alloc_);
3542 static const size_t copies = 1;
3544 // Ensure that creation of the management block and the
3545 // object are dealt with separately such that we only provide a deleter
3547 T* tmp = std::allocator_traits<Alloc>::allocate(alloc, copies);
3549 std::bad_alloc excep;
3553 std::allocator_traits<Alloc>::construct(
3555 std::addressof(*tmp),
3556 std::forward<Args>(args)...);
3558 return cl::pointer<T, detail::Deleter<Alloc>>(tmp, detail::Deleter<Alloc>{alloc, copies});
3560 catch (std::bad_alloc b)
3562 std::allocator_traits<Alloc>::deallocate(alloc, tmp, copies);
3567 template< class T, class SVMTrait, class... Args >
3568 cl::pointer<T, detail::Deleter<SVMAllocator<T, SVMTrait>>> allocate_svm(Args... args)
3570 SVMAllocator<T, SVMTrait> alloc;
3571 return cl::allocate_pointer<T>(alloc, args...);
3574 template< class T, class SVMTrait, class... Args >
3575 cl::pointer<T, detail::Deleter<SVMAllocator<T, SVMTrait>>> allocate_svm(const cl::Context &c, Args... args)
3577 SVMAllocator<T, SVMTrait> alloc(c);
3578 return cl::allocate_pointer<T>(alloc, args...);
3580 #endif // #if !defined(CL_HPP_NO_STD_UNIQUE_PTR)
3582 /*! \brief Vector alias to simplify contruction of coarse-grained SVM containers.
3585 template < class T >
3586 using coarse_svm_vector = vector<T, cl::SVMAllocator<int, cl::SVMTraitCoarse<>>>;
3588 /*! \brief Vector alias to simplify contruction of fine-grained SVM containers.
3591 template < class T >
3592 using fine_svm_vector = vector<T, cl::SVMAllocator<int, cl::SVMTraitFine<>>>;
3594 /*! \brief Vector alias to simplify contruction of fine-grained SVM containers that support platform atomics.
3597 template < class T >
3598 using atomic_svm_vector = vector<T, cl::SVMAllocator<int, cl::SVMTraitAtomic<>>>;
3600 #endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 200
3603 /*! \brief Class interface for Buffer Memory Objects.
3605 * See Memory for details about copy semantics, etc.
3609 class Buffer : public Memory
3613 /*! \brief Constructs a Buffer in a specified context.
3615 * Wraps clCreateBuffer().
3617 * \param host_ptr Storage to be used if the CL_MEM_USE_HOST_PTR flag was
3618 * specified. Note alignment & exclusivity requirements.
3621 const Context& context,
3624 void* host_ptr = NULL,
3628 object_ = ::clCreateBuffer(context(), flags, size, host_ptr, &error);
3630 detail::errHandler(error, __CREATE_BUFFER_ERR);
3636 /*! \brief Constructs a Buffer in the default context.
3638 * Wraps clCreateBuffer().
3640 * \param host_ptr Storage to be used if the CL_MEM_USE_HOST_PTR flag was
3641 * specified. Note alignment & exclusivity requirements.
3643 * \see Context::getDefault()
3648 void* host_ptr = NULL,
3653 Context context = Context::getDefault(err);
3655 object_ = ::clCreateBuffer(context(), flags, size, host_ptr, &error);
3657 detail::errHandler(error, __CREATE_BUFFER_ERR);
3664 * \brief Construct a Buffer from a host container via iterators.
3665 * IteratorType must be random access.
3666 * If useHostPtr is specified iterators must represent contiguous data.
3668 template< typename IteratorType >
3670 IteratorType startIterator,
3671 IteratorType endIterator,
3673 bool useHostPtr = false,
3676 typedef typename std::iterator_traits<IteratorType>::value_type DataType;
3679 cl_mem_flags flags = 0;
3681 flags |= CL_MEM_READ_ONLY;
3684 flags |= CL_MEM_READ_WRITE;
3687 flags |= CL_MEM_USE_HOST_PTR;
3690 size_type size = sizeof(DataType)*(endIterator - startIterator);
3692 Context context = Context::getDefault(err);
3695 object_ = ::clCreateBuffer(context(), flags, size, static_cast<DataType*>(&*startIterator), &error);
3697 object_ = ::clCreateBuffer(context(), flags, size, 0, &error);
3700 detail::errHandler(error, __CREATE_BUFFER_ERR);
3706 error = cl::copy(startIterator, endIterator, *this);
3707 detail::errHandler(error, __CREATE_BUFFER_ERR);
3715 * \brief Construct a Buffer from a host container via iterators using a specified context.
3716 * IteratorType must be random access.
3717 * If useHostPtr is specified iterators must represent contiguous data.
3719 template< typename IteratorType >
3720 Buffer(const Context &context, IteratorType startIterator, IteratorType endIterator,
3721 bool readOnly, bool useHostPtr = false, cl_int* err = NULL);
3724 * \brief Construct a Buffer from a host container via iterators using a specified queue.
3725 * If useHostPtr is specified iterators must be random access.
3727 template< typename IteratorType >
3728 Buffer(const CommandQueue &queue, IteratorType startIterator, IteratorType endIterator,
3729 bool readOnly, bool useHostPtr = false, cl_int* err = NULL);
3731 //! \brief Default constructor - initializes to NULL.
3732 Buffer() : Memory() { }
3734 /*! \brief Constructor from cl_mem - takes ownership.
3736 * \param retainObject will cause the constructor to retain its cl object.
3737 * Defaults to false to maintain compatibility with earlier versions.
3739 * See Memory for further details.
3741 explicit Buffer(const cl_mem& buffer, bool retainObject = false) :
3742 Memory(buffer, retainObject) { }
3744 /*! \brief Assignment from cl_mem - performs shallow copy.
3746 * See Memory for further details.
3748 Buffer& operator = (const cl_mem& rhs)
3750 Memory::operator=(rhs);
3754 /*! \brief Copy constructor to forward copy to the superclass correctly.
3755 * Required for MSVC.
3757 Buffer(const Buffer& buf) : Memory(buf) {}
3759 /*! \brief Copy assignment to forward copy to the superclass correctly.
3760 * Required for MSVC.
3762 Buffer& operator = (const Buffer &buf)
3764 Memory::operator=(buf);
3768 /*! \brief Move constructor to forward move to the superclass correctly.
3769 * Required for MSVC.
3771 Buffer(Buffer&& buf) CL_HPP_NOEXCEPT_ : Memory(std::move(buf)) {}
3773 /*! \brief Move assignment to forward move to the superclass correctly.
3774 * Required for MSVC.
3776 Buffer& operator = (Buffer &&buf)
3778 Memory::operator=(std::move(buf));
3782 #if CL_HPP_TARGET_OPENCL_VERSION >= 110
3783 /*! \brief Creates a new buffer object from this.
3785 * Wraps clCreateSubBuffer().
3787 Buffer createSubBuffer(
3789 cl_buffer_create_type buffer_create_type,
3790 const void * buffer_create_info,
3791 cl_int * err = NULL)
3795 result.object_ = ::clCreateSubBuffer(
3802 detail::errHandler(error, __CREATE_SUBBUFFER_ERR);
3809 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 110
#if defined (CL_HPP_USE_DX_INTEROP)
/*! \brief Class interface for creating OpenCL buffers from ID3D10Buffer's.
 *
 *  This is provided to facilitate interoperability with Direct3D.
 *
 *  See Memory for details about copy semantics, etc.
 *
 *  \see Memory
 */
class BufferD3D10 : public Buffer
{
public:
    /*! \brief Constructs a BufferD3D10, in a specified context, from a
     *         given ID3D10Buffer.
     *
     *  Wraps clCreateFromD3D10BufferKHR().
     */
    BufferD3D10(
        const Context& context,
        cl_mem_flags flags,
        ID3D10Buffer* bufobj,
        cl_int * err = NULL) : pfn_clCreateFromD3D10BufferKHR(nullptr)
    {
        typedef CL_API_ENTRY cl_mem (CL_API_CALL *PFN_clCreateFromD3D10BufferKHR)(
            cl_context context, cl_mem_flags flags, ID3D10Buffer* buffer,
            cl_int* errcode_ret);
        PFN_clCreateFromD3D10BufferKHR pfn_clCreateFromD3D10BufferKHR;
#if CL_HPP_TARGET_OPENCL_VERSION >= 120
        // Find the platform from the context properties so the extension
        // function pointer can be resolved per-platform.
        vector<cl_context_properties> props = context.getInfo<CL_CONTEXT_PROPERTIES>();
        cl_platform platform = -1;
        for( int i = 0; i < props.size(); ++i ) {
            if( props[i] == CL_CONTEXT_PLATFORM ) {
                platform = props[i+1];
            }
        }
        CL_HPP_INIT_CL_EXT_FCN_PTR_PLATFORM_(platform, clCreateFromD3D10BufferKHR);
#elif CL_HPP_TARGET_OPENCL_VERSION >= 110
        CL_HPP_INIT_CL_EXT_FCN_PTR_(clCreateFromD3D10BufferKHR);
#endif

        cl_int error;
        object_ = pfn_clCreateFromD3D10BufferKHR(
            context(),
            flags,
            bufobj,
            &error);

        detail::errHandler(error, __CREATE_GL_BUFFER_ERR);
        if (err != NULL) {
            *err = error;
        }
    }

    //! \brief Default constructor - initializes to NULL.
    BufferD3D10() : Buffer() { }

    /*! \brief Constructor from cl_mem - takes ownership.
     *
     * \param retainObject will cause the constructor to retain its cl object.
     *                     Defaults to false to maintain compatibility with
     *                     earlier versions.
     *
     *  See Memory for further details.
     */
    explicit BufferD3D10(const cl_mem& buffer, bool retainObject = false) :
        Buffer(buffer, retainObject) { }

    /*! \brief Assignment from cl_mem - performs shallow copy.
     *
     *  See Memory for further details.
     */
    BufferD3D10& operator = (const cl_mem& rhs)
    {
        Buffer::operator=(rhs);
        return *this;
    }

    /*! \brief Copy constructor to forward copy to the superclass correctly.
     * Required for MSVC.
     */
    BufferD3D10(const BufferD3D10& buf) :
        Buffer(buf) {}

    /*! \brief Copy assignment to forward copy to the superclass correctly.
     * Required for MSVC.
     */
    BufferD3D10& operator = (const BufferD3D10 &buf)
    {
        Buffer::operator=(buf);
        return *this;
    }

    /*! \brief Move constructor to forward move to the superclass correctly.
     * Required for MSVC.
     */
    BufferD3D10(BufferD3D10&& buf) CL_HPP_NOEXCEPT_ : Buffer(std::move(buf)) {}

    /*! \brief Move assignment to forward move to the superclass correctly.
     * Required for MSVC.
     */
    BufferD3D10& operator = (BufferD3D10 &&buf)
    {
        Buffer::operator=(std::move(buf));
        return *this;
    }
};
#endif
3921 /*! \brief Class interface for GL Buffer Memory Objects.
3923 * This is provided to facilitate interoperability with OpenGL.
3925 * See Memory for details about copy semantics, etc.
3929 class BufferGL : public Buffer
3932 /*! \brief Constructs a BufferGL in a specified context, from a given
3935 * Wraps clCreateFromGLBuffer().
3938 const Context& context,
3941 cl_int * err = NULL)
3944 object_ = ::clCreateFromGLBuffer(
3950 detail::errHandler(error, __CREATE_GL_BUFFER_ERR);
3956 //! \brief Default constructor - initializes to NULL.
3957 BufferGL() : Buffer() { }
3959 /*! \brief Constructor from cl_mem - takes ownership.
3961 * \param retainObject will cause the constructor to retain its cl object.
3962 * Defaults to false to maintain compatibility with
3964 * See Memory for further details.
3966 explicit BufferGL(const cl_mem& buffer, bool retainObject = false) :
3967 Buffer(buffer, retainObject) { }
3969 /*! \brief Assignment from cl_mem - performs shallow copy.
3971 * See Memory for further details.
3973 BufferGL& operator = (const cl_mem& rhs)
3975 Buffer::operator=(rhs);
3979 /*! \brief Copy constructor to forward copy to the superclass correctly.
3980 * Required for MSVC.
3982 BufferGL(const BufferGL& buf) : Buffer(buf) {}
3984 /*! \brief Copy assignment to forward copy to the superclass correctly.
3985 * Required for MSVC.
3987 BufferGL& operator = (const BufferGL &buf)
3989 Buffer::operator=(buf);
3993 /*! \brief Move constructor to forward move to the superclass correctly.
3994 * Required for MSVC.
3996 BufferGL(BufferGL&& buf) CL_HPP_NOEXCEPT_ : Buffer(std::move(buf)) {}
3998 /*! \brief Move assignment to forward move to the superclass correctly.
3999 * Required for MSVC.
4001 BufferGL& operator = (BufferGL &&buf)
4003 Buffer::operator=(std::move(buf));
4007 //! \brief Wrapper for clGetGLObjectInfo().
4008 cl_int getObjectInfo(
4009 cl_gl_object_type *type,
4010 cl_GLuint * gl_object_name)
4012 return detail::errHandler(
4013 ::clGetGLObjectInfo(object_,type,gl_object_name),
4014 __GET_GL_OBJECT_INFO_ERR);
4018 /*! \brief Class interface for GL Render Buffer Memory Objects.
4020 * This is provided to facilitate interoperability with OpenGL.
4022 * See Memory for details about copy semantics, etc.
4026 class BufferRenderGL : public Buffer
4029 /*! \brief Constructs a BufferRenderGL in a specified context, from a given
4032 * Wraps clCreateFromGLRenderbuffer().
4035 const Context& context,
4038 cl_int * err = NULL)
4041 object_ = ::clCreateFromGLRenderbuffer(
4047 detail::errHandler(error, __CREATE_GL_RENDER_BUFFER_ERR);
4053 //! \brief Default constructor - initializes to NULL.
4054 BufferRenderGL() : Buffer() { }
4056 /*! \brief Constructor from cl_mem - takes ownership.
4058 * \param retainObject will cause the constructor to retain its cl object.
4059 * Defaults to false to maintain compatibility with
4061 * See Memory for further details.
4063 explicit BufferRenderGL(const cl_mem& buffer, bool retainObject = false) :
4064 Buffer(buffer, retainObject) { }
4066 /*! \brief Assignment from cl_mem - performs shallow copy.
4068 * See Memory for further details.
4070 BufferRenderGL& operator = (const cl_mem& rhs)
4072 Buffer::operator=(rhs);
4076 /*! \brief Copy constructor to forward copy to the superclass correctly.
4077 * Required for MSVC.
4079 BufferRenderGL(const BufferRenderGL& buf) : Buffer(buf) {}
4081 /*! \brief Copy assignment to forward copy to the superclass correctly.
4082 * Required for MSVC.
4084 BufferRenderGL& operator = (const BufferRenderGL &buf)
4086 Buffer::operator=(buf);
4090 /*! \brief Move constructor to forward move to the superclass correctly.
4091 * Required for MSVC.
4093 BufferRenderGL(BufferRenderGL&& buf) CL_HPP_NOEXCEPT_ : Buffer(std::move(buf)) {}
4095 /*! \brief Move assignment to forward move to the superclass correctly.
4096 * Required for MSVC.
4098 BufferRenderGL& operator = (BufferRenderGL &&buf)
4100 Buffer::operator=(std::move(buf));
4104 //! \brief Wrapper for clGetGLObjectInfo().
4105 cl_int getObjectInfo(
4106 cl_gl_object_type *type,
4107 cl_GLuint * gl_object_name)
4109 return detail::errHandler(
4110 ::clGetGLObjectInfo(object_,type,gl_object_name),
4111 __GET_GL_OBJECT_INFO_ERR);
4115 /*! \brief C++ base class for Image Memory objects.
4117 * See Memory for details about copy semantics, etc.
4121 class Image : public Memory
4124 //! \brief Default constructor - initializes to NULL.
4125 Image() : Memory() { }
4127 /*! \brief Constructor from cl_mem - takes ownership.
4129 * \param retainObject will cause the constructor to retain its cl object.
4130 * Defaults to false to maintain compatibility with
4132 * See Memory for further details.
4134 explicit Image(const cl_mem& image, bool retainObject = false) :
4135 Memory(image, retainObject) { }
4137 /*! \brief Assignment from cl_mem - performs shallow copy.
4139 * See Memory for further details.
4141 Image& operator = (const cl_mem& rhs)
4143 Memory::operator=(rhs);
4147 /*! \brief Copy constructor to forward copy to the superclass correctly.
4148 * Required for MSVC.
4150 Image(const Image& img) : Memory(img) {}
4152 /*! \brief Copy assignment to forward copy to the superclass correctly.
4153 * Required for MSVC.
4155 Image& operator = (const Image &img)
4157 Memory::operator=(img);
4161 /*! \brief Move constructor to forward move to the superclass correctly.
4162 * Required for MSVC.
4164 Image(Image&& img) CL_HPP_NOEXCEPT_ : Memory(std::move(img)) {}
4166 /*! \brief Move assignment to forward move to the superclass correctly.
4167 * Required for MSVC.
4169 Image& operator = (Image &&img)
4171 Memory::operator=(std::move(img));
4177 //! \brief Wrapper for clGetImageInfo().
4178 template <typename T>
4179 cl_int getImageInfo(cl_image_info name, T* param) const
4181 return detail::errHandler(
4182 detail::getInfo(&::clGetImageInfo, object_, name, param),
4183 __GET_IMAGE_INFO_ERR);
4186 //! \brief Wrapper for clGetImageInfo() that returns by value.
4187 template <cl_int name> typename
4188 detail::param_traits<detail::cl_image_info, name>::param_type
4189 getImageInfo(cl_int* err = NULL) const
4191 typename detail::param_traits<
4192 detail::cl_image_info, name>::param_type param;
4193 cl_int result = getImageInfo(name, ¶m);
4201 #if CL_HPP_TARGET_OPENCL_VERSION >= 120
4202 /*! \brief Class interface for 1D Image Memory objects.
4204 * See Memory for details about copy semantics, etc.
4208 class Image1D : public Image
4211 /*! \brief Constructs a 1D Image in a specified context.
4213 * Wraps clCreateImage().
4216 const Context& context,
4220 void* host_ptr = NULL,
4224 cl_image_desc desc =
4226 CL_MEM_OBJECT_IMAGE1D,
4228 0, 0, 0, 0, 0, 0, 0, 0
4230 object_ = ::clCreateImage(
4238 detail::errHandler(error, __CREATE_IMAGE_ERR);
4244 //! \brief Default constructor - initializes to NULL.
4247 /*! \brief Constructor from cl_mem - takes ownership.
4249 * \param retainObject will cause the constructor to retain its cl object.
4250 * Defaults to false to maintain compatibility with
4252 * See Memory for further details.
4254 explicit Image1D(const cl_mem& image1D, bool retainObject = false) :
4255 Image(image1D, retainObject) { }
4257 /*! \brief Assignment from cl_mem - performs shallow copy.
4259 * See Memory for further details.
4261 Image1D& operator = (const cl_mem& rhs)
4263 Image::operator=(rhs);
4267 /*! \brief Copy constructor to forward copy to the superclass correctly.
4268 * Required for MSVC.
4270 Image1D(const Image1D& img) : Image(img) {}
4272 /*! \brief Copy assignment to forward copy to the superclass correctly.
4273 * Required for MSVC.
4275 Image1D& operator = (const Image1D &img)
4277 Image::operator=(img);
4281 /*! \brief Move constructor to forward move to the superclass correctly.
4282 * Required for MSVC.
4284 Image1D(Image1D&& img) CL_HPP_NOEXCEPT_ : Image(std::move(img)) {}
4286 /*! \brief Move assignment to forward move to the superclass correctly.
4287 * Required for MSVC.
4289 Image1D& operator = (Image1D &&img)
4291 Image::operator=(std::move(img));
4297 /*! \class Image1DBuffer
4298 * \brief Image interface for 1D buffer images.
4300 class Image1DBuffer : public Image
4304 const Context& context,
4308 const Buffer &buffer,
4312 cl_image_desc desc =
4314 CL_MEM_OBJECT_IMAGE1D_BUFFER,
4316 0, 0, 0, 0, 0, 0, 0,
4319 object_ = ::clCreateImage(
4327 detail::errHandler(error, __CREATE_IMAGE_ERR);
4335 /*! \brief Constructor from cl_mem - takes ownership.
4337 * \param retainObject will cause the constructor to retain its cl object.
4338 * Defaults to false to maintain compatibility with
4340 * See Memory for further details.
4342 explicit Image1DBuffer(const cl_mem& image1D, bool retainObject = false) :
4343 Image(image1D, retainObject) { }
4345 Image1DBuffer& operator = (const cl_mem& rhs)
4347 Image::operator=(rhs);
4351 /*! \brief Copy constructor to forward copy to the superclass correctly.
4352 * Required for MSVC.
4354 Image1DBuffer(const Image1DBuffer& img) : Image(img) {}
4356 /*! \brief Copy assignment to forward copy to the superclass correctly.
4357 * Required for MSVC.
4359 Image1DBuffer& operator = (const Image1DBuffer &img)
4361 Image::operator=(img);
4365 /*! \brief Move constructor to forward move to the superclass correctly.
4366 * Required for MSVC.
4368 Image1DBuffer(Image1DBuffer&& img) CL_HPP_NOEXCEPT_ : Image(std::move(img)) {}
4370 /*! \brief Move assignment to forward move to the superclass correctly.
4371 * Required for MSVC.
4373 Image1DBuffer& operator = (Image1DBuffer &&img)
4375 Image::operator=(std::move(img));
4381 /*! \class Image1DArray
4382 * \brief Image interface for arrays of 1D images.
4384 class Image1DArray : public Image
4388 const Context& context,
4391 size_type arraySize,
4394 void* host_ptr = NULL,
4398 cl_image_desc desc =
4400 CL_MEM_OBJECT_IMAGE1D_ARRAY,
4402 0, 0, // height, depth (unused)
4407 object_ = ::clCreateImage(
4415 detail::errHandler(error, __CREATE_IMAGE_ERR);
4423 /*! \brief Constructor from cl_mem - takes ownership.
4425 * \param retainObject will cause the constructor to retain its cl object.
4426 * Defaults to false to maintain compatibility with
4428 * See Memory for further details.
4430 explicit Image1DArray(const cl_mem& imageArray, bool retainObject = false) :
4431 Image(imageArray, retainObject) { }
4434 Image1DArray& operator = (const cl_mem& rhs)
4436 Image::operator=(rhs);
4440 /*! \brief Copy constructor to forward copy to the superclass correctly.
4441 * Required for MSVC.
4443 Image1DArray(const Image1DArray& img) : Image(img) {}
4445 /*! \brief Copy assignment to forward copy to the superclass correctly.
4446 * Required for MSVC.
4448 Image1DArray& operator = (const Image1DArray &img)
4450 Image::operator=(img);
4454 /*! \brief Move constructor to forward move to the superclass correctly.
4455 * Required for MSVC.
4457 Image1DArray(Image1DArray&& img) CL_HPP_NOEXCEPT_ : Image(std::move(img)) {}
4459 /*! \brief Move assignment to forward move to the superclass correctly.
4460 * Required for MSVC.
4462 Image1DArray& operator = (Image1DArray &&img)
4464 Image::operator=(std::move(img));
4469 #endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 120
4472 /*! \brief Class interface for 2D Image Memory objects.
4474 * See Memory for details about copy semantics, etc.
4478 class Image2D : public Image
4481 /*! \brief Constructs a 2D Image in a specified context.
4483 * Wraps clCreateImage().
4486 const Context& context,
4491 size_type row_pitch = 0,
4492 void* host_ptr = NULL,
4496 bool useCreateImage;
4498 #if CL_HPP_TARGET_OPENCL_VERSION >= 120 && CL_HPP_MINIMUM_OPENCL_VERSION < 120
4499 // Run-time decision based on the actual platform
4501 cl_uint version = detail::getContextPlatformVersion(context());
4502 useCreateImage = (version >= 0x10002); // OpenCL 1.2 or above
4504 #elif CL_HPP_TARGET_OPENCL_VERSION >= 120
4505 useCreateImage = true;
4507 useCreateImage = false;
4510 #if CL_HPP_TARGET_OPENCL_VERSION >= 120
4513 cl_image_desc desc =
4515 CL_MEM_OBJECT_IMAGE2D,
4518 0, 0, // depth, array size (unused)
4522 object_ = ::clCreateImage(
4530 detail::errHandler(error, __CREATE_IMAGE_ERR);
4535 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
4536 #if CL_HPP_MINIMUM_OPENCL_VERSION < 120
4537 if (!useCreateImage)
4539 object_ = ::clCreateImage2D(
4540 context(), flags,&format, width, height, row_pitch, host_ptr, &error);
4542 detail::errHandler(error, __CREATE_IMAGE2D_ERR);
4547 #endif // CL_HPP_MINIMUM_OPENCL_VERSION < 120
4550 #if CL_HPP_TARGET_OPENCL_VERSION >= 200
4551 /*! \brief Constructs a 2D Image from a buffer.
4552 * \note This will share storage with the underlying buffer.
4554 * Wraps clCreateImage().
4557 const Context& context,
4559 const Buffer &sourceBuffer,
4562 size_type row_pitch = 0,
4563 cl_int* err = nullptr)
4567 cl_image_desc desc =
4569 CL_MEM_OBJECT_IMAGE2D,
4572 0, 0, // depth, array size (unused)
4575 // Use buffer as input to image
4578 object_ = ::clCreateImage(
4580 0, // flags inherited from buffer
4586 detail::errHandler(error, __CREATE_IMAGE_ERR);
4587 if (err != nullptr) {
4591 #endif //#if CL_HPP_TARGET_OPENCL_VERSION >= 200
4593 #if CL_HPP_TARGET_OPENCL_VERSION >= 200
4594 /*! \brief Constructs a 2D Image from an image.
4595 * \note This will share storage with the underlying image but may
4596 * reinterpret the channel order and type.
4598 * The image will be created matching with a descriptor matching the source.
4600 * \param order is the channel order to reinterpret the image data as.
4601 * The channel order may differ as described in the OpenCL
4602 * 2.0 API specification.
4604 * Wraps clCreateImage().
4607 const Context& context,
4608 cl_channel_order order,
4609 const Image &sourceImage,
4610 cl_int* err = nullptr)
4614 // Descriptor fields have to match source image
4615 size_type sourceWidth =
4616 sourceImage.getImageInfo<CL_IMAGE_WIDTH>();
4617 size_type sourceHeight =
4618 sourceImage.getImageInfo<CL_IMAGE_HEIGHT>();
4619 size_type sourceRowPitch =
4620 sourceImage.getImageInfo<CL_IMAGE_ROW_PITCH>();
4621 cl_uint sourceNumMIPLevels =
4622 sourceImage.getImageInfo<CL_IMAGE_NUM_MIP_LEVELS>();
4623 cl_uint sourceNumSamples =
4624 sourceImage.getImageInfo<CL_IMAGE_NUM_SAMPLES>();
4625 cl_image_format sourceFormat =
4626 sourceImage.getImageInfo<CL_IMAGE_FORMAT>();
4628 // Update only the channel order.
4629 // Channel format inherited from source.
4630 sourceFormat.image_channel_order = order;
4631 cl_image_desc desc =
4633 CL_MEM_OBJECT_IMAGE2D,
4636 0, 0, // depth (unused), array size (unused)
4638 0, // slice pitch (unused)
4641 // Use buffer as input to image
4644 object_ = ::clCreateImage(
4646 0, // flags should be inherited from mem_object
4652 detail::errHandler(error, __CREATE_IMAGE_ERR);
4653 if (err != nullptr) {
4657 #endif //#if CL_HPP_TARGET_OPENCL_VERSION >= 200
4659 //! \brief Default constructor - initializes to NULL.
4662 /*! \brief Constructor from cl_mem - takes ownership.
4664 * \param retainObject will cause the constructor to retain its cl object.
4665 * Defaults to false to maintain compatibility with
4667 * See Memory for further details.
4669 explicit Image2D(const cl_mem& image2D, bool retainObject = false) :
4670 Image(image2D, retainObject) { }
4672 /*! \brief Assignment from cl_mem - performs shallow copy.
4674 * See Memory for further details.
4676 Image2D& operator = (const cl_mem& rhs)
4678 Image::operator=(rhs);
4682 /*! \brief Copy constructor to forward copy to the superclass correctly.
4683 * Required for MSVC.
4685 Image2D(const Image2D& img) : Image(img) {}
4687 /*! \brief Copy assignment to forward copy to the superclass correctly.
4688 * Required for MSVC.
4690 Image2D& operator = (const Image2D &img)
4692 Image::operator=(img);
4696 /*! \brief Move constructor to forward move to the superclass correctly.
4697 * Required for MSVC.
4699 Image2D(Image2D&& img) CL_HPP_NOEXCEPT_ : Image(std::move(img)) {}
4701 /*! \brief Move assignment to forward move to the superclass correctly.
4702 * Required for MSVC.
4704 Image2D& operator = (Image2D &&img)
4706 Image::operator=(std::move(img));
#if defined(CL_USE_DEPRECATED_OPENCL_1_1_APIS)
/*! \brief Class interface for GL 2D Image Memory objects.
 *
 *  This is provided to facilitate interoperability with OpenGL.
 *
 *  See Memory for details about copy semantics, etc.
 *
 *  \see Memory
 *  \note Deprecated for OpenCL 1.2. Please use ImageGL instead.
 */
class CL_EXT_PREFIX__VERSION_1_1_DEPRECATED Image2DGL : public Image2D
{
public:
    /*! \brief Constructs an Image2DGL in a specified context, from a given
     *         GL Texture.
     *
     *  Wraps clCreateFromGLTexture2D().
     */
    Image2DGL(
        const Context& context,
        cl_mem_flags flags,
        cl_GLenum target,
        cl_GLint miplevel,
        cl_GLuint texobj,
        cl_int * err = NULL)
    {
        cl_int error;
        object_ = ::clCreateFromGLTexture2D(
            context(),
            flags,
            target,
            miplevel,
            texobj,
            &error);

        detail::errHandler(error, __CREATE_GL_TEXTURE_2D_ERR);
        if (err != NULL) {
            *err = error;
        }
    }

    //! \brief Default constructor - initializes to NULL.
    Image2DGL() : Image2D() { }

    /*! \brief Constructor from cl_mem - takes ownership.
     *
     * \param retainObject will cause the constructor to retain its cl object.
     *                     Defaults to false to maintain compatibility with
     *                     earlier versions.
     *
     *  See Memory for further details.
     */
    explicit Image2DGL(const cl_mem& image, bool retainObject = false) :
        Image2D(image, retainObject) { }

    /*! \brief Assignment from cl_mem - performs shallow copy.
     *
     *  See Memory for further details.
     */
    Image2DGL& operator = (const cl_mem& rhs)
    {
        Image2D::operator=(rhs);
        return *this;
    }

    /*! \brief Copy constructor to forward copy to the superclass correctly.
     * Required for MSVC.
     */
    Image2DGL(const Image2DGL& img) : Image2D(img) {}

    /*! \brief Copy assignment to forward copy to the superclass correctly.
     * Required for MSVC.
     */
    Image2DGL& operator = (const Image2DGL &img)
    {
        Image2D::operator=(img);
        return *this;
    }

    /*! \brief Move constructor to forward move to the superclass correctly.
     * Required for MSVC.
     */
    Image2DGL(Image2DGL&& img) CL_HPP_NOEXCEPT_ : Image2D(std::move(img)) {}

    /*! \brief Move assignment to forward move to the superclass correctly.
     * Required for MSVC.
     */
    Image2DGL& operator = (Image2DGL &&img)
    {
        Image2D::operator=(std::move(img));
        return *this;
    }
} CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED;
#endif // CL_USE_DEPRECATED_OPENCL_1_1_APIS
4809 #if CL_HPP_TARGET_OPENCL_VERSION >= 120
4810 /*! \class Image2DArray
4811 * \brief Image interface for arrays of 2D images.
4813 class Image2DArray : public Image
4817 const Context& context,
4820 size_type arraySize,
4824 size_type slicePitch,
4825 void* host_ptr = NULL,
4829 cl_image_desc desc =
4831 CL_MEM_OBJECT_IMAGE2D_ARRAY,
4834 0, // depth (unused)
4840 object_ = ::clCreateImage(
4848 detail::errHandler(error, __CREATE_IMAGE_ERR);
4856 /*! \brief Constructor from cl_mem - takes ownership.
4858 * \param retainObject will cause the constructor to retain its cl object.
4859 * Defaults to false to maintain compatibility with
4861 * See Memory for further details.
4863 explicit Image2DArray(const cl_mem& imageArray, bool retainObject = false) : Image(imageArray, retainObject) { }
4865 Image2DArray& operator = (const cl_mem& rhs)
4867 Image::operator=(rhs);
4871 /*! \brief Copy constructor to forward copy to the superclass correctly.
4872 * Required for MSVC.
4874 Image2DArray(const Image2DArray& img) : Image(img) {}
4876 /*! \brief Copy assignment to forward copy to the superclass correctly.
4877 * Required for MSVC.
4879 Image2DArray& operator = (const Image2DArray &img)
4881 Image::operator=(img);
4885 /*! \brief Move constructor to forward move to the superclass correctly.
4886 * Required for MSVC.
4888 Image2DArray(Image2DArray&& img) CL_HPP_NOEXCEPT_ : Image(std::move(img)) {}
4890 /*! \brief Move assignment to forward move to the superclass correctly.
4891 * Required for MSVC.
4893 Image2DArray& operator = (Image2DArray &&img)
4895 Image::operator=(std::move(img));
4899 #endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 120
4901 /*! \brief Class interface for 3D Image Memory objects.
4903 * See Memory for details about copy semantics, etc.
4907 class Image3D : public Image
4910 /*! \brief Constructs a 3D Image in a specified context.
4912 * Wraps clCreateImage().
4915 const Context& context,
4921 size_type row_pitch = 0,
4922 size_type slice_pitch = 0,
4923 void* host_ptr = NULL,
4927 bool useCreateImage;
4929 #if CL_HPP_TARGET_OPENCL_VERSION >= 120 && CL_HPP_MINIMUM_OPENCL_VERSION < 120
4930 // Run-time decision based on the actual platform
4932 cl_uint version = detail::getContextPlatformVersion(context());
4933 useCreateImage = (version >= 0x10002); // OpenCL 1.2 or above
4935 #elif CL_HPP_TARGET_OPENCL_VERSION >= 120
4936 useCreateImage = true;
4938 useCreateImage = false;
4941 #if CL_HPP_TARGET_OPENCL_VERSION >= 120
4944 cl_image_desc desc =
4946 CL_MEM_OBJECT_IMAGE3D,
4950 0, // array size (unused)
4955 object_ = ::clCreateImage(
4963 detail::errHandler(error, __CREATE_IMAGE_ERR);
4968 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
4969 #if CL_HPP_MINIMUM_OPENCL_VERSION < 120
4970 if (!useCreateImage)
4972 object_ = ::clCreateImage3D(
4973 context(), flags, &format, width, height, depth, row_pitch,
4974 slice_pitch, host_ptr, &error);
4976 detail::errHandler(error, __CREATE_IMAGE3D_ERR);
4981 #endif // CL_HPP_MINIMUM_OPENCL_VERSION < 120
4984 //! \brief Default constructor - initializes to NULL.
4985 Image3D() : Image() { }
4987 /*! \brief Constructor from cl_mem - takes ownership.
4989 * \param retainObject will cause the constructor to retain its cl object.
4990 * Defaults to false to maintain compatibility with
4992 * See Memory for further details.
4994 explicit Image3D(const cl_mem& image3D, bool retainObject = false) :
4995 Image(image3D, retainObject) { }
4997 /*! \brief Assignment from cl_mem - performs shallow copy.
4999 * See Memory for further details.
5001 Image3D& operator = (const cl_mem& rhs)
5003 Image::operator=(rhs);
5007 /*! \brief Copy constructor to forward copy to the superclass correctly.
5008 * Required for MSVC.
5010 Image3D(const Image3D& img) : Image(img) {}
5012 /*! \brief Copy assignment to forward copy to the superclass correctly.
5013 * Required for MSVC.
5015 Image3D& operator = (const Image3D &img)
5017 Image::operator=(img);
5021 /*! \brief Move constructor to forward move to the superclass correctly.
5022 * Required for MSVC.
5024 Image3D(Image3D&& img) CL_HPP_NOEXCEPT_ : Image(std::move(img)) {}
5026 /*! \brief Move assignment to forward move to the superclass correctly.
5027 * Required for MSVC.
5029 Image3D& operator = (Image3D &&img)
5031 Image::operator=(std::move(img));
5036 #if defined(CL_USE_DEPRECATED_OPENCL_1_1_APIS)
5037 /*! \brief Class interface for GL 3D Image Memory objects.
5039 * This is provided to facilitate interoperability with OpenGL.
5041 * See Memory for details about copy semantics, etc.
5045 class Image3DGL : public Image3D
5048 /*! \brief Constructs an Image3DGL in a specified context, from a given
5051 * Wraps clCreateFromGLTexture3D().
5054 const Context& context,
5059 cl_int * err = NULL)
5062 object_ = ::clCreateFromGLTexture3D(
5070 detail::errHandler(error, __CREATE_GL_TEXTURE_3D_ERR);
5076 //! \brief Default constructor - initializes to NULL.
5077 Image3DGL() : Image3D() { }
5079 /*! \brief Constructor from cl_mem - takes ownership.
5081 * \param retainObject will cause the constructor to retain its cl object.
5082 * Defaults to false to maintain compatibility with
5084 * See Memory for further details.
5086 explicit Image3DGL(const cl_mem& image, bool retainObject = false) :
5087 Image3D(image, retainObject) { }
5089 /*! \brief Assignment from cl_mem - performs shallow copy.
5091 * See Memory for further details.
5093 Image3DGL& operator = (const cl_mem& rhs)
5095 Image3D::operator=(rhs);
5099 /*! \brief Copy constructor to forward copy to the superclass correctly.
5100 * Required for MSVC.
5102 Image3DGL(const Image3DGL& img) : Image3D(img) {}
5104 /*! \brief Copy assignment to forward copy to the superclass correctly.
5105 * Required for MSVC.
5107 Image3DGL& operator = (const Image3DGL &img)
5109 Image3D::operator=(img);
5113 /*! \brief Move constructor to forward move to the superclass correctly.
5114 * Required for MSVC.
5116 Image3DGL(Image3DGL&& img) CL_HPP_NOEXCEPT_ : Image3D(std::move(img)) {}
5118 /*! \brief Move assignment to forward move to the superclass correctly.
5119 * Required for MSVC.
5121 Image3DGL& operator = (Image3DGL &&img)
5123 Image3D::operator=(std::move(img));
5127 #endif // CL_USE_DEPRECATED_OPENCL_1_1_APIS
5129 #if CL_HPP_TARGET_OPENCL_VERSION >= 120
5131 * \brief general image interface for GL interop.
5132 * We abstract the 2D and 3D GL images into a single instance here
5133 * that wraps all GL sourced images on the grounds that setup information
5134 * was performed by OpenCL anyway.
5136 class ImageGL : public Image
5140 const Context& context,
5145 cl_int * err = NULL)
5148 object_ = ::clCreateFromGLTexture(
5156 detail::errHandler(error, __CREATE_GL_TEXTURE_ERR);
5162 ImageGL() : Image() { }
5164 /*! \brief Constructor from cl_mem - takes ownership.
5166 * \param retainObject will cause the constructor to retain its cl object.
5167 * Defaults to false to maintain compatibility with
5169 * See Memory for further details.
5171 explicit ImageGL(const cl_mem& image, bool retainObject = false) :
5172 Image(image, retainObject) { }
5174 ImageGL& operator = (const cl_mem& rhs)
5176 Image::operator=(rhs);
5180 /*! \brief Copy constructor to forward copy to the superclass correctly.
5181 * Required for MSVC.
5183 ImageGL(const ImageGL& img) : Image(img) {}
5185 /*! \brief Copy assignment to forward copy to the superclass correctly.
5186 * Required for MSVC.
5188 ImageGL& operator = (const ImageGL &img)
5190 Image::operator=(img);
5194 /*! \brief Move constructor to forward move to the superclass correctly.
5195 * Required for MSVC.
5197 ImageGL(ImageGL&& img) CL_HPP_NOEXCEPT_ : Image(std::move(img)) {}
5199 /*! \brief Move assignment to forward move to the superclass correctly.
5200 * Required for MSVC.
5202 ImageGL& operator = (ImageGL &&img)
5204 Image::operator=(std::move(img));
5208 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
5212 #if CL_HPP_TARGET_OPENCL_VERSION >= 200
5213 /*! \brief Class interface for Pipe Memory Objects.
5215 * See Memory for details about copy semantics, etc.
5219 class Pipe : public Memory
5223 /*! \brief Constructs a Pipe in a specified context.
5225 * Wraps clCreatePipe().
5226 * @param context Context in which to create the pipe.
5227 * @param flags Bitfield. Only CL_MEM_READ_WRITE and CL_MEM_HOST_NO_ACCESS are valid.
5228 * @param packet_size Size in bytes of a single packet of the pipe.
5229 * @param max_packets Number of packets that may be stored in the pipe.
5233 const Context& context,
5234 cl_uint packet_size,
5235 cl_uint max_packets,
5240 cl_mem_flags flags = CL_MEM_READ_WRITE | CL_MEM_HOST_NO_ACCESS;
5241 object_ = ::clCreatePipe(context(), flags, packet_size, max_packets, nullptr, &error);
5243 detail::errHandler(error, __CREATE_PIPE_ERR);
5249 /*! \brief Constructs a Pipe in a the default context.
5251 * Wraps clCreatePipe().
5252 * @param flags Bitfield. Only CL_MEM_READ_WRITE and CL_MEM_HOST_NO_ACCESS are valid.
5253 * @param packet_size Size in bytes of a single packet of the pipe.
5254 * @param max_packets Number of packets that may be stored in the pipe.
5258 cl_uint packet_size,
5259 cl_uint max_packets,
5264 Context context = Context::getDefault(err);
5266 cl_mem_flags flags = CL_MEM_READ_WRITE | CL_MEM_HOST_NO_ACCESS;
5267 object_ = ::clCreatePipe(context(), flags, packet_size, max_packets, nullptr, &error);
5269 detail::errHandler(error, __CREATE_PIPE_ERR);
5275 //! \brief Default constructor - initializes to NULL.
5276 Pipe() : Memory() { }
5278 /*! \brief Constructor from cl_mem - takes ownership.
5280 * \param retainObject will cause the constructor to retain its cl object.
5281 * Defaults to false to maintain compatibility with earlier versions.
5283 * See Memory for further details.
5285 explicit Pipe(const cl_mem& pipe, bool retainObject = false) :
5286 Memory(pipe, retainObject) { }
5288 /*! \brief Assignment from cl_mem - performs shallow copy.
5290 * See Memory for further details.
5292 Pipe& operator = (const cl_mem& rhs)
5294 Memory::operator=(rhs);
5298 /*! \brief Copy constructor to forward copy to the superclass correctly.
5299 * Required for MSVC.
5301 Pipe(const Pipe& pipe) : Memory(pipe) {}
5303 /*! \brief Copy assignment to forward copy to the superclass correctly.
5304 * Required for MSVC.
5306 Pipe& operator = (const Pipe &pipe)
5308 Memory::operator=(pipe);
5312 /*! \brief Move constructor to forward move to the superclass correctly.
5313 * Required for MSVC.
5315 Pipe(Pipe&& pipe) CL_HPP_NOEXCEPT_ : Memory(std::move(pipe)) {}
5317 /*! \brief Move assignment to forward move to the superclass correctly.
5318 * Required for MSVC.
5320 Pipe& operator = (Pipe &&pipe)
5322 Memory::operator=(std::move(pipe));
5326 //! \brief Wrapper for clGetMemObjectInfo().
5327 template <typename T>
5328 cl_int getInfo(cl_pipe_info name, T* param) const
5330 return detail::errHandler(
5331 detail::getInfo(&::clGetPipeInfo, object_, name, param),
5332 __GET_PIPE_INFO_ERR);
5335 //! \brief Wrapper for clGetMemObjectInfo() that returns by value.
5336 template <cl_int name> typename
5337 detail::param_traits<detail::cl_pipe_info, name>::param_type
5338 getInfo(cl_int* err = NULL) const
5340 typename detail::param_traits<
5341 detail::cl_pipe_info, name>::param_type param;
5342 cl_int result = getInfo(name, ¶m);
5349 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 200
5352 /*! \brief Class interface for cl_sampler.
5354 * \note Copies of these objects are shallow, meaning that the copy will refer
5355 * to the same underlying cl_sampler as the original. For details, see
5356 * clRetainSampler() and clReleaseSampler().
5360 class Sampler : public detail::Wrapper<cl_sampler>
5363 //! \brief Default constructor - initializes to NULL.
5366 /*! \brief Constructs a Sampler in a specified context.
5368 * Wraps clCreateSampler().
5371 const Context& context,
5372 cl_bool normalized_coords,
5373 cl_addressing_mode addressing_mode,
5374 cl_filter_mode filter_mode,
5379 #if CL_HPP_TARGET_OPENCL_VERSION >= 200
5380 cl_sampler_properties sampler_properties[] = {
5381 CL_SAMPLER_NORMALIZED_COORDS, normalized_coords,
5382 CL_SAMPLER_ADDRESSING_MODE, addressing_mode,
5383 CL_SAMPLER_FILTER_MODE, filter_mode,
5385 object_ = ::clCreateSamplerWithProperties(
5390 detail::errHandler(error, __CREATE_SAMPLER_WITH_PROPERTIES_ERR);
5395 object_ = ::clCreateSampler(
5402 detail::errHandler(error, __CREATE_SAMPLER_ERR);
5409 /*! \brief Constructor from cl_sampler - takes ownership.
5411 * \param retainObject will cause the constructor to retain its cl object.
5412 * Defaults to false to maintain compatibility with
5414 * This effectively transfers ownership of a refcount on the cl_sampler
5415 * into the new Sampler object.
5417 explicit Sampler(const cl_sampler& sampler, bool retainObject = false) :
5418 detail::Wrapper<cl_type>(sampler, retainObject) { }
5420 /*! \brief Assignment operator from cl_sampler - takes ownership.
5422 * This effectively transfers ownership of a refcount on the rhs and calls
5423 * clReleaseSampler() on the value previously held by this instance.
5425 Sampler& operator = (const cl_sampler& rhs)
5427 detail::Wrapper<cl_type>::operator=(rhs);
5431 /*! \brief Copy constructor to forward copy to the superclass correctly.
5432 * Required for MSVC.
5434 Sampler(const Sampler& sam) : detail::Wrapper<cl_type>(sam) {}
5436 /*! \brief Copy assignment to forward copy to the superclass correctly.
5437 * Required for MSVC.
5439 Sampler& operator = (const Sampler &sam)
5441 detail::Wrapper<cl_type>::operator=(sam);
5445 /*! \brief Move constructor to forward move to the superclass correctly.
5446 * Required for MSVC.
5448 Sampler(Sampler&& sam) CL_HPP_NOEXCEPT_ : detail::Wrapper<cl_type>(std::move(sam)) {}
5450 /*! \brief Move assignment to forward move to the superclass correctly.
5451 * Required for MSVC.
5453 Sampler& operator = (Sampler &&sam)
5455 detail::Wrapper<cl_type>::operator=(std::move(sam));
5459 //! \brief Wrapper for clGetSamplerInfo().
5460 template <typename T>
5461 cl_int getInfo(cl_sampler_info name, T* param) const
5463 return detail::errHandler(
5464 detail::getInfo(&::clGetSamplerInfo, object_, name, param),
5465 __GET_SAMPLER_INFO_ERR);
5468 //! \brief Wrapper for clGetSamplerInfo() that returns by value.
5469 template <cl_int name> typename
5470 detail::param_traits<detail::cl_sampler_info, name>::param_type
5471 getInfo(cl_int* err = NULL) const
5473 typename detail::param_traits<
5474 detail::cl_sampler_info, name>::param_type param;
5475 cl_int result = getInfo(name, ¶m);
5485 class DeviceCommandQueue;
5488 //! \brief Class interface for specifying NDRange values.
5492 size_type sizes_[3];
5493 cl_uint dimensions_;
5496 //! \brief Default constructor - resulting range has zero dimensions.
5505 //! \brief Constructs one-dimensional range.
5506 NDRange(size_type size0)
5514 //! \brief Constructs two-dimensional range.
5515 NDRange(size_type size0, size_type size1)
5523 //! \brief Constructs three-dimensional range.
5524 NDRange(size_type size0, size_type size1, size_type size2)
5532 /*! \brief Conversion operator to const size_type *.
5534 * \returns a pointer to the size of the first dimension.
5536 operator const size_type*() const {
5540 //! \brief Queries the number of dimensions in the range.
5541 size_type dimensions() const
5546 //! \brief Returns the size of the object in bytes based on the
5547 // runtime number of dimensions
5548 size_type size() const
5550 return dimensions_*sizeof(size_type);
5558 const size_type* get() const
5564 //! \brief A zero-dimensional range.
5565 static const NDRange NullRange;
5567 //! \brief Local address wrapper for use with Kernel::setArg
5568 struct LocalSpaceArg
5575 template <typename T, class Enable = void>
5576 struct KernelArgumentHandler;
5578 // Enable for objects that are not subclasses of memory
5579 // Pointers, constants etc
5580 template <typename T>
5581 struct KernelArgumentHandler<T, typename std::enable_if<!std::is_base_of<cl::Memory, T>::value>::type>
5583 static size_type size(const T&) { return sizeof(T); }
5584 static const T* ptr(const T& value) { return &value; }
5587 // Enable for subclasses of memory where we want to get a reference to the cl_mem out
5588 // and pass that in for safety
5589 template <typename T>
5590 struct KernelArgumentHandler<T, typename std::enable_if<std::is_base_of<cl::Memory, T>::value>::type>
5592 static size_type size(const T&) { return sizeof(cl_mem); }
5593 static const cl_mem* ptr(const T& value) { return &(value()); }
5596 // Specialization for DeviceCommandQueue defined later
5599 struct KernelArgumentHandler<LocalSpaceArg, void>
5601 static size_type size(const LocalSpaceArg& value) { return value.size_; }
5602 static const void* ptr(const LocalSpaceArg&) { return NULL; }
5609 * \brief Helper function for generating LocalSpaceArg objects.
5611 inline LocalSpaceArg
5612 Local(size_type size)
5614 LocalSpaceArg ret = { size };
5618 /*! \brief Class interface for cl_kernel.
5620 * \note Copies of these objects are shallow, meaning that the copy will refer
5621 * to the same underlying cl_kernel as the original. For details, see
5622 * clRetainKernel() and clReleaseKernel().
5626 class Kernel : public detail::Wrapper<cl_kernel>
5629 inline Kernel(const Program& program, const char* name, cl_int* err = NULL);
5631 //! \brief Default constructor - initializes to NULL.
5634 /*! \brief Constructor from cl_kernel - takes ownership.
5636 * \param retainObject will cause the constructor to retain its cl object.
5637 * Defaults to false to maintain compatibility with
5639 * This effectively transfers ownership of a refcount on the cl_kernel
5640 * into the new Kernel object.
5642 explicit Kernel(const cl_kernel& kernel, bool retainObject = false) :
5643 detail::Wrapper<cl_type>(kernel, retainObject) { }
5645 /*! \brief Assignment operator from cl_kernel - takes ownership.
5647 * This effectively transfers ownership of a refcount on the rhs and calls
5648 * clReleaseKernel() on the value previously held by this instance.
5650 Kernel& operator = (const cl_kernel& rhs)
5652 detail::Wrapper<cl_type>::operator=(rhs);
5656 /*! \brief Copy constructor to forward copy to the superclass correctly.
5657 * Required for MSVC.
5659 Kernel(const Kernel& kernel) : detail::Wrapper<cl_type>(kernel) {}
5661 /*! \brief Copy assignment to forward copy to the superclass correctly.
5662 * Required for MSVC.
5664 Kernel& operator = (const Kernel &kernel)
5666 detail::Wrapper<cl_type>::operator=(kernel);
5670 /*! \brief Move constructor to forward move to the superclass correctly.
5671 * Required for MSVC.
5673 Kernel(Kernel&& kernel) CL_HPP_NOEXCEPT_ : detail::Wrapper<cl_type>(std::move(kernel)) {}
5675 /*! \brief Move assignment to forward move to the superclass correctly.
5676 * Required for MSVC.
5678 Kernel& operator = (Kernel &&kernel)
5680 detail::Wrapper<cl_type>::operator=(std::move(kernel));
5684 template <typename T>
5685 cl_int getInfo(cl_kernel_info name, T* param) const
5687 return detail::errHandler(
5688 detail::getInfo(&::clGetKernelInfo, object_, name, param),
5689 __GET_KERNEL_INFO_ERR);
5692 template <cl_int name> typename
5693 detail::param_traits<detail::cl_kernel_info, name>::param_type
5694 getInfo(cl_int* err = NULL) const
5696 typename detail::param_traits<
5697 detail::cl_kernel_info, name>::param_type param;
5698 cl_int result = getInfo(name, ¶m);
5705 #if CL_HPP_TARGET_OPENCL_VERSION >= 120
5706 template <typename T>
5707 cl_int getArgInfo(cl_uint argIndex, cl_kernel_arg_info name, T* param) const
5709 return detail::errHandler(
5710 detail::getInfo(&::clGetKernelArgInfo, object_, argIndex, name, param),
5711 __GET_KERNEL_ARG_INFO_ERR);
5714 template <cl_int name> typename
5715 detail::param_traits<detail::cl_kernel_arg_info, name>::param_type
5716 getArgInfo(cl_uint argIndex, cl_int* err = NULL) const
5718 typename detail::param_traits<
5719 detail::cl_kernel_arg_info, name>::param_type param;
5720 cl_int result = getArgInfo(argIndex, name, ¶m);
5726 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
5728 template <typename T>
5729 cl_int getWorkGroupInfo(
5730 const Device& device, cl_kernel_work_group_info name, T* param) const
5732 return detail::errHandler(
5734 &::clGetKernelWorkGroupInfo, object_, device(), name, param),
5735 __GET_KERNEL_WORK_GROUP_INFO_ERR);
5738 template <cl_int name> typename
5739 detail::param_traits<detail::cl_kernel_work_group_info, name>::param_type
5740 getWorkGroupInfo(const Device& device, cl_int* err = NULL) const
5742 typename detail::param_traits<
5743 detail::cl_kernel_work_group_info, name>::param_type param;
5744 cl_int result = getWorkGroupInfo(device, name, ¶m);
5751 #if CL_HPP_TARGET_OPENCL_VERSION >= 200
5752 #if defined(CL_HPP_USE_CL_SUB_GROUPS_KHR)
5753 cl_int getSubGroupInfo(const cl::Device &dev, cl_kernel_sub_group_info name, const cl::NDRange &range, size_type* param) const
5755 typedef clGetKernelSubGroupInfoKHR_fn PFN_clGetKernelSubGroupInfoKHR;
5756 static PFN_clGetKernelSubGroupInfoKHR pfn_clGetKernelSubGroupInfoKHR = NULL;
5757 CL_HPP_INIT_CL_EXT_FCN_PTR_(clGetKernelSubGroupInfoKHR);
5759 return detail::errHandler(
5760 pfn_clGetKernelSubGroupInfoKHR(object_, dev(), name, range.size(), range.get(), sizeof(size_type), param, nullptr),
5761 __GET_KERNEL_ARG_INFO_ERR);
5764 template <cl_int name>
5765 size_type getSubGroupInfo(const cl::Device &dev, const cl::NDRange &range, cl_int* err = NULL) const
5768 cl_int result = getSubGroupInfo(dev, name, range, ¶m);
5774 #endif // #if defined(CL_HPP_USE_CL_SUB_GROUPS_KHR)
5775 #endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 200
5777 #if CL_HPP_TARGET_OPENCL_VERSION >= 200
5778 /*! \brief setArg overload taking a shared_ptr type
5780 template<typename T, class D>
5781 cl_int setArg(cl_uint index, const cl::pointer<T, D> &argPtr)
5783 return detail::errHandler(
5784 ::clSetKernelArgSVMPointer(object_, index, argPtr.get()),
5785 __SET_KERNEL_ARGS_ERR);
5788 /*! \brief setArg overload taking a vector type.
5790 template<typename T, class Alloc>
5791 cl_int setArg(cl_uint index, const cl::vector<T, Alloc> &argPtr)
5793 return detail::errHandler(
5794 ::clSetKernelArgSVMPointer(object_, index, argPtr.data()),
5795 __SET_KERNEL_ARGS_ERR);
5798 /*! \brief setArg overload taking a pointer type
5800 template<typename T>
5801 typename std::enable_if<std::is_pointer<T>::value, cl_int>::type
5802 setArg(cl_uint index, const T argPtr)
5804 return detail::errHandler(
5805 ::clSetKernelArgSVMPointer(object_, index, argPtr),
5806 __SET_KERNEL_ARGS_ERR);
5808 #endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 200
5810 /*! \brief setArg overload taking a POD type
5812 template <typename T>
5813 typename std::enable_if<!std::is_pointer<T>::value, cl_int>::type
5814 setArg(cl_uint index, const T &value)
5816 return detail::errHandler(
5820 detail::KernelArgumentHandler<T>::size(value),
5821 detail::KernelArgumentHandler<T>::ptr(value)),
5822 __SET_KERNEL_ARGS_ERR);
5825 cl_int setArg(cl_uint index, size_type size, const void* argPtr)
5827 return detail::errHandler(
5828 ::clSetKernelArg(object_, index, size, argPtr),
5829 __SET_KERNEL_ARGS_ERR);
5832 #if CL_HPP_TARGET_OPENCL_VERSION >= 200
5834 * Specify a vector of SVM pointers that the kernel may access in
5835 * addition to its arguments.
5837 cl_int setSVMPointers(const vector<void*> &pointerList)
5839 return detail::errHandler(
5840 ::clSetKernelExecInfo(
5842 CL_KERNEL_EXEC_INFO_SVM_PTRS,
5843 sizeof(void*)*pointerList.size(),
5844 pointerList.data()));
5848 * Specify a std::array of SVM pointers that the kernel may access in
5849 * addition to its arguments.
5851 template<int ArrayLength>
5852 cl_int setSVMPointers(const std::array<void*, ArrayLength> &pointerList)
5854 return detail::errHandler(
5855 ::clSetKernelExecInfo(
5857 CL_KERNEL_EXEC_INFO_SVM_PTRS,
5858 sizeof(void*)*pointerList.size(),
5859 pointerList.data()));
5862 /*! \brief Enable fine-grained system SVM.
5864 * \note It is only possible to enable fine-grained system SVM if all devices
5865 * in the context associated with kernel support it.
5867 * \param svmEnabled True if fine-grained system SVM is requested. False otherwise.
5868 * \return CL_SUCCESS if the function was executed succesfully. CL_INVALID_OPERATION
5869 * if no devices in the context support fine-grained system SVM.
5871 * \see clSetKernelExecInfo
5873 cl_int enableFineGrainedSystemSVM(bool svmEnabled)
5875 cl_bool svmEnabled_ = svmEnabled ? CL_TRUE : CL_FALSE;
5876 return detail::errHandler(
5877 ::clSetKernelExecInfo(
5879 CL_KERNEL_EXEC_INFO_SVM_FINE_GRAIN_SYSTEM,
5886 template<int index, int ArrayLength, class D, typename T0, typename... Ts>
5887 void setSVMPointersHelper(std::array<void*, ArrayLength> &pointerList, const pointer<T0, D> &t0, Ts... ts)
5889 pointerList[index] = static_cast<void*>(t0.get());
5890 setSVMPointersHelper<index + 1, Ts...>(ts...);
5893 template<int index, int ArrayLength, typename T0, typename... Ts>
5894 typename std::enable_if<std::is_pointer<T0>::value, void>::type
5895 setSVMPointersHelper(std::array<void*, ArrayLength> &pointerList, T0 t0, Ts... ts)
5897 pointerList[index] = static_cast<void*>(t0);
5898 setSVMPointersHelper<index + 1, Ts...>(ts...);
5901 template<int index, int ArrayLength, typename T0, class D>
5902 void setSVMPointersHelper(std::array<void*, ArrayLength> &pointerList, const pointer<T0, D> &t0)
5904 pointerList[index] = static_cast<void*>(t0.get());
5907 template<int index, int ArrayLength, typename T0>
5908 typename std::enable_if<std::is_pointer<T0>::value, void>::type
5909 setSVMPointersHelper(std::array<void*, ArrayLength> &pointerList, T0 t0)
5911 pointerList[index] = static_cast<void*>(t0);
5914 template<typename T0, typename... Ts>
5915 cl_int setSVMPointers(const T0 &t0, Ts... ts)
5917 std::array<void*, 1 + sizeof...(Ts)> pointerList;
5919 setSVMPointersHelper<0, 1 + sizeof...(Ts)>(pointerList, t0, ts...);
5920 return detail::errHandler(
5921 ::clSetKernelExecInfo(
5923 CL_KERNEL_EXEC_INFO_SVM_PTRS,
5924 sizeof(void*)*(1 + sizeof...(Ts)),
5925 pointerList.data()));
5927 #endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 200
5931 * \brief Program interface that implements cl_program.
5933 class Program : public detail::Wrapper<cl_program>
5936 #if !defined(CL_HPP_ENABLE_PROGRAM_CONSTRUCTION_FROM_ARRAY_COMPATIBILITY)
5937 typedef vector<vector<unsigned char>> Binaries;
5938 typedef vector<string> Sources;
5939 #else // #if !defined(CL_HPP_ENABLE_PROGRAM_CONSTRUCTION_FROM_ARRAY_COMPATIBILITY)
5940 typedef vector<std::pair<const void*, size_type> > Binaries;
5941 typedef vector<std::pair<const char*, size_type> > Sources;
5942 #endif // #if !defined(CL_HPP_ENABLE_PROGRAM_CONSTRUCTION_FROM_ARRAY_COMPATIBILITY)
5945 const string& source,
5951 const char * strings = source.c_str();
5952 const size_type length = source.size();
5954 Context context = Context::getDefault(err);
5956 object_ = ::clCreateProgramWithSource(
5957 context(), (cl_uint)1, &strings, &length, &error);
5959 detail::errHandler(error, __CREATE_PROGRAM_WITH_SOURCE_ERR);
5961 if (error == CL_SUCCESS && build) {
5963 error = ::clBuildProgram(
5967 #if !defined(CL_HPP_CL_1_2_DEFAULT_BUILD)
5971 #endif // #if !defined(CL_HPP_CL_1_2_DEFAULT_BUILD)
5975 detail::buildErrHandler(error, __BUILD_PROGRAM_ERR, getBuildInfo<CL_PROGRAM_BUILD_LOG>());
5984 const Context& context,
5985 const string& source,
5991 const char * strings = source.c_str();
5992 const size_type length = source.size();
5994 object_ = ::clCreateProgramWithSource(
5995 context(), (cl_uint)1, &strings, &length, &error);
5997 detail::errHandler(error, __CREATE_PROGRAM_WITH_SOURCE_ERR);
5999 if (error == CL_SUCCESS && build) {
6000 error = ::clBuildProgram(
6004 #if !defined(CL_HPP_CL_1_2_DEFAULT_BUILD)
6008 #endif // #if !defined(CL_HPP_CL_1_2_DEFAULT_BUILD)
6012 detail::buildErrHandler(error, __BUILD_PROGRAM_ERR, getBuildInfo<CL_PROGRAM_BUILD_LOG>());
6021 * Create a program from a vector of source strings and the default context.
6022 * Does not compile or link the program.
6025 const Sources& sources,
6029 Context context = Context::getDefault(err);
6031 const size_type n = (size_type)sources.size();
6033 vector<size_type> lengths(n);
6034 vector<const char*> strings(n);
6036 for (size_type i = 0; i < n; ++i) {
6037 #if !defined(CL_HPP_ENABLE_PROGRAM_CONSTRUCTION_FROM_ARRAY_COMPATIBILITY)
6038 strings[i] = sources[(int)i].data();
6039 lengths[i] = sources[(int)i].length();
6040 #else // #if !defined(CL_HPP_ENABLE_PROGRAM_CONSTRUCTION_FROM_ARRAY_COMPATIBILITY)
6041 strings[i] = sources[(int)i].first;
6042 lengths[i] = sources[(int)i].second;
6043 #endif // #if !defined(CL_HPP_ENABLE_PROGRAM_CONSTRUCTION_FROM_ARRAY_COMPATIBILITY)
6046 object_ = ::clCreateProgramWithSource(
6047 context(), (cl_uint)n, strings.data(), lengths.data(), &error);
6049 detail::errHandler(error, __CREATE_PROGRAM_WITH_SOURCE_ERR);
6056 * Create a program from a vector of source strings and a provided context.
6057 * Does not compile or link the program.
6060 const Context& context,
6061 const Sources& sources,
6066 const size_type n = (size_type)sources.size();
6068 vector<size_type> lengths(n);
6069 vector<const char*> strings(n);
6071 for (size_type i = 0; i < n; ++i) {
6072 #if !defined(CL_HPP_ENABLE_PROGRAM_CONSTRUCTION_FROM_ARRAY_COMPATIBILITY)
6073 strings[i] = sources[(int)i].data();
6074 lengths[i] = sources[(int)i].length();
6075 #else // #if !defined(CL_HPP_ENABLE_PROGRAM_CONSTRUCTION_FROM_ARRAY_COMPATIBILITY)
6076 strings[i] = sources[(int)i].first;
6077 lengths[i] = sources[(int)i].second;
6078 #endif // #if !defined(CL_HPP_ENABLE_PROGRAM_CONSTRUCTION_FROM_ARRAY_COMPATIBILITY)
6081 object_ = ::clCreateProgramWithSource(
6082 context(), (cl_uint)n, strings.data(), lengths.data(), &error);
6084 detail::errHandler(error, __CREATE_PROGRAM_WITH_SOURCE_ERR);
// Program(context, devices, binaries): wraps clCreateProgramWithBinary.
// NOTE(review): sparse excerpt — signature/brace lines are missing here.
6091 * Construct a program object from a list of devices and a per-device list of binaries.
6092 * \param context A valid OpenCL context in which to construct the program.
6093 * \param devices A vector of OpenCL device objects for which the program will be created.
6094 * \param binaries A vector of pairs of a pointer to a binary object and its length.
6095 * \param binaryStatus An optional vector that on completion will be resized to
6096 * match the size of binaries and filled with values to specify if each binary
6097 * was successfully loaded.
6098 * Set to CL_SUCCESS if the binary was successfully loaded.
6099 * Set to CL_INVALID_VALUE if the length is 0 or the binary pointer is NULL.
6100 * Set to CL_INVALID_BINARY if the binary provided is not valid for the matching device.
6101 * \param err if non-NULL will be set to CL_SUCCESS on successful operation or one of the following errors:
6102 * CL_INVALID_CONTEXT if context is not a valid context.
6103 * CL_INVALID_VALUE if the length of devices is zero; or if the length of binaries does not match the length of devices;
6104 * or if any entry in binaries is NULL or has length 0.
6105 * CL_INVALID_DEVICE if OpenCL devices listed in devices are not in the list of devices associated with context.
6106 * CL_INVALID_BINARY if an invalid program binary was encountered for any device. binaryStatus will return specific status for each device.
6107 * CL_OUT_OF_HOST_MEMORY if there is a failure to allocate resources required by the OpenCL implementation on the host.
6110 const Context& context,
6111 const vector<Device>& devices,
6112 const Binaries& binaries,
6113 vector<cl_int>* binaryStatus = NULL,
6118 const size_type numDevices = devices.size();
// One binary per device is required; reject mismatched inputs before
// touching the C API.
6120 // Catch size mismatch early and return
6121 if(binaries.size() != numDevices) {
6122 error = CL_INVALID_VALUE;
6123 detail::errHandler(error, __CREATE_PROGRAM_WITH_BINARY_ERR);
// Flatten the Binaries container into parallel image/length arrays.
6131 vector<size_type> lengths(numDevices);
6132 vector<const unsigned char*> images(numDevices);
6133 #if !defined(CL_HPP_ENABLE_PROGRAM_CONSTRUCTION_FROM_ARRAY_COMPATIBILITY)
6134 for (size_type i = 0; i < numDevices; ++i) {
// NOTE(review): inconsistent (int)i cast on the size() access only —
// cosmetic, but the cast truncates for huge vectors.
6135 images[i] = binaries[i].data();
6136 lengths[i] = binaries[(int)i].size();
6138 #else // #if !defined(CL_HPP_ENABLE_PROGRAM_CONSTRUCTION_FROM_ARRAY_COMPATIBILITY)
6139 for (size_type i = 0; i < numDevices; ++i) {
6140 images[i] = (const unsigned char*)binaries[i].first;
6141 lengths[i] = binaries[(int)i].second;
6143 #endif // #if !defined(CL_HPP_ENABLE_PROGRAM_CONSTRUCTION_FROM_ARRAY_COMPATIBILITY)
// Unwrap cl::Device objects into raw cl_device_id handles.
6145 vector<cl_device_id> deviceIDs(numDevices);
6146 for( size_type deviceIndex = 0; deviceIndex < numDevices; ++deviceIndex ) {
6147 deviceIDs[deviceIndex] = (devices[deviceIndex])();
// Per-device load status is written directly into the caller's vector.
6151 binaryStatus->resize(numDevices);
6154 object_ = ::clCreateProgramWithBinary(
6155 context(), (cl_uint) devices.size(),
6157 lengths.data(), images.data(), (binaryStatus != NULL && numDevices > 0)
6158 ? &binaryStatus->front()
6161 detail::errHandler(error, __CREATE_PROGRAM_WITH_BINARY_ERR);
6168 #if CL_HPP_TARGET_OPENCL_VERSION >= 120
// Program(context, devices, kernelNames): wraps clCreateProgramWithBuiltInKernels
// (OpenCL 1.2+). NOTE(review): sparse excerpt — some lines missing.
6170 * Create program using builtin kernels.
6171 * \param kernelNames Semi-colon separated list of builtin kernel names
6174 const Context& context,
6175 const vector<Device>& devices,
6176 const string& kernelNames,
// Unwrap cl::Device objects into raw cl_device_id handles for the C API.
6182 size_type numDevices = devices.size();
6183 vector<cl_device_id> deviceIDs(numDevices);
6184 for( size_type deviceIndex = 0; deviceIndex < numDevices; ++deviceIndex ) {
6185 deviceIDs[deviceIndex] = (devices[deviceIndex])();
6188 object_ = ::clCreateProgramWithBuiltInKernels(
6190 (cl_uint) devices.size(),
6192 kernelNames.c_str(),
6195 detail::errHandler(error, __CREATE_PROGRAM_WITH_BUILT_IN_KERNELS_ERR);
6200 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
// Standard wrapper boilerplate: take-ownership constructor from the raw C
// handle plus copy/move constructors and assignments forwarded to
// detail::Wrapper<cl_program>.
6205 /*! \brief Constructor from cl_program - takes ownership.
6207 * \param retainObject will cause the constructor to retain its cl object.
6208 * Defaults to false to maintain compatibility with
6211 explicit Program(const cl_program& program, bool retainObject = false) :
6212 detail::Wrapper<cl_type>(program, retainObject) { }
// Assignment from a raw handle — ownership semantics handled by the wrapper.
6214 Program& operator = (const cl_program& rhs)
6216 detail::Wrapper<cl_type>::operator=(rhs);
6220 /*! \brief Copy constructor to forward copy to the superclass correctly.
6221 * Required for MSVC.
6223 Program(const Program& program) : detail::Wrapper<cl_type>(program) {}
6225 /*! \brief Copy assignment to forward copy to the superclass correctly.
6226 * Required for MSVC.
6228 Program& operator = (const Program &program)
6230 detail::Wrapper<cl_type>::operator=(program);
6234 /*! \brief Move constructor to forward move to the superclass correctly.
6235 * Required for MSVC.
6237 Program(Program&& program) CL_HPP_NOEXCEPT_ : detail::Wrapper<cl_type>(std::move(program)) {}
6239 /*! \brief Move assignment to forward move to the superclass correctly.
6240 * Required for MSVC.
6242 Program& operator = (Program &&program)
6244 detail::Wrapper<cl_type>::operator=(std::move(program));
// build(devices, options, notify, data): builds for an explicit device list
// via clBuildProgram, then routes the result (and the build log on failure)
// through detail::buildErrHandler. NOTE(review): sparse excerpt — the
// clBuildProgram argument lines are missing from this listing.
6249 const vector<Device>& devices,
6250 const char* options = NULL,
6251 void (CL_CALLBACK * notifyFptr)(cl_program, void *) = NULL,
6252 void* data = NULL) const
// Unwrap cl::Device objects into raw cl_device_id handles.
6254 size_type numDevices = devices.size();
6255 vector<cl_device_id> deviceIDs(numDevices);
6257 for( size_type deviceIndex = 0; deviceIndex < numDevices; ++deviceIndex ) {
6258 deviceIDs[deviceIndex] = (devices[deviceIndex])();
6261 cl_int buildError = ::clBuildProgram(
6270 return detail::buildErrHandler(buildError, __BUILD_PROGRAM_ERR, getBuildInfo<CL_PROGRAM_BUILD_LOG>());
// build(options, notify, data): convenience overload building for all devices
// associated with the program (NULL device list to clBuildProgram).
6274 const char* options = NULL,
6275 void (CL_CALLBACK * notifyFptr)(cl_program, void *) = NULL,
6276 void* data = NULL) const
6278 cl_int buildError = ::clBuildProgram(
6287 return detail::buildErrHandler(buildError, __BUILD_PROGRAM_ERR, getBuildInfo<CL_PROGRAM_BUILD_LOG>());
6290 #if CL_HPP_TARGET_OPENCL_VERSION >= 120
// compile(options, notify, data): wraps clCompileProgram (OpenCL 1.2+);
// compiles without linking and surfaces the build log on failure.
6292 const char* options = NULL,
6293 void (CL_CALLBACK * notifyFptr)(cl_program, void *) = NULL,
6294 void* data = NULL) const
6296 cl_int error = ::clCompileProgram(
6306 return detail::buildErrHandler(error, __COMPILE_PROGRAM_ERR, getBuildInfo<CL_PROGRAM_BUILD_LOG>());
6308 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
// getInfo: typed wrappers over clGetProgramInfo.
// Fix: line 6324 contained mojibake "¶m" — an HTML-entity-mangled
// "&param" — restored below; all other tokens unchanged.
6310 template <typename T>
6311 cl_int getInfo(cl_program_info name, T* param) const
6313 return detail::errHandler(
6314 detail::getInfo(&::clGetProgramInfo, object_, name, param),
6315 __GET_PROGRAM_INFO_ERR);
// Traits-based overload: deduces the result type from the query enum and
// returns it by value, reporting the status through *err.
6318 template <cl_int name> typename
6319 detail::param_traits<detail::cl_program_info, name>::param_type
6320 getInfo(cl_int* err = NULL) const
6322 typename detail::param_traits<
6323 detail::cl_program_info, name>::param_type param;
6324 cl_int result = getInfo(name, &param);
// getBuildInfo: typed wrappers over clGetProgramBuildInfo for one device.
// Fix: line 6347 contained mojibake "¶m" — an HTML-entity-mangled
// "&param" — restored below; all other tokens unchanged.
6331 template <typename T>
6332 cl_int getBuildInfo(
6333 const Device& device, cl_program_build_info name, T* param) const
6335 return detail::errHandler(
6337 &::clGetProgramBuildInfo, object_, device(), name, param),
6338 __GET_PROGRAM_BUILD_INFO_ERR);
// Traits-based overload: deduces the result type from the query enum.
6341 template <cl_int name> typename
6342 detail::param_traits<detail::cl_program_build_info, name>::param_type
6343 getBuildInfo(const Device& device, cl_int* err = NULL) const
6345 typename detail::param_traits<
6346 detail::cl_program_build_info, name>::param_type param;
6347 cl_int result = getBuildInfo(device, name, &param);
// Fix: line 6380 contained mojibake "¶m" — an HTML-entity-mangled
// "&param" — restored below; all other tokens unchanged.
6355 * Build info function that returns a vector of device/info pairs for the specified
6356 * info type and for all devices in the program.
6357 * On an error reading the info for any device, an empty vector of info will be returned.
6359 template <cl_int name>
6360 vector<std::pair<cl::Device, typename detail::param_traits<detail::cl_program_build_info, name>::param_type>>
6361 getBuildInfo(cl_int *err = NULL) const
6363 cl_int result = CL_SUCCESS;
// First fetch the device list for the program, then query each device.
6365 auto devs = getInfo<CL_PROGRAM_DEVICES>(&result);
6366 vector<std::pair<cl::Device, typename detail::param_traits<detail::cl_program_build_info, name>::param_type>>
6369 // If there was an initial error from getInfo return the error
6370 if (result != CL_SUCCESS) {
6377 for (cl::Device d : devs) {
6378 typename detail::param_traits<
6379 detail::cl_program_build_info, name>::param_type param;
6380 result = getBuildInfo(d, name, &param);
6382 std::pair<cl::Device, typename detail::param_traits<detail::cl_program_build_info, name>::param_type>
6384 if (result != CL_SUCCESS) {
6385 // On error, leave the loop and return the error code
6392 if (result != CL_SUCCESS) {
// createKernels: enumerates every kernel in the built program via the usual
// two-call clCreateKernelsInProgram pattern (count query, then fetch).
6398 cl_int createKernels(vector<Kernel>* kernels)
6401 cl_int err = ::clCreateKernelsInProgram(object_, 0, NULL, &numKernels);
6402 if (err != CL_SUCCESS) {
6403 return detail::errHandler(err, __CREATE_KERNELS_IN_PROGRAM_ERR);
6406 vector<cl_kernel> value(numKernels);
6408 err = ::clCreateKernelsInProgram(
6409 object_, numKernels, value.data(), NULL);
6410 if (err != CL_SUCCESS) {
6411 return detail::errHandler(err, __CREATE_KERNELS_IN_PROGRAM_ERR);
6415 kernels->resize(value.size());
6417 // Wrap each raw cl_kernel in a cl::Kernel that takes ownership of the
6418 // handle without an extra retain (retainObject == false below):
6419 for (size_type i = 0; i < value.size(); i++) {
6420 // clCreateKernelsInProgram already returned a fresh reference per kernel,
6422 (*kernels)[i] = Kernel(value[i], false);
6429 #if CL_HPP_TARGET_OPENCL_VERSION >= 120
// linkProgram (two-program overload): wraps clLinkProgram for exactly two
// inputs, pulling the context from the first program.
// Fixes: clLinkProgram failures were reported with __COMPILE_PROGRAM_ERR;
// changed to __LINK_PROGRAM_ERR so the error message names the right API.
// Also made the return explicit about not retaining (prog is a fresh
// reference from clLinkProgram), matching the vector overload below.
6430 inline Program linkProgram(
6433 const char* options = NULL,
6434 void (CL_CALLBACK * notifyFptr)(cl_program, void *) = NULL,
6438 cl_int error_local = CL_SUCCESS;
6440 cl_program programs[2] = { input1(), input2() };
6442 Context ctx = input1.getInfo<CL_PROGRAM_CONTEXT>(&error_local);
6443 if(error_local!=CL_SUCCESS) {
6444 detail::errHandler(error_local, __LINK_PROGRAM_ERR);
6447 cl_program prog = ::clLinkProgram(
6458 detail::errHandler(error_local,__LINK_PROGRAM_ERR);
6463 return Program(prog, false);
// linkProgram (vector overload): links any number of input programs,
// pulling the context from the first one when present.
6466 inline Program linkProgram(
6467 vector<Program> inputPrograms,
6468 const char* options = NULL,
6469 void (CL_CALLBACK * notifyFptr)(cl_program, void *) = NULL,
6473 cl_int error_local = CL_SUCCESS;
// Unwrap cl::Program objects into raw cl_program handles.
6475 vector<cl_program> programs(inputPrograms.size());
6477 for (unsigned int i = 0; i < inputPrograms.size(); i++) {
6478 programs[i] = inputPrograms[i]();
6482 if(inputPrograms.size() > 0) {
6483 ctx = inputPrograms[0].getInfo<CL_PROGRAM_CONTEXT>(&error_local);
6484 if(error_local!=CL_SUCCESS) {
6485 detail::errHandler(error_local, __LINK_PROGRAM_ERR);
6488 cl_program prog = ::clLinkProgram(
6493 (cl_uint)inputPrograms.size(),
6499 detail::errHandler(error_local,__LINK_PROGRAM_ERR);
6504 return Program(prog, false);
6506 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
6508 // Template specialization for CL_PROGRAM_BINARIES
// getInfo(vector<vector<unsigned char>>*): pre-sizes each per-device binary
// buffer from CL_PROGRAM_BINARY_SIZES before delegating to the generic helper.
// Fix: the resize loop used a signed "int i" against the unsigned size_type
// "numBinaries" (signed/unsigned comparison, truncation for huge counts);
// the index is now size_type. No other tokens changed.
6510 inline cl_int cl::Program::getInfo(cl_program_info name, vector<vector<unsigned char>>* param) const
6512 if (name != CL_PROGRAM_BINARIES) {
6513 return CL_INVALID_VALUE;
6516 // Resize the parameter array appropriately for each allocation
6517 // and pass down to the helper
6519 vector<size_type> sizes = getInfo<CL_PROGRAM_BINARY_SIZES>();
6520 size_type numBinaries = sizes.size();
6522 // Resize the parameter array and constituent arrays
6523 param->resize(numBinaries);
6524 for (size_type i = 0; i < numBinaries; ++i) {
6525 (*param)[i].resize(sizes[i]);
6528 return detail::errHandler(
6529 detail::getInfo(&::clGetProgramInfo, object_, name, param),
6530 __GET_PROGRAM_INFO_ERR);
// Traits specialization returning the binaries by value.
6537 inline vector<vector<unsigned char>> cl::Program::getInfo<CL_PROGRAM_BINARIES>(cl_int* err) const
6539 vector<vector<unsigned char>> binariesVectors;
6541 cl_int result = getInfo(CL_PROGRAM_BINARIES, &binariesVectors);
6545 return binariesVectors;
// Kernel constructor from a program and kernel name (clCreateKernel).
6548 inline Kernel::Kernel(const Program& program, const char* name, cl_int* err)
6552 object_ = ::clCreateKernel(program(), name, &error);
6553 detail::errHandler(error, __CREATE_KERNEL_ERR);
// Type-safe queue-property flags. Note that only profiling and out-of-order
// execution are expressible here — CL_QUEUE_ON_DEVICE has no enumerator.
6561 enum class QueueProperties : cl_command_queue_properties
6564 Profiling = CL_QUEUE_PROFILING_ENABLE,
6565 OutOfOrder = CL_QUEUE_OUT_OF_ORDER_EXEC_MODE_ENABLE,
// Bitwise-or so flags can be combined while keeping the enum class type.
6568 inline QueueProperties operator|(QueueProperties lhs, QueueProperties rhs)
6570 return static_cast<QueueProperties>(static_cast<cl_command_queue_properties>(lhs) | static_cast<cl_command_queue_properties>(rhs));
6573 /*! \class CommandQueue
6574 * \brief CommandQueue interface for cl_command_queue.
6576 class CommandQueue : public detail::Wrapper<cl_command_queue>
// Process-wide default queue state, initialized at most once via
// std::call_once on default_initialized_.
6579 static std::once_flag default_initialized_;
6580 static CommandQueue default_;
6581 static cl_int default_error_;
6583 /*! \brief Create the default command queue returned by @ref getDefault.
6585 * It sets default_error_ to indicate success or failure. It does not throw
6588 static void makeDefault()
6590 /* We don't want to throw an error from this function, so we have to
6591 * catch and set the error flag.
6593 #if defined(CL_HPP_ENABLE_EXCEPTIONS)
// Build the default queue from the default context and device; any failure
// is recorded in default_error_ rather than propagated.
6598 Context context = Context::getDefault(&error);
6600 if (error != CL_SUCCESS) {
6601 default_error_ = error;
6604 Device device = Device::getDefault();
6605 default_ = CommandQueue(context, device, 0, &default_error_);
6608 #if defined(CL_HPP_ENABLE_EXCEPTIONS)
// With exceptions enabled, convert a thrown cl::Error into the error flag.
6609 catch (cl::Error &e) {
6610 default_error_ = e.err();
6615 /*! \brief Create the default command queue.
6617 * This sets @c default_. It does not throw
6620 static void makeDefaultProvided(const CommandQueue &c) {
6625 #ifdef CL_HPP_UNIT_TEST_ENABLE
6626 /*! \brief Reset the default.
6628 * This sets @c default_ to an empty value to support cleanup in
6629 * the unit test framework.
6630 * This function is not thread safe.
6632 static void unitTestClearDefault() {
6633 default_ = CommandQueue();
6635 #endif // #ifdef CL_HPP_UNIT_TEST_ENABLE
// Constructor using the default context and its first device.
6639 * \brief Constructs a CommandQueue based on passed properties.
6640 * Will return an CL_INVALID_QUEUE_PROPERTIES error if CL_QUEUE_ON_DEVICE is specified.
6643 cl_command_queue_properties properties,
6648 Context context = Context::getDefault(&error);
6649 detail::errHandler(error, __CREATE_CONTEXT_ERR);
6651 if (error != CL_SUCCESS) {
// Use the first device associated with the default context.
6657 Device device = context.getInfo<CL_CONTEXT_DEVICES>()[0];
6659 #if CL_HPP_TARGET_OPENCL_VERSION >= 200
6660 cl_queue_properties queue_properties[] = {
6661 CL_QUEUE_PROPERTIES, properties, 0 };
// Host queues only: on-device queues must go through DeviceCommandQueue.
6662 if ((properties & CL_QUEUE_ON_DEVICE) == 0) {
6663 object_ = ::clCreateCommandQueueWithProperties(
6664 context(), device(), queue_properties, &error);
6667 error = CL_INVALID_QUEUE_PROPERTIES;
6670 detail::errHandler(error, __CREATE_COMMAND_QUEUE_WITH_PROPERTIES_ERR);
// Pre-2.0 targets fall back to the original clCreateCommandQueue entry point.
6675 object_ = ::clCreateCommandQueue(
6676 context(), device(), properties, &error);
6678 detail::errHandler(error, __CREATE_COMMAND_QUEUE_ERR);
// Constructor taking the type-safe QueueProperties enum. No CL_QUEUE_ON_DEVICE
// check is needed here: the enum only defines Profiling and OutOfOrder, so an
// on-device queue cannot be requested through this overload.
6687 * \brief Constructs a CommandQueue based on passed properties.
6688 * Will return an CL_INVALID_QUEUE_PROPERTIES error if CL_QUEUE_ON_DEVICE is specified.
6691 QueueProperties properties,
6696 Context context = Context::getDefault(&error);
6697 detail::errHandler(error, __CREATE_CONTEXT_ERR);
6699 if (error != CL_SUCCESS) {
// Use the first device associated with the default context.
6705 Device device = context.getInfo<CL_CONTEXT_DEVICES>()[0];
6707 #if CL_HPP_TARGET_OPENCL_VERSION >= 200
6708 cl_queue_properties queue_properties[] = {
6709 CL_QUEUE_PROPERTIES, static_cast<cl_queue_properties>(properties), 0 };
6711 object_ = ::clCreateCommandQueueWithProperties(
6712 context(), device(), queue_properties, &error);
6715 detail::errHandler(error, __CREATE_COMMAND_QUEUE_WITH_PROPERTIES_ERR);
// Pre-2.0 targets fall back to the original clCreateCommandQueue entry point.
6720 object_ = ::clCreateCommandQueue(
6721 context(), device(), static_cast<cl_command_queue_properties>(properties), &error);
6723 detail::errHandler(error, __CREATE_COMMAND_QUEUE_ERR);
// Constructor for an explicit context; picks the context's first device.
6732 * \brief Constructs a CommandQueue for an implementation defined device in the given context
6733 * Will return an CL_INVALID_QUEUE_PROPERTIES error if CL_QUEUE_ON_DEVICE is specified.
6735 explicit CommandQueue(
6736 const Context& context,
6737 cl_command_queue_properties properties = 0,
6741 vector<cl::Device> devices;
6742 error = context.getInfo(CL_CONTEXT_DEVICES, &devices);
6744 detail::errHandler(error, __CREATE_CONTEXT_ERR);
6746 if (error != CL_SUCCESS)
6754 #if CL_HPP_TARGET_OPENCL_VERSION >= 200
6755 cl_queue_properties queue_properties[] = {
6756 CL_QUEUE_PROPERTIES, properties, 0 };
// Host queues only: reject CL_QUEUE_ON_DEVICE via this path.
6757 if ((properties & CL_QUEUE_ON_DEVICE) == 0) {
6758 object_ = ::clCreateCommandQueueWithProperties(
6759 context(), devices[0](), queue_properties, &error);
6762 error = CL_INVALID_QUEUE_PROPERTIES;
6765 detail::errHandler(error, __CREATE_COMMAND_QUEUE_WITH_PROPERTIES_ERR);
// Pre-2.0 targets fall back to clCreateCommandQueue.
6770 object_ = ::clCreateCommandQueue(
6771 context(), devices[0](), properties, &error);
6773 detail::errHandler(error, __CREATE_COMMAND_QUEUE_ERR);
// Constructor for an explicit context with type-safe QueueProperties; no
// CL_QUEUE_ON_DEVICE check is needed because the enum cannot express it.
6782 * \brief Constructs a CommandQueue for an implementation defined device in the given context
6783 * Will return an CL_INVALID_QUEUE_PROPERTIES error if CL_QUEUE_ON_DEVICE is specified.
6785 explicit CommandQueue(
6786 const Context& context,
6787 QueueProperties properties,
6791 vector<cl::Device> devices;
6792 error = context.getInfo(CL_CONTEXT_DEVICES, &devices);
6794 detail::errHandler(error, __CREATE_CONTEXT_ERR);
6796 if (error != CL_SUCCESS)
6804 #if CL_HPP_TARGET_OPENCL_VERSION >= 200
6805 cl_queue_properties queue_properties[] = {
6806 CL_QUEUE_PROPERTIES, static_cast<cl_queue_properties>(properties), 0 };
6807 object_ = ::clCreateCommandQueueWithProperties(
6808 context(), devices[0](), queue_properties, &error);
6810 detail::errHandler(error, __CREATE_COMMAND_QUEUE_WITH_PROPERTIES_ERR);
// Pre-2.0 targets fall back to clCreateCommandQueue.
6815 object_ = ::clCreateCommandQueue(
6816 context(), devices[0](), static_cast<cl_command_queue_properties>(properties), &error);
6818 detail::errHandler(error, __CREATE_COMMAND_QUEUE_ERR);
// Constructor for an explicit context and device pair.
// NOTE(review): unlike the context-only overload above, this path does not
// check CL_QUEUE_ON_DEVICE before calling the 2.0 entry point — the runtime
// will report the error instead; confirm this asymmetry is intended.
6827 * \brief Constructs a CommandQueue for a passed device and context
6828 * Will return an CL_INVALID_QUEUE_PROPERTIES error if CL_QUEUE_ON_DEVICE is specified.
6831 const Context& context,
6832 const Device& device,
6833 cl_command_queue_properties properties = 0,
6838 #if CL_HPP_TARGET_OPENCL_VERSION >= 200
6839 cl_queue_properties queue_properties[] = {
6840 CL_QUEUE_PROPERTIES, properties, 0 };
6841 object_ = ::clCreateCommandQueueWithProperties(
6842 context(), device(), queue_properties, &error);
6844 detail::errHandler(error, __CREATE_COMMAND_QUEUE_WITH_PROPERTIES_ERR);
// Pre-2.0 targets fall back to clCreateCommandQueue.
6849 object_ = ::clCreateCommandQueue(
6850 context(), device(), properties, &error);
6852 detail::errHandler(error, __CREATE_COMMAND_QUEUE_ERR);
// Constructor for an explicit context and device with type-safe
// QueueProperties (which cannot express CL_QUEUE_ON_DEVICE).
6860 * \brief Constructs a CommandQueue for a passed device and context
6861 * Will return an CL_INVALID_QUEUE_PROPERTIES error if CL_QUEUE_ON_DEVICE is specified.
6864 const Context& context,
6865 const Device& device,
6866 QueueProperties properties,
6871 #if CL_HPP_TARGET_OPENCL_VERSION >= 200
6872 cl_queue_properties queue_properties[] = {
6873 CL_QUEUE_PROPERTIES, static_cast<cl_queue_properties>(properties), 0 };
6874 object_ = ::clCreateCommandQueueWithProperties(
6875 context(), device(), queue_properties, &error);
6877 detail::errHandler(error, __CREATE_COMMAND_QUEUE_WITH_PROPERTIES_ERR);
// Pre-2.0 targets fall back to clCreateCommandQueue.
6882 object_ = ::clCreateCommandQueue(
6883 context(), device(), static_cast<cl_command_queue_properties>(properties), &error);
6885 detail::errHandler(error, __CREATE_COMMAND_QUEUE_ERR);
// getDefault: lazily constructs the process-wide default queue exactly once
// (call_once + makeDefault), then reports any stored construction error.
6892 static CommandQueue getDefault(cl_int * err = NULL)
6894 std::call_once(default_initialized_, makeDefault);
// The error macro differs by target version to match the API actually used.
6895 #if CL_HPP_TARGET_OPENCL_VERSION >= 200
6896 detail::errHandler(default_error_, __CREATE_COMMAND_QUEUE_WITH_PROPERTIES_ERR);
6897 #else // CL_HPP_TARGET_OPENCL_VERSION >= 200
6898 detail::errHandler(default_error_, __CREATE_COMMAND_QUEUE_ERR);
6899 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 200
6901 *err = default_error_;
6907 * Modify the default command queue to be used by
6908 * subsequent operations.
6909 * Will only set the default if no default was previously created.
6910 * @return updated default command queue.
6911 * Should be compared to the passed value to ensure that it was updated.
6913 static CommandQueue setDefault(const CommandQueue &default_queue)
// call_once means this is a no-op if a default already exists — hence the
// "compare the returned value" guidance in the doc comment above.
6915 std::call_once(default_initialized_, makeDefaultProvided, std::cref(default_queue));
6916 detail::errHandler(default_error_);
// Standard wrapper boilerplate: take-ownership constructor from the raw C
// handle plus copy/move constructors and assignments forwarded to
// detail::Wrapper<cl_command_queue>.
6923 /*! \brief Constructor from cl_command_queue - takes ownership.
6925 * \param retainObject will cause the constructor to retain its cl object.
6926 * Defaults to false to maintain compatibility with
6929 explicit CommandQueue(const cl_command_queue& commandQueue, bool retainObject = false) :
6930 detail::Wrapper<cl_type>(commandQueue, retainObject) { }
// Assignment from a raw handle — ownership semantics handled by the wrapper.
6932 CommandQueue& operator = (const cl_command_queue& rhs)
6934 detail::Wrapper<cl_type>::operator=(rhs);
6938 /*! \brief Copy constructor to forward copy to the superclass correctly.
6939 * Required for MSVC.
6941 CommandQueue(const CommandQueue& queue) : detail::Wrapper<cl_type>(queue) {}
6943 /*! \brief Copy assignment to forward copy to the superclass correctly.
6944 * Required for MSVC.
6946 CommandQueue& operator = (const CommandQueue &queue)
6948 detail::Wrapper<cl_type>::operator=(queue);
6952 /*! \brief Move constructor to forward move to the superclass correctly.
6953 * Required for MSVC.
6955 CommandQueue(CommandQueue&& queue) CL_HPP_NOEXCEPT_ : detail::Wrapper<cl_type>(std::move(queue)) {}
6957 /*! \brief Move assignment to forward move to the superclass correctly.
6958 * Required for MSVC.
6960 CommandQueue& operator = (CommandQueue &&queue)
6962 detail::Wrapper<cl_type>::operator=(std::move(queue));
// getInfo: typed wrappers over clGetCommandQueueInfo.
// Fix: line 6981 contained mojibake "¶m" — an HTML-entity-mangled
// "&param" — restored below; all other tokens unchanged.
6966 template <typename T>
6967 cl_int getInfo(cl_command_queue_info name, T* param) const
6969 return detail::errHandler(
6971 &::clGetCommandQueueInfo, object_, name, param),
6972 __GET_COMMAND_QUEUE_INFO_ERR);
// Traits-based overload: deduces the result type from the query enum.
6975 template <cl_int name> typename
6976 detail::param_traits<detail::cl_command_queue_info, name>::param_type
6977 getInfo(cl_int* err = NULL) const
6979 typename detail::param_traits<
6980 detail::cl_command_queue_info, name>::param_type param;
6981 cl_int result = getInfo(name, &param);
// enqueueReadBuffer: buffer -> host memory; optional wait list and optional
// completion event (tmp is copied to *event only when the enqueue succeeds).
6988 cl_int enqueueReadBuffer(
6989 const Buffer& buffer,
6994 const vector<Event>* events = NULL,
6995 Event* event = NULL) const
6998 cl_int err = detail::errHandler(
6999 ::clEnqueueReadBuffer(
7000 object_, buffer(), blocking, offset, size,
// Wait list converted to (count, pointer); NULL when empty as the API requires.
7002 (events != NULL) ? (cl_uint) events->size() : 0,
7003 (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
7004 (event != NULL) ? &tmp : NULL),
7005 __ENQUEUE_READ_BUFFER_ERR);
7007 if (event != NULL && err == CL_SUCCESS)
// enqueueWriteBuffer: host memory -> buffer; same wait-list/event pattern as
// enqueueReadBuffer.
7013 cl_int enqueueWriteBuffer(
7014 const Buffer& buffer,
7019 const vector<Event>* events = NULL,
7020 Event* event = NULL) const
7023 cl_int err = detail::errHandler(
7024 ::clEnqueueWriteBuffer(
7025 object_, buffer(), blocking, offset, size,
7027 (events != NULL) ? (cl_uint) events->size() : 0,
7028 (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
7029 (event != NULL) ? &tmp : NULL),
7030 __ENQUEUE_WRITE_BUFFER_ERR);
// Return the completion event to the caller only on success.
7032 if (event != NULL && err == CL_SUCCESS)
// enqueueCopyBuffer: device-side buffer-to-buffer copy.
// NOTE(review): the error macro __ENQEUE_COPY_BUFFER_ERR is misspelled
// ("ENQEUE") at its definition site elsewhere in the file — the name must be
// kept consistent with that definition, so it is not changed here.
7038 cl_int enqueueCopyBuffer(
7041 size_type src_offset,
7042 size_type dst_offset,
7044 const vector<Event>* events = NULL,
7045 Event* event = NULL) const
7048 cl_int err = detail::errHandler(
7049 ::clEnqueueCopyBuffer(
7050 object_, src(), dst(), src_offset, dst_offset, size,
7051 (events != NULL) ? (cl_uint) events->size() : 0,
7052 (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
7053 (event != NULL) ? &tmp : NULL),
7054 __ENQEUE_COPY_BUFFER_ERR);
7056 if (event != NULL && err == CL_SUCCESS)
// enqueueReadBufferRect: 2D/3D sub-region read from a buffer into host
// memory, with independent row/slice pitches on each side.
7062 cl_int enqueueReadBufferRect(
7063 const Buffer& buffer,
7065 const array<size_type, 3>& buffer_offset,
7066 const array<size_type, 3>& host_offset,
7067 const array<size_type, 3>& region,
7068 size_type buffer_row_pitch,
7069 size_type buffer_slice_pitch,
7070 size_type host_row_pitch,
7071 size_type host_slice_pitch,
7073 const vector<Event>* events = NULL,
7074 Event* event = NULL) const
7077 cl_int err = detail::errHandler(
7078 ::clEnqueueReadBufferRect(
// The std::array origins/region are passed as raw size_t pointers.
7082 buffer_offset.data(),
7090 (events != NULL) ? (cl_uint) events->size() : 0,
7091 (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
7092 (event != NULL) ? &tmp : NULL),
7093 __ENQUEUE_READ_BUFFER_RECT_ERR);
7095 if (event != NULL && err == CL_SUCCESS)
// enqueueWriteBufferRect: 2D/3D sub-region write from host memory into a
// buffer — mirror of enqueueReadBufferRect.
7101 cl_int enqueueWriteBufferRect(
7102 const Buffer& buffer,
7104 const array<size_type, 3>& buffer_offset,
7105 const array<size_type, 3>& host_offset,
7106 const array<size_type, 3>& region,
7107 size_type buffer_row_pitch,
7108 size_type buffer_slice_pitch,
7109 size_type host_row_pitch,
7110 size_type host_slice_pitch,
7112 const vector<Event>* events = NULL,
7113 Event* event = NULL) const
7116 cl_int err = detail::errHandler(
7117 ::clEnqueueWriteBufferRect(
7121 buffer_offset.data(),
7129 (events != NULL) ? (cl_uint) events->size() : 0,
7130 (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
7131 (event != NULL) ? &tmp : NULL),
7132 __ENQUEUE_WRITE_BUFFER_RECT_ERR);
7134 if (event != NULL && err == CL_SUCCESS)
// enqueueCopyBufferRect: device-side 2D/3D sub-region buffer-to-buffer copy.
// (__ENQEUE_COPY_BUFFER_RECT_ERR keeps the misspelling of its definition.)
7140 cl_int enqueueCopyBufferRect(
7143 const array<size_type, 3>& src_origin,
7144 const array<size_type, 3>& dst_origin,
7145 const array<size_type, 3>& region,
7146 size_type src_row_pitch,
7147 size_type src_slice_pitch,
7148 size_type dst_row_pitch,
7149 size_type dst_slice_pitch,
7150 const vector<Event>* events = NULL,
7151 Event* event = NULL) const
7154 cl_int err = detail::errHandler(
7155 ::clEnqueueCopyBufferRect(
7166 (events != NULL) ? (cl_uint) events->size() : 0,
7167 (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
7168 (event != NULL) ? &tmp : NULL),
7169 __ENQEUE_COPY_BUFFER_RECT_ERR);
7171 if (event != NULL && err == CL_SUCCESS)
7177 #if CL_HPP_TARGET_OPENCL_VERSION >= 120
7179 * Enqueue a command to fill a buffer object with a pattern
7180 * of a given size. The pattern is specified as a vector type.
7181 * \tparam PatternType The datatype of the pattern field.
7182 * The pattern type must be an accepted OpenCL data type.
7183 * \tparam offset Is the offset in bytes into the buffer at
7184 * which to start filling. This must be a multiple of
7186 * \tparam size Is the size in bytes of the region to fill.
7187 * This must be a multiple of the pattern size.
7189 template<typename PatternType>
7190 cl_int enqueueFillBuffer(
7191 const Buffer& buffer,
7192 PatternType pattern,
7195 const vector<Event>* events = NULL,
7196 Event* event = NULL) const
7199 cl_int err = detail::errHandler(
7200 ::clEnqueueFillBuffer(
// The pattern is passed by address with sizeof(PatternType) as its length,
// so the template parameter fully determines the fill granularity.
7203 static_cast<void*>(&pattern),
7204 sizeof(PatternType),
7207 (events != NULL) ? (cl_uint) events->size() : 0,
7208 (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
7209 (event != NULL) ? &tmp : NULL),
7210 __ENQUEUE_FILL_BUFFER_ERR);
7212 if (event != NULL && err == CL_SUCCESS)
7217 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
// enqueueReadImage: image -> host memory over an (origin, region) box with
// caller-supplied row/slice pitches.
7219 cl_int enqueueReadImage(
7222 const array<size_type, 3>& origin,
7223 const array<size_type, 3>& region,
7224 size_type row_pitch,
7225 size_type slice_pitch,
7227 const vector<Event>* events = NULL,
7228 Event* event = NULL) const
7231 cl_int err = detail::errHandler(
7232 ::clEnqueueReadImage(
7241 (events != NULL) ? (cl_uint) events->size() : 0,
7242 (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
7243 (event != NULL) ? &tmp : NULL),
7244 __ENQUEUE_READ_IMAGE_ERR);
7246 if (event != NULL && err == CL_SUCCESS)
// enqueueWriteImage: host memory -> image; mirror of enqueueReadImage.
7252 cl_int enqueueWriteImage(
7255 const array<size_type, 3>& origin,
7256 const array<size_type, 3>& region,
7257 size_type row_pitch,
7258 size_type slice_pitch,
7260 const vector<Event>* events = NULL,
7261 Event* event = NULL) const
7264 cl_int err = detail::errHandler(
7265 ::clEnqueueWriteImage(
7274 (events != NULL) ? (cl_uint) events->size() : 0,
7275 (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
7276 (event != NULL) ? &tmp : NULL),
7277 __ENQUEUE_WRITE_IMAGE_ERR);
7279 if (event != NULL && err == CL_SUCCESS)
// enqueueCopyImage: device-side image-to-image copy over an (origin, region)
// box on each side.
7285 cl_int enqueueCopyImage(
7288 const array<size_type, 3>& src_origin,
7289 const array<size_type, 3>& dst_origin,
7290 const array<size_type, 3>& region,
7291 const vector<Event>* events = NULL,
7292 Event* event = NULL) const
7295 cl_int err = detail::errHandler(
7296 ::clEnqueueCopyImage(
7303 (events != NULL) ? (cl_uint) events->size() : 0,
7304 (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
7305 (event != NULL) ? &tmp : NULL),
7306 __ENQUEUE_COPY_IMAGE_ERR);
7308 if (event != NULL && err == CL_SUCCESS)
7314 #if CL_HPP_TARGET_OPENCL_VERSION >= 120
7316 * Enqueue a command to fill an image object with a specified color.
7317 * \param fillColor is the color to use to fill the image.
7318 * This is a four component RGBA floating-point color value if
7319 * the image channel data type is not an unnormalized signed or
7320 * unsigned data type.
7322 cl_int enqueueFillImage(
7324 cl_float4 fillColor,
7325 const array<size_type, 3>& origin,
7326 const array<size_type, 3>& region,
7327 const vector<Event>* events = NULL,
7328 Event* event = NULL) const
7331 cl_int err = detail::errHandler(
7332 ::clEnqueueFillImage(
// The color is passed by address; clEnqueueFillImage interprets it per the
// image's channel data type.
7335 static_cast<void*>(&fillColor),
7338 (events != NULL) ? (cl_uint) events->size() : 0,
7339 (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
7340 (event != NULL) ? &tmp : NULL),
7341 __ENQUEUE_FILL_IMAGE_ERR);
7343 if (event != NULL && err == CL_SUCCESS)
// Signed-integer variant of enqueueFillImage (cl_int4 color).
7350 * Enqueue a command to fill an image object with a specified color.
7351 * \param fillColor is the color to use to fill the image.
7352 * This is a four component RGBA signed integer color value if
7353 * the image channel data type is an unnormalized signed integer
7356 cl_int enqueueFillImage(
7359 const array<size_type, 3>& origin,
7360 const array<size_type, 3>& region,
7361 const vector<Event>* events = NULL,
7362 Event* event = NULL) const
7365 cl_int err = detail::errHandler(
7366 ::clEnqueueFillImage(
7369 static_cast<void*>(&fillColor),
7372 (events != NULL) ? (cl_uint) events->size() : 0,
7373 (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
7374 (event != NULL) ? &tmp : NULL),
7375 __ENQUEUE_FILL_IMAGE_ERR);
7377 if (event != NULL && err == CL_SUCCESS)
// Unsigned-integer variant of enqueueFillImage (cl_uint4 color).
7384 * Enqueue a command to fill an image object with a specified color.
7385 * \param fillColor is the color to use to fill the image.
7386 * This is a four component RGBA unsigned integer color value if
7387 * the image channel data type is an unnormalized unsigned integer
7390 cl_int enqueueFillImage(
7393 const array<size_type, 3>& origin,
7394 const array<size_type, 3>& region,
7395 const vector<Event>* events = NULL,
7396 Event* event = NULL) const
7399 cl_int err = detail::errHandler(
7400 ::clEnqueueFillImage(
7403 static_cast<void*>(&fillColor),
7406 (events != NULL) ? (cl_uint) events->size() : 0,
7407 (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
7408 (event != NULL) ? &tmp : NULL),
7409 __ENQUEUE_FILL_IMAGE_ERR);
7411 if (event != NULL && err == CL_SUCCESS)
7416 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
// enqueueCopyImageToBuffer: device-side copy from an image region into a
// buffer at a byte offset.
7418 cl_int enqueueCopyImageToBuffer(
7421 const array<size_type, 3>& src_origin,
7422 const array<size_type, 3>& region,
7423 size_type dst_offset,
7424 const vector<Event>* events = NULL,
7425 Event* event = NULL) const
7428 cl_int err = detail::errHandler(
7429 ::clEnqueueCopyImageToBuffer(
7436 (events != NULL) ? (cl_uint) events->size() : 0,
7437 (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
7438 (event != NULL) ? &tmp : NULL),
7439 __ENQUEUE_COPY_IMAGE_TO_BUFFER_ERR);
7441 if (event != NULL && err == CL_SUCCESS)
// enqueueCopyBufferToImage: device-side copy from a buffer byte offset into
// an image region — mirror of enqueueCopyImageToBuffer.
7447 cl_int enqueueCopyBufferToImage(
7450 size_type src_offset,
7451 const array<size_type, 3>& dst_origin,
7452 const array<size_type, 3>& region,
7453 const vector<Event>* events = NULL,
7454 Event* event = NULL) const
7457 cl_int err = detail::errHandler(
7458 ::clEnqueueCopyBufferToImage(
7465 (events != NULL) ? (cl_uint) events->size() : 0,
7466 (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
7467 (event != NULL) ? &tmp : NULL),
7468 __ENQUEUE_COPY_BUFFER_TO_IMAGE_ERR);
7470 if (event != NULL && err == CL_SUCCESS)
// enqueueMapBuffer: maps a buffer region into host address space and returns
// the host pointer; the CL status goes through errHandler and optionally *err.
7476 void* enqueueMapBuffer(
7477 const Buffer& buffer,
7482 const vector<Event>* events = NULL,
7483 Event* event = NULL,
7484 cl_int* err = NULL) const
7488 void * result = ::clEnqueueMapBuffer(
7489 object_, buffer(), blocking, flags, offset, size,
7490 (events != NULL) ? (cl_uint) events->size() : 0,
7491 (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
7492 (event != NULL) ? &tmp : NULL,
7495 detail::errHandler(error, __ENQUEUE_MAP_BUFFER_ERR);
// Hand back the map event only when the map itself succeeded.
7499 if (event != NULL && error == CL_SUCCESS)
// enqueueMapImage: maps an image region into host address space; the
// implementation reports the actual row/slice pitches through the out-params.
7505 void* enqueueMapImage(
7506 const Image& buffer,
7509 const array<size_type, 3>& origin,
7510 const array<size_type, 3>& region,
7511 size_type * row_pitch,
7512 size_type * slice_pitch,
7513 const vector<Event>* events = NULL,
7514 Event* event = NULL,
7515 cl_int* err = NULL) const
7519 void * result = ::clEnqueueMapImage(
7520 object_, buffer(), blocking, flags,
7523 row_pitch, slice_pitch,
7524 (events != NULL) ? (cl_uint) events->size() : 0,
7525 (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
7526 (event != NULL) ? &tmp : NULL,
7529 detail::errHandler(error, __ENQUEUE_MAP_IMAGE_ERR);
// Hand back the map event only when the map itself succeeded.
7533 if (event != NULL && error == CL_SUCCESS)
7538 #if CL_HPP_TARGET_OPENCL_VERSION >= 200
7540 * Enqueues a command that will allow the host to update a region of a coarse-grained SVM buffer.
7541 * This variant takes a raw SVM pointer.
7543 template<typename T>
7544 cl_int enqueueMapSVM(
7549 const vector<Event>* events = NULL,
7550 Event* event = NULL) const
// Wraps clEnqueueSVMMap; the typed pointer is erased to void* for the C API.
7553 cl_int err = detail::errHandler(::clEnqueueSVMMap(
7554 object_, blocking, flags, static_cast<void*>(ptr), size,
7555 (events != NULL) ? (cl_uint)events->size() : 0,
7556 (events != NULL && events->size() > 0) ? (cl_event*)&events->front() : NULL,
7557 (event != NULL) ? &tmp : NULL),
// NOTE(review): reuses the map-buffer error string for SVM maps; intentional
// in the upstream bindings.
7558 __ENQUEUE_MAP_BUFFER_ERR);
7560 if (event != NULL && err == CL_SUCCESS)
7568 * Enqueues a command that will allow the host to update a region of a coarse-grained SVM buffer.
7569 * This variant takes a cl::pointer instance.
7571 template<typename T, class D>
7572 cl_int enqueueMapSVM(
7573 cl::pointer<T, D> &ptr,
7577 const vector<Event>* events = NULL,
7578 Event* event = NULL) const
// Same as the raw-pointer overload, but unwraps the smart pointer with get().
7581 cl_int err = detail::errHandler(::clEnqueueSVMMap(
7582 object_, blocking, flags, static_cast<void*>(ptr.get()), size,
7583 (events != NULL) ? (cl_uint)events->size() : 0,
7584 (events != NULL && events->size() > 0) ? (cl_event*)&events->front() : NULL,
7585 (event != NULL) ? &tmp : NULL),
7586 __ENQUEUE_MAP_BUFFER_ERR);
7588 if (event != NULL && err == CL_SUCCESS)
7595 * Enqueues a command that will allow the host to update a region of a coarse-grained SVM buffer.
7596 * This variant takes a cl::vector instance.
7598 template<typename T, class Alloc>
7599 cl_int enqueueMapSVM(
7600 cl::vector<T, Alloc> &container,
7603 const vector<Event>* events = NULL,
7604 Event* event = NULL) const
// Maps the container's entire backing store: data()/size() define the range,
// so no explicit size parameter is taken by this overload.
7607 cl_int err = detail::errHandler(::clEnqueueSVMMap(
7608 object_, blocking, flags, static_cast<void*>(container.data()), container.size(),
7609 (events != NULL) ? (cl_uint)events->size() : 0,
7610 (events != NULL && events->size() > 0) ? (cl_event*)&events->front() : NULL,
7611 (event != NULL) ? &tmp : NULL),
7612 __ENQUEUE_MAP_BUFFER_ERR);
7614 if (event != NULL && err == CL_SUCCESS)
7619 #endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 200
// Unmaps a previously mapped region of 'memory' (mapped_ptr comes from a
// prior enqueueMapBuffer/enqueueMapImage call) via clEnqueueUnmapMemObject.
7621 cl_int enqueueUnmapMemObject(
7622 const Memory& memory,
7624 const vector<Event>* events = NULL,
7625 Event* event = NULL) const
7628 cl_int err = detail::errHandler(
7629 ::clEnqueueUnmapMemObject(
7630 object_, memory(), mapped_ptr,
7631 (events != NULL) ? (cl_uint) events->size() : 0,
7632 (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
7633 (event != NULL) ? &tmp : NULL),
7634 __ENQUEUE_UNMAP_MEM_OBJECT_ERR);
7636 if (event != NULL && err == CL_SUCCESS)
7643 #if CL_HPP_TARGET_OPENCL_VERSION >= 200
7645 * Enqueues a command that will release a coarse-grained SVM buffer back to the OpenCL runtime.
7646 * This variant takes a raw SVM pointer.
7648 template<typename T>
7649 cl_int enqueueUnmapSVM(
7651 const vector<Event>* events = NULL,
7652 Event* event = NULL) const
// Wraps clEnqueueSVMUnmap; mirrors enqueueMapSVM's pointer handling.
7655 cl_int err = detail::errHandler(
7656 ::clEnqueueSVMUnmap(
7657 object_, static_cast<void*>(ptr),
7658 (events != NULL) ? (cl_uint)events->size() : 0,
7659 (events != NULL && events->size() > 0) ? (cl_event*)&events->front() : NULL,
7660 (event != NULL) ? &tmp : NULL),
7661 __ENQUEUE_UNMAP_MEM_OBJECT_ERR);
7663 if (event != NULL && err == CL_SUCCESS)
7670 * Enqueues a command that will release a coarse-grained SVM buffer back to the OpenCL runtime.
7671 * This variant takes a cl::pointer instance.
7673 template<typename T, class D>
7674 cl_int enqueueUnmapSVM(
7675 cl::pointer<T, D> &ptr,
7676 const vector<Event>* events = NULL,
7677 Event* event = NULL) const
// Unwraps the smart pointer with get() before handing it to the C API.
7680 cl_int err = detail::errHandler(
7681 ::clEnqueueSVMUnmap(
7682 object_, static_cast<void*>(ptr.get()),
7683 (events != NULL) ? (cl_uint)events->size() : 0,
7684 (events != NULL && events->size() > 0) ? (cl_event*)&events->front() : NULL,
7685 (event != NULL) ? &tmp : NULL),
7686 __ENQUEUE_UNMAP_MEM_OBJECT_ERR);
7688 if (event != NULL && err == CL_SUCCESS)
7695 * Enqueues a command that will release a coarse-grained SVM buffer back to the OpenCL runtime.
7696 * This variant takes a cl::vector instance.
7698 template<typename T, class Alloc>
7699 cl_int enqueueUnmapSVM(
7700 cl::vector<T, Alloc> &container,
7701 const vector<Event>* events = NULL,
7702 Event* event = NULL) const
// Unmaps the container's backing store (the pointer previously mapped via
// the cl::vector enqueueMapSVM overload).
7705 cl_int err = detail::errHandler(
7706 ::clEnqueueSVMUnmap(
7707 object_, static_cast<void*>(container.data()),
7708 (events != NULL) ? (cl_uint)events->size() : 0,
7709 (events != NULL && events->size() > 0) ? (cl_event*)&events->front() : NULL,
7710 (event != NULL) ? &tmp : NULL),
7711 __ENQUEUE_UNMAP_MEM_OBJECT_ERR);
7713 if (event != NULL && err == CL_SUCCESS)
7718 #endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 200
7720 #if CL_HPP_TARGET_OPENCL_VERSION >= 120
7722 * Enqueues a marker command which waits for either a list of events to complete,
7723 * or all previously enqueued commands to complete.
7725 * Enqueues a marker command which waits for either a list of events to complete,
7726 * or if the list is empty it waits for all commands previously enqueued in command_queue
7727 * to complete before it completes. This command returns an event which can be waited on,
7728 * i.e. this event can be waited on to insure that all events either in the event_wait_list
7729 * or all previously enqueued commands, queued before this command to command_queue,
7732 cl_int enqueueMarkerWithWaitList(
7733 const vector<Event> *events = 0,
7737 cl_int err = detail::errHandler(
7738 ::clEnqueueMarkerWithWaitList(
7740 (events != NULL) ? (cl_uint) events->size() : 0,
7741 (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
7742 (event != NULL) ? &tmp : NULL),
7743 __ENQUEUE_MARKER_WAIT_LIST_ERR);
7745 if (event != NULL && err == CL_SUCCESS)
7752 * A synchronization point that enqueues a barrier operation.
7754 * Enqueues a barrier command which waits for either a list of events to complete,
7755 * or if the list is empty it waits for all commands previously enqueued in command_queue
7756 * to complete before it completes. This command blocks command execution, that is, any
7757 * following commands enqueued after it do not execute until it completes. This command
7758 * returns an event which can be waited on, i.e. this event can be waited on to insure that
7759 * all events either in the event_wait_list or all previously enqueued commands, queued
7760 * before this command to command_queue, have completed.
7762 cl_int enqueueBarrierWithWaitList(
7763 const vector<Event> *events = 0,
7767 cl_int err = detail::errHandler(
7768 ::clEnqueueBarrierWithWaitList(
7770 (events != NULL) ? (cl_uint) events->size() : 0,
7771 (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
7772 (event != NULL) ? &tmp : NULL),
7773 __ENQUEUE_BARRIER_WAIT_LIST_ERR);
7775 if (event != NULL && err == CL_SUCCESS)
7782 * Enqueues a command to indicate with which device a set of memory objects
7783 * should be associated.
7785 cl_int enqueueMigrateMemObjects(
7786 const vector<Memory> &memObjects,
7787 cl_mem_migration_flags flags,
7788 const vector<Event>* events = NULL,
// Flatten the C++ wrappers into a raw cl_mem array for the C API call.
7794 vector<cl_mem> localMemObjects(memObjects.size());
7796 for( int i = 0; i < (int)memObjects.size(); ++i ) {
7797 localMemObjects[i] = memObjects[i]();
7801 cl_int err = detail::errHandler(
7802 ::clEnqueueMigrateMemObjects(
7804 (cl_uint)memObjects.size(),
7805 localMemObjects.data(),
7807 (events != NULL) ? (cl_uint) events->size() : 0,
7808 (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
7809 (event != NULL) ? &tmp : NULL),
// NOTE(review): reports __ENQUEUE_UNMAP_MEM_OBJECT_ERR for a migrate call —
// looks like a copy/paste from enqueueUnmapMemObject; a dedicated
// __ENQUEUE_MIGRATE_MEM_OBJECTS_ERR string would be clearer. Verify the
// macro exists in this header before changing.
7810 __ENQUEUE_UNMAP_MEM_OBJECT_ERR);
7812 if (event != NULL && err == CL_SUCCESS)
7817 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
// Enqueues 'kernel' for execution over the given ND range. NullRange for
// 'offset'/'local' (dimensions() == 0) is translated to NULL so the runtime
// picks a zero offset / an implementation-chosen work-group size.
7819 cl_int enqueueNDRangeKernel(
7820 const Kernel& kernel,
7821 const NDRange& offset,
7822 const NDRange& global,
7823 const NDRange& local = NullRange,
7824 const vector<Event>* events = NULL,
7825 Event* event = NULL) const
7828 cl_int err = detail::errHandler(
7829 ::clEnqueueNDRangeKernel(
7830 object_, kernel(), (cl_uint) global.dimensions(),
7831 offset.dimensions() != 0 ? (const size_type*) offset : NULL,
7832 (const size_type*) global,
7833 local.dimensions() != 0 ? (const size_type*) local : NULL,
7834 (events != NULL) ? (cl_uint) events->size() : 0,
7835 (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
7836 (event != NULL) ? &tmp : NULL),
7837 __ENQUEUE_NDRANGE_KERNEL_ERR);
7839 if (event != NULL && err == CL_SUCCESS)
7845 #if defined(CL_USE_DEPRECATED_OPENCL_1_2_APIS)
// Deprecated single-work-item kernel launch (clEnqueueTask); only compiled
// when the user opts in to the deprecated 1.2 APIs.
7846 CL_EXT_PREFIX__VERSION_1_2_DEPRECATED cl_int enqueueTask(
7847 const Kernel& kernel,
7848 const vector<Event>* events = NULL,
7849 Event* event = NULL) CL_EXT_SUFFIX__VERSION_1_2_DEPRECATED const
7852 cl_int err = detail::errHandler(
7855 (events != NULL) ? (cl_uint) events->size() : 0,
7856 (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
7857 (event != NULL) ? &tmp : NULL),
7858 __ENQUEUE_TASK_ERR);
7860 if (event != NULL && err == CL_SUCCESS)
7865 #endif // #if defined(CL_USE_DEPRECATED_OPENCL_1_2_APIS)
// Enqueues a host-callable (native) function via clEnqueueNativeKernel.
// 'args' is the (pointer, size) blob passed to the callback; 'mem_objects'
// and 'mem_locs' describe cl_mem handles embedded in that blob.
7867 cl_int enqueueNativeKernel(
7868 void (CL_CALLBACK *userFptr)(void *),
7869 std::pair<void*, size_type> args,
7870 const vector<Memory>* mem_objects = NULL,
7871 const vector<const void*>* mem_locs = NULL,
7872 const vector<Event>* events = NULL,
7873 Event* event = NULL) const
7875 size_type elements = 0;
7876 if (mem_objects != NULL) {
7877 elements = mem_objects->size();
// Unwrap the C++ Memory wrappers into a raw cl_mem array for the C API.
7879 vector<cl_mem> mems(elements);
7880 for (unsigned int i = 0; i < elements; i++) {
7881 mems[i] = ((*mem_objects)[i])();
7885 cl_int err = detail::errHandler(
7886 ::clEnqueueNativeKernel(
7887 object_, userFptr, args.first, args.second,
7888 (mem_objects != NULL) ? (cl_uint) mem_objects->size() : 0,
7890 (mem_locs != NULL && mem_locs->size() > 0) ? (const void **) &mem_locs->front() : NULL,
7891 (events != NULL) ? (cl_uint) events->size() : 0,
7892 (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
7893 (event != NULL) ? &tmp : NULL),
7894 __ENQUEUE_NATIVE_KERNEL);
7896 if (event != NULL && err == CL_SUCCESS)
7903 * Deprecated APIs for 1.2
7905 #if defined(CL_USE_DEPRECATED_OPENCL_1_1_APIS)
// Deprecated OpenCL 1.1 marker (clEnqueueMarker, no wait list); superseded
// by enqueueMarkerWithWaitList in 1.2.
7906 CL_EXT_PREFIX__VERSION_1_1_DEPRECATED
7907 cl_int enqueueMarker(Event* event = NULL) const CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED
7910 cl_int err = detail::errHandler(
7913 (event != NULL) ? &tmp : NULL),
7914 __ENQUEUE_MARKER_ERR);
7916 if (event != NULL && err == CL_SUCCESS)
// Deprecated OpenCL 1.1 wait (clEnqueueWaitForEvents); superseded by
// enqueueBarrierWithWaitList in 1.2. Takes the wait list by reference,
// so only an emptiness check is needed (no NULL check).
7922 CL_EXT_PREFIX__VERSION_1_1_DEPRECATED
7923 cl_int enqueueWaitForEvents(const vector<Event>& events) const CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED
7925 return detail::errHandler(
7926 ::clEnqueueWaitForEvents(
7928 (cl_uint) events.size(),
7929 events.size() > 0 ? (const cl_event*) &events.front() : NULL),
7930 __ENQUEUE_WAIT_FOR_EVENTS_ERR);
7932 #endif // defined(CL_USE_DEPRECATED_OPENCL_1_1_APIS)
// Acquires OpenGL-shared memory objects for use by OpenCL
// (clEnqueueAcquireGLObjects, cl_khr_gl_sharing).
7934 cl_int enqueueAcquireGLObjects(
7935 const vector<Memory>* mem_objects = NULL,
7936 const vector<Event>* events = NULL,
7937 Event* event = NULL) const
7940 cl_int err = detail::errHandler(
7941 ::clEnqueueAcquireGLObjects(
7943 (mem_objects != NULL) ? (cl_uint) mem_objects->size() : 0,
7944 (mem_objects != NULL && mem_objects->size() > 0) ? (const cl_mem *) &mem_objects->front(): NULL,
7945 (events != NULL) ? (cl_uint) events->size() : 0,
7946 (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
7947 (event != NULL) ? &tmp : NULL),
7948 __ENQUEUE_ACQUIRE_GL_ERR);
7950 if (event != NULL && err == CL_SUCCESS)
// Releases OpenGL-shared memory objects back to OpenGL
// (clEnqueueReleaseGLObjects); mirror of enqueueAcquireGLObjects.
7956 cl_int enqueueReleaseGLObjects(
7957 const vector<Memory>* mem_objects = NULL,
7958 const vector<Event>* events = NULL,
7959 Event* event = NULL) const
7962 cl_int err = detail::errHandler(
7963 ::clEnqueueReleaseGLObjects(
7965 (mem_objects != NULL) ? (cl_uint) mem_objects->size() : 0,
7966 (mem_objects != NULL && mem_objects->size() > 0) ? (const cl_mem *) &mem_objects->front(): NULL,
7967 (events != NULL) ? (cl_uint) events->size() : 0,
7968 (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
7969 (event != NULL) ? &tmp : NULL),
7970 __ENQUEUE_RELEASE_GL_ERR);
7972 if (event != NULL && err == CL_SUCCESS)
7978 #if defined (CL_HPP_USE_DX_INTEROP)
// Function-pointer types for the cl_khr_d3d10_sharing extension entry
// points, which must be resolved at runtime via the ICD.
7979 typedef CL_API_ENTRY cl_int (CL_API_CALL *PFN_clEnqueueAcquireD3D10ObjectsKHR)(
7980 cl_command_queue command_queue, cl_uint num_objects,
7981 const cl_mem* mem_objects, cl_uint num_events_in_wait_list,
7982 const cl_event* event_wait_list, cl_event* event);
7983 typedef CL_API_ENTRY cl_int (CL_API_CALL *PFN_clEnqueueReleaseD3D10ObjectsKHR)(
7984 cl_command_queue command_queue, cl_uint num_objects,
7985 const cl_mem* mem_objects, cl_uint num_events_in_wait_list,
7986 const cl_event* event_wait_list, cl_event* event);
// Acquires Direct3D 10-shared memory objects for use by OpenCL
// (cl_khr_d3d10_sharing). The extension entry point is looked up once and
// cached in a function-local static.
7988 cl_int enqueueAcquireD3D10Objects(
7989 const vector<Memory>* mem_objects = NULL,
7990 const vector<Event>* events = NULL,
7991 Event* event = NULL) const
7993 static PFN_clEnqueueAcquireD3D10ObjectsKHR pfn_clEnqueueAcquireD3D10ObjectsKHR = NULL;
7994 #if CL_HPP_TARGET_OPENCL_VERSION >= 120
7995 cl_context context = getInfo<CL_QUEUE_CONTEXT>();
7996 cl::Device device(getInfo<CL_QUEUE_DEVICE>());
7997 cl_platform_id platform = device.getInfo<CL_DEVICE_PLATFORM>();
7998 CL_HPP_INIT_CL_EXT_FCN_PTR_PLATFORM_(platform, clEnqueueAcquireD3D10ObjectsKHR);
8000 #if CL_HPP_TARGET_OPENCL_VERSION >= 110
8001 CL_HPP_INIT_CL_EXT_FCN_PTR_(clEnqueueAcquireD3D10ObjectsKHR);
8005 cl_int err = detail::errHandler(
8006 pfn_clEnqueueAcquireD3D10ObjectsKHR(
8008 (mem_objects != NULL) ? (cl_uint) mem_objects->size() : 0,
8009 (mem_objects != NULL && mem_objects->size() > 0) ? (const cl_mem *) &mem_objects->front(): NULL,
8010 (events != NULL) ? (cl_uint) events->size() : 0,
// Fix: guard the wait-list pointer with a size check as every other
// enqueue* wrapper does — &events->front() on an empty vector is UB.
8011 (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
8012 (event != NULL) ? &tmp : NULL),
// NOTE(review): reuses the GL acquire error string for D3D10; matches the
// Release variant below, so left as-is.
8013 __ENQUEUE_ACQUIRE_GL_ERR);
8015 if (event != NULL && err == CL_SUCCESS)
// Releases Direct3D 10-shared memory objects back to D3D10
// (cl_khr_d3d10_sharing); mirror of enqueueAcquireD3D10Objects.
8021 cl_int enqueueReleaseD3D10Objects(
8022 const vector<Memory>* mem_objects = NULL,
8023 const vector<Event>* events = NULL,
8024 Event* event = NULL) const
8026 static PFN_clEnqueueReleaseD3D10ObjectsKHR pfn_clEnqueueReleaseD3D10ObjectsKHR = NULL;
8027 #if CL_HPP_TARGET_OPENCL_VERSION >= 120
8028 cl_context context = getInfo<CL_QUEUE_CONTEXT>();
8029 cl::Device device(getInfo<CL_QUEUE_DEVICE>());
8030 cl_platform_id platform = device.getInfo<CL_DEVICE_PLATFORM>();
8031 CL_HPP_INIT_CL_EXT_FCN_PTR_PLATFORM_(platform, clEnqueueReleaseD3D10ObjectsKHR);
8032 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
8033 #if CL_HPP_TARGET_OPENCL_VERSION >= 110
8034 CL_HPP_INIT_CL_EXT_FCN_PTR_(clEnqueueReleaseD3D10ObjectsKHR);
8035 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 110
8038 cl_int err = detail::errHandler(
8039 pfn_clEnqueueReleaseD3D10ObjectsKHR(
8041 (mem_objects != NULL) ? (cl_uint) mem_objects->size() : 0,
8042 (mem_objects != NULL && mem_objects->size() > 0) ? (const cl_mem *) &mem_objects->front(): NULL,
8043 (events != NULL) ? (cl_uint) events->size() : 0,
8044 (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
8045 (event != NULL) ? &tmp : NULL),
8046 __ENQUEUE_RELEASE_GL_ERR);
8048 if (event != NULL && err == CL_SUCCESS)
8056 * Deprecated APIs for 1.2
8058 #if defined(CL_USE_DEPRECATED_OPENCL_1_1_APIS)
// Deprecated OpenCL 1.1 barrier (clEnqueueBarrier, no wait list/event);
// superseded by enqueueBarrierWithWaitList in 1.2.
8059 CL_EXT_PREFIX__VERSION_1_1_DEPRECATED
8060 cl_int enqueueBarrier() const CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED
8062 return detail::errHandler(
8063 ::clEnqueueBarrier(object_),
8064 __ENQUEUE_BARRIER_ERR);
8066 #endif // CL_USE_DEPRECATED_OPENCL_1_1_APIS
// Issues all queued commands to the device (clFlush); does not wait for them.
8068 cl_int flush() const
8070 return detail::errHandler(::clFlush(object_), __FLUSH_ERR);
// Blocks until all commands in this queue have completed (clFinish).
8073 cl_int finish() const
8075 return detail::errHandler(::clFinish(object_), __FINISH_ERR);
// Out-of-class definitions for CommandQueue's lazily-initialized default
// queue state (guarded by std::once_flag).
8079 CL_HPP_DEFINE_STATIC_MEMBER_ std::once_flag CommandQueue::default_initialized_;
8080 CL_HPP_DEFINE_STATIC_MEMBER_ CommandQueue CommandQueue::default_;
8081 CL_HPP_DEFINE_STATIC_MEMBER_ cl_int CommandQueue::default_error_ = CL_SUCCESS;
8084 #if CL_HPP_TARGET_OPENCL_VERSION >= 200
// Scoped flag type for device-queue creation; values map directly onto
// cl_command_queue_properties bits.
8085 enum class DeviceQueueProperties : cl_command_queue_properties
8088 Profiling = CL_QUEUE_PROFILING_ENABLE,
// Bitwise-or so the scoped enum can be combined like the underlying flags.
8091 inline DeviceQueueProperties operator|(DeviceQueueProperties lhs, DeviceQueueProperties rhs)
8093 return static_cast<DeviceQueueProperties>(static_cast<cl_command_queue_properties>(lhs) | static_cast<cl_command_queue_properties>(rhs));
8096 /*! \class DeviceCommandQueue
8097 * \brief DeviceCommandQueue interface for device cl_command_queues.
8099 class DeviceCommandQueue : public detail::Wrapper<cl_command_queue>
8104 * Trivial empty constructor to create a null queue.
8106 DeviceCommandQueue() { }
8109 * Default construct device command queue on default context and device
8111 DeviceCommandQueue(DeviceQueueProperties properties, cl_int* err = NULL)
8114 cl::Context context = cl::Context::getDefault();
8115 cl::Device device = cl::Device::getDefault();
// Device queues must always be out-of-order and on-device; the caller's
// flags are OR'd on top of those mandatory bits.
8117 cl_command_queue_properties mergedProperties =
8118 CL_QUEUE_OUT_OF_ORDER_EXEC_MODE_ENABLE | CL_QUEUE_ON_DEVICE | static_cast<cl_command_queue_properties>(properties);
8120 cl_queue_properties queue_properties[] = {
8121 CL_QUEUE_PROPERTIES, mergedProperties, 0 };
8122 object_ = ::clCreateCommandQueueWithProperties(
8123 context(), device(), queue_properties, &error);
8125 detail::errHandler(error, __CREATE_COMMAND_QUEUE_WITH_PROPERTIES_ERR);
8132 * Create a device command queue for a specified device in the passed context.
8135 const Context& context,
8136 const Device& device,
8137 DeviceQueueProperties properties = DeviceQueueProperties::None,
8142 cl_command_queue_properties mergedProperties =
8143 CL_QUEUE_OUT_OF_ORDER_EXEC_MODE_ENABLE | CL_QUEUE_ON_DEVICE | static_cast<cl_command_queue_properties>(properties);
8144 cl_queue_properties queue_properties[] = {
8145 CL_QUEUE_PROPERTIES, mergedProperties, 0 };
8146 object_ = ::clCreateCommandQueueWithProperties(
8147 context(), device(), queue_properties, &error);
8149 detail::errHandler(error, __CREATE_COMMAND_QUEUE_WITH_PROPERTIES_ERR);
8156 * Create a device command queue for a specified device in the passed context.
8159 const Context& context,
8160 const Device& device,
8162 DeviceQueueProperties properties = DeviceQueueProperties::None,
8167 cl_command_queue_properties mergedProperties =
8168 CL_QUEUE_OUT_OF_ORDER_EXEC_MODE_ENABLE | CL_QUEUE_ON_DEVICE | static_cast<cl_command_queue_properties>(properties);
// This overload additionally requests an explicit on-device queue size
// via CL_QUEUE_SIZE.
8169 cl_queue_properties queue_properties[] = {
8170 CL_QUEUE_PROPERTIES, mergedProperties,
8171 CL_QUEUE_SIZE, queueSize,
8173 object_ = ::clCreateCommandQueueWithProperties(
8174 context(), device(), queue_properties, &error);
8176 detail::errHandler(error, __CREATE_COMMAND_QUEUE_WITH_PROPERTIES_ERR);
8182 /*! \brief Constructor from cl_command_queue - takes ownership.
8184 * \param retainObject will cause the constructor to retain its cl object.
8185 * Defaults to false to maintain compatibility with
8188 explicit DeviceCommandQueue(const cl_command_queue& commandQueue, bool retainObject = false) :
8189 detail::Wrapper<cl_type>(commandQueue, retainObject) { }
8191 DeviceCommandQueue& operator = (const cl_command_queue& rhs)
8193 detail::Wrapper<cl_type>::operator=(rhs);
8197 /*! \brief Copy constructor to forward copy to the superclass correctly.
8198 * Required for MSVC.
8200 DeviceCommandQueue(const DeviceCommandQueue& queue) : detail::Wrapper<cl_type>(queue) {}
8202 /*! \brief Copy assignment to forward copy to the superclass correctly.
8203 * Required for MSVC.
8205 DeviceCommandQueue& operator = (const DeviceCommandQueue &queue)
8207 detail::Wrapper<cl_type>::operator=(queue);
8211 /*! \brief Move constructor to forward move to the superclass correctly.
8212 * Required for MSVC.
8214 DeviceCommandQueue(DeviceCommandQueue&& queue) CL_HPP_NOEXCEPT_ : detail::Wrapper<cl_type>(std::move(queue)) {}
8216 /*! \brief Move assignment to forward move to the superclass correctly.
8217 * Required for MSVC.
8219 DeviceCommandQueue& operator = (DeviceCommandQueue &&queue)
8221 detail::Wrapper<cl_type>::operator=(std::move(queue));
// Generic queue-info query; the typed getInfo<name>() below wraps this.
8225 template <typename T>
8226 cl_int getInfo(cl_command_queue_info name, T* param) const
8228 return detail::errHandler(
8230 &::clGetCommandQueueInfo, object_, name, param),
8231 __GET_COMMAND_QUEUE_INFO_ERR);
8234 template <cl_int name> typename
8235 detail::param_traits<detail::cl_command_queue_info, name>::param_type
8236 getInfo(cl_int* err = NULL) const
8238 typename detail::param_traits<
8239 detail::cl_command_queue_info, name>::param_type param;
8240 cl_int result = getInfo(name, &param);
8248 * Create a new default device command queue for the default device,
8249 * in the default context and of the default size.
8250 * If there is already a default queue for the specified device this
8251 * function will return the pre-existing queue.
8253 static DeviceCommandQueue makeDefault(
8254 cl_int *err = nullptr)
8257 cl::Context context = cl::Context::getDefault();
8258 cl::Device device = cl::Device::getDefault();
// Default device queues additionally carry CL_QUEUE_ON_DEVICE_DEFAULT.
8260 cl_command_queue_properties properties =
8261 CL_QUEUE_OUT_OF_ORDER_EXEC_MODE_ENABLE | CL_QUEUE_ON_DEVICE | CL_QUEUE_ON_DEVICE_DEFAULT;
8262 cl_queue_properties queue_properties[] = {
8263 CL_QUEUE_PROPERTIES, properties,
8265 DeviceCommandQueue deviceQueue(
8266 ::clCreateCommandQueueWithProperties(
8267 context(), device(), queue_properties, &error));
8269 detail::errHandler(error, __CREATE_COMMAND_QUEUE_WITH_PROPERTIES_ERR);
8278 * Create a new default device command queue for the specified device
8279 * and of the default size.
8280 * If there is already a default queue for the specified device this
8281 * function will return the pre-existing queue.
8283 static DeviceCommandQueue makeDefault(
8284 const Context &context, const Device &device, cl_int *err = nullptr)
8288 cl_command_queue_properties properties =
8289 CL_QUEUE_OUT_OF_ORDER_EXEC_MODE_ENABLE | CL_QUEUE_ON_DEVICE | CL_QUEUE_ON_DEVICE_DEFAULT;
8290 cl_queue_properties queue_properties[] = {
8291 CL_QUEUE_PROPERTIES, properties,
8293 DeviceCommandQueue deviceQueue(
8294 ::clCreateCommandQueueWithProperties(
8295 context(), device(), queue_properties, &error));
8297 detail::errHandler(error, __CREATE_COMMAND_QUEUE_WITH_PROPERTIES_ERR);
8306 * Create a new default device command queue for the specified device
8307 * and of the requested size in bytes.
8308 * If there is already a default queue for the specified device this
8309 * function will return the pre-existing queue.
8311 static DeviceCommandQueue makeDefault(
8312 const Context &context, const Device &device, cl_uint queueSize, cl_int *err = nullptr)
8316 cl_command_queue_properties properties =
8317 CL_QUEUE_OUT_OF_ORDER_EXEC_MODE_ENABLE | CL_QUEUE_ON_DEVICE | CL_QUEUE_ON_DEVICE_DEFAULT;
8318 cl_queue_properties queue_properties[] = {
8319 CL_QUEUE_PROPERTIES, properties,
8320 CL_QUEUE_SIZE, queueSize,
8322 DeviceCommandQueue deviceQueue(
8323 ::clCreateCommandQueueWithProperties(
8324 context(), device(), queue_properties, &error));
8326 detail::errHandler(error, __CREATE_COMMAND_QUEUE_WITH_PROPERTIES_ERR);
8333 }; // DeviceCommandQueue
8337 // Specialization for device command queue
// Teaches Kernel::setArg how to pass a DeviceCommandQueue: size of the raw
// handle plus a pointer to it, as clSetKernelArg expects.
8339 struct KernelArgumentHandler<cl::DeviceCommandQueue, void>
8341 static size_type size(const cl::DeviceCommandQueue&) { return sizeof(cl_command_queue); }
8342 static const cl_command_queue* ptr(const cl::DeviceCommandQueue& value) { return &(value()); }
8344 } // namespace detail
8346 #endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 200
// Buffer constructor that creates a buffer sized for [startIterator,
// endIterator) in the given context, then (unless using host memory)
// copies the range in via a temporary default-properties queue.
8349 template< typename IteratorType >
8351 const Context &context,
8352 IteratorType startIterator,
8353 IteratorType endIterator,
8358 typedef typename std::iterator_traits<IteratorType>::value_type DataType;
8361 cl_mem_flags flags = 0;
8363 flags |= CL_MEM_READ_ONLY;
8366 flags |= CL_MEM_READ_WRITE;
8369 flags |= CL_MEM_USE_HOST_PTR;
8372 size_type size = sizeof(DataType)*(endIterator - startIterator);
// When using host memory the iterator range must be contiguous: &*start
// is handed straight to clCreateBuffer as the host pointer.
8375 object_ = ::clCreateBuffer(context(), flags, size, static_cast<DataType*>(&*startIterator), &error);
8377 object_ = ::clCreateBuffer(context(), flags, size, 0, &error);
8380 detail::errHandler(error, __CREATE_BUFFER_ERR);
8386 CommandQueue queue(context, 0, &error);
8387 detail::errHandler(error, __CREATE_BUFFER_ERR);
8392 error = cl::copy(queue, startIterator, endIterator, *this);
8393 detail::errHandler(error, __CREATE_BUFFER_ERR);
// Buffer constructor variant that reuses a caller-supplied queue (and its
// context) instead of constructing a temporary one.
8400 template< typename IteratorType >
8402 const CommandQueue &queue,
8403 IteratorType startIterator,
8404 IteratorType endIterator,
8409 typedef typename std::iterator_traits<IteratorType>::value_type DataType;
8412 cl_mem_flags flags = 0;
8414 flags |= CL_MEM_READ_ONLY;
8417 flags |= CL_MEM_READ_WRITE;
8420 flags |= CL_MEM_USE_HOST_PTR;
8423 size_type size = sizeof(DataType)*(endIterator - startIterator);
// The buffer must live in the queue's context.
8425 Context context = queue.getInfo<CL_QUEUE_CONTEXT>();
8428 object_ = ::clCreateBuffer(context(), flags, size, static_cast<DataType*>(&*startIterator), &error);
8431 object_ = ::clCreateBuffer(context(), flags, size, 0, &error);
8434 detail::errHandler(error, __CREATE_BUFFER_ERR);
8440 error = cl::copy(queue, startIterator, endIterator, *this);
8441 detail::errHandler(error, __CREATE_BUFFER_ERR);
// Free-function convenience wrapper: forwards to
// CommandQueue::enqueueReadBuffer on the process-wide default queue.
8448 inline cl_int enqueueReadBuffer(
8449 const Buffer& buffer,
8454 const vector<Event>* events = NULL,
8455 Event* event = NULL)
8458 CommandQueue queue = CommandQueue::getDefault(&error);
8460 if (error != CL_SUCCESS) {
8464 return queue.enqueueReadBuffer(buffer, blocking, offset, size, ptr, events, event);
// Free-function convenience wrapper: forwards to
// CommandQueue::enqueueWriteBuffer on the process-wide default queue.
8467 inline cl_int enqueueWriteBuffer(
8468 const Buffer& buffer,
8473 const vector<Event>* events = NULL,
8474 Event* event = NULL)
8477 CommandQueue queue = CommandQueue::getDefault(&error);
8479 if (error != CL_SUCCESS) {
8483 return queue.enqueueWriteBuffer(buffer, blocking, offset, size, ptr, events, event);
// Free-function map on the default queue. Unlike the read/write wrappers
// this one calls clEnqueueMapBuffer directly rather than forwarding to the
// member function.
8486 inline void* enqueueMapBuffer(
8487 const Buffer& buffer,
8492 const vector<Event>* events = NULL,
8493 Event* event = NULL,
8497 CommandQueue queue = CommandQueue::getDefault(&error);
8498 detail::errHandler(error, __ENQUEUE_MAP_BUFFER_ERR);
8503 void * result = ::clEnqueueMapBuffer(
8504 queue(), buffer(), blocking, flags, offset, size,
8505 (events != NULL) ? (cl_uint) events->size() : 0,
8506 (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
8510 detail::errHandler(error, __ENQUEUE_MAP_BUFFER_ERR);
8518 #if CL_HPP_TARGET_OPENCL_VERSION >= 200
8520 * Enqueues to the default queue a command that will allow the host to
8521 * update a region of a coarse-grained SVM buffer.
8522 * This variant takes a raw SVM pointer.
8524 template<typename T>
8525 inline cl_int enqueueMapSVM(
8530 const vector<Event>* events,
8534 CommandQueue queue = CommandQueue::getDefault(&error);
8535 if (error != CL_SUCCESS) {
8536 return detail::errHandler(error, __ENQUEUE_MAP_BUFFER_ERR);
8539 return queue.enqueueMapSVM(
8540 ptr, blocking, flags, size, events, event);
8544 * Enqueues to the default queue a command that will allow the host to
8545 * update a region of a coarse-grained SVM buffer.
8546 * This variant takes a cl::pointer instance.
8548 template<typename T, class D>
8549 inline cl_int enqueueMapSVM(
8550 cl::pointer<T, D> ptr,
8554 const vector<Event>* events = NULL,
8555 Event* event = NULL)
8558 CommandQueue queue = CommandQueue::getDefault(&error);
8559 if (error != CL_SUCCESS) {
8560 return detail::errHandler(error, __ENQUEUE_MAP_BUFFER_ERR);
8563 return queue.enqueueMapSVM(
8564 ptr, blocking, flags, size, events, event);
8568 * Enqueues to the default queue a command that will allow the host to
8569 * update a region of a coarse-grained SVM buffer.
8570 * This variant takes a cl::vector instance.
8572 template<typename T, class Alloc>
8573 inline cl_int enqueueMapSVM(
// NOTE(review): 'container' is taken by value here, unlike the member
// overload which takes a reference — confirm this copy is intended.
8574 cl::vector<T, Alloc> container,
8577 const vector<Event>* events = NULL,
8578 Event* event = NULL)
8581 CommandQueue queue = CommandQueue::getDefault(&error);
8582 if (error != CL_SUCCESS) {
8583 return detail::errHandler(error, __ENQUEUE_MAP_BUFFER_ERR);
8586 return queue.enqueueMapSVM(
8587 container, blocking, flags, events, event);
8590 #endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 200
// Free-function unmap on the default queue; calls clEnqueueUnmapMemObject
// directly with the queue's raw handle.
8592 inline cl_int enqueueUnmapMemObject(
8593 const Memory& memory,
8595 const vector<Event>* events = NULL,
8596 Event* event = NULL)
8599 CommandQueue queue = CommandQueue::getDefault(&error);
8600 detail::errHandler(error, __ENQUEUE_MAP_BUFFER_ERR);
8601 if (error != CL_SUCCESS) {
8606 cl_int err = detail::errHandler(
8607 ::clEnqueueUnmapMemObject(
8608 queue(), memory(), mapped_ptr,
8609 (events != NULL) ? (cl_uint)events->size() : 0,
8610 (events != NULL && events->size() > 0) ? (cl_event*)&events->front() : NULL,
8611 (event != NULL) ? &tmp : NULL),
8612 __ENQUEUE_UNMAP_MEM_OBJECT_ERR);
8614 if (event != NULL && err == CL_SUCCESS)
8620 #if CL_HPP_TARGET_OPENCL_VERSION >= 200
8622 * Enqueues to the default queue a command that will release a coarse-grained
8623 * SVM buffer back to the OpenCL runtime.
8624 * This variant takes a raw SVM pointer.
8626 template<typename T>
8627 inline cl_int enqueueUnmapSVM(
8629 const vector<Event>* events = NULL,
8630 Event* event = NULL)
8633 CommandQueue queue = CommandQueue::getDefault(&error);
8634 if (error != CL_SUCCESS) {
8635 return detail::errHandler(error, __ENQUEUE_UNMAP_MEM_OBJECT_ERR);
8638 return detail::errHandler(queue.enqueueUnmapSVM(ptr, events, event),
8639 __ENQUEUE_UNMAP_MEM_OBJECT_ERR);
8644 * Enqueues to the default queue a command that will release a coarse-grained
8645 * SVM buffer back to the OpenCL runtime.
8646 * This variant takes a cl::pointer instance.
8648 template<typename T, class D>
8649 inline cl_int enqueueUnmapSVM(
8650 cl::pointer<T, D> &ptr,
8651 const vector<Event>* events = NULL,
8652 Event* event = NULL)
8655 CommandQueue queue = CommandQueue::getDefault(&error);
8656 if (error != CL_SUCCESS) {
8657 return detail::errHandler(error, __ENQUEUE_UNMAP_MEM_OBJECT_ERR);
8660 return detail::errHandler(queue.enqueueUnmapSVM(ptr, events, event),
8661 __ENQUEUE_UNMAP_MEM_OBJECT_ERR);
8665 * Enqueues to the default queue a command that will release a coarse-grained
8666 * SVM buffer back to the OpenCL runtime.
8667 * This variant takes a cl::vector instance.
8669 template<typename T, class Alloc>
8670 inline cl_int enqueueUnmapSVM(
8671 cl::vector<T, Alloc> &container,
8672 const vector<Event>* events = NULL,
8673 Event* event = NULL)
8676 CommandQueue queue = CommandQueue::getDefault(&error);
8677 if (error != CL_SUCCESS) {
8678 return detail::errHandler(error, __ENQUEUE_UNMAP_MEM_OBJECT_ERR);
8681 return detail::errHandler(queue.enqueueUnmapSVM(container, events, event),
8682 __ENQUEUE_UNMAP_MEM_OBJECT_ERR);
8685 #endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 200
// Free-function buffer-to-buffer copy: forwards to
// CommandQueue::enqueueCopyBuffer on the process-wide default queue.
8687 inline cl_int enqueueCopyBuffer(
8690 size_type src_offset,
8691 size_type dst_offset,
8693 const vector<Event>* events = NULL,
8694 Event* event = NULL)
8697 CommandQueue queue = CommandQueue::getDefault(&error);
8699 if (error != CL_SUCCESS) {
8703 return queue.enqueueCopyBuffer(src, dst, src_offset, dst_offset, size, events, event);
8707 * Blocking copy operation between iterators and a buffer.
8709 * Uses default command queue.
8711 template< typename IteratorType >
8712 inline cl_int copy( IteratorType startIterator, IteratorType endIterator, cl::Buffer &buffer )
8715 CommandQueue queue = CommandQueue::getDefault(&error);
8716 if (error != CL_SUCCESS)
// Delegates to the queue-taking overload below.
8719 return cl::copy(queue, startIterator, endIterator, buffer);
8723 * Blocking copy operation between iterators and a buffer.
8725 * Uses default command queue.
8727 template< typename IteratorType >
8728 inline cl_int copy( const cl::Buffer &buffer, IteratorType startIterator, IteratorType endIterator )
8731 CommandQueue queue = CommandQueue::getDefault(&error);
8732 if (error != CL_SUCCESS)
// Delegates to the queue-taking overload below.
8735 return cl::copy(queue, buffer, startIterator, endIterator);
8739 * Blocking copy operation between iterators and a buffer.
8741 * Uses specified queue.
8743 template< typename IteratorType >
8744 inline cl_int copy( const CommandQueue &queue, IteratorType startIterator, IteratorType endIterator, cl::Buffer &buffer )
8746 typedef typename std::iterator_traits<IteratorType>::value_type DataType;
8749 size_type length = endIterator-startIterator;
8750 size_type byteLength = length*sizeof(DataType);
// Blocking map-for-write, host-side std::copy into the mapped region,
// then unmap; the copy is complete once the unmap event finishes.
8753 static_cast<DataType*>(queue.enqueueMapBuffer(buffer, CL_TRUE, CL_MAP_WRITE, 0, byteLength, 0, 0, &error));
8754 // if exceptions enabled, enqueueMapBuffer will throw
8755 if( error != CL_SUCCESS ) {
8758 #if defined(_MSC_VER)
// MSVC's checked iterators would flag a raw-pointer std::copy; wrap it.
8762 stdext::checked_array_iterator<DataType*>(
8765 std::copy(startIterator, endIterator, pointer);
8768 error = queue.enqueueUnmapMemObject(buffer, pointer, 0, &endEvent);
8769 // if exceptions enabled, enqueueUnmapMemObject will throw
8770 if( error != CL_SUCCESS ) {
8778 * Blocking copy operation between iterators and a buffer.
8780 * Uses specified queue.
8782 template< typename IteratorType >
8783 inline cl_int copy( const CommandQueue &queue, const cl::Buffer &buffer, IteratorType startIterator, IteratorType endIterator )
8785 typedef typename std::iterator_traits<IteratorType>::value_type DataType;
8788 size_type length = endIterator-startIterator;
8789 size_type byteLength = length*sizeof(DataType);
8792 static_cast<DataType*>(queue.enqueueMapBuffer(buffer, CL_TRUE, CL_MAP_READ, 0, byteLength, 0, 0, &error));
8793 // if exceptions enabled, enqueueMapBuffer will throw
8794 if( error != CL_SUCCESS ) {
8797 std::copy(pointer, pointer + length, startIterator);
8799 error = queue.enqueueUnmapMemObject(buffer, pointer, 0, &endEvent);
8800 // if exceptions enabled, enqueueUnmapMemObject will throw
8801 if( error != CL_SUCCESS ) {
8809 #if CL_HPP_TARGET_OPENCL_VERSION >= 200
8811 * Blocking SVM map operation - performs a blocking map underneath.
8813 template<typename T, class Alloc>
8814 inline cl_int mapSVM(cl::vector<T, Alloc> &container)
8816 return enqueueMapSVM(container, CL_TRUE, CL_MAP_READ | CL_MAP_WRITE);
8820 * Blocking SVM map operation - performs a blocking map underneath.
8822 template<typename T, class Alloc>
8823 inline cl_int unmapSVM(cl::vector<T, Alloc> &container)
8825 return enqueueUnmapSVM(container);
8828 #endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 200
8830 #if CL_HPP_TARGET_OPENCL_VERSION >= 110
8831 inline cl_int enqueueReadBufferRect(
8832 const Buffer& buffer,
8834 const array<size_type, 3>& buffer_offset,
8835 const array<size_type, 3>& host_offset,
8836 const array<size_type, 3>& region,
8837 size_type buffer_row_pitch,
8838 size_type buffer_slice_pitch,
8839 size_type host_row_pitch,
8840 size_type host_slice_pitch,
8842 const vector<Event>* events = NULL,
8843 Event* event = NULL)
8846 CommandQueue queue = CommandQueue::getDefault(&error);
8848 if (error != CL_SUCCESS) {
8852 return queue.enqueueReadBufferRect(
8867 inline cl_int enqueueWriteBufferRect(
8868 const Buffer& buffer,
8870 const array<size_type, 3>& buffer_offset,
8871 const array<size_type, 3>& host_offset,
8872 const array<size_type, 3>& region,
8873 size_type buffer_row_pitch,
8874 size_type buffer_slice_pitch,
8875 size_type host_row_pitch,
8876 size_type host_slice_pitch,
8878 const vector<Event>* events = NULL,
8879 Event* event = NULL)
8882 CommandQueue queue = CommandQueue::getDefault(&error);
8884 if (error != CL_SUCCESS) {
8888 return queue.enqueueWriteBufferRect(
8903 inline cl_int enqueueCopyBufferRect(
8906 const array<size_type, 3>& src_origin,
8907 const array<size_type, 3>& dst_origin,
8908 const array<size_type, 3>& region,
8909 size_type src_row_pitch,
8910 size_type src_slice_pitch,
8911 size_type dst_row_pitch,
8912 size_type dst_slice_pitch,
8913 const vector<Event>* events = NULL,
8914 Event* event = NULL)
8917 CommandQueue queue = CommandQueue::getDefault(&error);
8919 if (error != CL_SUCCESS) {
8923 return queue.enqueueCopyBufferRect(
8936 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 110
8938 inline cl_int enqueueReadImage(
8941 const array<size_type, 3>& origin,
8942 const array<size_type, 3>& region,
8943 size_type row_pitch,
8944 size_type slice_pitch,
8946 const vector<Event>* events = NULL,
8947 Event* event = NULL)
8950 CommandQueue queue = CommandQueue::getDefault(&error);
8952 if (error != CL_SUCCESS) {
8956 return queue.enqueueReadImage(
8968 inline cl_int enqueueWriteImage(
8971 const array<size_type, 3>& origin,
8972 const array<size_type, 3>& region,
8973 size_type row_pitch,
8974 size_type slice_pitch,
8976 const vector<Event>* events = NULL,
8977 Event* event = NULL)
8980 CommandQueue queue = CommandQueue::getDefault(&error);
8982 if (error != CL_SUCCESS) {
8986 return queue.enqueueWriteImage(
8998 inline cl_int enqueueCopyImage(
9001 const array<size_type, 3>& src_origin,
9002 const array<size_type, 3>& dst_origin,
9003 const array<size_type, 3>& region,
9004 const vector<Event>* events = NULL,
9005 Event* event = NULL)
9008 CommandQueue queue = CommandQueue::getDefault(&error);
9010 if (error != CL_SUCCESS) {
9014 return queue.enqueueCopyImage(
9024 inline cl_int enqueueCopyImageToBuffer(
9027 const array<size_type, 3>& src_origin,
9028 const array<size_type, 3>& region,
9029 size_type dst_offset,
9030 const vector<Event>* events = NULL,
9031 Event* event = NULL)
9034 CommandQueue queue = CommandQueue::getDefault(&error);
9036 if (error != CL_SUCCESS) {
9040 return queue.enqueueCopyImageToBuffer(
9050 inline cl_int enqueueCopyBufferToImage(
9053 size_type src_offset,
9054 const array<size_type, 3>& dst_origin,
9055 const array<size_type, 3>& region,
9056 const vector<Event>* events = NULL,
9057 Event* event = NULL)
9060 CommandQueue queue = CommandQueue::getDefault(&error);
9062 if (error != CL_SUCCESS) {
9066 return queue.enqueueCopyBufferToImage(
9077 inline cl_int flush(void)
9080 CommandQueue queue = CommandQueue::getDefault(&error);
9082 if (error != CL_SUCCESS) {
9086 return queue.flush();
9089 inline cl_int finish(void)
9092 CommandQueue queue = CommandQueue::getDefault(&error);
9094 if (error != CL_SUCCESS) {
9099 return queue.finish();
9105 CommandQueue queue_;
9106 const NDRange offset_;
9107 const NDRange global_;
9108 const NDRange local_;
9109 vector<Event> events_;
9111 template<typename... Ts>
9112 friend class KernelFunctor;
9115 EnqueueArgs(NDRange global) :
9116 queue_(CommandQueue::getDefault()),
9124 EnqueueArgs(NDRange global, NDRange local) :
9125 queue_(CommandQueue::getDefault()),
9133 EnqueueArgs(NDRange offset, NDRange global, NDRange local) :
9134 queue_(CommandQueue::getDefault()),
9142 EnqueueArgs(Event e, NDRange global) :
9143 queue_(CommandQueue::getDefault()),
9148 events_.push_back(e);
9151 EnqueueArgs(Event e, NDRange global, NDRange local) :
9152 queue_(CommandQueue::getDefault()),
9157 events_.push_back(e);
9160 EnqueueArgs(Event e, NDRange offset, NDRange global, NDRange local) :
9161 queue_(CommandQueue::getDefault()),
9166 events_.push_back(e);
9169 EnqueueArgs(const vector<Event> &events, NDRange global) :
9170 queue_(CommandQueue::getDefault()),
9179 EnqueueArgs(const vector<Event> &events, NDRange global, NDRange local) :
9180 queue_(CommandQueue::getDefault()),
9189 EnqueueArgs(const vector<Event> &events, NDRange offset, NDRange global, NDRange local) :
9190 queue_(CommandQueue::getDefault()),
9199 EnqueueArgs(CommandQueue &queue, NDRange global) :
9208 EnqueueArgs(CommandQueue &queue, NDRange global, NDRange local) :
9217 EnqueueArgs(CommandQueue &queue, NDRange offset, NDRange global, NDRange local) :
9226 EnqueueArgs(CommandQueue &queue, Event e, NDRange global) :
9232 events_.push_back(e);
9235 EnqueueArgs(CommandQueue &queue, Event e, NDRange global, NDRange local) :
9241 events_.push_back(e);
9244 EnqueueArgs(CommandQueue &queue, Event e, NDRange offset, NDRange global, NDRange local) :
9250 events_.push_back(e);
9253 EnqueueArgs(CommandQueue &queue, const vector<Event> &events, NDRange global) :
9263 EnqueueArgs(CommandQueue &queue, const vector<Event> &events, NDRange global, NDRange local) :
9273 EnqueueArgs(CommandQueue &queue, const vector<Event> &events, NDRange offset, NDRange global, NDRange local) :
9285 //----------------------------------------------------------------------------------------------
9289 * Type safe kernel functor.
9292 template<typename... Ts>
9298 template<int index, typename T0, typename... T1s>
9299 void setArgs(T0&& t0, T1s&&... t1s)
9301 kernel_.setArg(index, t0);
9302 setArgs<index + 1, T1s...>(std::forward<T1s>(t1s)...);
9305 template<int index, typename T0>
9306 void setArgs(T0&& t0)
9308 kernel_.setArg(index, t0);
9318 KernelFunctor(Kernel kernel) : kernel_(kernel)
9322 const Program& program,
9324 cl_int * err = NULL) :
9325 kernel_(program, name.c_str(), err)
9328 //! \brief Return type of the functor
9329 typedef Event result_type;
9333 * @param args Launch parameters of the kernel.
9334 * @param t0... List of kernel arguments based on the template type of the functor.
9337 const EnqueueArgs& args,
9341 setArgs<0>(std::forward<Ts>(ts)...);
9343 args.queue_.enqueueNDRangeKernel(
9355 * Enqueue kernel with support for error code.
9356 * @param args Launch parameters of the kernel.
9357 * @param t0... List of kernel arguments based on the template type of the functor.
9358 * @param error Out parameter returning the error code from the execution.
9361 const EnqueueArgs& args,
9366 setArgs<0>(std::forward<Ts>(ts)...);
9368 error = args.queue_.enqueueNDRangeKernel(
9379 #if CL_HPP_TARGET_OPENCL_VERSION >= 200
9380 cl_int setSVMPointers(const vector<void*> &pointerList)
9382 return kernel_.setSVMPointers(pointerList);
9385 template<typename T0, typename... T1s>
9386 cl_int setSVMPointers(const T0 &t0, T1s... ts)
9388 return kernel_.setSVMPointers(t0, ts...);
9390 #endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 200
9398 namespace compatibility {
9400 * Backward compatibility class to ensure that cl.hpp code works with cl2.hpp.
9401 * Please use KernelFunctor directly.
9403 template<typename... Ts>
9406 typedef KernelFunctor<Ts...> FunctorType;
9408 FunctorType functor_;
9411 const Program& program,
9413 cl_int * err = NULL) :
9414 functor_(FunctorType(program, name, err))
9418 const Kernel kernel) :
9419 functor_(FunctorType(kernel))
9422 //! \brief Return type of the functor
9423 typedef Event result_type;
9425 //! \brief Function signature of kernel functor with no event dependency.
9426 typedef Event type_(
9431 const EnqueueArgs& enqueueArgs,
9435 enqueueArgs, args...);
9438 } // namespace compatibility
9441 //----------------------------------------------------------------------------------------------------------------------
9443 #undef CL_HPP_ERR_STR_
9444 #if !defined(CL_HPP_USER_OVERRIDE_ERROR_STRINGS)
9445 #undef __GET_DEVICE_INFO_ERR
9446 #undef __GET_PLATFORM_INFO_ERR
9447 #undef __GET_DEVICE_IDS_ERR
9448 #undef __GET_CONTEXT_INFO_ERR
9449 #undef __GET_EVENT_INFO_ERR
9450 #undef __GET_EVENT_PROFILE_INFO_ERR
9451 #undef __GET_MEM_OBJECT_INFO_ERR
9452 #undef __GET_IMAGE_INFO_ERR
9453 #undef __GET_SAMPLER_INFO_ERR
9454 #undef __GET_KERNEL_INFO_ERR
9455 #undef __GET_KERNEL_ARG_INFO_ERR
9456 #undef __GET_KERNEL_WORK_GROUP_INFO_ERR
9457 #undef __GET_PROGRAM_INFO_ERR
9458 #undef __GET_PROGRAM_BUILD_INFO_ERR
9459 #undef __GET_COMMAND_QUEUE_INFO_ERR
9461 #undef __CREATE_CONTEXT_ERR
9462 #undef __CREATE_CONTEXT_FROM_TYPE_ERR
9463 #undef __GET_SUPPORTED_IMAGE_FORMATS_ERR
9465 #undef __CREATE_BUFFER_ERR
9466 #undef __CREATE_SUBBUFFER_ERR
9467 #undef __CREATE_IMAGE2D_ERR
9468 #undef __CREATE_IMAGE3D_ERR
9469 #undef __CREATE_SAMPLER_ERR
9470 #undef __SET_MEM_OBJECT_DESTRUCTOR_CALLBACK_ERR
9472 #undef __CREATE_USER_EVENT_ERR
9473 #undef __SET_USER_EVENT_STATUS_ERR
9474 #undef __SET_EVENT_CALLBACK_ERR
9475 #undef __SET_PRINTF_CALLBACK_ERR
9477 #undef __WAIT_FOR_EVENTS_ERR
9479 #undef __CREATE_KERNEL_ERR
9480 #undef __SET_KERNEL_ARGS_ERR
9481 #undef __CREATE_PROGRAM_WITH_SOURCE_ERR
9482 #undef __CREATE_PROGRAM_WITH_BINARY_ERR
9483 #undef __CREATE_PROGRAM_WITH_BUILT_IN_KERNELS_ERR
9484 #undef __BUILD_PROGRAM_ERR
9485 #undef __CREATE_KERNELS_IN_PROGRAM_ERR
9487 #undef __CREATE_COMMAND_QUEUE_ERR
9488 #undef __SET_COMMAND_QUEUE_PROPERTY_ERR
9489 #undef __ENQUEUE_READ_BUFFER_ERR
9490 #undef __ENQUEUE_WRITE_BUFFER_ERR
9491 #undef __ENQUEUE_READ_BUFFER_RECT_ERR
9492 #undef __ENQUEUE_WRITE_BUFFER_RECT_ERR
9493 #undef __ENQEUE_COPY_BUFFER_ERR
9494 #undef __ENQEUE_COPY_BUFFER_RECT_ERR
9495 #undef __ENQUEUE_READ_IMAGE_ERR
9496 #undef __ENQUEUE_WRITE_IMAGE_ERR
9497 #undef __ENQUEUE_COPY_IMAGE_ERR
9498 #undef __ENQUEUE_COPY_IMAGE_TO_BUFFER_ERR
9499 #undef __ENQUEUE_COPY_BUFFER_TO_IMAGE_ERR
9500 #undef __ENQUEUE_MAP_BUFFER_ERR
9501 #undef __ENQUEUE_MAP_IMAGE_ERR
9502 #undef __ENQUEUE_UNMAP_MEM_OBJECT_ERR
9503 #undef __ENQUEUE_NDRANGE_KERNEL_ERR
9504 #undef __ENQUEUE_TASK_ERR
9505 #undef __ENQUEUE_NATIVE_KERNEL
9507 #undef __UNLOAD_COMPILER_ERR
9508 #undef __CREATE_SUB_DEVICES_ERR
9510 #undef __CREATE_PIPE_ERR
9511 #undef __GET_PIPE_INFO_ERR
9513 #endif //CL_HPP_USER_OVERRIDE_ERROR_STRINGS
9516 #undef CL_HPP_INIT_CL_EXT_FCN_PTR_
9517 #undef CL_HPP_INIT_CL_EXT_FCN_PTR_PLATFORM_
9519 #if defined(CL_HPP_USE_CL_DEVICE_FISSION)
9520 #undef CL_HPP_PARAM_NAME_DEVICE_FISSION_
9521 #endif // CL_HPP_USE_CL_DEVICE_FISSION
9523 #undef CL_HPP_NOEXCEPT_
9524 #undef CL_HPP_DEFINE_STATIC_MEMBER_