Implement the clEnqueueMigrateMemObjects API
authorJunyan He <junyan.he@linux.intel.com>
Wed, 11 Jun 2014 01:33:36 +0000 (09:33 +0800)
committerZhigang Gong <zhigang.gong@intel.com>
Fri, 13 Jun 2014 09:20:25 +0000 (17:20 +0800)
So far we support only one device and no sub-devices,
so all command queues belong to the same context.
There is therefore no need to migrate memory objects between
sub-contexts for now; we just perform the checks and fill the event.

Signed-off-by: Junyan He <junyan.he@linux.intel.com>
Reviewed-by: Zhigang Gong <zhigang.gong@linux.intel.com>
src/cl_api.c
src/cl_enqueue.c
src/cl_enqueue.h
src/cl_khr_icd.c

index 25b5870..96c102c 100644 (file)
@@ -2681,6 +2681,58 @@ error:
 }
 
 cl_int
+clEnqueueMigrateMemObjects(cl_command_queue        command_queue,
+                           cl_uint                 num_mem_objects,
+                           const cl_mem *          mem_objects,
+                           cl_mem_migration_flags  flags,
+                           cl_uint                 num_events_in_wait_list,
+                           const cl_event *        event_wait_list,
+                           cl_event *              event)
+{
+  /* So far, we just support 1 device and no subdevice. So all the command queues
+     belong to the same context. There is no need to migrate the mem objects by now. */
+  cl_int err = CL_SUCCESS;
+  cl_uint i = 0;
+  enqueue_data *data, defer_enqueue_data = { 0 };
+
+  /* The queue must always be valid, regardless of the migration flags. */
+  CHECK_QUEUE(command_queue);
+
+  if (num_mem_objects == 0 || mem_objects == NULL) {
+    err = CL_INVALID_VALUE;
+    goto error;
+  }
+
+  if (flags & ~(CL_MIGRATE_MEM_OBJECT_HOST |
+                CL_MIGRATE_MEM_OBJECT_CONTENT_UNDEFINED)) {
+    err = CL_INVALID_VALUE;
+    goto error;
+  }
+
+  for (i = 0; i < num_mem_objects; i++) {
+    CHECK_MEM(mem_objects[i]);
+    if (mem_objects[i]->ctx != command_queue->ctx) {
+      err = CL_INVALID_CONTEXT;
+      goto error;
+    }
+  }
+
+  /* really nothing to do, fill the event. */
+  TRY(cl_event_check_waitlist, num_events_in_wait_list, event_wait_list, event, command_queue->ctx);
+  data = &defer_enqueue_data;
+  data->type = EnqueueMigrateMemObj;
+
+  if(handle_events(command_queue, num_events_in_wait_list, event_wait_list,
+                   event, data, CL_COMMAND_MIGRATE_MEM_OBJECTS) == CL_ENQUEUE_EXECUTE_IMM) {
+    err = cl_enqueue_handle(event ? *event : NULL, data);
+    if(event) cl_event_set_status(*event, CL_COMPLETE);
+  }
+
+error:
+  return err;
+}
+
+cl_int
 clEnqueueNDRangeKernel(cl_command_queue  command_queue,
                        cl_kernel         kernel,
                        cl_uint           work_dim,
index 4ec26c0..ff6ec9e 100644 (file)
@@ -426,6 +426,7 @@ cl_int cl_enqueue_handle(cl_event event, enqueue_data* data)
       return CL_SUCCESS;
     case EnqueueNativeKernel:
       return cl_enqueue_native_kernel(data);
+    case EnqueueMigrateMemObj:
     default:
       return CL_SUCCESS;
   }
index 1960cc2..6527602 100644 (file)
@@ -43,6 +43,7 @@ typedef enum {
   EnqueueMarker,
   EnqueueBarrier,
   EnqueueFillBuffer,
+  EnqueueMigrateMemObj,
   EnqueueInvalid
 } enqueue_type;
 
index f55873f..92041d7 100644 (file)
@@ -159,7 +159,7 @@ struct _cl_icd_dispatch const cl_khr_icd_dispatch = {
   clGetKernelArgInfo,
   clEnqueueFillBuffer,
   CL_1_2_NOTYET(clEnqueueFillImage),
-  CL_1_2_NOTYET(clEnqueueMigrateMemObjects),
+  clEnqueueMigrateMemObjects,
   clEnqueueMarkerWithWaitList,
   clEnqueueBarrierWithWaitList,
   CL_1_2_NOTYET(clGetExtensionFunctionAddressForPlatform),