   *map = intel_canonical_address(next_bbo->bo->offset + next_bbo_offset);
#ifdef SUPPORT_INTEL_INTEGRATED_GPUS
-  if (cmd_buffer->device->physical->memory.need_clflush)
+  if (cmd_buffer->device->physical->memory.need_flush)
      intel_flush_range(map, sizeof(uint64_t));
#endif
}
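For reference, the canonical-address helper used above sign-extends the top bit of the 48-bit GPU virtual address into the upper bits, as the hardware requires. A minimal sketch, assuming 48 address bits:

/* Sketch: make a 48-bit GPU virtual address "canonical" by
 * sign-extending bit 47 into bits 63:48. */
static inline uint64_t
intel_canonical_address(uint64_t v)
{
   const int shift = 63 - 47;
   return (uint64_t)((int64_t)(v << shift) >> shift);
}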
   memcpy(batch_bo->map, batch->start, batch_size);
#ifdef SUPPORT_INTEL_INTEGRATED_GPUS
-  if (device->physical->memory.need_clflush)
+  if (device->physical->memory.need_flush)
      intel_flush_range(batch_bo->map, batch_size);
#endif
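The flush helper pairs per-cache-line flushes with memory fences so the CPU writes above actually reach memory before the GPU reads the batch. Roughly this shape, assuming 64-byte cache lines:

#include <stddef.h>
#include <stdint.h>

#define CACHELINE_SIZE 64

/* clflush every cache line overlapping [start, start + size). */
static void
intel_clflush_range(void *start, size_t size)
{
   char *p = (char *)((uintptr_t)start & ~((uintptr_t)CACHELINE_SIZE - 1));
   char *end = (char *)start + size;

   while (p < end) {
      __builtin_ia32_clflush(p);
      p += CACHELINE_SIZE;
   }
}

/* Fence, flush, fence: the first mfence ensures prior stores have landed
 * before they are flushed, the second that the flushes complete before
 * anything that depends on them. */
static void
intel_flush_range(void *start, size_t size)
{
   __builtin_ia32_mfence();
   intel_clflush_range(start, size);
   __builtin_ia32_mfence();
}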
      if ((props & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) &&
          !(props & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT))
#ifdef SUPPORT_INTEL_INTEGRATED_GPUS
-        device->memory.need_clflush = true;
+        device->memory.need_flush = true;
#else
         return vk_errorf(device, VK_ERROR_INITIALIZATION_FAILED,
                          "Memory configuration requires flushing, but it's "
                          "not implemented for this architecture");
#endif
   anv_batch_emit(&batch, GFX7_MI_NOOP, noop);
#ifdef SUPPORT_INTEL_INTEGRATED_GPUS
-  if (device->physical->memory.need_clflush)
+  if (device->physical->memory.need_flush)
      intel_clflush_range(batch.start, batch.next - batch.start);
#endif
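This hunk appears to be the device's trivial batch: a buffer written once by the CPU and later executed by the GPU, so it too needs one flush. A sketch of the plausible surrounding setup; the trivial_batch_bo name and the anv_batch start/next/end fields are assumptions here:

/* Hypothetical setup around the emits above: point an anv_batch at a
 * freshly mapped bo, then write MI_BATCH_BUFFER_END plus an MI_NOOP
 * for qword padding. */
struct anv_batch batch = {
   .start = device->trivial_batch_bo->map,
   .next  = device->trivial_batch_bo->map,
   .end   = device->trivial_batch_bo->map + device->trivial_batch_bo->size,
};

anv_batch_emit(&batch, GFX7_MI_BATCH_BUFFER_END, bbe);
anv_batch_emit(&batch, GFX7_MI_NOOP, noop);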
#ifdef SUPPORT_INTEL_INTEGRATED_GPUS
   ANV_FROM_HANDLE(anv_device, device, _device);
-  if (!device->physical->memory.need_clflush)
+  if (!device->physical->memory.need_flush)
      return VK_SUCCESS;
   /* Make sure the writes we're flushing have landed. */
#ifdef SUPPORT_INTEL_INTEGRATED_GPUS
   ANV_FROM_HANDLE(anv_device, device, _device);
-  if (!device->physical->memory.need_clflush)
+  if (!device->physical->memory.need_flush)
      return VK_SUCCESS;
   for (uint32_t i = 0; i < memoryRangeCount; i++) {
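Both mapped-memory-range entry points then walk the ranges and flush only the non-coherent mappings. A sketch of a plausible loop body; mem->map, mem->map_size, and mem->type are assumed field names for illustration:

for (uint32_t i = 0; i < memoryRangeCount; i++) {
   ANV_FROM_HANDLE(anv_device_memory, mem, pMemoryRanges[i].memory);

   /* Coherent types never need a manual flush. */
   if (mem->type->propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)
      continue;

   uint64_t offset = pMemoryRanges[i].offset;
   uint64_t size = pMemoryRanges[i].size;
   if (size == VK_WHOLE_SIZE)
      size = mem->map_size - offset;

   intel_clflush_range((char *)mem->map + offset, size);
}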
      uint32_t heap_count;
      struct anv_memory_heap heaps[VK_MAX_MEMORY_HEAPS];
#ifdef SUPPORT_INTEL_INTEGRATED_GPUS
-     bool need_clflush;
+     bool need_flush;
#endif
   } memory;
   memset(bo->map, 0, bo->size);
#ifdef SUPPORT_INTEL_INTEGRATED_GPUS
-  if (device->physical->memory.need_clflush)
+  if (device->physical->memory.need_flush)
      intel_clflush_range(bo->map, bo->size);
#endif
   if (device->debug_frame_desc) {
      device->debug_frame_desc->frame_id++;
#ifdef SUPPORT_INTEL_INTEGRATED_GPUS
-     if (device->physical->memory.need_clflush) {
+     if (device->physical->memory.need_flush) {
         intel_clflush_range(device->debug_frame_desc,
                             sizeof(*device->debug_frame_desc));
      }
#endif
   }
#ifdef SUPPORT_INTEL_INTEGRATED_GPUS
-  if (device->physical->memory.need_clflush)
+  if (device->physical->memory.need_flush)
      anv_cmd_buffer_clflush(cmd_buffers, num_cmd_buffers);
#endif
}
#ifdef SUPPORT_INTEL_INTEGRATED_GPUS
-  if (device->physical->memory.need_clflush)
+  if (device->physical->memory.need_flush)
      intel_flush_range(submit->batch_bo->map, submit->batch_bo->size);
#endif
   xe_exec_fill_sync(&xe_sync, utrace_submit->sync, 0, TYPE_SIGNAL);
#ifdef SUPPORT_INTEL_INTEGRATED_GPUS
-  if (device->physical->memory.need_clflush)
+  if (device->physical->memory.need_flush)
      intel_flush_range(utrace_submit->batch_bo->map,
                        utrace_submit->batch_bo->size);
#endif
   anv_cmd_buffer_chain_command_buffers(cmd_buffers, cmd_buffer_count);
#ifdef SUPPORT_INTEL_INTEGRATED_GPUS
-  if (device->physical->memory.need_clflush)
+  if (device->physical->memory.need_flush)
      anv_cmd_buffer_clflush(cmd_buffers, cmd_buffer_count);
#endif
   anv_cmd_buffer_process_relocs(cmd_buffers[0], &cmd_buffers[0]->surface_relocs);
}
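The anv_cmd_buffer_clflush helper called in the hunks above plausibly fences once, then flushes every batch bo the command buffers touched, matching the mfence/seen_bbos fragments further down. A sketch; the (*bbo)->length field is an assumption:

static void
anv_cmd_buffer_clflush(struct anv_cmd_buffer **cmd_buffers,
                       uint32_t num_cmd_buffers)
{
#ifdef SUPPORT_INTEL_INTEGRATED_GPUS
   struct anv_batch_bo **bbo;

   /* One fence up front so prior CPU writes have landed, then flush
    * every batch bo seen by any of the command buffers. */
   __builtin_ia32_mfence();

   for (uint32_t i = 0; i < num_cmd_buffers; i++) {
      u_vector_foreach(bbo, &cmd_buffers[i]->seen_bbos) {
         intel_clflush_range((*bbo)->bo->map, (*bbo)->length);
      }
   }
#endif
}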
-  if (device->physical->memory.need_clflush) {
+  if (device->physical->memory.need_flush) {
      __builtin_ia32_mfence();
      for (uint32_t i = 0; i < num_cmd_buffers; i++) {
         u_vector_foreach(bbo, &cmd_buffers[i]->seen_bbos) {
      flush->batch_bo->exec_obj_index = last_idx;
   }
-  if (device->physical->memory.need_clflush)
+  if (device->physical->memory.need_flush)
      intel_flush_range(flush->batch_bo->map, flush->batch_bo->size);
   execbuf->execbuf = (struct drm_i915_gem_execbuffer2) {
   return result;
   memcpy(batch_bo->map, batch->start, batch_size);
-  if (device->physical->memory.need_clflush)
+  if (device->physical->memory.need_flush)
      intel_flush_range(batch_bo->map, batch_size);
   struct anv_execbuf execbuf = {
   };
}
-  device->memory.need_clflush = false;
+  device->memory.need_flush = false;
   for (unsigned i = 0; i < device->memory.type_count; i++) {
      VkMemoryPropertyFlags props = device->memory.types[i].propertyFlags;
      if ((props & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) &&
          !(props & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT))
-        device->memory.need_clflush = true;
+        device->memory.need_flush = true;
   }
   return VK_SUCCESS;
   anv_batch_emit(&batch, GFX7_MI_BATCH_BUFFER_END, bbe);
   anv_batch_emit(&batch, GFX7_MI_NOOP, noop);
-  if (device->physical->memory.need_clflush)
+  if (device->physical->memory.need_flush)
      intel_clflush_range(batch.start, batch.next - batch.start);
   return VK_SUCCESS;
{
   ANV_FROM_HANDLE(anv_device, device, _device);
-  if (!device->physical->memory.need_clflush)
+  if (!device->physical->memory.need_flush)
      return VK_SUCCESS;
   /* Make sure the writes we're flushing have landed. */
{
   ANV_FROM_HANDLE(anv_device, device, _device);
-  if (!device->physical->memory.need_clflush)
+  if (!device->physical->memory.need_flush)
      return VK_SUCCESS;
   for (uint32_t i = 0; i < memoryRangeCount; i++) {
      struct anv_memory_type types[VK_MAX_MEMORY_TYPES];
      uint32_t heap_count;
      struct anv_memory_heap heaps[VK_MAX_MEMORY_HEAPS];
-     bool need_clflush;
+     bool need_flush;
   } memory;
   struct anv_memregion sys;
      *(uint32_t *)p = v;
   }
-  if (flush && device->physical->memory.need_clflush)
+  if (flush && device->physical->memory.need_flush)
      intel_flush_range(p, reloc_size);
}
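The 32-bit store and conditional flush above are the tail of a relocation writer; the 64-bit path would write a canonical address, which is where intel_canonical_address from the first hunk comes in. A plausible full shape, with device->info.ver and the ver >= 8 split being assumptions:

static void
write_reloc(const struct anv_device *device, void *p, uint64_t v, bool flush)
{
   unsigned reloc_size = 0;
   if (device->info.ver >= 8) {
      /* 48-bit addressing: the address must be canonical. */
      reloc_size = sizeof(uint64_t);
      *(uint64_t *)p = intel_canonical_address(v);
   } else {
      reloc_size = sizeof(uint32_t);
      *(uint32_t *)p = v;
   }

   if (flush && device->physical->memory.need_flush)
      intel_flush_range(p, reloc_size);
}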
   if (device->debug_frame_desc) {
      device->debug_frame_desc->frame_id++;
-     if (device->physical->memory.need_clflush) {
+     if (device->physical->memory.need_flush) {
         intel_clflush_range(device->debug_frame_desc,
                             sizeof(*device->debug_frame_desc));
      }