}
static struct nil_extent4d
-nil_extent4d_align(struct nil_extent4d ext, struct nil_extent4d align)
+nil_extent4d_align(struct nil_extent4d ext, struct nil_extent4d alignment)
{
return (struct nil_extent4d) {
- .w = ALIGN_POT(ext.w, align.w),
- .h = ALIGN_POT(ext.h, align.h),
- .d = ALIGN_POT(ext.d, align.d),
- .a = ALIGN_POT(ext.a, align.a),
+ .w = align(ext.w, alignment.w),
+ .h = align(ext.h, alignment.h),
+ .d = align(ext.d, alignment.d),
+ .a = align(ext.a, alignment.a),
};
}
if (buffer->vk.size > 0 &&
(buffer->vk.create_flags & VK_BUFFER_CREATE_SPARSE_BINDING_BIT)) {
- const uint64_t alignment =
+ const uint32_t alignment =
nvk_get_buffer_alignment(&nvk_device_physical(dev)->info,
buffer->vk.usage,
buffer->vk.create_flags);
assert(alignment >= 4096);
- buffer->vma_size_B = ALIGN_POT(buffer->vk.size, alignment);
+ buffer->vma_size_B = align64(buffer->vk.size, alignment);
const bool sparse_residency =
buffer->vk.create_flags & VK_BUFFER_CREATE_SPARSE_RESIDENCY_BIT;
pInfo->pCreateInfo->flags);
pMemoryRequirements->memoryRequirements = (VkMemoryRequirements) {
- .size = ALIGN_POT(pInfo->pCreateInfo->size, alignment),
+ .size = align64(pInfo->pCreateInfo->size, alignment),
.alignment = alignment,
.memoryTypeBits = BITFIELD_MASK(dev->pdev->mem_type_cnt),
};
assert(stride <= UINT8_MAX);
assert(util_is_power_of_two_nonzero(align));
- buffer_size = ALIGN_POT(buffer_size, align);
+ buffer_size = align64(buffer_size, align);
layout->binding[b].offset = buffer_size;
layout->binding[b].stride = stride;
* keep non_variable_size aligned to max_align.
*/
non_variable_size += stride * binding->descriptorCount;
- non_variable_size = ALIGN_POT(non_variable_size, max_align);
+ non_variable_size = align64(non_variable_size, max_align);
}
}
}
uint64_t buffer_size = non_variable_size;
if (variable_stride > 0) {
buffer_size += variable_stride * variable_count;
- buffer_size = ALIGN_POT(buffer_size, max_align);
+ buffer_size = align64(buffer_size, max_align);
}
uint32_t max_buffer_size;
alignment = (1ULL << 16);
const uint64_t aligned_size =
- ALIGN_POT(pAllocateInfo->allocationSize, alignment);
+ align64(pAllocateInfo->allocationSize, alignment);
mem = vk_device_memory_create(&dev->vk, pAllocateInfo,
pAllocator, sizeof(*mem));
assert(util_is_power_of_two_or_zero64(plane->nil.align_B));
*align_B = MAX2(*align_B, plane->nil.align_B);
- *size_B = ALIGN_POT(*size_B, plane->nil.align_B);
+ *size_B = align64(*size_B, plane->nil.align_B);
*size_B += plane->nil.size_B;
}
struct nvk_device_memory *mem,
uint64_t *offset_B)
{
- *offset_B = ALIGN_POT(*offset_B, plane->nil.align_B);
+   *offset_B = align64(*offset_B, plane->nil.align_B);
if (plane->vma_size_B) {
nouveau_ws_bo_bind_vma(dev->ws_dev,
return vk_error(dev, VK_ERROR_OUT_OF_HOST_MEMORY);
/* We place the availability first and then data */
- pool->query_start = ALIGN_POT(pool->vk.query_count * sizeof(uint32_t),
- sizeof(struct nvk_query_report));
+ pool->query_start = align(pool->vk.query_count * sizeof(uint32_t),
+ sizeof(struct nvk_query_report));
uint32_t reports_per_query;
switch (pCreateInfo->queryType) {
const VkSparseMemoryBind *bind,
uint64_t *image_plane_offset_B)
{
- *image_plane_offset_B = ALIGN_POT(*image_plane_offset_B,
- plane->nil.align_B);
+ *image_plane_offset_B = align64(*image_plane_offset_B, plane->nil.align_B);
/* The offset of the bind range within the image */
uint64_t image_bind_offset_B = bind->resourceOffset;