shader->bc.ndw * 4);
p = r600_buffer_map_sync_with_rings(
&rctx->b, shader->code_bo,
- PIPE_MAP_WRITE | RADEON_TRANSFER_TEMPORARY);
+ PIPE_MAP_WRITE | RADEON_MAP_TEMPORARY);
//TODO: use util_memcpy_cpu_to_le32 ?
memcpy(p, shader->bc.bytecode, shader->bc.ndw * 4);
rctx->b.ws->buffer_unmap(shader->code_bo->buf);
bytecode = r600_buffer_map_sync_with_rings
(&rctx->b, shader->buffer,
- PIPE_MAP_WRITE | PIPE_MAP_UNSYNCHRONIZED | RADEON_TRANSFER_TEMPORARY);
+ PIPE_MAP_WRITE | PIPE_MAP_UNSYNCHRONIZED | RADEON_MAP_TEMPORARY);
bytecode += shader->offset / 4;
if (R600_BIG_ENDIAN) {
}
ptr = r600_buffer_map_sync_with_rings(
&rctx->b, shader->bo,
- PIPE_MAP_WRITE | RADEON_TRANSFER_TEMPORARY);
+ PIPE_MAP_WRITE | RADEON_MAP_TEMPORARY);
if (R600_BIG_ENDIAN) {
for (i = 0; i < shader->shader.bc.ndw; ++i) {
ptr[i] = util_cpu_to_le32(shader->shader.bc.bytecode[i]);
/* and map it for CPU access */
ptr = dec->ws->buffer_map(buf->res->buf, dec->cs,
- PIPE_MAP_WRITE | RADEON_TRANSFER_TEMPORARY);
+ PIPE_MAP_WRITE | RADEON_MAP_TEMPORARY);
/* calc buffer offsets */
dec->msg = (struct ruvd_msg *)ptr;
dec->bs_size = 0;
dec->bs_ptr = dec->ws->buffer_map(
dec->bs_buffers[dec->cur_buffer].res->buf,
- dec->cs, PIPE_MAP_WRITE | RADEON_TRANSFER_TEMPORARY);
+ dec->cs, PIPE_MAP_WRITE | RADEON_MAP_TEMPORARY);
}
/**
dec->bs_ptr = dec->ws->buffer_map(buf->res->buf, dec->cs,
PIPE_MAP_WRITE |
- RADEON_TRANSFER_TEMPORARY);
+ RADEON_MAP_TEMPORARY);
if (!dec->bs_ptr)
return;
if (size) {
uint32_t *ptr = enc->ws->buffer_map(
fb->res->buf, enc->cs,
- PIPE_MAP_READ_WRITE | RADEON_TRANSFER_TEMPORARY);
+ PIPE_MAP_READ_WRITE | RADEON_MAP_TEMPORARY);
if (ptr[1]) {
*size = ptr[4] - ptr[9];
goto error;
src = ws->buffer_map(old_buf.res->buf, cs,
- PIPE_MAP_READ | RADEON_TRANSFER_TEMPORARY);
+ PIPE_MAP_READ | RADEON_MAP_TEMPORARY);
if (!src)
goto error;
dst = ws->buffer_map(new_buf->res->buf, cs,
- PIPE_MAP_WRITE | RADEON_TRANSFER_TEMPORARY);
+ PIPE_MAP_WRITE | RADEON_MAP_TEMPORARY);
if (!dst)
goto error;
/* and map it for CPU access */
ptr =
- dec->ws->buffer_map(buf->res->buf, dec->cs, PIPE_MAP_WRITE | RADEON_TRANSFER_TEMPORARY);
+ dec->ws->buffer_map(buf->res->buf, dec->cs, PIPE_MAP_WRITE | RADEON_MAP_TEMPORARY);
/* calc buffer offsets */
dec->msg = (struct ruvd_msg *)ptr;
dec->bs_size = 0;
dec->bs_ptr = dec->ws->buffer_map(dec->bs_buffers[dec->cur_buffer].res->buf, dec->cs,
- PIPE_MAP_WRITE | RADEON_TRANSFER_TEMPORARY);
+ PIPE_MAP_WRITE | RADEON_MAP_TEMPORARY);
}
/**
}
dec->bs_ptr = dec->ws->buffer_map(buf->res->buf, dec->cs,
- PIPE_MAP_WRITE | RADEON_TRANSFER_TEMPORARY);
+ PIPE_MAP_WRITE | RADEON_MAP_TEMPORARY);
if (!dec->bs_ptr)
return;
if (NULL != size) {
radeon_uvd_enc_feedback_t *fb_data = (radeon_uvd_enc_feedback_t *)enc->ws->buffer_map(
- fb->res->buf, enc->cs, PIPE_MAP_READ_WRITE | RADEON_TRANSFER_TEMPORARY);
+ fb->res->buf, enc->cs, PIPE_MAP_READ_WRITE | RADEON_MAP_TEMPORARY);
if (!fb_data->status)
*size = fb_data->bitstream_size;
if (size) {
uint32_t *ptr = enc->ws->buffer_map(fb->res->buf, enc->cs,
- PIPE_MAP_READ_WRITE | RADEON_TRANSFER_TEMPORARY);
+ PIPE_MAP_READ_WRITE | RADEON_MAP_TEMPORARY);
if (ptr[1]) {
*size = ptr[4] - ptr[9];
/* ctx needs probs table */
ptr = dec->ws->buffer_map(dec->ctx.res->buf, dec->cs,
- PIPE_MAP_WRITE | RADEON_TRANSFER_TEMPORARY);
+ PIPE_MAP_WRITE | RADEON_MAP_TEMPORARY);
fill_probs_table(ptr);
dec->ws->buffer_unmap(dec->ctx.res->buf);
dec->bs_ptr = NULL;
/* and map it for CPU access */
ptr =
- dec->ws->buffer_map(buf->res->buf, dec->cs, PIPE_MAP_WRITE | RADEON_TRANSFER_TEMPORARY);
+ dec->ws->buffer_map(buf->res->buf, dec->cs, PIPE_MAP_WRITE | RADEON_MAP_TEMPORARY);
/* calc buffer offsets */
dec->msg = ptr;
dec->bs_size = 0;
dec->bs_ptr = dec->ws->buffer_map(dec->bs_buffers[dec->cur_buffer].res->buf, dec->cs,
- PIPE_MAP_WRITE | RADEON_TRANSFER_TEMPORARY);
+ PIPE_MAP_WRITE | RADEON_MAP_TEMPORARY);
}
/**
}
dec->bs_ptr = dec->ws->buffer_map(buf->res->buf, dec->cs,
- PIPE_MAP_WRITE | RADEON_TRANSFER_TEMPORARY);
+ PIPE_MAP_WRITE | RADEON_MAP_TEMPORARY);
if (!dec->bs_ptr)
return;
buf = &dec->msg_fb_it_probs_buffers[i];
ptr = dec->ws->buffer_map(buf->res->buf, dec->cs,
- PIPE_MAP_WRITE | RADEON_TRANSFER_TEMPORARY);
+ PIPE_MAP_WRITE | RADEON_MAP_TEMPORARY);
ptr += FB_BUFFER_OFFSET + FB_BUFFER_SIZE;
fill_probs_table(ptr);
dec->ws->buffer_unmap(buf->res->buf);
if (size) {
uint32_t *ptr = enc->ws->buffer_map(fb->res->buf, enc->cs,
- PIPE_MAP_READ_WRITE | RADEON_TRANSFER_TEMPORARY);
+ PIPE_MAP_READ_WRITE | RADEON_MAP_TEMPORARY);
if (ptr[1])
*size = ptr[6];
else
if (!si_vid_create_buffer(screen, new_buf, new_size, new_buf->usage))
goto error;
- src = ws->buffer_map(old_buf.res->buf, cs, PIPE_MAP_READ | RADEON_TRANSFER_TEMPORARY);
+ src = ws->buffer_map(old_buf.res->buf, cs, PIPE_MAP_READ | RADEON_MAP_TEMPORARY);
if (!src)
goto error;
- dst = ws->buffer_map(new_buf->res->buf, cs, PIPE_MAP_WRITE | RADEON_TRANSFER_TEMPORARY);
+ dst = ws->buffer_map(new_buf->res->buf, cs, PIPE_MAP_WRITE | RADEON_MAP_TEMPORARY);
if (!dst)
goto error;
RADEON_USAGE_SYNCHRONIZED = 8
};
-enum radeon_transfer_flags
+enum radeon_map_flags
{
/* Indicates that the caller will unmap the buffer.
*
* Not unmapping buffers is an important performance optimization for
* OpenGL (avoids kernel overhead for frequently mapped buffers).
*/
- RADEON_TRANSFER_TEMPORARY = (PIPE_MAP_DRV_PRV << 0),
+ RADEON_MAP_TEMPORARY = (PIPE_MAP_DRV_PRV << 0),
};
#define RADEON_SPARSE_PAGE_SIZE (64 * 1024)
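A minimal usage sketch of the renamed flag (the ws/bo names here are placeholders, not part of this patch): a map taken with RADEON_MAP_TEMPORARY must be paired with an explicit unmap, while a map taken without it stays persistent and must not be unmapped by the caller.

   /* temporary mapping: unmap as soon as the CPU access is done */
   void *p = ws->buffer_map(bo->buf, NULL, PIPE_MAP_WRITE | RADEON_MAP_TEMPORARY);
   if (p) {
      memcpy(p, data, size);
      ws->buffer_unmap(bo->buf);
   }

   /* persistent mapping: no unmap; the winsys keeps the CPU pointer
    * alive, avoiding kernel overhead on frequently mapped buffers */
   void *q = ws->buffer_map(bo->buf, NULL, PIPE_MAP_WRITE);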
* space.
*
* Callers are expected to unmap buffers again if and only if the
- * RADEON_TRANSFER_TEMPORARY flag is set in \p usage.
+ * RADEON_MAP_TEMPORARY flag is set in \p usage.
*
* \param buf A winsys buffer object to map.
* \param cs A command stream to flush if the buffer is referenced by it.
- * \param usage A bitmask of the PIPE_MAP_* and RADEON_TRANSFER_* flags.
+ * \param usage A bitmask of the PIPE_MAP_* and RADEON_MAP_* flags.
* \return The pointer at the beginning of the buffer.
*/
void *(*buffer_map)(struct pb_buffer *buf, struct radeon_cmdbuf *cs,
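As a sketch of the contract documented above (bo and cs are assumed locals): passing the command stream lets the winsys flush it when the buffer is still referenced, so the returned pointer never races pending GPU work; the temporary flag again obliges the caller to unmap.

   uint32_t *fb = ws->buffer_map(bo->buf, cs, PIPE_MAP_READ | RADEON_MAP_TEMPORARY);
   if (fb) {
      uint32_t status = fb[0];   /* safe: cs was flushed if it referenced bo */
      ws->buffer_unmap(bo->buf); /* required because RADEON_MAP_TEMPORARY was set */
   }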
const char *mapped = sscreen->ws->buffer_map(
shader->bo->buf, NULL,
- PIPE_MAP_UNSYNCHRONIZED | PIPE_MAP_READ | RADEON_TRANSFER_TEMPORARY);
+ PIPE_MAP_UNSYNCHRONIZED | PIPE_MAP_READ | RADEON_MAP_TEMPORARY);
for (unsigned i = 0; i < size; i += 4) {
fprintf(f, " %4x: %08x\n", i, *(uint32_t *)(mapped + i));
u.rx_va = shader->bo->gpu_address;
u.rx_ptr = sscreen->ws->buffer_map(
shader->bo->buf, NULL,
- PIPE_MAP_READ_WRITE | PIPE_MAP_UNSYNCHRONIZED | RADEON_TRANSFER_TEMPORARY);
+ PIPE_MAP_READ_WRITE | PIPE_MAP_UNSYNCHRONIZED | RADEON_MAP_TEMPORARY);
if (!u.rx_ptr)
return false;
* we don't run out of the CPU address space.
*/
if (sizeof(void *) == 4)
- usage |= RADEON_TRANSFER_TEMPORARY;
+ usage |= RADEON_MAP_TEMPORARY;
if (!(map = si_buffer_map_sync_with_rings(sctx, buf, usage)))
goto fail_trans;
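The hunk above forces temporary maps on 32-bit builds because such a process has only a few GiB of virtual address space; for instance, a dozen persistently mapped 256 MiB buffers would already consume 3 GiB of it. RADEON_MAP_TEMPORARY releases each mapping as soon as the transfer completes, so the address space is reclaimed.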
offset = bo->va - real->va;
}
- if (usage & RADEON_TRANSFER_TEMPORARY) {
+ if (usage & RADEON_MAP_TEMPORARY) {
if (real->is_user_ptr) {
cpu = real->cpu_ptr;
} else {
assert(real->u.real.map_count != 0 && "too many unmaps");
if (p_atomic_dec_zero(&real->u.real.map_count)) {
assert(!real->cpu_ptr &&
- "too many unmaps or forgot RADEON_TRANSFER_TEMPORARY flag");
+ "too many unmaps or forgot RADEON_MAP_TEMPORARY flag");
if (real->initial_domain & RADEON_DOMAIN_VRAM)
real->ws->mapped_vram -= real->base.size;
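For reference, a hedged sketch of the counting contract the assert above enforces (ws/bo are again placeholders): each map bumps map_count and each unmap drops it, so unmaps must pair one-to-one with maps, and a persistent mapping's cpu_ptr must already be cleared before the count can legally reach zero.

   void *a = ws->buffer_map(bo->buf, NULL, PIPE_MAP_READ | RADEON_MAP_TEMPORARY); /* map_count -> 1 */
   void *b = ws->buffer_map(bo->buf, NULL, PIPE_MAP_READ | RADEON_MAP_TEMPORARY); /* map_count -> 2 */
   ws->buffer_unmap(bo->buf); /* map_count -> 1 */
   ws->buffer_unmap(bo->buf); /* map_count -> 0: CPU mapping actually released */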
return false;
map = (uint32_t*)amdgpu_bo_map(preamble_bo, NULL,
- PIPE_MAP_WRITE | RADEON_TRANSFER_TEMPORARY);
+ PIPE_MAP_WRITE | RADEON_MAP_TEMPORARY);
if (!map) {
pb_reference(&preamble_bo, NULL);
return false;