q->num_pipes = r300screen->info.r300_num_gb_pipes;
q->buf = r300->rws->buffer_create(r300->rws, 4096, 4096, TRUE,
- RADEON_DOMAIN_GTT);
+ RADEON_DOMAIN_GTT, 0);
if (!q->buf) {
FREE(q);
return NULL;
r300->vbo = rws->buffer_create(rws,
MAX2(R300_MAX_DRAW_VBO_SIZE, size),
R300_BUFFER_ALIGNMENT, TRUE,
- RADEON_DOMAIN_GTT);
+ RADEON_DOMAIN_GTT, 0);
if (!r300->vbo) {
return FALSE;
}
/* Create a new one in the same pipe_resource. */
new_buf = r300->rws->buffer_create(r300->rws, rbuf->b.b.width0,
R300_BUFFER_ALIGNMENT, TRUE,
- rbuf->domain);
+ rbuf->domain, 0);
if (new_buf) {
/* Discard the old buffer. */
pb_reference(&rbuf->buf, NULL);
rbuf->buf =
r300screen->rws->buffer_create(r300screen->rws, rbuf->b.b.width0,
R300_BUFFER_ALIGNMENT, TRUE,
- rbuf->domain);
+ rbuf->domain, 0);
if (!rbuf->buf) {
FREE(rbuf);
return NULL;
/* Create the backing buffer if needed. */
if (!tex->buf) {
tex->buf = rws->buffer_create(rws, tex->tex.size_in_bytes, 2048, TRUE,
- tex->domain);
+ tex->domain, 0);
if (!tex->buf) {
goto fail;
{
struct r600_texture *rtex = (struct r600_texture*)res;
struct pb_buffer *old_buf, *new_buf;
+ enum radeon_bo_flag flags = 0;
switch (res->b.b.usage) {
- case PIPE_USAGE_STAGING:
case PIPE_USAGE_DYNAMIC:
case PIPE_USAGE_STREAM:
+ flags = RADEON_FLAG_GTT_WC;
+ /* fall through */
+ case PIPE_USAGE_STAGING:
/* Transfers are likely to occur more often with these resources. */
res->domains = RADEON_DOMAIN_GTT;
break;
default:
/* Not listing GTT here improves performance in some apps. */
res->domains = RADEON_DOMAIN_VRAM;
+ flags = RADEON_FLAG_GTT_WC;
break;
}
res->b.b.flags & (PIPE_RESOURCE_FLAG_MAP_PERSISTENT |
PIPE_RESOURCE_FLAG_MAP_COHERENT)) {
res->domains = RADEON_DOMAIN_GTT;
+ flags = 0;
}
/* Tiled textures are unmappable. Always put them in VRAM. */
/* Allocate a new resource. */
new_buf = rscreen->ws->buffer_create(rscreen->ws, size, alignment,
use_reusable_pool,
- res->domains);
+ res->domains, flags);
if (!new_buf) {
return false;
}
r600_init_temp_resource_from_box(&resource, texture, box, level,
R600_RESOURCE_FLAG_TRANSFER);
+ resource.usage = (usage & PIPE_TRANSFER_READ) ?
+ PIPE_USAGE_STAGING : PIPE_USAGE_STREAM;
/* Create the temporary texture. */
staging = (struct r600_texture*)ctx->screen->resource_create(ctx->screen, &resource);
for (i = 0; i < NUM_BUFFERS; ++i) {
unsigned msg_fb_size = FB_BUFFER_OFFSET + FB_BUFFER_SIZE;
STATIC_ASSERT(sizeof(struct ruvd_msg) <= FB_BUFFER_OFFSET);
- if (!rvid_create_buffer(dec->ws, &dec->msg_fb_buffers[i], msg_fb_size, RADEON_DOMAIN_VRAM)) {
+ if (!rvid_create_buffer(dec->ws, &dec->msg_fb_buffers[i], msg_fb_size,
+ RADEON_DOMAIN_VRAM, 0)) {
RVID_ERR("Can't allocated message buffers.\n");
goto error;
}
- if (!rvid_create_buffer(dec->ws, &dec->bs_buffers[i], bs_buf_size, RADEON_DOMAIN_GTT)) {
+ if (!rvid_create_buffer(dec->ws, &dec->bs_buffers[i], bs_buf_size,
+ RADEON_DOMAIN_GTT, 0)) {
RVID_ERR("Can't allocated bitstream buffers.\n");
goto error;
}
rvid_clear_buffer(dec->ws, dec->cs, &dec->bs_buffers[i]);
}
- if (!rvid_create_buffer(dec->ws, &dec->dpb, dpb_size, RADEON_DOMAIN_VRAM)) {
+ if (!rvid_create_buffer(dec->ws, &dec->dpb, dpb_size, RADEON_DOMAIN_VRAM, 0)) {
RVID_ERR("Can't allocated dpb.\n");
goto error;
}
struct rvce_encoder *enc = (struct rvce_encoder*)encoder;
if (enc->stream_handle) {
struct rvid_buffer fb;
- rvid_create_buffer(enc->ws, &fb, 512, RADEON_DOMAIN_GTT);
+ rvid_create_buffer(enc->ws, &fb, 512, RADEON_DOMAIN_GTT, 0);
enc->fb = &fb;
enc->session(enc);
enc->feedback(enc);
if (!enc->stream_handle) {
struct rvid_buffer fb;
enc->stream_handle = rvid_alloc_stream_handle();
- rvid_create_buffer(enc->ws, &fb, 512, RADEON_DOMAIN_GTT);
+ rvid_create_buffer(enc->ws, &fb, 512, RADEON_DOMAIN_GTT, 0);
enc->fb = &fb;
enc->session(enc);
enc->create(enc);
enc->bs_size = destination->width0;
*fb = enc->fb = CALLOC_STRUCT(rvid_buffer);
- if (!rvid_create_buffer(enc->ws, enc->fb, 512, RADEON_DOMAIN_GTT)) {
+ if (!rvid_create_buffer(enc->ws, enc->fb, 512, RADEON_DOMAIN_GTT, 0)) {
RVID_ERR("Can't create feedback buffer.\n");
return;
}
cpb_size = cpb_size * 3 / 2;
cpb_size = cpb_size * enc->cpb_num;
tmp_buf->destroy(tmp_buf);
- if (!rvid_create_buffer(enc->ws, &enc->cpb, cpb_size, RADEON_DOMAIN_VRAM)) {
+ if (!rvid_create_buffer(enc->ws, &enc->cpb, cpb_size, RADEON_DOMAIN_VRAM, 0)) {
RVID_ERR("Can't create CPB buffer.\n");
goto error;
}
/* create a buffer in the winsys */
bool rvid_create_buffer(struct radeon_winsys *ws, struct rvid_buffer *buffer,
- unsigned size, enum radeon_bo_domain domain)
+ unsigned size, enum radeon_bo_domain domain,
+ enum radeon_bo_flag flags)
{
buffer->domain = domain;
+ buffer->flags = flags;
- buffer->buf = ws->buffer_create(ws, size, 4096, false, domain);
+ buffer->buf = ws->buffer_create(ws, size, 4096, false, domain, flags);
if (!buffer->buf)
return false;
struct rvid_buffer old_buf = *new_buf;
void *src = NULL, *dst = NULL;
- if (!rvid_create_buffer(ws, new_buf, new_size, new_buf->domain))
+ if (!rvid_create_buffer(ws, new_buf, new_size, new_buf->domain,
+ new_buf->flags))
goto error;
src = ws->buffer_map(old_buf.cs_handle, cs, PIPE_TRANSFER_READ);
/* TODO: 2D tiling workaround */
alignment *= 2;
- pb = ws->buffer_create(ws, size, alignment, bind, RADEON_DOMAIN_VRAM);
+ pb = ws->buffer_create(ws, size, alignment, bind, RADEON_DOMAIN_VRAM, 0);
if (!pb)
return;
struct rvid_buffer
{
enum radeon_bo_domain domain;
+ enum radeon_bo_flag flags;
struct pb_buffer* buf;
struct radeon_winsys_cs_handle* cs_handle;
};
/* create a buffer in the winsys */
bool rvid_create_buffer(struct radeon_winsys *ws, struct rvid_buffer *buffer,
- unsigned size, enum radeon_bo_domain domain);
+ unsigned size, enum radeon_bo_domain domain,
+ enum radeon_bo_flag flags);
/* destroy a buffer */
void rvid_destroy_buffer(struct rvid_buffer *buffer);
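For illustration only (the patch itself still passes 0 for the flags everywhere in the video code), a caller that only fills a buffer from the CPU could now ask for a write-combined GTT placement through the extended helper. The variable names below are hypothetical and not taken from this patch:

   /* Hypothetical call site: a CPU-write-only bitstream buffer is a good
    * candidate for write-combined GTT memory. */
   struct rvid_buffer bs;
   if (!rvid_create_buffer(ws, &bs, bs_size,
                           RADEON_DOMAIN_GTT, RADEON_FLAG_GTT_WC)) {
      RVID_ERR("Can't create bitstream buffer.\n");
      return false;
   }
   /* Because rvid_create_buffer() now records buffer->flags, the resize
    * path shown earlier re-creates the buffer with the same placement
    * and mapping attributes. */
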
sctx->border_color_table =
si_resource_create_custom(&sctx->screen->b.b,
- PIPE_USAGE_STAGING,
+ PIPE_USAGE_DYNAMIC,
4096 * 4 * 4);
}
radeon_bo_get_base_buffer,
};
+#ifndef RADEON_GEM_GTT_WC
+#define RADEON_GEM_GTT_WC (1 << 2)
+#endif
+
static struct pb_buffer *radeon_bomgr_create_bo(struct pb_manager *_mgr,
pb_size size,
const struct pb_desc *desc)
args.size = size;
args.alignment = desc->alignment;
args.initial_domain = rdesc->initial_domains;
+ args.flags = 0;
+
+ if (rdesc->flags & RADEON_FLAG_GTT_WC)
+ args.flags |= RADEON_GEM_GTT_WC;
if (drmCommandWriteRead(rws->fd, DRM_RADEON_GEM_CREATE,
&args, sizeof(args))) {
fprintf(stderr, "radeon: size : %d bytes\n", size);
fprintf(stderr, "radeon: alignment : %d bytes\n", desc->alignment);
fprintf(stderr, "radeon: domains : %d\n", args.initial_domain);
+ fprintf(stderr, "radeon: flags : %d\n", args.flags);
return NULL;
}
unsigned size,
unsigned alignment,
boolean use_reusable_pool,
- enum radeon_bo_domain domain)
+ enum radeon_bo_domain domain,
+ enum radeon_bo_flag flags)
{
struct radeon_drm_winsys *ws = radeon_drm_winsys(rws);
struct radeon_bomgr *mgr = radeon_bomgr(ws->kman);
/* Additional criteria for the cache manager. */
desc.base.usage = domain;
desc.initial_domains = domain;
+ desc.flags = flags;
/* Assign a buffer manager. */
if (use_reusable_pool) {
- if (domain == RADEON_DOMAIN_VRAM)
- provider = ws->cman_vram;
- else
+ if (domain == RADEON_DOMAIN_VRAM) {
+ if (flags & RADEON_FLAG_GTT_WC)
+ provider = ws->cman_vram_gtt_wc;
+ else
+ provider = ws->cman_vram;
+ } else if (flags & RADEON_FLAG_GTT_WC) {
+ provider = ws->cman_gtt_wc;
+ } else {
provider = ws->cman_gtt;
+ }
} else {
provider = ws->kman;
}
struct pb_desc base;
unsigned initial_domains;
+ unsigned flags;
};
struct radeon_bo {
/* Create a fence, which is a dummy BO. */
fence = cs->ws->base.buffer_create(&cs->ws->base, 1, 1, TRUE,
- RADEON_DOMAIN_GTT);
+ RADEON_DOMAIN_GTT, 0);
/* Add the fence as a dummy relocation. */
cs->ws->base.cs_add_reloc(rcs, cs->ws->base.buffer_get_cs_handle(fence),
RADEON_USAGE_READWRITE, RADEON_DOMAIN_GTT,
pipe_mutex_destroy(ws->cs_stack_lock);
ws->cman_vram->destroy(ws->cman_vram);
+ ws->cman_vram_gtt_wc->destroy(ws->cman_vram_gtt_wc);
ws->cman_gtt->destroy(ws->cman_gtt);
+ ws->cman_gtt_wc->destroy(ws->cman_gtt_wc);
ws->kman->destroy(ws->kman);
if (ws->gen >= DRV_R600) {
radeon_surface_manager_free(ws->surf_man);
ws->cman_vram = pb_cache_manager_create(ws->kman, 1000000, 2.0f, 0);
if (!ws->cman_vram)
goto fail;
+ ws->cman_vram_gtt_wc = pb_cache_manager_create(ws->kman, 1000000, 2.0f, 0);
+ if (!ws->cman_vram_gtt_wc)
+ goto fail;
ws->cman_gtt = pb_cache_manager_create(ws->kman, 1000000, 2.0f, 0);
if (!ws->cman_gtt)
goto fail;
+ ws->cman_gtt_wc = pb_cache_manager_create(ws->kman, 1000000, 2.0f, 0);
+ if (!ws->cman_gtt_wc)
+ goto fail;
if (ws->gen >= DRV_R600) {
ws->surf_man = radeon_surface_manager_new(fd);
pipe_mutex_unlock(fd_tab_mutex);
if (ws->cman_gtt)
ws->cman_gtt->destroy(ws->cman_gtt);
+ if (ws->cman_gtt_wc)
+ ws->cman_gtt_wc->destroy(ws->cman_gtt_wc);
if (ws->cman_vram)
ws->cman_vram->destroy(ws->cman_vram);
+ if (ws->cman_vram_gtt_wc)
+ ws->cman_vram_gtt_wc->destroy(ws->cman_vram_gtt_wc);
if (ws->kman)
ws->kman->destroy(ws->kman);
if (ws->surf_man)
struct pb_manager *kman;
struct pb_manager *cman_vram;
+ struct pb_manager *cman_vram_gtt_wc;
struct pb_manager *cman_gtt;
+ struct pb_manager *cman_gtt_wc;
struct radeon_surface_manager *surf_man;
uint32_t num_cpus; /* Number of CPUs. */
RADEON_DOMAIN_VRAM_GTT = RADEON_DOMAIN_VRAM | RADEON_DOMAIN_GTT
};
+enum radeon_bo_flag { /* bitfield */
+ RADEON_FLAG_GTT_WC = (1 << 0)
+};
+
enum radeon_bo_usage { /* bitfield */
RADEON_USAGE_READ = 2,
RADEON_USAGE_WRITE = 4,
unsigned size,
unsigned alignment,
boolean use_reusable_pool,
- enum radeon_bo_domain domain);
+ enum radeon_bo_domain domain,
+ enum radeon_bo_flag flags);
struct radeon_winsys_cs_handle *(*buffer_get_cs_handle)(
struct pb_buffer *buf);
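As a usage sketch of the extended winsys interface (not code from this patch; ws, cs and size are assumed to be in scope), a driver can request a write-combined GTT buffer and map it for streaming uploads. CPU reads through such a mapping are slow, so it only suits data the CPU writes and the GPU reads:

   struct pb_buffer *buf;
   void *map;

   buf = ws->buffer_create(ws, size, 4096, TRUE,
                           RADEON_DOMAIN_GTT, RADEON_FLAG_GTT_WC);
   if (!buf)
      return NULL;

   /* Write-only streaming access through the WC mapping; avoid reading
    * the data back through this pointer. */
   map = ws->buffer_map(ws->buffer_get_cs_handle(buf), cs,
                        PIPE_TRANSFER_WRITE);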