unsigned format;
uint32_t word4 = 0, yuv_format = 0, pitch = 0;
unsigned char swizzle[4];
- struct radeon_ws_bo *bo[2];
+ struct r600_bo *bo[2];
if (resource == NULL)
return NULL;
rctx->states[rstate->id] = NULL;
}
for (int i = 0; i < rstate->nregs; i++) {
- radeon_ws_bo_reference(rctx->radeon, &rstate->regs[i].bo, NULL);
+ r600_bo_reference(rctx->radeon, &rstate->regs[i].bo, NULL);
}
free(rstate);
}
unsigned color_info;
unsigned format, swap, ntype;
const struct util_format_description *desc;
- struct radeon_ws_bo *bo[3];
+ struct r600_bo *bo[3];
rtex = (struct r600_resource_texture*)state->cbufs[cb]->texture;
rbuffer = &rtex->resource;
enum chip_class r600_get_family_class(struct radeon *radeon);
/* lowlevel WS bo */
-struct radeon_ws_bo;
-struct radeon_ws_bo *radeon_ws_bo(struct radeon *radeon,
+struct r600_bo;
+struct r600_bo *r600_bo(struct radeon *radeon,
unsigned size, unsigned alignment, unsigned usage);
-struct radeon_ws_bo *radeon_ws_bo_handle(struct radeon *radeon,
+struct r600_bo *r600_bo_handle(struct radeon *radeon,
unsigned handle);
-void *radeon_ws_bo_map(struct radeon *radeon, struct radeon_ws_bo *bo, unsigned usage, void *ctx);
-void radeon_ws_bo_unmap(struct radeon *radeon, struct radeon_ws_bo *bo);
-void radeon_ws_bo_reference(struct radeon *radeon, struct radeon_ws_bo **dst,
- struct radeon_ws_bo *src);
+void *r600_bo_map(struct radeon *radeon, struct r600_bo *bo, unsigned usage, void *ctx);
+void r600_bo_unmap(struct radeon *radeon, struct r600_bo *bo);
+void r600_bo_reference(struct radeon *radeon, struct r600_bo **dst,
+ struct r600_bo *src);
/* R600/R700 STATES */
#define R600_GROUP_MAX 16
u32 offset;
u32 mask;
u32 value;
- struct radeon_ws_bo *bo;
+ struct r600_bo *bo;
};
struct r600_pipe_state {
static inline void r600_pipe_state_add_reg(struct r600_pipe_state *state,
u32 offset, u32 value, u32 mask,
- struct radeon_ws_bo *bo)
+ struct r600_bo *bo)
{
state->regs[state->nregs].offset = offset;
state->regs[state->nregs].value = value;
#define R600_BLOCK_STATUS_DIRTY (1 << 1)
struct r600_block_reloc {
- struct radeon_ws_bo *bo;
+ struct r600_bo *bo;
unsigned nreloc;
unsigned bo_pm4_index[R600_BLOCK_MAX_BO];
};
/* if we've flushed the query */
unsigned state;
/* The buffer where query results are stored. */
- struct radeon_ws_bo *buffer;
+ struct r600_bo *buffer;
unsigned buffer_size;
/* linked list of queries */
struct list_head list;
u32 vgt_index_type;
u32 vgt_draw_initiator;
u32 indices_bo_offset;
- struct radeon_ws_bo *indices;
+ struct r600_bo *indices;
};
int r600_context_init(struct r600_context *ctx, struct radeon *radeon);
const struct pipe_resource *templ)
{
struct r600_resource_buffer *rbuffer;
- struct radeon_ws_bo *bo;
+ struct r600_bo *bo;
/* XXX We probably want a different alignment for buffers and textures. */
unsigned alignment = 4096;
rbuffer->r.base.vtbl = &r600_buffer_vtbl;
rbuffer->r.size = rbuffer->r.base.b.width0;
rbuffer->r.domain = r600_domain_from_usage(rbuffer->r.base.b.bind);
- bo = radeon_ws_bo((struct radeon*)screen->winsys, rbuffer->r.base.b.width0, alignment, rbuffer->r.base.b.bind);
+ bo = r600_bo((struct radeon*)screen->winsys, rbuffer->r.base.b.width0, alignment, rbuffer->r.base.b.bind);
if (bo == NULL) {
FREE(rbuffer);
return NULL;
struct r600_resource_buffer *rbuffer = r600_buffer(buf);
if (rbuffer->r.bo) {
- radeon_ws_bo_reference((struct radeon*)screen->winsys, &rbuffer->r.bo, NULL);
+ r600_bo_reference((struct radeon*)screen->winsys, &rbuffer->r.bo, NULL);
}
FREE(rbuffer);
}
flush = TRUE;
if (flush) {
- radeon_ws_bo_reference((struct radeon*)pipe->winsys, &rbuffer->r.bo, NULL);
+ r600_bo_reference((struct radeon*)pipe->winsys, &rbuffer->r.bo, NULL);
rbuffer->num_ranges = 0;
- rbuffer->r.bo = radeon_ws_bo((struct radeon*)pipe->winsys,
+ rbuffer->r.bo = r600_bo((struct radeon*)pipe->winsys,
rbuffer->r.base.b.width0, 0,
rbuffer->r.base.b.bind);
break;
if (transfer->usage & PIPE_TRANSFER_WRITE) {
write = 1;
}
- data = radeon_ws_bo_map((struct radeon*)pipe->winsys, rbuffer->r.bo, transfer->usage, pipe);
+ data = r600_bo_map((struct radeon*)pipe->winsys, rbuffer->r.bo, transfer->usage, pipe);
if (!data)
return NULL;
struct r600_resource_buffer *rbuffer = r600_buffer(transfer->resource);
if (rbuffer->r.bo)
- radeon_ws_bo_unmap((struct radeon*)pipe->winsys, rbuffer->r.bo);
+ r600_bo_unmap((struct radeon*)pipe->winsys, rbuffer->r.bo);
}
static void r600_buffer_transfer_flush_region(struct pipe_context *pipe,
{
struct radeon *rw = (struct radeon*)screen->winsys;
struct r600_resource *rbuffer;
- struct radeon_ws_bo *bo = NULL;
+ struct r600_bo *bo = NULL;
- bo = radeon_ws_bo_handle(rw, whandle->handle);
+ bo = r600_bo_handle(rw, whandle->handle);
if (bo == NULL) {
return NULL;
}
rbuffer = CALLOC_STRUCT(r600_resource);
if (rbuffer == NULL) {
- radeon_ws_bo_reference(rw, &bo, NULL);
+ r600_bo_reference(rw, &bo, NULL);
return NULL;
}
struct r600_pipe_shader {
struct r600_shader shader;
struct r600_pipe_state rstate;
- struct radeon_ws_bo *bo;
+ struct r600_bo *bo;
struct r600_vertex_element vertex_elements;
};
*/
struct r600_resource {
struct u_resource base;
- struct radeon_ws_bo *bo;
+ struct r600_bo *bo;
u32 domain;
u32 flink;
u32 size;
/* copy new shader */
if (shader->bo == NULL) {
- shader->bo = radeon_ws_bo(rctx->radeon, rshader->bc.ndw * 4, 4096, 0);
+ shader->bo = r600_bo(rctx->radeon, rshader->bc.ndw * 4, 4096, 0);
if (shader->bo == NULL) {
return -ENOMEM;
}
- ptr = radeon_ws_bo_map(rctx->radeon, shader->bo, 0, NULL);
+ ptr = r600_bo_map(rctx->radeon, shader->bo, 0, NULL);
memcpy(ptr, rshader->bc.bytecode, rshader->bc.ndw * 4);
- radeon_ws_bo_unmap(rctx->radeon, shader->bo);
+ r600_bo_unmap(rctx->radeon, shader->bo);
}
/* build state */
rshader->flat_shade = rctx->flatshade;
for (i = 0; i < rctx->vertex_elements->count; i++) {
resource_format[nresources++] = rctx->vertex_elements->elements[i].src_format;
}
- radeon_ws_bo_reference(rctx->radeon, &rshader->bo, NULL);
+ r600_bo_reference(rctx->radeon, &rshader->bo, NULL);
LIST_FOR_EACH_ENTRY(cf, &bc->cf, list) {
switch (cf->inst) {
case V_SQ_CF_WORD1_SQ_CF_INST_VTX:
unsigned format;
uint32_t word4 = 0, yuv_format = 0, pitch = 0;
unsigned char swizzle[4], array_mode = 0, tile_type = 0;
- struct radeon_ws_bo *bo[2];
+ struct r600_bo *bo[2];
if (resource == NULL)
return NULL;
rctx->states[rstate->id] = NULL;
}
for (int i = 0; i < rstate->nregs; i++) {
- radeon_ws_bo_reference(rctx->radeon, &rstate->regs[i].bo, NULL);
+ r600_bo_reference(rctx->radeon, &rstate->regs[i].bo, NULL);
}
free(rstate);
}
unsigned color_info;
unsigned format, swap, ntype;
const struct util_format_description *desc;
- struct radeon_ws_bo *bo[3];
+ struct r600_bo *bo[3];
rtex = (struct r600_resource_texture*)state->cbufs[cb]->texture;
rbuffer = &rtex->resource;
/* FIXME is an alignment of 4096 enough? too much? */
resource->domain = r600_domain_from_usage(resource->base.b.bind);
resource->size = rtex->size;
- resource->bo = radeon_ws_bo(radeon, rtex->size, 4096, 0);
+ resource->bo = r600_bo(radeon, rtex->size, 4096, 0);
if (resource->bo == NULL) {
FREE(rtex);
return NULL;
pipe_resource_reference((struct pipe_resource **)&rtex->flushed_depth_texture, NULL);
if (resource->bo) {
- radeon_ws_bo_reference(radeon, &resource->bo, NULL);
+ r600_bo_reference(radeon, &resource->bo, NULL);
}
FREE(rtex);
}
struct radeon *rw = (struct radeon*)screen->winsys;
struct r600_resource_texture *rtex;
struct r600_resource *resource;
- struct radeon_ws_bo *bo = NULL;
+ struct r600_bo *bo = NULL;
/* Support only 2D textures without mipmaps */
if ((templ->target != PIPE_TEXTURE_2D && templ->target != PIPE_TEXTURE_RECT) ||
if (rtex == NULL)
return NULL;
- bo = radeon_ws_bo_handle(rw, whandle->handle);
+ bo = r600_bo_handle(rw, whandle->handle);
if (bo == NULL) {
FREE(rtex);
return NULL;
struct pipe_transfer* transfer)
{
struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
- struct radeon_ws_bo *bo;
+ struct r600_bo *bo;
enum pipe_format format = transfer->resource->format;
struct radeon *radeon = (struct radeon *)ctx->screen->winsys;
unsigned long offset = 0;
transfer->box.y / util_format_get_blockheight(format) * transfer->stride +
transfer->box.x / util_format_get_blockwidth(format) * util_format_get_blocksize(format);
}
- map = radeon_ws_bo_map(radeon, bo, 0, ctx);
+ map = r600_bo_map(radeon, bo, 0, ctx);
if (!map) {
return NULL;
}
{
struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
struct radeon *radeon = (struct radeon *)ctx->screen->winsys;
- struct radeon_ws_bo *bo;
+ struct r600_bo *bo;
if (rtransfer->linear_texture) {
bo = ((struct r600_resource *)rtransfer->linear_texture)->bo;
bo = ((struct r600_resource *)transfer->resource)->bo;
}
}
- radeon_ws_bo_unmap(radeon, bo);
+ r600_bo_unmap(radeon, bo);
}
struct u_resource_vtbl r600_texture_vtbl =
block = range->blocks[CTX_BLOCK_ID(ctx, offset)];
if (state == NULL) {
block->status &= ~(R600_BLOCK_STATUS_ENABLED | R600_BLOCK_STATUS_DIRTY);
- radeon_ws_bo_reference(ctx->radeon, &block->reloc[1].bo, NULL);
- radeon_ws_bo_reference(ctx->radeon , &block->reloc[2].bo, NULL);
+ r600_bo_reference(ctx->radeon, &block->reloc[1].bo, NULL);
+ r600_bo_reference(ctx->radeon, &block->reloc[2].bo, NULL);
return;
}
block->reg[0] = state->regs[0].value;
block->reg[5] = state->regs[5].value;
block->reg[6] = state->regs[6].value;
block->reg[7] = state->regs[7].value;
- radeon_ws_bo_reference(ctx->radeon, &block->reloc[1].bo, NULL);
- radeon_ws_bo_reference(ctx->radeon , &block->reloc[2].bo, NULL);
+ r600_bo_reference(ctx->radeon, &block->reloc[1].bo, NULL);
+ r600_bo_reference(ctx->radeon, &block->reloc[2].bo, NULL);
if (state->regs[0].bo) {
/* VERTEX RESOURCE: pretend there are 2 bos to relocate so
 * we have a single case for both VERTEX & TEXTURE resources
*/
- radeon_ws_bo_reference(ctx->radeon, &block->reloc[1].bo, state->regs[0].bo);
- radeon_ws_bo_reference(ctx->radeon, &block->reloc[2].bo, state->regs[0].bo);
+ r600_bo_reference(ctx->radeon, &block->reloc[1].bo, state->regs[0].bo);
+ r600_bo_reference(ctx->radeon, &block->reloc[2].bo, state->regs[0].bo);
} else {
/* TEXTURE RESOURCE */
- radeon_ws_bo_reference(ctx->radeon, &block->reloc[1].bo, state->regs[2].bo);
- radeon_ws_bo_reference(ctx->radeon, &block->reloc[2].bo, state->regs[3].bo);
+ r600_bo_reference(ctx->radeon, &block->reloc[1].bo, state->regs[2].bo);
+ r600_bo_reference(ctx->radeon, &block->reloc[2].bo, state->regs[3].bo);
}
if (!(block->status & R600_BLOCK_STATUS_DIRTY)) {
block->status |= R600_BLOCK_STATUS_ENABLED;
block->reg[5] = state->regs[5].value;
block->reg[6] = state->regs[6].value;
block->reg[7] = state->regs[7].value;
- radeon_ws_bo_reference(ctx->radeon, &block->reloc[1].bo, NULL);
- radeon_ws_bo_reference(ctx->radeon , &block->reloc[2].bo, NULL);
+ r600_bo_reference(ctx->radeon, &block->reloc[1].bo, NULL);
+ r600_bo_reference(ctx->radeon, &block->reloc[2].bo, NULL);
if (state->regs[0].bo) {
/* VERTEX RESOURCE: pretend there are 2 bos to relocate so
 * we have a single case for both VERTEX & TEXTURE resources
*/
- radeon_ws_bo_reference(ctx->radeon, &block->reloc[1].bo, state->regs[0].bo);
- radeon_ws_bo_reference(ctx->radeon, &block->reloc[2].bo, state->regs[0].bo);
+ r600_bo_reference(ctx->radeon, &block->reloc[1].bo, state->regs[0].bo);
+ r600_bo_reference(ctx->radeon, &block->reloc[2].bo, state->regs[0].bo);
} else {
/* TEXTURE RESOURCE */
- radeon_ws_bo_reference(ctx->radeon, &block->reloc[1].bo, state->regs[2].bo);
- radeon_ws_bo_reference(ctx->radeon, &block->reloc[2].bo, state->regs[3].bo);
+ r600_bo_reference(ctx->radeon, &block->reloc[1].bo, state->regs[2].bo);
+ r600_bo_reference(ctx->radeon, &block->reloc[2].bo, state->regs[3].bo);
}
if (!(block->status & R600_BLOCK_STATUS_DIRTY)) {
block->status |= R600_BLOCK_STATUS_ENABLED;
if (block->pm4_bo_index[id]) {
/* find relocation */
id = block->pm4_bo_index[id];
- radeon_ws_bo_reference(ctx->radeon, &block->reloc[id].bo, state->regs[i].bo);
+ r600_bo_reference(ctx->radeon, &block->reloc[id].bo, state->regs[i].bo);
}
if (!(block->status & R600_BLOCK_STATUS_DIRTY)) {
block->status |= R600_BLOCK_STATUS_ENABLED;
block = range->blocks[CTX_BLOCK_ID(ctx, offset)];
if (state == NULL) {
block->status &= ~(R600_BLOCK_STATUS_ENABLED | R600_BLOCK_STATUS_DIRTY);
- radeon_ws_bo_reference(ctx->radeon, &block->reloc[1].bo, NULL);
- radeon_ws_bo_reference(ctx->radeon , &block->reloc[2].bo, NULL);
+ r600_bo_reference(ctx->radeon, &block->reloc[1].bo, NULL);
+ r600_bo_reference(ctx->radeon, &block->reloc[2].bo, NULL);
return;
}
block->reg[0] = state->regs[0].value;
block->reg[4] = state->regs[4].value;
block->reg[5] = state->regs[5].value;
block->reg[6] = state->regs[6].value;
- radeon_ws_bo_reference(ctx->radeon, &block->reloc[1].bo, NULL);
- radeon_ws_bo_reference(ctx->radeon , &block->reloc[2].bo, NULL);
+ r600_bo_reference(ctx->radeon, &block->reloc[1].bo, NULL);
+ r600_bo_reference(ctx->radeon, &block->reloc[2].bo, NULL);
if (state->regs[0].bo) {
/* VERTEX RESOURCE: pretend there are 2 bos to relocate so
 * we have a single case for both VERTEX & TEXTURE resources
*/
- radeon_ws_bo_reference(ctx->radeon, &block->reloc[1].bo, state->regs[0].bo);
- radeon_ws_bo_reference(ctx->radeon, &block->reloc[2].bo, state->regs[0].bo);
+ r600_bo_reference(ctx->radeon, &block->reloc[1].bo, state->regs[0].bo);
+ r600_bo_reference(ctx->radeon, &block->reloc[2].bo, state->regs[0].bo);
} else {
/* TEXTURE RESOURCE */
- radeon_ws_bo_reference(ctx->radeon, &block->reloc[1].bo, state->regs[2].bo);
- radeon_ws_bo_reference(ctx->radeon, &block->reloc[2].bo, state->regs[3].bo);
+ r600_bo_reference(ctx->radeon, &block->reloc[1].bo, state->regs[2].bo);
+ r600_bo_reference(ctx->radeon, &block->reloc[2].bo, state->regs[3].bo);
}
if (!(block->status & R600_BLOCK_STATUS_DIRTY)) {
block->status |= R600_BLOCK_STATUS_ENABLED;
u32 *results;
int i;
- results = radeon_ws_bo_map(ctx->radeon, query->buffer, 0, NULL);
+ results = r600_bo_map(ctx->radeon, query->buffer, 0, NULL);
for (i = 0; i < query->num_results; i += 4) {
start = (u64)results[i] | (u64)results[i + 1] << 32;
end = (u64)results[i + 2] | (u64)results[i + 3] << 32;
query->result += end - start;
}
}
- radeon_ws_bo_unmap(ctx->radeon, query->buffer);
+ r600_bo_unmap(ctx->radeon, query->buffer);
query->num_results = 0;
}
query->type = query_type;
query->buffer_size = 4096;
- query->buffer = radeon_ws_bo(ctx->radeon, query->buffer_size, 1, 0);
+ query->buffer = r600_bo(ctx->radeon, query->buffer_size, 1, 0);
if (!query->buffer) {
free(query);
return NULL;
void r600_context_query_destroy(struct r600_context *ctx, struct r600_query *query)
{
- radeon_ws_bo_reference(ctx->radeon, &query->buffer, NULL);
+ r600_bo_reference(ctx->radeon, &query->buffer, NULL);
LIST_DEL(&query->list);
free(query);
}
void *data;
};
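+/* Reference-counted wrapper around a pipe_buffer (pb) allocation. */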
-struct radeon_ws_bo {
+struct r600_bo {
struct pipe_reference reference;
struct pb_buffer *pb;
};
struct pb_buffer *radeon_bo_pb_create_buffer_from_handle(struct pb_manager *_mgr,
uint32_t handle);
-/* radeon_ws_bo.c */
-unsigned radeon_ws_bo_get_handle(struct radeon_ws_bo *bo);
-unsigned radeon_ws_bo_get_size(struct radeon_ws_bo *bo);
+/* r600_bo.c */
+unsigned r600_bo_get_handle(struct r600_bo *bo);
+unsigned r600_bo_get_size(struct r600_bo *bo);
#define CTX_RANGE_ID(ctx, offset) (((offset) >> (ctx)->hash_shift) & 255)
#define CTX_BLOCK_ID(ctx, offset) ((offset) & ((1 << (ctx)->hash_shift) - 1))
#include <pipebuffer/pb_bufmgr.h>
#include "r600_priv.h"
-struct radeon_ws_bo *radeon_ws_bo(struct radeon *radeon,
+struct r600_bo *r600_bo(struct radeon *radeon,
unsigned size, unsigned alignment, unsigned usage)
{
- struct radeon_ws_bo *ws_bo = calloc(1, sizeof(struct radeon_ws_bo));
+ struct r600_bo *ws_bo = calloc(1, sizeof(struct r600_bo));
struct pb_desc desc;
struct pb_manager *man;
return ws_bo;
}
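+/* Wrap an existing buffer object, looked up by its kernel handle. */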
-struct radeon_ws_bo *radeon_ws_bo_handle(struct radeon *radeon,
+struct r600_bo *r600_bo_handle(struct radeon *radeon,
unsigned handle)
{
- struct radeon_ws_bo *ws_bo = calloc(1, sizeof(struct radeon_ws_bo));
+ struct r600_bo *ws_bo = calloc(1, sizeof(struct r600_bo));
ws_bo->pb = radeon_bo_pb_create_buffer_from_handle(radeon->kman, handle);
if (!ws_bo->pb) {
return ws_bo;
}
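+/* Map the buffer for CPU access; thin wrapper around pb_map(). */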
-void *radeon_ws_bo_map(struct radeon *radeon, struct radeon_ws_bo *bo, unsigned usage, void *ctx)
+void *r600_bo_map(struct radeon *radeon, struct r600_bo *bo, unsigned usage, void *ctx)
{
return pb_map(bo->pb, usage, ctx);
}
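+/* Release the CPU mapping obtained with r600_bo_map(). */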
-void radeon_ws_bo_unmap(struct radeon *radeon, struct radeon_ws_bo *bo)
+void r600_bo_unmap(struct radeon *radeon, struct r600_bo *bo)
{
pb_unmap(bo->pb);
}
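+/* Drop the underlying pb_buffer reference and free the wrapper. */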
-static void radeon_ws_bo_destroy(struct radeon *radeon, struct radeon_ws_bo *bo)
+static void r600_bo_destroy(struct radeon *radeon, struct r600_bo *bo)
{
if (bo->pb)
pb_reference(&bo->pb, NULL);
free(bo);
}
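+/* Make *dst point at src, destroying the old bo once its refcount reaches
+ * zero; passing src == NULL simply releases the reference held in *dst. */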
-void radeon_ws_bo_reference(struct radeon *radeon, struct radeon_ws_bo **dst,
- struct radeon_ws_bo *src)
+void r600_bo_reference(struct radeon *radeon, struct r600_bo **dst,
+ struct r600_bo *src)
{
- struct radeon_ws_bo *old = *dst;
+ struct r600_bo *old = *dst;
if (pipe_reference(&(*dst)->reference, &src->reference)) {
- radeon_ws_bo_destroy(radeon, old);
+ r600_bo_destroy(radeon, old);
}
*dst = src;
}
-unsigned radeon_ws_bo_get_handle(struct radeon_ws_bo *pb_bo)
+unsigned r600_bo_get_handle(struct r600_bo *pb_bo)
{
struct radeon_bo *bo;
return bo->handle;
}
-unsigned radeon_ws_bo_get_size(struct radeon_ws_bo *pb_bo)
+unsigned r600_bo_get_size(struct r600_bo *pb_bo)
{
struct radeon_bo *bo;