#ifndef R600_PIPE_H
#define R600_PIPE_H
+#include "util/u_blitter.h"
#include "util/u_slab.h"
#include "r600.h"
#include "r600_llvm.h"
bool flush_depthstencil_through_cb;
bool copy_depth, copy_stencil;
unsigned copy_sample;
+ unsigned log_samples;
};
struct r600_cb_misc_state {
unsigned default_ps_gprs, default_vs_gprs;
+ /* current unaccounted memory usage */
+ uint64_t vram;
+ uint64_t gtt;
+
/* States based on r600_atom. */
struct list_head dirty_states;
struct r600_command_buffer start_cs_cmd; /* invariant state mostly */
/* Index buffer. */
struct pipe_index_buffer index_buffer;
+
+ /* Dummy CMASK and FMASK buffers used to work around an R6xx hardware
+ * bug: valid CMASK and FMASK buffers must be present to avoid a
+ * hardlock in certain operations, even though they aren't actually
+ * used for anything useful. */
+ struct r600_resource *dummy_fmask;
+ struct r600_resource *dummy_cmask;
};
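/* A minimal sketch (editor's illustration, not from this patch) of how the
 * dummy buffers could be allocated once at context creation. The helper
 * name, the 256-byte size and the alignment are assumptions;
 * r600_buffer_create is the variant declared later in this header. */
#if 0
static void r600_init_dummy_cmask_fmask(struct r600_context *rctx,
					struct pipe_screen *screen)
{
	struct pipe_resource templ;

	memset(&templ, 0, sizeof(templ));
	templ.target = PIPE_BUFFER;
	templ.format = PIPE_FORMAT_R8_UNORM;
	templ.bind = PIPE_BIND_CUSTOM;
	templ.usage = PIPE_USAGE_STATIC;
	templ.width0 = 256;
	templ.height0 = 1;
	templ.depth0 = 1;
	templ.array_size = 1;

	/* The contents are never read by useful work; the buffers only
	 * need to exist so R6xx doesn't hardlock. */
	rctx->dummy_fmask = (struct r600_resource*)
		r600_buffer_create(screen, &templ, 256);
	rctx->dummy_cmask = (struct r600_resource*)
		r600_buffer_create(screen, &templ, 256);
}
#endif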
static INLINE void r600_emit_atom(struct r600_context *rctx, struct r600_atom *atom)
unsigned size, unsigned alignment,
unsigned bind, unsigned usage);
struct pipe_resource *r600_buffer_create(struct pipe_screen *screen,
- const struct pipe_resource *templ);
+ const struct pipe_resource *templ,
+ unsigned alignment);
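/* Usage sketch (editor's note, not from this patch): the new third
 * parameter is a byte alignment for the buffer; a hypothetical call
 * site might look like this: */
#if 0
	struct pipe_resource *buf =
		r600_buffer_create(screen, &templ, 256 /* byte alignment */);
#endif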
/* r600_pipe.c */
void r600_flush(struct pipe_context *ctx, struct pipe_fence_handle **fence,
void r600_fetch_shader(struct pipe_context *ctx, struct r600_vertex_element *ve);
void *r600_create_db_flush_dsa(struct r600_context *rctx);
void *r600_create_resolve_blend(struct r600_context *rctx);
+void *r700_create_resolve_blend(struct r600_context *rctx);
void *r600_create_decompress_blend(struct r600_context *rctx);
void r600_polygon_offset_update(struct r600_context *rctx);
void r600_adjust_gprs(struct r600_context *rctx);
void r600_set_pipe_stencil_ref(struct pipe_context *ctx,
const struct pipe_stencil_ref *state);
void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *info);
+void r600_draw_rectangle(struct blitter_context *blitter,
+ unsigned x1, unsigned y1, unsigned x2, unsigned y2, float depth,
+ enum blitter_attrib_type type, const union pipe_color_union *attrib);
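/* Sketch (editor's illustration): a function with this signature matches
 * u_blitter's draw_rectangle hook, so the expected wiring at context
 * creation (exact placement hypothetical) would be: */
#if 0
	rctx->blitter = util_blitter_create(&rctx->context);
	if (rctx->blitter == NULL)
		return NULL;
	rctx->blitter->draw_rectangle = r600_draw_rectangle;
#endif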
uint32_t r600_translate_stencil_op(int s_op);
uint32_t r600_translate_fill(uint32_t func);
unsigned r600_tex_wrap(unsigned wrap);
return rscreen->ws->buffer_get_virtual_address(rresource->cs_buf);
}
+static INLINE void r600_context_add_resource_size(struct pipe_context *ctx, struct pipe_resource *r)
+{
+ struct r600_context *rctx = (struct r600_context *)ctx;
+ struct r600_resource *rr = (struct r600_resource *)r;
+
+ if (r == NULL) {
+ return;
+ }
+
+ /*
+ * The idea is to compute a rough estimate of the memory required by
+ * each draw call. After each draw call, memory usage is accounted
+ * precisely, so the uncertainty only covers the current draw call.
+ * In practice this gives a very good estimate (+/- 10% of the target
+ * memory limit).
+ */
+ if (rr->domains & RADEON_DOMAIN_GTT) {
+ rctx->gtt += rr->buf->size;
+ }
+ if (rr->domains & RADEON_DOMAIN_VRAM) {
+ rctx->vram += rr->buf->size;
+ }
+}
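/* Usage sketch (editor's illustration): resource-binding entry points
 * would call r600_context_add_resource_size() for each newly referenced
 * buffer or texture; the function below is a hypothetical example, not
 * part of this header. */
#if 0
static void r600_set_index_buffer(struct pipe_context *ctx,
				  const struct pipe_index_buffer *ib)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	if (ib) {
		pipe_resource_reference(&rctx->index_buffer.buffer, ib->buffer);
		memcpy(&rctx->index_buffer, ib, sizeof(*ib));
		/* Count the index buffer toward this draw's memory estimate. */
		r600_context_add_resource_size(ctx, ib->buffer);
	} else {
		pipe_resource_reference(&rctx->index_buffer.buffer, NULL);
	}
}
#endif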
+
#endif