bool linear;
unsigned start;
- const ushort *elts;
+ const uint16_t *elts;
unsigned count;
enum mesa_prim prim;
FUNC(FUNC_VARS)
{
unsigned idx[6], i;
- ushort flags;
+ uint16_t flags;
LOCAL_VARS
FUNC_ENTER;
case MESA_PRIM_POLYGON:
if (count >= 3) {
- ushort edge_next, edge_finish;
+ uint16_t edge_next, edge_finish;
if (last_vertex_last) {
flags = (DRAW_PIPE_RESET_STIPPLE |
draw_gs_get_input_index(int semantic, int index,
const struct tgsi_shader_info *input_info)
{
- const ubyte *input_semantic_names = input_info->output_semantic_name;
- const ubyte *input_semantic_indices = input_info->output_semantic_index;
+ const uint8_t *input_semantic_names = input_info->output_semantic_name;
+ const uint8_t *input_semantic_indices = input_info->output_semantic_index;
for (int i = 0; i < PIPE_MAX_SHADER_OUTPUTS; i++) {
if (input_semantic_names[i] == semantic &&
input_semantic_indices[i] == index)
*/
static void
do_line(struct draw_context *draw,
- ushort flags,
+ uint16_t flags,
const char *v0,
const char *v1)
{
*/
static void
do_triangle(struct draw_context *draw,
- ushort flags,
+ uint16_t flags,
char *v0,
char *v1,
char *v2)
unsigned prim_flags, \
struct vertex_header *vertices, \
unsigned stride, \
- const ushort *elts, \
+ const uint16_t *elts, \
unsigned count, \
unsigned max_index
*/
struct prim_header {
float det; /**< front/back face determinant */
- ushort flags;
- ushort pad;
+ uint16_t flags; /**< DRAW_PIPE_* bits (edge flags / reset-stipple) — see pipeline stage usage */
+ uint16_t pad; /**< explicit padding; keeps v[] aligned — NOTE(review): value unused, confirm */
struct vertex_header *v[3]; /**< 1 to 3 vertex pointers */
};
const struct prim_header *origPrim)
{
const struct clip_stage *clipper = clip_stage(stage);
- ushort edge_first, edge_middle, edge_last;
+ uint16_t edge_first, edge_middle, edge_last;
if (stage->draw->rasterizer->flatshade_first) {
edge_first = DRAW_PIPE_EDGE_FLAG_0;
struct stipple_stage {
struct draw_stage stage;
unsigned counter;
- ushort pattern;
- ushort factor;
+ uint16_t pattern; /**< 16-bit line-stipple bit pattern, indexed by (counter / factor) & 0xf in stipple_test() */
+ uint16_t factor; /**< stipple repeat factor: each pattern bit covers 'factor' pixels */
bool rectangular;
};
static inline bool
-stipple_test(unsigned counter, ushort pattern, ushort factor)
+stipple_test(unsigned counter, uint16_t pattern, uint16_t factor)
{
unsigned b = (counter / factor) & 0xf;
return !!((1 << b) & pattern);
stage->nr_tmps = nr;
if (nr != 0) {
- ubyte *store = (ubyte *) MALLOC(MAX_VERTEX_SIZE * nr +
+ uint8_t *store = (uint8_t *) MALLOC(MAX_VERTEX_SIZE * nr +
DRAW_EXTRA_VERTICES_PADDING);
if (!store)
return false;
unsigned nr_vertices;
/** Indices */
- ushort *indices;
+ uint16_t *indices;
unsigned max_indices;
unsigned nr_indices;
* have a couple of slots at the beginning (1-dword header, 4-dword
* clip pos) that we ignore here. We only use the vertex->data[] fields.
*/
-static inline ushort
+static inline uint16_t
emit_vertex(struct vbuf_stage *vbuf, struct vertex_header *vertex)
{
if (vertex->vertex_id == UNDEFINED_VERTEX_ID && vbuf->vertex_ptr) {
vertex->vertex_id = vbuf->nr_vertices++;
}
- return (ushort)vertex->vertex_id;
+ return (uint16_t)vertex->vertex_id;
}
* fail, we are basically without usable hardware.
*/
vbuf->render->allocate_vertices(vbuf->render,
- (ushort) vbuf->vertex_size,
- (ushort) vbuf->max_vertices);
+ (uint16_t) vbuf->vertex_size,
+ (uint16_t) vbuf->max_vertices);
vbuf->vertex_ptr = vbuf->vertices =
vbuf->render->map_vertices(vbuf->render);
vbuf->render = render;
vbuf->max_indices = MIN2(render->max_indices, UNDEFINED_VERTEX_ID-1);
- vbuf->indices = (ushort *) align_malloc(vbuf->max_indices *
+ vbuf->indices = (uint16_t *) align_malloc(vbuf->max_indices *
sizeof(vbuf->indices[0]),
16);
if (!vbuf->indices)
/* Current active frontend */
struct draw_pt_front_end *frontend;
enum mesa_prim prim;
- ubyte vertices_per_patch;
+ uint8_t vertices_per_patch;
bool rebind_parameters;
unsigned opt; /**< bitmask of PT_x flags */
for (unsigned j = 0; j < draw->pt.nr_vertex_elements; j++) {
unsigned buf = draw->pt.vertex_element[j].vertex_buffer_index;
- ubyte *ptr = (ubyte *) draw->pt.user.vbuffer[buf].map;
+ uint8_t *ptr = (uint8_t *) draw->pt.user.vbuffer[buf].map;
if (draw->pt.vertex_element[j].instance_divisor) {
ii = draw->instance_id / draw->pt.vertex_element[j].instance_divisor;
break;
case PIPE_FORMAT_B8G8R8A8_UNORM:
{
- ubyte *u = (ubyte *) ptr;
+ uint8_t *u = (uint8_t *) ptr;
debug_printf("BGRA %d %d %d %d @ %p\n", u[0], u[1], u[2], u[3],
(void *) u);
}
break;
case PIPE_FORMAT_A8R8G8B8_UNORM:
{
- ubyte *u = (ubyte *) ptr;
+ uint8_t *u = (uint8_t *) ptr;
debug_printf("ARGB %d %d %d %d @ %p\n", u[0], u[1], u[2], u[3],
(void *) u);
}
void (*run)(struct draw_pt_middle_end *,
const unsigned *fetch_elts,
unsigned fetch_count,
- const ushort *draw_elts,
+ const uint16_t *draw_elts,
unsigned draw_count,
unsigned prim_flags);
bool (*run_linear_elts)(struct draw_pt_middle_end *,
unsigned fetch_start,
unsigned fetch_count,
- const ushort *draw_elts,
+ const uint16_t *draw_elts,
unsigned draw_count,
unsigned prim_flags);
const float (*vertex_data)[4] = (const float (*)[4])vert_info->verts->data;
unsigned vertex_count = vert_info->count;
unsigned stride = vert_info->stride;
- const ushort *elts = prim_info->elts;
+ const uint16_t *elts = prim_info->elts;
struct draw_context *draw = emit->draw;
struct translate *translate = emit->translate;
struct vbuf_render *render = draw->render;
assert(vertex_count <= 65535);
render->allocate_vertices(render,
- (ushort)translate->key.output_stride,
- (ushort)vertex_count);
+ (uint16_t)translate->key.output_stride,
+ (uint16_t)vertex_count);
hw_verts = render->map_vertices(render);
if (!hw_verts) {
assert(count <= 65535);
if (!render->allocate_vertices(render,
- (ushort)translate->key.output_stride,
- (ushort)count))
+ (uint16_t)translate->key.output_stride,
+ (uint16_t)count))
goto fail;
hw_verts = render->map_vertices(render);
*/
const float *constants;
unsigned pitch[PIPE_MAX_ATTRIBS];
- const ubyte *src[PIPE_MAX_ATTRIBS];
+ const uint8_t *src[PIPE_MAX_ATTRIBS];
unsigned prim;
struct draw_vs_variant_key key;
for (unsigned i = 0; i < draw->pt.nr_vertex_buffers; i++) {
fse->active->set_buffer(fse->active,
i,
- ((const ubyte *) draw->pt.user.vbuffer[i].map +
+ ((const uint8_t *) draw->pt.user.vbuffer[i].map +
draw->pt.vertex_buffer[i].buffer_offset),
draw->pt.vertex_buffer[i].stride,
draw->pt.max_index);
draw_do_flush(draw, DRAW_FLUSH_BACKEND);
if (!draw->render->allocate_vertices(draw->render,
- (ushort) fse->key.output_stride,
- (ushort) count))
+ (uint16_t) fse->key.output_stride,
+ (uint16_t) count))
goto fail;
hw_verts = draw->render->map_vertices(draw->render);
}
}
- draw->render->unmap_vertices(draw->render, 0, (ushort) (count - 1));
+ draw->render->unmap_vertices(draw->render, 0, (uint16_t) (count - 1));
/* Draw arrays path to avoid re-emitting index list again and
* again.
fse_run(struct draw_pt_middle_end *middle,
const unsigned *fetch_elts,
unsigned fetch_count,
- const ushort *draw_elts,
+ const uint16_t *draw_elts,
unsigned draw_count,
unsigned prim_flags)
{
draw_do_flush(draw, DRAW_FLUSH_BACKEND);
if (!draw->render->allocate_vertices(draw->render,
- (ushort) fse->key.output_stride,
- (ushort) fetch_count))
+ (uint16_t) fse->key.output_stride,
+ (uint16_t) fetch_count))
goto fail;
hw_verts = draw->render->map_vertices(draw->render);
}
}
- draw->render->unmap_vertices(draw->render, 0, (ushort)(fetch_count - 1));
+ draw->render->unmap_vertices(draw->render, 0, (uint16_t)(fetch_count - 1));
draw->render->draw_elements(draw->render, draw_elts, draw_count);
fse_run_linear_elts(struct draw_pt_middle_end *middle,
unsigned start,
unsigned count,
- const ushort *draw_elts,
+ const uint16_t *draw_elts,
unsigned draw_count,
unsigned prim_flags)
{
draw_do_flush(draw, DRAW_FLUSH_BACKEND);
if (!draw->render->allocate_vertices(draw->render,
- (ushort) fse->key.output_stride,
- (ushort) count))
+ (uint16_t) fse->key.output_stride,
+ (uint16_t) count))
return false;
hw_verts = draw->render->map_vertices(draw->render);
draw->render->draw_elements(draw->render, draw_elts, draw_count);
- draw->render->unmap_vertices(draw->render, 0, (ushort)(count - 1));
+ draw->render->unmap_vertices(draw->render, 0, (uint16_t)(count - 1));
draw->render->release_vertices(draw->render);
fetch_pipeline_run(struct draw_pt_middle_end *middle,
const unsigned *fetch_elts,
unsigned fetch_count,
- const ushort *draw_elts,
+ const uint16_t *draw_elts,
unsigned draw_count,
unsigned prim_flags)
{
fetch_pipeline_linear_run_elts(struct draw_pt_middle_end *middle,
unsigned start,
unsigned count,
- const ushort *draw_elts,
+ const uint16_t *draw_elts,
unsigned draw_count,
unsigned prim_flags)
{
bool free_prim_info = false;
unsigned opt = fpme->opt;
bool clipped = 0;
- ushort *tes_elts_out = NULL;
+ uint16_t *tes_elts_out = NULL;
assert(fetch_info->count > 0);
llvm_middle_end_run(struct draw_pt_middle_end *middle,
const unsigned *fetch_elts,
unsigned fetch_count,
- const ushort *draw_elts,
+ const uint16_t *draw_elts,
unsigned draw_count,
unsigned prim_flags)
{
llvm_middle_end_linear_run_elts(struct draw_pt_middle_end *middle,
unsigned start,
unsigned count,
- const ushort *draw_elts,
+ const uint16_t *draw_elts,
unsigned draw_count,
unsigned prim_flags)
{
const unsigned max_index = draw->pt.user.max_index;
const int elt_bias = draw->pt.user.eltBias;
unsigned fetch_start, fetch_count;
- const ushort *draw_elts = NULL;
+ const uint16_t *draw_elts = NULL;
const unsigned start = istart;
const unsigned end = istart + icount;
debug_printf("warning: index out of range\n");
}
}
- draw_elts = (const ushort *) (ib + istart);
+ draw_elts = (const uint16_t *) (ib + istart);
} else {
/* have to go through vsplit->draw_elts */
if (icount > vsplit->segment_size)
if (idx < min_index || idx > max_index) {
debug_printf("warning: index out of range\n");
}
- vsplit->draw_elts[i] = (ushort) idx;
+ vsplit->draw_elts[i] = (uint16_t) idx;
}
} else {
for (unsigned i = 0; i < icount; i++) {
if (idx < min_index || idx > max_index) {
debug_printf("warning: index out of range\n");
}
- vsplit->draw_elts[i] = (ushort) (idx - min_index);
+ vsplit->draw_elts[i] = (uint16_t) (idx - min_index);
}
}
const struct tgsi_shader_info *input_info)
{
int i;
- const ubyte *input_semantic_names = input_info->output_semantic_name;
- const ubyte *input_semantic_indices = input_info->output_semantic_index;
+ const uint8_t *input_semantic_names = input_info->output_semantic_name;
+ const uint8_t *input_semantic_indices = input_info->output_semantic_index;
for (i = 0; i < PIPE_MAX_SHADER_OUTPUTS; i++) {
if (input_semantic_names[i] == semantic &&
input_semantic_indices[i] == index)
const struct tgsi_shader_info *input_info,
struct draw_vertex_info *output_verts,
struct draw_prim_info *output_prims,
- ushort **elts_out)
+ uint16_t **elts_out)
{
const float (*input)[4] = (const float (*)[4])input_verts->verts->data;
unsigned num_outputs = draw_total_tes_outputs(shader->draw);
unsigned input_stride = input_verts->vertex_size;
unsigned vertex_size = sizeof(struct vertex_header) + num_outputs * 4 * sizeof(float);
- ushort *elts = NULL;
+ uint16_t *elts = NULL;
output_verts->vertex_size = vertex_size;
output_verts->stride = output_verts->vertex_size;
output_verts->count = 0;
const struct tgsi_shader_info *input_info,
struct draw_vertex_info *output_verts,
struct draw_prim_info *output_prims,
- ushort **elts_out);
+ uint16_t **elts_out);
#ifdef DRAW_LLVM_AVAILABLE
void draw_tcs_set_current_variant(struct draw_tess_ctrl_shader *shader,
* something.
*/
bool (*allocate_vertices)(struct vbuf_render *,
- ushort vertex_size,
- ushort nr_vertices);
+ uint16_t vertex_size,
+ uint16_t nr_vertices);
void *(*map_vertices)(struct vbuf_render *);
void (*unmap_vertices)(struct vbuf_render *,
- ushort min_index,
- ushort max_index);
+ uint16_t min_index,
+ uint16_t max_index);
/**
* Notify the renderer of the current primitive when it changes.
* must complete this call, if necessary splitting the index list itself.
*/
void (*draw_elements)(struct vbuf_render *,
- const ushort *indices,
+ const uint16_t *indices,
unsigned nr_indices);
/* Draw non-indexed primitives.
unsigned restart_index,
void *out )
{
- const ubyte *in_ub = (const ubyte *)in;
- ushort *out_us = (ushort *)out;
+ const uint8_t *in_ub = (const uint8_t *)in;
+ uint16_t *out_us = (uint16_t *)out;
unsigned i;
for (i = 0; i < out_nr; i++)
- out_us[i] = (ushort) in_ub[i+start];
+ out_us[i] = (uint16_t) in_ub[i+start];
}
static void generate_linear_ushort( unsigned start,
unsigned nr,
void *out )
{
- ushort *out_us = (ushort *)out;
+ uint16_t *out_us = (uint16_t *)out;
unsigned i;
for (i = 0; i < nr; i++)
- out_us[i] = (ushort)(i + start);
+ out_us[i] = (uint16_t)(i + start);
}
static void generate_linear_uint( unsigned start,
}
}
- ubyte usagemask = 0;
+ uint8_t usagemask = 0;
for (unsigned j = component; j < num_components + component; j++) {
switch (j) {
case 0:
break;
case TGSI_FILE_INPUT:
- info->input_semantic_name[reg] = (ubyte) semName;
- info->input_semantic_index[reg] = (ubyte) semIndex;
- info->input_interpolate[reg] = (ubyte)fulldecl->Interp.Interpolate;
- info->input_interpolate_loc[reg] = (ubyte)fulldecl->Interp.Location;
+ info->input_semantic_name[reg] = (uint8_t) semName;
+ info->input_semantic_index[reg] = (uint8_t) semIndex;
+ info->input_interpolate[reg] = (uint8_t)fulldecl->Interp.Interpolate;
+ info->input_interpolate_loc[reg] = (uint8_t)fulldecl->Interp.Location;
/* Vertex shaders can have inputs with holes between them. */
info->num_inputs = MAX2(info->num_inputs, reg + 1);
break;
case TGSI_FILE_OUTPUT:
- info->output_semantic_name[reg] = (ubyte) semName;
- info->output_semantic_index[reg] = (ubyte) semIndex;
+ info->output_semantic_name[reg] = (uint8_t) semName;
+ info->output_semantic_index[reg] = (uint8_t) semIndex;
info->output_usagemask[reg] |= fulldecl->Declaration.UsageMask;
info->num_outputs = MAX2(info->num_outputs, reg + 1);
if (fulldecl->Declaration.UsageMask & TGSI_WRITEMASK_X) {
- info->output_streams[reg] |= (ubyte)fulldecl->Semantic.StreamX;
+ info->output_streams[reg] |= (uint8_t)fulldecl->Semantic.StreamX;
info->num_stream_output_components[fulldecl->Semantic.StreamX]++;
}
if (fulldecl->Declaration.UsageMask & TGSI_WRITEMASK_Y) {
- info->output_streams[reg] |= (ubyte)fulldecl->Semantic.StreamY << 2;
+ info->output_streams[reg] |= (uint8_t)fulldecl->Semantic.StreamY << 2;
info->num_stream_output_components[fulldecl->Semantic.StreamY]++;
}
if (fulldecl->Declaration.UsageMask & TGSI_WRITEMASK_Z) {
- info->output_streams[reg] |= (ubyte)fulldecl->Semantic.StreamZ << 4;
+ info->output_streams[reg] |= (uint8_t)fulldecl->Semantic.StreamZ << 4;
info->num_stream_output_components[fulldecl->Semantic.StreamZ]++;
}
if (fulldecl->Declaration.UsageMask & TGSI_WRITEMASK_W) {
- info->output_streams[reg] |= (ubyte)fulldecl->Semantic.StreamW << 6;
+ info->output_streams[reg] |= (uint8_t)fulldecl->Semantic.StreamW << 6;
info->num_stream_output_components[fulldecl->Semantic.StreamW]++;
}
{
uint num_tokens;
- ubyte num_inputs;
- ubyte num_outputs;
- ubyte input_semantic_name[PIPE_MAX_SHADER_INPUTS]; /**< TGSI_SEMANTIC_x */
- ubyte input_semantic_index[PIPE_MAX_SHADER_INPUTS];
- ubyte input_interpolate[PIPE_MAX_SHADER_INPUTS];
- ubyte input_interpolate_loc[PIPE_MAX_SHADER_INPUTS];
- ubyte input_usage_mask[PIPE_MAX_SHADER_INPUTS];
- ubyte output_semantic_name[PIPE_MAX_SHADER_OUTPUTS]; /**< TGSI_SEMANTIC_x */
- ubyte output_semantic_index[PIPE_MAX_SHADER_OUTPUTS];
- ubyte output_usagemask[PIPE_MAX_SHADER_OUTPUTS];
- ubyte output_streams[PIPE_MAX_SHADER_OUTPUTS];
-
- ubyte num_system_values;
- ubyte system_value_semantic_name[PIPE_MAX_SHADER_INPUTS];
-
- ubyte processor;
+ uint8_t num_inputs;
+ uint8_t num_outputs;
+ uint8_t input_semantic_name[PIPE_MAX_SHADER_INPUTS]; /**< TGSI_SEMANTIC_x */
+ uint8_t input_semantic_index[PIPE_MAX_SHADER_INPUTS];
+ uint8_t input_interpolate[PIPE_MAX_SHADER_INPUTS];
+ uint8_t input_interpolate_loc[PIPE_MAX_SHADER_INPUTS];
+ uint8_t input_usage_mask[PIPE_MAX_SHADER_INPUTS];
+ uint8_t output_semantic_name[PIPE_MAX_SHADER_OUTPUTS]; /**< TGSI_SEMANTIC_x */
+ uint8_t output_semantic_index[PIPE_MAX_SHADER_OUTPUTS];
+ uint8_t output_usagemask[PIPE_MAX_SHADER_OUTPUTS];
+ uint8_t output_streams[PIPE_MAX_SHADER_OUTPUTS];
+
+ uint8_t num_system_values;
+ uint8_t system_value_semantic_name[PIPE_MAX_SHADER_INPUTS];
+
+ uint8_t processor;
uint file_mask[TGSI_FILE_COUNT]; /**< bitmask of declared registers */
uint file_count[TGSI_FILE_COUNT]; /**< number of declared registers */
int const_file_max[PIPE_MAX_CONSTANT_BUFFERS];
unsigned const_buffers_declared; /**< bitmask of declared const buffers */
unsigned samplers_declared; /**< bitmask of declared samplers */
- ubyte sampler_targets[PIPE_MAX_SHADER_SAMPLER_VIEWS]; /**< TGSI_TEXTURE_x values */
- ubyte sampler_type[PIPE_MAX_SHADER_SAMPLER_VIEWS]; /**< TGSI_RETURN_TYPE_x */
- ubyte num_stream_output_components[4];
+ uint8_t sampler_targets[PIPE_MAX_SHADER_SAMPLER_VIEWS]; /**< TGSI_TEXTURE_x values */
+ uint8_t sampler_type[PIPE_MAX_SHADER_SAMPLER_VIEWS]; /**< TGSI_RETURN_TYPE_x */
+ uint8_t num_stream_output_components[4];
- ubyte input_array_first[PIPE_MAX_SHADER_INPUTS];
- ubyte output_array_first[PIPE_MAX_SHADER_OUTPUTS];
+ uint8_t input_array_first[PIPE_MAX_SHADER_INPUTS];
+ uint8_t output_array_first[PIPE_MAX_SHADER_OUTPUTS];
unsigned array_max[TGSI_FILE_COUNT]; /**< highest index array per register file */
uint immediate_count; /**< number of immediates declared */
bool reads_perpatch_outputs;
bool reads_tessfactor_outputs;
- ubyte colors_read; /**< which color components are read by the FS */
- ubyte colors_written;
+ uint8_t colors_read; /**< which color components are read by the FS */
+ uint8_t colors_written;
bool reads_position; /**< does fragment shader read position? */
bool reads_z; /**< does fragment shader read depth? */
bool reads_samplemask; /**< does fragment shader read sample mask? */
ATTRIB(R32G32_FLOAT, 2, float, float, TO_32_FLOAT)
ATTRIB(R32_FLOAT, 1, float, float, TO_32_FLOAT)
-ATTRIB(R16G16B16A16_FLOAT, 4, float, ushort, TO_16_FLOAT)
-ATTRIB(R16G16B16_FLOAT, 3, float, ushort, TO_16_FLOAT)
-ATTRIB(R16G16_FLOAT, 2, float, ushort, TO_16_FLOAT)
-ATTRIB(R16_FLOAT, 1, float, ushort, TO_16_FLOAT)
+ATTRIB(R16G16B16A16_FLOAT, 4, float, uint16_t, TO_16_FLOAT)
+ATTRIB(R16G16B16_FLOAT, 3, float, uint16_t, TO_16_FLOAT)
+ATTRIB(R16G16_FLOAT, 2, float, uint16_t, TO_16_FLOAT)
+ATTRIB(R16_FLOAT, 1, float, uint16_t, TO_16_FLOAT)
ATTRIB(R32G32B32A32_USCALED, 4, float, unsigned, TO_32_USCALED)
ATTRIB(R32G32B32_USCALED, 3, float, unsigned, TO_32_USCALED)
ATTRIB(R32G32_SNORM, 2, float, int, TO_32_SNORM)
ATTRIB(R32_SNORM, 1, float, int, TO_32_SNORM)
-ATTRIB(R16G16B16A16_USCALED, 4, float, ushort, TO_16_USCALED)
-ATTRIB(R16G16B16_USCALED, 3, float, ushort, TO_16_USCALED)
-ATTRIB(R16G16_USCALED, 2, float, ushort, TO_16_USCALED)
-ATTRIB(R16_USCALED, 1, float, ushort, TO_16_USCALED)
+ATTRIB(R16G16B16A16_USCALED, 4, float, uint16_t, TO_16_USCALED)
+ATTRIB(R16G16B16_USCALED, 3, float, uint16_t, TO_16_USCALED)
+ATTRIB(R16G16_USCALED, 2, float, uint16_t, TO_16_USCALED)
+ATTRIB(R16_USCALED, 1, float, uint16_t, TO_16_USCALED)
ATTRIB(R16G16B16A16_SSCALED, 4, float, short, TO_16_SSCALED)
ATTRIB(R16G16B16_SSCALED, 3, float, short, TO_16_SSCALED)
ATTRIB(R16G16_SSCALED, 2, float, short, TO_16_SSCALED)
ATTRIB(R16_SSCALED, 1, float, short, TO_16_SSCALED)
-ATTRIB(R16G16B16A16_UNORM, 4, float, ushort, TO_16_UNORM)
-ATTRIB(R16G16B16_UNORM, 3, float, ushort, TO_16_UNORM)
-ATTRIB(R16G16_UNORM, 2, float, ushort, TO_16_UNORM)
-ATTRIB(R16_UNORM, 1, float, ushort, TO_16_UNORM)
+ATTRIB(R16G16B16A16_UNORM, 4, float, uint16_t, TO_16_UNORM)
+ATTRIB(R16G16B16_UNORM, 3, float, uint16_t, TO_16_UNORM)
+ATTRIB(R16G16_UNORM, 2, float, uint16_t, TO_16_UNORM)
+ATTRIB(R16_UNORM, 1, float, uint16_t, TO_16_UNORM)
ATTRIB(R16G16B16A16_SNORM, 4, float, short, TO_16_SNORM)
ATTRIB(R16G16B16_SNORM, 3, float, short, TO_16_SNORM)
ATTRIB(R16G16_SNORM, 2, float, short, TO_16_SNORM)
ATTRIB(R16_SNORM, 1, float, short, TO_16_SNORM)
-ATTRIB(R8G8B8A8_USCALED, 4, float, ubyte, TO_8_USCALED)
-ATTRIB(R8G8B8_USCALED, 3, float, ubyte, TO_8_USCALED)
-ATTRIB(R8G8_USCALED, 2, float, ubyte, TO_8_USCALED)
-ATTRIB(R8_USCALED, 1, float, ubyte, TO_8_USCALED)
+ATTRIB(R8G8B8A8_USCALED, 4, float, uint8_t, TO_8_USCALED)
+ATTRIB(R8G8B8_USCALED, 3, float, uint8_t, TO_8_USCALED)
+ATTRIB(R8G8_USCALED, 2, float, uint8_t, TO_8_USCALED)
+ATTRIB(R8_USCALED, 1, float, uint8_t, TO_8_USCALED)
ATTRIB(R8G8B8A8_SSCALED, 4, float, char, TO_8_SSCALED)
ATTRIB(R8G8B8_SSCALED, 3, float, char, TO_8_SSCALED)
ATTRIB(R8G8_SSCALED, 2, float, char, TO_8_SSCALED)
ATTRIB(R8_SSCALED, 1, float, char, TO_8_SSCALED)
-ATTRIB(R8G8B8A8_UNORM, 4, float, ubyte, TO_8_UNORM)
-ATTRIB(R8G8B8_UNORM, 3, float, ubyte, TO_8_UNORM)
-ATTRIB(R8G8_UNORM, 2, float, ubyte, TO_8_UNORM)
-ATTRIB(R8_UNORM, 1, float, ubyte, TO_8_UNORM)
+ATTRIB(R8G8B8A8_UNORM, 4, float, uint8_t, TO_8_UNORM)
+ATTRIB(R8G8B8_UNORM, 3, float, uint8_t, TO_8_UNORM)
+ATTRIB(R8G8_UNORM, 2, float, uint8_t, TO_8_UNORM)
+ATTRIB(R8_UNORM, 1, float, uint8_t, TO_8_UNORM)
ATTRIB(R8G8B8A8_SNORM, 4, float, char, TO_8_SNORM)
ATTRIB(R8G8B8_SNORM, 3, float, char, TO_8_SNORM)
ATTRIB(R32G32_UINT, 2, uint32_t, unsigned, TO_INT)
ATTRIB(R32_UINT, 1, uint32_t, unsigned, TO_INT)
-ATTRIB(R16G16B16A16_UINT, 4, uint32_t, ushort, TO_INT)
-ATTRIB(R16G16B16_UINT, 3, uint32_t, ushort, TO_INT)
-ATTRIB(R16G16_UINT, 2, uint32_t, ushort, TO_INT)
-ATTRIB(R16_UINT, 1, uint32_t, ushort, TO_INT)
+ATTRIB(R16G16B16A16_UINT, 4, uint32_t, uint16_t, TO_INT)
+ATTRIB(R16G16B16_UINT, 3, uint32_t, uint16_t, TO_INT)
+ATTRIB(R16G16_UINT, 2, uint32_t, uint16_t, TO_INT)
+ATTRIB(R16_UINT, 1, uint32_t, uint16_t, TO_INT)
-ATTRIB(R8G8B8A8_UINT, 4, uint32_t, ubyte, TO_INT)
-ATTRIB(R8G8B8_UINT, 3, uint32_t, ubyte, TO_INT)
-ATTRIB(R8G8_UINT, 2, uint32_t, ubyte, TO_INT)
-ATTRIB(R8_UINT, 1, uint32_t, ubyte, TO_INT)
+ATTRIB(R8G8B8A8_UINT, 4, uint32_t, uint8_t, TO_INT)
+ATTRIB(R8G8B8_UINT, 3, uint32_t, uint8_t, TO_INT)
+ATTRIB(R8G8_UINT, 2, uint32_t, uint8_t, TO_INT)
+ATTRIB(R8_UINT, 1, uint32_t, uint8_t, TO_INT)
ATTRIB(R32G32B32A32_SINT, 4, int32_t, int, TO_INT)
ATTRIB(R32G32B32_SINT, 3, int32_t, int, TO_INT)
emit_A8R8G8B8_UNORM(const void *attrib, void *ptr)
{
float *in = (float *)attrib;
- ubyte *out = (ubyte *)ptr;
+ uint8_t *out = (uint8_t *)ptr;
out[0] = TO_8_UNORM(in[3]);
out[1] = TO_8_UNORM(in[0]);
out[2] = TO_8_UNORM(in[1]);
emit_B8G8R8A8_UNORM(const void *attrib, void *ptr)
{
float *in = (float *)attrib;
- ubyte *out = (ubyte *)ptr;
+ uint8_t *out = (uint8_t *)ptr;
out[2] = TO_8_UNORM(in[0]);
out[1] = TO_8_UNORM(in[1]);
out[0] = TO_8_UNORM(in[2]);
static void blitter_check_saved_fb_state(ASSERTED struct blitter_context_priv *ctx)
{
- assert(ctx->base.saved_fb_state.nr_cbufs != (ubyte) ~0);
+ assert(ctx->base.saved_fb_state.nr_cbufs != (uint8_t) ~0);
}
static void blitter_disable_render_cond(struct blitter_context_priv *ctx)
void
debug_dump_ubyte_rgba_bmp(const char *filename,
unsigned width, unsigned height,
- const ubyte *rgba, unsigned stride)
+ const uint8_t *rgba, unsigned stride)
{
FILE *stream;
struct bmp_file_header bmfh;
y = height;
while (y--) {
- const ubyte *ptr = rgba + (stride * y * 4);
+ const uint8_t *ptr = rgba + (stride * y * 4);
for (x = 0; x < width; ++x) {
struct bmp_rgb_quad pixel;
pixel.rgbRed = ptr[x*4 + 0];
float *rgba, unsigned stride);
void debug_dump_ubyte_rgba_bmp(const char *filename,
unsigned width, unsigned height,
- const ubyte *rgba, unsigned stride);
+ const uint8_t *rgba, unsigned stride);
#else
#define debug_dump_image(prefix, format, cpp, width, height, stride, data) ((void)0)
#define debug_dump_surface(pipe, prefix, surface) ((void)0)
void *data)
{
struct pipe_transfer *src_transfer;
- ubyte *map;
+ uint8_t *map;
- map = (ubyte *) pipe_buffer_map_range(pipe,
+ map = (uint8_t *) pipe_buffer_map_range(pipe,
buf,
offset, size,
PIPE_MAP_READ,
* Must be big enough to hold data for all formats (currently 256 bits).
*/
union util_color {
- ubyte ub;
- ushort us;
+ uint8_t ub;
+ uint16_t us;
uint ui[4];
- ushort h[4]; /* half float */
+ uint16_t h[4]; /* half float */
float f[4];
double d[4];
};
* Pack ubyte R,G,B,A into dest pixel.
*/
static inline void
-util_pack_color_ub(ubyte r, ubyte g, ubyte b, ubyte a,
+util_pack_color_ub(uint8_t r, uint8_t g, uint8_t b, uint8_t a,
enum pipe_format format, union util_color *uc)
{
switch (format) {
*/
default:
{
- ubyte src[4];
+ uint8_t src[4];
src[0] = r;
src[1] = g;
*/
static inline void
util_unpack_color_ub(enum pipe_format format, union util_color *uc,
- ubyte *r, ubyte *g, ubyte *b, ubyte *a)
+ uint8_t *r, uint8_t *g, uint8_t *b, uint8_t *a)
{
switch (format) {
case PIPE_FORMAT_ABGR8888_UNORM:
{
uint p = uc->ui[0];
- *r = (ubyte) ((p >> 24) & 0xff);
- *g = (ubyte) ((p >> 16) & 0xff);
- *b = (ubyte) ((p >> 8) & 0xff);
- *a = (ubyte) ((p >> 0) & 0xff);
+ *r = (uint8_t) ((p >> 24) & 0xff);
+ *g = (uint8_t) ((p >> 16) & 0xff);
+ *b = (uint8_t) ((p >> 8) & 0xff);
+ *a = (uint8_t) ((p >> 0) & 0xff);
}
return;
case PIPE_FORMAT_XBGR8888_UNORM:
{
uint p = uc->ui[0];
- *r = (ubyte) ((p >> 24) & 0xff);
- *g = (ubyte) ((p >> 16) & 0xff);
- *b = (ubyte) ((p >> 8) & 0xff);
- *a = (ubyte) 0xff;
+ *r = (uint8_t) ((p >> 24) & 0xff);
+ *g = (uint8_t) ((p >> 16) & 0xff);
+ *b = (uint8_t) ((p >> 8) & 0xff);
+ *a = (uint8_t) 0xff;
}
return;
case PIPE_FORMAT_BGRA8888_UNORM:
{
uint p = uc->ui[0];
- *r = (ubyte) ((p >> 16) & 0xff);
- *g = (ubyte) ((p >> 8) & 0xff);
- *b = (ubyte) ((p >> 0) & 0xff);
- *a = (ubyte) ((p >> 24) & 0xff);
+ *r = (uint8_t) ((p >> 16) & 0xff);
+ *g = (uint8_t) ((p >> 8) & 0xff);
+ *b = (uint8_t) ((p >> 0) & 0xff);
+ *a = (uint8_t) ((p >> 24) & 0xff);
}
return;
case PIPE_FORMAT_BGRX8888_UNORM:
{
uint p = uc->ui[0];
- *r = (ubyte) ((p >> 16) & 0xff);
- *g = (ubyte) ((p >> 8) & 0xff);
- *b = (ubyte) ((p >> 0) & 0xff);
- *a = (ubyte) 0xff;
+ *r = (uint8_t) ((p >> 16) & 0xff);
+ *g = (uint8_t) ((p >> 8) & 0xff);
+ *b = (uint8_t) ((p >> 0) & 0xff);
+ *a = (uint8_t) 0xff;
}
return;
case PIPE_FORMAT_ARGB8888_UNORM:
{
uint p = uc->ui[0];
- *r = (ubyte) ((p >> 8) & 0xff);
- *g = (ubyte) ((p >> 16) & 0xff);
- *b = (ubyte) ((p >> 24) & 0xff);
- *a = (ubyte) ((p >> 0) & 0xff);
+ *r = (uint8_t) ((p >> 8) & 0xff);
+ *g = (uint8_t) ((p >> 16) & 0xff);
+ *b = (uint8_t) ((p >> 24) & 0xff);
+ *a = (uint8_t) ((p >> 0) & 0xff);
}
return;
case PIPE_FORMAT_XRGB8888_UNORM:
{
uint p = uc->ui[0];
- *r = (ubyte) ((p >> 8) & 0xff);
- *g = (ubyte) ((p >> 16) & 0xff);
- *b = (ubyte) ((p >> 24) & 0xff);
- *a = (ubyte) 0xff;
+ *r = (uint8_t) ((p >> 8) & 0xff);
+ *g = (uint8_t) ((p >> 16) & 0xff);
+ *b = (uint8_t) ((p >> 24) & 0xff);
+ *a = (uint8_t) 0xff;
}
return;
case PIPE_FORMAT_B5G6R5_UNORM:
{
- ushort p = uc->us;
- *r = (ubyte) (((p >> 8) & 0xf8) | ((p >> 13) & 0x7));
- *g = (ubyte) (((p >> 3) & 0xfc) | ((p >> 9) & 0x3));
- *b = (ubyte) (((p << 3) & 0xf8) | ((p >> 2) & 0x7));
- *a = (ubyte) 0xff;
+ uint16_t p = uc->us;
+ *r = (uint8_t) (((p >> 8) & 0xf8) | ((p >> 13) & 0x7));
+ *g = (uint8_t) (((p >> 3) & 0xfc) | ((p >> 9) & 0x3));
+ *b = (uint8_t) (((p << 3) & 0xf8) | ((p >> 2) & 0x7));
+ *a = (uint8_t) 0xff;
}
return;
case PIPE_FORMAT_B5G5R5X1_UNORM:
{
- ushort p = uc->us;
- *r = (ubyte) (((p >> 7) & 0xf8) | ((p >> 12) & 0x7));
- *g = (ubyte) (((p >> 2) & 0xf8) | ((p >> 7) & 0x7));
- *b = (ubyte) (((p << 3) & 0xf8) | ((p >> 2) & 0x7));
- *a = (ubyte) 0xff;
+ uint16_t p = uc->us;
+ *r = (uint8_t) (((p >> 7) & 0xf8) | ((p >> 12) & 0x7));
+ *g = (uint8_t) (((p >> 2) & 0xf8) | ((p >> 7) & 0x7));
+ *b = (uint8_t) (((p << 3) & 0xf8) | ((p >> 2) & 0x7));
+ *a = (uint8_t) 0xff;
}
return;
case PIPE_FORMAT_B5G5R5A1_UNORM:
{
- ushort p = uc->us;
- *r = (ubyte) (((p >> 7) & 0xf8) | ((p >> 12) & 0x7));
- *g = (ubyte) (((p >> 2) & 0xf8) | ((p >> 7) & 0x7));
- *b = (ubyte) (((p << 3) & 0xf8) | ((p >> 2) & 0x7));
- *a = (ubyte) (0xff * (p >> 15));
+ uint16_t p = uc->us;
+ *r = (uint8_t) (((p >> 7) & 0xf8) | ((p >> 12) & 0x7));
+ *g = (uint8_t) (((p >> 2) & 0xf8) | ((p >> 7) & 0x7));
+ *b = (uint8_t) (((p << 3) & 0xf8) | ((p >> 2) & 0x7));
+ *a = (uint8_t) (0xff * (p >> 15));
}
return;
case PIPE_FORMAT_B4G4R4A4_UNORM:
{
- ushort p = uc->us;
- *r = (ubyte) (((p >> 4) & 0xf0) | ((p >> 8) & 0xf));
- *g = (ubyte) (((p >> 0) & 0xf0) | ((p >> 4) & 0xf));
- *b = (ubyte) (((p << 4) & 0xf0) | ((p >> 0) & 0xf));
- *a = (ubyte) (((p >> 8) & 0xf0) | ((p >> 12) & 0xf));
+ uint16_t p = uc->us;
+ *r = (uint8_t) (((p >> 4) & 0xf0) | ((p >> 8) & 0xf));
+ *g = (uint8_t) (((p >> 0) & 0xf0) | ((p >> 4) & 0xf));
+ *b = (uint8_t) (((p << 4) & 0xf0) | ((p >> 0) & 0xf));
+ *a = (uint8_t) (((p >> 8) & 0xf0) | ((p >> 12) & 0xf));
}
return;
case PIPE_FORMAT_A8_UNORM:
{
- ubyte p = uc->ub;
- *r = *g = *b = (ubyte) 0xff;
+ uint8_t p = uc->ub;
+ *r = *g = *b = (uint8_t) 0xff;
*a = p;
}
return;
case PIPE_FORMAT_L8_UNORM:
{
- ubyte p = uc->ub;
+ uint8_t p = uc->ub;
*r = *g = *b = p;
- *a = (ubyte) 0xff;
+ *a = (uint8_t) 0xff;
}
return;
case PIPE_FORMAT_I8_UNORM:
{
- ubyte p = uc->ub;
+ uint8_t p = uc->ub;
*r = *g = *b = *a = p;
}
return;
*r = float_to_ubyte(p[0]);
*g = float_to_ubyte(p[1]);
*b = float_to_ubyte(p[2]);
- *a = (ubyte) 0xff;
+ *a = (uint8_t) 0xff;
}
return;
const float *p = &uc->f[0];
*r = float_to_ubyte(p[0]);
*g = float_to_ubyte(p[1]);
- *b = *a = (ubyte) 0xff;
+ *b = *a = (uint8_t) 0xff;
}
return;
{
const float *p = &uc->f[0];
*r = float_to_ubyte(p[0]);
- *g = *b = *a = (ubyte) 0xff;
+ *g = *b = *a = (uint8_t) 0xff;
}
return;
*/
default:
{
- ubyte dst[4];
+ uint8_t dst[4];
util_format_read_4ub(format, dst, 0, uc, 0, 0, 0, 1, 1);
*r = dst[0];
static inline void
util_pack_color(const float rgba[4], enum pipe_format format, union util_color *uc)
{
- ubyte r = 0;
- ubyte g = 0;
- ubyte b = 0;
- ubyte a = 0;
+ uint8_t r = 0;
+ uint8_t g = 0;
+ uint8_t b = 0;
+ uint8_t a = 0;
if (util_format_get_component_bits(format, UTIL_FORMAT_COLORSPACE_RGB, 0) <= 8) {
/* format uses 8-bit components or less */
* Pack 4 ubytes into a 4-byte word
*/
static inline unsigned
-pack_ub4(ubyte b0, ubyte b1, ubyte b2, ubyte b3)
+pack_ub4(uint8_t b0, uint8_t b1, uint8_t b2, uint8_t b3)
{
return ((((unsigned int)b0) << 0) |
(((unsigned int)b1) << 8) |
{
static const uint bit31 = 1u << 31;
struct pipe_transfer *transfer;
- ubyte *data;
+ uint8_t *data;
int i, j;
/* map texture memory */
__m128i m128i;
vector signed int m128si;
vector unsigned int m128ui;
- ubyte ub[16];
- ushort us[8];
+ uint8_t ub[16];
+ uint16_t us[8];
int i[4];
uint ui[4];
} __m128i_union;
void *
util_make_geometry_passthrough_shader(struct pipe_context *pipe,
uint num_attribs,
- const ubyte *semantic_names,
- const ubyte *semantic_indexes)
+ const uint8_t *semantic_names,
+ const uint8_t *semantic_indexes)
{
static const unsigned zero[4] = {0, 0, 0, 0};
util_make_tess_ctrl_passthrough_shader(struct pipe_context *pipe,
uint num_vs_outputs,
uint num_tes_inputs,
- const ubyte *vs_semantic_names,
- const ubyte *vs_semantic_indexes,
- const ubyte *tes_semantic_names,
- const ubyte *tes_semantic_indexes,
+ const uint8_t *vs_semantic_names,
+ const uint8_t *vs_semantic_indexes,
+ const uint8_t *tes_semantic_names,
+ const uint8_t *tes_semantic_indexes,
const unsigned vertices_per_patch)
{
unsigned i, j;
extern void *
util_make_geometry_passthrough_shader(struct pipe_context *pipe,
uint num_attribs,
- const ubyte *semantic_names,
- const ubyte *semantic_indexes);
+ const uint8_t *semantic_names,
+ const uint8_t *semantic_indexes);
void *
util_make_fs_pack_color_zs(struct pipe_context *pipe,
util_make_tess_ctrl_passthrough_shader(struct pipe_context *pipe,
uint num_vs_outputs,
uint num_tes_inputs,
- const ubyte *vs_semantic_names,
- const ubyte *vs_semantic_indexes,
- const ubyte *tes_semantic_names,
- const ubyte *tes_semantic_indexes,
+ const uint8_t *vs_semantic_names,
+ const uint8_t *vs_semantic_indexes,
+ const uint8_t *tes_semantic_names,
+ const uint8_t *tes_semantic_indexes,
const unsigned vertices_per_patch);
void *
union m128i {
__m128i m;
- ubyte ub[16];
- ushort us[8];
+ uint8_t ub[16];
+ uint16_t us[8];
uint ui[4];
};
* Position and sizes are in pixels.
*/
void
-util_copy_box(ubyte * dst,
+util_copy_box(uint8_t * dst,
enum pipe_format format,
unsigned dst_stride, uint64_t dst_slice_stride,
unsigned dst_x, unsigned dst_y, unsigned dst_z,
unsigned width, unsigned height, unsigned depth,
- const ubyte * src,
+ const uint8_t * src,
int src_stride, uint64_t src_slice_stride,
unsigned src_x, unsigned src_y, unsigned src_z)
{
void
-util_fill_rect(ubyte * dst,
+util_fill_rect(uint8_t * dst,
enum pipe_format format,
unsigned dst_stride,
unsigned dst_x,
break;
default:
for (i = 0; i < height; i++) {
- ubyte *row = dst;
+ uint8_t *row = dst;
for (j = 0; j < width; j++) {
memcpy(row, uc, blocksize);
row += blocksize;
void
-util_fill_box(ubyte * dst,
+util_fill_box(uint8_t * dst,
enum pipe_format format,
unsigned stride,
uintptr_t layer_stride,
static void
util_clear_color_texture_helper(struct pipe_transfer *dst_trans,
- ubyte *dst_map,
+ uint8_t *dst_map,
enum pipe_format format,
const union pipe_color_union *color,
unsigned width, unsigned height, unsigned depth)
unsigned width, unsigned height, unsigned depth)
{
struct pipe_transfer *dst_trans;
- ubyte *dst_map;
+ uint8_t *dst_map;
dst_map = pipe_texture_map_3d(pipe,
texture,
unsigned width, unsigned height)
{
struct pipe_transfer *dst_trans;
- ubyte *dst_map;
+ uint8_t *dst_map;
assert(dst->texture);
if (!dst->texture)
}
static void
-util_fill_zs_rect(ubyte *dst_map,
+util_fill_zs_rect(uint8_t *dst_map,
enum pipe_format format,
bool need_rmw,
unsigned clear_flags,
}
void
-util_fill_zs_box(ubyte *dst,
+util_fill_zs_box(uint8_t *dst,
enum pipe_format format,
bool need_rmw,
unsigned clear_flags,
unsigned width, unsigned height, unsigned depth)
{
struct pipe_transfer *dst_trans;
- ubyte *dst_map;
+ uint8_t *dst_map;
bool need_rmw = false;
if ((clear_flags & PIPE_CLEAR_DEPTHSTENCIL) &&
const struct pipe_resource *texture);
extern void
-util_copy_box(ubyte * dst,
+util_copy_box(uint8_t * dst,
enum pipe_format format,
unsigned dst_stride, uint64_t dst_slice_stride,
unsigned dst_x, unsigned dst_y, unsigned dst_z,
unsigned width, unsigned height, unsigned depth,
- const ubyte * src,
+ const uint8_t * src,
int src_stride, uint64_t src_slice_stride,
unsigned src_x, unsigned src_y, unsigned src_z);
extern void
-util_fill_rect(ubyte * dst, enum pipe_format format,
+util_fill_rect(uint8_t * dst, enum pipe_format format,
unsigned dst_stride, unsigned dst_x, unsigned dst_y,
unsigned width, unsigned height, union util_color *uc);
extern void
-util_fill_box(ubyte * dst, enum pipe_format format,
+util_fill_box(uint8_t * dst, enum pipe_format format,
unsigned stride, uintptr_t layer_stride,
unsigned x, unsigned y, unsigned z,
unsigned width, unsigned height, unsigned depth,
union util_color *uc);
extern void
-util_fill_zs_box(ubyte *dst, enum pipe_format format,
+util_fill_zs_box(uint8_t *dst, enum pipe_format format,
bool need_rmw, unsigned clear_flags, unsigned stride,
unsigned layer_stride, unsigned width,
unsigned height, unsigned depth,
struct tc_sampler_states {
struct tc_call_base base;
- ubyte shader, start, count;
+ uint8_t shader, start, count;
void *slot[0]; /* more will be allocated if needed */
};
struct tc_patch_vertices {
struct tc_call_base base;
- ubyte patch_vertices;
+ uint8_t patch_vertices;
};
static uint16_t
struct tc_constant_buffer_base {
struct tc_call_base base;
- ubyte shader, index;
+ uint8_t shader, index;
bool is_null;
};
struct tc_inlinable_constants {
struct tc_call_base base;
- ubyte shader;
- ubyte num_values;
+ uint8_t shader;
+ uint8_t num_values;
uint32_t values[MAX_INLINABLE_UNIFORMS];
};
struct tc_scissors {
struct tc_call_base base;
- ubyte start, count;
+ uint8_t start, count;
struct pipe_scissor_state slot[0]; /* more will be allocated if needed */
};
struct tc_viewports {
struct tc_call_base base;
- ubyte start, count;
+ uint8_t start, count;
struct pipe_viewport_state slot[0]; /* more will be allocated if needed */
};
struct tc_window_rects {
struct tc_call_base base;
bool include;
- ubyte count;
+ uint8_t count;
struct pipe_scissor_state slot[0]; /* more will be allocated if needed */
};
struct tc_sampler_views {
struct tc_call_base base;
- ubyte shader, start, count, unbind_num_trailing_slots;
+ uint8_t shader, start, count, unbind_num_trailing_slots;
struct pipe_sampler_view *slot[0]; /* more will be allocated if needed */
};
struct tc_shader_images {
struct tc_call_base base;
- ubyte shader, start, count;
- ubyte unbind_num_trailing_slots;
+ uint8_t shader, start, count;
+ uint8_t unbind_num_trailing_slots;
struct pipe_image_view slot[0]; /* more will be allocated if needed */
};
struct tc_shader_buffers {
struct tc_call_base base;
- ubyte shader, start, count;
+ uint8_t shader, start, count;
bool unbind;
unsigned writable_bitmask;
struct pipe_shader_buffer slot[0]; /* more will be allocated if needed */
struct tc_vertex_buffers {
struct tc_call_base base;
- ubyte start, count;
- ubyte unbind_num_trailing_slots;
+ uint8_t start, count;
+ uint8_t unbind_num_trailing_slots;
struct pipe_vertex_buffer slot[0]; /* more will be allocated if needed */
};
#if !defined(NDEBUG) && TC_DEBUG >= 1
uint32_t sentinel;
#endif
- ushort num_slots;
- ushort call_id;
+ uint16_t num_slots;
+ uint16_t call_id;
};
/**
* Return each Z value as four floats in [0,1].
*/
static void
-z16_get_tile_rgba(const ushort *src,
+z16_get_tile_rgba(const uint16_t *src,
unsigned w, unsigned h,
float *p,
unsigned dst_stride)
switch (format) {
case PIPE_FORMAT_Z16_UNORM:
- z16_get_tile_rgba((ushort *) packed, w, h, dst, dst_stride);
+ z16_get_tile_rgba((uint16_t *) packed, w, h, dst, dst_stride);
break;
case PIPE_FORMAT_Z32_UNORM:
z32_get_tile_rgba((unsigned *) packed, w, h, dst, dst_stride);
u_minify(pres->height0, level),
&transfer);
if (res_map) {
- util_copy_rect((ubyte*)map, pres->format, res->dt_stride, 0, 0,
+ util_copy_rect((uint8_t*)map, pres->format, res->dt_stride, 0, 0,
transfer->box.width, transfer->box.height,
- (const ubyte*)res_map, transfer->stride, 0, 0);
+ (const uint8_t*)res_map, transfer->stride, 0, 0);
pipe_texture_unmap(pctx, transfer);
}
winsys->displaytarget_unmap(winsys, res->dt);
mapped_indices = info->has_user_indices ? info->index.user : NULL;
if (!mapped_indices)
mapped_indices = i915_buffer(info->index.resource)->data;
- draw_set_indexes(draw, (ubyte *)mapped_indices, info->index_size, ~0);
+ draw_set_indexes(draw, (uint8_t *)mapped_indices, info->index_size, ~0);
}
if (i915->constants[PIPE_SHADER_VERTEX])
* slot of the user's constant buffer. (set by pipe->set_constant_buffer())
* Else, the bitmask indicates which components are occupied by immediates.
*/
- ubyte constant_flags[I915_MAX_CONSTANT];
+ uint8_t constant_flags[I915_MAX_CONSTANT];
/**
* The mapping between TGSI inputs and hw texture coords.
const char *prim = get_prim_name(ptr[0]);
unsigned i, len;
- ushort *idx = (ushort *)(ptr + 1);
+ uint16_t *idx = (uint16_t *)(ptr + 1);
for (i = 0; idx[i] != 0xffff; i++)
;
*/
static bool
i915_vbuf_render_allocate_vertices(struct vbuf_render *render,
- ushort vertex_size, ushort nr_vertices)
+ uint16_t vertex_size, uint16_t nr_vertices)
{
struct i915_vbuf_render *i915_render = i915_vbuf_render(render);
size_t size = (size_t)vertex_size * (size_t)nr_vertices;
}
static void
-i915_vbuf_render_unmap_vertices(struct vbuf_render *render, ushort min_index,
- ushort max_index)
+i915_vbuf_render_unmap_vertices(struct vbuf_render *render, uint16_t min_index,
+ uint16_t max_index)
{
struct i915_vbuf_render *i915_render = i915_vbuf_render(render);
* If type is zero normal operation assumed.
*/
static void
-draw_generate_indices(struct vbuf_render *render, const ushort *indices,
+draw_generate_indices(struct vbuf_render *render, const uint16_t *indices,
uint32_t nr_indices, unsigned type)
{
struct i915_vbuf_render *i915_render = i915_vbuf_render(render);
static void
i915_vbuf_render_draw_elements(struct vbuf_render *render,
- const ushort *indices, uint32_t nr_indices)
+ const uint16_t *indices, uint32_t nr_indices)
{
struct i915_vbuf_render *i915_render = i915_vbuf_render(render);
struct i915_context *i915 = i915_render->i915;
}
{
- ubyte r = float_to_ubyte(sampler->border_color.f[0]);
- ubyte g = float_to_ubyte(sampler->border_color.f[1]);
- ubyte b = float_to_ubyte(sampler->border_color.f[2]);
- ubyte a = float_to_ubyte(sampler->border_color.f[3]);
+ uint8_t r = float_to_ubyte(sampler->border_color.f[0]);
+ uint8_t g = float_to_ubyte(sampler->border_color.f[1]);
+ uint8_t b = float_to_ubyte(sampler->border_color.f[2]);
+ uint8_t a = float_to_ubyte(sampler->border_color.f[3]);
cso->state[2] = I915PACKCOLOR8888(r, g, b, a);
}
return cso;
if (depth_stencil->alpha_enabled) {
int test = i915_translate_compare_func(depth_stencil->alpha_func);
- ubyte refByte = float_to_ubyte(depth_stencil->alpha_ref_value);
+ uint8_t refByte = float_to_ubyte(depth_stencil->alpha_ref_value);
cso->depth_LIS6 |=
(S6_ALPHA_TEST_ENABLE | (test << S6_ALPHA_TEST_FUNC_SHIFT) |
/* I915_NEW_STIPPLE
*/
{
- const ubyte *mask = (const ubyte *)i915->poly_stipple.stipple;
- ubyte p[4];
+ const uint8_t *mask = (const uint8_t *)i915->poly_stipple.stipple;
+ uint8_t p[4];
p[0] = mask[12] & 0xf;
p[1] = mask[8] & 0xf;
render->depth_test |= 0x801;
}
- ushort far, near;
+ uint16_t far, near;
near = float_to_ushort(ctx->viewport.near);
far = float_to_ushort(ctx->viewport.far);
uint64_t dirty; /**< Mask of LP_NEW_x flags */
unsigned cs_dirty; /**< Mask of LP_CSNEW_x flags */
/** Mapped vertex buffers */
- ubyte *mapped_vbuffer[PIPE_MAX_ATTRIBS];
+ uint8_t *mapped_vbuffer[PIPE_MAX_ATTRIBS];
/** Vertex format */
struct vertex_info vertex_info;
available_space = info->index.resource->width0;
}
draw_set_indexes(draw,
- (ubyte *) mapped_indices,
+ (uint8_t *) mapped_indices,
info->index_size, available_space);
}
void
lp_jit_buffer_from_pipe(struct lp_jit_buffer *jit, const struct pipe_shader_buffer *buffer)
{
- const ubyte *current_data = NULL;
+ const uint8_t *current_data = NULL;
/* resource buffer */
if (buffer->buffer)
- current_data = (ubyte *)llvmpipe_resource_data(buffer->buffer);
+ current_data = (uint8_t *)llvmpipe_resource_data(buffer->buffer);
if (current_data) {
current_data += buffer->buffer_offset;
{
uint64_t current_size = buffer->buffer_size;
- const ubyte *current_data = NULL;
+ const uint8_t *current_data = NULL;
if (buffer->buffer) {
/* resource buffer */
- current_data = (ubyte *)llvmpipe_resource_data(buffer->buffer);
+ current_data = (uint8_t *)llvmpipe_resource_data(buffer->buffer);
} else if (buffer->user_buffer) {
/* user-space buffer */
- current_data = (ubyte *)buffer->user_buffer;
+ current_data = (uint8_t *)buffer->user_buffer;
}
if (current_data && current_size >= sizeof(float)) {
/* Translate floating point value to 1.15 unsigned fixed-point.
*/
-static inline ushort
+static inline uint16_t
float_to_ufixed_1_15(float f)
{
return CLAMP((unsigned)(f * (float)FIXED15_ONE), 0, FIXED15_ONE);
struct data_block {
- ubyte data[DATA_BLOCK_SIZE];
+ uint8_t data[DATA_BLOCK_SIZE];
unsigned used;
struct data_block *next;
};
}
{
- ubyte *data = block->data + block->used;
+ uint8_t *data = block->data + block->used;
block->used += size;
return data;
}
}
{
- ubyte *data = block->data + block->used;
+ uint8_t *data = block->data + block->used;
unsigned offset = (((uintptr_t)data + alignment - 1) & ~(alignment - 1)) - (uintptr_t)data;
block->used += offset + size;
return data + offset;
void
lp_setup_set_stencil_ref_values(struct lp_setup_context *setup,
- const ubyte refs[2])
+ const uint8_t refs[2])
{
LP_DBG(DEBUG_SETUP, "%s %d %d\n", __func__, refs[0], refs[1]);
struct pipe_resource *buffer = setup->constants[i].current.buffer;
const unsigned current_size = MIN2(setup->constants[i].current.buffer_size,
LP_MAX_TGSI_CONST_BUFFER_SIZE);
- const ubyte *current_data = NULL;
+ const uint8_t *current_data = NULL;
STATIC_ASSERT(DATA_BLOCK_SIZE >= LP_MAX_TGSI_CONST_BUFFER_SIZE);
if (buffer) {
/* resource buffer */
- current_data = (ubyte *) llvmpipe_resource_data(buffer);
+ current_data = (uint8_t *) llvmpipe_resource_data(buffer);
} else if (setup->constants[i].current.user_buffer) {
/* user-space buffer */
- current_data = (ubyte *) setup->constants[i].current.user_buffer;
+ current_data = (uint8_t *) setup->constants[i].current.user_buffer;
}
if (current_data && current_size >= sizeof(float)) {
void
lp_setup_set_stencil_ref_values(struct lp_setup_context *setup,
- const ubyte refs[2]);
+ const uint8_t refs[2]);
void
lp_setup_set_blend_color(struct lp_setup_context *setup,
static bool
lp_setup_allocate_vertices(struct vbuf_render *vbr,
- ushort vertex_size, ushort nr_vertices)
+ uint16_t vertex_size, uint16_t nr_vertices)
{
struct lp_setup_context *setup = lp_setup_context(vbr);
unsigned size = vertex_size * nr_vertices;
static void
lp_setup_unmap_vertices(struct vbuf_render *vbr,
- ushort min_index,
- ushort max_index)
+ uint16_t min_index,
+ uint16_t max_index)
{
ASSERTED struct lp_setup_context *setup = lp_setup_context(vbr);
assert(setup->vertex_buffer_size >= (max_index+1) * setup->vertex_size);
* draw elements / indexed primitives
*/
static void
-lp_setup_draw_elements(struct vbuf_render *vbr, const ushort *indices, uint nr)
+lp_setup_draw_elements(struct vbuf_render *vbr, const uint16_t *indices, uint nr)
{
struct lp_setup_context *setup = lp_setup_context(vbr);
const unsigned stride = setup->vertex_info->size * sizeof(float);
{
for (int i = 0; i < ARRAY_SIZE(csctx->ssbos); ++i) {
struct pipe_resource *buffer = csctx->ssbos[i].current.buffer;
- const ubyte *current_data = NULL;
+ const uint8_t *current_data = NULL;
/* resource buffer */
if (buffer)
- current_data = (ubyte *) llvmpipe_resource_data(buffer);
+ current_data = (uint8_t *) llvmpipe_resource_data(buffer);
if (current_data) {
current_data += csctx->ssbos[i].current.buffer_offset;
case PIPE_SHADER_TESS_EVAL: {
const unsigned size = cb ? cb->buffer_size : 0;
- const ubyte *data = NULL;
+ const uint8_t *data = NULL;
if (constants->buffer) {
- data = (ubyte *) llvmpipe_resource_data(constants->buffer)
+ data = (uint8_t *) llvmpipe_resource_data(constants->buffer)
+ constants->buffer_offset;
}
case PIPE_SHADER_TESS_CTRL:
case PIPE_SHADER_TESS_EVAL: {
const unsigned size = buffer ? buffer->buffer_size : 0;
- const ubyte *data = NULL;
+ const uint8_t *data = NULL;
if (buffer && buffer->buffer)
- data = (ubyte *) llvmpipe_resource_data(buffer->buffer);
+ data = (uint8_t *) llvmpipe_resource_data(buffer->buffer);
if (data)
data += buffer->buffer_offset;
draw_set_mapped_shader_buffer(llvmpipe->draw, shader,
static void
lp_clear_color_texture_helper(struct pipe_transfer *dst_trans,
- ubyte *dst_map,
+ uint8_t *dst_map,
enum pipe_format format,
const union pipe_color_union *color,
unsigned width, unsigned height,
const struct pipe_box *box)
{
struct pipe_transfer *dst_trans;
- ubyte *dst_map;
+ uint8_t *dst_map;
dst_map = llvmpipe_transfer_map_ms(pipe, texture, 0, PIPE_MAP_WRITE,
sample, box, &dst_trans);
need_rmw = true;
}
- ubyte *dst_map = llvmpipe_transfer_map_ms(pipe,
+ uint8_t *dst_map = llvmpipe_transfer_map_ms(pipe,
texture,
0,
(need_rmw ? PIPE_MAP_READ_WRITE :
struct llvmpipe_resource *lpr = llvmpipe_resource(resource);
struct llvmpipe_transfer *lpt;
struct pipe_transfer *pt;
- ubyte *map;
+ uint8_t *map;
enum pipe_format format;
assert(resource);
* Return pointer to a 2D texture image/face/slice.
* No tiled/linear conversion is done.
*/
-ubyte *
+uint8_t *
llvmpipe_get_texture_image_address(struct llvmpipe_resource *lpr,
unsigned face_slice, unsigned level)
{
if (face_slice > 0)
offset += face_slice * tex_image_face_size(lpr, level);
- return (ubyte *) lpr->tex_data + offset;
+ return (uint8_t *) lpr->tex_data + offset;
}
llvmpipe_resource_size(const struct pipe_resource *resource);
-ubyte *
+uint8_t *
llvmpipe_get_texture_image_address(struct llvmpipe_resource *lpr,
unsigned face_slice, unsigned level);
static bool
nv30_render_allocate_vertices(struct vbuf_render *render,
- ushort vertex_size, ushort nr_vertices)
+ uint16_t vertex_size, uint16_t nr_vertices)
{
struct nv30_render *r = nv30_render(render);
struct nv30_context *nv30 = r->nv30;
static void
nv30_render_unmap_vertices(struct vbuf_render *render,
- ushort min_index, ushort max_index)
+ uint16_t min_index, uint16_t max_index)
{
struct nv30_render *r = nv30_render(render);
pipe_buffer_unmap(&r->nv30->base.pipe, r->transfer);
static void
nv30_render_draw_elements(struct vbuf_render *render,
- const ushort *indices, uint count)
+ const uint16_t *indices, uint count)
{
struct nv30_render *r = nv30_render(render);
struct nv30_context *nv30 = r->nv30;
PIPE_MAP_UNSYNCHRONIZED |
PIPE_MAP_READ, &transferi);
draw_set_indexes(draw,
- (ubyte *) map,
+ (uint8_t *) map,
info->index_size, ~0);
} else {
draw_set_indexes(draw, NULL, 0, 0);
nv50_program_destroy(struct nv50_context *nv50, struct nv50_program *p)
{
const struct pipe_shader_state pipe = p->pipe;
- const ubyte type = p->type;
+ const uint8_t type = p->type;
if (p->mem) {
if (nv50)
unsigned linear : 1;
unsigned pad : 3;
- ubyte sn; /* semantic name */
- ubyte si; /* semantic index */
+ uint8_t sn; /* semantic name */
+ uint8_t si; /* semantic index */
};
struct nv50_stream_output_state
struct nv50_program {
struct pipe_shader_state pipe;
- ubyte type;
+ uint8_t type;
bool translated;
uint32_t *code;
unsigned parm_size; /* size limit of uniform buffer */
uint32_t tls_space; /* required local memory per thread */
- ubyte max_gpr; /* REG_ALLOC_TEMP */
- ubyte max_out; /* REG_ALLOC_RESULT or FP_RESULT_COUNT */
+ uint8_t max_gpr; /* REG_ALLOC_TEMP */
+ uint8_t max_out; /* REG_ALLOC_RESULT or FP_RESULT_COUNT */
- ubyte in_nr;
- ubyte out_nr;
+ uint8_t in_nr;
+ uint8_t out_nr;
struct nv50_varying in[16];
struct nv50_varying out[16];
struct {
uint32_t attrs[3]; /* VP_ATTR_EN_0,1 and VP_GP_BUILTIN_ATTR_EN */
- ubyte psiz; /* output slot of point size */
- ubyte bfc[2]; /* indices into varying for FFC (FP) or BFC (VP) */
- ubyte edgeflag;
- ubyte clpd[2]; /* output slot of clip distance[i]'s 1st component */
- ubyte clpd_nr;
+ uint8_t psiz; /* output slot of point size */
+ uint8_t bfc[2]; /* indices into varying for FFC (FP) or BFC (VP) */
+ uint8_t edgeflag;
+ uint8_t clpd[2]; /* output slot of clip distance[i]'s 1st component */
+ uint8_t clpd_nr;
bool need_vertex_id;
uint32_t clip_mode;
uint8_t clip_enable; /* mask of defined clip planes */
uint32_t vert_count;
uint8_t prim_type; /* point, line strip or tri strip */
uint8_t has_layer;
- ubyte layerid; /* hw value of layer output */
+ uint8_t layerid; /* hw value of layer output */
uint8_t has_viewport;
- ubyte viewportid; /* hw value of viewport index output */
+ uint8_t viewportid; /* hw value of viewport index output */
} gp;
struct {
nvc0_program_destroy(struct nvc0_context *nvc0, struct nvc0_program *prog)
{
const struct pipe_shader_state pipe = prog->pipe;
- const ubyte type = prog->type;
+ const uint8_t type = prog->type;
if (prog->mem) {
if (nvc0)
struct nvc0_program {
struct pipe_shader_state pipe;
- ubyte type;
+ uint8_t type;
bool translated;
bool need_tls;
uint8_t num_gprs;
nvc0_validate_stencil_ref(struct nvc0_context *nvc0)
{
struct nouveau_pushbuf *push = nvc0->base.pushbuf;
- const ubyte *ref = &nvc0->stencil_ref.ref_value[0];
+ const uint8_t *ref = &nvc0->stencil_ref.ref_value[0];
IMMED_NVC0(push, NVC0_3D(STENCIL_FRONT_FUNC_REF), ref[0]);
IMMED_NVC0(push, NVC0_3D(STENCIL_BACK_FUNC_REF), ref[1]);
}
static bool r300_render_allocate_vertices(struct vbuf_render* render,
- ushort vertex_size,
- ushort count)
+ uint16_t vertex_size,
+ uint16_t count)
{
struct r300_render* r300render = r300_render(render);
struct r300_context* r300 = r300render->r300;
}
static void r300_render_unmap_vertices(struct vbuf_render* render,
- ushort min,
- ushort max)
+ uint16_t min,
+ uint16_t max)
{
struct r300_render* r300render = r300_render(render);
struct r300_context* r300 = r300render->r300;
}
static void r300_render_draw_elements(struct vbuf_render* render,
- const ushort* indices,
+ const uint16_t* indices,
uint count)
{
struct r300_render* r300render = r300_render(render);
uint32_t rs_cull_mode;
uint32_t zb_stencilrefmask;
- ubyte ref_value_front;
+ uint8_t ref_value_front;
};
static bool r300_stencilref_needed(struct r300_context *r300)
struct r600_stencil_ref
{
- ubyte ref_value[2];
- ubyte valuemask[2];
- ubyte writemask[2];
+ uint8_t ref_value[2];
+ uint8_t valuemask[2];
+ uint8_t writemask[2];
};
struct r600_stencil_ref_state {
struct r600_dsa_state {
struct r600_command_buffer buffer;
unsigned alpha_ref;
- ubyte valuemask[2];
- ubyte writemask[2];
+ uint8_t valuemask[2];
+ uint8_t writemask[2];
unsigned zwritemask;
unsigned sx_alpha_test_control;
};
void evergreen_do_fast_color_clear(struct r600_common_context *rctx,
struct pipe_framebuffer_state *fb,
struct r600_atom *fb_state,
- unsigned *buffers, ubyte *dirty_cbufs,
+ unsigned *buffers, uint8_t *dirty_cbufs,
const union pipe_color_union *color);
void r600_init_screen_texture_functions(struct r600_common_screen *rscreen);
void r600_init_context_texture_functions(struct r600_common_context *rctx);
void evergreen_do_fast_color_clear(struct r600_common_context *rctx,
struct pipe_framebuffer_state *fb,
struct r600_atom *fb_state,
- unsigned *buffers, ubyte *dirty_cbufs,
+ unsigned *buffers, uint8_t *dirty_cbufs,
const union pipe_color_union *color)
{
int i;
uint32_t state[8];
uint32_t fmask_state[8];
const struct legacy_surf_level *base_level_info;
- ubyte block_width;
+ uint8_t block_width;
bool is_stencil_sampler;
bool dcc_incompatible;
};
unsigned spi_shader_col_format_alpha;
unsigned spi_shader_col_format_blend;
unsigned spi_shader_col_format_blend_alpha;
- ubyte nr_samples : 5; /* at most 16xAA */
- ubyte log_samples : 3; /* at most 4 = 16xAA */
- ubyte nr_color_samples; /* at most 8xAA */
- ubyte compressed_cb_mask;
- ubyte uncompressed_cb_mask;
- ubyte color_is_int8;
- ubyte color_is_int10;
- ubyte dirty_cbufs;
- ubyte min_bytes_per_pixel;
+ uint8_t nr_samples : 5; /* at most 16xAA */
+ uint8_t log_samples : 3; /* at most 4 = 16xAA */
+ uint8_t nr_color_samples; /* at most 8xAA */
+ uint8_t compressed_cb_mask;
+ uint8_t uncompressed_cb_mask;
+ uint8_t color_is_int8;
+ uint8_t color_is_int10;
+ uint8_t dirty_cbufs;
+ uint8_t min_bytes_per_pixel;
bool dirty_zsbuf;
bool any_dst_linear;
bool CB_has_shader_readable_metadata;
sel->stage == MESA_SHADER_TESS_EVAL ||
sel->stage == MESA_SHADER_GEOMETRY) &&
!shader->key.ge.as_ls && !shader->key.ge.as_es) {
- ubyte *vs_output_param_offset = shader->info.vs_output_param_offset;
+ uint8_t *vs_output_param_offset = shader->info.vs_output_param_offset;
if (sel->stage == MESA_SHADER_GEOMETRY && !shader->key.ge.as_ngg)
vs_output_param_offset = shader->gs_copy_shader->info.vs_output_param_offset;
shader->info.uses_vmem_load_other = true;
if (info->colors_read) {
- ubyte *color = shader->selector->info.color_attr_index;
+ uint8_t *color = shader->selector->info.color_attr_index;
if (shader->key.ps.part.prolog.color_two_side) {
/* BCOLORs are stored after the last input. */
union si_input_info {
struct {
- ubyte semantic;
- ubyte interpolate;
- ubyte fp16_lo_hi_valid;
- ubyte usage_mask;
+ uint8_t semantic;
+ uint8_t interpolate;
+ uint8_t fp16_lo_hi_valid;
+ uint8_t usage_mask;
};
uint32_t _unused; /* this just forces 4-byte alignment */
};
uint32_t options; /* bitmask of SI_PROFILE_* */
- ubyte num_inputs;
- ubyte num_outputs;
+ uint8_t num_inputs;
+ uint8_t num_outputs;
union si_input_info input[PIPE_MAX_SHADER_INPUTS];
- ubyte output_semantic[PIPE_MAX_SHADER_OUTPUTS];
- ubyte output_usagemask[PIPE_MAX_SHADER_OUTPUTS];
- ubyte output_readmask[PIPE_MAX_SHADER_OUTPUTS];
- ubyte output_streams[PIPE_MAX_SHADER_OUTPUTS];
- ubyte output_type[PIPE_MAX_SHADER_OUTPUTS]; /* enum nir_alu_type */
-
- ubyte num_vs_inputs;
- ubyte num_vbos_in_user_sgprs;
- ubyte num_stream_output_components[4];
+ uint8_t output_semantic[PIPE_MAX_SHADER_OUTPUTS];
+ uint8_t output_usagemask[PIPE_MAX_SHADER_OUTPUTS];
+ uint8_t output_readmask[PIPE_MAX_SHADER_OUTPUTS];
+ uint8_t output_streams[PIPE_MAX_SHADER_OUTPUTS];
+ uint8_t output_type[PIPE_MAX_SHADER_OUTPUTS]; /* enum nir_alu_type */
+
+ uint8_t num_vs_inputs;
+ uint8_t num_vbos_in_user_sgprs;
+ uint8_t num_stream_output_components[4];
uint16_t enabled_streamout_buffer_mask;
uint64_t inputs_read; /* "get_unique_index" bits */
uint64_t outputs_written; /* "get_unique_index" bits */
uint32_t patch_outputs_written; /* "get_unique_index_patch" bits */
- ubyte clipdist_mask;
- ubyte culldist_mask;
+ uint8_t clipdist_mask;
+ uint8_t culldist_mask;
uint16_t lshs_vertex_stride;
uint16_t esgs_vertex_stride;
uint16_t gsvs_vertex_size;
- ubyte gs_input_verts_per_prim;
+ uint8_t gs_input_verts_per_prim;
unsigned max_gsvs_emit_size;
/* Set 0xf or 0x0 (4 bits) per each written output.
int constbuf0_num_slots;
uint num_memory_stores;
- ubyte color_attr_index[2];
- ubyte color_interpolate[2];
- ubyte color_interpolate_loc[2];
- ubyte colors_read; /**< which color components are read by the FS */
- ubyte colors_written;
+ uint8_t color_attr_index[2];
+ uint8_t color_interpolate[2];
+ uint8_t color_interpolate_loc[2];
+ uint8_t colors_read; /**< which color components are read by the FS */
+ uint8_t colors_written;
uint16_t output_color_types; /**< Each bit pair is enum si_color_output_type */
bool vs_needs_prolog;
bool color0_writes_all_cbufs; /**< gl_FragColor */
struct si_shader_info info;
enum pipe_shader_type pipe_shader_type;
- ubyte const_and_shader_buf_descriptors_index;
- ubyte sampler_and_images_descriptors_index;
- ubyte cs_shaderbufs_sgpr_index;
- ubyte cs_num_shaderbufs_in_user_sgprs;
- ubyte cs_images_sgpr_index;
- ubyte cs_images_num_sgprs;
- ubyte cs_num_images_in_user_sgprs;
+ uint8_t const_and_shader_buf_descriptors_index;
+ uint8_t sampler_and_images_descriptors_index;
+ uint8_t cs_shaderbufs_sgpr_index;
+ uint8_t cs_num_shaderbufs_in_user_sgprs;
+ uint8_t cs_images_sgpr_index;
+ uint8_t cs_images_num_sgprs;
+ uint8_t cs_num_images_in_user_sgprs;
unsigned ngg_cull_vert_threshold; /* UINT32_MAX = disabled */
enum mesa_prim rast_prim;
/* GCN-specific shader info. */
struct si_shader_binary_info {
- ubyte vs_output_param_offset[NUM_TOTAL_VARYING_SLOTS];
+ uint8_t vs_output_param_offset[NUM_TOTAL_VARYING_SLOTS];
uint32_t vs_output_ps_input_cntl[NUM_TOTAL_VARYING_SLOTS];
- ubyte num_input_sgprs;
- ubyte num_input_vgprs;
+ uint8_t num_input_sgprs;
+ uint8_t num_input_vgprs;
bool uses_vmem_load_other; /* all other VMEM loads and atomics with return */
bool uses_vmem_sampler_or_bvh;
signed char face_vgpr_index;
signed char ancillary_vgpr_index;
signed char sample_coverage_vgpr_index;
bool uses_instanceid;
- ubyte nr_pos_exports;
- ubyte nr_param_exports;
+ uint8_t nr_pos_exports;
+ uint8_t nr_param_exports;
unsigned private_mem_vgprs;
unsigned max_simd_waves;
};
assert(ctx->shader->key.ge.opt.same_patch_vertices && !param_index);
- ubyte semantic = info->input[driver_location].semantic;
+ uint8_t semantic = info->input[driver_location].semantic;
/* Load the TCS input from a VGPR. */
unsigned func_param = ctx->args->ac.tcs_rel_ids.arg_index + 1 +
si_shader_io_get_unique_index(semantic) * 4;
break;
case 2:
constbuf.buffer_offset =
- (ubyte *)sctx->sample_positions.x2 - (ubyte *)sctx->sample_positions.x1;
+ (uint8_t *)sctx->sample_positions.x2 - (uint8_t *)sctx->sample_positions.x1;
break;
case 4:
constbuf.buffer_offset =
- (ubyte *)sctx->sample_positions.x4 - (ubyte *)sctx->sample_positions.x1;
+ (uint8_t *)sctx->sample_positions.x4 - (uint8_t *)sctx->sample_positions.x1;
break;
case 8:
constbuf.buffer_offset =
- (ubyte *)sctx->sample_positions.x8 - (ubyte *)sctx->sample_positions.x1;
+ (uint8_t *)sctx->sample_positions.x8 - (uint8_t *)sctx->sample_positions.x1;
break;
case 16:
constbuf.buffer_offset =
- (ubyte *)sctx->sample_positions.x16 - (ubyte *)sctx->sample_positions.x1;
+ (uint8_t *)sctx->sample_positions.x16 - (uint8_t *)sctx->sample_positions.x1;
break;
default:
PRINT_ERR("Requested an invalid number of samples %i.\n", sctx->framebuffer.nr_samples);
/* 0 = without stencil buffer, 1 = when both Z and S buffers are present */
struct si_dsa_order_invariance order_invariance[2];
- ubyte alpha_func : 3;
+ uint8_t alpha_func : 3;
bool depth_enabled : 1;
bool depth_write_enabled : 1;
bool stencil_enabled : 1;
* to the descriptor array will be stored. */
short shader_userdata_offset;
/* The size of one descriptor. */
- ubyte element_dw_size;
+ uint8_t element_dw_size;
/* If there is only one slot enabled, bind it directly instead of
* uploading descriptors. -1 if disabled. */
signed char slot_index_to_bind_directly;
unsigned min_vertex_count,
unsigned instance_count,
unsigned num_prims,
- ubyte vertices_per_patch)
+ uint8_t vertices_per_patch)
{
if (IS_DRAW_VERTEX_STATE)
return 0;
static void si_shader_gs(struct si_screen *sscreen, struct si_shader *shader)
{
struct si_shader_selector *sel = shader->selector;
- const ubyte *num_components = sel->info.num_stream_output_components;
+ const uint8_t *num_components = sel->info.num_stream_output_components;
unsigned gs_num_invocations = sel->info.base.gs.invocations;
struct si_pm4_state *pm4;
uint64_t va;
unsigned active_query_count;
/** Mapped vertex buffers */
- ubyte *mapped_vbuffer[PIPE_MAX_ATTRIBS];
+ uint8_t *mapped_vbuffer[PIPE_MAX_ATTRIBS];
/** Mapped constant buffers */
struct tgsi_exec_consts_info mapped_constants[PIPE_SHADER_TYPES][PIPE_MAX_CONSTANT_BUFFERS];
}
draw_set_indexes(draw,
- (ubyte *) mapped_indices,
+ (uint8_t *) mapped_indices,
info->index_size, available_space);
}
/* store outputs */
{
- const ubyte *sem_name = var->info.output_semantic_name;
- const ubyte *sem_index = var->info.output_semantic_index;
+ const uint8_t *sem_name = var->info.output_semantic_name;
+ const uint8_t *sem_index = var->info.output_semantic_index;
const uint n = var->info.num_outputs;
uint i;
for (i = 0; i < n; i++) {
static bool
sp_vbuf_allocate_vertices(struct vbuf_render *vbr,
- ushort vertex_size, ushort nr_vertices)
+ uint16_t vertex_size, uint16_t nr_vertices)
{
struct softpipe_vbuf_render *cvbr = softpipe_vbuf_render(vbr);
unsigned size = vertex_size * nr_vertices;
static void
sp_vbuf_unmap_vertices(struct vbuf_render *vbr,
- ushort min_index,
- ushort max_index )
+ uint16_t min_index,
+ uint16_t max_index )
{
struct softpipe_vbuf_render *cvbr = softpipe_vbuf_render(vbr);
assert( cvbr->vertex_buffer_size >= (max_index+1) * cvbr->vertex_size );
* draw elements / indexed primitives
*/
static void
-sp_vbuf_draw_elements(struct vbuf_render *vbr, const ushort *indices, uint nr)
+sp_vbuf_draw_elements(struct vbuf_render *vbr, const uint16_t *indices, uint nr)
{
struct softpipe_vbuf_render *cvbr = softpipe_vbuf_render(vbr);
struct softpipe_context *softpipe = cvbr->softpipe;
float (*dest)[4])
{
struct softpipe_context *softpipe = qs->softpipe;
- ubyte src[4][4], dst[4][4], res[4][4];
+ uint8_t src[4][4], dst[4][4], res[4][4];
uint *src4 = (uint *) src;
uint *dst4 = (uint *) dst;
uint *res4 = (uint *) res;
enum pipe_format format;
unsigned bzzzz[TGSI_QUAD_SIZE]; /**< Z values fetched from depth buffer */
unsigned qzzzz[TGSI_QUAD_SIZE]; /**< Z values from the quad */
- ubyte stencilVals[TGSI_QUAD_SIZE];
+ uint8_t stencilVals[TGSI_QUAD_SIZE];
bool use_shader_stencil_refs;
- ubyte shader_stencil_refs[TGSI_QUAD_SIZE];
+ uint8_t shader_stencil_refs[TGSI_QUAD_SIZE];
struct softpipe_cached_tile *tile;
float minval, maxval;
bool clamp;
for (j = 0; j < TGSI_QUAD_SIZE; j++) {
int x = quad->input.x0 % TILE_SIZE + (j & 1);
int y = quad->input.y0 % TILE_SIZE + (j >> 1);
- tile->data.depth16[y][x] = (ushort) data->bzzzz[j];
+ tile->data.depth16[y][x] = (uint16_t) data->bzzzz[j];
}
break;
case PIPE_FORMAT_Z24X8_UNORM:
{
unsigned passMask = 0x0;
unsigned j;
- ubyte refs[TGSI_QUAD_SIZE];
+ uint8_t refs[TGSI_QUAD_SIZE];
for (j = 0; j < TGSI_QUAD_SIZE; j++) {
if (data->use_shader_stencil_refs)
*/
static void
apply_stencil_op(struct depth_data *data,
- unsigned mask, unsigned op, ubyte ref, ubyte wrtMask)
+ unsigned mask, unsigned op, uint8_t ref, uint8_t wrtMask)
{
unsigned j;
- ubyte newstencil[TGSI_QUAD_SIZE];
- ubyte refs[TGSI_QUAD_SIZE];
+ uint8_t newstencil[TGSI_QUAD_SIZE];
+ uint8_t refs[TGSI_QUAD_SIZE];
for (j = 0; j < TGSI_QUAD_SIZE; j++) {
newstencil[j] = data->stencilVals[j];
{
struct softpipe_context *softpipe = qs->softpipe;
unsigned func, zFailOp, zPassOp, failOp;
- ubyte ref, wrtMask, valMask;
+ uint8_t ref, wrtMask, valMask;
uint face = quad->input.facing;
if (!softpipe->depth_stencil->stencil[1].enabled) {
const float dzdy = quads[0]->posCoef->dady[2];
const float z0 = quads[0]->posCoef->a0[2] + dzdx * fx + dzdy * fy;
struct softpipe_cached_tile *tile;
- ushort (*depth16)[TILE_SIZE];
- ushort init_idepth[4], idepth[4], depth_step;
+ uint16_t (*depth16)[TILE_SIZE];
+ uint16_t init_idepth[4], idepth[4], depth_step;
const float scale = 65535.0;
/* compute scaled depth of the four pixels in first quad */
- init_idepth[0] = (ushort)((z0) * scale);
- init_idepth[1] = (ushort)((z0 + dzdx) * scale);
- init_idepth[2] = (ushort)((z0 + dzdy) * scale);
- init_idepth[3] = (ushort)((z0 + dzdx + dzdy) * scale);
+ init_idepth[0] = (uint16_t)((z0) * scale);
+ init_idepth[1] = (uint16_t)((z0 + dzdx) * scale);
+ init_idepth[2] = (uint16_t)((z0 + dzdy) * scale);
+ init_idepth[3] = (uint16_t)((z0 + dzdx + dzdy) * scale);
- depth_step = (ushort)(dzdx * scale);
+ depth_step = (uint16_t)(dzdx * scale);
tile = sp_get_cached_tile(qs->softpipe->zsbuf_cache, ix, iy, quads[0]->input.layer);
idepth[2] = init_idepth[2] + dx * depth_step;
idepth[3] = init_idepth[3] + dx * depth_step;
- depth16 = (ushort (*)[TILE_SIZE])
+ depth16 = (uint16_t (*)[TILE_SIZE])
&tile->data.depth16[iy % TILE_SIZE][(ix + dx)% TILE_SIZE];
#ifdef ALWAYS
}
#endif
- depth16 = (ushort (*)[TILE_SIZE]) &depth16[0][2];
+ depth16 = (uint16_t (*)[TILE_SIZE]) &depth16[0][2];
quads[i]->inout.mask = mask;
if (quads[i]->inout.mask)
else {
for (i = 0; i < TILE_SIZE; i++) {
for (j = 0; j < TILE_SIZE; j++) {
- tile->data.depth16[i][j] = (ushort) clear_value;
+ tile->data.depth16[i][j] = (uint16_t) clear_value;
}
}
}
float color[TILE_SIZE][TILE_SIZE][4];
uint color32[TILE_SIZE][TILE_SIZE];
uint depth32[TILE_SIZE][TILE_SIZE];
- ushort depth16[TILE_SIZE][TILE_SIZE];
- ubyte stencil8[TILE_SIZE][TILE_SIZE];
+ uint16_t depth16[TILE_SIZE][TILE_SIZE];
+ uint8_t stencil8[TILE_SIZE][TILE_SIZE];
uint colorui128[TILE_SIZE][TILE_SIZE][4];
int colori128[TILE_SIZE][TILE_SIZE][4];
uint64_t depth64[TILE_SIZE][TILE_SIZE];
- ubyte any[1];
+ uint8_t any[1];
} data;
};
svga_hwtnl_draw_arrays(struct svga_hwtnl *hwtnl,
enum mesa_prim prim, unsigned start, unsigned count,
unsigned start_instance, unsigned instance_count,
- ubyte vertices_per_patch);
+ uint8_t vertices_per_patch);
enum pipe_error
svga_hwtnl_draw_range_elements(struct svga_hwtnl *hwtnl,
simple_draw_arrays(struct svga_hwtnl *hwtnl,
enum mesa_prim prim, unsigned start, unsigned count,
unsigned start_instance, unsigned instance_count,
- ubyte vertices_per_patch)
+ uint8_t vertices_per_patch)
{
SVGA3dPrimitiveRange range;
unsigned hw_prim;
svga_hwtnl_draw_arrays(struct svga_hwtnl *hwtnl,
enum mesa_prim prim, unsigned start, unsigned count,
unsigned start_instance, unsigned instance_count,
- ubyte vertices_per_patch)
+ uint8_t vertices_per_patch)
{
enum mesa_prim gen_prim;
unsigned gen_size, gen_nr;
unsigned count,
unsigned start_instance,
unsigned instance_count,
- ubyte vertices_per_patch)
+ uint8_t vertices_per_patch)
{
SVGA3dPrimitiveRange range;
unsigned hw_prim;
*/
static inline SVGA3dPrimitiveType
svga_translate_prim(unsigned mode, unsigned vcount, unsigned *prim_count,
- ubyte vertices_per_patch)
+ uint8_t vertices_per_patch)
{
switch (mode) {
case MESA_PRIM_POINTS:
unsigned count,
unsigned start_instance,
unsigned instance_count,
- ubyte vertices_per_patch);
+ uint8_t vertices_per_patch);
#endif
unsigned num_inputs; /* number of inputs in the current shader */
unsigned position_index; /* position register index */
unsigned input_map_max; /* highest index of mapped inputs */
- ubyte input_map[PIPE_MAX_SHADER_INPUTS];
+ uint8_t input_map[PIPE_MAX_SHADER_INPUTS];
struct {
unsigned num_outputs;
- ubyte output_map[PIPE_MAX_SHADER_OUTPUTS];
+ uint8_t output_map[PIPE_MAX_SHADER_OUTPUTS];
} prevShader;
};
retry_draw_arrays( struct svga_context *svga,
enum mesa_prim prim, unsigned start, unsigned count,
unsigned start_instance, unsigned instance_count,
- ubyte vertices_per_patch)
+ uint8_t vertices_per_patch)
{
enum pipe_error ret;
{
struct pipe_resource b;
- ushort *defined;
+ uint16_t *defined;
struct svga_sampler_view *cached_view;
unsigned size; /**< Approximate size in bytes */
/** array indexed by cube face or 3D/array slice, one bit per mipmap level */
- ushort *rendered_to;
+ uint16_t *rendered_to;
/** array indexed by cube face or 3D/array slice, one bit per mipmap level.
* Set if the level is marked as dirty.
*/
- ushort *dirty;
+ uint16_t *dirty;
enum svga_surface_state surface_state;
struct svga_shader_info
{
- ubyte num_inputs;
- ubyte num_outputs;
-
- ubyte input_semantic_name[PIPE_MAX_SHADER_INPUTS];
- ubyte input_semantic_index[PIPE_MAX_SHADER_INPUTS];
- ubyte input_usage_mask[PIPE_MAX_SHADER_INPUTS];
- ubyte output_semantic_name[PIPE_MAX_SHADER_OUTPUTS];
- ubyte output_semantic_index[PIPE_MAX_SHADER_OUTPUTS];
- ubyte output_usage_mask[PIPE_MAX_SHADER_OUTPUTS];
+ uint8_t num_inputs;
+ uint8_t num_outputs;
+
+ uint8_t input_semantic_name[PIPE_MAX_SHADER_INPUTS];
+ uint8_t input_semantic_index[PIPE_MAX_SHADER_INPUTS];
+ uint8_t input_usage_mask[PIPE_MAX_SHADER_INPUTS];
+ uint8_t output_semantic_name[PIPE_MAX_SHADER_OUTPUTS];
+ uint8_t output_semantic_index[PIPE_MAX_SHADER_OUTPUTS];
+ uint8_t output_usage_mask[PIPE_MAX_SHADER_OUTPUTS];
uint64_t generic_inputs_mask;
uint64_t generic_outputs_mask;
static bool
svga_vbuf_render_allocate_vertices(struct vbuf_render *render,
- ushort vertex_size,
- ushort nr_vertices)
+ uint16_t vertex_size,
+ uint16_t nr_vertices)
{
struct svga_vbuf_render *svga_render = svga_vbuf_render(render);
struct svga_context *svga = svga_render->svga;
static void
svga_vbuf_render_unmap_vertices(struct vbuf_render *render,
- ushort min_index,
- ushort max_index)
+ uint16_t min_index,
+ uint16_t max_index)
{
struct svga_vbuf_render *svga_render = svga_vbuf_render(render);
struct svga_context *svga = svga_render->svga;
static void
svga_vbuf_render_draw_elements(struct vbuf_render *render,
- const ushort *indices,
+ const uint16_t *indices,
uint nr_indices)
{
struct svga_vbuf_render *svga_render = svga_vbuf_render(render);
map = NULL;
if (info->index_size) {
if (info->has_user_indices) {
- map = (ubyte *) info->index.user;
+ map = (uint8_t *) info->index.user;
} else {
map = pipe_buffer_map(&svga->pipe, info->index.resource,
PIPE_MAP_READ |
PIPE_MAP_UNSYNCHRONIZED, &ib_transfer);
}
draw_set_indexes(draw,
- (const ubyte *) map,
+ (const uint8_t *) map,
info->index_size, ~0);
}
unsigned vdecl_offset;
unsigned vdecl_count;
- ushort min_index;
- ushort max_index;
+ uint16_t min_index;
+ uint16_t max_index;
};
/**
/** Translate PIPE_TEXTURE_x to SVGA3DSAMP_x */
-static ubyte
+static uint8_t
svga_tgsi_sampler_type(const struct svga_shader_emitter *emit, int idx)
{
switch (emit->sampler_target[idx]) {
/* Samplers */
unsigned num_samplers;
bool sampler_view[PIPE_MAX_SAMPLERS]; /**< True if sampler view exists*/
- ubyte sampler_target[PIPE_MAX_SAMPLERS]; /**< TGSI_TEXTURE_x */
- ubyte sampler_return_type[PIPE_MAX_SAMPLERS]; /**< TGSI_RETURN_TYPE_x */
+ uint8_t sampler_target[PIPE_MAX_SAMPLERS]; /**< TGSI_TEXTURE_x */
+ uint8_t sampler_return_type[PIPE_MAX_SAMPLERS]; /**< TGSI_RETURN_TYPE_x */
/* Images */
unsigned num_images;
unsigned address_reg_index[MAX_VGPU10_ADDR_REGS];
/* Output register usage masks */
- ubyte output_usage_mask[PIPE_MAX_SHADER_OUTPUTS];
+ uint8_t output_usage_mask[PIPE_MAX_SHADER_OUTPUTS];
/* To map TGSI system value index to VGPU shader input indexes */
- ubyte system_value_indexes[MAX_SYSTEM_VALUES];
+ uint8_t system_value_indexes[MAX_SYSTEM_VALUES];
struct {
/* vertex position scale/translation */
* for the specified stream.
*/
static unsigned
-output_writemask_for_stream(unsigned stream, ubyte output_streams,
- ubyte output_usagemask)
+output_writemask_for_stream(unsigned stream, uint8_t output_streams,
+ uint8_t output_usagemask)
{
unsigned i;
unsigned writemask = 0;
assert(map);
if (map) {
for (int z = 0; z < box.depth; ++z) {
- ubyte *dst = (ubyte*)map + z*transfer->layer_stride;
- const ubyte *src = (const ubyte*)pInitialDataUP->pSysMem + z*pInitialDataUP->SysMemSlicePitch;
+ uint8_t *dst = (uint8_t*)map + z*transfer->layer_stride;
+ const uint8_t *src = (const uint8_t*)pInitialDataUP->pSysMem + z*pInitialDataUP->SysMemSlicePitch;
util_copy_rect(dst,
templat.format,
transfer->stride,
assert(map);
if (map) {
for (int z = 0; z < box.depth; ++z) {
- ubyte *dst = (ubyte*)map + z*transfer->layer_stride;
- const ubyte *src = (const ubyte*)pSysMemUP + z*DepthPitch;
+ uint8_t *dst = (uint8_t*)map + z*transfer->layer_stride;
+ const uint8_t *src = (const uint8_t*)pSysMemUP + z*DepthPitch;
util_copy_rect(dst,
dst_resource->format,
transfer->stride,
} depth[16];
uint8_t patch_vertices;
- ubyte index_size;
+ uint8_t index_size;
unsigned index_offset;
struct pipe_resource *index_buffer;
struct pipe_constant_buffer const_buffer[LVP_SHADER_STAGES][16];
}
static void
-copy_depth_rect(ubyte * dst,
+copy_depth_rect(uint8_t * dst,
enum pipe_format dst_format,
unsigned dst_stride,
unsigned dst_x,
unsigned dst_y,
unsigned width,
unsigned height,
- const ubyte * src,
+ const uint8_t * src,
enum pipe_format src_format,
int src_stride,
unsigned src_x,
}
static void
-copy_depth_box(ubyte *dst,
+copy_depth_box(uint8_t *dst,
enum pipe_format dst_format,
unsigned dst_stride, uint64_t dst_slice_stride,
unsigned dst_x, unsigned dst_y, unsigned dst_z,
unsigned width, unsigned height, unsigned depth,
- const ubyte * src,
+ const uint8_t * src,
enum pipe_format src_format,
int src_stride, uint64_t src_slice_stride,
unsigned src_x, unsigned src_y, unsigned src_z)
LVP_FROM_HANDLE(lvp_image, src_image, copycmd->srcImage);
struct pipe_box box, dbox;
struct pipe_transfer *src_t, *dst_t;
- ubyte *src_data, *dst_data;
+ uint8_t *src_data, *dst_data;
for (uint32_t i = 0; i < copycmd->regionCount; i++) {
box.depth,
src_data, src_format, src_t->stride, src_t->layer_stride, 0, 0, 0);
} else {
- util_copy_box((ubyte *)dst_data, src_format,
+ util_copy_box((uint8_t *)dst_data, src_format,
buffer_layout.row_stride_B,
buffer_layout.image_stride_B,
0, 0, 0,
u_box_2d(0, 0, res->width0, res->height0, &box);
struct pipe_transfer *transfer = NULL;
- ubyte *src = pipe->texture_map(pipe, res, 0, PIPE_MAP_READ, &box,
+ uint8_t *src = pipe->texture_map(pipe, res, 0, PIPE_MAP_READ, &box,
&transfer);
/*
if (y_up) {
/* need to flip image upside down */
- dst = (ubyte *)dst + (res->height0 - 1) * dst_stride;
+ dst = (uint8_t *)dst + (res->height0 - 1) * dst_stride;
dst_stride = -dst_stride;
}
for (unsigned y = 0; y < res->height0; y++)
{
memcpy(dst, src, bpp * res->width0);
- dst = (ubyte *)dst + dst_stride;
+ dst = (uint8_t *)dst + dst_stride;
src += transfer->stride;
}
struct pipe_stencil_ref
{
- ubyte ref_value[2];
+ uint8_t ref_value[2];
};
{
uint16_t width, height;
uint16_t layers; /**< Number of layers in a no-attachment framebuffer */
- ubyte samples; /**< Number of samples in a no-attachment framebuffer */
+ uint8_t samples; /**< Number of samples in a no-attachment framebuffer */
/** multiple color buffers for multiple render targets */
- ubyte nr_cbufs;
+ uint8_t nr_cbufs;
struct pipe_surface *cbufs[PIPE_MAX_COLOR_BUFS];
struct pipe_surface *zsbuf; /**< Z/stencil buffer */
struct gl_program Base;
uint32_t vert_attrib_mask; /**< mask of sourced vertex attribs */
- ubyte num_inputs;
+ uint8_t num_inputs;
/** Maps VARYING_SLOT_x to slot */
- ubyte result_to_output[VARYING_SLOT_MAX];
+ uint8_t result_to_output[VARYING_SLOT_MAX];
};
/**
GLint px, GLint py, GLsizei width, GLsizei height,
const struct gl_pixelstore_attrib *unpack,
const GLubyte *bitmap,
- ubyte *destBuffer, uint destStride)
+ uint8_t *destBuffer, uint destStride)
{
destBuffer += py * destStride + px;
struct st_context *st = st_context(ctx);
struct pipe_context *pipe = st->pipe;
struct pipe_transfer *transfer;
- ubyte *dest;
+ uint8_t *dest;
struct pipe_resource *pt;
if (!st->bitmap.tex_format)
enum pipe_map_flags usage;
struct pipe_transfer *pt;
const GLboolean zoom = ctx->Pixel.ZoomX != 1.0 || ctx->Pixel.ZoomY != 1.0;
- ubyte *stmap;
+ uint8_t *stmap;
struct gl_pixelstore_attrib clippedUnpack = *unpack;
GLubyte *sValues;
GLuint *zValues;
switch (pt->resource->format) {
case PIPE_FORMAT_S8_UINT:
{
- ubyte *dest = stmap + spanY * pt->stride;
+ uint8_t *dest = stmap + spanY * pt->stride;
assert(usage == PIPE_MAP_WRITE);
memcpy(dest, sValues, width);
}
struct pipe_context *pipe = st_context(ctx)->pipe;
enum pipe_map_flags usage;
struct pipe_transfer *ptDraw;
- ubyte *drawMap;
- ubyte *buffer;
+ uint8_t *drawMap;
+ uint8_t *buffer;
int i;
- buffer = malloc(width * height * sizeof(ubyte));
+ buffer = malloc(width * height * sizeof(uint8_t));
if (!buffer) {
_mesa_error(ctx, GL_OUT_OF_MEMORY, "glCopyPixels(stencil)");
return;
/* draw */
/* XXX PixelZoom not handled yet */
for (i = 0; i < height; i++) {
- ubyte *dst;
- const ubyte *src;
+ uint8_t *dst;
+ const uint8_t *src;
int y;
y = i;
(struct gl_vertex_program *)ctx->VertexProgram._Current;
GLfloat win[4];
const GLfloat *color, *texcoord;
- ubyte slot;
+ uint8_t slot;
win[0] = v->data[0][0];
if (_mesa_fb_orientation(ctx->DrawBuffer) == Y_0_TOP)
* else copy the current attrib.
*/
static void
-update_attrib(struct gl_context *ctx, const ubyte *outputMapping,
+update_attrib(struct gl_context *ctx, const uint8_t *outputMapping,
const struct vertex_header *vert,
GLfloat *dest,
GLuint result, GLuint defaultAttrib)
{
const GLfloat *src;
- const ubyte k = outputMapping[result];
+ const uint8_t k = outputMapping[result];
if (k != 0xff)
src = vert->data[k];
else
const GLfloat height = (GLfloat) ctx->DrawBuffer->Height;
struct gl_vertex_program *stvp =
(struct gl_vertex_program *)ctx->VertexProgram._Current;
- const ubyte *outputMapping = stvp->result_to_output;
+ const uint8_t *outputMapping = stvp->result_to_output;
const GLfloat *pos;
GLuint i;
enum pipe_format dst_format, src_format;
unsigned bind;
struct pipe_transfer *tex_xfer;
- ubyte *map = NULL;
+ uint8_t *map = NULL;
int dst_x, dst_y;
/* Validate state (to be sure we have up-to-date framebuffer surfaces)
GLenum gl_target = texImage->TexObject->Target;
unsigned dims;
struct pipe_transfer *tex_xfer;
- ubyte *map = NULL;
+ uint8_t *map = NULL;
bool done = false;
pixels = _mesa_map_pbo_dest(ctx, &ctx->Pack, pixels);
GLuint row, slice;
for (slice = 0; slice < depth; slice++) {
- ubyte *slice_map = map;
+ uint8_t *slice_map = map;
for (row = 0; row < height; row++) {
void *dest = _mesa_image_address(dims, &ctx->Pack, pixels,
memcpy(map, src, bytesPerRow);
}
else {
- ubyte *slice_map = map;
+ uint8_t *slice_map = map;
for (row = 0; row < (unsigned) height; row++) {
void *src = _mesa_image_address(dims, unpack, pixels,
GLboolean empty;
/** An I8 texture image: */
- ubyte *buffer;
+ uint8_t *buffer;
};
struct st_bound_handles
PIPE_MAP_READ, &ib_transfer);
}
- draw_set_indexes(draw, (ubyte *)mapped_indices, info->index_size, ~0);
+ draw_set_indexes(draw, (uint8_t *)mapped_indices, info->index_size, ~0);
}
/* set constant buffer 0 */
/* Determine the (default) output register mapping for each output. */
unsigned num_outputs = 0;
- ubyte output_mapping[VARYING_SLOT_TESS_MAX];
+ uint8_t output_mapping[VARYING_SLOT_TESS_MAX];
memset(output_mapping, 0, sizeof(output_mapping));
for (unsigned attr = 0; attr < VARYING_SLOT_MAX; attr++) {
{
struct pipe_transfer *xfer;
struct pipe_box region;
- ubyte *map;
+ uint8_t *map;
region.x = src->width0 / 2;
region.y = src->height0 / 2;