Gets glmark2 -bdesktop working.

Rather than always running the clear pipeline as the tile background program, select per batch: keep the clear pipeline when the batch clears colour buffer 0, and otherwise bind a new "reload" pipeline that samples the existing framebuffer contents back into the tilebuffer, using one precompiled shader variant per tilebuffer format.
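In outline (a condensed sketch of the agx_flush change in this patch, not
additional code), the tile background program is now picked per batch:

   if (batch->clear & PIPE_CLEAR_COLOR0) {
      /* fast clear: the background program writes the clear colour */
      pipeline_clear = agx_build_clear_pipeline(ctx, dev->internal.clear, colour);
   } else {
      /* no clear: reload the old framebuffer contents by sampling them back
       * into the tilebuffer, one shader variant per tilebuffer format */
      enum agx_format internal = agx_pixel_format[cbuf->format].internal;
      pipeline_clear = agx_build_reload_pipeline(ctx, dev->reload.format[internal], cbuf);
      clear_pipeline_textures = true;
   }

where cbuf and colour abbreviate ctx->batch->cbufs[0] and the uploaded
half-float clear colour.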
Signed-off-by: Alyssa Rosenzweig <alyssa@rosenzweig.io>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/11718>
unsigned rt = (loc - FRAG_RESULT_DATA0);
/* TODO: Reverse-engineer interactions with MRT */
- if (b->shader->did_writeout) {
+ if (b->shader->nir->info.internal) {
+ /* Internal shaders (clear/reload) skip the writeout */
+ } else if (b->shader->did_writeout) {
agx_writeout(b, 0x0004);
} else {
agx_writeout(b, 0xC200);
#include "util/sparse_array.h"
#include "io.h"
+#include "agx_formats.h"
#if __APPLE__
#include <mach/mach.h>
uint32_t clear;
uint32_t store;
} internal;
+
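+ /* Background "reload" shaders, one per tilebuffer format, built alongside
+ * the internal clear/store shaders and packed into a single shader BO */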
+ struct {
+ struct agx_bo *bo;
+ uint32_t format[AGX_NUM_FORMATS];
+ } reload;
};
bool
--- /dev/null
+/*
+ * Copyright (C) 2021 Alyssa Rosenzweig
+ * Copyright (C) 2020-2021 Collabora, Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "agx_state.h"
+#include "compiler/nir/nir_builder.h"
+#include "asahi/compiler/agx_compile.h"
+
+void
+agx_build_reload_shader(struct agx_device *dev)
+{
+ nir_builder b = nir_builder_init_simple_shader(MESA_SHADER_FRAGMENT,
+ &agx_nir_options, "agx_reload");
+ b.shader->info.internal = true;
+
+ nir_variable *out = nir_variable_create(b.shader, nir_var_shader_out,
+ glsl_vector_type(GLSL_TYPE_FLOAT, 4), "output");
+ out->data.location = FRAG_RESULT_DATA0;
+
+ nir_ssa_def *fragcoord = nir_load_frag_coord(&b);
+ nir_ssa_def *coord = nir_channels(&b, fragcoord, 0x3);
+
+ nir_tex_instr *tex = nir_tex_instr_create(b.shader, 1);
+ tex->dest_type = nir_type_float32;
+ tex->sampler_dim = GLSL_SAMPLER_DIM_RECT;
+ tex->op = nir_texop_tex;
+ tex->src[0].src_type = nir_tex_src_coord;
+ tex->src[0].src = nir_src_for_ssa(coord);
+ tex->coord_components = 2;
+ nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32, NULL);
+ nir_builder_instr_insert(&b, &tex->instr);
+ nir_store_var(&b, out, &tex->dest.ssa, 0xFF);
+
+ unsigned offset = 0;
+ unsigned bo_size = 4096;
+
+ struct agx_bo *bo = agx_bo_create(dev, bo_size, AGX_MEMORY_TYPE_SHADER);
+ dev->reload.bo = bo;
+
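+ /* Compile one reload variant per tilebuffer format and pack the binaries
+ * back-to-back at 128-byte alignment; the assert below checks that they
+ * all fit in the 4 KiB shader BO. */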
+ for (unsigned i = 0; i < AGX_NUM_FORMATS; ++i) {
+ struct util_dynarray binary;
+ util_dynarray_init(&binary, NULL);
+
+ nir_shader *s = nir_shader_clone(NULL, b.shader);
+ struct agx_shader_info info;
+
+ struct agx_shader_key key = {
+ .fs.tib_formats[0] = i
+ };
+
+ agx_compile_shader_nir(s, &key, &binary, &info);
+
+ assert(offset + binary.size < bo_size);
+ memcpy(((uint8_t *) bo->ptr.cpu) + offset, binary.data, binary.size);
+
+ dev->reload.format[i] = bo->ptr.gpu + offset;
+ offset += ALIGN_POT(binary.size, 128);
+
+ util_dynarray_fini(&binary);
+ }
+}
memcpy(ctx->batch->encoder_current, stop, sizeof(stop));
/* Emit the commandbuffer */
-
- uint16_t clear_colour[4] = {
- _mesa_float_to_half(ctx->batch->clear_color[0]),
- _mesa_float_to_half(ctx->batch->clear_color[1]),
- _mesa_float_to_half(ctx->batch->clear_color[2]),
- _mesa_float_to_half(ctx->batch->clear_color[3])
- };
+ uint64_t pipeline_clear = 0;
+ bool clear_pipeline_textures = false;
struct agx_device *dev = agx_device(pctx->screen);
- uint64_t pipeline_clear =
- agx_build_clear_pipeline(ctx,
+
+ if (ctx->batch->clear & PIPE_CLEAR_COLOR0) {
+ uint16_t clear_colour[4] = {
+ _mesa_float_to_half(ctx->batch->clear_color[0]),
+ _mesa_float_to_half(ctx->batch->clear_color[1]),
+ _mesa_float_to_half(ctx->batch->clear_color[2]),
+ _mesa_float_to_half(ctx->batch->clear_color[3])
+ };
+
+ pipeline_clear = agx_build_clear_pipeline(ctx,
dev->internal.clear,
agx_pool_upload(&ctx->batch->pool, clear_colour, sizeof(clear_colour)));
+ } else {
+ enum pipe_format fmt = ctx->batch->cbufs[0]->format;
+ enum agx_format internal = agx_pixel_format[fmt].internal;
+ uint32_t shader = dev->reload.format[internal];
+
+ pipeline_clear = agx_build_reload_pipeline(ctx, shader,
+ ctx->batch->cbufs[0]);
+
+ clear_pipeline_textures = true;
+ }
uint64_t pipeline_store =
agx_build_store_pipeline(ctx,
agx_batch_add_bo(batch, batch->encoder);
agx_batch_add_bo(batch, batch->scissor.bo);
agx_batch_add_bo(batch, dev->internal.bo);
+ agx_batch_add_bo(batch, dev->reload.bo);
for (unsigned i = 0; i < batch->nr_cbufs; ++i) {
struct pipe_surface *surf = batch->cbufs[i];
pipeline_null.gpu,
pipeline_clear,
pipeline_store,
- rt0->bo->ptr.gpu);
+ rt0->bo->ptr.gpu,
+ clear_pipeline_textures);
agx_submit_cmdbuf(dev, dev->cmdbuf.handle, dev->memmap.handle, dev->queue.id);
}
uint64_t
+agx_build_reload_pipeline(struct agx_context *ctx, uint32_t code, struct pipe_surface *surf)
+{
+ struct agx_ptr ptr = agx_pool_alloc_aligned(&ctx->batch->pipeline_pool,
+ (1 * AGX_BIND_TEXTURE_LENGTH) +
+ (1 * AGX_BIND_SAMPLER_LENGTH) +
+ AGX_SET_SHADER_EXTENDED_LENGTH + 8,
+ 64);
+
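+ /* The pipeline is a flat record stream: a BIND_TEXTURE and a BIND_SAMPLER
+ * pointing at descriptors in the batch pool, then a SET_SHADER_EXTENDED
+ * selecting the reload shader, terminated by 8 zero bytes. The allocation
+ * above is sized for exactly that. */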
+ uint8_t *record = ptr.cpu;
+ struct agx_ptr sampler = agx_pool_alloc_aligned(&ctx->batch->pool, AGX_SAMPLER_LENGTH, 64);
+ struct agx_ptr texture = agx_pool_alloc_aligned(&ctx->batch->pool, AGX_TEXTURE_LENGTH, 64);
+
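+ /* Sampler for the reload texture: pixel (unnormalized) coordinates with
+ * clamp-to-edge and no mipmapping, so source texels map 1:1 onto the
+ * destination pixels */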
+ agx_pack(sampler.cpu, SAMPLER, cfg) {
+ cfg.magnify_linear = true;
+ cfg.minify_linear = false;
+ cfg.mip_filter = AGX_MIP_FILTER_NONE;
+ cfg.wrap_s = AGX_WRAP_CLAMP_TO_EDGE;
+ cfg.wrap_t = AGX_WRAP_CLAMP_TO_EDGE;
+ cfg.wrap_r = AGX_WRAP_CLAMP_TO_EDGE;
+ cfg.pixel_coordinates = true;
+ cfg.compare_func = AGX_COMPARE_FUNC_ALWAYS;
+ cfg.unk_2 = 0;
+ cfg.unk_3 = 0;
+ }
+
+ agx_pack(texture.cpu, TEXTURE, cfg) {
+ struct agx_resource *rsrc = agx_resource(surf->texture);
+ const struct util_format_description *desc =
+ util_format_description(surf->format);
+
+ cfg.layout = agx_translate_layout(rsrc->modifier);
+ cfg.format = agx_pixel_format[surf->format].hw;
+ cfg.swizzle_r = agx_channel_from_pipe(desc->swizzle[0]);
+ cfg.swizzle_g = agx_channel_from_pipe(desc->swizzle[1]);
+ cfg.swizzle_b = agx_channel_from_pipe(desc->swizzle[2]);
+ cfg.swizzle_a = agx_channel_from_pipe(desc->swizzle[3]);
+ cfg.width = surf->width;
+ cfg.height = surf->height;
+ cfg.levels = 1;
+ cfg.srgb = (desc->colorspace == UTIL_FORMAT_COLORSPACE_SRGB);
+ cfg.unk_1 = rsrc->bo->ptr.gpu;
+ cfg.unk_2 = false;
+
+ cfg.stride = (rsrc->modifier == DRM_FORMAT_MOD_LINEAR) ?
+ (rsrc->slices[0].line_stride - 16) :
+ AGX_RT_STRIDE_TILED;
+ }
+
+ agx_pack(record, BIND_TEXTURE, cfg) {
+ cfg.start = 0;
+ cfg.count = 1;
+ cfg.buffer = texture.gpu;
+ }
+
+ record += AGX_BIND_TEXTURE_LENGTH;
+
+ agx_pack(record, BIND_SAMPLER, cfg) {
+ cfg.start = 0;
+ cfg.count = 1;
+ cfg.buffer = sampler.gpu;
+ }
+
+ record += AGX_BIND_SAMPLER_LENGTH;
+
+ /* TODO: Can we prepack this? */
+ agx_pack(record, SET_SHADER_EXTENDED, cfg) {
+ cfg.code = code;
+ cfg.register_quadwords = 0;
+ cfg.unk_3 = 0x8d;
+ cfg.unk_2 = 0x0d;
+ cfg.unk_2b = 4;
+ cfg.unk_4 = 0;
+ cfg.frag_unk = 0x880100;
+ cfg.preshader_mode = 0; // XXX
+ }
+
+ record += AGX_SET_SHADER_EXTENDED_LENGTH;
+
+ /* End pipeline */
+ memset(record, 0, 8);
+ return ptr.gpu;
+}
+
+uint64_t
agx_build_store_pipeline(struct agx_context *ctx, uint32_t code,
uint64_t render_target)
{
agx_build_store_pipeline(struct agx_context *ctx, uint32_t code,
uint64_t render_target);
+uint64_t
+agx_build_reload_pipeline(struct agx_context *ctx, uint32_t code, struct pipe_surface *surf);
+
/* Add a BO to a batch. This needs to be amortized O(1) since it's called in
* hot paths. To achieve this we model BO lists by bit sets */
BITSET_SET(batch->bo_list, bo->handle);
}
+/* Blit shaders */
+void agx_build_reload_shader(struct agx_device *dev);
+
#endif
uint32_t pipeline_null,
uint32_t pipeline_clear,
uint32_t pipeline_store,
- uint64_t rt0)
+ uint64_t rt0,
+ bool clear_pipeline_textures)
{
struct cmdbuf _cmdbuf = {
.map = (uint32_t *) buf,
EMIT_ZERO_WORDS(cmdbuf, 40);
- EMIT32(cmdbuf, 0xffff8002); // 0x270
+ EMIT32(cmdbuf, 0xffff8002 | (clear_pipeline_textures ? 0x210 : 0)); // 0x270
EMIT32(cmdbuf, 0);
EMIT64(cmdbuf, pipeline_clear | 0x4);
EMIT32(cmdbuf, 0);
dev->internal.bo = bo;
dev->internal.clear = bo->ptr.gpu + clear_offset;
dev->internal.store = bo->ptr.gpu + store_offset;
+
+ agx_build_reload_shader(dev);
}
uint32_t pipeline_null,
uint32_t pipeline_clear,
uint32_t pipeline_store,
- uint64_t rt0);
+ uint64_t rt0,
+ bool clear_pipeline_textures);
void
demo_mem_map(void *map, size_t size, unsigned *handles,
# SOFTWARE.
files_asahi = files(
+ 'agx_blit.c',
'agx_pipe.c',
'agx_state.c',
'agx_uniforms.c',