&state);
}
+/* Returns true if any ALU instruction in the shader produces a 64-bit
+ * float result.  Used to assert that shaders requiring fp64 are only
+ * compiled when the HW supports it or the fp64 software workaround is
+ * enabled.
+ */
+static bool
+nir_shader_uses_64bit_alu(nir_shader *shader)
+{
+ nir_foreach_function(function, shader) {
+ if (!function->impl)
+ continue;
+
+ nir_foreach_block(block, function->impl) {
+ nir_foreach_instr(instr, block) {
+ if (instr->type != nir_instr_type_alu)
+ continue;
+ nir_alu_instr *alu = nir_instr_as_alu(instr);
+ /* Only float ALU results count; 64-bit integer ops are handled
+ * natively and do not need the fp64 emulation library.
+ */
+ if (nir_alu_type_get_base_type(nir_op_infos[alu->op].output_type) != nir_type_float)
+ continue;
+ if (alu->dest.dest.ssa.bit_size == 64)
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
/* Eventually, this will become part of anv_CreateShader. Unfortunately,
* we can't do that yet because we don't have the ability to copy nir.
*/
/* Vulkan uses the separate-shader linking model */
nir->info.separate_shader = true;
- brw_preprocess_nir(compiler, nir, NULL);
+ assert(device->info->has_64bit_float || instance->fp64_workaround_enabled ||
+ !nir_shader_uses_64bit_alu(nir));
+
+ brw_preprocess_nir(compiler, nir, device->fp64_nir);
if (nir->info.stage == MESA_SHADER_MESH && !nir->info.mesh.nv) {
bool progress = false;
#include "anv_private.h"
#include "nir/nir_xfb_info.h"
#include "vulkan/util/vk_util.h"
+#include "compiler/spirv/nir_spirv.h"
+#include "float64_spv.h"
static bool
anv_shader_bin_serialize(struct vk_pipeline_cache_object *object,
vk_pipeline_cache_add_nir(cache, sha1_key, SHA1_KEY_SIZE, nir);
}
+
+/* Loads (or builds) the fp64 emulation NIR library and stores it in
+ * device->fp64_nir.  The library is a SPIR-V module of soft-float64
+ * routines; brw_preprocess_nir() links it into shaders that use 64-bit
+ * float ALU ops on HW without native fp64 support.  The result is
+ * cached in the device's internal pipeline cache so spirv_to_nir() only
+ * runs on a cache miss.
+ */
+void
+anv_load_fp64_shader(struct anv_device *device)
+{
+ const nir_shader_compiler_options *nir_options =
+ device->physical->compiler->nir_options[MESA_SHADER_VERTEX];
+
+ /* Cache key is derived from a fixed name: the library's contents only
+ * change with the driver build, which invalidates the cache anyway.
+ */
+ const char* shader_name = "float64_spv_lib";
+ struct mesa_sha1 sha1_ctx;
+ uint8_t sha1[20];
+ _mesa_sha1_init(&sha1_ctx);
+ _mesa_sha1_update(&sha1_ctx, shader_name, strlen(shader_name));
+ _mesa_sha1_final(&sha1_ctx, sha1);
+
+ device->fp64_nir =
+ anv_device_search_for_nir(device, device->internal_cache,
+ nir_options, sha1, NULL);
+
+ /* The shader was found, no need to call spirv_to_nir() again. */
+ if (device->fp64_nir)
+ return;
+
+ struct spirv_to_nir_options spirv_options = {
+ .caps = {
+ .address = true,
+ .float64 = true,
+ .int8 = true,
+ .int16 = true,
+ .int64 = true,
+ },
+ .environment = MESA_SHADER_VERTEX,
+ .create_library = true
+ };
+
+ /* float64_spv_source is a uint32_t word stream; spirv_to_nir() takes a
+ * word count, hence sizeof / 4.  The stage argument is a
+ * gl_shader_stage, so MESA_SHADER_VERTEX (not the Gallium
+ * PIPE_SHADER_VERTEX enum) must be passed, matching
+ * spirv_options.environment above.
+ */
+ nir_shader* nir =
+ spirv_to_nir(float64_spv_source, sizeof(float64_spv_source) / 4,
+ NULL, 0, MESA_SHADER_VERTEX, "main",
+ &spirv_options, nir_options);
+
+ assert(nir != NULL);
+
+ nir_validate_shader(nir, "after spirv_to_nir");
+ nir_validate_ssa_dominance(nir, "after spirv_to_nir");
+
+ /* Flatten the library: inline all functions and clean up so the
+ * resulting NIR can be linked into consumer shaders.
+ */
+ NIR_PASS_V(nir, nir_lower_variable_initializers, nir_var_function_temp);
+ NIR_PASS_V(nir, nir_lower_returns);
+ NIR_PASS_V(nir, nir_inline_functions);
+ NIR_PASS_V(nir, nir_opt_deref);
+
+ NIR_PASS_V(nir, nir_lower_vars_to_ssa);
+ NIR_PASS_V(nir, nir_copy_prop);
+ NIR_PASS_V(nir, nir_opt_dce);
+ NIR_PASS_V(nir, nir_opt_cse);
+ NIR_PASS_V(nir, nir_opt_gcm, true);
+ NIR_PASS_V(nir, nir_opt_peephole_select, 1, false, false);
+ NIR_PASS_V(nir, nir_opt_dce);
+
+ NIR_PASS_V(nir, nir_lower_explicit_io, nir_var_function_temp,
+ nir_address_format_62bit_generic);
+
+ /* Upload so future devices sharing the cache skip the build above. */
+ anv_device_upload_nir(device, device->internal_cache,
+ nir, sha1);
+
+ device->fp64_nir = nir;
+}
\ No newline at end of file