anv: remove vk_sample_locations_state from emit_multisample
src/intel/vulkan/genX_state.c
1 /*
2  * Copyright © 2015 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  */
23
24 #include <assert.h>
25 #include <stdbool.h>
26 #include <string.h>
27 #include <unistd.h>
28 #include <fcntl.h>
29
30 #include "anv_private.h"
31
32 #include "common/intel_aux_map.h"
33 #include "common/intel_sample_positions.h"
34 #include "common/intel_pixel_hash.h"
35 #include "genxml/gen_macros.h"
36 #include "genxml/genX_pack.h"
37
38 #include "vk_standard_sample_locations.h"
39 #include "vk_util.h"
40
41 static void
42 genX(emit_slice_hashing_state)(struct anv_device *device,
43                                struct anv_batch *batch)
44 {
45 #if GFX_VER == 11
46    /* Gfx11 hardware has two pixel pipes at most. */
47    for (unsigned i = 2; i < ARRAY_SIZE(device->info->ppipe_subslices); i++)
48       assert(device->info->ppipe_subslices[i] == 0);
49
50    if (device->info->ppipe_subslices[0] == device->info->ppipe_subslices[1])
51      return;
52
53    if (!device->slice_hash.alloc_size) {
54       unsigned size = GENX(SLICE_HASH_TABLE_length) * 4;
55       device->slice_hash =
56          anv_state_pool_alloc(&device->dynamic_state_pool, size, 64);
57
58       const bool flip = device->info->ppipe_subslices[0] <
59                      device->info->ppipe_subslices[1];
60       struct GENX(SLICE_HASH_TABLE) table;
61       intel_compute_pixel_hash_table_3way(16, 16, 3, 3, flip, table.Entry[0]);
62
63       GENX(SLICE_HASH_TABLE_pack)(NULL, device->slice_hash.map, &table);
64    }
65
66    anv_batch_emit(batch, GENX(3DSTATE_SLICE_TABLE_STATE_POINTERS), ptr) {
67       ptr.SliceHashStatePointerValid = true;
68       ptr.SliceHashTableStatePointer = device->slice_hash.offset;
69    }
70
71    anv_batch_emit(batch, GENX(3DSTATE_3D_MODE), mode) {
72       mode.SliceHashingTableEnable = true;
73    }
74 #elif GFX_VERx10 == 120
75    /* For each n calculate ppipes_of[n], equal to the number of pixel pipes
76     * present with n active dual subslices.
77     */
78    unsigned ppipes_of[3] = {};
79
80    for (unsigned n = 0; n < ARRAY_SIZE(ppipes_of); n++) {
81       for (unsigned p = 0; p < 3; p++)
82          ppipes_of[n] += (device->info->ppipe_subslices[p] == n);
83    }
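
   /* Illustrative example (hypothetical fusing): with ppipe_subslices = {2, 2, 1}
    * the loop above yields ppipes_of = {0, 1, 2} (no pipe with zero DSS, one pipe
    * with one DSS, two pipes with two DSS), which selects the 5x4 three-way table
    * in the 3DSTATE_SUBSLICE_HASH_TABLE packet emitted below.
    */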
84
85    /* Gfx12 has three pixel pipes. */
86    for (unsigned p = 3; p < ARRAY_SIZE(device->info->ppipe_subslices); p++)
87       assert(device->info->ppipe_subslices[p] == 0);
88
89    if (ppipes_of[2] == 3 || ppipes_of[0] == 2) {
90       /* All three pixel pipes have the maximum number of active dual
91        * subslices, or there is only one active pixel pipe: Nothing to do.
92        */
93       return;
94    }
95
96    anv_batch_emit(batch, GENX(3DSTATE_SUBSLICE_HASH_TABLE), p) {
97       p.SliceHashControl[0] = TABLE_0;
98
99       if (ppipes_of[2] == 2 && ppipes_of[0] == 1)
100          intel_compute_pixel_hash_table_3way(8, 16, 2, 2, 0, p.TwoWayTableEntry[0]);
101       else if (ppipes_of[2] == 1 && ppipes_of[1] == 1 && ppipes_of[0] == 1)
102          intel_compute_pixel_hash_table_3way(8, 16, 3, 3, 0, p.TwoWayTableEntry[0]);
103
104       if (ppipes_of[2] == 2 && ppipes_of[1] == 1)
105          intel_compute_pixel_hash_table_3way(8, 16, 5, 4, 0, p.ThreeWayTableEntry[0]);
106       else if (ppipes_of[2] == 2 && ppipes_of[0] == 1)
107          intel_compute_pixel_hash_table_3way(8, 16, 2, 2, 0, p.ThreeWayTableEntry[0]);
108       else if (ppipes_of[2] == 1 && ppipes_of[1] == 1 && ppipes_of[0] == 1)
109          intel_compute_pixel_hash_table_3way(8, 16, 3, 3, 0, p.ThreeWayTableEntry[0]);
110       else
111          unreachable("Illegal fusing.");
112    }
113
114    anv_batch_emit(batch, GENX(3DSTATE_3D_MODE), p) {
115       p.SubsliceHashingTableEnable = true;
116       p.SubsliceHashingTableEnableMask = true;
117    }
118 #elif GFX_VERx10 == 125
119    uint32_t ppipe_mask = 0;
120    for (unsigned p = 0; p < ARRAY_SIZE(device->info->ppipe_subslices); p++) {
121       if (device->info->ppipe_subslices[p])
122          ppipe_mask |= (1u << p);
123    }
124    assert(ppipe_mask);
125
126    if (!device->slice_hash.alloc_size) {
127       unsigned size = GENX(SLICE_HASH_TABLE_length) * 4;
128       device->slice_hash =
129          anv_state_pool_alloc(&device->dynamic_state_pool, size, 64);
130
131       struct GENX(SLICE_HASH_TABLE) table;
132
133       /* Note that the hardware expects an array of 7 tables, each
134        * intended to specify the pixel pipe hashing behavior for one
135        * possible slice count between 2 and 8. That doesn't actually
136        * work, due among other reasons to hardware bugs that cause the
137        * GPU to erroneously access the table at the wrong index in
138        * some cases, so in practice all 7 tables need to be
139        * initialized to the same value.
140        */
141       for (unsigned i = 0; i < 7; i++)
142          intel_compute_pixel_hash_table_nway(16, 16, ppipe_mask, table.Entry[i][0]);
143
144       GENX(SLICE_HASH_TABLE_pack)(NULL, device->slice_hash.map, &table);
145    }
146
147    anv_batch_emit(batch, GENX(3DSTATE_SLICE_TABLE_STATE_POINTERS), ptr) {
148       ptr.SliceHashStatePointerValid = true;
149       ptr.SliceHashTableStatePointer = device->slice_hash.offset;
150    }
151
152    anv_batch_emit(batch, GENX(3DSTATE_3D_MODE), mode) {
153       mode.SliceHashingTableEnable = true;
154       mode.SliceHashingTableEnableMask = true;
155       mode.CrossSliceHashingMode = (util_bitcount(ppipe_mask) > 1 ?
156                                     hashing32x32 : NormalMode);
157       mode.CrossSliceHashingModeMask = -1;
158    }
159 #endif
160 }
161
162 static void
163 init_common_queue_state(struct anv_queue *queue, struct anv_batch *batch)
164 {
165    UNUSED struct anv_device *device = queue->device;
166
167 #if GFX_VER >= 11
168    /* Starting with GFX version 11, SLM is no longer part of the L3$ config
169     * so it never changes throughout the lifetime of the VkDevice.
170     */
171    const struct intel_l3_config *cfg = intel_get_default_l3_config(device->info);
172    genX(emit_l3_config)(batch, device, cfg);
173    device->l3_config = cfg;
174 #endif
175
176 #if GFX_VERx10 >= 125
177    /* GEN:BUG:1607854226:
178     *
179     *  Non-pipelined state can fail to take effect while in MEDIA/GPGPU mode.
180     *  Fortunately, we always start the context off in 3D mode.
181     */
182    uint32_t mocs = device->isl_dev.mocs.internal;
183    anv_batch_emit(batch, GENX(STATE_BASE_ADDRESS), sba) {
184       sba.GeneralStateBaseAddress = (struct anv_address) { NULL, 0 };
185       sba.GeneralStateBufferSize  = 0xfffff;
186       sba.GeneralStateMOCS = mocs;
187       sba.GeneralStateBaseAddressModifyEnable = true;
188       sba.GeneralStateBufferSizeModifyEnable = true;
189
190       sba.StatelessDataPortAccessMOCS = mocs;
191
192       sba.SurfaceStateBaseAddress =
193          (struct anv_address) { .offset = SURFACE_STATE_POOL_MIN_ADDRESS };
194       sba.SurfaceStateMOCS = mocs;
195       sba.SurfaceStateBaseAddressModifyEnable = true;
196
197       sba.DynamicStateBaseAddress =
198          (struct anv_address) { .offset = DYNAMIC_STATE_POOL_MIN_ADDRESS };
199       sba.DynamicStateBufferSize = DYNAMIC_STATE_POOL_SIZE / 4096;
200       sba.DynamicStateMOCS = mocs;
201       sba.DynamicStateBaseAddressModifyEnable = true;
202       sba.DynamicStateBufferSizeModifyEnable = true;
203
204       sba.IndirectObjectBaseAddress = (struct anv_address) { NULL, 0 };
205       sba.IndirectObjectBufferSize = 0xfffff;
206       sba.IndirectObjectMOCS = mocs;
207       sba.IndirectObjectBaseAddressModifyEnable = true;
208       sba.IndirectObjectBufferSizeModifyEnable = true;
209
210       sba.InstructionBaseAddress =
211          (struct anv_address) { .offset = INSTRUCTION_STATE_POOL_MIN_ADDRESS };
212       sba.InstructionBufferSize = INSTRUCTION_STATE_POOL_SIZE / 4096;
213       sba.InstructionMOCS = mocs;
214       sba.InstructionBaseAddressModifyEnable = true;
215       sba.InstructionBuffersizeModifyEnable = true;
216
217       sba.BindlessSurfaceStateBaseAddress =
218          (struct anv_address) { .offset = SURFACE_STATE_POOL_MIN_ADDRESS };
219       sba.BindlessSurfaceStateSize = (1 << 20) - 1;
220       sba.BindlessSurfaceStateMOCS = mocs;
221       sba.BindlessSurfaceStateBaseAddressModifyEnable = true;
222
223       sba.BindlessSamplerStateBaseAddress = (struct anv_address) { NULL, 0 };
224       sba.BindlessSamplerStateMOCS = mocs;
225       sba.BindlessSamplerStateBaseAddressModifyEnable = true;
226       sba.BindlessSamplerStateBufferSize = 0;
227
228       sba.L1CacheControl = L1CC_WB;
229    }
230 #endif
231 }
232
233 static VkResult
234 init_render_queue_state(struct anv_queue *queue)
235 {
236    struct anv_device *device = queue->device;
237    uint32_t cmds[128];
238    struct anv_batch batch = {
239       .start = cmds,
240       .next = cmds,
241       .end = (void *) cmds + sizeof(cmds),
242    };
243
244    anv_batch_emit(&batch, GENX(PIPELINE_SELECT), ps) {
245       ps.MaskBits = GFX_VER >= 12 ? 0x13 : 3;
246       ps.MediaSamplerDOPClockGateEnable = GFX_VER >= 12;
247       ps.PipelineSelection = _3D;
248    }
249
250 #if GFX_VER == 9
251    anv_batch_write_reg(&batch, GENX(CACHE_MODE_1), cm1) {
252       cm1.FloatBlendOptimizationEnable = true;
253       cm1.FloatBlendOptimizationEnableMask = true;
254       cm1.MSCRAWHazardAvoidanceBit = true;
255       cm1.MSCRAWHazardAvoidanceBitMask = true;
256       cm1.PartialResolveDisableInVC = true;
257       cm1.PartialResolveDisableInVCMask = true;
258    }
259 #endif
260
261    anv_batch_emit(&batch, GENX(3DSTATE_AA_LINE_PARAMETERS), aa);
262
263    anv_batch_emit(&batch, GENX(3DSTATE_DRAWING_RECTANGLE), rect) {
264       rect.ClippedDrawingRectangleYMin = 0;
265       rect.ClippedDrawingRectangleXMin = 0;
266       rect.ClippedDrawingRectangleYMax = UINT16_MAX;
267       rect.ClippedDrawingRectangleXMax = UINT16_MAX;
268       rect.DrawingRectangleOriginY = 0;
269       rect.DrawingRectangleOriginX = 0;
270    }
271
272    anv_batch_emit(&batch, GENX(3DSTATE_WM_CHROMAKEY), ck);
273
274    genX(emit_sample_pattern)(&batch, NULL);
275
276    /* The BDW+ docs describe how to use the 3DSTATE_WM_HZ_OP instruction in the
277     * section titled, "Optimized Depth Buffer Clear and/or Stencil Buffer
278     * Clear." It mentions that the packet overrides GPU state for the clear
279     * operation and needs to be reset to 0s to clear the overrides. Depending
280     * on the kernel, we may not get a context with the state for this packet
281     * zeroed. Do it ourselves just in case. We've observed this to prevent a
282     * number of GPU hangs on ICL.
283     */
284    anv_batch_emit(&batch, GENX(3DSTATE_WM_HZ_OP), hzp);
285
286 #if GFX_VER == 11
287    /* Bit 5 "Headerless Message for Pre-emptable Contexts" in the
288     * SAMPLER MODE register defaults to 0, which means headerless
289     * sampler messages are not allowed for pre-emptable contexts.
290     * Set bit 5 to 1 to allow them.
291     */
292    anv_batch_write_reg(&batch, GENX(SAMPLER_MODE), sm) {
293       sm.HeaderlessMessageforPreemptableContexts = true;
294       sm.HeaderlessMessageforPreemptableContextsMask = true;
295    }
296
297    /* Bit 1 "Enabled Texel Offset Precision Fix" must be set in the
298     * HALF_SLICE_CHICKEN7 register.
299     */
300    anv_batch_write_reg(&batch, GENX(HALF_SLICE_CHICKEN7), hsc7) {
301       hsc7.EnabledTexelOffsetPrecisionFix = true;
302       hsc7.EnabledTexelOffsetPrecisionFixMask = true;
303    }
304
305    anv_batch_write_reg(&batch, GENX(TCCNTLREG), tcc) {
306       tcc.L3DataPartialWriteMergingEnable = true;
307       tcc.ColorZPartialWriteMergingEnable = true;
308       tcc.URBPartialWriteMergingEnable = true;
309       tcc.TCDisable = true;
310    }
311 #endif
312    genX(emit_slice_hashing_state)(device, &batch);
313
314 #if GFX_VER >= 11
315    /* The hardware specification recommends disabling repacking for
316     * compatibility with the decompression mechanism in the display controller.
317     */
318    if (device->info->disable_ccs_repack) {
319       anv_batch_write_reg(&batch, GENX(CACHE_MODE_0), cm0) {
320          cm0.DisableRepackingforCompression = true;
321          cm0.DisableRepackingforCompressionMask = true;
322       }
323    }
324
325 #if GFX_VERx10 < 125
326    /* An unknown issue causes VS push constants to become
327     * corrupted during object-level preemption. For now, restrict
328     * preemption to the command buffer level to avoid rendering
329     * corruption.
330     */
331    anv_batch_write_reg(&batch, GENX(CS_CHICKEN1), cc1) {
332       cc1.ReplayMode = MidcmdbufferPreemption;
333       cc1.ReplayModeMask = true;
334
335 #if GFX_VERx10 == 120
336       cc1.DisablePreemptionandHighPriorityPausingdueto3DPRIMITIVECommand = true;
337       cc1.DisablePreemptionandHighPriorityPausingdueto3DPRIMITIVECommandMask = true;
338 #endif
339    }
340 #endif
341
342    /* Wa_14015207028
343     *
344     * Disable batch level preemption for some primitive topologies.
345     */
346 #if GFX_VERx10 == 125
347       anv_batch_write_reg(&batch, GENX(VFG_PREEMPTION_CHICKEN_BITS), vfgc) {
348          vfgc.PolygonTrifanLineLoopPreemptionDisable = true;
349          vfgc.PolygonTrifanLineLoopPreemptionDisableMask = true;
350       }
351 #endif
352
353 #if GFX_VERx10 == 120
354    /* Wa_1806527549 says to disable the following HiZ optimization when the
355     * depth buffer is D16_UNORM. We've found the WA to help with more depth
356     * buffer configurations however, so we always disable it just to be safe.
357     */
358    anv_batch_write_reg(&batch, GENX(HIZ_CHICKEN), reg) {
359       reg.HZDepthTestLEGEOptimizationDisable = true;
360       reg.HZDepthTestLEGEOptimizationDisableMask = true;
361    }
362 #endif
363
364 #if GFX_VERx10 < 125
365 #define AA_LINE_QUALITY_REG GENX(3D_CHICKEN3)
366 #else
367 #define AA_LINE_QUALITY_REG GENX(CHICKEN_RASTER_1)
368 #endif
369
370    /* Enable the new line drawing algorithm that produces higher quality
371     * lines.
372     */
373    anv_batch_write_reg(&batch, AA_LINE_QUALITY_REG, c3) {
374       c3.AALineQualityFix = true;
375       c3.AALineQualityFixMask = true;
376    }
377 #endif
378
379 #if GFX_VER == 12
380    if (device->info->has_aux_map) {
381       uint64_t aux_base_addr = intel_aux_map_get_base(device->aux_map_ctx);
382       assert(aux_base_addr % (32 * 1024) == 0);
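      /* The 64-bit aux-map base address is programmed with two 32-bit
       * MI_LOAD_REGISTER_IMMs: the low DWord at the register offset and the
       * high DWord at offset + 4.
       */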
383       anv_batch_emit(&batch, GENX(MI_LOAD_REGISTER_IMM), lri) {
384          lri.RegisterOffset = GENX(GFX_AUX_TABLE_BASE_ADDR_num);
385          lri.DataDWord = aux_base_addr & 0xffffffff;
386       }
387       anv_batch_emit(&batch, GENX(MI_LOAD_REGISTER_IMM), lri) {
388          lri.RegisterOffset = GENX(GFX_AUX_TABLE_BASE_ADDR_num) + 4;
389          lri.DataDWord = aux_base_addr >> 32;
390       }
391    }
392 #endif
393
394    /* Set the "CONSTANT_BUFFER Address Offset Disable" bit, so
395     * 3DSTATE_CONSTANT_XS buffer 0 is an absolute address.
396     *
397     * This is only safe on kernels with context isolation support.
398     */
399    if (device->physical->has_context_isolation) {
400       anv_batch_write_reg(&batch, GENX(CS_DEBUG_MODE2), csdm2) {
401          csdm2.CONSTANT_BUFFERAddressOffsetDisable = true;
402          csdm2.CONSTANT_BUFFERAddressOffsetDisableMask = true;
403       }
404    }
405
406    init_common_queue_state(queue, &batch);
407
408    anv_batch_emit(&batch, GENX(MI_BATCH_BUFFER_END), bbe);
409
410    assert(batch.next <= batch.end);
411
412    return anv_queue_submit_simple_batch(queue, &batch);
413 }
414
415 static VkResult
416 init_compute_queue_state(struct anv_queue *queue)
417 {
418    struct anv_batch batch;
419
420    uint32_t cmds[64];
421    batch.start = batch.next = cmds;
422    batch.end = (void *) cmds + sizeof(cmds);
423
424    anv_batch_emit(&batch, GENX(PIPELINE_SELECT), ps) {
425       ps.MaskBits = 3;
426 #if GFX_VER >= 11
427       ps.MaskBits |= 0x10;
428       ps.MediaSamplerDOPClockGateEnable = true;
429 #endif
430       ps.PipelineSelection = GPGPU;
431    }
432
433    init_common_queue_state(queue, &batch);
434
435    anv_batch_emit(&batch, GENX(MI_BATCH_BUFFER_END), bbe);
436
437    assert(batch.next <= batch.end);
438
439    return anv_queue_submit_simple_batch(queue, &batch);
440 }
441
442 void
443 genX(init_physical_device_state)(ASSERTED struct anv_physical_device *pdevice)
444 {
445    assert(pdevice->info.verx10 == GFX_VERx10);
446 }
447
448 VkResult
449 genX(init_device_state)(struct anv_device *device)
450 {
451    VkResult res;
452
453    device->slice_hash = (struct anv_state) { 0 };
454    for (uint32_t i = 0; i < device->queue_count; i++) {
455       struct anv_queue *queue = &device->queues[i];
456       switch (queue->family->engine_class) {
457       case I915_ENGINE_CLASS_RENDER:
458          res = init_render_queue_state(queue);
459          break;
460       case I915_ENGINE_CLASS_COMPUTE:
461          res = init_compute_queue_state(queue);
462          break;
463       default:
464          res = vk_error(device, VK_ERROR_INITIALIZATION_FAILED);
465          break;
466       }
467       if (res != VK_SUCCESS)
468          return res;
469    }
470
471    return res;
472 }
473
474 #if GFX_VERx10 >= 125
475 #define maybe_for_each_shading_rate_op(name) \
476    for (VkFragmentShadingRateCombinerOpKHR name = VK_FRAGMENT_SHADING_RATE_COMBINER_OP_KEEP_KHR; \
477         name <= VK_FRAGMENT_SHADING_RATE_COMBINER_OP_MUL_KHR; \
478         name++)
479 #elif GFX_VER >= 12
480 #define maybe_for_each_shading_rate_op(name)
481 #endif
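
/* Note: on GFX_VER 12 parts other than GFX_VERx10 125, the macro above expands
 * to nothing, so each "loop" below is just a bare block that runs exactly once
 * and no combiner-op dimensions are added to the prepacked CPS state array.
 */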
482
483 /* Rather than re-emitting the CPS_STATE structure every time one of its
484  * inputs changes, and for as many viewports as needed, we prepare all
485  * possible cases and just pick the right offset from the prepacked states.
486  */
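/* A sketch of the resulting layout, as consumed by get_cps_state_offset()
 * further down: entry 0 holds the disabled state and every logical entry spans
 * MAX_VIEWPORTS packed CPS_STATE structures. On GFX_VERx10 >= 125 enabled
 * entries are indexed as 1 + op0 * 45 + op1 * 9 + size_index(width) * 3 +
 * size_index(height); for example, a hypothetical 2x4 fragment size with both
 * combiner ops KEEP lands at logical entry 1 + 0 + 0 + 1 * 3 + 2 = 6. On other
 * GFX_VER 12 parts only the two size terms remain.
 */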
487 void
488 genX(init_cps_device_state)(struct anv_device *device)
489 {
490 #if GFX_VER >= 12
491    void *cps_state_ptr = device->cps_states.map;
492
493    /* Disabled CPS mode */
494    for (uint32_t __v = 0; __v < MAX_VIEWPORTS; __v++) {
495       struct GENX(CPS_STATE) cps_state = {
496          .CoarsePixelShadingMode = CPS_MODE_CONSTANT,
497          .MinCPSizeX = 1,
498          .MinCPSizeY = 1,
499 #if GFX_VERx10 >= 125
500          .Combiner0OpcodeforCPsize = PASSTHROUGH,
501          .Combiner1OpcodeforCPsize = PASSTHROUGH,
502 #endif /* GFX_VERx10 >= 125 */
503
504       };
505
506       GENX(CPS_STATE_pack)(NULL, cps_state_ptr, &cps_state);
507       cps_state_ptr += GENX(CPS_STATE_length) * 4;
508    }
509
510    maybe_for_each_shading_rate_op(op0) {
511       maybe_for_each_shading_rate_op(op1) {
512          for (uint32_t x = 1; x <= 4; x *= 2) {
513             for (uint32_t y = 1; y <= 4; y *= 2) {
514                struct GENX(CPS_STATE) cps_state = {
515                   .CoarsePixelShadingMode = CPS_MODE_CONSTANT,
516                   .MinCPSizeX = x,
517                   .MinCPSizeY = y,
518                };
519
520 #if GFX_VERx10 >= 125
521                static const uint32_t combiner_ops[] = {
522                   [VK_FRAGMENT_SHADING_RATE_COMBINER_OP_KEEP_KHR]    = PASSTHROUGH,
523                   [VK_FRAGMENT_SHADING_RATE_COMBINER_OP_REPLACE_KHR] = OVERRIDE,
524                   [VK_FRAGMENT_SHADING_RATE_COMBINER_OP_MIN_KHR]     = HIGH_QUALITY,
525                   [VK_FRAGMENT_SHADING_RATE_COMBINER_OP_MAX_KHR]     = LOW_QUALITY,
526                   [VK_FRAGMENT_SHADING_RATE_COMBINER_OP_MUL_KHR]     = RELATIVE,
527                };
528
529                cps_state.Combiner0OpcodeforCPsize = combiner_ops[op0];
530                cps_state.Combiner1OpcodeforCPsize = combiner_ops[op1];
531 #endif /* GFX_VERx10 >= 125 */
532
533                for (uint32_t __v = 0; __v < MAX_VIEWPORTS; __v++) {
534                   GENX(CPS_STATE_pack)(NULL, cps_state_ptr, &cps_state);
535                   cps_state_ptr += GENX(CPS_STATE_length) * 4;
536                }
537             }
538          }
539       }
540    }
541 #endif /* GFX_VER >= 12 */
542 }
543
544 #if GFX_VER >= 12
545 static uint32_t
546 get_cps_state_offset(struct anv_device *device, bool cps_enabled,
547                      const struct vk_fragment_shading_rate_state *fsr)
548 {
549    if (!cps_enabled)
550       return device->cps_states.offset;
551
552    uint32_t offset;
553    static const uint32_t size_index[] = {
554       [1] = 0,
555       [2] = 1,
556       [4] = 2,
557    };
558
559 #if GFX_VERx10 >= 125
560    offset =
561       1 + /* skip disabled */
562       fsr->combiner_ops[0] * 5 * 3 * 3 +
563       fsr->combiner_ops[1] * 3 * 3 +
564       size_index[fsr->fragment_size.width] * 3 +
565       size_index[fsr->fragment_size.height];
566 #else
567    offset =
568       1 + /* skip disabled */
569       size_index[fsr->fragment_size.width] * 3 +
570       size_index[fsr->fragment_size.height];
571 #endif
572
573    offset *= MAX_VIEWPORTS * GENX(CPS_STATE_length) * 4;
574
575    return device->cps_states.offset + offset;
576 }
577 #endif /* GFX_VER >= 12 */
578
579 void
580 genX(emit_l3_config)(struct anv_batch *batch,
581                      const struct anv_device *device,
582                      const struct intel_l3_config *cfg)
583 {
584    UNUSED const struct intel_device_info *devinfo = device->info;
585
586 #if GFX_VER >= 12
587 #define L3_ALLOCATION_REG GENX(L3ALLOC)
588 #define L3_ALLOCATION_REG_num GENX(L3ALLOC_num)
589 #else
590 #define L3_ALLOCATION_REG GENX(L3CNTLREG)
591 #define L3_ALLOCATION_REG_num GENX(L3CNTLREG_num)
592 #endif
593
594    anv_batch_write_reg(batch, L3_ALLOCATION_REG, l3cr) {
595       if (cfg == NULL) {
596 #if GFX_VER >= 12
597          l3cr.L3FullWayAllocationEnable = true;
598 #else
599          unreachable("Invalid L3$ config");
600 #endif
601       } else {
602 #if GFX_VER < 11
603          l3cr.SLMEnable = cfg->n[INTEL_L3P_SLM];
604 #endif
605 #if GFX_VER == 11
606          /* Wa_1406697149: Bit 9 "Error Detection Behavior Control" must be
607           * set in L3CNTLREG register. The default setting of the bit is not
608           * the desirable behavior.
609           */
610          l3cr.ErrorDetectionBehaviorControl = true;
611          l3cr.UseFullWays = true;
612 #endif /* GFX_VER == 11 */
613          assert(cfg->n[INTEL_L3P_IS] == 0);
614          assert(cfg->n[INTEL_L3P_C] == 0);
615          assert(cfg->n[INTEL_L3P_T] == 0);
616          l3cr.URBAllocation = cfg->n[INTEL_L3P_URB];
617          l3cr.ROAllocation = cfg->n[INTEL_L3P_RO];
618          l3cr.DCAllocation = cfg->n[INTEL_L3P_DC];
619          l3cr.AllAllocation = cfg->n[INTEL_L3P_ALL];
620       }
621    }
622 }
623
624 void
625 genX(emit_multisample)(struct anv_batch *batch, uint32_t samples)
626 {
627    anv_batch_emit(batch, GENX(3DSTATE_MULTISAMPLE), ms) {
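      /* For the power-of-two sample counts Vulkan allows, __builtin_ffs(samples) - 1
       * is simply log2(samples), e.g. 8 samples -> 3.
       */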
628       ms.NumberofMultisamples       = __builtin_ffs(samples) - 1;
629
630       ms.PixelLocation              = CENTER;
631
632       /* The PRM says that this bit is valid only for DX9:
633        *
634        *    SW can choose to set this bit only for DX9 API. DX10/OGL API's
635        *    should not have any effect by setting or not setting this bit.
636        */
637       ms.PixelPositionOffsetEnable  = false;
638    }
639 }
640
641 void
642 genX(emit_sample_pattern)(struct anv_batch *batch,
643                           const struct vk_sample_locations_state *sl)
644 {
645    assert(sl == NULL || sl->grid_size.width == 1);
646    assert(sl == NULL || sl->grid_size.height == 1);
647
648    /* See the Vulkan 1.0 spec Table 24.1 "Standard sample locations" and
649     * VkPhysicalDeviceFeatures::standardSampleLocations.
650     */
651    anv_batch_emit(batch, GENX(3DSTATE_SAMPLE_PATTERN), sp) {
652       /* The Skylake PRM Vol. 2a "3DSTATE_SAMPLE_PATTERN" says:
653        *
654        *    "When programming the sample offsets (for NUMSAMPLES_4 or _8
655        *    and MSRASTMODE_xxx_PATTERN), the order of the samples 0 to 3
656        *    (or 7 for 8X, or 15 for 16X) must have monotonically increasing
657        *    distance from the pixel center. This is required to get the
658        *    correct centroid computation in the device."
659        *
660        * However, the Vulkan spec seems to require that the samples occur
661        * in the order provided through the API. The standard sample patterns
662        * have the above property that they have monotonically increasing
663        * distances from the center but client-provided ones do not. As long as
664        * this only affects centroid calculations as the docs say, we should be
665        * ok because OpenGL and Vulkan only require that the centroid be some
666        * lit sample and that it's the same for all samples in a pixel; they
667        * have no requirement that it be the one closest to center.
668        */
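      /* Note: for the power-of-two counts handled here, VK_SAMPLE_COUNT_N_BIT has
       * the numeric value N, which is why the loop counter can be compared
       * directly against sl->per_pixel and the VK_SAMPLE_COUNT_*_BIT case labels.
       */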
669       for (uint32_t i = 1; i <= 16; i *= 2) {
670          switch (i) {
671          case VK_SAMPLE_COUNT_1_BIT:
672             if (sl && sl->per_pixel == i) {
673                INTEL_SAMPLE_POS_1X_ARRAY(sp._1xSample, sl->locations);
674             } else {
675                INTEL_SAMPLE_POS_1X(sp._1xSample);
676             }
677             break;
678          case VK_SAMPLE_COUNT_2_BIT:
679             if (sl && sl->per_pixel == i) {
680                INTEL_SAMPLE_POS_2X_ARRAY(sp._2xSample, sl->locations);
681             } else {
682                INTEL_SAMPLE_POS_2X(sp._2xSample);
683             }
684             break;
685          case VK_SAMPLE_COUNT_4_BIT:
686             if (sl && sl->per_pixel == i) {
687                INTEL_SAMPLE_POS_4X_ARRAY(sp._4xSample, sl->locations);
688             } else {
689                INTEL_SAMPLE_POS_4X(sp._4xSample);
690             }
691             break;
692          case VK_SAMPLE_COUNT_8_BIT:
693             if (sl && sl->per_pixel == i) {
694                INTEL_SAMPLE_POS_8X_ARRAY(sp._8xSample, sl->locations);
695             } else {
696                INTEL_SAMPLE_POS_8X(sp._8xSample);
697             }
698             break;
699          case VK_SAMPLE_COUNT_16_BIT:
700             if (sl && sl->per_pixel == i) {
701                INTEL_SAMPLE_POS_16X_ARRAY(sp._16xSample, sl->locations);
702             } else {
703                INTEL_SAMPLE_POS_16X(sp._16xSample);
704             }
705             break;
706          default:
707             unreachable("Invalid sample count");
708          }
709       }
710    }
711 }
712
713 #if GFX_VER >= 11
714 void
715 genX(emit_shading_rate)(struct anv_batch *batch,
716                         const struct anv_graphics_pipeline *pipeline,
717                         const struct vk_fragment_shading_rate_state *fsr)
718 {
719    const struct brw_wm_prog_data *wm_prog_data = get_wm_prog_data(pipeline);
720    const bool cps_enable = wm_prog_data && wm_prog_data->per_coarse_pixel_dispatch;
721
722 #if GFX_VER == 11
723    anv_batch_emit(batch, GENX(3DSTATE_CPS), cps) {
724       cps.CoarsePixelShadingMode = cps_enable ? CPS_MODE_CONSTANT : CPS_MODE_NONE;
725       if (cps_enable) {
726          cps.MinCPSizeX = fsr->fragment_size.width;
727          cps.MinCPSizeY = fsr->fragment_size.height;
728       }
729    }
730 #elif GFX_VER >= 12
731    /* TODO: we can optimize this flush in the following cases:
732     *
733     *    In the case where the last geometry shader emits a value that is not
734     *    constant, we can avoid this stall because we can synchronize the
735     *    pixel shader internally with
736     *    3DSTATE_PS::EnablePSDependencyOnCPsizeChange.
737     *
738     *    If we know that the previous pipeline and the current one are using
739     *    the same fragment shading rate.
740     */
741    anv_batch_emit(batch, GENX(PIPE_CONTROL), pc) {
742 #if GFX_VERx10 >= 125
743       pc.PSSStallSyncEnable = true;
744 #else
745       pc.PSDSyncEnable = true;
746 #endif
747    }
748
749    anv_batch_emit(batch, GENX(3DSTATE_CPS_POINTERS), cps) {
750       struct anv_device *device = pipeline->base.device;
751
752       cps.CoarsePixelShadingStateArrayPointer =
753          get_cps_state_offset(device, cps_enable, fsr);
754    }
755 #endif
756 }
757 #endif /* GFX_VER >= 11 */
758
759 static uint32_t
760 vk_to_intel_tex_filter(VkFilter filter, bool anisotropyEnable)
761 {
762    switch (filter) {
763    default:
764       unreachable("Invalid filter");
765    case VK_FILTER_NEAREST:
766       return anisotropyEnable ? MAPFILTER_ANISOTROPIC : MAPFILTER_NEAREST;
767    case VK_FILTER_LINEAR:
768       return anisotropyEnable ? MAPFILTER_ANISOTROPIC : MAPFILTER_LINEAR;
769    }
770 }
771
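/* Note on the mapping below: the anisotropy ratio is encoded in steps of two
 * starting at 2:1, so (clamp(ratio, 2, 16) - 2) / 2 yields 0 for 2:1, 1 for 4:1,
 * and so on up to 7 for 16:1.
 */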
772 static uint32_t
773 vk_to_intel_max_anisotropy(float ratio)
774 {
775    return (anv_clamp_f(ratio, 2, 16) - 2) / 2;
776 }
777
778 static const uint32_t vk_to_intel_mipmap_mode[] = {
779    [VK_SAMPLER_MIPMAP_MODE_NEAREST]          = MIPFILTER_NEAREST,
780    [VK_SAMPLER_MIPMAP_MODE_LINEAR]           = MIPFILTER_LINEAR
781 };
782
783 static const uint32_t vk_to_intel_tex_address[] = {
784    [VK_SAMPLER_ADDRESS_MODE_REPEAT]          = TCM_WRAP,
785    [VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT] = TCM_MIRROR,
786    [VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE]   = TCM_CLAMP,
787    [VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE] = TCM_MIRROR_ONCE,
788    [VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER] = TCM_CLAMP_BORDER,
789 };
790
791 /* Vulkan specifies the result of shadow comparisons as:
792  *     1     if   ref <op> texel,
793  *     0     otherwise.
794  *
795  * The hardware does:
796  *     0     if texel <op> ref,
797  *     1     otherwise.
798  *
799  * So, these look a bit strange because there's both a negation
800  * and swapping of the arguments involved.
801  */
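/* Worked example: for VK_COMPARE_OP_LESS, Vulkan wants a result of 1 when
 * ref < texel, i.e. when !(texel <= ref). Since the hardware returns 1 exactly
 * when the programmed comparison of texel against ref fails, programming LEQUAL
 * gives the desired behavior, hence the PREFILTEROP_LEQUAL entry below.
 */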
802 static const uint32_t vk_to_intel_shadow_compare_op[] = {
803    [VK_COMPARE_OP_NEVER]                        = PREFILTEROP_ALWAYS,
804    [VK_COMPARE_OP_LESS]                         = PREFILTEROP_LEQUAL,
805    [VK_COMPARE_OP_EQUAL]                        = PREFILTEROP_NOTEQUAL,
806    [VK_COMPARE_OP_LESS_OR_EQUAL]                = PREFILTEROP_LESS,
807    [VK_COMPARE_OP_GREATER]                      = PREFILTEROP_GEQUAL,
808    [VK_COMPARE_OP_NOT_EQUAL]                    = PREFILTEROP_EQUAL,
809    [VK_COMPARE_OP_GREATER_OR_EQUAL]             = PREFILTEROP_GREATER,
810    [VK_COMPARE_OP_ALWAYS]                       = PREFILTEROP_NEVER,
811 };
812
813 static const uint32_t vk_to_intel_sampler_reduction_mode[] = {
814    [VK_SAMPLER_REDUCTION_MODE_WEIGHTED_AVERAGE] = STD_FILTER,
815    [VK_SAMPLER_REDUCTION_MODE_MIN]              = MINIMUM,
816    [VK_SAMPLER_REDUCTION_MODE_MAX]              = MAXIMUM,
817 };
818
819 VkResult genX(CreateSampler)(
820     VkDevice                                    _device,
821     const VkSamplerCreateInfo*                  pCreateInfo,
822     const VkAllocationCallbacks*                pAllocator,
823     VkSampler*                                  pSampler)
824 {
825    ANV_FROM_HANDLE(anv_device, device, _device);
826    struct anv_sampler *sampler;
827
828    assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO);
829
830    sampler = vk_object_zalloc(&device->vk, pAllocator, sizeof(*sampler),
831                               VK_OBJECT_TYPE_SAMPLER);
832    if (!sampler)
833       return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
834
835    sampler->n_planes = 1;
836
837    uint32_t border_color_stride = 64;
838    uint32_t border_color_offset;
839    ASSERTED bool has_custom_color = false;
840    if (pCreateInfo->borderColor <= VK_BORDER_COLOR_INT_OPAQUE_WHITE) {
841       border_color_offset = device->border_colors.offset +
842                             pCreateInfo->borderColor *
843                             border_color_stride;
844    } else {
845       sampler->custom_border_color =
846          anv_state_reserved_pool_alloc(&device->custom_border_colors);
847       border_color_offset = sampler->custom_border_color.offset;
848    }
849
850    unsigned sampler_reduction_mode = STD_FILTER;
851    bool enable_sampler_reduction = false;
852
853    vk_foreach_struct_const(ext, pCreateInfo->pNext) {
854       switch (ext->sType) {
855       case VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_INFO: {
856          VkSamplerYcbcrConversionInfo *pSamplerConversion =
857             (VkSamplerYcbcrConversionInfo *) ext;
858          ANV_FROM_HANDLE(anv_ycbcr_conversion, conversion,
859                          pSamplerConversion->conversion);
860
861          /* Ignore conversion for non-YUV formats. This fulfills a requirement
862           * for clients that want to use the same code path for images with
863           * external formats (VK_FORMAT_UNDEFINED) and "regular" RGBA images
864           * where the format is known.
865           */
866          if (conversion == NULL || !conversion->format->can_ycbcr)
867             break;
868
869          sampler->n_planes = conversion->format->n_planes;
870          sampler->conversion = conversion;
871          break;
872       }
873       case VK_STRUCTURE_TYPE_SAMPLER_REDUCTION_MODE_CREATE_INFO: {
874          VkSamplerReductionModeCreateInfo *sampler_reduction =
875             (VkSamplerReductionModeCreateInfo *) ext;
876          sampler_reduction_mode =
877             vk_to_intel_sampler_reduction_mode[sampler_reduction->reductionMode];
878          enable_sampler_reduction = true;
879          break;
880       }
881       case VK_STRUCTURE_TYPE_SAMPLER_CUSTOM_BORDER_COLOR_CREATE_INFO_EXT: {
882          VkSamplerCustomBorderColorCreateInfoEXT *custom_border_color =
883             (VkSamplerCustomBorderColorCreateInfoEXT *) ext;
884          if (sampler->custom_border_color.map == NULL)
885             break;
886
887          union isl_color_value color = { .u32 = {
888             custom_border_color->customBorderColor.uint32[0],
889             custom_border_color->customBorderColor.uint32[1],
890             custom_border_color->customBorderColor.uint32[2],
891             custom_border_color->customBorderColor.uint32[3],
892          } };
893
894          const struct anv_format *format_desc =
895             custom_border_color->format != VK_FORMAT_UNDEFINED ?
896             anv_get_format(custom_border_color->format) : NULL;
897
898          /* For formats with a swizzle, the swizzle does not carry over to the
899           * sampler for border colors, so we need to apply it ourselves here.
900           */
901          if (format_desc && format_desc->n_planes == 1 &&
902              !isl_swizzle_is_identity(format_desc->planes[0].swizzle)) {
903             const struct anv_format_plane *fmt_plane = &format_desc->planes[0];
904
905             assert(!isl_format_has_int_channel(fmt_plane->isl_format));
906             color = isl_color_value_swizzle(color, fmt_plane->swizzle, true);
907          }
908
909          memcpy(sampler->custom_border_color.map, color.u32, sizeof(color));
910          has_custom_color = true;
911          break;
912       }
913       case VK_STRUCTURE_TYPE_SAMPLER_BORDER_COLOR_COMPONENT_MAPPING_CREATE_INFO_EXT:
914          break;
915       default:
916          anv_debug_ignored_stype(ext->sType);
917          break;
918       }
919    }
920
921    assert((sampler->custom_border_color.map == NULL) || has_custom_color);
922
923    /* If we have bindless, allocate enough samplers.  We allocate 32 bytes
924     * for each sampler instead of 16 bytes because we want all bindless
925     * samplers to be 32-byte aligned so we don't have to use indirect
926     * sampler messages on them.
927     */
928    sampler->bindless_state =
929       anv_state_pool_alloc(&device->dynamic_state_pool,
930                            sampler->n_planes * 32, 32);
931
932    const bool seamless_cube =
933       !(pCreateInfo->flags & VK_SAMPLER_CREATE_NON_SEAMLESS_CUBE_MAP_BIT_EXT);
934
935    for (unsigned p = 0; p < sampler->n_planes; p++) {
936       const bool plane_has_chroma =
937          sampler->conversion && sampler->conversion->format->planes[p].has_chroma;
938       const VkFilter min_filter =
939          plane_has_chroma ? sampler->conversion->chroma_filter : pCreateInfo->minFilter;
940       const VkFilter mag_filter =
941          plane_has_chroma ? sampler->conversion->chroma_filter : pCreateInfo->magFilter;
942       const bool enable_min_filter_addr_rounding = min_filter != VK_FILTER_NEAREST;
943       const bool enable_mag_filter_addr_rounding = mag_filter != VK_FILTER_NEAREST;
944       /* From Broadwell PRM, SAMPLER_STATE:
945        *   "Mip Mode Filter must be set to MIPFILTER_NONE for Planar YUV surfaces."
946        */
947       const bool isl_format_is_planar_yuv = sampler->conversion &&
948          isl_format_is_yuv(sampler->conversion->format->planes[0].isl_format) &&
949          isl_format_is_planar(sampler->conversion->format->planes[0].isl_format);
950
951       const uint32_t mip_filter_mode =
952          isl_format_is_planar_yuv ?
953          MIPFILTER_NONE : vk_to_intel_mipmap_mode[pCreateInfo->mipmapMode];
954
955       struct GENX(SAMPLER_STATE) sampler_state = {
956          .SamplerDisable = false,
957          .TextureBorderColorMode = DX10OGL,
958
959 #if GFX_VER >= 11
960          .CPSLODCompensationEnable = true,
961 #endif
962
963          .LODPreClampMode = CLAMP_MODE_OGL,
964
965          .MipModeFilter = mip_filter_mode,
966          .MagModeFilter = vk_to_intel_tex_filter(mag_filter, pCreateInfo->anisotropyEnable),
967          .MinModeFilter = vk_to_intel_tex_filter(min_filter, pCreateInfo->anisotropyEnable),
968          .TextureLODBias = anv_clamp_f(pCreateInfo->mipLodBias, -16, 15.996),
969          .AnisotropicAlgorithm =
970             pCreateInfo->anisotropyEnable ? EWAApproximation : LEGACY,
971          .MinLOD = anv_clamp_f(pCreateInfo->minLod, 0, 14),
972          .MaxLOD = anv_clamp_f(pCreateInfo->maxLod, 0, 14),
973          .ChromaKeyEnable = 0,
974          .ChromaKeyIndex = 0,
975          .ChromaKeyMode = 0,
976          .ShadowFunction =
977             vk_to_intel_shadow_compare_op[pCreateInfo->compareEnable ?
978                                         pCreateInfo->compareOp : VK_COMPARE_OP_NEVER],
979          .CubeSurfaceControlMode = seamless_cube ? OVERRIDE : PROGRAMMED,
980
981          .BorderColorPointer = border_color_offset,
982
983          .LODClampMagnificationMode = MIPNONE,
984
985          .MaximumAnisotropy = vk_to_intel_max_anisotropy(pCreateInfo->maxAnisotropy),
986          .RAddressMinFilterRoundingEnable = enable_min_filter_addr_rounding,
987          .RAddressMagFilterRoundingEnable = enable_mag_filter_addr_rounding,
988          .VAddressMinFilterRoundingEnable = enable_min_filter_addr_rounding,
989          .VAddressMagFilterRoundingEnable = enable_mag_filter_addr_rounding,
990          .UAddressMinFilterRoundingEnable = enable_min_filter_addr_rounding,
991          .UAddressMagFilterRoundingEnable = enable_mag_filter_addr_rounding,
992          .TrilinearFilterQuality = 0,
993          .NonnormalizedCoordinateEnable = pCreateInfo->unnormalizedCoordinates,
994          .TCXAddressControlMode = vk_to_intel_tex_address[pCreateInfo->addressModeU],
995          .TCYAddressControlMode = vk_to_intel_tex_address[pCreateInfo->addressModeV],
996          .TCZAddressControlMode = vk_to_intel_tex_address[pCreateInfo->addressModeW],
997
998          .ReductionType = sampler_reduction_mode,
999          .ReductionTypeEnable = enable_sampler_reduction,
1000       };
1001
1002       GENX(SAMPLER_STATE_pack)(NULL, sampler->state[p], &sampler_state);
1003
1004       if (sampler->bindless_state.map) {
1005          memcpy(sampler->bindless_state.map + p * 32,
1006                 sampler->state[p], GENX(SAMPLER_STATE_length) * 4);
1007       }
1008    }
1009
1010    *pSampler = anv_sampler_to_handle(sampler);
1011
1012    return VK_SUCCESS;
1013 }