// 64-bit integer scalar types (int64_t / uint64_t).
#extension GL_ARB_gpu_shader_int64 : enable
// i64image* / u64image* opaque types and 64-bit image atomic built-ins.
#extension GL_EXT_shader_image_int64 : enable
// Explicit scope/semantics overloads (gl_ScopeDevice, gl_Semantics*, atomic load/store).
#extension GL_KHR_memory_scope_semantics : enable
// sparseImageLoadARB residency-returning image loads.
#extension GL_ARB_sparse_texture2 : enable
// One image binding per dimensionality, alternating signed (r64i) and
// unsigned (r64ui) 64-bit formats so every imageAtomic*/imageLoad/imageStore
// overload below has a matching image type to exercise.
layout(binding = 0,  r64i)  uniform i64image1D          i1D;
layout(binding = 1,  r64ui) uniform u64image2D          u2D;
layout(binding = 2,  r64i)  uniform i64image3D          i3D;
layout(binding = 3,  r64ui) uniform u64imageCube        uCube;
layout(binding = 4,  r64i)  uniform i64imageBuffer      iBuf;
layout(binding = 5,  r64ui) uniform u64image1DArray     u1DArray;
layout(binding = 6,  r64i)  uniform i64image2DArray     i2DArray;
layout(binding = 7,  r64ui) uniform u64imageCubeArray   uCubeArray;
layout(binding = 8,  r64i)  uniform i64image2DRect      i2DRect;
layout(binding = 9,  r64ui) uniform u64image2DMS        u2DMS;
layout(binding = 10, r64i)  uniform i64image2DMSArray   i2DMSArray;
layout(binding = 11) buffer Buf
// --- Signed 64-bit image atomics, implicit scope/semantics overloads ---
// NOTE(review): i64, u64, i32v4, i64v4, u64v4 are declared in the Buf block
// above this chunk — confirm against the full file.
i64 = imageAtomicAdd(i1D, i32v4.x, i64);
i64 = imageAtomicMin(i3D, i32v4.xyz, i64);
i64 = imageAtomicMax(iBuf, i32v4.x, i64);
i64 = imageAtomicAnd(i2DArray, i32v4.xyz, i64);
i64 = imageAtomicOr(i2DRect, i32v4.xy, i64);
i64 = imageAtomicXor(i2DMSArray, i32v4.xyz, i32v4.w, i64);  // MS images take a sample index
i64 = imageAtomicExchange(i1D, i32v4.x, i64);
i64 = imageAtomicCompSwap(i3D, i32v4.xyz, i64, i64 + 1);

// --- Signed 64-bit image atomics, explicit scope/semantics overloads ---
i64 = imageAtomicAdd(i1D, i32v4.x, i64, gl_ScopeDevice, gl_StorageSemanticsImage, gl_SemanticsRelaxed);
i64 = imageAtomicMin(i3D, i32v4.xyz, i64, gl_ScopeDevice, gl_StorageSemanticsImage, gl_SemanticsRelaxed);
i64 = imageAtomicMax(iBuf, i32v4.x, i64, gl_ScopeDevice, gl_StorageSemanticsImage, gl_SemanticsRelaxed);
i64 = imageAtomicAnd(i2DArray, i32v4.xyz, i64, gl_ScopeDevice, gl_StorageSemanticsImage, gl_SemanticsRelaxed);
i64 = imageAtomicOr(i2DRect, i32v4.xy, i64, gl_ScopeDevice, gl_StorageSemanticsImage, gl_SemanticsRelaxed);
i64 = imageAtomicXor(i2DMSArray, i32v4.xyz, i32v4.w, i64, gl_ScopeDevice, gl_StorageSemanticsImage, gl_SemanticsRelaxed);
i64 = imageAtomicExchange(i1D, i32v4.x, i64, gl_ScopeDevice, gl_StorageSemanticsImage, gl_SemanticsRelaxed);
// CompSwap takes separate (semantics, semanticsFail) pairs for success/failure.
i64 = imageAtomicCompSwap(i3D, i32v4.xyz, i64, i64 + 1, gl_ScopeDevice,
                          gl_StorageSemanticsImage, gl_SemanticsRelaxed, gl_StorageSemanticsImage, gl_SemanticsRelaxed);
i64 = imageAtomicLoad(iBuf, i32v4.x, gl_ScopeDevice, gl_StorageSemanticsImage, gl_SemanticsRelaxed);
imageAtomicStore(i2DArray, i32v4.xyz, i64, gl_ScopeDevice, gl_StorageSemanticsImage, gl_SemanticsRelaxed);

// --- Unsigned 64-bit image atomics, implicit overloads ---
u64 = imageAtomicAdd(u2D, i32v4.xy, u64);
u64 = imageAtomicMin(uCube, i32v4.xyz, u64);
u64 = imageAtomicMax(u1DArray, i32v4.xy, u64);
u64 = imageAtomicAnd(uCubeArray, i32v4.xyz, u64);
u64 = imageAtomicOr(u2DMS, i32v4.xy, i32v4.z, u64);
u64 = imageAtomicXor(u2D, i32v4.xy, u64);
u64 = imageAtomicExchange(uCube, i32v4.xyz, u64);
u64 = imageAtomicCompSwap(u1DArray, i32v4.xy, u64, u64 + 1);

// --- Unsigned 64-bit image atomics, explicit scope/semantics overloads ---
u64 = imageAtomicAdd(u2D, i32v4.xy, u64, gl_ScopeDevice, gl_StorageSemanticsImage, gl_SemanticsRelaxed);
u64 = imageAtomicMin(uCube, i32v4.xyz, u64, gl_ScopeDevice, gl_StorageSemanticsImage, gl_SemanticsRelaxed);
u64 = imageAtomicMax(u1DArray, i32v4.xy, u64, gl_ScopeDevice, gl_StorageSemanticsImage, gl_SemanticsRelaxed);
u64 = imageAtomicAnd(uCubeArray, i32v4.xyz, u64, gl_ScopeDevice, gl_StorageSemanticsImage, gl_SemanticsRelaxed);
u64 = imageAtomicOr(u2DMS, i32v4.xy, i32v4.z, u64, gl_ScopeDevice, gl_StorageSemanticsImage, gl_SemanticsRelaxed);
u64 = imageAtomicXor(u2D, i32v4.xy, u64, gl_ScopeDevice, gl_StorageSemanticsImage, gl_SemanticsRelaxed);
u64 = imageAtomicExchange(uCube, i32v4.xyz, u64, gl_ScopeDevice, gl_StorageSemanticsImage, gl_SemanticsRelaxed);
u64 = imageAtomicCompSwap(u1DArray, i32v4.xy, u64, u64 + 1, gl_ScopeDevice,
                          gl_StorageSemanticsImage, gl_SemanticsRelaxed, gl_StorageSemanticsImage, gl_SemanticsRelaxed);
u64 = imageAtomicLoad(uCubeArray, i32v4.xyz, gl_ScopeDevice, gl_StorageSemanticsImage, gl_SemanticsRelaxed);
imageAtomicStore(u2DMS, i32v4.xy, i32v4.z, u64, gl_ScopeDevice, gl_StorageSemanticsImage, gl_SemanticsRelaxed);

// --- Non-atomic 64-bit image loads (signed) ---
i64v4 += imageLoad(i1D, i32v4.x);
i64v4 += imageLoad(i3D, i32v4.xyz);
i64v4 += imageLoad(iBuf, i32v4.x);
i64v4 += imageLoad(i2DArray, i32v4.xyz);
i64v4 += imageLoad(i2DRect, i32v4.xy);
i64v4 += imageLoad(i2DMSArray, i32v4.xyz, i32v4.w);

// --- Non-atomic 64-bit image stores (unsigned) ---
imageStore(u2D, i32v4.xy, u64v4);
imageStore(uCube, i32v4.xyz, u64v4);
imageStore(u1DArray, i32v4.xy, u64v4);
imageStore(uCubeArray, i32v4.xyz, u64v4);
imageStore(u2DMS, i32v4.xy, i32v4.z, u64v4);

// --- Sparse (residency-returning) 64-bit image loads ---
sparseImageLoadARB(i3D, i32v4.xyz, i64v4);
sparseImageLoadARB(i2DArray, i32v4.xyz, i64v4);
sparseImageLoadARB(i2DRect, i32v4.xy, i64v4);
sparseImageLoadARB(u2D, i32v4.xy, u64v4);
sparseImageLoadARB(uCube, i32v4.xyz, u64v4);
sparseImageLoadARB(uCubeArray, i32v4.xyz, u64v4);