dEQP-VK.glsl.builtin.precision.mod.mediump.vec2
dEQP-VK.glsl.builtin.precision.mod.mediump.vec3
dEQP-VK.glsl.builtin.precision.mod.mediump.vec4
-dEQP-VK.glsl.builtin.precision.modf.mediump
-dEQP-VK.glsl.builtin.precision.modf.highp
+dEQP-VK.glsl.builtin.precision.modf.mediump.scalar
+dEQP-VK.glsl.builtin.precision.modf.highp.scalar
dEQP-VK.glsl.builtin.precision.min.mediump.scalar
dEQP-VK.glsl.builtin.precision.min.mediump.vec2
dEQP-VK.glsl.builtin.precision.min.mediump.vec3
dEQP-VK.glsl.builtin.precision_fp16_storage16b.fract.compute.vec2
dEQP-VK.glsl.builtin.precision_fp16_storage16b.fract.compute.vec3
dEQP-VK.glsl.builtin.precision_fp16_storage16b.fract.compute.vec4
-dEQP-VK.glsl.builtin.precision_fp16_storage16b.modf.compute
+dEQP-VK.glsl.builtin.precision_fp16_storage16b.modf.compute.scalar
dEQP-VK.glsl.builtin.precision_fp16_storage16b.min.compute.scalar
dEQP-VK.glsl.builtin.precision_fp16_storage16b.min.compute.vec2
dEQP-VK.glsl.builtin.precision_fp16_storage16b.min.compute.vec3
dEQP-VK.glsl.builtin.precision_fp16_storage32b.fract.compute.vec2
dEQP-VK.glsl.builtin.precision_fp16_storage32b.fract.compute.vec3
dEQP-VK.glsl.builtin.precision_fp16_storage32b.fract.compute.vec4
-dEQP-VK.glsl.builtin.precision_fp16_storage32b.modf.compute
+dEQP-VK.glsl.builtin.precision_fp16_storage32b.modf.compute.scalar
dEQP-VK.glsl.builtin.precision_fp16_storage32b.min.compute.scalar
dEQP-VK.glsl.builtin.precision_fp16_storage32b.min.compute.vec2
dEQP-VK.glsl.builtin.precision_fp16_storage32b.min.compute.vec3
dEQP-VK.glsl.builtin.precision.mod.mediump.vec2
dEQP-VK.glsl.builtin.precision.mod.mediump.vec3
dEQP-VK.glsl.builtin.precision.mod.mediump.vec4
-dEQP-VK.glsl.builtin.precision.modf.mediump
-dEQP-VK.glsl.builtin.precision.modf.highp
+dEQP-VK.glsl.builtin.precision.modf.mediump.scalar
+dEQP-VK.glsl.builtin.precision.modf.highp.scalar
dEQP-VK.glsl.builtin.precision.min.mediump.scalar
dEQP-VK.glsl.builtin.precision.min.mediump.vec2
dEQP-VK.glsl.builtin.precision.min.mediump.vec3
dEQP-VK.glsl.builtin.precision_fp16_storage16b.fract.compute.vec2
dEQP-VK.glsl.builtin.precision_fp16_storage16b.fract.compute.vec3
dEQP-VK.glsl.builtin.precision_fp16_storage16b.fract.compute.vec4
-dEQP-VK.glsl.builtin.precision_fp16_storage16b.modf.compute
+dEQP-VK.glsl.builtin.precision_fp16_storage16b.modf.compute.scalar
dEQP-VK.glsl.builtin.precision_fp16_storage16b.min.compute.scalar
dEQP-VK.glsl.builtin.precision_fp16_storage16b.min.compute.vec2
dEQP-VK.glsl.builtin.precision_fp16_storage16b.min.compute.vec3
dEQP-VK.glsl.builtin.precision_fp16_storage32b.fract.compute.vec2
dEQP-VK.glsl.builtin.precision_fp16_storage32b.fract.compute.vec3
dEQP-VK.glsl.builtin.precision_fp16_storage32b.fract.compute.vec4
-dEQP-VK.glsl.builtin.precision_fp16_storage32b.modf.compute
+dEQP-VK.glsl.builtin.precision_fp16_storage32b.modf.compute.scalar
dEQP-VK.glsl.builtin.precision_fp16_storage32b.min.compute.scalar
dEQP-VK.glsl.builtin.precision_fp16_storage32b.min.compute.vec2
dEQP-VK.glsl.builtin.precision_fp16_storage32b.min.compute.vec3
dEQP-VK.glsl.builtin.precision.frem.highp.vec2
dEQP-VK.glsl.builtin.precision.frem.highp.vec3
dEQP-VK.glsl.builtin.precision.frem.highp.vec4
+dEQP-VK.glsl.builtin.precision.modf.mediump.vec2
+dEQP-VK.glsl.builtin.precision.modf.mediump.vec3
+dEQP-VK.glsl.builtin.precision.modf.mediump.vec4
+dEQP-VK.glsl.builtin.precision.modf.highp.vec2
+dEQP-VK.glsl.builtin.precision.modf.highp.vec3
+dEQP-VK.glsl.builtin.precision.modf.highp.vec4
+dEQP-VK.glsl.builtin.precision.modfstruct.mediump.scalar
+dEQP-VK.glsl.builtin.precision.modfstruct.mediump.vec2
+dEQP-VK.glsl.builtin.precision.modfstruct.mediump.vec3
+dEQP-VK.glsl.builtin.precision.modfstruct.mediump.vec4
+dEQP-VK.glsl.builtin.precision.modfstruct.highp.scalar
+dEQP-VK.glsl.builtin.precision.modfstruct.highp.vec2
+dEQP-VK.glsl.builtin.precision.modfstruct.highp.vec3
+dEQP-VK.glsl.builtin.precision.modfstruct.highp.vec4
dEQP-VK.glsl.builtin.precision.min.highp.scalar
dEQP-VK.glsl.builtin.precision.min.highp.vec2
dEQP-VK.glsl.builtin.precision.min.highp.vec3
dEQP-VK.glsl.builtin.precision.refract.highp.vec3
dEQP-VK.glsl.builtin.precision.refract.highp.vec4
dEQP-VK.glsl.builtin.precision.inverse.highp.mat2
+dEQP-VK.glsl.builtin.precision.frexpstruct.mediump.scalar
+dEQP-VK.glsl.builtin.precision.frexpstruct.mediump.vec2
+dEQP-VK.glsl.builtin.precision.frexpstruct.mediump.vec3
+dEQP-VK.glsl.builtin.precision.frexpstruct.mediump.vec4
+dEQP-VK.glsl.builtin.precision.frexpstruct.highp.scalar
+dEQP-VK.glsl.builtin.precision.frexpstruct.highp.vec2
+dEQP-VK.glsl.builtin.precision.frexpstruct.highp.vec3
+dEQP-VK.glsl.builtin.precision.frexpstruct.highp.vec4
dEQP-VK.glsl.builtin.precision_fp16_storage16b.mod.compute.scalar
dEQP-VK.glsl.builtin.precision_fp16_storage16b.mod.compute.vec2
dEQP-VK.glsl.builtin.precision_fp16_storage16b.mod.compute.vec3
dEQP-VK.glsl.builtin.precision_fp16_storage16b.frem.compute.vec2
dEQP-VK.glsl.builtin.precision_fp16_storage16b.frem.compute.vec3
dEQP-VK.glsl.builtin.precision_fp16_storage16b.frem.compute.vec4
+dEQP-VK.glsl.builtin.precision_fp16_storage16b.modf.compute.vec2
+dEQP-VK.glsl.builtin.precision_fp16_storage16b.modf.compute.vec3
+dEQP-VK.glsl.builtin.precision_fp16_storage16b.modf.compute.vec4
+dEQP-VK.glsl.builtin.precision_fp16_storage16b.modfstruct.compute.scalar
+dEQP-VK.glsl.builtin.precision_fp16_storage16b.modfstruct.compute.vec2
+dEQP-VK.glsl.builtin.precision_fp16_storage16b.modfstruct.compute.vec3
+dEQP-VK.glsl.builtin.precision_fp16_storage16b.modfstruct.compute.vec4
+dEQP-VK.glsl.builtin.precision_fp16_storage16b.frexpstruct.compute.scalar
+dEQP-VK.glsl.builtin.precision_fp16_storage16b.frexpstruct.compute.vec2
+dEQP-VK.glsl.builtin.precision_fp16_storage16b.frexpstruct.compute.vec3
+dEQP-VK.glsl.builtin.precision_fp16_storage16b.frexpstruct.compute.vec4
dEQP-VK.glsl.builtin.precision_fp16_storage32b.mod.compute.scalar
dEQP-VK.glsl.builtin.precision_fp16_storage32b.mod.compute.vec2
dEQP-VK.glsl.builtin.precision_fp16_storage32b.mod.compute.vec3
dEQP-VK.glsl.builtin.precision_fp16_storage32b.frem.compute.vec2
dEQP-VK.glsl.builtin.precision_fp16_storage32b.frem.compute.vec3
dEQP-VK.glsl.builtin.precision_fp16_storage32b.frem.compute.vec4
+dEQP-VK.glsl.builtin.precision_fp16_storage32b.modf.compute.vec2
+dEQP-VK.glsl.builtin.precision_fp16_storage32b.modf.compute.vec3
+dEQP-VK.glsl.builtin.precision_fp16_storage32b.modf.compute.vec4
+dEQP-VK.glsl.builtin.precision_fp16_storage32b.modfstruct.compute.scalar
+dEQP-VK.glsl.builtin.precision_fp16_storage32b.modfstruct.compute.vec2
+dEQP-VK.glsl.builtin.precision_fp16_storage32b.modfstruct.compute.vec3
+dEQP-VK.glsl.builtin.precision_fp16_storage32b.modfstruct.compute.vec4
+dEQP-VK.glsl.builtin.precision_fp16_storage32b.frexpstruct.compute.scalar
+dEQP-VK.glsl.builtin.precision_fp16_storage32b.frexpstruct.compute.vec2
+dEQP-VK.glsl.builtin.precision_fp16_storage32b.frexpstruct.compute.vec3
+dEQP-VK.glsl.builtin.precision_fp16_storage32b.frexpstruct.compute.vec4
dEQP-VK.glsl.builtin.precision_double.comparison.compute.scalar
dEQP-VK.glsl.builtin.precision_double.comparison.compute.vec2
dEQP-VK.glsl.builtin.precision_double.comparison.compute.vec3
dEQP-VK.glsl.builtin.precision_double.frem.compute.vec2
dEQP-VK.glsl.builtin.precision_double.frem.compute.vec3
dEQP-VK.glsl.builtin.precision_double.frem.compute.vec4
-dEQP-VK.glsl.builtin.precision_double.modf.compute
+dEQP-VK.glsl.builtin.precision_double.modf.compute.scalar
+dEQP-VK.glsl.builtin.precision_double.modf.compute.vec2
+dEQP-VK.glsl.builtin.precision_double.modf.compute.vec3
+dEQP-VK.glsl.builtin.precision_double.modf.compute.vec4
+dEQP-VK.glsl.builtin.precision_double.modfstruct.compute.scalar
+dEQP-VK.glsl.builtin.precision_double.modfstruct.compute.vec2
+dEQP-VK.glsl.builtin.precision_double.modfstruct.compute.vec3
+dEQP-VK.glsl.builtin.precision_double.modfstruct.compute.vec4
dEQP-VK.glsl.builtin.precision_double.min.compute.scalar
dEQP-VK.glsl.builtin.precision_double.min.compute.vec2
dEQP-VK.glsl.builtin.precision_double.min.compute.vec3
dEQP-VK.glsl.builtin.precision_double.frexp.compute.vec2
dEQP-VK.glsl.builtin.precision_double.frexp.compute.vec3
dEQP-VK.glsl.builtin.precision_double.frexp.compute.vec4
+dEQP-VK.glsl.builtin.precision_double.frexpstruct.compute.scalar
+dEQP-VK.glsl.builtin.precision_double.frexpstruct.compute.vec2
+dEQP-VK.glsl.builtin.precision_double.frexpstruct.compute.vec3
+dEQP-VK.glsl.builtin.precision_double.frexpstruct.compute.vec4
dEQP-VK.glsl.builtin.precision_double.ldexp.compute.scalar
dEQP-VK.glsl.builtin.precision_double.ldexp.compute.vec2
dEQP-VK.glsl.builtin.precision_double.ldexp.compute.vec3
dEQP-VK.glsl.builtin.precision.frem.highp.vec2
dEQP-VK.glsl.builtin.precision.frem.highp.vec3
dEQP-VK.glsl.builtin.precision.frem.highp.vec4
-dEQP-VK.glsl.builtin.precision.modf.mediump
-dEQP-VK.glsl.builtin.precision.modf.highp
+dEQP-VK.glsl.builtin.precision.modf.mediump.scalar
+dEQP-VK.glsl.builtin.precision.modf.mediump.vec2
+dEQP-VK.glsl.builtin.precision.modf.mediump.vec3
+dEQP-VK.glsl.builtin.precision.modf.mediump.vec4
+dEQP-VK.glsl.builtin.precision.modf.highp.scalar
+dEQP-VK.glsl.builtin.precision.modf.highp.vec2
+dEQP-VK.glsl.builtin.precision.modf.highp.vec3
+dEQP-VK.glsl.builtin.precision.modf.highp.vec4
+dEQP-VK.glsl.builtin.precision.modfstruct.mediump.scalar
+dEQP-VK.glsl.builtin.precision.modfstruct.mediump.vec2
+dEQP-VK.glsl.builtin.precision.modfstruct.mediump.vec3
+dEQP-VK.glsl.builtin.precision.modfstruct.mediump.vec4
+dEQP-VK.glsl.builtin.precision.modfstruct.highp.scalar
+dEQP-VK.glsl.builtin.precision.modfstruct.highp.vec2
+dEQP-VK.glsl.builtin.precision.modfstruct.highp.vec3
+dEQP-VK.glsl.builtin.precision.modfstruct.highp.vec4
dEQP-VK.glsl.builtin.precision.min.mediump.scalar
dEQP-VK.glsl.builtin.precision.min.mediump.vec2
dEQP-VK.glsl.builtin.precision.min.mediump.vec3
dEQP-VK.glsl.builtin.precision.frexp.highp.vec2
dEQP-VK.glsl.builtin.precision.frexp.highp.vec3
dEQP-VK.glsl.builtin.precision.frexp.highp.vec4
+dEQP-VK.glsl.builtin.precision.frexpstruct.mediump.scalar
+dEQP-VK.glsl.builtin.precision.frexpstruct.mediump.vec2
+dEQP-VK.glsl.builtin.precision.frexpstruct.mediump.vec3
+dEQP-VK.glsl.builtin.precision.frexpstruct.mediump.vec4
+dEQP-VK.glsl.builtin.precision.frexpstruct.highp.scalar
+dEQP-VK.glsl.builtin.precision.frexpstruct.highp.vec2
+dEQP-VK.glsl.builtin.precision.frexpstruct.highp.vec3
+dEQP-VK.glsl.builtin.precision.frexpstruct.highp.vec4
dEQP-VK.glsl.builtin.precision.ldexp.mediump.scalar
dEQP-VK.glsl.builtin.precision.ldexp.mediump.vec2
dEQP-VK.glsl.builtin.precision.ldexp.mediump.vec3
dEQP-VK.glsl.builtin.precision_fp16_storage16b.frem.compute.vec2
dEQP-VK.glsl.builtin.precision_fp16_storage16b.frem.compute.vec3
dEQP-VK.glsl.builtin.precision_fp16_storage16b.frem.compute.vec4
-dEQP-VK.glsl.builtin.precision_fp16_storage16b.modf.compute
+dEQP-VK.glsl.builtin.precision_fp16_storage16b.modf.compute.scalar
+dEQP-VK.glsl.builtin.precision_fp16_storage16b.modf.compute.vec2
+dEQP-VK.glsl.builtin.precision_fp16_storage16b.modf.compute.vec3
+dEQP-VK.glsl.builtin.precision_fp16_storage16b.modf.compute.vec4
+dEQP-VK.glsl.builtin.precision_fp16_storage16b.modfstruct.compute.scalar
+dEQP-VK.glsl.builtin.precision_fp16_storage16b.modfstruct.compute.vec2
+dEQP-VK.glsl.builtin.precision_fp16_storage16b.modfstruct.compute.vec3
+dEQP-VK.glsl.builtin.precision_fp16_storage16b.modfstruct.compute.vec4
dEQP-VK.glsl.builtin.precision_fp16_storage16b.min.compute.scalar
dEQP-VK.glsl.builtin.precision_fp16_storage16b.min.compute.vec2
dEQP-VK.glsl.builtin.precision_fp16_storage16b.min.compute.vec3
dEQP-VK.glsl.builtin.precision_fp16_storage16b.frexp.compute.vec2
dEQP-VK.glsl.builtin.precision_fp16_storage16b.frexp.compute.vec3
dEQP-VK.glsl.builtin.precision_fp16_storage16b.frexp.compute.vec4
+dEQP-VK.glsl.builtin.precision_fp16_storage16b.frexpstruct.compute.scalar
+dEQP-VK.glsl.builtin.precision_fp16_storage16b.frexpstruct.compute.vec2
+dEQP-VK.glsl.builtin.precision_fp16_storage16b.frexpstruct.compute.vec3
+dEQP-VK.glsl.builtin.precision_fp16_storage16b.frexpstruct.compute.vec4
dEQP-VK.glsl.builtin.precision_fp16_storage16b.ldexp.compute.scalar
dEQP-VK.glsl.builtin.precision_fp16_storage16b.ldexp.compute.vec2
dEQP-VK.glsl.builtin.precision_fp16_storage16b.ldexp.compute.vec3
dEQP-VK.glsl.builtin.precision_fp16_storage32b.frem.compute.vec2
dEQP-VK.glsl.builtin.precision_fp16_storage32b.frem.compute.vec3
dEQP-VK.glsl.builtin.precision_fp16_storage32b.frem.compute.vec4
-dEQP-VK.glsl.builtin.precision_fp16_storage32b.modf.compute
+dEQP-VK.glsl.builtin.precision_fp16_storage32b.modf.compute.scalar
+dEQP-VK.glsl.builtin.precision_fp16_storage32b.modf.compute.vec2
+dEQP-VK.glsl.builtin.precision_fp16_storage32b.modf.compute.vec3
+dEQP-VK.glsl.builtin.precision_fp16_storage32b.modf.compute.vec4
+dEQP-VK.glsl.builtin.precision_fp16_storage32b.modfstruct.compute.scalar
+dEQP-VK.glsl.builtin.precision_fp16_storage32b.modfstruct.compute.vec2
+dEQP-VK.glsl.builtin.precision_fp16_storage32b.modfstruct.compute.vec3
+dEQP-VK.glsl.builtin.precision_fp16_storage32b.modfstruct.compute.vec4
dEQP-VK.glsl.builtin.precision_fp16_storage32b.min.compute.scalar
dEQP-VK.glsl.builtin.precision_fp16_storage32b.min.compute.vec2
dEQP-VK.glsl.builtin.precision_fp16_storage32b.min.compute.vec3
dEQP-VK.glsl.builtin.precision_fp16_storage32b.frexp.compute.vec2
dEQP-VK.glsl.builtin.precision_fp16_storage32b.frexp.compute.vec3
dEQP-VK.glsl.builtin.precision_fp16_storage32b.frexp.compute.vec4
+dEQP-VK.glsl.builtin.precision_fp16_storage32b.frexpstruct.compute.scalar
+dEQP-VK.glsl.builtin.precision_fp16_storage32b.frexpstruct.compute.vec2
+dEQP-VK.glsl.builtin.precision_fp16_storage32b.frexpstruct.compute.vec3
+dEQP-VK.glsl.builtin.precision_fp16_storage32b.frexpstruct.compute.vec4
dEQP-VK.glsl.builtin.precision_fp16_storage32b.ldexp.compute.scalar
dEQP-VK.glsl.builtin.precision_fp16_storage32b.ldexp.compute.vec2
dEQP-VK.glsl.builtin.precision_fp16_storage32b.ldexp.compute.vec3
dEQP-VK.glsl.builtin.precision_double.frem.compute.vec2
dEQP-VK.glsl.builtin.precision_double.frem.compute.vec3
dEQP-VK.glsl.builtin.precision_double.frem.compute.vec4
-dEQP-VK.glsl.builtin.precision_double.modf.compute
+dEQP-VK.glsl.builtin.precision_double.modf.compute.scalar
+dEQP-VK.glsl.builtin.precision_double.modf.compute.vec2
+dEQP-VK.glsl.builtin.precision_double.modf.compute.vec3
+dEQP-VK.glsl.builtin.precision_double.modf.compute.vec4
+dEQP-VK.glsl.builtin.precision_double.modfstruct.compute.scalar
+dEQP-VK.glsl.builtin.precision_double.modfstruct.compute.vec2
+dEQP-VK.glsl.builtin.precision_double.modfstruct.compute.vec3
+dEQP-VK.glsl.builtin.precision_double.modfstruct.compute.vec4
dEQP-VK.glsl.builtin.precision_double.min.compute.scalar
dEQP-VK.glsl.builtin.precision_double.min.compute.vec2
dEQP-VK.glsl.builtin.precision_double.min.compute.vec3
dEQP-VK.glsl.builtin.precision_double.frexp.compute.vec2
dEQP-VK.glsl.builtin.precision_double.frexp.compute.vec3
dEQP-VK.glsl.builtin.precision_double.frexp.compute.vec4
+dEQP-VK.glsl.builtin.precision_double.frexpstruct.compute.scalar
+dEQP-VK.glsl.builtin.precision_double.frexpstruct.compute.vec2
+dEQP-VK.glsl.builtin.precision_double.frexpstruct.compute.vec3
+dEQP-VK.glsl.builtin.precision_double.frexpstruct.compute.vec4
dEQP-VK.glsl.builtin.precision_double.ldexp.compute.scalar
dEQP-VK.glsl.builtin.precision_double.ldexp.compute.vec2
dEQP-VK.glsl.builtin.precision_double.ldexp.compute.vec3
typedef Modf< Signature<double, double, double> > Modf64Bit;
template <class T>
+class ModfStruct : public Modf<T>
+{
+public:
+ virtual string getName (void) const { return "modfstruct"; }
+ virtual SpirVCaseT getSpirvCase (void) const { return SPIRV_CASETYPE_MODFSTRUCT; }
+};
+typedef ModfStruct< Signature<float, float, float> > ModfStruct32Bit;
+typedef ModfStruct< Signature<deFloat16, deFloat16, deFloat16> > ModfStruct16Bit;
+typedef ModfStruct< Signature<double, double, double> > ModfStruct64Bit;
+
+template <class T>
class Min : public PreciseFunc2<T> { public: Min (void) : PreciseFunc2<T> ("min", deMin) {} };
template <class T>
class Max : public PreciseFunc2<T> { public: Max (void) : PreciseFunc2<T> ("max", deMax) {} };
return 1;
}
};
+typedef FrExp< Signature<float, float, int> > Frexp32Bit;
+typedef FrExp< Signature<deFloat16, deFloat16, int> > Frexp16Bit;
+typedef FrExp< Signature<double, double, int> > Frexp64Bit;
+
+template <class T>
+class FrexpStruct : public FrExp<T>
+{
+public:
+ virtual string getName (void) const { return "frexpstruct"; }
+ virtual SpirVCaseT getSpirvCase (void) const { return SPIRV_CASETYPE_FREXPSTRUCT; }
+};
+typedef FrexpStruct< Signature<float, float, int> > FrexpStruct32Bit;
+typedef FrexpStruct< Signature<deFloat16, deFloat16, int> > FrexpStruct16Bit;
+typedef FrexpStruct< Signature<double, double, int> > FrexpStruct64Bit;
//Signature<float, float, int>
//Signature<deFloat16, deFloat16, int>
addScalarFactory<Mod32Bit>(*funcs, "mod", true);
addScalarFactory<FRem32Bit>(*funcs);
- funcs->addFactory(createSimpleFuncCaseFactory<Modf32Bit>());
+ addScalarFactory<Modf32Bit>(*funcs);
+ addScalarFactory<ModfStruct32Bit>(*funcs);
addScalarFactory<Min< Signature<float, float, float> > >(*funcs);
addScalarFactory<Max< Signature<float, float, float> > >(*funcs);
addScalarFactory<Clamp< Signature<float, float, float, float> > >(*funcs);
funcs->addFactory(SharedPtr<const CaseFactory>(new SquareMatrixFuncCaseFactory<Determinant>()));
funcs->addFactory(SharedPtr<const CaseFactory>(new SquareMatrixFuncCaseFactory<Inverse>()));
- addScalarFactory<FrExp <Signature<float, float, int> > >(*funcs);
+ addScalarFactory<Frexp32Bit>(*funcs);
+ addScalarFactory<FrexpStruct32Bit>(*funcs);
addScalarFactory<LdExp <Signature<float, float, int> > >(*funcs);
addScalarFactory<Fma <Signature<float, float, float, float> > >(*funcs);
addScalarFactory<Mod64Bit>(*funcs, "mod", true);
addScalarFactory<FRem64Bit>(*funcs);
- funcs->addFactory(createSimpleFuncCaseFactory<Modf64Bit>());
+ addScalarFactory<Modf64Bit>(*funcs);
+ addScalarFactory<ModfStruct64Bit>(*funcs);
addScalarFactory<Min<Signature<double, double, double>>>(*funcs);
addScalarFactory<Max<Signature<double, double, double>>>(*funcs);
addScalarFactory<Clamp<Signature<double, double, double, double>>>(*funcs);
funcs->addFactory(SharedPtr<const CaseFactory>(new SquareMatrixFuncCaseFactory<Determinant64bit>()));
funcs->addFactory(SharedPtr<const CaseFactory>(new SquareMatrixFuncCaseFactory<Inverse64bit>()));
- addScalarFactory<FrExp<Signature<double, double, int>>>(*funcs);
+ addScalarFactory<Frexp64Bit>(*funcs);
+ addScalarFactory<FrexpStruct64Bit>(*funcs);
addScalarFactory<LdExp<Signature<double, double, int>>>(*funcs);
addScalarFactory<Fma<Signature<double, double, double, double>>>(*funcs);
addScalarFactory<Mod16Bit>(*funcs, "mod", true);
addScalarFactory<FRem16Bit>(*funcs);
- funcs->addFactory(createSimpleFuncCaseFactory<Modf16Bit>());
+ addScalarFactory<Modf16Bit>(*funcs);
+ addScalarFactory<ModfStruct16Bit>(*funcs);
addScalarFactory<Min< Signature<deFloat16, deFloat16, deFloat16> > >(*funcs);
addScalarFactory<Max< Signature<deFloat16, deFloat16, deFloat16> > >(*funcs);
addScalarFactory<Clamp< Signature<deFloat16, deFloat16, deFloat16, deFloat16> > >(*funcs);
funcs->addFactory(SharedPtr<const CaseFactory>(new SquareMatrixFuncCaseFactory<Determinant16bit>()));
funcs->addFactory(SharedPtr<const CaseFactory>(new SquareMatrixFuncCaseFactory<Inverse16bit>()));
- addScalarFactory<FrExp <Signature<deFloat16, deFloat16, int> > >(*funcs);
+ addScalarFactory<Frexp16Bit>(*funcs);
+ addScalarFactory<FrexpStruct16Bit>(*funcs);
addScalarFactory<LdExp <Signature<deFloat16, deFloat16, int> > >(*funcs);
addScalarFactory<Fma <Signature<deFloat16, deFloat16, deFloat16, deFloat16> > >(*funcs);
<< "OpBranchConditional %operation_result_" << operationNdx << " %label_IF_" << operationNdx << " %IF_" << operationNdx << "\n"
<< "%label_IF_" << operationNdx << " = OpLabel\n"
<< "%operation_val_" << operationNdx << " = OpLoad %i32 %operation\n"
- << "%out_val_" << operationNdx << " = OpLoad %i32 %out\n"
+ << "%out_val_" << operationNdx << " = OpLoad %i32 %out0\n"
<< "%add_if_" << operationNdx << " = OpIAdd %i32 %out_val_" << operationNdx << " %operation_val_" << operationNdx << "\n"
- << "OpStore %out %add_if_" << operationNdx << "\n"
+ << "OpStore %out0 %add_if_" << operationNdx << "\n"
<< "OpBranch %IF_" << operationNdx << "\n"
<< "%IF_" << operationNdx << " = OpLabel\n";
return src.str();
src << "\n";
src << "%toAdd" << operationNdx << " = OpIMul "<< outputType << " %ivec_result_" << operationNdx << " %operation_vec_" << operationNdx <<"\n"
- << "%out_val_" << operationNdx << " = OpLoad "<< outputType << " %out\n"
+ << "%out_val_" << operationNdx << " = OpLoad "<< outputType << " %out0\n"
<< "%add_if_" << operationNdx << " = OpIAdd " << outputType << " %out_val_" << operationNdx << " %toAdd" << operationNdx << "\n"
- << "OpStore %out %add_if_" << operationNdx << "\n";
+ << "OpStore %out0 %add_if_" << operationNdx << "\n";
return src.str();
}
};
int moveBitNdx = 0;
- const std::string inputType1 = getTypeSpirv(spec.inputs[0].varType.getBasicType(), spec.packFloat16Bit);
- const std::string inputType2 = getTypeSpirv(spec.inputs[1].varType.getBasicType(), spec.packFloat16Bit);
- const std::string outputType = getTypeSpirv(spec.outputs[0].varType.getBasicType(), spec.packFloat16Bit);
+ vector<std::string> inputTypes;
+ vector<std::string> outputTypes;
const std::string packType = spec.packFloat16Bit ? getTypeSpirv(getDataTypeFloat16Scalars(spec.inputs[0].varType.getBasicType())) : "";
- const bool floatResult = glu::isDataTypeFloatType(spec.outputs[0].varType.getBasicType());
- const bool packFloatRes = (floatResult && spec.packFloat16Bit);
+ vector<bool> floatResult;
+ for (const auto& symbol : spec.outputs)
+ floatResult.push_back(glu::isDataTypeFloatType(symbol.varType.getBasicType()));
+
+ const bool anyFloatResult = std::any_of(begin(floatResult), end(floatResult), [](bool b) { return b; });
+
+ vector<bool> packFloatRes;
+ for (const auto& floatRes : floatResult)
+ packFloatRes.push_back(floatRes && spec.packFloat16Bit);
+
const bool useF32Types = (!are16Bit && !are64Bit);
const bool useF64Types = are64Bit;
const bool useF16Types = (spec.packFloat16Bit || are16Bit);
- if (floatResult)
- DE_ASSERT(spec.spirvCase == SPIRV_CASETYPE_FREM);
+ for (const auto& symbol : spec.inputs)
+ inputTypes.push_back(getTypeSpirv(symbol.varType.getBasicType(), spec.packFloat16Bit));
+
+ for (const auto& symbol : spec.outputs)
+ outputTypes.push_back(getTypeSpirv(symbol.varType.getBasicType(), spec.packFloat16Bit));
+
+ DE_ASSERT(!inputTypes.empty());
+ DE_ASSERT(!outputTypes.empty());
+
+ // Assert input and output types match the expected operations.
+ switch (spec.spirvCase)
+ {
+ case SPIRV_CASETYPE_COMPARE:
+ case SPIRV_CASETYPE_FREM:
+ DE_ASSERT(inputTypes.size() == 2);
+ DE_ASSERT(outputTypes.size() == 1);
+ break;
+ case SPIRV_CASETYPE_MODFSTRUCT:
+ case SPIRV_CASETYPE_FREXPSTRUCT:
+ DE_ASSERT(inputTypes.size() == 1);
+ DE_ASSERT(outputTypes.size() == 2);
+ break;
+ default:
+ DE_ASSERT(false);
+ break;
+ };
std::ostringstream src;
src << "; SPIR-V\n"
if (are16Bit)
src << "OpExtension \"SPV_KHR_16bit_storage\"\n";
- src << "%1 = OpExtInstImport \"GLSL.std.450\"\n"
+ src << "%glslstd450 = OpExtInstImport \"GLSL.std.450\"\n"
"OpMemoryModel Logical GLSL450\n"
"OpEntryPoint GLCompute %BP_main \"main\" %BP_id3uNum %BP_id3uID\n"
"OpExecutionMode %BP_main LocalSize 1 1 1\n"
"OpDecorate %BP_id3uNum BuiltIn NumWorkgroups\n"
"OpDecorate %BP_id3uID BuiltIn WorkgroupId\n";
- //input offset
+ // Input offsets and stride.
{
- int offset = 0;
- int ndx = 0;
- for (vector<Symbol>::const_iterator symIter = spec.inputs.begin(); symIter != spec.inputs.end(); ++symIter)
+ int offset = 0;
+ int ndx = 0;
+ int largest = 0;
+ for (const auto& symbol : spec.inputs)
{
+ const int scalarSize = symbol.varType.getScalarSize();
+ const int memberSize = (scalarSize + ((scalarSize == 3) ? 1 : 0)) * (isDataTypeDoubleType(symbol.varType.getBasicType()) ? (int)sizeof(deUint64) : (isDataTypeFloat16OrVec(symbol.varType.getBasicType()) ? (int)sizeof(deUint16) : (int)sizeof(deUint32)));
+ const int extraMemberBytes = (offset % memberSize);
+
+ offset += ((extraMemberBytes == 0) ? 0 : (memberSize - extraMemberBytes));
src << "OpMemberDecorate %SSB0_IN "<< ndx <<" Offset " << offset << "\n";
++ndx;
- const int scalarSize = symIter->varType.getScalarSize();
- offset += (scalarSize + ((scalarSize == 3) ? 1 : 0)) * (isDataTypeDoubleType(symIter->varType.getBasicType()) ? (int)sizeof(deUint64) : (isDataTypeFloat16OrVec(symIter->varType.getBasicType()) ? (int)sizeof(deUint16) : (int)sizeof(deUint32)));
+
+ if (memberSize > largest)
+ largest = memberSize;
+
+ offset += memberSize;
}
- src << "OpDecorate %up_SSB0_IN ArrayStride "<< offset << "\n";
+ DE_ASSERT(largest > 0);
+ const int extraBytes = (offset % largest);
+ const int stride = offset + (extraBytes == 0 ? 0 : (largest - extraBytes));
+ src << "OpDecorate %up_SSB0_IN ArrayStride "<< stride << "\n";
}
src << "OpMemberDecorate %ssboIN 0 Offset 0\n"
if (isMediump)
{
- src << "OpMemberDecorate %SSB0_IN 1 RelaxedPrecision\n"
- "OpDecorate %in0 RelaxedPrecision\n"
- "OpMemberDecorate %SSB0_IN 0 RelaxedPrecision\n"
- "OpDecorate %src_val_0_0 RelaxedPrecision\n"
- "OpDecorate %src_val_0_0 RelaxedPrecision\n"
- "OpDecorate %in1 RelaxedPrecision\n"
- "OpDecorate %src_val_0_1 RelaxedPrecision\n"
- "OpDecorate %src_val_0_1 RelaxedPrecision\n"
- "OpDecorate %in0_val RelaxedPrecision\n"
- "OpDecorate %in1_val RelaxedPrecision\n"
- "OpDecorate %in0_val RelaxedPrecision\n"
- "OpDecorate %in1_val RelaxedPrecision\n"
- "OpMemberDecorate %SSB0_OUT 0 RelaxedPrecision\n";
-
- if (floatResult)
+ for (size_t i = 0; i < inputTypes.size(); ++i)
+ {
+ src <<
+ "OpMemberDecorate %SSB0_IN " << i << " RelaxedPrecision\n"
+ "OpDecorate %in" << i << " RelaxedPrecision\n"
+ "OpDecorate %src_val_0_" << i << " RelaxedPrecision\n"
+ "OpDecorate %in" << i << "_val RelaxedPrecision\n"
+ ;
+ }
+
+ if (anyFloatResult)
{
- src <<
- "OpDecorate %out RelaxedPrecision\n"
- "OpDecorate %frem_result RelaxedPrecision\n"
- "OpDecorate %out_val_final RelaxedPrecision\n";
+ switch (spec.spirvCase)
+ {
+ case SPIRV_CASETYPE_FREM:
+ src << "OpDecorate %frem_result RelaxedPrecision\n";
+ break;
+ case SPIRV_CASETYPE_MODFSTRUCT:
+ src << "OpDecorate %modfstruct_result RelaxedPrecision\n";
+ break;
+ case SPIRV_CASETYPE_FREXPSTRUCT:
+ src << "OpDecorate %frexpstruct_result RelaxedPrecision\n";
+ break;
+ default:
+ DE_ASSERT(false);
+ break;
+ }
+
+ for (size_t i = 0; i < outputTypes.size(); ++i)
+ {
+ src << "OpMemberDecorate %SSB0_OUT " << i << " RelaxedPrecision\n";
+ src << "OpDecorate %out_val_final_" << i << " RelaxedPrecision\n";
+ src << "OpDecorate %out" << i << " RelaxedPrecision\n";
+ }
}
}
- //output offset
+ // Output offsets and stride.
{
- int offset = 0;
- int ndx = 0;
- for (vector<Symbol>::const_iterator symIter = spec.outputs.begin(); symIter != spec.outputs.end(); ++symIter)
+ int offset = 0;
+ int ndx = 0;
+ int largest = 0;
+ for (const auto& symbol : spec.outputs)
{
+ const int scalarSize = symbol.varType.getScalarSize();
+ const int memberSize = (scalarSize + ((scalarSize == 3) ? 1 : 0)) * (isDataTypeDoubleType(symbol.varType.getBasicType()) ? (int)sizeof(deUint64) : (isDataTypeFloat16OrVec(symbol.varType.getBasicType()) ? (int)sizeof(deUint16) : (int)sizeof(deUint32)));
+ const int extraMemberBytes = (offset % memberSize);
+
+ offset += ((extraMemberBytes == 0) ? 0 : (memberSize - extraMemberBytes));
src << "OpMemberDecorate %SSB0_OUT " << ndx << " Offset " << offset << "\n";
++ndx;
- const int scalarSize = symIter->varType.getScalarSize();
- offset += (scalarSize + ((scalarSize == 3) ? 1 : 0)) * (isDataTypeDoubleType(symIter->varType.getBasicType()) ? (int)sizeof(deUint64) : (isDataTypeFloat16OrVec(symIter->varType.getBasicType()) ? (int)sizeof(deUint16) : (int)sizeof(deUint32)));
+
+ if (memberSize > largest)
+ largest = memberSize;
+
+ offset += memberSize;
}
- src << "OpDecorate %up_SSB0_OUT ArrayStride " << offset << "\n";
+ DE_ASSERT(largest > 0);
+ const int extraBytes = (offset % largest);
+ const int stride = offset + ((extraBytes == 0) ? 0 : (largest - extraBytes));
+ src << "OpDecorate %up_SSB0_OUT ArrayStride " << stride << "\n";
}
src << "OpMemberDecorate %ssboOUT 0 Offset 0\n"
"\n"
"%ip_u32 = OpTypePointer Input %u32\n"
"%ip_v3u32 = OpTypePointer Input %v3u32\n"
- "%up_float = OpTypePointer Uniform " << inputType1 << "\n"
+ "%up_float = OpTypePointer Uniform " << inputTypes[0] << "\n"
"\n"
- "%voidf = OpTypeFunction %void\n"
- "%fp_u32 = OpTypePointer Function %u32\n"
- "%fp_out = OpTypePointer Function " << outputType << "\n"
- "%fp_it1 = OpTypePointer Function " << inputType1 << "\n"
- "%fp_operation = OpTypePointer Function %i32\n";
+ "%fp_operation = OpTypePointer Function %i32\n"
+ "%voidf = OpTypeFunction %void\n"
+ "%fp_u32 = OpTypePointer Function %u32\n"
+ "%fp_it1 = OpTypePointer Function " << inputTypes[0] << "\n"
+ ;
+
+ for (size_t i = 0; i < outputTypes.size(); ++i)
+ {
+ src << "%fp_out_" << i << " = OpTypePointer Function " << outputTypes[i] << "\n"
+ << "%up_out_" << i << " = OpTypePointer Uniform " << outputTypes[i] << "\n";
+ }
if (spec.packFloat16Bit)
src << "%fp_f16 = OpTypePointer Function " << packType << "\n";
src << "%BP_id3uID = OpVariable %ip_v3u32 Input\n"
"%BP_id3uNum = OpVariable %ip_v3u32 Input\n"
- "%up_out = OpTypePointer Uniform " << outputType << "\n"
"\n"
"%c_u32_0 = OpConstant %u32 0\n"
"%c_u32_1 = OpConstant %u32 1\n"
"%c_v3f64_1 = OpConstantComposite %v3f64 %c_f64_1 %c_f64_1 %c_f64_1\n"
"%c_v4f64_0 = OpConstantComposite %v4f64 %c_f64_0 %c_f64_0 %c_f64_0 %c_f64_0\n"
"%c_v4f64_1 = OpConstantComposite %v4f64 %c_f64_1 %c_f64_1 %c_f64_1 %c_f64_1\n"
- ;
+ "\n";
- src << "\n"
- "%SSB0_IN = OpTypeStruct " << inputType1 << " " << inputType2 << "\n"
+ // Input struct.
+ {
+ src << "%SSB0_IN = OpTypeStruct";
+ for (const auto& t : inputTypes)
+ src << " " << t;
+ src << "\n";
+ }
+
+ src <<
"%up_SSB0_IN = OpTypeRuntimeArray %SSB0_IN\n"
"%ssboIN = OpTypeStruct %up_SSB0_IN\n"
"%up_ssboIN = OpTypePointer Uniform %ssboIN\n"
"%ssbo_src = OpVariable %up_ssboIN Uniform\n"
- "\n"
- "%SSB0_OUT = OpTypeStruct " << outputType << "\n"
+ "\n";
+
+ // Output struct.
+ {
+ src << "%SSB0_OUT = OpTypeStruct";
+ for (const auto& t : outputTypes)
+ src << " " << t;
+ src << "\n";
+ }
+
+ std::string modfStructMemberType;
+ std::string frexpStructFirstMemberType;
+ if (spec.spirvCase == SPIRV_CASETYPE_MODFSTRUCT)
+ {
+ modfStructMemberType = (packFloatRes[0] ? packType : outputTypes[0]);
+ src << "%modfstruct_ret_t = OpTypeStruct " << modfStructMemberType << " " << modfStructMemberType << "\n";
+ }
+ else if (spec.spirvCase == SPIRV_CASETYPE_FREXPSTRUCT)
+ {
+ frexpStructFirstMemberType = (packFloatRes[0] ? packType : outputTypes[0]);
+ src << "%frexpstruct_ret_t = OpTypeStruct " << frexpStructFirstMemberType << " " << outputTypes[1] << "\n";
+ }
+
+ src <<
"%up_SSB0_OUT = OpTypeRuntimeArray %SSB0_OUT\n"
"%ssboOUT = OpTypeStruct %up_SSB0_OUT\n"
"%up_ssboOUT = OpTypePointer Uniform %ssboOUT\n"
"%BP_label = OpLabel\n"
"%invocationNdx = OpVariable %fp_u32 Function\n";
- if (spec.packFloat16Bit)
- src << "%in0 = OpVariable %fp_f16 Function\n"
- "%in1 = OpVariable %fp_f16 Function\n";
- else
- src << "%in0 = OpVariable %fp_it1 Function\n"
- "%in1 = OpVariable %fp_it1 Function\n";
+	// Note: this assumes all inputs share the same type.
+ for (size_t i = 0; i < inputTypes.size(); ++i)
+ src << "%in" << i << " = OpVariable " << (spec.packFloat16Bit ? "%fp_f16" : "%fp_it1") << " Function\n";
- src << "%out = OpVariable " << (packFloatRes ? "%fp_f16" : "%fp_out") << " Function\n";
+ for (size_t i = 0; i < outputTypes.size(); ++i)
+ src << "%out" << i << " = OpVariable " << (packFloatRes[i] ? std::string("%fp_f16") : std::string("%fp_out_") + de::toString(i)) << " Function\n";
src << "%operation = OpVariable %fp_operation Function\n"
"%BP_id_0_ptr = OpAccessChain %ip_u32 %BP_id3uID %c_u32_0\n"
"%add_1 = OpIAdd %u32 %mul_2 %mul_3\n"
"%add_2 = OpIAdd %u32 %add_1 %BP_id_0_val\n"
"OpStore %invocationNdx %add_2\n"
- "%invocationNdx_val = OpLoad %u32 %invocationNdx\n"
- "\n"
- "%src_ptr_0_0 = OpAccessChain %up_float %ssbo_src %c_i32_0 %invocationNdx_val %c_i32_0\n"
- "%src_val_0_0 = OpLoad " << inputType1 << " %src_ptr_0_0\n";
+ "%invocationNdx_val = OpLoad %u32 %invocationNdx\n";
- if (spec.packFloat16Bit)
+ // Load input values.
+ for (size_t inputNdx = 0; inputNdx < inputTypes.size(); ++inputNdx)
{
- if (spec.inputs[0].varType.getScalarSize() > 1)
+ src << "\n"
+ << "%src_ptr_0_" << inputNdx << " = OpAccessChain %up_float %ssbo_src %c_i32_0 %invocationNdx_val %c_i32_" << inputNdx << "\n"
+ << "%src_val_0_" << inputNdx << " = OpLoad " << inputTypes[inputNdx] << " %src_ptr_0_" << inputNdx << "\n";
+
+ if (spec.packFloat16Bit)
{
- // Extract the val0 u32 input channels into individual f16 values.
- for (int i=0;i<spec.inputs[0].varType.getScalarSize();++i)
+ if (spec.inputs[inputNdx].varType.getScalarSize() > 1)
{
- src << "%src_val_0_0_" << i << " = OpCompositeExtract %u32 %src_val_0_0 " << i << "\n"
- "%val_v2f16_0_0_" << i << " = OpBitcast %v2f16 %src_val_0_0_" << i << "\n"
- "%val_f16_0_0_" << i << " = OpCompositeExtract %f16 %val_v2f16_0_0_" << i << " 0\n";
- }
+ // Extract the val<inputNdx> u32 input channels into individual f16 values.
+ for (int i = 0; i < spec.inputs[inputNdx].varType.getScalarSize(); ++i)
+ {
+ src << "%src_val_0_" << inputNdx << "_" << i << " = OpCompositeExtract %u32 %src_val_0_" << inputNdx << " " << i << "\n"
+ "%val_v2f16_0_" << inputNdx << "_" << i << " = OpBitcast %v2f16 %src_val_0_" << inputNdx << "_" << i << "\n"
+ "%val_f16_0_" << inputNdx << "_" << i << " = OpCompositeExtract %f16 %val_v2f16_0_" << inputNdx << "_" << i << " 0\n";
+ }
- if (spec.inputs[0].varType.getScalarSize() > 1)
- {
// Construct the input vector.
- src << "%val_f16_0_0 = OpCompositeConstruct " << packType;
- for (int i=0;i<spec.inputs[0].varType.getScalarSize();++i)
+ src << "%val_f16_0_" << inputNdx << " = OpCompositeConstruct " << packType;
+ for (int i = 0; i < spec.inputs[inputNdx].varType.getScalarSize(); ++i)
{
- src << " %val_f16_0_0_" << i;
+ src << " %val_f16_0_" << inputNdx << "_" << i;
}
src << "\n";
- src << "OpStore %in0 %val_f16_0_0\n";
+ src << "OpStore %in" << inputNdx << " %val_f16_0_" << inputNdx << "\n";
}
- }
- else
- {
- src << "%val_v2f16_0_0 = OpBitcast %v2f16 %src_val_0_0\n"
- "%val_f16_0_0 = OpCompositeExtract %f16 %val_v2f16_0_0 0\n";
-
- src << "OpStore %in0 %val_f16_0_0\n";
- }
- }
- else
- src << "OpStore %in0 %src_val_0_0\n";
-
- src << "\n"
- "%src_ptr_0_1 = OpAccessChain %up_float %ssbo_src %c_i32_0 %invocationNdx_val %c_i32_1\n"
- "%src_val_0_1 = OpLoad " << inputType2 << " %src_ptr_0_1\n";
-
- if (spec.packFloat16Bit)
- {
- if (spec.inputs[0].varType.getScalarSize() > 1)
- {
- // Extract the val1 u32 input channels into individual f16 values.
- for (int i=0;i<spec.inputs[0].varType.getScalarSize();++i)
- {
- src << "%src_val_0_1_" << i << " = OpCompositeExtract %u32 %src_val_0_1 " << i << "\n"
- "%val_v2f16_0_1_" << i << " = OpBitcast %v2f16 %src_val_0_1_" << i << "\n"
- "%val_f16_0_1_" << i << " = OpCompositeExtract %f16 %val_v2f16_0_1_" << i << " 0\n";
- }
-
- if (spec.inputs[0].varType.getScalarSize() > 1)
+ else
{
- // Construct the input vector.
- src << "%val_f16_0_1 = OpCompositeConstruct " << packType;
- for (int i=0;i<spec.inputs[0].varType.getScalarSize();++i)
- {
- src << " %val_f16_0_1_" << i;
- }
+ src << "%val_v2f16_0_" << inputNdx << " = OpBitcast %v2f16 %src_val_0_" << inputNdx << "\n"
+ "%val_f16_0_" << inputNdx << " = OpCompositeExtract %f16 %val_v2f16_0_" << inputNdx << " 0\n";
- src << "\n";
- src << "OpStore %in1 %val_f16_0_1\n";
+ src << "OpStore %in" << inputNdx << " %val_f16_0_" << inputNdx << "\n";
}
}
else
- {
- src << "%val_v2f16_0_1 = OpBitcast %v2f16 %src_val_0_1\n"
- "%val_f16_0_1 = OpCompositeExtract %f16 %val_v2f16_0_1 0\n";
+ src << "OpStore %in" << inputNdx << " %src_val_0_" << inputNdx << "\n";
- src << "OpStore %in1 %val_f16_0_1\n";
- }
+ src << "%in" << inputNdx << "_val = OpLoad " << (spec.packFloat16Bit ? packType : inputTypes[inputNdx]) << " %in" << inputNdx << "\n";
}
- else
- src << "OpStore %in1 %src_val_0_1\n";
src << "\n"
- "OpStore %operation %c_i32_1\n"
- "OpStore %out %c_" << (packFloatRes ? &packType[1] : &outputType[1]) << "_0\n"
- "\n";
+ "OpStore %operation %c_i32_1\n";
- if (spec.packFloat16Bit)
- src << "%in0_val = OpLoad " << packType << " %in0\n"
- "%in1_val = OpLoad " << packType << " %in1\n";
- else
- src << "%in0_val = OpLoad " << inputType1 << " %in0\n"
- "%in1_val = OpLoad " << inputType2 << " %in1\n";
+ // Fill output values with dummy data.
+ for (size_t i = 0; i < outputTypes.size(); ++i)
+ src << "OpStore %out" << i << " %c_" << (packFloatRes[i] ? &packType[1] : &outputTypes[i][1]) << "_0\n";
src << "\n";
+ // Run operation.
switch (spec.spirvCase)
{
case SPIRV_CASETYPE_COMPARE:
{
src << scalarComparison (COMPARE_OPERATIONS[operationNdx], operationNdx,
spec.inputs[0].varType.getBasicType(),
- outputType,
+ outputTypes[0],
spec.outputs[0].varType.getScalarSize());
src << moveBitOperation("%operation", moveBitNdx);
++moveBitNdx;
}
break;
case SPIRV_CASETYPE_FREM:
- src << "%frem_result = OpFRem " << (packFloatRes ? packType : outputType) << " %in0_val %in1_val\n"
- << "OpStore %out %frem_result\n";
+ src << "%frem_result = OpFRem " << (packFloatRes[0] ? packType : outputTypes[0]) << " %in0_val %in1_val\n"
+ << "OpStore %out0 %frem_result\n";
+ break;
+ case SPIRV_CASETYPE_MODFSTRUCT:
+ src << "%modfstruct_result = OpExtInst %modfstruct_ret_t %glslstd450 ModfStruct %in0_val\n"
+ << "%modfstruct_result_0 = OpCompositeExtract " << modfStructMemberType << " %modfstruct_result 0\n"
+ << "%modfstruct_result_1 = OpCompositeExtract " << modfStructMemberType << " %modfstruct_result 1\n"
+ << "OpStore %out0 %modfstruct_result_0\n"
+ << "OpStore %out1 %modfstruct_result_1\n";
+ break;
+ case SPIRV_CASETYPE_FREXPSTRUCT:
+ src << "%frexpstruct_result = OpExtInst %frexpstruct_ret_t %glslstd450 FrexpStruct %in0_val\n"
+ << "%frexpstruct_result_0 = OpCompositeExtract " << frexpStructFirstMemberType << " %frexpstruct_result 0\n"
+ << "%frexpstruct_result_1 = OpCompositeExtract " << outputTypes[1] << " %frexpstruct_result 1\n"
+ << "OpStore %out0 %frexpstruct_result_0\n"
+ << "OpStore %out1 %frexpstruct_result_1\n";
break;
default:
DE_ASSERT(false);
break;
}
- src << "\n"
- "%out_val_final = OpLoad " << (packFloatRes ? packType : outputType) << " %out\n"
- "%ssbo_dst_ptr = OpAccessChain %up_out %ssbo_dst %c_i32_0 %invocationNdx_val %c_i32_0\n";
-
- if (packFloatRes)
+ for (size_t outputNdx = 0; outputNdx < outputTypes.size(); ++outputNdx)
{
- if (spec.inputs[0].varType.getScalarSize() > 1)
+ src << "\n"
+ "%out_val_final_" << outputNdx << " = OpLoad " << (packFloatRes[outputNdx] ? packType : outputTypes[outputNdx]) << " %out" << outputNdx << "\n"
+ "%ssbo_dst_ptr_" << outputNdx << " = OpAccessChain %up_out_" << outputNdx << " %ssbo_dst %c_i32_0 %invocationNdx_val %c_i32_" << outputNdx << "\n";
+
+ if (packFloatRes[outputNdx])
{
- for (int i = 0; i < spec.inputs[0].varType.getScalarSize(); ++i)
+ if (spec.outputs[outputNdx].varType.getScalarSize() > 1)
{
- src << "%out_val_final_" << i << " = OpCompositeExtract %f16 %out_val_final " << i << "\n";
- src << "%out_composite_" << i << " = OpCompositeConstruct %v2f16 %out_val_final_" << i << " %c_f16_minus1\n";
- src << "%u32_val_" << i << " = OpBitcast %u32 %out_composite_" << i << "\n";
- }
+ for (int i = 0; i < spec.outputs[outputNdx].varType.getScalarSize(); ++i)
+ {
+ src << "%out_val_final_" << outputNdx << "_" << i << " = OpCompositeExtract %f16 %out_val_final_" << outputNdx << " " << i << "\n";
+ src << "%out_composite_" << outputNdx << "_" << i << " = OpCompositeConstruct %v2f16 %out_val_final_" << outputNdx << "_" << i << " %c_f16_minus1\n";
+ src << "%u32_val_" << outputNdx << "_" << i << " = OpBitcast %u32 %out_composite_" << outputNdx << "_" << i << "\n";
+ }
- src << "%u32_final_val = OpCompositeConstruct " << outputType;
- for (int i = 0; i < spec.inputs[0].varType.getScalarSize(); ++i)
- src << " %u32_val_" << i;
- src << "\n";
- src << "OpStore %ssbo_dst_ptr %u32_final_val\n";
+ src << "%u32_final_val_" << outputNdx << " = OpCompositeConstruct " << outputTypes[outputNdx];
+ for (int i = 0; i < spec.outputs[outputNdx].varType.getScalarSize(); ++i)
+ src << " %u32_val_" << outputNdx << "_" << i;
+ src << "\n";
+ src << "OpStore %ssbo_dst_ptr_" << outputNdx << " %u32_final_val_" << outputNdx << "\n";
+ }
+ else
+ {
+ src <<
+ "%out_composite_" << outputNdx << " = OpCompositeConstruct %v2f16 %out_val_final_" << outputNdx << " %c_f16_minus1\n"
+ "%out_result_" << outputNdx << " = OpBitcast " << outputTypes[outputNdx] << " %out_composite_" << outputNdx << "\n"
+ "OpStore %ssbo_dst_ptr_" << outputNdx << " %out_result_" << outputNdx << "\n";
+ }
}
else
{
- src <<
- "%out_composite = OpCompositeConstruct %v2f16 %out_val_final %c_f16_minus1\n"
- "%out_result = OpBitcast " << outputType << " %out_composite\n"
- "OpStore %ssbo_dst_ptr %out_result\n";
+ src << "OpStore %ssbo_dst_ptr_" << outputNdx << " %out_val_final_" << outputNdx << "\n";
}
}
- else
- {
- src << "OpStore %ssbo_dst_ptr %out_val_final\n";
- }
src << "\n"
"OpReturn\n"
SPIRV_CASETYPE_NONE = 0,
SPIRV_CASETYPE_COMPARE,
SPIRV_CASETYPE_FREM,
+ SPIRV_CASETYPE_MODFSTRUCT,
+ SPIRV_CASETYPE_FREXPSTRUCT,
SPIRV_CASETYPE_MAX_ENUM,
};
dEQP-VK.glsl.builtin.precision.frem.highp.vec2
dEQP-VK.glsl.builtin.precision.frem.highp.vec3
dEQP-VK.glsl.builtin.precision.frem.highp.vec4
-dEQP-VK.glsl.builtin.precision.modf.mediump
-dEQP-VK.glsl.builtin.precision.modf.highp
+dEQP-VK.glsl.builtin.precision.modf.mediump.scalar
+dEQP-VK.glsl.builtin.precision.modf.mediump.vec2
+dEQP-VK.glsl.builtin.precision.modf.mediump.vec3
+dEQP-VK.glsl.builtin.precision.modf.mediump.vec4
+dEQP-VK.glsl.builtin.precision.modf.highp.scalar
+dEQP-VK.glsl.builtin.precision.modf.highp.vec2
+dEQP-VK.glsl.builtin.precision.modf.highp.vec3
+dEQP-VK.glsl.builtin.precision.modf.highp.vec4
+dEQP-VK.glsl.builtin.precision.modfstruct.mediump.scalar
+dEQP-VK.glsl.builtin.precision.modfstruct.mediump.vec2
+dEQP-VK.glsl.builtin.precision.modfstruct.mediump.vec3
+dEQP-VK.glsl.builtin.precision.modfstruct.mediump.vec4
+dEQP-VK.glsl.builtin.precision.modfstruct.highp.scalar
+dEQP-VK.glsl.builtin.precision.modfstruct.highp.vec2
+dEQP-VK.glsl.builtin.precision.modfstruct.highp.vec3
+dEQP-VK.glsl.builtin.precision.modfstruct.highp.vec4
dEQP-VK.glsl.builtin.precision.min.mediump.scalar
dEQP-VK.glsl.builtin.precision.min.mediump.vec2
dEQP-VK.glsl.builtin.precision.min.mediump.vec3
dEQP-VK.glsl.builtin.precision.frexp.highp.vec2
dEQP-VK.glsl.builtin.precision.frexp.highp.vec3
dEQP-VK.glsl.builtin.precision.frexp.highp.vec4
+dEQP-VK.glsl.builtin.precision.frexpstruct.mediump.scalar
+dEQP-VK.glsl.builtin.precision.frexpstruct.mediump.vec2
+dEQP-VK.glsl.builtin.precision.frexpstruct.mediump.vec3
+dEQP-VK.glsl.builtin.precision.frexpstruct.mediump.vec4
+dEQP-VK.glsl.builtin.precision.frexpstruct.highp.scalar
+dEQP-VK.glsl.builtin.precision.frexpstruct.highp.vec2
+dEQP-VK.glsl.builtin.precision.frexpstruct.highp.vec3
+dEQP-VK.glsl.builtin.precision.frexpstruct.highp.vec4
dEQP-VK.glsl.builtin.precision.ldexp.mediump.scalar
dEQP-VK.glsl.builtin.precision.ldexp.mediump.vec2
dEQP-VK.glsl.builtin.precision.ldexp.mediump.vec3
dEQP-VK.glsl.builtin.precision_fp16_storage16b.frem.compute.vec2
dEQP-VK.glsl.builtin.precision_fp16_storage16b.frem.compute.vec3
dEQP-VK.glsl.builtin.precision_fp16_storage16b.frem.compute.vec4
-dEQP-VK.glsl.builtin.precision_fp16_storage16b.modf.compute
+dEQP-VK.glsl.builtin.precision_fp16_storage16b.modf.compute.scalar
+dEQP-VK.glsl.builtin.precision_fp16_storage16b.modf.compute.vec2
+dEQP-VK.glsl.builtin.precision_fp16_storage16b.modf.compute.vec3
+dEQP-VK.glsl.builtin.precision_fp16_storage16b.modf.compute.vec4
+dEQP-VK.glsl.builtin.precision_fp16_storage16b.modfstruct.compute.scalar
+dEQP-VK.glsl.builtin.precision_fp16_storage16b.modfstruct.compute.vec2
+dEQP-VK.glsl.builtin.precision_fp16_storage16b.modfstruct.compute.vec3
+dEQP-VK.glsl.builtin.precision_fp16_storage16b.modfstruct.compute.vec4
dEQP-VK.glsl.builtin.precision_fp16_storage16b.min.compute.scalar
dEQP-VK.glsl.builtin.precision_fp16_storage16b.min.compute.vec2
dEQP-VK.glsl.builtin.precision_fp16_storage16b.min.compute.vec3
dEQP-VK.glsl.builtin.precision_fp16_storage16b.frexp.compute.vec2
dEQP-VK.glsl.builtin.precision_fp16_storage16b.frexp.compute.vec3
dEQP-VK.glsl.builtin.precision_fp16_storage16b.frexp.compute.vec4
+dEQP-VK.glsl.builtin.precision_fp16_storage16b.frexpstruct.compute.scalar
+dEQP-VK.glsl.builtin.precision_fp16_storage16b.frexpstruct.compute.vec2
+dEQP-VK.glsl.builtin.precision_fp16_storage16b.frexpstruct.compute.vec3
+dEQP-VK.glsl.builtin.precision_fp16_storage16b.frexpstruct.compute.vec4
dEQP-VK.glsl.builtin.precision_fp16_storage16b.ldexp.compute.scalar
dEQP-VK.glsl.builtin.precision_fp16_storage16b.ldexp.compute.vec2
dEQP-VK.glsl.builtin.precision_fp16_storage16b.ldexp.compute.vec3
dEQP-VK.glsl.builtin.precision_fp16_storage32b.frem.compute.vec2
dEQP-VK.glsl.builtin.precision_fp16_storage32b.frem.compute.vec3
dEQP-VK.glsl.builtin.precision_fp16_storage32b.frem.compute.vec4
-dEQP-VK.glsl.builtin.precision_fp16_storage32b.modf.compute
+dEQP-VK.glsl.builtin.precision_fp16_storage32b.modf.compute.scalar
+dEQP-VK.glsl.builtin.precision_fp16_storage32b.modf.compute.vec2
+dEQP-VK.glsl.builtin.precision_fp16_storage32b.modf.compute.vec3
+dEQP-VK.glsl.builtin.precision_fp16_storage32b.modf.compute.vec4
+dEQP-VK.glsl.builtin.precision_fp16_storage32b.modfstruct.compute.scalar
+dEQP-VK.glsl.builtin.precision_fp16_storage32b.modfstruct.compute.vec2
+dEQP-VK.glsl.builtin.precision_fp16_storage32b.modfstruct.compute.vec3
+dEQP-VK.glsl.builtin.precision_fp16_storage32b.modfstruct.compute.vec4
dEQP-VK.glsl.builtin.precision_fp16_storage32b.min.compute.scalar
dEQP-VK.glsl.builtin.precision_fp16_storage32b.min.compute.vec2
dEQP-VK.glsl.builtin.precision_fp16_storage32b.min.compute.vec3
dEQP-VK.glsl.builtin.precision_fp16_storage32b.frexp.compute.vec2
dEQP-VK.glsl.builtin.precision_fp16_storage32b.frexp.compute.vec3
dEQP-VK.glsl.builtin.precision_fp16_storage32b.frexp.compute.vec4
+dEQP-VK.glsl.builtin.precision_fp16_storage32b.frexpstruct.compute.scalar
+dEQP-VK.glsl.builtin.precision_fp16_storage32b.frexpstruct.compute.vec2
+dEQP-VK.glsl.builtin.precision_fp16_storage32b.frexpstruct.compute.vec3
+dEQP-VK.glsl.builtin.precision_fp16_storage32b.frexpstruct.compute.vec4
dEQP-VK.glsl.builtin.precision_fp16_storage32b.ldexp.compute.scalar
dEQP-VK.glsl.builtin.precision_fp16_storage32b.ldexp.compute.vec2
dEQP-VK.glsl.builtin.precision_fp16_storage32b.ldexp.compute.vec3
dEQP-VK.glsl.builtin.precision_double.frem.compute.vec2
dEQP-VK.glsl.builtin.precision_double.frem.compute.vec3
dEQP-VK.glsl.builtin.precision_double.frem.compute.vec4
-dEQP-VK.glsl.builtin.precision_double.modf.compute
+dEQP-VK.glsl.builtin.precision_double.modf.compute.scalar
+dEQP-VK.glsl.builtin.precision_double.modf.compute.vec2
+dEQP-VK.glsl.builtin.precision_double.modf.compute.vec3
+dEQP-VK.glsl.builtin.precision_double.modf.compute.vec4
+dEQP-VK.glsl.builtin.precision_double.modfstruct.compute.scalar
+dEQP-VK.glsl.builtin.precision_double.modfstruct.compute.vec2
+dEQP-VK.glsl.builtin.precision_double.modfstruct.compute.vec3
+dEQP-VK.glsl.builtin.precision_double.modfstruct.compute.vec4
dEQP-VK.glsl.builtin.precision_double.min.compute.scalar
dEQP-VK.glsl.builtin.precision_double.min.compute.vec2
dEQP-VK.glsl.builtin.precision_double.min.compute.vec3
dEQP-VK.glsl.builtin.precision_double.frexp.compute.vec2
dEQP-VK.glsl.builtin.precision_double.frexp.compute.vec3
dEQP-VK.glsl.builtin.precision_double.frexp.compute.vec4
+dEQP-VK.glsl.builtin.precision_double.frexpstruct.compute.scalar
+dEQP-VK.glsl.builtin.precision_double.frexpstruct.compute.vec2
+dEQP-VK.glsl.builtin.precision_double.frexpstruct.compute.vec3
+dEQP-VK.glsl.builtin.precision_double.frexpstruct.compute.vec4
dEQP-VK.glsl.builtin.precision_double.ldexp.compute.scalar
dEQP-VK.glsl.builtin.precision_double.ldexp.compute.vec2
dEQP-VK.glsl.builtin.precision_double.ldexp.compute.vec3