author    Ian Romanick <ian.d.romanick@intel.com>    2018-11-20 16:35:27 -0800
committer Ian Romanick <ian.d.romanick@intel.com>    2019-12-04 12:31:37 -0800
commit    d51b1547baf6282d9a4dcc436cbace5f6c705bd2 (patch)
tree      8a70a144a4b4a13ecdb7e6769533d099c6e701cf
parent    94e65bfe577db81f1e63612375f6dad591f97b4a (diff)
glsl: Very mean test case for memory usage, scheduling, and register allocation (INTEL_shader_integer_functions2)
Currently on Mesa this test case is pessimal for memory usage.  According to
Valgrind massif, it requires > 5 GiB of memory to compile.

Defining the macro FAIL_REGISTER_ALLOCATION causes the test case to fall off
a cliff on the i965 driver: the scheduler produces > 22,000 live values, and
*shock* register allocation fails.

Signed-off-by: Ian Romanick <ian.d.romanick@intel.com>
-rw-r--r--  tests/shaders/mean-soft-fp64-using-uint64.shader_test  1111
1 file changed, 1111 insertions(+), 0 deletions(-)
diff --git a/tests/shaders/mean-soft-fp64-using-uint64.shader_test b/tests/shaders/mean-soft-fp64-using-uint64.shader_test
new file mode 100644
index 000000000..fd778ffc7
--- /dev/null
+++ b/tests/shaders/mean-soft-fp64-using-uint64.shader_test
@@ -0,0 +1,1111 @@
+[require]
+GLSL >= 1.50
+#GL_ARB_arrays_of_arrays
+#GL_ARB_gpu_shader_int64
+#GL_ARB_shader_bit_encoding
+#GL_EXT_shader_integer_mix
+#GL_MESA_shader_integer_functions
+
+[vertex shader]
+#version 150
+#extension GL_ARB_arrays_of_arrays: require
+#extension GL_ARB_gpu_shader_int64: require
+#extension GL_ARB_shader_bit_encoding: require
+#extension GL_EXT_shader_integer_mix: require
+#extension GL_MESA_shader_integer_functions: require
+
+//#define FAIL_REGISTER_ALLOCATION
+
+/* Software IEEE floating-point rounding mode.
+ * GLSL spec section "4.7.1 Range and Precision":
+ * The rounding mode cannot be set and is undefined.
+ * But here we can select the rounding mode at compile time.
+ */
+#define FLOAT_ROUND_NEAREST_EVEN 0
+#define FLOAT_ROUND_TO_ZERO 1
+#define FLOAT_ROUND_DOWN 2
+#define FLOAT_ROUND_UP 3
+#define FLOAT_ROUNDING_MODE FLOAT_ROUND_NEAREST_EVEN
+
+/* Returns true if the double-precision floating-point value `a' is a NaN;
+ * otherwise returns false.
+ */
+bool
+__is_nan(uint64_t __a)
+{
+ uvec2 a = unpackUint2x32(__a);
+ return (0xFFE00000u <= (a.y<<1)) &&
+ ((a.x != 0u) || ((a.y & 0x000FFFFFu) != 0u));
+}
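+
+/* Illustrative sanity check: 0x7FF8000000000000UL, a quiet NaN, gives true
+ * (a.y<<1 == 0xFFF00000u >= 0xFFE00000u and the fraction bits are nonzero),
+ * while 0x7FF0000000000000UL, +Inf, gives false (fraction bits all zero).
+ */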
+
+/* Returns the fraction bits of the double-precision floating-point value `a'.*/
+uint
+__extractFloat64FracLo(uint64_t a)
+{
+ return unpackUint2x32(a).x;
+}
+
+uint
+__extractFloat64FracHi(uint64_t a)
+{
+ return unpackUint2x32(a).y & 0x000FFFFFu;
+}
+
+/* Returns the exponent bits of the double-precision floating-point value `a'.*/
+int
+__extractFloat64Exp(uint64_t __a)
+{
+ uvec2 a = unpackUint2x32(__a);
+ return int((a.y>>20) & 0x7FFu);
+}
+
+/* Returns the sign bit of the double-precision floating-point value `a'.*/
+uint
+__extractFloat64Sign(uint64_t a)
+{
+ return unpackUint2x32(a).y >> 31;
+}
+
+/* Adds the 64-bit value formed by concatenating `a0' and `a1' to the 64-bit
+ * value formed by concatenating `b0' and `b1'. Addition is modulo 2^64, so
+ * any carry out is lost. The result is broken into two 32-bit pieces which
+ * are stored at the locations pointed to by `z0Ptr' and `z1Ptr'.
+ */
+void
+__add64(uint a0, uint a1, uint b0, uint b1,
+ out uint z0Ptr,
+ out uint z1Ptr)
+{
+ uint z1 = a1 + b1;
+ z1Ptr = z1;
+ z0Ptr = a0 + b0 + uint(z1 < a1);
+}
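+
+/* Worked example: adding 0x00000001_FFFFFFFF and 0x00000000_00000001:
+ *    z1 = 0xFFFFFFFFu + 1u = 0x00000000u, which wraps, so uint(z1 < a1) = 1u;
+ *    z0 = 0x1u + 0x0u + 1u  = 0x00000002u,
+ * i.e. the 64-bit result 0x00000002_00000000, as expected.
+ */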
+
+
+/* Subtracts the 64-bit value formed by concatenating `b0' and `b1' from the
+ * 64-bit value formed by concatenating `a0' and `a1'. Subtraction is modulo
+ * 2^64, so any borrow out (carry out) is lost. The result is broken into two
+ * 32-bit pieces which are stored at the locations pointed to by `z0Ptr' and
+ * `z1Ptr'.
+ */
+void
+__sub64(uint a0, uint a1, uint b0, uint b1,
+ out uint z0Ptr,
+ out uint z1Ptr)
+{
+ z1Ptr = a1 - b1;
+ z0Ptr = a0 - b0 - uint(a1 < b1);
+}
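+
+/* Worked example: 0x00000001_00000000 minus 0x00000000_00000001:
+ *    z1 = 0x0u - 0x1u = 0xFFFFFFFFu, and a1 < b1 supplies a borrow of 1u;
+ *    z0 = 0x1u - 0x0u - 1u = 0x00000000u,
+ * giving 0x00000000_FFFFFFFF.
+ */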
+
+/* Shifts the 64-bit value formed by concatenating `a0' and `a1' right by the
+ * number of bits given in `count'. If any nonzero bits are shifted off, they
+ * are "jammed" into the least significant bit of the result by setting the
+ * least significant bit to 1. The value of `count' can be arbitrarily large;
+ * in particular, if `count' is greater than 64, the result will be either 0
+ * or 1, depending on whether the concatenation of `a0' and `a1' is zero or
+ * nonzero. The result is broken into two 32-bit pieces which are stored at
+ * the locations pointed to by `z0Ptr' and `z1Ptr'.
+ */
+void
+__shift64RightJamming(uint a0,
+ uint a1,
+ int count,
+ out uint z0Ptr,
+ out uint z1Ptr)
+{
+ uint z0;
+ uint z1;
+ int negCount = (-count) & 31;
+
+ z0 = mix(0u, a0, count == 0);
+ z0 = mix(z0, (a0 >> count), count < 32);
+
+ z1 = uint((a0 | a1) != 0u); /* count >= 64 */
+ uint z1_lt64 = (a0>>(count & 31)) | uint(((a0<<negCount) | a1) != 0u);
+ z1 = mix(z1, z1_lt64, count < 64);
+ z1 = mix(z1, (a0 | uint(a1 != 0u)), count == 32);
+ uint z1_lt32 = (a0<<negCount) | (a1>>count) | uint ((a1<<negCount) != 0u);
+ z1 = mix(z1, z1_lt32, count < 32);
+ z1 = mix(z1, a1, count == 0);
+ z1Ptr = z1;
+ z0Ptr = z0;
+}
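+
+/* Worked example of jamming: __shift64RightJamming(0u, 9u, 2, z0, z1)
+ * shifts binary 1001 right by two, dropping a nonzero bit; the sticky OR
+ * jams it back into the least significant bit, so z1 = 2u | 1u = 3u and
+ * z0 = 0u.  A plain shift would have produced 2u.
+ */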
+
+/* Shifts the 96-bit value formed by concatenating `a0', `a1', and `a2' right
+ * by 32 _plus_ the number of bits given in `count'. The shifted result is
+ * at most 64 nonzero bits; these are broken into two 32-bit pieces which are
+ * stored at the locations pointed to by `z0Ptr' and `z1Ptr'. The bits shifted
+ * off form a third 32-bit result as follows: The _last_ bit shifted off is
+ * the most-significant bit of the extra result, and the other 31 bits of the
+ * extra result are all zero if and only if _all_but_the_last_ bits shifted off
+ * were all zero. This extra result is stored in the location pointed to by
+ * `z2Ptr'. The value of `count' can be arbitrarily large.
+ * (This routine makes more sense if `a0', `a1', and `a2' are considered
+ * to form a fixed-point value with binary point between `a1' and `a2'. This
+ * fixed-point value is shifted right by the number of bits given in `count',
+ * and the integer part of the result is returned at the locations pointed to
+ * by `z0Ptr' and `z1Ptr'. The fractional part of the result may be slightly
+ * corrupted as described above, and is returned at the location pointed to by
+ * `z2Ptr'.)
+ */
+void
+__shift64ExtraRightJamming(uint a0, uint a1, uint a2,
+ int count,
+ out uint z0Ptr,
+ out uint z1Ptr,
+ out uint z2Ptr)
+{
+ uint z0 = 0u;
+ uint z1;
+ uint z2;
+ int negCount = (-count) & 31;
+
+ z2 = mix(uint(a0 != 0u), a0, count == 64);
+ z2 = mix(z2, a0 << negCount, count < 64);
+ z2 = mix(z2, a1 << negCount, count < 32);
+
+ z1 = mix(0u, (a0 >> (count & 31)), count < 64);
+ z1 = mix(z1, (a0<<negCount) | (a1>>count), count < 32);
+
+ a2 = mix(a2 | a1, a2, count < 32);
+ z0 = mix(z0, a0 >> count, count < 32);
+ z2 |= uint(a2 != 0u);
+
+ z0 = mix(z0, 0u, (count == 32));
+ z1 = mix(z1, a0, (count == 32));
+ z2 = mix(z2, a1, (count == 32));
+ z0 = mix(z0, a0, (count == 0));
+ z1 = mix(z1, a1, (count == 0));
+ z2 = mix(z2, a2, (count == 0));
+ z2Ptr = z2;
+ z1Ptr = z1;
+ z0Ptr = z0;
+}
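+
+/* Worked example: __shift64ExtraRightJamming(0u, 3u, 0u, 1, z0, z1, z2)
+ * yields z0 = 0u, z1 = 1u, z2 = 0x80000000u: only the most significant bit
+ * of the extra word is set, signalling an exactly-halfway fractional part
+ * to the rounding code below.
+ */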
+
+/* Shifts the 64-bit value formed by concatenating `a0' and `a1' left by the
+ * number of bits given in `count'. Any bits shifted off are lost. The value
+ * of `count' must be less than 32. The result is broken into two 32-bit
+ * pieces which are stored at the locations pointed to by `z0Ptr' and `z1Ptr'.
+ */
+void
+__shortShift64Left(uint a0, uint a1,
+ int count,
+ out uint z0Ptr,
+ out uint z1Ptr)
+{
+ z1Ptr = a1<<count;
+ z0Ptr = mix((a0 << count | (a1 >> ((-count) & 31))), a0, count == 0);
+}
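+
+/* Worked example: shifting 0x00000001_80000000 left by 4 gives
+ * z1 = 0x80000000u << 4 = 0x00000000u and
+ * z0 = (0x1u << 4) | (0x80000000u >> 28) = 0x00000018u,
+ * i.e. 0x00000018_00000000.
+ */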
+
+/* Packs the sign `zSign', the exponent `zExp', and the significand formed by
+ * the concatenation of `zFrac0' and `zFrac1' into a double-precision floating-
+ * point value, returning the result. After being shifted into the proper
+ * positions, the three fields `zSign', `zExp', and `zFrac0' are simply added
+ * together to form the most significant 32 bits of the result. This means
+ * that any integer portion of `zFrac0' will be added into the exponent. Since
+ * a properly normalized significand will have an integer portion equal to 1,
+ * the `zExp' input should be 1 less than the desired result exponent whenever
+ * `zFrac0' and `zFrac1' concatenated form a complete, normalized significand.
+ */
+uint64_t
+__packFloat64(uint zSign, int zExp, uint zFrac0, uint zFrac1)
+{
+ uvec2 z;
+
+ z.y = (zSign << 31) + (uint(zExp) << 20) + zFrac0;
+ z.x = zFrac1;
+ return packUint2x32(z);
+}
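+
+/* Example: __packFloat64(0u, 0x3FF, 0u, 0u) assembles
+ * 0x3FF00000_00000000, the IEEE 754 bit pattern of 1.0.
+ */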
+
+/* Takes an abstract floating-point value having sign `zSign', exponent `zExp',
+ * and extended significand formed by the concatenation of `zFrac0', `zFrac1',
+ * and `zFrac2', and returns the proper double-precision floating-point value
+ * corresponding to the abstract input. Ordinarily, the abstract value is
+ * simply rounded and packed into the double-precision format, with the inexact
+ * exception raised if the abstract input cannot be represented exactly.
+ * However, if the abstract value is too large, the overflow and inexact
+ * exceptions are raised and an infinity or maximal finite value is returned.
+ * If the abstract value is too small, the input value is rounded to a
+ * subnormal number, and the underflow and inexact exceptions are raised if the
+ * abstract input cannot be represented exactly as a subnormal double-precision
+ * floating-point number.
+ * The input significand must be normalized or smaller. If the input
+ * significand is not normalized, `zExp' must be 0; in that case, the result
+ * returned is a subnormal number, and it must not require rounding. In the
+ * usual case that the input significand is normalized, `zExp' must be 1 less
+ * than the "true" floating-point exponent. The handling of underflow and
+ * overflow follows the IEEE Standard for Floating-Point Arithmetic.
+ */
+uint64_t
+__roundAndPackFloat64(uint zSign,
+ int zExp,
+ uint zFrac0,
+ uint zFrac1,
+ uint zFrac2)
+{
+ bool roundNearestEven;
+ bool increment;
+
+ roundNearestEven = FLOAT_ROUNDING_MODE == FLOAT_ROUND_NEAREST_EVEN;
+ increment = int(zFrac2) < 0;
+ if (!roundNearestEven) {
+ if (FLOAT_ROUNDING_MODE == FLOAT_ROUND_TO_ZERO) {
+ increment = false;
+ } else {
+ if (zSign != 0u) {
+ increment = (FLOAT_ROUNDING_MODE == FLOAT_ROUND_DOWN) &&
+ (zFrac2 != 0u);
+ } else {
+ increment = (FLOAT_ROUNDING_MODE == FLOAT_ROUND_UP) &&
+ (zFrac2 != 0u);
+ }
+ }
+ }
+ if (0x7FD <= zExp) {
+ if ((0x7FD < zExp) ||
+ ((zExp == 0x7FD) &&
+ (0x001FFFFFu == zFrac0 && 0xFFFFFFFFu == zFrac1) &&
+ increment)) {
+ if ((FLOAT_ROUNDING_MODE == FLOAT_ROUND_TO_ZERO) ||
+ ((zSign != 0u) && (FLOAT_ROUNDING_MODE == FLOAT_ROUND_UP)) ||
+ ((zSign == 0u) && (FLOAT_ROUNDING_MODE == FLOAT_ROUND_DOWN))) {
+ return __packFloat64(zSign, 0x7FE, 0x000FFFFFu, 0xFFFFFFFFu);
+ }
+ return __packFloat64(zSign, 0x7FF, 0u, 0u);
+ }
+ if (zExp < 0) {
+ __shift64ExtraRightJamming(
+ zFrac0, zFrac1, zFrac2, -zExp, zFrac0, zFrac1, zFrac2);
+ zExp = 0;
+ if (roundNearestEven) {
+ increment = int(zFrac2) < 0;
+ } else {
+ if (zSign != 0u) {
+ increment = (FLOAT_ROUNDING_MODE == FLOAT_ROUND_DOWN) &&
+ (zFrac2 != 0u);
+ } else {
+ increment = (FLOAT_ROUNDING_MODE == FLOAT_ROUND_UP) &&
+ (zFrac2 != 0u);
+ }
+ }
+ }
+ }
+ if (increment) {
+ __add64(zFrac0, zFrac1, 0u, 1u, zFrac0, zFrac1);
+ zFrac1 &= ~((zFrac2 + uint(zFrac2 == 0u)) & uint(roundNearestEven));
+ } else {
+ zExp = mix(zExp, 0, (zFrac0 | zFrac1) == 0u);
+ }
+ return __packFloat64(zSign, zExp, zFrac0, zFrac1);
+}
+
+/* Returns the number of leading 0 bits before the most-significant 1 bit of
+ * `a'. If `a' is zero, 32 is returned.
+ */
+int
+__countLeadingZeros32(uint a)
+{
+ int shiftCount;
+ shiftCount = mix(31 - findMSB(a), 32, a == 0u);
+ return shiftCount;
+}
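+
+/* Examples: __countLeadingZeros32(0x80000000u) = 0,
+ * __countLeadingZeros32(1u) = 31, and __countLeadingZeros32(0u) = 32.
+ */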
+
+/* Takes an abstract floating-point value having sign `zSign', exponent `zExp',
+ * and significand formed by the concatenation of `zSig0' and `zSig1', and
+ * returns the proper double-precision floating-point value corresponding
+ * to the abstract input. This routine is just like `__roundAndPackFloat64'
+ * except that the input significand has fewer bits and does not have to be
+ * normalized. In all cases, `zExp' must be 1 less than the "true" floating-
+ * point exponent.
+ */
+uint64_t
+__normalizeRoundAndPackFloat64(uint zSign,
+ int zExp,
+ uint zFrac0,
+ uint zFrac1)
+{
+ int shiftCount;
+ uint zFrac2;
+
+ zExp = mix(zExp, zExp - 32, zFrac0 == 0u);
+ zFrac1 = mix(zFrac1, 0u, zFrac0 == 0u);
+ zFrac0 = mix(zFrac0, zFrac1, zFrac0 == 0u);
+ shiftCount = __countLeadingZeros32(zFrac0) - 11;
+ if (0 <= shiftCount) {
+ zFrac2 = 0u;
+ __shortShift64Left(zFrac0, zFrac1, shiftCount, zFrac0, zFrac1);
+ } else {
+ __shift64ExtraRightJamming(
+ zFrac0, zFrac1, 0u, -shiftCount, zFrac0, zFrac1, zFrac2);
+ }
+ zExp -= shiftCount;
+ return __roundAndPackFloat64(zSign, zExp, zFrac0, zFrac1, zFrac2);
+}
+
+/* Takes two double-precision floating-point values `a' and `b', one of which
+ * is a NaN, and returns the appropriate NaN result.
+ */
+uint64_t
+__propagateFloat64NaN(uint64_t __a, uint64_t __b)
+{
+ bool aIsNaN = __is_nan(__a);
+ bool bIsNaN = __is_nan(__b);
+ uvec2 a = unpackUint2x32(__a);
+ uvec2 b = unpackUint2x32(__b);
+ a.y |= 0x00080000u;
+ b.y |= 0x00080000u;
+
+ return packUint2x32(mix(b, mix(a, b, bvec2(bIsNaN, bIsNaN)), bvec2(aIsNaN, aIsNaN)));
+}
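+
+/* ORing in 0x00080000u sets the most significant fraction bit, quieting a
+ * signaling NaN.  Example: __propagateFloat64NaN(0x7FF0000000000001UL,
+ * 0x3FF0000000000000UL) returns 0x7FF8000000000001UL, the first operand
+ * with its quiet bit set.
+ */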
+
+/* Returns the result of adding the double-precision floating-point values
+ * `a' and `b'. The operation is performed according to the IEEE Standard for
+ * Floating-Point Arithmetic.
+ */
+uint64_t
+__fadd64(uint64_t a, uint64_t b)
+{
+ uint aSign = __extractFloat64Sign(a);
+ uint bSign = __extractFloat64Sign(b);
+ uint aFracLo = __extractFloat64FracLo(a);
+ uint aFracHi = __extractFloat64FracHi(a);
+ uint bFracLo = __extractFloat64FracLo(b);
+ uint bFracHi = __extractFloat64FracHi(b);
+ int aExp = __extractFloat64Exp(a);
+ int bExp = __extractFloat64Exp(b);
+ uint zFrac0 = 0u;
+ uint zFrac1 = 0u;
+ int expDiff = aExp - bExp;
+ if (aSign == bSign) {
+ uint zFrac2 = 0u;
+ int zExp;
+ bool orig_exp_diff_is_zero = (expDiff == 0);
+
+ if (orig_exp_diff_is_zero) {
+ if (aExp == 0x7FF) {
+ bool propagate = (aFracHi | aFracLo | bFracHi | bFracLo) != 0u;
+ return mix(a, __propagateFloat64NaN(a, b), propagate);
+ }
+ __add64(aFracHi, aFracLo, bFracHi, bFracLo, zFrac0, zFrac1);
+ if (aExp == 0)
+ return __packFloat64(aSign, 0, zFrac0, zFrac1);
+ zFrac2 = 0u;
+ zFrac0 |= 0x00200000u;
+ zExp = aExp;
+ __shift64ExtraRightJamming(
+ zFrac0, zFrac1, zFrac2, 1, zFrac0, zFrac1, zFrac2);
+ } else if (0 < expDiff) {
+ if (aExp == 0x7FF) {
+ bool propagate = (aFracHi | aFracLo) != 0u;
+ return mix(a, __propagateFloat64NaN(a, b), propagate);
+ }
+
+ expDiff = mix(expDiff, expDiff - 1, bExp == 0);
+ bFracHi = mix(bFracHi | 0x00100000u, bFracHi, bExp == 0);
+ __shift64ExtraRightJamming(
+ bFracHi, bFracLo, 0u, expDiff, bFracHi, bFracLo, zFrac2);
+ zExp = aExp;
+ } else if (expDiff < 0) {
+ if (bExp == 0x7FF) {
+ bool propagate = (bFracHi | bFracLo) != 0u;
+ return mix(__packFloat64(aSign, 0x7ff, 0u, 0u), __propagateFloat64NaN(a, b), propagate);
+ }
+ expDiff = mix(expDiff, expDiff + 1, aExp == 0);
+ aFracHi = mix(aFracHi | 0x00100000u, aFracHi, aExp == 0);
+ __shift64ExtraRightJamming(
+ aFracHi, aFracLo, 0u, - expDiff, aFracHi, aFracLo, zFrac2);
+ zExp = bExp;
+ }
+ if (!orig_exp_diff_is_zero) {
+ aFracHi |= 0x00100000u;
+ __add64(aFracHi, aFracLo, bFracHi, bFracLo, zFrac0, zFrac1);
+ --zExp;
+ if (!(zFrac0 < 0x00200000u)) {
+ __shift64ExtraRightJamming(zFrac0, zFrac1, zFrac2, 1, zFrac0, zFrac1, zFrac2);
+ ++zExp;
+ }
+ }
+ return __roundAndPackFloat64(aSign, zExp, zFrac0, zFrac1, zFrac2);
+
+ } else {
+ int zExp;
+
+ __shortShift64Left(aFracHi, aFracLo, 10, aFracHi, aFracLo);
+ __shortShift64Left(bFracHi, bFracLo, 10, bFracHi, bFracLo);
+ if (0 < expDiff) {
+ if (aExp == 0x7FF) {
+ bool propagate = (aFracHi | aFracLo) != 0u;
+ return mix(a, __propagateFloat64NaN(a, b), propagate);
+ }
+ expDiff = mix(expDiff, expDiff - 1, bExp == 0);
+ bFracHi = mix(bFracHi | 0x40000000u, bFracHi, bExp == 0);
+ __shift64RightJamming(bFracHi, bFracLo, expDiff, bFracHi, bFracLo);
+ aFracHi |= 0x40000000u;
+ __sub64(aFracHi, aFracLo, bFracHi, bFracLo, zFrac0, zFrac1);
+ zExp = aExp;
+ --zExp;
+ return __normalizeRoundAndPackFloat64(aSign, zExp - 10, zFrac0, zFrac1);
+ }
+ if (expDiff < 0) {
+ if (bExp == 0x7FF) {
+ bool propagate = (bFracHi | bFracLo) != 0u;
+ return mix(__packFloat64(aSign ^ 1u, 0x7ff, 0u, 0u), __propagateFloat64NaN(a, b), propagate);
+ }
+ expDiff = mix(expDiff, expDiff + 1, aExp == 0);
+ aFracHi = mix(aFracHi | 0x40000000u, aFracHi, aExp == 0);
+ __shift64RightJamming(aFracHi, aFracLo, - expDiff, aFracHi, aFracLo);
+ bFracHi |= 0x40000000u;
+ __sub64(bFracHi, bFracLo, aFracHi, aFracLo, zFrac0, zFrac1);
+ zExp = bExp;
+ aSign ^= 1u;
+ --zExp;
+ return __normalizeRoundAndPackFloat64(aSign, zExp - 10, zFrac0, zFrac1);
+ }
+ if (aExp == 0x7FF) {
+ bool propagate = (aFracHi | aFracLo | bFracHi | bFracLo) != 0u;
+ return mix(0xFFFFFFFFFFFFFFFFUL, __propagateFloat64NaN(a, b), propagate);
+ }
+ bExp = mix(bExp, 1, aExp == 0);
+ aExp = mix(aExp, 1, aExp == 0);
+ bool zexp_normal = false;
+ bool blta = true;
+ if (bFracHi < aFracHi) {
+ __sub64(aFracHi, aFracLo, bFracHi, bFracLo, zFrac0, zFrac1);
+ zexp_normal = true;
+ }
+ else if (aFracHi < bFracHi) {
+ __sub64(bFracHi, bFracLo, aFracHi, aFracLo, zFrac0, zFrac1);
+ blta = false;
+ zexp_normal = true;
+ }
+ else if (bFracLo < aFracLo) {
+ __sub64(aFracHi, aFracLo, bFracHi, bFracLo, zFrac0, zFrac1);
+ zexp_normal = true;
+ }
+ else if (aFracLo < bFracLo) {
+ __sub64(bFracHi, bFracLo, aFracHi, aFracLo, zFrac0, zFrac1);
+ blta = false;
+ zexp_normal = true;
+ }
+ zExp = mix(bExp, aExp, blta);
+ aSign = mix(aSign ^ 1u, aSign, blta);
+ uint64_t retval_0 = __packFloat64(uint(FLOAT_ROUNDING_MODE == FLOAT_ROUND_DOWN), 0, 0u, 0u);
+ uint64_t retval_1 = __normalizeRoundAndPackFloat64(aSign, zExp - 11, zFrac0, zFrac1);
+ return mix(retval_0, retval_1, zexp_normal);
+ }
+}
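+
+/* Example (round-to-nearest-even): __fadd64(0x3FF0000000000000UL,
+ * 0x4000000000000000UL), i.e. 1.0 + 2.0, returns 0x4008000000000000UL,
+ * the bit pattern of 3.0.
+ */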
+
+u64vec2
+__fadd64(u64vec2 a, u64vec2 b)
+{
+ return u64vec2(__fadd64(a.x, b.x),
+ __fadd64(a.y, b.y));
+}
+
+u64vec3
+__fadd64(u64vec3 a, u64vec3 b)
+{
+ return u64vec3(__fadd64(a.x, b.x),
+ __fadd64(a.y, b.y),
+ __fadd64(a.z, b.z));
+}
+
+u64vec4
+__fadd64(u64vec4 a, u64vec4 b)
+{
+ return u64vec4(__fadd64(a.x, b.x),
+ __fadd64(a.y, b.y),
+ __fadd64(a.z, b.z),
+ __fadd64(a.w, b.w));
+}
+
+u64vec4
+__fadd64(u64vec4 a, uint64_t b)
+{
+ return u64vec4(__fadd64(a.x, b),
+ __fadd64(a.y, b),
+ __fadd64(a.z, b),
+ __fadd64(a.w, b));
+}
+
+uint64_t
+__fadd64(uint64_t a, uint64_t b, uint64_t c)
+{
+ return __fadd64(__fadd64(a, b), c);
+}
+
+u64vec2
+__fadd64(u64vec2 a, u64vec2 b, u64vec2 c)
+{
+ return __fadd64(__fadd64(a, b), c);
+}
+
+u64vec3
+__fadd64(u64vec3 a, u64vec3 b, u64vec3 c)
+{
+ return __fadd64(__fadd64(a, b), c);
+}
+
+u64vec4
+__fadd64(u64vec4 a, u64vec4 b, u64vec4 c)
+{
+ return __fadd64(__fadd64(a, b), c);
+}
+
+uint64_t
+__fadd64(uint64_t a, uint64_t b, uint64_t c, uint64_t d)
+{
+ return __fadd64(__fadd64(a, b), __fadd64(c, d));
+}
+
+u64vec2
+__fadd64(u64vec2 a, u64vec2 b, u64vec2 c, u64vec2 d)
+{
+ return __fadd64(__fadd64(a, b), __fadd64(c, d));
+}
+
+u64vec3
+__fadd64(u64vec3 a, u64vec3 b, u64vec3 c, u64vec3 d)
+{
+ return __fadd64(__fadd64(a, b), __fadd64(c, d));
+}
+
+u64vec4
+__fadd64(u64vec4 a, u64vec4 b, u64vec4 c, u64vec4 d)
+{
+ return __fadd64(__fadd64(a, b), __fadd64(c, d));
+}
+
+
+/* Multiplies `a' by `b' to obtain a 64-bit product. The product is broken
+ * into two 32-bit pieces which are stored at the locations pointed to by
+ * `z0Ptr' and `z1Ptr'.
+ */
+void
+__mul32To64(uint a, uint b, out uint z0Ptr, out uint z1Ptr)
+{
+ uint aLow = a & 0x0000FFFFu;
+ uint aHigh = a>>16;
+ uint bLow = b & 0x0000FFFFu;
+ uint bHigh = b>>16;
+ uint z1 = aLow * bLow;
+ uint zMiddleA = aLow * bHigh;
+ uint zMiddleB = aHigh * bLow;
+ uint z0 = aHigh * bHigh;
+ zMiddleA += zMiddleB;
+ z0 += ((uint(zMiddleA < zMiddleB)) << 16) + (zMiddleA >> 16);
+ zMiddleA <<= 16;
+ z1 += zMiddleA;
+ z0 += uint(z1 < zMiddleA);
+ z1Ptr = z1;
+ z0Ptr = z0;
+}
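+
+/* Worked example: __mul32To64(0xFFFFFFFFu, 0xFFFFFFFFu, z0, z1) computes
+ * (2^32 - 1)^2 = 2^64 - 2^33 + 1, so z0 = 0xFFFFFFFEu and z1 = 0x00000001u.
+ */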
+
+/* Multiplies the 64-bit value formed by concatenating `a0' and `a1' to the
+ * 64-bit value formed by concatenating `b0' and `b1' to obtain a 128-bit
+ * product. The product is broken into four 32-bit pieces which are stored at
+ * the locations pointed to by `z0Ptr', `z1Ptr', `z2Ptr', and `z3Ptr'.
+ */
+void
+__mul64To128(uint a0, uint a1, uint b0, uint b1,
+ out uint z0Ptr,
+ out uint z1Ptr,
+ out uint z2Ptr,
+ out uint z3Ptr)
+{
+ uint z0 = 0u;
+ uint z1 = 0u;
+ uint z2 = 0u;
+ uint z3 = 0u;
+ uint more1 = 0u;
+ uint more2 = 0u;
+
+ __mul32To64(a1, b1, z2, z3);
+ __mul32To64(a1, b0, z1, more2);
+ __add64(z1, more2, 0u, z2, z1, z2);
+ __mul32To64(a0, b0, z0, more1);
+ __add64(z0, more1, 0u, z1, z0, z1);
+ __mul32To64(a0, b1, more1, more2);
+ __add64(more1, more2, 0u, z2, more1, z2);
+ __add64(z0, z1, 0u, more1, z0, z1);
+ z3Ptr = z3;
+ z2Ptr = z2;
+ z1Ptr = z1;
+ z0Ptr = z0;
+}
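+
+/* Worked example: squaring 0xFFFFFFFF_FFFFFFFF, i.e. (2^64 - 1)^2 =
+ * 2^128 - 2^65 + 1, produces the four words z0 = 0xFFFFFFFFu,
+ * z1 = 0xFFFFFFFEu, z2 = 0x00000000u, z3 = 0x00000001u.
+ */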
+
+/* Normalizes the subnormal double-precision floating-point value represented
+ * by the denormalized significand formed by the concatenation of `aFrac0' and
+ * `aFrac1'. The normalized exponent is stored at the location pointed to by
+ * `zExpPtr'. The most significant 21 bits of the normalized significand are
+ * stored at the location pointed to by `zFrac0Ptr', and the least significant
+ * 32 bits of the normalized significand are stored at the location pointed to
+ * by `zFrac1Ptr'.
+ */
+void
+__normalizeFloat64Subnormal(uint aFrac0, uint aFrac1,
+ out int zExpPtr,
+ out uint zFrac0Ptr,
+ out uint zFrac1Ptr)
+{
+ int shiftCount;
+ uint temp_zfrac0, temp_zfrac1;
+ shiftCount = __countLeadingZeros32(mix(aFrac0, aFrac1, aFrac0 == 0u)) - 11;
+ zExpPtr = mix(1 - shiftCount, -shiftCount - 31, aFrac0 == 0u);
+
+ temp_zfrac0 = mix(aFrac1<<shiftCount, aFrac1>>(-shiftCount), shiftCount < 0);
+ temp_zfrac1 = mix(0u, aFrac1<<(shiftCount & 31), shiftCount < 0);
+
+ __shortShift64Left(aFrac0, aFrac1, shiftCount, zFrac0Ptr, zFrac1Ptr);
+
+ zFrac0Ptr = mix(zFrac0Ptr, temp_zfrac0, aFrac0 == 0u);
+ zFrac1Ptr = mix(zFrac1Ptr, temp_zfrac1, aFrac0 == 0u);
+}
+
+/* Returns the result of multiplying the double-precision floating-point values
+ * `a' and `b'. The operation is performed according to the IEEE Standard for
+ * Floating-Point Arithmetic.
+ */
+uint64_t
+__fmul64(uint64_t a, uint64_t b)
+{
+ uint zFrac0 = 0u;
+ uint zFrac1 = 0u;
+ uint zFrac2 = 0u;
+ uint zFrac3 = 0u;
+ int zExp;
+
+ uint aFracLo = __extractFloat64FracLo(a);
+ uint aFracHi = __extractFloat64FracHi(a);
+ uint bFracLo = __extractFloat64FracLo(b);
+ uint bFracHi = __extractFloat64FracHi(b);
+ int aExp = __extractFloat64Exp(a);
+ uint aSign = __extractFloat64Sign(a);
+ int bExp = __extractFloat64Exp(b);
+ uint bSign = __extractFloat64Sign(b);
+ uint zSign = aSign ^ bSign;
+ if (aExp == 0x7FF) {
+ if (((aFracHi | aFracLo) != 0u) ||
+ ((bExp == 0x7FF) && ((bFracHi | bFracLo) != 0u))) {
+ return __propagateFloat64NaN(a, b);
+ }
+ if ((uint(bExp) | bFracHi | bFracLo) == 0u)
+ return 0xFFFFFFFFFFFFFFFFUL;
+ return __packFloat64(zSign, 0x7FF, 0u, 0u);
+ }
+ if (bExp == 0x7FF) {
+ if ((bFracHi | bFracLo) != 0u)
+ return __propagateFloat64NaN(a, b);
+ if ((uint(aExp) | aFracHi | aFracLo) == 0u)
+ return 0xFFFFFFFFFFFFFFFFUL;
+ return __packFloat64(zSign, 0x7FF, 0u, 0u);
+ }
+ if (aExp == 0) {
+ if ((aFracHi | aFracLo) == 0u)
+ return __packFloat64(zSign, 0, 0u, 0u);
+ __normalizeFloat64Subnormal(aFracHi, aFracLo, aExp, aFracHi, aFracLo);
+ }
+ if (bExp == 0) {
+ if ((bFracHi | bFracLo) == 0u)
+ return __packFloat64(zSign, 0, 0u, 0u);
+ __normalizeFloat64Subnormal(bFracHi, bFracLo, bExp, bFracHi, bFracLo);
+ }
+ zExp = aExp + bExp - 0x400;
+ aFracHi |= 0x00100000u;
+ __shortShift64Left(bFracHi, bFracLo, 12, bFracHi, bFracLo);
+ __mul64To128(
+ aFracHi, aFracLo, bFracHi, bFracLo, zFrac0, zFrac1, zFrac2, zFrac3);
+ __add64(zFrac0, zFrac1, aFracHi, aFracLo, zFrac0, zFrac1);
+ zFrac2 |= uint(zFrac3 != 0u);
+ if (0x00200000u <= zFrac0) {
+ __shift64ExtraRightJamming(
+ zFrac0, zFrac1, zFrac2, 1, zFrac0, zFrac1, zFrac2);
+ ++zExp;
+ }
+ return __roundAndPackFloat64(zSign, zExp, zFrac0, zFrac1, zFrac2);
+}
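+
+/* Example: __fmul64(0x3FF8000000000000UL, 0x4000000000000000UL), i.e.
+ * 1.5 * 2.0, returns 0x4008000000000000UL (3.0); the product of the
+ * significands needs no extra normalization shift in this case.
+ */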
+
+u64vec2
+__fmul64(u64vec2 a, u64vec2 b)
+{
+ return u64vec2(__fmul64(a.x, b.x),
+ __fmul64(a.y, b.y));
+}
+
+u64vec3
+__fmul64(u64vec3 a, u64vec3 b)
+{
+ return u64vec3(__fmul64(a.x, b.x),
+ __fmul64(a.y, b.y),
+ __fmul64(a.z, b.z));
+}
+
+u64vec4
+__fmul64(u64vec4 a, u64vec4 b)
+{
+ return u64vec4(__fmul64(a.x, b.x),
+ __fmul64(a.y, b.y),
+ __fmul64(a.z, b.z),
+ __fmul64(a.w, b.w));
+}
+
+u64vec4
+__fmul64(u64vec4 a, uint64_t b)
+{
+ return u64vec4(__fmul64(a.x, b),
+ __fmul64(a.y, b),
+ __fmul64(a.z, b),
+ __fmul64(a.w, b));
+}
+
+/* dvec2 * dmat2x2 */
+u64vec2
+__fmul64(u64vec2 v, u64vec2 m[2])
+{
+ return u64vec2(__fadd64(__fmul64(v.x, m[0].x),
+ __fmul64(v.y, m[0].y)),
+ __fadd64(__fmul64(v.x, m[1].x),
+ __fmul64(v.y, m[1].y)));
+}
+
+/* dvec2 * dmat2x3 */
+u64vec3
+__fmul64(u64vec2 v, u64vec2 m[3])
+{
+ return u64vec3(__fadd64(__fmul64(v.x, m[0].x),
+ __fmul64(v.y, m[0].y)),
+ __fadd64(__fmul64(v.x, m[1].x),
+ __fmul64(v.y, m[1].y)),
+ __fadd64(__fmul64(v.x, m[2].x),
+ __fmul64(v.y, m[2].y)));
+}
+
+/* dvec2 * dmat4x2 */
+u64vec4
+__fmul64(u64vec2 v, u64vec2 m[4])
+{
+ return u64vec4(__fadd64(__fmul64(v.x, m[0].x),
+ __fmul64(v.y, m[0].y)),
+ __fadd64(__fmul64(v.x, m[1].x),
+ __fmul64(v.y, m[1].y)),
+ __fadd64(__fmul64(v.x, m[2].x),
+ __fmul64(v.y, m[2].y)),
+ __fadd64(__fmul64(v.x, m[3].x),
+ __fmul64(v.y, m[3].y)));
+}
+
+
+
+/* dvec3 * dmat2x3 */
+u64vec2
+__fmul64(u64vec3 v, u64vec3 m[2])
+{
+ return u64vec2(__fadd64(__fmul64(v.x, m[0].x),
+ __fmul64(v.y, m[0].y),
+ __fmul64(v.z, m[0].z)),
+ __fadd64(__fmul64(v.x, m[1].x),
+ __fmul64(v.y, m[1].y),
+ __fmul64(v.z, m[1].z)));
+}
+
+/* dvec3 * dmat3 */
+u64vec3
+__fmul64(u64vec3 v, u64vec3 m[3])
+{
+ return u64vec3(__fadd64(__fmul64(v.x, m[0].x),
+ __fmul64(v.y, m[0].y),
+ __fmul64(v.z, m[0].z)),
+ __fadd64(__fmul64(v.x, m[1].x),
+ __fmul64(v.y, m[1].y),
+ __fmul64(v.z, m[1].z)),
+ __fadd64(__fmul64(v.x, m[2].x),
+ __fmul64(v.y, m[2].y),
+ __fmul64(v.z, m[2].z)));
+}
+
+/* dvec3 * dmat4x3 */
+u64vec4
+__fmul64(u64vec3 v, u64vec3 m[4])
+{
+ return u64vec4(__fadd64(__fmul64(v.x, m[0].x),
+ __fmul64(v.y, m[0].y),
+ __fmul64(v.z, m[0].z)),
+ __fadd64(__fmul64(v.x, m[1].x),
+ __fmul64(v.y, m[1].y),
+ __fmul64(v.z, m[1].z)),
+ __fadd64(__fmul64(v.x, m[2].x),
+ __fmul64(v.y, m[2].y),
+ __fmul64(v.z, m[2].z)),
+ __fadd64(__fmul64(v.x, m[3].x),
+ __fmul64(v.y, m[3].y),
+ __fmul64(v.z, m[3].z)));
+}
+
+
+
+/* dvec4 * dmat2x4 */
+u64vec2
+__fmul64(u64vec4 v, u64vec4 m[2])
+{
+ return u64vec2(__fadd64(__fmul64(v.x, m[0].x),
+ __fmul64(v.y, m[0].y),
+ __fmul64(v.z, m[0].z),
+ __fmul64(v.w, m[0].w)),
+ __fadd64(__fmul64(v.x, m[1].x),
+ __fmul64(v.y, m[1].y),
+ __fmul64(v.z, m[1].z),
+ __fmul64(v.w, m[1].w)));
+}
+
+/* dvec4 * dmat3x4 */
+u64vec3
+__fmul64(u64vec4 v, u64vec4 m[3])
+{
+ return u64vec3(__fadd64(__fmul64(v.x, m[0].x),
+ __fmul64(v.y, m[0].y),
+ __fmul64(v.z, m[0].z),
+ __fmul64(v.w, m[0].w)),
+ __fadd64(__fmul64(v.x, m[1].x),
+ __fmul64(v.y, m[1].y),
+ __fmul64(v.z, m[1].z),
+ __fmul64(v.w, m[1].w)),
+ __fadd64(__fmul64(v.x, m[2].x),
+ __fmul64(v.y, m[2].y),
+ __fmul64(v.z, m[2].z),
+ __fmul64(v.w, m[2].w)));
+}
+
+/* dvec4 * dmat4 */
+u64vec4
+__fmul64(u64vec4 v, u64vec4 m[4])
+{
+ return u64vec4(__fadd64(__fmul64(v.x, m[0].x),
+ __fmul64(v.y, m[0].y),
+ __fmul64(v.z, m[0].z),
+ __fmul64(v.w, m[0].w)),
+ __fadd64(__fmul64(v.x, m[1].x),
+ __fmul64(v.y, m[1].y),
+ __fmul64(v.z, m[1].z),
+ __fmul64(v.w, m[1].w)),
+ __fadd64(__fmul64(v.x, m[2].x),
+ __fmul64(v.y, m[2].y),
+ __fmul64(v.z, m[2].z),
+ __fmul64(v.w, m[2].w)),
+ __fadd64(__fmul64(v.x, m[3].x),
+ __fmul64(v.y, m[3].y),
+ __fmul64(v.z, m[3].z),
+ __fmul64(v.w, m[3].w)));
+}
+
+
+uint64_t
+__ffma64(uint64_t a, uint64_t b, uint64_t c)
+{
+ return __fadd64(__fmul64(a, b), c);
+}
+
+u64vec2
+__ffma64(u64vec2 a, u64vec2 b, u64vec2 c)
+{
+ return u64vec2(__ffma64(a.x, b.x, c.x),
+ __ffma64(a.y, b.y, c.y));
+}
+
+u64vec3
+__ffma64(u64vec3 a, u64vec3 b, u64vec3 c)
+{
+ return u64vec3(__ffma64(a.x, b.x, c.x),
+ __ffma64(a.y, b.y, c.y),
+ __ffma64(a.z, b.z, c.z));
+}
+
+u64vec4
+__ffma64(u64vec4 a, u64vec4 b, u64vec4 c)
+{
+ return u64vec4(__ffma64(a.x, b.x, c.x),
+ __ffma64(a.y, b.y, c.y),
+ __ffma64(a.z, b.z, c.z),
+ __ffma64(a.w, b.w, c.w));
+}
+
+u64vec4
+__ffma64(u64vec4 a, uint64_t b, uint64_t c)
+{
+ return u64vec4(__ffma64(a.x, b, c),
+ __ffma64(a.y, b, c),
+ __ffma64(a.z, b, c),
+ __ffma64(a.w, b, c));
+}
+
+/* Packs the sign `zSign', exponent `zExp', and significand `zFrac' into a
+ * single-precision floating-point value, returning the result. After being
+ * shifted into the proper positions, the three fields are simply added
+ * together to form the result. This means that any integer portion of `zSig'
+ * will be added into the exponent. Since a properly normalized significand
+ * will have an integer portion equal to 1, the `zExp' input should be 1 less
+ * than the desired result exponent whenever `zFrac' is a complete, normalized
+ * significand.
+ */
+float
+__packFloat32(uint zSign, int zExp, uint zFrac)
+{
+ return uintBitsToFloat((zSign<<31) + (uint(zExp)<<23) + zFrac);
+}
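+
+/* Example: __packFloat32(0u, 0x7F, 0u) assembles 0x3F800000, the IEEE 754
+ * bit pattern of 1.0f.
+ */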
+
+/* Takes an abstract floating-point value having sign `zSign', exponent `zExp',
+ * and significand `zFrac', and returns the proper single-precision floating-
+ * point value corresponding to the abstract input. Ordinarily, the abstract
+ * value is simply rounded and packed into the single-precision format, with
+ * the inexact exception raised if the abstract input cannot be represented
+ * exactly. However, if the abstract value is too large, the overflow and
+ * inexact exceptions are raised and an infinity or maximal finite value is
+ * returned. If the abstract value is too small, the input value is rounded to
+ * a subnormal number, and the underflow and inexact exceptions are raised if
+ * the abstract input cannot be represented exactly as a subnormal single-
+ * precision floating-point number.
+ * The input significand `zFrac' has its binary point between bits 30
+ * and 29, which is 7 bits to the left of the usual location. This shifted
+ * significand must be normalized or smaller. If `zFrac' is not normalized,
+ * `zExp' must be 0; in that case, the result returned is a subnormal number,
+ * and it must not require rounding. In the usual case that `zFrac' is
+ * normalized, `zExp' must be 1 less than the "true" floating-point exponent.
+ * The handling of underflow and overflow follows the IEEE Standard for
+ * Floating-Point Arithmetic.
+ */
+float
+__roundAndPackFloat32(uint zSign, int zExp, uint zFrac)
+{
+ bool roundNearestEven;
+ int roundIncrement;
+ int roundBits;
+
+ roundNearestEven = FLOAT_ROUNDING_MODE == FLOAT_ROUND_NEAREST_EVEN;
+ roundIncrement = 0x40;
+ if (!roundNearestEven) {
+ if (FLOAT_ROUNDING_MODE == FLOAT_ROUND_TO_ZERO) {
+ roundIncrement = 0;
+ } else {
+ roundIncrement = 0x7F;
+ if (zSign != 0u) {
+ if (FLOAT_ROUNDING_MODE == FLOAT_ROUND_UP)
+ roundIncrement = 0;
+ } else {
+ if (FLOAT_ROUNDING_MODE == FLOAT_ROUND_DOWN)
+ roundIncrement = 0;
+ }
+ }
+ }
+ roundBits = int(zFrac & 0x7Fu);
+ if (0xFDu <= uint(zExp)) {
+ if ((0xFD < zExp) || ((zExp == 0xFD) && (int(zFrac) + roundIncrement) < 0))
+ return __packFloat32(zSign, 0xFF, 0u) - float(roundIncrement == 0);
+ int count = -zExp;
+ bool zexp_lt0 = zExp < 0;
+ uint zFrac_lt0 = mix(uint(zFrac != 0u), (zFrac>>count) | uint((zFrac<<((-count) & 31)) != 0u), (-zExp) < 32);
+ zFrac = mix(zFrac, zFrac_lt0, zexp_lt0);
+ roundBits = mix(roundBits, int(zFrac) & 0x7f, zexp_lt0);
+ zExp = mix(zExp, 0, zexp_lt0);
+ }
+ zFrac = (zFrac + uint(roundIncrement))>>7;
+ zFrac &= ~uint(((roundBits ^ 0x40) == 0) && roundNearestEven);
+
+ return __packFloat32(zSign, mix(zExp, 0, zFrac == 0u), zFrac);
+}
+
+/* Returns the result of converting the double-precision floating-point value
+ * `a' to the single-precision floating-point format. The conversion is
+ * performed according to the IEEE Standard for Floating-Point Arithmetic.
+ */
+float
+__fp64_to_fp32(uint64_t __a)
+{
+ uvec2 a = unpackUint2x32(__a);
+ uint zFrac = 0u;
+ uint allZero = 0u;
+
+ uint aFracLo = __extractFloat64FracLo(__a);
+ uint aFracHi = __extractFloat64FracHi(__a);
+ int aExp = __extractFloat64Exp(__a);
+ uint aSign = __extractFloat64Sign(__a);
+ if (aExp == 0x7FF) {
+ __shortShift64Left(a.y, a.x, 12, a.y, a.x);
+ float rval = uintBitsToFloat((aSign<<31) | 0x7FC00000u | (a.y>>9));
+ rval = mix(__packFloat32(aSign, 0xFF, 0u), rval, (aFracHi | aFracLo) != 0u);
+ return rval;
+ }
+ __shift64RightJamming(aFracHi, aFracLo, 22, allZero, zFrac);
+ zFrac = mix(zFrac, zFrac | 0x40000000u, aExp != 0);
+ return __roundAndPackFloat32(aSign, aExp - 0x381, zFrac);
+}
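+
+/* Example: __fp64_to_fp32(0x3FF8000000000000UL) converts double 1.5 to
+ * 1.5f (bit pattern 0x3FC00000); the value is exactly representable, so
+ * rounding adds nothing.
+ */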
+
+vec4
+__fp64_to_fp32(u64vec4 a)
+{
+ return vec4(__fp64_to_fp32(a.x),
+ __fp64_to_fp32(a.y),
+ __fp64_to_fp32(a.z),
+ __fp64_to_fp32(a.w));
+}
+
+struct s1 {
+ /* double */ uint64_t a, b, c, d;
+};
+
+uniform /* double */ uint64_t d1;
+uniform /* dvec2 */ u64vec2 u1[2];
+uniform /* dvec3 */ u64vec3 u2[4];
+uniform /* dvec4 */ u64vec4 v[3];
+uniform /* dmat2 */ u64vec2[2] m1;
+uniform /* dmat3 */ u64vec3[3] m2;
+uniform /* dmat4 */ u64vec4[4] m3[3];
+uniform /* dmat2x3 */ u64vec3[2] m4;
+uniform /* dmat2x4 */ u64vec4[2] m5;
+uniform /* dmat3x2 */ u64vec2[3] m6;
+uniform /* dmat3x4 */ u64vec4[3] m7;
+uniform /* dmat4x2 */ u64vec2[4] m8[2];
+uniform /* dmat4x3 */ u64vec3[4] m9;
+uniform s1 s;
+uniform /* double */ uint64_t d2;
+
+out vec4 vscolor;
+
+void main()
+{
+ gl_Position = vec4(0.0, 0.0, 0.0, 1.0);
+ u64vec4 t = __fadd64(__ffma64(u64vec4(s.a, s.b, s.c, s.d), d1, d2),
+ __fadd64(__fmul64(u1[0], m8[0]),
+ __fmul64(u1[1], m8[1])));
+#if !defined FAIL_REGISTER_ALLOCATION
+ t = __fadd64(__fadd64(t,
+ __fmul64(v[0], m3[0])),
+ __fadd64(v[2],
+ __fmul64(v[1], m3[1])));
+ t.rb = __fadd64(t.rb, __fmul64(u1[0], m1));
+#else
+ t = __fadd64(__fadd64(__fadd64(t, __fmul64(v[0], m3[0])),
+ __fadd64(__fmul64(v[1], m3[1]),
+ __fmul64(v[2], m3[2]))),
+ __fmul64(u2[0], m9));
+ t.rb = __fadd64(__fadd64(__fadd64(t.rb, __fmul64(u1[0], m1)),
+ __fadd64(u1[1], __fmul64(u2[0], m4))),
+ __fmul64(v[0], m5));
+ t.xyw = __fadd64(__fadd64(__fadd64(t.xyw, __fmul64(u2[0], m2)),
+ __fadd64(u2[1], u2[2])),
+ __fadd64(__fadd64(u2[3], __fmul64(u1[1], m6)),
+ __fmul64(v[0], m7)));
+#endif
+ vscolor = __fp64_to_fp32(t);
+}
+
+[fragment shader]
+#version 150
+
+in vec4 vscolor;
+out vec4 fscolor;
+
+void main()
+{
+ fscolor = vscolor;
+}
+
+[test]