author     Harry Wentland <harry.wentland@amd.com>    2015-07-31 14:56:06 -0400
committer  Alex Deucher <alexander.deucher@amd.com>   2015-09-21 17:45:13 -0400
commit     0d2cc5ed36b9cd9cf6aafb8b16707663c8cf6165 (patch)
tree       4cdea53504c2575acddad875c410c8500d8ba377
parent     35e13a20eab489a44d426661a09b197314c4ca45 (diff)

amd/dal: Fixed point arithmetic

Arithmetic operations on real numbers represented as fixed-point numbers.

Signed-off-by: Harry Wentland <harry.wentland@amd.com>
-rw-r--r--  drivers/gpu/drm/amd/dal/basics/Makefile          2
-rw-r--r--  drivers/gpu/drm/amd/dal/basics/fixed31_32.c    692
-rw-r--r--  drivers/gpu/drm/amd/dal/basics/fixed32_32.c    223
-rw-r--r--  drivers/gpu/drm/amd/dal/include/fixed31_32.h   389
-rw-r--r--  drivers/gpu/drm/amd/dal/include/fixed32_32.h    80
5 files changed, 1385 insertions(+), 1 deletion(-)
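
The patch adds two encodings: a signed Q31.32 type (struct fixed31_32, whose int64_t value holds the real number multiplied by 2^32) and an unsigned U32.32 type (struct fixed32_32). As a rough user-space sketch of that encoding (not part of the patch), the example below shows why dal_fixed31_32_mul() must shift the double-width product right by 32 bits. The helpers to_q31_32() and q31_32_to_double() are invented for illustration only, and the __int128 product is a GCC/Clang shortcut for the four 32-bit partial products the kernel code uses to stay within 64 bits:

    #include <stdint.h>
    #include <stdio.h>

    #define FRAC_BITS 32

    /* hypothetical stand-in for dal_fixed31_32_from_fraction(); positive
     * operands only, and without the LSB rounding the kernel version does */
    static int64_t to_q31_32(int64_t numerator, int64_t denominator)
    {
        return (numerator << FRAC_BITS) / denominator;
    }

    /* convert the raw Q31.32 value back to double, for printing only */
    static double q31_32_to_double(int64_t raw)
    {
        return (double)raw / 4294967296.0;      /* 2^32 */
    }

    int main(void)
    {
        int64_t half  = to_q31_32(1, 2);        /* raw 0x0000000080000000 */
        int64_t three = to_q31_32(3, 1);        /* raw 0x0000000300000000 */

        /* the product of two Q31.32 numbers is a Q62.64 number, so it has
         * to be shifted right by FRAC_BITS to land back in Q31.32 */
        int64_t prod = (int64_t)(((__int128)half * three) >> FRAC_BITS);

        printf("0.5 * 3 = %f\n", q31_32_to_double(prod));   /* 1.500000 */
        return 0;
    }
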
diff --git a/drivers/gpu/drm/amd/dal/basics/Makefile b/drivers/gpu/drm/amd/dal/basics/Makefile
index 9f369873ae33..51a11fc4b2fb 100644
--- a/drivers/gpu/drm/amd/dal/basics/Makefile
+++ b/drivers/gpu/drm/amd/dal/basics/Makefile
@@ -4,7 +4,7 @@
# subcomponents.
CONTAINERS = vector.o flat_set.o
-BASICS = signal_types.o grph_object_id.o \
+BASICS = signal_types.o grph_object_id.o fixed31_32.o fixed32_32.o \
$(CONTAINERS) logger.o register_logger.o
AMD_DAL_BASICS = $(addprefix $(AMDDALPATH)/basics/,$(BASICS))
diff --git a/drivers/gpu/drm/amd/dal/basics/fixed31_32.c b/drivers/gpu/drm/amd/dal/basics/fixed31_32.c
new file mode 100644
index 000000000000..6ce75b382087
--- /dev/null
+++ b/drivers/gpu/drm/amd/dal/basics/fixed31_32.c
@@ -0,0 +1,692 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dal_services.h"
+#include "include/fixed31_32.h"
+
+static inline uint64_t abs_i64(
+ int64_t arg)
+{
+ if (arg > 0)
+ return (uint64_t)arg;
+ else
+ return (uint64_t)(-arg);
+}
+
+/*
+ * @brief
+ * result = dividend / divisor
+ * *remainder = dividend % divisor
+ */
+static inline uint64_t complete_integer_division_u64(
+ uint64_t dividend,
+ uint64_t divisor,
+ uint64_t *remainder)
+{
+ uint64_t result;
+
+ ASSERT(divisor);
+
+ result = div64_u64_rem(dividend, divisor, remainder);
+
+ return result;
+}
+
+#define BITS_PER_FRACTIONAL_PART \
+ 32
+
+#define FRACTIONAL_PART_MASK \
+ ((1ULL << BITS_PER_FRACTIONAL_PART) - 1)
+
+#define GET_INTEGER_PART(x) \
+ ((x) >> BITS_PER_FRACTIONAL_PART)
+
+#define GET_FRACTIONAL_PART(x) \
+ (FRACTIONAL_PART_MASK & (x))
+
+struct fixed31_32 dal_fixed31_32_from_fraction(
+ int64_t numerator,
+ int64_t denominator)
+{
+ struct fixed31_32 res;
+
+ bool arg1_negative = numerator < 0;
+ bool arg2_negative = denominator < 0;
+
+ uint64_t arg1_value = arg1_negative ? -numerator : numerator;
+ uint64_t arg2_value = arg2_negative ? -denominator : denominator;
+
+ uint64_t remainder;
+
+ /* determine integer part */
+
+ uint64_t res_value = complete_integer_division_u64(
+ arg1_value, arg2_value, &remainder);
+
+ ASSERT(res_value <= LONG_MAX);
+
+ /* determine fractional part */
+ {
+ uint32_t i = BITS_PER_FRACTIONAL_PART;
+
+ do {
+ remainder <<= 1;
+
+ res_value <<= 1;
+
+ if (remainder >= arg2_value) {
+ res_value |= 1;
+ remainder -= arg2_value;
+ }
+ } while (--i != 0);
+ }
+
+ /* round up LSB */
+ {
+ uint64_t summand = (remainder << 1) >= arg2_value;
+
+ ASSERT(res_value <= LLONG_MAX - summand);
+
+ res_value += summand;
+ }
+
+ res.value = (int64_t)res_value;
+
+ if (arg1_negative ^ arg2_negative)
+ res.value = -res.value;
+
+ return res;
+}
+
+struct fixed31_32 dal_fixed31_32_from_int(
+ int64_t arg)
+{
+ struct fixed31_32 res;
+
+ ASSERT((LONG_MIN <= arg) && (arg <= LONG_MAX));
+
+ res.value = arg << BITS_PER_FRACTIONAL_PART;
+
+ return res;
+}
+
+struct fixed31_32 dal_fixed31_32_neg(
+ struct fixed31_32 arg)
+{
+ struct fixed31_32 res;
+
+ res.value = -arg.value;
+
+ return res;
+}
+
+struct fixed31_32 dal_fixed31_32_abs(
+ struct fixed31_32 arg)
+{
+ if (arg.value < 0)
+ return dal_fixed31_32_neg(arg);
+ else
+ return arg;
+}
+
+bool dal_fixed31_32_lt(
+ struct fixed31_32 arg1,
+ struct fixed31_32 arg2)
+{
+ return arg1.value < arg2.value;
+}
+
+bool dal_fixed31_32_le(
+ struct fixed31_32 arg1,
+ struct fixed31_32 arg2)
+{
+ return arg1.value <= arg2.value;
+}
+
+bool dal_fixed31_32_eq(
+ struct fixed31_32 arg1,
+ struct fixed31_32 arg2)
+{
+ return arg1.value == arg2.value;
+}
+
+struct fixed31_32 dal_fixed31_32_min(
+ struct fixed31_32 arg1,
+ struct fixed31_32 arg2)
+{
+ if (arg1.value <= arg2.value)
+ return arg1;
+ else
+ return arg2;
+}
+
+struct fixed31_32 dal_fixed31_32_max(
+ struct fixed31_32 arg1,
+ struct fixed31_32 arg2)
+{
+ if (arg1.value <= arg2.value)
+ return arg2;
+ else
+ return arg1;
+}
+
+struct fixed31_32 dal_fixed31_32_clamp(
+ struct fixed31_32 arg,
+ struct fixed31_32 min_value,
+ struct fixed31_32 max_value)
+{
+ if (dal_fixed31_32_le(arg, min_value))
+ return min_value;
+ else if (dal_fixed31_32_le(max_value, arg))
+ return max_value;
+ else
+ return arg;
+}
+
+struct fixed31_32 dal_fixed31_32_shl(
+ struct fixed31_32 arg,
+ uint8_t shift)
+{
+ struct fixed31_32 res;
+
+ ASSERT(((arg.value >= 0) && (arg.value <= LLONG_MAX >> shift)) ||
+ ((arg.value < 0) && (arg.value >= LLONG_MIN >> shift)));
+
+ res.value = arg.value << shift;
+
+ return res;
+}
+
+struct fixed31_32 dal_fixed31_32_shr(
+ struct fixed31_32 arg,
+ uint8_t shift)
+{
+ struct fixed31_32 res;
+
+ ASSERT(shift < 64);
+
+ res.value = arg.value >> shift;
+
+ return res;
+}
+
+struct fixed31_32 dal_fixed31_32_add(
+ struct fixed31_32 arg1,
+ struct fixed31_32 arg2)
+{
+ struct fixed31_32 res;
+
+ ASSERT(((arg1.value >= 0) && (LLONG_MAX - arg1.value >= arg2.value)) ||
+ ((arg1.value < 0) && (LLONG_MIN - arg1.value <= arg2.value)));
+
+ res.value = arg1.value + arg2.value;
+
+ return res;
+}
+
+struct fixed31_32 dal_fixed31_32_sub_int(
+ struct fixed31_32 arg1,
+ int32_t arg2)
+{
+ return dal_fixed31_32_sub(
+ arg1,
+ dal_fixed31_32_from_int(arg2));
+}
+
+struct fixed31_32 dal_fixed31_32_sub(
+ struct fixed31_32 arg1,
+ struct fixed31_32 arg2)
+{
+ struct fixed31_32 res;
+
+ ASSERT(((arg2.value >= 0) && (LLONG_MIN + arg2.value <= arg1.value)) ||
+ ((arg2.value < 0) && (LLONG_MAX + arg2.value >= arg1.value)));
+
+ res.value = arg1.value - arg2.value;
+
+ return res;
+}
+
+struct fixed31_32 dal_fixed31_32_mul_int(
+ struct fixed31_32 arg1,
+ int32_t arg2)
+{
+ return dal_fixed31_32_mul(
+ arg1,
+ dal_fixed31_32_from_int(arg2));
+}
+
+struct fixed31_32 dal_fixed31_32_mul(
+ struct fixed31_32 arg1,
+ struct fixed31_32 arg2)
+{
+ struct fixed31_32 res;
+
+ bool arg1_negative = arg1.value < 0;
+ bool arg2_negative = arg2.value < 0;
+
+ uint64_t arg1_value = arg1_negative ? -arg1.value : arg1.value;
+ uint64_t arg2_value = arg2_negative ? -arg2.value : arg2.value;
+
+ uint64_t arg1_int = GET_INTEGER_PART(arg1_value);
+ uint64_t arg2_int = GET_INTEGER_PART(arg2_value);
+
+ uint64_t arg1_fra = GET_FRACTIONAL_PART(arg1_value);
+ uint64_t arg2_fra = GET_FRACTIONAL_PART(arg2_value);
+
+ uint64_t tmp;
+
+ res.value = arg1_int * arg2_int;
+
+ ASSERT(res.value <= LONG_MAX);
+
+ res.value <<= BITS_PER_FRACTIONAL_PART;
+
+ tmp = arg1_int * arg2_fra;
+
+ ASSERT(tmp <= (uint64_t)(LLONG_MAX - res.value));
+
+ res.value += tmp;
+
+ tmp = arg2_int * arg1_fra;
+
+ ASSERT(tmp <= (uint64_t)(LLONG_MAX - res.value));
+
+ res.value += tmp;
+
+ tmp = arg1_fra * arg2_fra;
+
+ tmp = (tmp >> BITS_PER_FRACTIONAL_PART) +
+ (tmp >= (uint64_t)dal_fixed31_32_half.value);
+
+ ASSERT(tmp <= (uint64_t)(LLONG_MAX - res.value));
+
+ res.value += tmp;
+
+ if (arg1_negative ^ arg2_negative)
+ res.value = -res.value;
+
+ return res;
+}
+
+struct fixed31_32 dal_fixed31_32_sqr(
+ struct fixed31_32 arg)
+{
+ struct fixed31_32 res;
+
+ uint64_t arg_value = abs_i64(arg.value);
+
+ uint64_t arg_int = GET_INTEGER_PART(arg_value);
+
+ uint64_t arg_fra = GET_FRACTIONAL_PART(arg_value);
+
+ uint64_t tmp;
+
+ res.value = arg_int * arg_int;
+
+ ASSERT(res.value <= LONG_MAX);
+
+ res.value <<= BITS_PER_FRACTIONAL_PART;
+
+ tmp = arg_int * arg_fra;
+
+ ASSERT(tmp <= (uint64_t)(LLONG_MAX - res.value));
+
+ res.value += tmp;
+
+ ASSERT(tmp <= (uint64_t)(LLONG_MAX - res.value));
+
+ res.value += tmp;
+
+ tmp = arg_fra * arg_fra;
+
+ tmp = (tmp >> BITS_PER_FRACTIONAL_PART) +
+ (tmp >= (uint64_t)dal_fixed31_32_half.value);
+
+ ASSERT(tmp <= (uint64_t)(LLONG_MAX - res.value));
+
+ res.value += tmp;
+
+ return res;
+}
+
+struct fixed31_32 dal_fixed31_32_div_int(
+ struct fixed31_32 arg1,
+ int64_t arg2)
+{
+ return dal_fixed31_32_from_fraction(
+ arg1.value,
+ dal_fixed31_32_from_int(arg2).value);
+}
+
+struct fixed31_32 dal_fixed31_32_div(
+ struct fixed31_32 arg1,
+ struct fixed31_32 arg2)
+{
+ return dal_fixed31_32_from_fraction(
+ arg1.value,
+ arg2.value);
+}
+
+struct fixed31_32 dal_fixed31_32_recip(
+ struct fixed31_32 arg)
+{
+ /*
+ * @note
+ * Good idea to use Newton's method
+ */
+
+ ASSERT(arg.value);
+
+ return dal_fixed31_32_from_fraction(
+ dal_fixed31_32_one.value,
+ arg.value);
+}
+
+struct fixed31_32 dal_fixed31_32_sinc(
+ struct fixed31_32 arg)
+{
+ struct fixed31_32 square;
+
+ struct fixed31_32 res = dal_fixed31_32_one;
+
+ int32_t n = 27;
+
+ struct fixed31_32 arg_norm = arg;
+
+ if (dal_fixed31_32_le(
+ dal_fixed31_32_two_pi,
+ dal_fixed31_32_abs(arg))) {
+ arg_norm = dal_fixed31_32_sub(
+ arg_norm,
+ dal_fixed31_32_mul_int(
+ dal_fixed31_32_two_pi,
+ (int32_t)div64_s64(
+ arg_norm.value,
+ dal_fixed31_32_two_pi.value)));
+ }
+
+ square = dal_fixed31_32_sqr(arg_norm);
+
+ do {
+ res = dal_fixed31_32_sub(
+ dal_fixed31_32_one,
+ dal_fixed31_32_div_int(
+ dal_fixed31_32_mul(
+ square,
+ res),
+ n * (n - 1)));
+
+ n -= 2;
+ } while (n > 2);
+
+ if (arg.value != arg_norm.value)
+ res = dal_fixed31_32_div(
+ dal_fixed31_32_mul(res, arg_norm),
+ arg);
+
+ return res;
+}
+
+struct fixed31_32 dal_fixed31_32_sin(
+ struct fixed31_32 arg)
+{
+ return dal_fixed31_32_mul(
+ arg,
+ dal_fixed31_32_sinc(arg));
+}
+
+struct fixed31_32 dal_fixed31_32_cos(
+ struct fixed31_32 arg)
+{
+ /* TODO implement argument normalization */
+
+ const struct fixed31_32 square = dal_fixed31_32_sqr(arg);
+
+ struct fixed31_32 res = dal_fixed31_32_one;
+
+ int32_t n = 26;
+
+ do {
+ res = dal_fixed31_32_sub(
+ dal_fixed31_32_one,
+ dal_fixed31_32_div_int(
+ dal_fixed31_32_mul(
+ square,
+ res),
+ n * (n - 1)));
+
+ n -= 2;
+ } while (n != 0);
+
+ return res;
+}
+
+/*
+ * @brief
+ * result = exp(arg),
+ * where abs(arg) < 1
+ *
+ * Calculated as Taylor series.
+ */
+static struct fixed31_32 fixed31_32_exp_from_taylor_series(
+ struct fixed31_32 arg)
+{
+ uint32_t n = 9;
+
+ struct fixed31_32 res = dal_fixed31_32_from_fraction(
+ n + 2,
+ n + 1);
+ /* TODO find correct res */
+
+ ASSERT(dal_fixed31_32_lt(arg, dal_fixed31_32_one));
+
+ do
+ res = dal_fixed31_32_add(
+ dal_fixed31_32_one,
+ dal_fixed31_32_div_int(
+ dal_fixed31_32_mul(
+ arg,
+ res),
+ n));
+ while (--n != 1);
+
+ return dal_fixed31_32_add(
+ dal_fixed31_32_one,
+ dal_fixed31_32_mul(
+ arg,
+ res));
+}
+
+struct fixed31_32 dal_fixed31_32_exp(
+ struct fixed31_32 arg)
+{
+ /*
+ * @brief
+ * Main equation is:
+ * exp(x) = exp(r + m * ln(2)) = (1 << m) * exp(r),
+ * where m = round(x / ln(2)), r = x - m * ln(2)
+ */
+
+ if (dal_fixed31_32_le(
+ dal_fixed31_32_ln2_div_2,
+ dal_fixed31_32_abs(arg))) {
+ int32_t m = dal_fixed31_32_round(
+ dal_fixed31_32_div(
+ arg,
+ dal_fixed31_32_ln2));
+
+ struct fixed31_32 r = dal_fixed31_32_sub(
+ arg,
+ dal_fixed31_32_mul_int(
+ dal_fixed31_32_ln2,
+ m));
+
+ ASSERT(m != 0);
+
+ ASSERT(dal_fixed31_32_lt(
+ dal_fixed31_32_abs(r),
+ dal_fixed31_32_one));
+
+ if (m > 0)
+ return dal_fixed31_32_shl(
+ fixed31_32_exp_from_taylor_series(r),
+ (uint8_t)m);
+ else
+ return dal_fixed31_32_div_int(
+ fixed31_32_exp_from_taylor_series(r),
+ 1LL << -m);
+ } else if (arg.value != 0)
+ return fixed31_32_exp_from_taylor_series(arg);
+ else
+ return dal_fixed31_32_one;
+}
+
+struct fixed31_32 dal_fixed31_32_log(
+ struct fixed31_32 arg)
+{
+ struct fixed31_32 res = dal_fixed31_32_neg(dal_fixed31_32_one);
+ /* TODO improve 1st estimation */
+
+ struct fixed31_32 error;
+
+ ASSERT(arg.value > 0);
+ /* TODO if arg is negative, return NaN */
+ /* TODO if arg is zero, return -INF */
+
+ do {
+ struct fixed31_32 res1 = dal_fixed31_32_add(
+ dal_fixed31_32_sub(
+ res,
+ dal_fixed31_32_one),
+ dal_fixed31_32_div(
+ arg,
+ dal_fixed31_32_exp(res)));
+
+ error = dal_fixed31_32_sub(
+ res,
+ res1);
+
+ res = res1;
+ /* TODO determine max_allowed_error based on quality of exp() */
+ } while (abs_i64(error.value) > 100ULL);
+
+ return res;
+}
+
+struct fixed31_32 dal_fixed31_32_pow(
+ struct fixed31_32 arg1,
+ struct fixed31_32 arg2)
+{
+ return dal_fixed31_32_exp(
+ dal_fixed31_32_mul(
+ dal_fixed31_32_log(arg1),
+ arg2));
+}
+
+int32_t dal_fixed31_32_floor(
+ struct fixed31_32 arg)
+{
+ uint64_t arg_value = abs_i64(arg.value);
+
+ if (arg.value >= 0)
+ return (int32_t)GET_INTEGER_PART(arg_value);
+ else
+ return -(int32_t)GET_INTEGER_PART(arg_value);
+}
+
+int32_t dal_fixed31_32_round(
+ struct fixed31_32 arg)
+{
+ uint64_t arg_value = abs_i64(arg.value);
+
+ const int64_t summand = dal_fixed31_32_half.value;
+
+ ASSERT(LLONG_MAX - (int64_t)arg_value >= summand);
+
+ arg_value += summand;
+
+ if (arg.value >= 0)
+ return (int32_t)GET_INTEGER_PART(arg_value);
+ else
+ return -(int32_t)GET_INTEGER_PART(arg_value);
+}
+
+int32_t dal_fixed31_32_ceil(
+ struct fixed31_32 arg)
+{
+ uint64_t arg_value = abs_i64(arg.value);
+
+ const int64_t summand = dal_fixed31_32_one.value -
+ dal_fixed31_32_epsilon.value;
+
+ ASSERT(LLONG_MAX - (int64_t)arg_value >= summand);
+
+ arg_value += summand;
+
+ if (arg.value >= 0)
+ return (int32_t)GET_INTEGER_PART(arg_value);
+ else
+ return -(int32_t)GET_INTEGER_PART(arg_value);
+}
+
+/* this function is a generic helper that translates a fixed point value to a
+ * specified integer format consisting of an integer_bits-wide integer part
+ * and a fractional_bits-wide fractional part. For example, it is used in
+ * dal_fixed31_32_u2d19 to produce a 2-bit integer part and a 19-bit
+ * fractional part in 32 bits. It is used in hw programming (scaler)
+ */
+
+static inline uint32_t ux_dy(
+ int64_t value,
+ uint32_t integer_bits,
+ uint32_t fractional_bits)
+{
+ /* 1. create mask of integer part */
+ uint32_t result =
+ (1 << integer_bits) - 1;
+ /* 2. mask out fractional part */
+ uint32_t fractional_part = FRACTIONAL_PART_MASK & value;
+ /* 3. shrink fixed point integer part to be of integer_bits width*/
+ result &= GET_INTEGER_PART(value);
+ /* 4. make space for fractional part to be filled in after integer */
+ result <<= fractional_bits;
+ /* 5. shrink fixed point fractional part to be of fractional_bits width */
+ fractional_part >>= BITS_PER_FRACTIONAL_PART - fractional_bits;
+ /* 6. merge the result */
+ return result | fractional_part;
+}
+
+uint32_t dal_fixed31_32_u2d19(
+ struct fixed31_32 arg)
+{
+ return ux_dy(arg.value, 2, 19);
+}
+
+uint32_t dal_fixed31_32_u0d19(
+ struct fixed31_32 arg)
+{
+ return ux_dy(arg.value, 0, 19);
+}
diff --git a/drivers/gpu/drm/amd/dal/basics/fixed32_32.c b/drivers/gpu/drm/amd/dal/basics/fixed32_32.c
new file mode 100644
index 000000000000..114013215118
--- /dev/null
+++ b/drivers/gpu/drm/amd/dal/basics/fixed32_32.c
@@ -0,0 +1,223 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dal_services.h"
+#include "include/fixed32_32.h"
+
+static uint64_t u64_div(uint64_t n, uint64_t d)
+{
+ uint32_t i = 0;
+ uint64_t r;
+ uint64_t q = div64_u64_rem(n, d, &r);
+
+ for (i = 0; i < 32; ++i) {
+ uint64_t sbit = q & (1ULL<<63);
+
+ r <<= 1;
+ r |= sbit ? 1 : 0;
+ q <<= 1;
+ if (r >= d) {
+ r -= d;
+ q |= 1;
+ }
+ }
+
+ if (2*r >= d)
+ q += 1;
+ return q;
+}
+
+struct fixed32_32 dal_fixed32_32_from_fraction(uint32_t n, uint32_t d)
+{
+ struct fixed32_32 fx;
+
+ fx.value = u64_div((uint64_t)n << 32, (uint64_t)d << 32);
+ return fx;
+}
+
+struct fixed32_32 dal_fixed32_32_from_int(uint32_t value)
+{
+ struct fixed32_32 fx;
+
+ fx.value = (uint64_t)value<<32;
+ return fx;
+}
+
+struct fixed32_32 dal_fixed32_32_add(
+ struct fixed32_32 lhs,
+ struct fixed32_32 rhs)
+{
+ struct fixed32_32 fx = {lhs.value + rhs.value};
+
+ ASSERT(fx.value >= rhs.value);
+ return fx;
+}
+
+struct fixed32_32 dal_fixed32_32_add_int(struct fixed32_32 lhs, uint32_t rhs)
+{
+ struct fixed32_32 fx = {lhs.value + ((uint64_t)rhs << 32)};
+
+ ASSERT(fx.value >= (uint64_t)rhs << 32);
+ return fx;
+
+}
+struct fixed32_32 dal_fixed32_32_sub(
+ struct fixed32_32 lhs,
+ struct fixed32_32 rhs)
+{
+ struct fixed32_32 fx;
+
+ ASSERT(lhs.value >= rhs.value);
+ fx.value = lhs.value - rhs.value;
+ return fx;
+}
+
+struct fixed32_32 dal_fixed32_32_sub_int(struct fixed32_32 lhs, uint32_t rhs)
+{
+ struct fixed32_32 fx;
+
+ ASSERT(lhs.value >= ((uint64_t)rhs<<32));
+ fx.value = lhs.value - ((uint64_t)rhs<<32);
+ return fx;
+}
+
+struct fixed32_32 dal_fixed32_32_mul(
+ struct fixed32_32 lhs,
+ struct fixed32_32 rhs)
+{
+ struct fixed32_32 fx;
+ uint64_t lhs_int = lhs.value>>32;
+ uint64_t lhs_frac = (uint32_t)lhs.value;
+ uint64_t rhs_int = rhs.value>>32;
+ uint64_t rhs_frac = (uint32_t)rhs.value;
+ uint64_t ahbh = lhs_int * rhs_int;
+ uint64_t ahbl = lhs_int * rhs_frac;
+ uint64_t albh = lhs_frac * rhs_int;
+ uint64_t albl = lhs_frac * rhs_frac;
+
+ ASSERT((ahbh>>32) == 0);
+
+ fx.value = (ahbh<<32) + ahbl + albh + (albl>>32);
+ return fx;
+
+}
+
+struct fixed32_32 dal_fixed32_32_mul_int(struct fixed32_32 lhs, uint32_t rhs)
+{
+ struct fixed32_32 fx;
+ uint64_t lhsi = (lhs.value>>32) * (uint64_t)rhs;
+ uint64_t lhsf;
+
+ ASSERT((lhsi>>32) == 0);
+ lhsf = ((uint32_t)lhs.value) * (uint64_t)rhs;
+ ASSERT((lhsi<<32) + lhsf >= lhsf);
+ fx.value = (lhsi<<32) + lhsf;
+ return fx;
+}
+
+
+
+struct fixed32_32 dal_fixed32_32_div(
+ struct fixed32_32 lhs,
+ struct fixed32_32 rhs)
+{
+ struct fixed32_32 fx;
+
+ fx.value = u64_div(lhs.value, rhs.value);
+ return fx;
+}
+
+struct fixed32_32 dal_fixed32_32_div_int(struct fixed32_32 lhs, uint32_t rhs)
+{
+ struct fixed32_32 fx;
+
+ fx.value = u64_div(lhs.value, (uint64_t)rhs << 32);
+ return fx;
+}
+
+struct fixed32_32 dal_fixed32_32_min(
+ struct fixed32_32 lhs,
+ struct fixed32_32 rhs)
+{
+ return (lhs.value < rhs.value) ? lhs : rhs;
+}
+
+struct fixed32_32 dal_fixed32_32_max(
+ struct fixed32_32 lhs,
+ struct fixed32_32 rhs)
+{
+ return (lhs.value > rhs.value) ? lhs : rhs;
+}
+
+bool dal_fixed32_32_gt(struct fixed32_32 lhs, struct fixed32_32 rhs)
+{
+ return lhs.value > rhs.value;
+}
+bool dal_fixed32_32_gt_int(struct fixed32_32 lhs, uint32_t rhs)
+{
+ return lhs.value > ((uint64_t)rhs<<32);
+}
+
+bool dal_fixed32_32_lt(struct fixed32_32 lhs, struct fixed32_32 rhs)
+{
+ return lhs.value < rhs.value;
+}
+
+bool dal_fixed32_32_le(struct fixed32_32 lhs, struct fixed32_32 rhs)
+{
+ return lhs.value <= rhs.value;
+}
+
+bool dal_fixed32_32_lt_int(struct fixed32_32 lhs, uint32_t rhs)
+{
+ return lhs.value < ((uint64_t)rhs<<32);
+}
+
+bool dal_fixed32_32_le_int(struct fixed32_32 lhs, uint32_t rhs)
+{
+ return lhs.value <= ((uint64_t)rhs<<32);
+}
+
+uint32_t dal_fixed32_32_ceil(struct fixed32_32 v)
+{
+ ASSERT((uint32_t)v.value ? (v.value >> 32) + 1 >= 1 : true);
+ return (v.value>>32) + ((uint32_t)v.value ? 1 : 0);
+}
+
+uint32_t dal_fixed32_32_floor(struct fixed32_32 v)
+{
+ return v.value>>32;
+}
+
+uint32_t dal_fixed32_32_round(struct fixed32_32 v)
+{
+ ASSERT(v.value + (1ULL<<31) >= (1ULL<<31));
+ return (v.value + (1ULL<<31))>>32;
+}
+
+bool dal_fixed32_32_eq(struct fixed32_32 lhs, struct fixed32_32 rhs)
+{
+ return lhs.value == rhs.value;
+}
diff --git a/drivers/gpu/drm/amd/dal/include/fixed31_32.h b/drivers/gpu/drm/amd/dal/include/fixed31_32.h
new file mode 100644
index 000000000000..507f9f639099
--- /dev/null
+++ b/drivers/gpu/drm/amd/dal/include/fixed31_32.h
@@ -0,0 +1,389 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_FIXED31_32_H__
+#define __DAL_FIXED31_32_H__
+
+/*
+ * @brief
+ * Arithmetic operations on real numbers
+ * represented as fixed-point numbers.
+ * There are: 1 bit for sign,
+ * 31 bits for integer part,
+ * 32 bits for fractional part.
+ *
+ * @note
+ * Currently, overflows and underflows are asserted;
+ * no special result returned.
+ */
+
+struct fixed31_32 {
+ int64_t value;
+};
+
+/*
+ * @brief
+ * Useful constants
+ */
+
+static const struct fixed31_32 dal_fixed31_32_zero = { 0 };
+static const struct fixed31_32 dal_fixed31_32_epsilon = { 1LL };
+static const struct fixed31_32 dal_fixed31_32_half = { 0x80000000LL };
+static const struct fixed31_32 dal_fixed31_32_one = { 0x100000000LL };
+
+static const struct fixed31_32 dal_fixed31_32_pi = { 13493037705LL };
+static const struct fixed31_32 dal_fixed31_32_two_pi = { 26986075409LL };
+static const struct fixed31_32 dal_fixed31_32_e = { 11674931555LL };
+static const struct fixed31_32 dal_fixed31_32_ln2 = { 2977044471LL };
+static const struct fixed31_32 dal_fixed31_32_ln2_div_2 = { 1488522236LL };
+
+/*
+ * @brief
+ * Initialization routines
+ */
+
+/*
+ * @brief
+ * result = numerator / denominator
+ */
+struct fixed31_32 dal_fixed31_32_from_fraction(
+ int64_t numerator,
+ int64_t denominator);
+
+/*
+ * @brief
+ * result = arg
+ */
+struct fixed31_32 dal_fixed31_32_from_int(
+ int64_t arg);
+
+/*
+ * @brief
+ * Unary operators
+ */
+
+/*
+ * @brief
+ * result = -arg
+ */
+struct fixed31_32 dal_fixed31_32_neg(
+ struct fixed31_32 arg);
+
+/*
+ * @brief
+ * result = abs(arg) := (arg >= 0) ? arg : -arg
+ */
+struct fixed31_32 dal_fixed31_32_abs(
+ struct fixed31_32 arg);
+
+/*
+ * @brief
+ * Binary relational operators
+ */
+
+/*
+ * @brief
+ * result = arg1 < arg2
+ */
+bool dal_fixed31_32_lt(
+ struct fixed31_32 arg1,
+ struct fixed31_32 arg2);
+
+/*
+ * @brief
+ * result = arg1 <= arg2
+ */
+bool dal_fixed31_32_le(
+ struct fixed31_32 arg1,
+ struct fixed31_32 arg2);
+
+/*
+ * @brief
+ * result = arg1 == arg2
+ */
+bool dal_fixed31_32_eq(
+ struct fixed31_32 arg1,
+ struct fixed31_32 arg2);
+
+/*
+ * @brief
+ * result = min(arg1, arg2) := (arg1 <= arg2) ? arg1 : arg2
+ */
+struct fixed31_32 dal_fixed31_32_min(
+ struct fixed31_32 arg1,
+ struct fixed31_32 arg2);
+
+/*
+ * @brief
+ * result = max(arg1, arg2) := (arg1 <= arg2) ? arg2 : arg1
+ */
+struct fixed31_32 dal_fixed31_32_max(
+ struct fixed31_32 arg1,
+ struct fixed31_32 arg2);
+
+/*
+ * @brief
+ * | min_value, when arg <= min_value
+ * result = | arg, when min_value < arg < max_value
+ * | max_value, when arg >= max_value
+ */
+struct fixed31_32 dal_fixed31_32_clamp(
+ struct fixed31_32 arg,
+ struct fixed31_32 min_value,
+ struct fixed31_32 max_value);
+
+/*
+ * @brief
+ * Binary shift operators
+ */
+
+/*
+ * @brief
+ * result = arg << shift
+ */
+struct fixed31_32 dal_fixed31_32_shl(
+ struct fixed31_32 arg,
+ uint8_t shift);
+
+/*
+ * @brief
+ * result = arg >> shift
+ */
+struct fixed31_32 dal_fixed31_32_shr(
+ struct fixed31_32 arg,
+ uint8_t shift);
+
+/*
+ * @brief
+ * Binary additive operators
+ */
+
+/*
+ * @brief
+ * result = arg1 + arg2
+ */
+struct fixed31_32 dal_fixed31_32_add(
+ struct fixed31_32 arg1,
+ struct fixed31_32 arg2);
+
+/*
+ * @brief
+ * result = arg1 - arg2
+ */
+struct fixed31_32 dal_fixed31_32_sub_int(
+ struct fixed31_32 arg1,
+ int32_t arg2);
+
+/*
+ * @brief
+ * result = arg1 - arg2
+ */
+struct fixed31_32 dal_fixed31_32_sub(
+ struct fixed31_32 arg1,
+ struct fixed31_32 arg2);
+
+/*
+ * @brief
+ * Binary multiplicative operators
+ */
+
+/*
+ * @brief
+ * result = arg1 * arg2
+ */
+struct fixed31_32 dal_fixed31_32_mul_int(
+ struct fixed31_32 arg1,
+ int32_t arg2);
+
+/*
+ * @brief
+ * result = arg1 * arg2
+ */
+struct fixed31_32 dal_fixed31_32_mul(
+ struct fixed31_32 arg1,
+ struct fixed31_32 arg2);
+
+/*
+ * @brief
+ * result = square(arg) := arg * arg
+ */
+struct fixed31_32 dal_fixed31_32_sqr(
+ struct fixed31_32 arg);
+
+/*
+ * @brief
+ * result = arg1 / arg2
+ */
+struct fixed31_32 dal_fixed31_32_div_int(
+ struct fixed31_32 arg1,
+ int64_t arg2);
+
+/*
+ * @brief
+ * result = arg1 / arg2
+ */
+struct fixed31_32 dal_fixed31_32_div(
+ struct fixed31_32 arg1,
+ struct fixed31_32 arg2);
+
+/*
+ * @brief
+ * Reciprocal function
+ */
+
+/*
+ * @brief
+ * result = reciprocal(arg) := 1 / arg
+ *
+ * @note
+ * No special actions taken in case argument is zero.
+ */
+struct fixed31_32 dal_fixed31_32_recip(
+ struct fixed31_32 arg);
+
+/*
+ * @brief
+ * Trigonometric functions
+ */
+
+/*
+ * @brief
+ * result = sinc(arg) := sin(arg) / arg
+ *
+ * @note
+ * Argument specified in radians,
+ * internally it's normalized to [-2pi...2pi] range.
+ */
+struct fixed31_32 dal_fixed31_32_sinc(
+ struct fixed31_32 arg);
+
+/*
+ * @brief
+ * result = sin(arg)
+ *
+ * @note
+ * Argument specified in radians,
+ * internally it's normalized to [-2pi...2pi] range.
+ */
+struct fixed31_32 dal_fixed31_32_sin(
+ struct fixed31_32 arg);
+
+/*
+ * @brief
+ * result = cos(arg)
+ *
+ * @note
+ * Argument specified in radians
+ * and should be in [-2pi...2pi] range -
+ * passing arguments outside that range
+ * will cause incorrect result!
+ */
+struct fixed31_32 dal_fixed31_32_cos(
+ struct fixed31_32 arg);
+
+/*
+ * @brief
+ * Transcendent functions
+ */
+
+/*
+ * @brief
+ * result = exp(arg)
+ *
+ * @note
+ * Currently, function is verified for abs(arg) <= 1.
+ */
+struct fixed31_32 dal_fixed31_32_exp(
+ struct fixed31_32 arg);
+
+/*
+ * @brief
+ * result = log(arg)
+ *
+ * @note
+ * Currently, abs(arg) should be less than 1.
+ * No normalization is done.
+ * Currently, no special actions taken
+ * in case of invalid argument(s). Take care!
+ */
+struct fixed31_32 dal_fixed31_32_log(
+ struct fixed31_32 arg);
+
+/*
+ * @brief
+ * Power function
+ */
+
+/*
+ * @brief
+ * result = pow(arg1, arg2)
+ *
+ * @note
+ * Currently, abs(arg1) should be less than 1. Take care!
+ */
+struct fixed31_32 dal_fixed31_32_pow(
+ struct fixed31_32 arg1,
+ struct fixed31_32 arg2);
+
+/*
+ * @brief
+ * Rounding functions
+ */
+
+/*
+ * @brief
+ * result = floor(arg) := greatest integer lower than or equal to arg
+ */
+int32_t dal_fixed31_32_floor(
+ struct fixed31_32 arg);
+
+/*
+ * @brief
+ * result = round(arg) := integer nearest to arg
+ */
+int32_t dal_fixed31_32_round(
+ struct fixed31_32 arg);
+
+/*
+ * @brief
+ * result = ceil(arg) := lowest integer greater than or equal to arg
+ */
+int32_t dal_fixed31_32_ceil(
+ struct fixed31_32 arg);
+
+/* the following two functions are used in scaler hw programming to convert a
+ * fixed point value to a format with 2 bits of integer part and 19 bits of
+ * fractional part. The same applies for u0d19: 0 bits of integer part and
+ * 19 bits of fractional part.
+ */
+
+uint32_t dal_fixed31_32_u2d19(
+ struct fixed31_32 arg);
+
+uint32_t dal_fixed31_32_u0d19(
+ struct fixed31_32 arg);
+
+
+#endif
diff --git a/drivers/gpu/drm/amd/dal/include/fixed32_32.h b/drivers/gpu/drm/amd/dal/include/fixed32_32.h
new file mode 100644
index 000000000000..5fca9573930b
--- /dev/null
+++ b/drivers/gpu/drm/amd/dal/include/fixed32_32.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_FIXED32_32_H__
+#define __DAL_FIXED32_32_H__
+
+struct fixed32_32 {
+ uint64_t value;
+};
+
+static const struct fixed32_32 dal_fixed32_32_zero = { 0 };
+static const struct fixed32_32 dal_fixed32_32_one = { 0x100000000LL };
+static const struct fixed32_32 dal_fixed32_32_half = { 0x80000000LL };
+
+struct fixed32_32 dal_fixed32_32_from_fraction(uint32_t n, uint32_t d);
+struct fixed32_32 dal_fixed32_32_from_int(uint32_t value);
+struct fixed32_32 dal_fixed32_32_add(
+ struct fixed32_32 lhs,
+ struct fixed32_32 rhs);
+struct fixed32_32 dal_fixed32_32_add_int(
+ struct fixed32_32 lhs,
+ uint32_t rhs);
+struct fixed32_32 dal_fixed32_32_sub(
+ struct fixed32_32 lhs,
+ struct fixed32_32 rhs);
+struct fixed32_32 dal_fixed32_32_sub_int(
+ struct fixed32_32 lhs,
+ uint32_t rhs);
+struct fixed32_32 dal_fixed32_32_mul(
+ struct fixed32_32 lhs,
+ struct fixed32_32 rhs);
+struct fixed32_32 dal_fixed32_32_mul_int(
+ struct fixed32_32 lhs,
+ uint32_t rhs);
+struct fixed32_32 dal_fixed32_32_div(
+ struct fixed32_32 lhs,
+ struct fixed32_32 rhs);
+struct fixed32_32 dal_fixed32_32_div_int(
+ struct fixed32_32 lhs,
+ uint32_t rhs);
+struct fixed32_32 dal_fixed32_32_min(
+ struct fixed32_32 lhs,
+ struct fixed32_32 rhs);
+struct fixed32_32 dal_fixed32_32_max(
+ struct fixed32_32 lhs,
+ struct fixed32_32 rhs);
+bool dal_fixed32_32_gt(struct fixed32_32 lhs, struct fixed32_32 rhs);
+bool dal_fixed32_32_gt_int(struct fixed32_32 lhs, uint32_t rhs);
+bool dal_fixed32_32_lt(struct fixed32_32 lhs, struct fixed32_32 rhs);
+bool dal_fixed32_32_lt_int(struct fixed32_32 lhs, uint32_t rhs);
+bool dal_fixed32_32_le(struct fixed32_32 lhs, struct fixed32_32 rhs);
+bool dal_fixed32_32_le_int(struct fixed32_32 lhs, uint32_t rhs);
+bool dal_fixed32_32_eq(struct fixed32_32 lhs, struct fixed32_32 rhs);
+uint32_t dal_fixed32_32_ceil(struct fixed32_32 value);
+uint32_t dal_fixed32_32_floor(struct fixed32_32 value);
+uint32_t dal_fixed32_32_round(struct fixed32_32 value);
+
+#endif
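
For the unsigned fixed32_32 type, the rounding helpers at the end of fixed32_32.c are plain bit operations on the raw U32.32 value. A minimal user-space sketch of the same logic, assuming only that the value field holds the real number multiplied by 2^32 (the fx32_* names are local stand-ins, and the kernel ASSERT overflow checks are omitted):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t fx32_floor(uint64_t v)
    {
        return (uint32_t)(v >> 32);             /* drop the fractional bits */
    }

    static uint32_t fx32_ceil(uint64_t v)
    {
        /* any non-zero fractional bits push the result up by one */
        return (uint32_t)(v >> 32) + ((uint32_t)v ? 1 : 0);
    }

    static uint32_t fx32_round(uint64_t v)
    {
        /* add one half (1 << 31) before truncating */
        return (uint32_t)((v + (1ULL << 31)) >> 32);
    }

    int main(void)
    {
        uint64_t x = ((uint64_t)7 << 32) | 0x40000000u;   /* 7.25 in U32.32 */

        printf("floor=%u ceil=%u round=%u\n",
               fx32_floor(x), fx32_ceil(x), fx32_round(x));   /* 7 8 7 */
        return 0;
    }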