author    Dave Airlie <airlied@redhat.com>           2016-06-09 10:19:49 +1000
committer Nicolai Hähnle <nicolai.haehnle@amd.com>   2016-09-21 10:24:30 +0200
commit    5561a377107cee5e07348fce020de579463c298f (patch)
tree      8d533b754458d23b9e0a7314b2b70b5283660472
parent    6b26039da3deb3950d9798150d5431cb942f5637 (diff)
gallivm/llvmpipe: prepare support for ARB_gpu_shader_int64.
This enables 64-bit integer support in gallivm and llvmpipe.

v2: add conversion opcodes.
v3: - PIPE_CAP_INT64 is not there yet
    - restrict DIV/MOD defaults to the CPU, as for 32 bits
    - TGSI_OPCODE_I2U64 becomes TGSI_OPCODE_U2I64

Reviewed-by: Roland Scheidegger <sroland@vmware.com>
Reviewed-by: Edward O'Callaghan <funfunctor@folklore1984.net>
Signed-off-by: Dave Airlie <airlied@redhat.com>
Signed-off-by: Nicolai Hähnle <nicolai.haehnle@amd.com>
-rw-r--r--   src/gallium/auxiliary/gallivm/lp_bld_tgsi.c        |   2
-rw-r--r--   src/gallium/auxiliary/gallivm/lp_bld_tgsi.h        |   4
-rw-r--r--   src/gallium/auxiliary/gallivm/lp_bld_tgsi_action.c | 456
-rw-r--r--   src/gallium/auxiliary/gallivm/lp_bld_tgsi_soa.c    |  40
-rw-r--r--   src/gallium/auxiliary/tgsi/tgsi_info.h             |   3
5 files changed, 500 insertions, 5 deletions
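
Note on the CPU-only DIV/MOD paths added below: they guard against division by zero with a comparison mask rather than a branch. As a standalone illustration only (this scalar C sketch is not part of the patch, which operates on LLVM vector values via lp_build_cmp and LLVMBuildOr), the same trick looks like this:

    #include <stdint.h>

    /* Hypothetical helper, for illustration: mirrors the zero-divisor
     * guard in u64div_emit_cpu()/u64mod_emit_cpu() below. */
    static uint64_t safe_u64div(uint64_t a, uint64_t b)
    {
        uint64_t div_mask = (b == 0) ? ~UINT64_C(0) : 0; /* all ones where the divisor is zero */
        uint64_t divisor  = b | div_mask;                /* never zero, so the divide cannot fault */
        uint64_t result   = a / divisor;
        return result | div_mask;                        /* zero-divisor case yields ~0 (i.e. -1) */
    }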
diff --git a/src/gallium/auxiliary/gallivm/lp_bld_tgsi.c b/src/gallium/auxiliary/gallivm/lp_bld_tgsi.c
index 1ef6ae4268..b3972610ff 100644
--- a/src/gallium/auxiliary/gallivm/lp_bld_tgsi.c
+++ b/src/gallium/auxiliary/gallivm/lp_bld_tgsi.c
@@ -364,6 +364,8 @@ lp_build_emit_fetch(
break;
case TGSI_TYPE_UNSIGNED:
case TGSI_TYPE_SIGNED:
+ case TGSI_TYPE_UNSIGNED64:
+ case TGSI_TYPE_SIGNED64:
case TGSI_TYPE_VOID:
default:
/* abs modifier is only legal on floating point types */
diff --git a/src/gallium/auxiliary/gallivm/lp_bld_tgsi.h b/src/gallium/auxiliary/gallivm/lp_bld_tgsi.h
index de1150ccd1..b6b3fe369b 100644
--- a/src/gallium/auxiliary/gallivm/lp_bld_tgsi.h
+++ b/src/gallium/auxiliary/gallivm/lp_bld_tgsi.h
@@ -337,6 +337,10 @@ struct lp_build_tgsi_context
struct lp_build_context int_bld;
struct lp_build_context dbl_bld;
+
+ struct lp_build_context uint64_bld;
+ struct lp_build_context int64_bld;
+
/** This array stores functions that are used to transform TGSI opcodes to
* LLVM instructions.
*/
diff --git a/src/gallium/auxiliary/gallivm/lp_bld_tgsi_action.c b/src/gallium/auxiliary/gallivm/lp_bld_tgsi_action.c
index 1ee9704923..2e837afe29 100644
--- a/src/gallium/auxiliary/gallivm/lp_bld_tgsi_action.c
+++ b/src/gallium/auxiliary/gallivm/lp_bld_tgsi_action.c
@@ -1093,6 +1093,210 @@ static void dfrac_emit(
emit_data->args[0], tmp, "");
}
+static void
+u64mul_emit(
+ const struct lp_build_tgsi_action * action,
+ struct lp_build_tgsi_context * bld_base,
+ struct lp_build_emit_data * emit_data)
+{
+ emit_data->output[emit_data->chan] = lp_build_mul(&bld_base->uint64_bld,
+ emit_data->args[0], emit_data->args[1]);
+}
+
+static void
+u64mod_emit_cpu(
+ const struct lp_build_tgsi_action * action,
+ struct lp_build_tgsi_context * bld_base,
+ struct lp_build_emit_data * emit_data)
+{
+ LLVMBuilderRef builder = bld_base->base.gallivm->builder;
+ LLVMValueRef div_mask = lp_build_cmp(&bld_base->uint64_bld,
+ PIPE_FUNC_EQUAL, emit_data->args[1],
+ bld_base->uint64_bld.zero);
+ /* Make sure we never divide/mod by zero, so we don't generate
+ * SIGFPE. We don't want to crash just because the shader is
+ * doing something weird. */
+ LLVMValueRef divisor = LLVMBuildOr(builder,
+ div_mask,
+ emit_data->args[1], "");
+ LLVMValueRef result = lp_build_mod(&bld_base->uint64_bld,
+ emit_data->args[0], divisor);
+ /* umod by zero doesn't have a guaranteed return value; we chose all ones (-1) for now. */
+ emit_data->output[emit_data->chan] = LLVMBuildOr(builder,
+ div_mask,
+ result, "");
+}
+
+static void
+i64mod_emit_cpu(
+ const struct lp_build_tgsi_action * action,
+ struct lp_build_tgsi_context * bld_base,
+ struct lp_build_emit_data * emit_data)
+{
+ LLVMBuilderRef builder = bld_base->base.gallivm->builder;
+ LLVMValueRef div_mask = lp_build_cmp(&bld_base->uint64_bld,
+ PIPE_FUNC_EQUAL, emit_data->args[1],
+ bld_base->uint64_bld.zero);
+ /* Make sure we never divide/mod by zero, so we don't generate
+ * SIGFPE. We don't want to crash just because the shader is
+ * doing something weird. */
+ LLVMValueRef divisor = LLVMBuildOr(builder,
+ div_mask,
+ emit_data->args[1], "");
+ LLVMValueRef result = lp_build_mod(&bld_base->int64_bld,
+ emit_data->args[0], divisor);
+ /* imod by zero doesn't have a guaranteed return value either; we chose -1 for now. */
+ emit_data->output[emit_data->chan] = LLVMBuildOr(builder,
+ div_mask,
+ result, "");
+}
+
+static void
+u64div_emit_cpu(
+ const struct lp_build_tgsi_action * action,
+ struct lp_build_tgsi_context * bld_base,
+ struct lp_build_emit_data * emit_data)
+{
+
+ LLVMBuilderRef builder = bld_base->base.gallivm->builder;
+ LLVMValueRef div_mask = lp_build_cmp(&bld_base->uint64_bld,
+ PIPE_FUNC_EQUAL, emit_data->args[1],
+ bld_base->uint64_bld.zero);
+ /* Make sure we never divide/mod by zero, so we don't generate
+ * SIGFPE. We don't want to crash just because the shader is
+ * doing something weird. */
+ LLVMValueRef divisor = LLVMBuildOr(builder,
+ div_mask,
+ emit_data->args[1], "");
+ LLVMValueRef result = LLVMBuildUDiv(builder,
+ emit_data->args[0], divisor, "");
+ /* udiv by zero returns all ones here (0xffffffffffffffff), mirroring the d3d10 rule for 32-bit udiv */
+ emit_data->output[emit_data->chan] = LLVMBuildOr(builder,
+ div_mask,
+ result, "");
+}
+
+static void
+i64div_emit_cpu(
+ const struct lp_build_tgsi_action * action,
+ struct lp_build_tgsi_context * bld_base,
+ struct lp_build_emit_data * emit_data)
+{
+
+ LLVMBuilderRef builder = bld_base->base.gallivm->builder;
+ LLVMValueRef div_mask = lp_build_cmp(&bld_base->int64_bld,
+ PIPE_FUNC_EQUAL, emit_data->args[1],
+ bld_base->int64_bld.zero);
+ /* Make sure we never divide/mod by zero, so we don't generate
+ * SIGFPE. We don't want to crash just because the shader is
+ * doing something weird. */
+ LLVMValueRef divisor = LLVMBuildOr(builder,
+ div_mask,
+ emit_data->args[1], "");
+ LLVMValueRef result = LLVMBuildSDiv(builder,
+ emit_data->args[0], divisor, "");
+ /* sdiv by zero also returns all ones here (i.e. -1), as in the udiv case above */
+ emit_data->output[emit_data->chan] = LLVMBuildOr(builder,
+ div_mask,
+ result, "");
+}
+
+static void
+f2u64_emit(
+ const struct lp_build_tgsi_action * action,
+ struct lp_build_tgsi_context * bld_base,
+ struct lp_build_emit_data * emit_data)
+{
+ emit_data->output[emit_data->chan] =
+ LLVMBuildFPToUI(bld_base->base.gallivm->builder,
+ emit_data->args[0],
+ bld_base->uint64_bld.vec_type, "");
+}
+
+static void
+f2i64_emit(
+ const struct lp_build_tgsi_action * action,
+ struct lp_build_tgsi_context * bld_base,
+ struct lp_build_emit_data * emit_data)
+{
+ emit_data->output[emit_data->chan] =
+ LLVMBuildFPToSI(bld_base->base.gallivm->builder,
+ emit_data->args[0],
+ bld_base->int64_bld.vec_type, "");
+}
+
+static void
+u2i64_emit(
+ const struct lp_build_tgsi_action * action,
+ struct lp_build_tgsi_context * bld_base,
+ struct lp_build_emit_data * emit_data)
+{
+ emit_data->output[emit_data->chan] =
+ LLVMBuildZExt(bld_base->base.gallivm->builder,
+ emit_data->args[0],
+ bld_base->uint64_bld.vec_type, "");
+}
+
+static void
+i2i64_emit(
+ const struct lp_build_tgsi_action * action,
+ struct lp_build_tgsi_context * bld_base,
+ struct lp_build_emit_data * emit_data)
+{
+ emit_data->output[emit_data->chan] =
+ LLVMBuildSExt(bld_base->base.gallivm->builder,
+ emit_data->args[0],
+ bld_base->int64_bld.vec_type, "");
+}
+
+static void
+i642f_emit(
+ const struct lp_build_tgsi_action * action,
+ struct lp_build_tgsi_context * bld_base,
+ struct lp_build_emit_data * emit_data)
+{
+ emit_data->output[emit_data->chan] =
+ LLVMBuildSIToFP(bld_base->base.gallivm->builder,
+ emit_data->args[0],
+ bld_base->base.vec_type, "");
+}
+
+static void
+u642f_emit(
+ const struct lp_build_tgsi_action * action,
+ struct lp_build_tgsi_context * bld_base,
+ struct lp_build_emit_data * emit_data)
+{
+ emit_data->output[emit_data->chan] =
+ LLVMBuildUIToFP(bld_base->base.gallivm->builder,
+ emit_data->args[0],
+ bld_base->base.vec_type, "");
+}
+
+static void
+i642d_emit(
+ const struct lp_build_tgsi_action * action,
+ struct lp_build_tgsi_context * bld_base,
+ struct lp_build_emit_data * emit_data)
+{
+ emit_data->output[emit_data->chan] =
+ LLVMBuildSIToFP(bld_base->base.gallivm->builder,
+ emit_data->args[0],
+ bld_base->dbl_bld.vec_type, "");
+}
+
+static void
+u642d_emit(
+ const struct lp_build_tgsi_action * action,
+ struct lp_build_tgsi_context * bld_base,
+ struct lp_build_emit_data * emit_data)
+{
+ emit_data->output[emit_data->chan] =
+ LLVMBuildUIToFP(bld_base->base.gallivm->builder,
+ emit_data->args[0],
+ bld_base->dbl_bld.vec_type, "");
+}
+
void
lp_set_default_actions(struct lp_build_tgsi_context * bld_base)
{
@@ -1168,6 +1372,26 @@ lp_set_default_actions(struct lp_build_tgsi_context * bld_base)
bld_base->op_actions[TGSI_OPCODE_DRCP].emit = drcp_emit;
bld_base->op_actions[TGSI_OPCODE_DFRAC].emit = dfrac_emit;
+ bld_base->op_actions[TGSI_OPCODE_U64MUL].emit = u64mul_emit;
+
+ bld_base->op_actions[TGSI_OPCODE_F2I64].emit = f2i64_emit;
+ bld_base->op_actions[TGSI_OPCODE_F2U64].emit = f2u64_emit;
+
+ bld_base->op_actions[TGSI_OPCODE_D2I64].emit = f2i64_emit;
+ bld_base->op_actions[TGSI_OPCODE_D2U64].emit = f2u64_emit;
+
+ bld_base->op_actions[TGSI_OPCODE_I2I64].emit = i2i64_emit;
+ bld_base->op_actions[TGSI_OPCODE_U2I64].emit = u2i64_emit;
+
+ bld_base->op_actions[TGSI_OPCODE_I642F].emit = i642f_emit;
+ bld_base->op_actions[TGSI_OPCODE_U642F].emit = u642f_emit;
+
+ bld_base->op_actions[TGSI_OPCODE_I642D].emit = i642d_emit;
+ bld_base->op_actions[TGSI_OPCODE_U642D].emit = u642d_emit;
+
}
/* CPU Only default actions */
@@ -2140,6 +2364,213 @@ dsqrt_emit_cpu(
emit_data->args[0]);
}
+static void
+i64abs_emit_cpu(
+ const struct lp_build_tgsi_action * action,
+ struct lp_build_tgsi_context * bld_base,
+ struct lp_build_emit_data * emit_data)
+{
+ emit_data->output[emit_data->chan] = lp_build_abs(&bld_base->int64_bld,
+ emit_data->args[0]);
+}
+
+static void
+i64ssg_emit_cpu(
+ const struct lp_build_tgsi_action * action,
+ struct lp_build_tgsi_context * bld_base,
+ struct lp_build_emit_data * emit_data)
+{
+ emit_data->output[emit_data->chan] = lp_build_sgn(&bld_base->int64_bld,
+ emit_data->args[0]);
+}
+
+static void
+i64neg_emit_cpu(
+ const struct lp_build_tgsi_action * action,
+ struct lp_build_tgsi_context * bld_base,
+ struct lp_build_emit_data * emit_data)
+{
+ emit_data->output[emit_data->chan] = lp_build_sub(&bld_base->int64_bld,
+ bld_base->int64_bld.zero,
+ emit_data->args[0]);
+}
+
+static void
+u64set_emit_cpu(
+ const struct lp_build_tgsi_action * action,
+ struct lp_build_tgsi_context * bld_base,
+ struct lp_build_emit_data * emit_data,
+ unsigned pipe_func)
+{
+ LLVMBuilderRef builder = bld_base->base.gallivm->builder;
+ LLVMValueRef cond = lp_build_cmp(&bld_base->uint64_bld, pipe_func,
+ emit_data->args[0], emit_data->args[1]);
+ /* the comparison arguments were 64 bit, but the result is stored as 32 bit */
+ cond = LLVMBuildTrunc(builder, cond, bld_base->int_bld.int_vec_type, "");
+ emit_data->output[emit_data->chan] = cond;
+}
+
+static void
+u64seq_emit_cpu(
+ const struct lp_build_tgsi_action * action,
+ struct lp_build_tgsi_context * bld_base,
+ struct lp_build_emit_data * emit_data)
+{
+ u64set_emit_cpu(action, bld_base, emit_data, PIPE_FUNC_EQUAL);
+}
+
+static void
+u64sne_emit_cpu(
+ const struct lp_build_tgsi_action * action,
+ struct lp_build_tgsi_context * bld_base,
+ struct lp_build_emit_data * emit_data)
+{
+ u64set_emit_cpu(action, bld_base, emit_data, PIPE_FUNC_NOTEQUAL);
+}
+
+static void
+u64slt_emit_cpu(
+ const struct lp_build_tgsi_action * action,
+ struct lp_build_tgsi_context * bld_base,
+ struct lp_build_emit_data * emit_data)
+{
+ u64set_emit_cpu(action, bld_base, emit_data, PIPE_FUNC_LESS);
+}
+
+static void
+u64sge_emit_cpu(
+ const struct lp_build_tgsi_action * action,
+ struct lp_build_tgsi_context * bld_base,
+ struct lp_build_emit_data * emit_data)
+{
+ u64set_emit_cpu(action, bld_base, emit_data, PIPE_FUNC_GEQUAL);
+}
+
+static void
+i64set_emit_cpu(
+ const struct lp_build_tgsi_action * action,
+ struct lp_build_tgsi_context * bld_base,
+ struct lp_build_emit_data * emit_data,
+ unsigned pipe_func)
+{
+ LLVMBuilderRef builder = bld_base->base.gallivm->builder;
+ LLVMValueRef cond = lp_build_cmp(&bld_base->int64_bld, pipe_func,
+ emit_data->args[0], emit_data->args[1]);
+ /* the comparison arguments were 64 bit, but the result is stored as 32 bit */
+ cond = LLVMBuildTrunc(builder, cond, bld_base->int_bld.int_vec_type, "");
+ emit_data->output[emit_data->chan] = cond;
+}
+
+static void
+i64slt_emit_cpu(
+ const struct lp_build_tgsi_action * action,
+ struct lp_build_tgsi_context * bld_base,
+ struct lp_build_emit_data * emit_data)
+{
+ i64set_emit_cpu(action, bld_base, emit_data, PIPE_FUNC_LESS);
+}
+
+static void
+i64sge_emit_cpu(
+ const struct lp_build_tgsi_action * action,
+ struct lp_build_tgsi_context * bld_base,
+ struct lp_build_emit_data * emit_data)
+{
+ i64set_emit_cpu(action, bld_base, emit_data, PIPE_FUNC_GEQUAL);
+}
+
+static void
+u64max_emit_cpu(
+ const struct lp_build_tgsi_action * action,
+ struct lp_build_tgsi_context * bld_base,
+ struct lp_build_emit_data * emit_data)
+{
+ emit_data->output[emit_data->chan] = lp_build_max(&bld_base->uint64_bld,
+ emit_data->args[0], emit_data->args[1]);
+}
+
+static void
+u64min_emit_cpu(
+ const struct lp_build_tgsi_action * action,
+ struct lp_build_tgsi_context * bld_base,
+ struct lp_build_emit_data * emit_data)
+{
+ emit_data->output[emit_data->chan] = lp_build_min(&bld_base->uint64_bld,
+ emit_data->args[0], emit_data->args[1]);
+}
+
+static void
+i64max_emit_cpu(
+ const struct lp_build_tgsi_action * action,
+ struct lp_build_tgsi_context * bld_base,
+ struct lp_build_emit_data * emit_data)
+{
+ emit_data->output[emit_data->chan] = lp_build_max(&bld_base->int64_bld,
+ emit_data->args[0], emit_data->args[1]);
+}
+
+static void
+i64min_emit_cpu(
+ const struct lp_build_tgsi_action * action,
+ struct lp_build_tgsi_context * bld_base,
+ struct lp_build_emit_data * emit_data)
+{
+ emit_data->output[emit_data->chan] = lp_build_min(&bld_base->int64_bld,
+ emit_data->args[0], emit_data->args[1]);
+}
+
+static void
+u64add_emit_cpu(
+ const struct lp_build_tgsi_action * action,
+ struct lp_build_tgsi_context * bld_base,
+ struct lp_build_emit_data * emit_data)
+{
+ emit_data->output[emit_data->chan] = lp_build_add(&bld_base->uint64_bld,
+ emit_data->args[0], emit_data->args[1]);
+}
+
+static void
+u64shl_emit_cpu(
+ const struct lp_build_tgsi_action * action,
+ struct lp_build_tgsi_context * bld_base,
+ struct lp_build_emit_data * emit_data)
+{
+ struct lp_build_context *uint_bld = &bld_base->uint64_bld;
+ LLVMValueRef mask = lp_build_const_vec(uint_bld->gallivm, uint_bld->type,
+ uint_bld->type.width - 1);
+ LLVMValueRef masked_count = lp_build_and(uint_bld, emit_data->args[1], mask);
+ emit_data->output[emit_data->chan] = lp_build_shl(uint_bld, emit_data->args[0],
+ masked_count);
+}
+
+static void
+i64shr_emit_cpu(
+ const struct lp_build_tgsi_action * action,
+ struct lp_build_tgsi_context * bld_base,
+ struct lp_build_emit_data * emit_data)
+{
+ struct lp_build_context *int_bld = &bld_base->int64_bld;
+ LLVMValueRef mask = lp_build_const_vec(int_bld->gallivm, int_bld->type,
+ int_bld->type.width - 1);
+ LLVMValueRef masked_count = lp_build_and(int_bld, emit_data->args[1], mask);
+ emit_data->output[emit_data->chan] = lp_build_shr(int_bld, emit_data->args[0],
+ masked_count);
+}
+
+static void
+u64shr_emit_cpu(
+ const struct lp_build_tgsi_action * action,
+ struct lp_build_tgsi_context * bld_base,
+ struct lp_build_emit_data * emit_data)
+{
+ struct lp_build_context *uint_bld = &bld_base->uint64_bld;
+ LLVMValueRef mask = lp_build_const_vec(uint_bld->gallivm, uint_bld->type,
+ uint_bld->type.width - 1);
+ LLVMValueRef masked_count = lp_build_and(uint_bld, emit_data->args[1], mask);
+ emit_data->output[emit_data->chan] = lp_build_shr(uint_bld, emit_data->args[0],
+ masked_count);
+}
+
void
lp_set_default_actions_cpu(
struct lp_build_tgsi_context * bld_base)
@@ -2223,4 +2654,29 @@ lp_set_default_actions_cpu(
bld_base->op_actions[TGSI_OPCODE_DRSQ].emit = drecip_sqrt_emit_cpu;
bld_base->op_actions[TGSI_OPCODE_DSQRT].emit = dsqrt_emit_cpu;
+ bld_base->op_actions[TGSI_OPCODE_I64ABS].emit = i64abs_emit_cpu;
+ bld_base->op_actions[TGSI_OPCODE_I64SSG].emit = i64ssg_emit_cpu;
+ bld_base->op_actions[TGSI_OPCODE_I64NEG].emit = i64neg_emit_cpu;
+
+ bld_base->op_actions[TGSI_OPCODE_U64SEQ].emit = u64seq_emit_cpu;
+ bld_base->op_actions[TGSI_OPCODE_U64SNE].emit = u64sne_emit_cpu;
+ bld_base->op_actions[TGSI_OPCODE_U64SLT].emit = u64slt_emit_cpu;
+ bld_base->op_actions[TGSI_OPCODE_U64SGE].emit = u64sge_emit_cpu;
+ bld_base->op_actions[TGSI_OPCODE_I64SLT].emit = i64slt_emit_cpu;
+ bld_base->op_actions[TGSI_OPCODE_I64SGE].emit = i64sge_emit_cpu;
+
+ bld_base->op_actions[TGSI_OPCODE_U64MIN].emit = u64min_emit_cpu;
+ bld_base->op_actions[TGSI_OPCODE_U64MAX].emit = u64max_emit_cpu;
+ bld_base->op_actions[TGSI_OPCODE_I64MIN].emit = i64min_emit_cpu;
+ bld_base->op_actions[TGSI_OPCODE_I64MAX].emit = i64max_emit_cpu;
+
+ bld_base->op_actions[TGSI_OPCODE_U64ADD].emit = u64add_emit_cpu;
+ bld_base->op_actions[TGSI_OPCODE_U64MOD].emit = u64mod_emit_cpu;
+ bld_base->op_actions[TGSI_OPCODE_I64MOD].emit = i64mod_emit_cpu;
+ bld_base->op_actions[TGSI_OPCODE_U64DIV].emit = u64div_emit_cpu;
+ bld_base->op_actions[TGSI_OPCODE_I64DIV].emit = i64div_emit_cpu;
+
+ bld_base->op_actions[TGSI_OPCODE_U64SHL].emit = u64shl_emit_cpu;
+ bld_base->op_actions[TGSI_OPCODE_I64SHR].emit = i64shr_emit_cpu;
+ bld_base->op_actions[TGSI_OPCODE_U64SHR].emit = u64shr_emit_cpu;
}
diff --git a/src/gallium/auxiliary/gallivm/lp_bld_tgsi_soa.c b/src/gallium/auxiliary/gallivm/lp_bld_tgsi_soa.c
index 5b76733797..6871795b4b 100644
--- a/src/gallium/auxiliary/gallivm/lp_bld_tgsi_soa.c
+++ b/src/gallium/auxiliary/gallivm/lp_bld_tgsi_soa.c
@@ -1168,6 +1168,12 @@ stype_to_fetch(struct lp_build_tgsi_context * bld_base,
case TGSI_TYPE_DOUBLE:
bld_fetch = &bld_base->dbl_bld;
break;
+ case TGSI_TYPE_UNSIGNED64:
+ bld_fetch = &bld_base->uint64_bld;
+ break;
+ case TGSI_TYPE_SIGNED64:
+ bld_fetch = &bld_base->int64_bld;
+ break;
case TGSI_TYPE_VOID:
default:
assert(0);
@@ -1285,12 +1291,20 @@ emit_fetch_constant(
LLVMTypeRef dptr_type = LLVMPointerType(LLVMDoubleTypeInContext(gallivm->context), 0);
scalar_ptr = LLVMBuildBitCast(builder, scalar_ptr, dptr_type, "");
bld_broad = &bld_base->dbl_bld;
+ } else if (stype == TGSI_TYPE_UNSIGNED64) {
+ LLVMTypeRef u64ptr_type = LLVMPointerType(LLVMInt64TypeInContext(gallivm->context), 0);
+ scalar_ptr = LLVMBuildBitCast(builder, scalar_ptr, u64ptr_type, "");
+ bld_broad = &bld_base->uint64_bld;
+ } else if (stype == TGSI_TYPE_SIGNED64) {
+ LLVMTypeRef i64ptr_type = LLVMPointerType(LLVMInt64TypeInContext(gallivm->context), 0);
+ scalar_ptr = LLVMBuildBitCast(builder, scalar_ptr, i64ptr_type, "");
+ bld_broad = &bld_base->int64_bld;
}
scalar = LLVMBuildLoad(builder, scalar_ptr, "");
res = lp_build_broadcast_scalar(bld_broad, scalar);
}
- if (stype == TGSI_TYPE_SIGNED || stype == TGSI_TYPE_UNSIGNED || stype == TGSI_TYPE_DOUBLE) {
+ if (stype == TGSI_TYPE_SIGNED || stype == TGSI_TYPE_UNSIGNED || stype == TGSI_TYPE_DOUBLE || stype == TGSI_TYPE_SIGNED64 || stype == TGSI_TYPE_UNSIGNED64) {
struct lp_build_context *bld_fetch = stype_to_fetch(bld_base, stype);
res = LLVMBuildBitCast(builder, res, bld_fetch->vec_type, "");
}
@@ -1403,7 +1417,7 @@ emit_fetch_immediate(
res = emit_fetch_64bit(bld_base, stype, res, bld->immediates[reg->Register.Index][swizzle + 1]);
}
- if (stype == TGSI_TYPE_SIGNED || stype == TGSI_TYPE_UNSIGNED || stype == TGSI_TYPE_DOUBLE) {
+ if (stype == TGSI_TYPE_SIGNED || stype == TGSI_TYPE_UNSIGNED || tgsi_type_is_64bit(stype)) {
struct lp_build_context *bld_fetch = stype_to_fetch(bld_base, stype);
res = LLVMBuildBitCast(builder, res, bld_fetch->vec_type, "");
}
@@ -1480,7 +1494,7 @@ emit_fetch_input(
assert(res);
- if (stype == TGSI_TYPE_SIGNED || stype == TGSI_TYPE_UNSIGNED || stype == TGSI_TYPE_DOUBLE) {
+ if (stype == TGSI_TYPE_SIGNED || stype == TGSI_TYPE_UNSIGNED || tgsi_type_is_64bit(stype)) {
struct lp_build_context *bld_fetch = stype_to_fetch(bld_base, stype);
res = LLVMBuildBitCast(builder, res, bld_fetch->vec_type, "");
}
@@ -1617,7 +1631,11 @@ emit_fetch_temporary(
}
}
- if (stype == TGSI_TYPE_SIGNED || stype == TGSI_TYPE_UNSIGNED || stype == TGSI_TYPE_DOUBLE) {
+ if (stype == TGSI_TYPE_SIGNED ||
+ stype == TGSI_TYPE_UNSIGNED ||
+ stype == TGSI_TYPE_DOUBLE ||
+ stype == TGSI_TYPE_SIGNED64 ||
+ stype == TGSI_TYPE_UNSIGNED64) {
struct lp_build_context *bld_fetch = stype_to_fetch(bld_base, stype);
res = LLVMBuildBitCast(builder, res, bld_fetch->vec_type, "");
}
@@ -3045,6 +3063,8 @@ void lp_emit_immediate_soa(
break;
case TGSI_IMM_FLOAT64:
+ case TGSI_IMM_UINT64:
+ case TGSI_IMM_INT64:
case TGSI_IMM_UINT32:
for( i = 0; i < size; ++i ) {
LLVMValueRef tmp = lp_build_const_vec(gallivm, bld_base->uint_bld.type, imm->u[i].Uint);
@@ -3902,6 +3922,18 @@ lp_build_tgsi_soa(struct gallivm_state *gallivm,
dbl_type.width *= 2;
lp_build_context_init(&bld.bld_base.dbl_bld, gallivm, dbl_type);
}
+ {
+ struct lp_type uint64_type;
+ uint64_type = lp_uint_type(type);
+ uint64_type.width *= 2;
+ lp_build_context_init(&bld.bld_base.uint64_bld, gallivm, uint64_type);
+ }
+ {
+ struct lp_type int64_type;
+ int64_type = lp_int_type(type);
+ int64_type.width *= 2;
+ lp_build_context_init(&bld.bld_base.int64_bld, gallivm, int64_type);
+ }
bld.mask = mask;
bld.inputs = inputs;
bld.outputs = outputs;
diff --git a/src/gallium/auxiliary/tgsi/tgsi_info.h b/src/gallium/auxiliary/tgsi/tgsi_info.h
index 8830f5a380..e60888fec8 100644
--- a/src/gallium/auxiliary/tgsi/tgsi_info.h
+++ b/src/gallium/auxiliary/tgsi/tgsi_info.h
@@ -105,7 +105,8 @@ enum tgsi_opcode_type {
static inline bool tgsi_type_is_64bit(enum tgsi_opcode_type type)
{
- if (type == TGSI_TYPE_DOUBLE)
+ if (type == TGSI_TYPE_DOUBLE || type == TGSI_TYPE_UNSIGNED64 ||
+ type == TGSI_TYPE_SIGNED64)
return true;
return false;
}
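
As with the 32-bit shifts, the U64SHL/U64SHR/I64SHR CPU paths above mask the shift count to the operand width minus one, because LLVM (like C) leaves shifts by the full bit width or more undefined. A minimal scalar sketch of the same idea (illustrative only, not part of the patch):

    #include <stdint.h>

    /* Hypothetical helper for illustration: 63 == 64-bit width - 1,
     * matching the mask built with lp_build_const_vec() above. */
    static uint64_t safe_u64shl(uint64_t value, uint64_t count)
    {
        return value << (count & 63);
    }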