-rw-r--r--  src/amd/common/ac_nir.c | 3
-rw-r--r--  src/amd/common/ac_nir_lower_tess_io_to_mem.c | 4
-rw-r--r--  src/amd/vulkan/nir/radv_nir_lower_primitive_shading_rate.c | 2
-rw-r--r--  src/amd/vulkan/radv_rt_shader.c | 4
-rw-r--r--  src/asahi/compiler/agx_nir_lower_shared_bitsize.c | 2
-rw-r--r--  src/asahi/compiler/agx_nir_lower_texture.c | 3
-rw-r--r--  src/broadcom/compiler/v3d_nir_lower_image_load_store.c | 2
-rw-r--r--  src/broadcom/compiler/v3d_nir_lower_io.c | 3
-rw-r--r--  src/broadcom/compiler/v3d_nir_lower_load_store_bitsize.c | 2
-rw-r--r--  src/broadcom/compiler/v3d_nir_lower_scratch.c | 5
-rw-r--r--  src/broadcom/vulkan/v3dv_pipeline.c | 4
-rw-r--r--  src/compiler/glsl/gl_nir_lower_buffers.c | 2
-rw-r--r--  src/compiler/glsl/gl_nir_lower_samplers_as_deref.c | 2
-rw-r--r--  src/compiler/nir/nir_deref.c | 2
-rw-r--r--  src/compiler/nir/nir_functions.c | 3
-rw-r--r--  src/compiler/nir/nir_lower_alpha_test.c | 4
-rw-r--r--  src/compiler/nir/nir_lower_array_deref_of_vec.c | 4
-rw-r--r--  src/compiler/nir/nir_lower_clamp_color_outputs.c | 2
-rw-r--r--  src/compiler/nir/nir_lower_clip_disable.c | 4
-rw-r--r--  src/compiler/nir/nir_lower_clip_halfz.c | 2
-rw-r--r--  src/compiler/nir/nir_lower_discard_if.c | 2
-rw-r--r--  src/compiler/nir/nir_lower_io.c | 6
-rw-r--r--  src/compiler/nir/nir_lower_io_arrays_to_elements.c | 2
-rw-r--r--  src/compiler/nir/nir_lower_io_to_scalar.c | 8
-rw-r--r--  src/compiler/nir/nir_lower_locals_to_regs.c | 2
-rw-r--r--  src/compiler/nir/nir_lower_mediump.c | 3
-rw-r--r--  src/compiler/nir/nir_lower_samplers.c | 2
-rw-r--r--  src/compiler/nir/nir_lower_ssbo.c | 2
-rw-r--r--  src/compiler/nir/nir_lower_subgroups.c | 5
-rw-r--r--  src/compiler/nir/nir_lower_tex.c | 12
-rw-r--r--  src/compiler/nir/nir_lower_texcoord_replace.c | 2
-rw-r--r--  src/compiler/nir/nir_lower_ubo_vec4.c | 2
-rw-r--r--  src/compiler/nir/nir_lower_uniforms_to_ubo.c | 4
-rw-r--r--  src/compiler/nir/nir_lower_viewport_transform.c | 2
-rw-r--r--  src/compiler/nir/nir_lower_wpos_ytransform.c | 2
-rw-r--r--  src/compiler/nir/nir_lower_wrmasks.c | 5
-rw-r--r--  src/compiler/nir/nir_normalize_cubemap_coords.c | 2
-rw-r--r--  src/compiler/nir/nir_opt_large_constants.c | 2
-rw-r--r--  src/compiler/nir/nir_split_64bit_vec3_and_vec4.c | 2
-rw-r--r--  src/freedreno/ir3/ir3_nir.c | 2
-rw-r--r--  src/freedreno/ir3/ir3_nir_analyze_ubo_ranges.c | 6
-rw-r--r--  src/freedreno/ir3/ir3_nir_lower_64b.c | 12
-rw-r--r--  src/freedreno/ir3/ir3_nir_lower_wide_load_store.c | 6
-rw-r--r--  src/freedreno/vulkan/tu_shader.cc | 4
-rw-r--r--  src/gallium/drivers/crocus/crocus_program.c | 4
-rw-r--r--  src/gallium/drivers/d3d12/d3d12_nir_passes.c | 8
-rw-r--r--  src/gallium/drivers/etnaviv/etnaviv_nir_lower_ubo_to_uniform.c | 2
-rw-r--r--  src/gallium/drivers/freedreno/ir3/ir3_descriptor.c | 2
-rw-r--r--  src/gallium/drivers/iris/iris_program.c | 4
-rw-r--r--  src/gallium/drivers/lima/ir/lima_nir_lower_txp.c | 5
-rw-r--r--  src/gallium/drivers/r600/sfn/sfn_nir_lower_64bit.cpp | 24
-rw-r--r--  src/gallium/drivers/r600/sfn/sfn_nir_lower_fs_out_to_vector.cpp | 2
-rw-r--r--  src/gallium/drivers/r600/sfn/sfn_nir_lower_tex.cpp | 10
-rw-r--r--  src/gallium/drivers/r600/sfn/sfn_nir_vectorize_vs_inputs.c | 2
-rw-r--r--  src/gallium/drivers/zink/zink_lower_cubemap_to_array.c | 2
-rw-r--r--  src/gallium/frontends/lavapipe/lvp_lower_vulkan_resource.c | 8
-rw-r--r--  src/gallium/frontends/rusticl/rusticl_nir.c | 2
-rw-r--r--  src/intel/compiler/brw_mesh.cpp | 2
-rw-r--r--  src/intel/compiler/brw_nir.c | 4
-rw-r--r--  src/intel/compiler/brw_nir_lower_intersection_shader.c | 4
-rw-r--r--  src/intel/compiler/brw_nir_lower_ray_queries.c | 2
-rw-r--r--  src/intel/vulkan/anv_nir_apply_pipeline_layout.c | 4
-rw-r--r--  src/intel/vulkan_hasvk/anv_nir_apply_pipeline_layout.c | 6
-rw-r--r--  src/mesa/state_tracker/st_atifs_to_nir.c | 2
-rw-r--r--  src/mesa/state_tracker/st_nir_lower_fog.c | 2
-rw-r--r--  src/microsoft/clc/clc_compiler.c | 2
-rw-r--r--  src/microsoft/compiler/dxil_nir.c | 2
-rw-r--r--  src/microsoft/spirv_to_dxil/dxil_spirv_nir.c | 4
-rw-r--r--  src/panfrost/compiler/bifrost_compile.c | 8
-rw-r--r--  src/panfrost/midgard/midgard_errata_lod.c | 2
-rw-r--r--  src/panfrost/midgard/midgard_nir_lower_image_bitsize.c | 3
-rw-r--r--  src/panfrost/util/pan_lower_framebuffer.c | 2
72 files changed, 135 insertions, 145 deletions
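
Every hunk below applies the same mechanical substitution: with NIR sources now always carrying SSA defs, the nir_ssa_for_src() helper (which could insert a mov or vec to produce an SSA def of the requested component count) reduces to reading the source's ->ssa field directly, so the calls are replaced and the now-redundant component-count arguments are dropped (along with locals that only fed them, e.g. num_comp in nir_lower_wrmasks.c). A minimal before/after sketch of the pattern, taken from the ac_nir.c hunk that follows:

    /* before: wrap the offset source to obtain a 1-component SSA def */
    nir_def *offset_op =
       nir_imul(b, base_stride,
                nir_ssa_for_src(b, *nir_get_io_offset_src(intrin), 1));

    /* after: the source is already SSA, so read its def directly */
    nir_def *offset_op =
       nir_imul(b, base_stride, nir_get_io_offset_src(intrin)->ssa);
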
diff --git a/src/amd/common/ac_nir.c b/src/amd/common/ac_nir.c
index 60f904413ff..1ef90161a1f 100644
--- a/src/amd/common/ac_nir.c
+++ b/src/amd/common/ac_nir.c
@@ -485,7 +485,8 @@ ac_nir_calc_io_offset(nir_builder *b,
* so the instruction effectively reads/writes another input/output
* when it has an offset
*/
- nir_def *offset_op = nir_imul(b, base_stride, nir_ssa_for_src(b, *nir_get_io_offset_src(intrin), 1));
+ nir_def *offset_op = nir_imul(b, base_stride,
+ nir_get_io_offset_src(intrin)->ssa);
/* component is in bytes */
unsigned const_op = nir_intrinsic_component(intrin) * component_stride;
diff --git a/src/amd/common/ac_nir_lower_tess_io_to_mem.c b/src/amd/common/ac_nir_lower_tess_io_to_mem.c
index 566b8956137..2e28c28315d 100644
--- a/src/amd/common/ac_nir_lower_tess_io_to_mem.c
+++ b/src/amd/common/ac_nir_lower_tess_io_to_mem.c
@@ -334,7 +334,7 @@ hs_output_lds_offset(nir_builder *b,
}
if (per_vertex) {
- nir_def *vertex_index = nir_ssa_for_src(b, *nir_get_io_arrayed_index_src(intrin), 1);
+ nir_def *vertex_index = nir_get_io_arrayed_index_src(intrin)->ssa;
nir_def *vertex_index_off = nir_imul_imm(b, vertex_index, output_vertex_size);
off = nir_iadd_nuw(b, off, vertex_index_off);
@@ -361,7 +361,7 @@ hs_per_vertex_output_vmem_offset(nir_builder *b,
nir_def *rel_patch_id = nir_load_tess_rel_patch_id_amd(b);
nir_def *patch_offset = nir_imul(b, rel_patch_id, nir_imul_imm(b, out_vertices_per_patch, 16u));
- nir_def *vertex_index = nir_ssa_for_src(b, *nir_get_io_arrayed_index_src(intrin), 1);
+ nir_def *vertex_index = nir_get_io_arrayed_index_src(intrin)->ssa;
nir_def *vertex_index_off = nir_imul_imm(b, vertex_index, 16u);
return nir_iadd_nuw(b, nir_iadd_nuw(b, patch_offset, vertex_index_off), io_offset);
diff --git a/src/amd/vulkan/nir/radv_nir_lower_primitive_shading_rate.c b/src/amd/vulkan/nir/radv_nir_lower_primitive_shading_rate.c
index a0a291a7af5..e57c85b37dc 100644
--- a/src/amd/vulkan/nir/radv_nir_lower_primitive_shading_rate.c
+++ b/src/amd/vulkan/nir/radv_nir_lower_primitive_shading_rate.c
@@ -51,7 +51,7 @@ radv_nir_lower_primitive_shading_rate(nir_shader *nir, enum amd_gfx_level gfx_le
b.cursor = nir_before_instr(instr);
- nir_def *val = nir_ssa_for_src(&b, intr->src[1], 1);
+ nir_def *val = intr->src[1].ssa;
/* x_rate = (shadingRate & (Horizontal2Pixels | Horizontal4Pixels)) ? 0x1 : 0x0; */
nir_def *x_rate = nir_iand_imm(&b, val, 12);
diff --git a/src/amd/vulkan/radv_rt_shader.c b/src/amd/vulkan/radv_rt_shader.c
index f99f478a55e..f812d156aa7 100644
--- a/src/amd/vulkan/radv_rt_shader.c
+++ b/src/amd/vulkan/radv_rt_shader.c
@@ -992,8 +992,8 @@ nir_lower_intersection_shader(nir_shader *intersection, nir_shader *any_hit)
continue;
b->cursor = nir_instr_remove(&intrin->instr);
- nir_def *hit_t = nir_ssa_for_src(b, intrin->src[0], 1);
- nir_def *hit_kind = nir_ssa_for_src(b, intrin->src[1], 1);
+ nir_def *hit_t = intrin->src[0].ssa;
+ nir_def *hit_kind = intrin->src[1].ssa;
nir_def *min_t = nir_load_ray_t_min(b);
nir_def *max_t = nir_load_ray_t_max(b);
diff --git a/src/asahi/compiler/agx_nir_lower_shared_bitsize.c b/src/asahi/compiler/agx_nir_lower_shared_bitsize.c
index b80045d1416..6f91f8358f2 100644
--- a/src/asahi/compiler/agx_nir_lower_shared_bitsize.c
+++ b/src/asahi/compiler/agx_nir_lower_shared_bitsize.c
@@ -25,7 +25,7 @@ pass(struct nir_builder *b, nir_intrinsic_instr *intr, UNUSED void *data)
return false;
b->cursor = nir_before_instr(&intr->instr);
- nir_src_rewrite(offset, nir_u2u16(b, nir_ssa_for_src(b, *offset, 1)));
+ nir_src_rewrite(offset, nir_u2u16(b, offset->ssa));
return true;
}
diff --git a/src/asahi/compiler/agx_nir_lower_texture.c b/src/asahi/compiler/agx_nir_lower_texture.c
index c940d1299b4..ccaa7331ccb 100644
--- a/src/asahi/compiler/agx_nir_lower_texture.c
+++ b/src/asahi/compiler/agx_nir_lower_texture.c
@@ -84,8 +84,7 @@ agx_txs(nir_builder *b, nir_tex_instr *tex)
/* Add LOD offset to first level to get the interesting LOD */
int lod_idx = nir_tex_instr_src_index(tex, nir_tex_src_lod);
if (lod_idx >= 0) {
- lod = nir_iadd(
- b, lod, nir_u2u32(b, nir_ssa_for_src(b, tex->src[lod_idx].src, 1)));
+ lod = nir_iadd(b, lod, nir_u2u32(b, tex->src[lod_idx].src.ssa));
}
if (tex->sampler_dim == GLSL_SAMPLER_DIM_2D && tex->is_array) {
diff --git a/src/broadcom/compiler/v3d_nir_lower_image_load_store.c b/src/broadcom/compiler/v3d_nir_lower_image_load_store.c
index 4c72bcb6afa..5f8363377cb 100644
--- a/src/broadcom/compiler/v3d_nir_lower_image_load_store.c
+++ b/src/broadcom/compiler/v3d_nir_lower_image_load_store.c
@@ -104,7 +104,7 @@ v3d_nir_lower_image_store(nir_builder *b, nir_intrinsic_instr *instr)
b->cursor = nir_before_instr(&instr->instr);
nir_def *color = nir_trim_vector(b,
- nir_ssa_for_src(b, instr->src[3], 4),
+ instr->src[3].ssa,
num_components);
nir_def *formatted = NULL;
diff --git a/src/broadcom/compiler/v3d_nir_lower_io.c b/src/broadcom/compiler/v3d_nir_lower_io.c
index 9a9061488d6..7d1b9ef030d 100644
--- a/src/broadcom/compiler/v3d_nir_lower_io.c
+++ b/src/broadcom/compiler/v3d_nir_lower_io.c
@@ -140,8 +140,7 @@ v3d_nir_lower_vpm_output(struct v3d_compile *c, nir_builder *b,
int start_comp = nir_intrinsic_component(intr);
unsigned location = nir_intrinsic_io_semantics(intr).location;
- nir_def *src = nir_ssa_for_src(b, intr->src[0],
- intr->num_components);
+ nir_def *src = intr->src[0].ssa;
/* Save off the components of the position for the setup of VPM inputs
* read by fixed function HW.
*/
diff --git a/src/broadcom/compiler/v3d_nir_lower_load_store_bitsize.c b/src/broadcom/compiler/v3d_nir_lower_load_store_bitsize.c
index 7644cb5b144..0caf5dbc92c 100644
--- a/src/broadcom/compiler/v3d_nir_lower_load_store_bitsize.c
+++ b/src/broadcom/compiler/v3d_nir_lower_load_store_bitsize.c
@@ -181,7 +181,7 @@ lower_store_bitsize(nir_builder *b,
if (nir_src_bit_size(intr->src[value_idx]) == 32)
return false;
- nir_def *value = nir_ssa_for_src(b, intr->src[value_idx], num_comp);
+ nir_def *value = intr->src[value_idx].ssa;
b->cursor = nir_before_instr(&intr->instr);
diff --git a/src/broadcom/compiler/v3d_nir_lower_scratch.c b/src/broadcom/compiler/v3d_nir_lower_scratch.c
index 789020f80f6..93ed1bb6e26 100644
--- a/src/broadcom/compiler/v3d_nir_lower_scratch.c
+++ b/src/broadcom/compiler/v3d_nir_lower_scratch.c
@@ -38,7 +38,7 @@ static nir_def *
v3d_nir_scratch_offset(nir_builder *b, nir_intrinsic_instr *instr)
{
bool is_store = instr->intrinsic == nir_intrinsic_store_scratch;
- nir_def *offset = nir_ssa_for_src(b, instr->src[is_store ? 1 : 0], 1);
+ nir_def *offset = instr->src[is_store ? 1 : 0].ssa;
assert(nir_intrinsic_align_mul(instr) >= 4);
assert(nir_intrinsic_align_offset(instr) == 0);
@@ -88,8 +88,7 @@ v3d_nir_lower_store_scratch(nir_builder *b, nir_intrinsic_instr *instr)
b->cursor = nir_before_instr(&instr->instr);
nir_def *offset = v3d_nir_scratch_offset(b, instr);
- nir_def *value = nir_ssa_for_src(b, instr->src[0],
- instr->num_components);
+ nir_def *value = instr->src[0].ssa;
for (int i = 0; i < instr->num_components; i++) {
if (!(nir_intrinsic_write_mask(instr) & (1 << i)))
diff --git a/src/broadcom/vulkan/v3dv_pipeline.c b/src/broadcom/vulkan/v3dv_pipeline.c
index 908e4ff3965..fa9789068be 100644
--- a/src/broadcom/vulkan/v3dv_pipeline.c
+++ b/src/broadcom/vulkan/v3dv_pipeline.c
@@ -622,7 +622,7 @@ lower_tex_src(nir_builder *b,
}
index = nir_iadd(b, index,
- nir_imul_imm(b, nir_ssa_for_src(b, deref->arr.index, 1),
+ nir_imul_imm(b, deref->arr.index.ssa,
array_elements));
}
@@ -760,7 +760,7 @@ lower_image_deref(nir_builder *b,
}
index = nir_iadd(b, index,
- nir_imul_imm(b, nir_ssa_for_src(b, deref->arr.index, 1),
+ nir_imul_imm(b, deref->arr.index.ssa,
array_elements));
}
diff --git a/src/compiler/glsl/gl_nir_lower_buffers.c b/src/compiler/glsl/gl_nir_lower_buffers.c
index c46b976d6e7..59126f02369 100644
--- a/src/compiler/glsl/gl_nir_lower_buffers.c
+++ b/src/compiler/glsl/gl_nir_lower_buffers.c
@@ -56,7 +56,7 @@ get_block_array_index(nir_builder *b, nir_deref_instr *deref,
const_array_offset += arr_index * array_elements;
} else {
- nir_def *arr_index = nir_ssa_for_src(b, deref->arr.index, 1);
+ nir_def *arr_index = deref->arr.index.ssa;
arr_index = nir_umin(b, arr_index, nir_imm_int(b, arr_size - 1));
nir_def *arr_offset = nir_amul_imm(b, arr_index, array_elements);
if (nonconst_index)
diff --git a/src/compiler/glsl/gl_nir_lower_samplers_as_deref.c b/src/compiler/glsl/gl_nir_lower_samplers_as_deref.c
index 7c333af7d4c..2d45c257e6f 100644
--- a/src/compiler/glsl/gl_nir_lower_samplers_as_deref.c
+++ b/src/compiler/glsl/gl_nir_lower_samplers_as_deref.c
@@ -226,7 +226,7 @@ lower_deref(nir_builder *b, struct lower_samplers_as_deref_state *state,
assert((*p)->deref_type == nir_deref_type_array);
new_deref = nir_build_deref_array(b, new_deref,
- nir_ssa_for_src(b, (*p)->arr.index, 1));
+ (*p)->arr.index.ssa);
}
return new_deref;
diff --git a/src/compiler/nir/nir_deref.c b/src/compiler/nir/nir_deref.c
index b67620c8980..d26ef268707 100644
--- a/src/compiler/nir/nir_deref.c
+++ b/src/compiler/nir/nir_deref.c
@@ -351,7 +351,7 @@ nir_build_deref_offset(nir_builder *b, nir_deref_instr *deref,
switch ((*p)->deref_type) {
case nir_deref_type_array:
case nir_deref_type_ptr_as_array: {
- nir_def *index = nir_ssa_for_src(b, (*p)->arr.index, 1);
+ nir_def *index = (*p)->arr.index.ssa;
int stride = type_get_array_stride((*p)->type, size_align);
offset = nir_iadd(b, offset, nir_amul_imm(b, index, stride));
break;
diff --git a/src/compiler/nir/nir_functions.c b/src/compiler/nir/nir_functions.c
index 6d39ac06dc3..baefb023b69 100644
--- a/src/compiler/nir/nir_functions.c
+++ b/src/compiler/nir/nir_functions.c
@@ -179,8 +179,7 @@ static bool inline_functions_pass(nir_builder *b,
const unsigned num_params = call->num_params;
NIR_VLA(nir_def *, params, num_params);
for (unsigned i = 0; i < num_params; i++) {
- params[i] = nir_ssa_for_src(b, call->params[i],
- call->callee->params[i].num_components);
+ params[i] = call->params[i].ssa;
}
nir_inline_function_impl(b, call->callee->impl, params, NULL);
diff --git a/src/compiler/nir/nir_lower_alpha_test.c b/src/compiler/nir/nir_lower_alpha_test.c
index f4e9ef0e250..0ff36ed913b 100644
--- a/src/compiler/nir/nir_lower_alpha_test.c
+++ b/src/compiler/nir/nir_lower_alpha_test.c
@@ -85,10 +85,10 @@ nir_lower_alpha_test(nir_shader *shader, enum compare_func func,
if (alpha_to_one) {
alpha = nir_imm_float(&b, 1.0);
} else if (intr->intrinsic == nir_intrinsic_store_deref) {
- alpha = nir_channel(&b, nir_ssa_for_src(&b, intr->src[1], 4),
+ alpha = nir_channel(&b, intr->src[1].ssa,
3);
} else {
- alpha = nir_channel(&b, nir_ssa_for_src(&b, intr->src[0], 4),
+ alpha = nir_channel(&b, intr->src[0].ssa,
3);
}
diff --git a/src/compiler/nir/nir_lower_array_deref_of_vec.c b/src/compiler/nir/nir_lower_array_deref_of_vec.c
index 8b87b958213..c5da05798c9 100644
--- a/src/compiler/nir/nir_lower_array_deref_of_vec.c
+++ b/src/compiler/nir/nir_lower_array_deref_of_vec.c
@@ -122,7 +122,7 @@ nir_lower_array_deref_of_vec_impl(nir_function_impl *impl,
if (!(options & nir_lower_indirect_array_deref_of_vec_store))
continue;
- nir_def *index = nir_ssa_for_src(&b, deref->arr.index, 1);
+ nir_def *index = deref->arr.index.ssa;
build_write_masked_stores(&b, vec_deref, value, index,
0, num_components);
}
@@ -143,7 +143,7 @@ nir_lower_array_deref_of_vec_impl(nir_function_impl *impl,
intrin->def.num_components = num_components;
intrin->num_components = num_components;
- nir_def *index = nir_ssa_for_src(&b, deref->arr.index, 1);
+ nir_def *index = deref->arr.index.ssa;
nir_def *scalar =
nir_vector_extract(&b, &intrin->def, index);
if (scalar->parent_instr->type == nir_instr_type_undef) {
diff --git a/src/compiler/nir/nir_lower_clamp_color_outputs.c b/src/compiler/nir/nir_lower_clamp_color_outputs.c
index 8d95f0f9095..c13e90705d7 100644
--- a/src/compiler/nir/nir_lower_clamp_color_outputs.c
+++ b/src/compiler/nir/nir_lower_clamp_color_outputs.c
@@ -80,7 +80,7 @@ lower_intrinsic(nir_builder *b, nir_intrinsic_instr *intr, nir_shader *shader)
if (is_color_output(shader, out)) {
b->cursor = nir_before_instr(&intr->instr);
int src = intr->intrinsic == nir_intrinsic_store_deref ? 1 : 0;
- s = nir_ssa_for_src(b, intr->src[src], intr->num_components);
+ s = intr->src[src].ssa;
s = nir_fsat(b, s);
nir_src_rewrite(&intr->src[src], s);
}
diff --git a/src/compiler/nir/nir_lower_clip_disable.c b/src/compiler/nir/nir_lower_clip_disable.c
index e1682dc0951..7bb9c1c8aba 100644
--- a/src/compiler/nir/nir_lower_clip_disable.c
+++ b/src/compiler/nir/nir_lower_clip_disable.c
@@ -90,7 +90,7 @@ lower_clip_plane_store(nir_builder *b, nir_intrinsic_instr *instr,
if (!(clip_plane_enable & (1 << (start + i))))
components[i] = nir_imm_int(b, 0);
else
- components[i] = nir_channel(b, nir_ssa_for_src(b, instr->src[1], nir_src_num_components(instr->src[1])), i);
+ components[i] = nir_channel(b, instr->src[1].ssa, i);
} else
components[i] = nir_undef(b, 1, 32);
}
@@ -106,7 +106,7 @@ lower_clip_plane_store(nir_builder *b, nir_intrinsic_instr *instr,
nir_store_deref(b, deref, nir_imm_int(b, 0), 1);
} else {
/* storing using a variable index */
- nir_def *index = nir_ssa_for_src(b, deref->arr.index, 1);
+ nir_def *index = deref->arr.index.ssa;
unsigned length = glsl_get_length(nir_deref_instr_parent(deref)->type);
recursive_if_chain(b, deref, instr->src[1].ssa, clip_plane_enable, index, 0, length);
diff --git a/src/compiler/nir/nir_lower_clip_halfz.c b/src/compiler/nir/nir_lower_clip_halfz.c
index a1fb43f88b7..2b049cc7c5d 100644
--- a/src/compiler/nir/nir_lower_clip_halfz.c
+++ b/src/compiler/nir/nir_lower_clip_halfz.c
@@ -37,7 +37,7 @@ lower_pos_write(nir_builder *b, nir_intrinsic_instr *intr,
b->cursor = nir_before_instr(&intr->instr);
- nir_def *pos = nir_ssa_for_src(b, intr->src[1], 4);
+ nir_def *pos = intr->src[1].ssa;
nir_def *def = nir_vec4(b,
nir_channel(b, pos, 0),
nir_channel(b, pos, 1),
diff --git a/src/compiler/nir/nir_lower_discard_if.c b/src/compiler/nir/nir_lower_discard_if.c
index e0522086b55..018664531ca 100644
--- a/src/compiler/nir/nir_lower_discard_if.c
+++ b/src/compiler/nir/nir_lower_discard_if.c
@@ -48,7 +48,7 @@ lower_discard_if(nir_builder *b, nir_intrinsic_instr *instr, void *cb_data)
b->cursor = nir_before_instr(&instr->instr);
- nir_if *if_stmt = nir_push_if(b, nir_ssa_for_src(b, instr->src[0], 1));
+ nir_if *if_stmt = nir_push_if(b, instr->src[0].ssa);
switch (instr->intrinsic) {
case nir_intrinsic_discard_if:
nir_discard(b);
diff --git a/src/compiler/nir/nir_lower_io.c b/src/compiler/nir/nir_lower_io.c
index 7d72ba9851d..34f86c00475 100644
--- a/src/compiler/nir/nir_lower_io.c
+++ b/src/compiler/nir/nir_lower_io.c
@@ -201,7 +201,7 @@ get_io_offset(nir_builder *b, nir_deref_instr *deref,
*/
if (array_index != NULL) {
assert((*p)->deref_type == nir_deref_type_array);
- *array_index = nir_ssa_for_src(b, (*p)->arr.index, 1);
+ *array_index = (*p)->arr.index.ssa;
p++;
}
@@ -225,7 +225,7 @@ get_io_offset(nir_builder *b, nir_deref_instr *deref,
unsigned size = type_size((*p)->type, bts);
nir_def *mul =
- nir_amul_imm(b, nir_ssa_for_src(b, (*p)->arr.index, 1), size);
+ nir_amul_imm(b, (*p)->arr.index.ssa, size);
offset = nir_iadd(b, offset, mul);
} else if ((*p)->deref_type == nir_deref_type_struct) {
@@ -1847,7 +1847,7 @@ nir_explicit_io_address_from_deref(nir_builder *b, nir_deref_instr *deref,
assert(stride > 0);
unsigned offset_bit_size = addr_get_offset_bit_size(base_addr, addr_format);
- nir_def *index = nir_ssa_for_src(b, deref->arr.index, 1);
+ nir_def *index = deref->arr.index.ssa;
nir_def *offset;
/* If the access chain has been declared in-bounds, then we know it doesn't
diff --git a/src/compiler/nir/nir_lower_io_arrays_to_elements.c b/src/compiler/nir/nir_lower_io_arrays_to_elements.c
index d2e6757d389..8c8780adc85 100644
--- a/src/compiler/nir/nir_lower_io_arrays_to_elements.c
+++ b/src/compiler/nir/nir_lower_io_arrays_to_elements.c
@@ -47,7 +47,7 @@ get_io_offset(nir_builder *b, nir_deref_instr *deref, nir_variable *var,
* inputs), skip the outermost array index. Process the rest normally.
*/
if (nir_is_arrayed_io(var, b->shader->info.stage)) {
- *array_index = nir_ssa_for_src(b, (*p)->arr.index, 1);
+ *array_index = (*p)->arr.index.ssa;
p++;
}
diff --git a/src/compiler/nir/nir_lower_io_to_scalar.c b/src/compiler/nir/nir_lower_io_to_scalar.c
index 7b833878fb0..751402ddaa8 100644
--- a/src/compiler/nir/nir_lower_io_to_scalar.c
+++ b/src/compiler/nir/nir_lower_io_to_scalar.c
@@ -130,7 +130,7 @@ lower_store_output_to_scalar(nir_builder *b, nir_intrinsic_instr *intr)
{
b->cursor = nir_before_instr(&intr->instr);
- nir_def *value = nir_ssa_for_src(b, intr->src[0], intr->num_components);
+ nir_def *value = intr->src[0].ssa;
for (unsigned i = 0; i < intr->num_components; i++) {
if (!(nir_intrinsic_write_mask(intr) & (1 << i)))
@@ -195,7 +195,7 @@ lower_store_to_scalar(nir_builder *b, nir_intrinsic_instr *intr)
{
b->cursor = nir_before_instr(&intr->instr);
- nir_def *value = nir_ssa_for_src(b, intr->src[0], intr->num_components);
+ nir_def *value = intr->src[0].ssa;
nir_def *base_offset = nir_get_io_offset_src(intr)->ssa;
/* iterate wrmask instead of num_components to handle split components */
@@ -345,7 +345,7 @@ clone_deref_array(nir_builder *b, nir_deref_instr *dst_tail,
dst_tail = clone_deref_array(b, dst_tail, parent);
return nir_build_deref_array(b, dst_tail,
- nir_ssa_for_src(b, src_head->arr.index, 1));
+ src_head->arr.index.ssa);
}
static void
@@ -412,7 +412,7 @@ lower_store_output_to_scalar_early(nir_builder *b, nir_intrinsic_instr *intr,
{
b->cursor = nir_before_instr(&intr->instr);
- nir_def *value = nir_ssa_for_src(b, intr->src[1], intr->num_components);
+ nir_def *value = intr->src[1].ssa;
nir_variable **chan_vars = get_channel_variables(split_outputs, var);
for (unsigned i = 0; i < intr->num_components; i++) {
diff --git a/src/compiler/nir/nir_lower_locals_to_regs.c b/src/compiler/nir/nir_lower_locals_to_regs.c
index e2e071610e2..472a5147230 100644
--- a/src/compiler/nir/nir_lower_locals_to_regs.c
+++ b/src/compiler/nir/nir_lower_locals_to_regs.c
@@ -172,7 +172,7 @@ get_deref_reg_location(nir_deref_instr *deref,
base_offset = 0;
}
- nir_def *index = nir_i2iN(b, nir_ssa_for_src(b, d->arr.index, 1), 32);
+ nir_def *index = nir_i2iN(b, d->arr.index.ssa, 32);
nir_def *offset = nir_imul_imm(b, index, inner_array_size);
/* Avoid emitting iadd with 0, which is otherwise common, since this
diff --git a/src/compiler/nir/nir_lower_mediump.c b/src/compiler/nir/nir_lower_mediump.c
index 113c3dce8c4..e2604996c25 100644
--- a/src/compiler/nir/nir_lower_mediump.c
+++ b/src/compiler/nir/nir_lower_mediump.c
@@ -712,8 +712,7 @@ nir_legalize_16bit_sampler_srcs(nir_shader *nir,
b.cursor = nir_before_instr(&tex->instr);
nir_def *conv =
- convert(&b, nir_ssa_for_src(&b, tex->src[i].src,
- tex->src[i].src.ssa->num_components));
+ convert(&b, tex->src[i].src.ssa);
nir_src_rewrite(&tex->src[i].src, conv);
changed = true;
}
diff --git a/src/compiler/nir/nir_lower_samplers.c b/src/compiler/nir/nir_lower_samplers.c
index 150d930257f..bf31aebeea8 100644
--- a/src/compiler/nir/nir_lower_samplers.c
+++ b/src/compiler/nir/nir_lower_samplers.c
@@ -76,7 +76,7 @@ lower_tex_src_to_offset(nir_builder *b,
index = nir_iadd(b, index,
nir_imul_imm(b,
- nir_ssa_for_src(b, deref->arr.index, 1),
+ deref->arr.index.ssa,
array_elements));
}
diff --git a/src/compiler/nir/nir_lower_ssbo.c b/src/compiler/nir/nir_lower_ssbo.c
index 51a0171e6b0..56814c9f662 100644
--- a/src/compiler/nir/nir_lower_ssbo.c
+++ b/src/compiler/nir/nir_lower_ssbo.c
@@ -88,7 +88,7 @@ lower_ssbo_instr(nir_builder *b, nir_intrinsic_instr *intr)
nir_src index = intr->src[is_store ? 1 : 0];
nir_src *offset_src = nir_get_io_offset_src(intr);
- nir_def *offset = nir_ssa_for_src(b, *offset_src, 1);
+ nir_def *offset = offset_src->ssa;
nir_def *address =
nir_iadd(b,
diff --git a/src/compiler/nir/nir_lower_subgroups.c b/src/compiler/nir/nir_lower_subgroups.c
index d3bf51c563b..afc31ea2684 100644
--- a/src/compiler/nir/nir_lower_subgroups.c
+++ b/src/compiler/nir/nir_lower_subgroups.c
@@ -110,8 +110,7 @@ lower_subgroup_op_to_scalar(nir_builder *b, nir_intrinsic_instr *intrin,
/* This is safe to call on scalar things but it would be silly */
assert(intrin->def.num_components > 1);
- nir_def *value = nir_ssa_for_src(b, intrin->src[0],
- intrin->num_components);
+ nir_def *value = intrin->src[0].ssa;
nir_def *reads[NIR_MAX_VEC_COMPONENTS];
for (unsigned i = 0; i < intrin->num_components; i++) {
@@ -623,7 +622,7 @@ lower_subgroups_instr(nir_builder *b, nir_instr *instr, void *_options)
case nir_intrinsic_vote_any:
case nir_intrinsic_vote_all:
if (options->lower_vote_trivial)
- return nir_ssa_for_src(b, intrin->src[0], 1);
+ return intrin->src[0].ssa;
break;
case nir_intrinsic_vote_feq:
diff --git a/src/compiler/nir/nir_lower_tex.c b/src/compiler/nir/nir_lower_tex.c
index 617976ce3f6..432eaccd5e3 100644
--- a/src/compiler/nir/nir_lower_tex.c
+++ b/src/compiler/nir/nir_lower_tex.c
@@ -116,7 +116,7 @@ project_src(nir_builder *b, nir_tex_instr *tex)
continue;
}
nir_def *unprojected =
- nir_ssa_for_src(b, tex->src[i].src, nir_tex_instr_src_size(tex, i));
+ tex->src[i].src.ssa;
nir_def *projected = nir_fmul(b, unprojected, inv_proj);
/* Array indices don't get projected, so make an new vector with the
@@ -225,7 +225,7 @@ lower_rect(nir_builder *b, nir_tex_instr *tex)
if (coord_index != -1) {
nir_def *coords =
- nir_ssa_for_src(b, tex->src[coord_index].src, tex->coord_components);
+ tex->src[coord_index].src.ssa;
nir_src_rewrite(&tex->src[coord_index].src, nir_fmul(b, coords, scale));
}
}
@@ -241,7 +241,7 @@ lower_rect_tex_scale(nir_builder *b, nir_tex_instr *tex)
if (coord_index != -1) {
nir_def *coords =
- nir_ssa_for_src(b, tex->src[coord_index].src, tex->coord_components);
+ tex->src[coord_index].src.ssa;
nir_src_rewrite(&tex->src[coord_index].src, nir_fmul(b, coords, scale));
}
}
@@ -916,7 +916,7 @@ lower_txb_to_txl(nir_builder *b, nir_tex_instr *tex)
int bias_idx = nir_tex_instr_src_index(tex, nir_tex_src_bias);
assert(bias_idx >= 0);
- lod = nir_fadd(b, nir_channel(b, lod, 1), nir_ssa_for_src(b, tex->src[bias_idx].src, 1));
+ lod = nir_fadd(b, nir_channel(b, lod, 1), tex->src[bias_idx].src.ssa);
txl->src[tex->num_srcs - 1] = nir_tex_src_for_ssa(nir_tex_src_lod, lod);
nir_def_init(&txl->instr, &txl->def,
@@ -941,7 +941,7 @@ saturate_src(nir_builder *b, nir_tex_instr *tex, unsigned sat_mask)
if (coord_index != -1) {
nir_def *src =
- nir_ssa_for_src(b, tex->src[coord_index].src, tex->coord_components);
+ tex->src[coord_index].src.ssa;
/* split src into components: */
nir_def *comp[4];
@@ -1245,7 +1245,7 @@ nir_lower_txs_lod(nir_builder *b, nir_tex_instr *tex)
unsigned dest_size = nir_tex_instr_dest_size(tex);
b->cursor = nir_before_instr(&tex->instr);
- nir_def *lod = nir_ssa_for_src(b, tex->src[lod_idx].src, 1);
+ nir_def *lod = tex->src[lod_idx].src.ssa;
/* Replace the non-0-LOD in the initial TXS operation by a 0-LOD. */
nir_src_rewrite(&tex->src[lod_idx].src, nir_imm_int(b, 0));
diff --git a/src/compiler/nir/nir_lower_texcoord_replace.c b/src/compiler/nir/nir_lower_texcoord_replace.c
index 2bbc617dc85..17991f3df41 100644
--- a/src/compiler/nir/nir_lower_texcoord_replace.c
+++ b/src/compiler/nir/nir_lower_texcoord_replace.c
@@ -45,7 +45,7 @@ get_io_index(nir_builder *b, nir_deref_instr *deref)
unsigned size = glsl_get_length((*p)->type);
nir_def *mul =
- nir_amul_imm(b, nir_ssa_for_src(b, (*p)->arr.index, 1), size);
+ nir_amul_imm(b, (*p)->arr.index.ssa, size);
offset = nir_iadd(b, offset, mul);
} else
diff --git a/src/compiler/nir/nir_lower_ubo_vec4.c b/src/compiler/nir/nir_lower_ubo_vec4.c
index c73c2292b84..5b81fdd53e5 100644
--- a/src/compiler/nir/nir_lower_ubo_vec4.c
+++ b/src/compiler/nir/nir_lower_ubo_vec4.c
@@ -87,7 +87,7 @@ nir_lower_ubo_vec4_lower(nir_builder *b, nir_instr *instr, void *data)
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
- nir_def *byte_offset = nir_ssa_for_src(b, intr->src[1], 1);
+ nir_def *byte_offset = intr->src[1].ssa;
nir_def *vec4_offset = nir_ushr_imm(b, byte_offset, 4);
unsigned align_mul = nir_intrinsic_align_mul(intr);
diff --git a/src/compiler/nir/nir_lower_uniforms_to_ubo.c b/src/compiler/nir/nir_lower_uniforms_to_ubo.c
index 85fe1966359..573f572b719 100644
--- a/src/compiler/nir/nir_lower_uniforms_to_ubo.c
+++ b/src/compiler/nir/nir_lower_uniforms_to_ubo.c
@@ -58,7 +58,7 @@ nir_lower_uniforms_to_ubo_instr(nir_builder *b, nir_instr *instr, void *data)
/* Increase all UBO binding points by 1. */
if (intr->intrinsic == nir_intrinsic_load_ubo &&
!b->shader->info.first_ubo_is_default_ubo) {
- nir_def *old_idx = nir_ssa_for_src(b, intr->src[0], 1);
+ nir_def *old_idx = intr->src[0].ssa;
nir_def *new_idx = nir_iadd_imm(b, old_idx, 1);
nir_src_rewrite(&intr->src[0], new_idx);
return true;
@@ -66,7 +66,7 @@ nir_lower_uniforms_to_ubo_instr(nir_builder *b, nir_instr *instr, void *data)
if (intr->intrinsic == nir_intrinsic_load_uniform) {
nir_def *ubo_idx = nir_imm_int(b, 0);
- nir_def *uniform_offset = nir_ssa_for_src(b, intr->src[0], 1);
+ nir_def *uniform_offset = intr->src[0].ssa;
assert(intr->def.bit_size >= 8);
nir_def *load_result;
diff --git a/src/compiler/nir/nir_lower_viewport_transform.c b/src/compiler/nir/nir_lower_viewport_transform.c
index 4b4c26e29d1..f99fca1c94d 100644
--- a/src/compiler/nir/nir_lower_viewport_transform.c
+++ b/src/compiler/nir/nir_lower_viewport_transform.c
@@ -55,7 +55,7 @@ lower_viewport_transform_instr(nir_builder *b, nir_intrinsic_instr *intr,
b->cursor = nir_before_instr(&intr->instr);
/* Grab the source and viewport */
- nir_def *input_point = nir_ssa_for_src(b, intr->src[1], 4);
+ nir_def *input_point = intr->src[1].ssa;
nir_def *scale = nir_load_viewport_scale(b);
nir_def *offset = nir_load_viewport_offset(b);
diff --git a/src/compiler/nir/nir_lower_wpos_ytransform.c b/src/compiler/nir/nir_lower_wpos_ytransform.c
index 0a61420077e..d44a8a1ffd0 100644
--- a/src/compiler/nir/nir_lower_wpos_ytransform.c
+++ b/src/compiler/nir/nir_lower_wpos_ytransform.c
@@ -257,7 +257,7 @@ lower_interp_deref_or_load_baryc_at_offset(lower_wpos_ytransform_state *state,
b->cursor = nir_before_instr(&intr->instr);
- offset = nir_ssa_for_src(b, intr->src[offset_src], 2);
+ offset = intr->src[offset_src].ssa;
flip_y = nir_fmul(b, nir_channel(b, offset, 1),
nir_channel(b, get_transform(state), 0));
nir_src_rewrite(&intr->src[offset_src],
diff --git a/src/compiler/nir/nir_lower_wrmasks.c b/src/compiler/nir/nir_lower_wrmasks.c
index e57ef4957f7..e608662147e 100644
--- a/src/compiler/nir/nir_lower_wrmasks.c
+++ b/src/compiler/nir/nir_lower_wrmasks.c
@@ -103,15 +103,14 @@ split_wrmask(nir_builder *b, nir_intrinsic_instr *intr)
unsigned num_srcs = info->num_srcs;
unsigned value_idx = value_src(intr->intrinsic);
unsigned offset_idx = offset_src(intr->intrinsic);
- unsigned num_comp = nir_intrinsic_src_components(intr, value_idx);
unsigned wrmask = nir_intrinsic_write_mask(intr);
while (wrmask) {
unsigned first_component = ffs(wrmask) - 1;
unsigned length = ffs(~(wrmask >> first_component)) - 1;
- nir_def *value = nir_ssa_for_src(b, intr->src[value_idx], num_comp);
- nir_def *offset = nir_ssa_for_src(b, intr->src[offset_idx], 1);
+ nir_def *value = intr->src[value_idx].ssa;
+ nir_def *offset = intr->src[offset_idx].ssa;
/* swizzle out the consecutive components that we'll store
* in this iteration:
diff --git a/src/compiler/nir/nir_normalize_cubemap_coords.c b/src/compiler/nir/nir_normalize_cubemap_coords.c
index 5abf8119734..f32df0717ac 100644
--- a/src/compiler/nir/nir_normalize_cubemap_coords.c
+++ b/src/compiler/nir/nir_normalize_cubemap_coords.c
@@ -45,7 +45,7 @@ normalize_cubemap_coords(nir_builder *b, nir_instr *instr, void *data)
return false;
nir_def *orig_coord =
- nir_ssa_for_src(b, tex->src[idx].src, nir_tex_instr_src_size(tex, idx));
+ tex->src[idx].src.ssa;
assert(orig_coord->num_components >= 3);
nir_def *orig_xyz = nir_trim_vector(b, orig_coord, 3);
diff --git a/src/compiler/nir/nir_opt_large_constants.c b/src/compiler/nir/nir_opt_large_constants.c
index 567f28c57fa..5cbde3b0e27 100644
--- a/src/compiler/nir/nir_opt_large_constants.c
+++ b/src/compiler/nir/nir_opt_large_constants.c
@@ -312,7 +312,7 @@ build_small_constant_load(nir_builder *b, nir_deref_instr *deref,
nir_def *imm = nir_imm_intN_t(b, constant->data, constant->bit_size);
assert(deref->deref_type == nir_deref_type_array);
- nir_def *index = nir_ssa_for_src(b, deref->arr.index, 1);
+ nir_def *index = deref->arr.index.ssa;
nir_def *shift = nir_imul_imm(b, index, constant->bit_stride);
diff --git a/src/compiler/nir/nir_split_64bit_vec3_and_vec4.c b/src/compiler/nir/nir_split_64bit_vec3_and_vec4.c
index 510b2c185ac..4a5511ce00b 100644
--- a/src/compiler/nir/nir_split_64bit_vec3_and_vec4.c
+++ b/src/compiler/nir/nir_split_64bit_vec3_and_vec4.c
@@ -111,7 +111,7 @@ get_linear_array_offset(nir_builder *b, nir_deref_instr *deref)
for (nir_deref_instr **p = &path.path[1]; *p; p++) {
switch ((*p)->deref_type) {
case nir_deref_type_array: {
- nir_def *index = nir_ssa_for_src(b, (*p)->arr.index, 1);
+ nir_def *index = (*p)->arr.index.ssa;
int stride = glsl_array_size((*p)->type);
if (stride >= 0)
offset = nir_iadd(b, offset, nir_amul_imm(b, index, stride));
diff --git a/src/freedreno/ir3/ir3_nir.c b/src/freedreno/ir3/ir3_nir.c
index dc39c5115c7..d9ec05a0210 100644
--- a/src/freedreno/ir3/ir3_nir.c
+++ b/src/freedreno/ir3/ir3_nir.c
@@ -309,7 +309,7 @@ ir3_nir_lower_array_sampler_cb(struct nir_builder *b, nir_instr *instr, void *_d
b->cursor = nir_before_instr(&tex->instr);
unsigned ncomp = tex->coord_components;
- nir_def *src = nir_ssa_for_src(b, tex->src[coord_idx].src, ncomp);
+ nir_def *src = tex->src[coord_idx].src.ssa;
assume(ncomp >= 1);
nir_def *ai = nir_channel(b, src, ncomp - 1);
diff --git a/src/freedreno/ir3/ir3_nir_analyze_ubo_ranges.c b/src/freedreno/ir3/ir3_nir_analyze_ubo_ranges.c
index f94afb0c67e..d0ad45ec831 100644
--- a/src/freedreno/ir3/ir3_nir_analyze_ubo_ranges.c
+++ b/src/freedreno/ir3/ir3_nir_analyze_ubo_ranges.c
@@ -287,7 +287,7 @@ lower_ubo_load_to_uniform(nir_intrinsic_instr *instr, nir_builder *b,
return false;
}
- nir_def *ubo_offset = nir_ssa_for_src(b, instr->src[1], 1);
+ nir_def *ubo_offset = instr->src[1].ssa;
int const_offset = 0;
handle_partial_const(b, &ubo_offset, &const_offset);
@@ -534,7 +534,7 @@ fixup_load_uniform_instr(struct nir_builder *b, nir_instr *instr, void *arg)
b->cursor = nir_before_instr(instr);
- nir_def *offset = nir_ssa_for_src(b, intr->src[0], 1);
+ nir_def *offset = intr->src[0].ssa;
/* We'd like to avoid a sequence like:
*
@@ -606,7 +606,7 @@ ir3_nir_lower_load_const_instr(nir_builder *b, nir_instr *in_instr, void *data)
unsigned base = nir_intrinsic_base(instr);
nir_def *index = nir_imm_int(b, const_state->constant_data_ubo);
nir_def *offset =
- nir_iadd_imm(b, nir_ssa_for_src(b, instr->src[0], 1), base);
+ nir_iadd_imm(b, instr->src[0].ssa, base);
nir_def *result =
nir_load_ubo(b, num_components, 32, index, offset,
diff --git a/src/freedreno/ir3/ir3_nir_lower_64b.c b/src/freedreno/ir3/ir3_nir_lower_64b.c
index 5b53c2a309d..041c4bcd0c1 100644
--- a/src/freedreno/ir3/ir3_nir_lower_64b.c
+++ b/src/freedreno/ir3/ir3_nir_lower_64b.c
@@ -78,8 +78,8 @@ lower_64b_intrinsics(nir_builder *b, nir_instr *instr, void *unused)
unsigned num_comp = nir_intrinsic_src_components(intr, 0);
unsigned wrmask = nir_intrinsic_has_write_mask(intr) ?
nir_intrinsic_write_mask(intr) : BITSET_MASK(num_comp);
- nir_def *val = nir_ssa_for_src(b, intr->src[0], num_comp);
- nir_def *off = nir_ssa_for_src(b, intr->src[offset_src_idx], 1);
+ nir_def *val = intr->src[0].ssa;
+ nir_def *off = intr->src[offset_src_idx].ssa;
for (unsigned i = 0; i < num_comp; i++) {
if (!(wrmask & BITFIELD_BIT(i)))
@@ -115,7 +115,7 @@ lower_64b_intrinsics(nir_builder *b, nir_instr *instr, void *unused)
assert(num_comp == 1);
nir_def *offset = nir_iadd_imm(b,
- nir_ssa_for_src(b, intr->src[0], 1), 4);
+ intr->src[0].ssa, 4);
nir_def *upper = nir_load_kernel_input(b, 1, 32, offset);
@@ -136,7 +136,7 @@ lower_64b_intrinsics(nir_builder *b, nir_instr *instr, void *unused)
offset_src_idx = 0;
}
- nir_def *off = nir_ssa_for_src(b, intr->src[offset_src_idx], 1);
+ nir_def *off = intr->src[offset_src_idx].ssa;
for (unsigned i = 0; i < num_comp; i++) {
nir_intrinsic_instr *load =
@@ -247,7 +247,7 @@ lower_64b_global(nir_builder *b, nir_instr *instr, void *unused)
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
bool load = intr->intrinsic != nir_intrinsic_store_global;
- nir_def *addr64 = nir_ssa_for_src(b, intr->src[load ? 0 : 1], 1);
+ nir_def *addr64 = intr->src[load ? 0 : 1].ssa;
nir_def *addr = nir_unpack_64_2x32(b, addr64);
/*
@@ -283,7 +283,7 @@ lower_64b_global(nir_builder *b, nir_instr *instr, void *unused)
return nir_build_alu_src_arr(b, nir_op_vec(num_comp), components);
} else {
unsigned num_comp = nir_intrinsic_src_components(intr, 0);
- nir_def *value = nir_ssa_for_src(b, intr->src[0], num_comp);
+ nir_def *value = intr->src[0].ssa;
for (unsigned off = 0; off < num_comp; off += 4) {
unsigned c = MIN2(num_comp - off, 4);
nir_def *v = nir_channels(b, value, BITFIELD_MASK(c) << off);
diff --git a/src/freedreno/ir3/ir3_nir_lower_wide_load_store.c b/src/freedreno/ir3/ir3_nir_lower_wide_load_store.c
index a39ab1d199f..a3fb2eec7d9 100644
--- a/src/freedreno/ir3/ir3_nir_lower_wide_load_store.c
+++ b/src/freedreno/ir3/ir3_nir_lower_wide_load_store.c
@@ -57,8 +57,8 @@ lower_wide_load_store(nir_builder *b, nir_instr *instr, void *unused)
if (is_intrinsic_store(intr->intrinsic)) {
unsigned num_comp = nir_intrinsic_src_components(intr, 0);
unsigned wrmask = nir_intrinsic_write_mask(intr);
- nir_def *val = nir_ssa_for_src(b, intr->src[0], num_comp);
- nir_def *addr = nir_ssa_for_src(b, intr->src[1], 1);
+ nir_def *val = intr->src[0].ssa;
+ nir_def *addr = intr->src[1].ssa;
for (unsigned off = 0; off < num_comp; off += 4) {
unsigned c = MIN2(num_comp - off, 4);
@@ -82,7 +82,7 @@ lower_wide_load_store(nir_builder *b, nir_instr *instr, void *unused)
} else {
unsigned num_comp = nir_intrinsic_dest_components(intr);
unsigned bit_size = intr->def.bit_size;
- nir_def *addr = nir_ssa_for_src(b, intr->src[0], 1);
+ nir_def *addr = intr->src[0].ssa;
nir_def *components[num_comp];
for (unsigned off = 0; off < num_comp;) {
diff --git a/src/freedreno/vulkan/tu_shader.cc b/src/freedreno/vulkan/tu_shader.cc
index dc21507aac1..8013858877a 100644
--- a/src/freedreno/vulkan/tu_shader.cc
+++ b/src/freedreno/vulkan/tu_shader.cc
@@ -374,7 +374,7 @@ build_bindless(struct tu_device *dev, nir_builder *b,
if (deref->deref_type == nir_deref_type_var)
return nir_imm_int(b, idx);
- nir_def *arr_index = nir_ssa_for_src(b, deref->arr.index, 1);
+ nir_def *arr_index = deref->arr.index.ssa;
return nir_iadd_imm(b, nir_imul_imm(b, arr_index, 2), idx);
}
@@ -398,7 +398,7 @@ build_bindless(struct tu_device *dev, nir_builder *b,
if (deref->deref_type != nir_deref_type_var) {
assert(deref->deref_type == nir_deref_type_array);
- nir_def *arr_index = nir_ssa_for_src(b, deref->arr.index, 1);
+ nir_def *arr_index = deref->arr.index.ssa;
desc_offset = nir_iadd(b, desc_offset,
nir_imul_imm(b, arr_index, descriptor_stride));
}
diff --git a/src/gallium/drivers/crocus/crocus_program.c b/src/gallium/drivers/crocus/crocus_program.c
index eef7b43270d..c44e08e13f1 100644
--- a/src/gallium/drivers/crocus/crocus_program.c
+++ b/src/gallium/drivers/crocus/crocus_program.c
@@ -255,7 +255,7 @@ get_aoa_deref_offset(nir_builder *b,
assert(deref->deref_type == nir_deref_type_array);
/* This level's element size is the previous level's array size */
- nir_def *index = nir_ssa_for_src(b, deref->arr.index, 1);
+ nir_def *index = deref->arr.index.ssa;
assert(deref->arr.index.ssa);
offset = nir_iadd(b, offset,
nir_imul_imm(b, index, array_size));
@@ -477,7 +477,7 @@ crocus_setup_uniforms(ASSERTED const struct intel_device_info *devinfo,
*/
b.cursor = nir_before_instr(instr);
nir_def *offset =
- nir_iadd_imm(&b, nir_ssa_for_src(&b, intrin->src[0], 1),
+ nir_iadd_imm(&b, intrin->src[0].ssa,
nir_intrinsic_base(intrin));
if (temp_const_ubo_name == NULL)
diff --git a/src/gallium/drivers/d3d12/d3d12_nir_passes.c b/src/gallium/drivers/d3d12/d3d12_nir_passes.c
index 0e95eb0e54f..530755cb551 100644
--- a/src/gallium/drivers/d3d12/d3d12_nir_passes.c
+++ b/src/gallium/drivers/d3d12/d3d12_nir_passes.c
@@ -71,7 +71,7 @@ lower_pos_write(nir_builder *b, struct nir_instr *instr, nir_variable **flip)
b->cursor = nir_before_instr(&intr->instr);
- nir_def *pos = nir_ssa_for_src(b, intr->src[1], 4);
+ nir_def *pos = intr->src[1].ssa;
nir_def *flip_y = d3d12_get_state_var(b, D3D12_STATE_VAR_Y_FLIP, "d3d12_FlipY",
glsl_float_type(), flip);
nir_def *def = nir_vec4(b,
@@ -224,7 +224,7 @@ lower_uint_color_write(nir_builder *b, struct nir_instr *instr, bool is_signed)
b->cursor = nir_before_instr(&intr->instr);
- nir_def *col = nir_ssa_for_src(b, intr->src[1], intr->num_components);
+ nir_def *col = intr->src[1].ssa;
nir_def *def = is_signed ? nir_format_float_to_snorm(b, col, bits) :
nir_format_float_to_unorm(b, col, bits);
if (is_signed)
@@ -342,7 +342,7 @@ invert_depth_impl(nir_builder *b, struct invert_depth_state *state)
b->cursor = nir_before_instr(&intr->instr);
- nir_def *pos = nir_ssa_for_src(b, intr->src[1], 4);
+ nir_def *pos = intr->src[1].ssa;
if (state->viewport_index) {
nir_push_if(b, nir_test_mask(b, nir_ishl(b, nir_imm_int(b, 1), state->viewport_index), state->viewport_mask));
@@ -652,7 +652,7 @@ lower_triangle_strip_store(nir_builder *b, nir_intrinsic_instr *intr,
return;
nir_deref_instr *deref = nir_build_deref_array(b, nir_build_deref_var(b, varyings[var->data.location]), index);
- nir_def *value = nir_ssa_for_src(b, intr->src[1], intr->num_components);
+ nir_def *value = intr->src[1].ssa;
nir_store_deref(b, deref, value, 0xf);
nir_instr_remove(&intr->instr);
}
diff --git a/src/gallium/drivers/etnaviv/etnaviv_nir_lower_ubo_to_uniform.c b/src/gallium/drivers/etnaviv/etnaviv_nir_lower_ubo_to_uniform.c
index 13e571beb00..c6dca757440 100644
--- a/src/gallium/drivers/etnaviv/etnaviv_nir_lower_ubo_to_uniform.c
+++ b/src/gallium/drivers/etnaviv/etnaviv_nir_lower_ubo_to_uniform.c
@@ -57,7 +57,7 @@ lower_ubo_to_uniform(nir_builder *b, nir_instr *instr, void *_data)
b->cursor = nir_before_instr(instr);
/* Undo the operations done in nir_lower_uniforms_to_ubo. */
- nir_def *ubo_offset = nir_ssa_for_src(b, intr->src[1], 1);
+ nir_def *ubo_offset = intr->src[1].ssa;
nir_def *range_base = nir_imm_int(b, nir_intrinsic_range_base(intr));
nir_def *uniform_offset =
diff --git a/src/gallium/drivers/freedreno/ir3/ir3_descriptor.c b/src/gallium/drivers/freedreno/ir3/ir3_descriptor.c
index 2cef3ea75fa..dedead104b7 100644
--- a/src/gallium/drivers/freedreno/ir3/ir3_descriptor.c
+++ b/src/gallium/drivers/freedreno/ir3/ir3_descriptor.c
@@ -61,7 +61,7 @@ lower_intrinsic(nir_builder *b, nir_intrinsic_instr *intr)
}
unsigned set = ir3_shader_descriptor_set(b->shader->info.stage);
- nir_def *src = nir_ssa_for_src(b, intr->src[buffer_src], 1);
+ nir_def *src = intr->src[buffer_src].ssa;
src = nir_iadd_imm(b, src, desc_offset);
/* An out-of-bounds index into an SSBO/image array can cause a GPU fault
* on access to the descriptor (I don't see any hw mechanism to bound the
diff --git a/src/gallium/drivers/iris/iris_program.c b/src/gallium/drivers/iris/iris_program.c
index 79ebbcf2ceb..44f2d6ed419 100644
--- a/src/gallium/drivers/iris/iris_program.c
+++ b/src/gallium/drivers/iris/iris_program.c
@@ -236,7 +236,7 @@ get_aoa_deref_offset(nir_builder *b,
assert(deref->deref_type == nir_deref_type_array);
/* This level's element size is the previous level's array size */
- nir_def *index = nir_ssa_for_src(b, deref->arr.index, 1);
+ nir_def *index = deref->arr.index.ssa;
assert(deref->arr.index.ssa);
offset = nir_iadd(b, offset,
nir_imul_imm(b, index, array_size));
@@ -494,7 +494,7 @@ iris_setup_uniforms(ASSERTED const struct intel_device_info *devinfo,
b.cursor = nir_instr_remove(&intrin->instr);
nir_def *offset =
- nir_iadd_imm(&b, nir_ssa_for_src(&b, intrin->src[0], 1),
+ nir_iadd_imm(&b, intrin->src[0].ssa,
nir_intrinsic_base(intrin));
assert(load_size < b.shader->constant_data_size);
diff --git a/src/gallium/drivers/lima/ir/lima_nir_lower_txp.c b/src/gallium/drivers/lima/ir/lima_nir_lower_txp.c
index 799f3365443..8ee6a4b3528 100644
--- a/src/gallium/drivers/lima/ir/lima_nir_lower_txp.c
+++ b/src/gallium/drivers/lima/ir/lima_nir_lower_txp.c
@@ -101,9 +101,8 @@ lima_nir_lower_txp_instr(nir_builder *b, nir_instr *instr,
* step back and use load_input SSA instead of mov as a source for
* newly constructed vec4
*/
- nir_def *proj_ssa = nir_ssa_for_src(b, tex->src[proj_idx].src, 1);
- nir_def *coords_ssa = nir_ssa_for_src(b, tex->src[coords_idx].src,
- nir_tex_instr_src_size(tex, coords_idx));
+ nir_def *proj_ssa = tex->src[proj_idx].src.ssa;
+ nir_def *coords_ssa = tex->src[coords_idx].src.ssa;
int proj_idx_in_vec = -1;
nir_def *load_input = get_proj_index(coords_ssa->parent_instr,
diff --git a/src/gallium/drivers/r600/sfn/sfn_nir_lower_64bit.cpp b/src/gallium/drivers/r600/sfn/sfn_nir_lower_64bit.cpp
index d59e8bb8034..8d8d6d50ad6 100644
--- a/src/gallium/drivers/r600/sfn/sfn_nir_lower_64bit.cpp
+++ b/src/gallium/drivers/r600/sfn/sfn_nir_lower_64bit.cpp
@@ -398,12 +398,12 @@ LowerSplit64BitVar::split_load_deref_array(nir_intrinsic_instr *intr, nir_src& i
auto vars = get_var_pair(old_var);
auto deref1 = nir_build_deref_var(b, vars.first);
- auto deref_array1 = nir_build_deref_array(b, deref1, nir_ssa_for_src(b, index, 1));
+ auto deref_array1 = nir_build_deref_array(b, deref1, index.ssa);
auto load1 =
nir_build_load_deref(b, 2, 64, &deref_array1->def, (enum gl_access_qualifier)0);
auto deref2 = nir_build_deref_var(b, vars.second);
- auto deref_array2 = nir_build_deref_array(b, deref2, nir_ssa_for_src(b, index, 1));
+ auto deref_array2 = nir_build_deref_array(b, deref2, index.ssa);
auto load2 = nir_build_load_deref(
b, old_components - 2, 64, &deref_array2->def, (enum gl_access_qualifier)0);
@@ -426,13 +426,13 @@ LowerSplit64BitVar::split_store_deref_array(nir_intrinsic_instr *intr,
auto deref1 = nir_build_deref_var(b, vars.first);
auto deref_array1 =
- nir_build_deref_array(b, deref1, nir_ssa_for_src(b, deref->arr.index, 1));
+ nir_build_deref_array(b, deref1, deref->arr.index.ssa);
nir_build_store_deref(b, &deref_array1->def, src_xy, 3);
auto deref2 = nir_build_deref_var(b, vars.second);
auto deref_array2 =
- nir_build_deref_array(b, deref2, nir_ssa_for_src(b, deref->arr.index, 1));
+ nir_build_deref_array(b, deref2, deref->arr.index.ssa);
if (old_components == 3)
nir_build_store_deref(b,
@@ -669,11 +669,11 @@ LowerSplit64BitVar::split_reduction3(nir_alu_instr *alu,
{
nir_def *src[2][2];
- src[0][0] = nir_trim_vector(b, nir_ssa_for_src(b, alu->src[0].src, 2), 2);
- src[0][1] = nir_trim_vector(b, nir_ssa_for_src(b, alu->src[1].src, 2), 2);
+ src[0][0] = nir_trim_vector(b, alu->src[0].src.ssa, 2);
+ src[0][1] = nir_trim_vector(b, alu->src[1].src.ssa, 2);
- src[1][0] = nir_channel(b, nir_ssa_for_src(b, alu->src[0].src, 3), 2);
- src[1][1] = nir_channel(b, nir_ssa_for_src(b, alu->src[1].src, 3), 2);
+ src[1][0] = nir_channel(b, alu->src[0].src.ssa, 2);
+ src[1][1] = nir_channel(b, alu->src[1].src.ssa, 2);
return split_reduction(src, op1, op2, reduction);
}
@@ -686,11 +686,11 @@ LowerSplit64BitVar::split_reduction4(nir_alu_instr *alu,
{
nir_def *src[2][2];
- src[0][0] = nir_trim_vector(b, nir_ssa_for_src(b, alu->src[0].src, 2), 2);
- src[0][1] = nir_trim_vector(b, nir_ssa_for_src(b, alu->src[1].src, 2), 2);
+ src[0][0] = nir_trim_vector(b, alu->src[0].src.ssa, 2);
+ src[0][1] = nir_trim_vector(b, alu->src[1].src.ssa, 2);
- src[1][0] = nir_channels(b, nir_ssa_for_src(b, alu->src[0].src, 4), 0xc);
- src[1][1] = nir_channels(b, nir_ssa_for_src(b, alu->src[1].src, 4), 0xc);
+ src[1][0] = nir_channels(b, alu->src[0].src.ssa, 0xc);
+ src[1][1] = nir_channels(b, alu->src[1].src.ssa, 0xc);
return split_reduction(src, op1, op2, reduction);
}
diff --git a/src/gallium/drivers/r600/sfn/sfn_nir_lower_fs_out_to_vector.cpp b/src/gallium/drivers/r600/sfn/sfn_nir_lower_fs_out_to_vector.cpp
index c6602b22bd1..f0c837e82cc 100644
--- a/src/gallium/drivers/r600/sfn/sfn_nir_lower_fs_out_to_vector.cpp
+++ b/src/gallium/drivers/r600/sfn/sfn_nir_lower_fs_out_to_vector.cpp
@@ -324,7 +324,7 @@ NirLowerIOToVector::clone_deref_array(nir_builder *b,
dst_tail = clone_deref_array(b, dst_tail, parent);
- return nir_build_deref_array(b, dst_tail, nir_ssa_for_src(b, src_head->arr.index, 1));
+ return nir_build_deref_array(b, dst_tail, src_head->arr.index.ssa);
}
NirLowerFSOutToVector::NirLowerFSOutToVector():
diff --git a/src/gallium/drivers/r600/sfn/sfn_nir_lower_tex.cpp b/src/gallium/drivers/r600/sfn/sfn_nir_lower_tex.cpp
index 53b87abff1c..802f8009868 100644
--- a/src/gallium/drivers/r600/sfn/sfn_nir_lower_tex.cpp
+++ b/src/gallium/drivers/r600/sfn/sfn_nir_lower_tex.cpp
@@ -159,14 +159,14 @@ lower_txl_txf_array_or_cube(nir_builder *b, nir_tex_instr *tex)
assert(lod_idx >= 0 || bias_idx >= 0);
nir_def *size = nir_i2f32(b, nir_get_texture_size(b, tex));
- nir_def *lod = (lod_idx >= 0) ? nir_ssa_for_src(b, tex->src[lod_idx].src, 1)
+ nir_def *lod = (lod_idx >= 0) ? tex->src[lod_idx].src.ssa
: nir_get_texture_lod(b, tex);
if (bias_idx >= 0)
- lod = nir_fadd(b, lod, nir_ssa_for_src(b, tex->src[bias_idx].src, 1));
+ lod = nir_fadd(b, lod, tex->src[bias_idx].src.ssa);
if (min_lod_idx >= 0)
- lod = nir_fmax(b, lod, nir_ssa_for_src(b, tex->src[min_lod_idx].src, 1));
+ lod = nir_fmax(b, lod, tex->src[min_lod_idx].src.ssa);
/* max lod? */
@@ -282,11 +282,11 @@ r600_nir_lower_cube_to_2darray_impl(nir_builder *b, nir_instr *instr, void *_opt
if (tex->op == nir_texop_txd) {
int ddx_idx = nir_tex_instr_src_index(tex, nir_tex_src_ddx);
nir_src_rewrite(&tex->src[ddx_idx].src,
- nir_fmul_imm(b, nir_ssa_for_src(b, tex->src[ddx_idx].src, 3), 0.5));
+ nir_fmul_imm(b, tex->src[ddx_idx].src.ssa, 0.5));
int ddy_idx = nir_tex_instr_src_index(tex, nir_tex_src_ddy);
nir_src_rewrite(&tex->src[ddy_idx].src,
- nir_fmul_imm(b, nir_ssa_for_src(b, tex->src[ddy_idx].src, 3), 0.5));
+ nir_fmul_imm(b, tex->src[ddy_idx].src.ssa, 0.5));
}
auto new_coord = nir_vec3(b, nir_channel(b, xy, 0), nir_channel(b, xy, 1), z);
diff --git a/src/gallium/drivers/r600/sfn/sfn_nir_vectorize_vs_inputs.c b/src/gallium/drivers/r600/sfn/sfn_nir_vectorize_vs_inputs.c
index 5168c479b2d..41850c82a72 100644
--- a/src/gallium/drivers/r600/sfn/sfn_nir_vectorize_vs_inputs.c
+++ b/src/gallium/drivers/r600/sfn/sfn_nir_vectorize_vs_inputs.c
@@ -51,7 +51,7 @@ r600_clone_deref_array(nir_builder *b,
dst_tail = r600_clone_deref_array(b, dst_tail, parent);
- return nir_build_deref_array(b, dst_tail, nir_ssa_for_src(b, src_head->arr.index, 1));
+ return nir_build_deref_array(b, dst_tail, src_head->arr.index.ssa);
}
static bool
diff --git a/src/gallium/drivers/zink/zink_lower_cubemap_to_array.c b/src/gallium/drivers/zink/zink_lower_cubemap_to_array.c
index d8968cc7dc4..55a8425f130 100644
--- a/src/gallium/drivers/zink/zink_lower_cubemap_to_array.c
+++ b/src/gallium/drivers/zink/zink_lower_cubemap_to_array.c
@@ -441,7 +441,7 @@ lower_tex_to_txl(nir_builder *b, nir_tex_instr *tex)
nir_def *lod = nir_get_texture_lod(b, tex);
if (bias_idx >= 0)
- lod = nir_fadd(b, lod, nir_ssa_for_src(b, tex->src[bias_idx].src, 1));
+ lod = nir_fadd(b, lod, tex->src[bias_idx].src.ssa);
lod = nir_fadd_imm(b, lod, -1.0);
txl->src[s] = nir_tex_src_for_ssa(nir_tex_src_lod, lod);
diff --git a/src/gallium/frontends/lavapipe/lvp_lower_vulkan_resource.c b/src/gallium/frontends/lavapipe/lvp_lower_vulkan_resource.c
index a8c24c1f5ac..2255c28c523 100644
--- a/src/gallium/frontends/lavapipe/lvp_lower_vulkan_resource.c
+++ b/src/gallium/frontends/lavapipe/lvp_lower_vulkan_resource.c
@@ -72,8 +72,8 @@ static nir_def *lower_vri_intrin_vrri(struct nir_builder *b,
nir_instr *instr, void *data_cb)
{
nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
- nir_def *old_index = nir_ssa_for_src(b, intrin->src[0], 3);
- nir_def *delta = nir_ssa_for_src(b, intrin->src[1], 1);
+ nir_def *old_index = intrin->src[0].ssa;
+ nir_def *delta = intrin->src[1].ssa;
return nir_vec3(b, nir_channel(b, old_index, 0),
nir_iadd(b, nir_channel(b, old_index, 1), delta),
nir_channel(b, old_index, 2));
@@ -83,7 +83,7 @@ static nir_def *lower_vri_intrin_lvd(struct nir_builder *b,
nir_instr *instr, void *data_cb)
{
nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
- return nir_ssa_for_src(b, intrin->src[0], 3);
+ return intrin->src[0].ssa;
}
static nir_def *
@@ -193,7 +193,7 @@ static nir_def *lower_vri_instr(struct nir_builder *b,
case nir_intrinsic_get_ssbo_size: {
/* Ignore the offset component. */
b->cursor = nir_before_instr(instr);
- nir_def *resource = nir_ssa_for_src(b, intrin->src[0], 2);
+ nir_def *resource = intrin->src[0].ssa;
nir_src_rewrite(&intrin->src[0], resource);
return NULL;
}
diff --git a/src/gallium/frontends/rusticl/rusticl_nir.c b/src/gallium/frontends/rusticl/rusticl_nir.c
index 3f9ab01869e..86d68c1e318 100644
--- a/src/gallium/frontends/rusticl/rusticl_nir.c
+++ b/src/gallium/frontends/rusticl/rusticl_nir.c
@@ -97,7 +97,7 @@ rusticl_lower_input_instr(struct nir_builder *b, nir_instr *instr, void *_)
return NULL;
nir_def *ubo_idx = nir_imm_int(b, 0);
- nir_def *uniform_offset = nir_ssa_for_src(b, intrins->src[0], 1);
+ nir_def *uniform_offset = intrins->src[0].ssa;
assert(intrins->def.bit_size >= 8);
nir_def *load_result =
diff --git a/src/intel/compiler/brw_mesh.cpp b/src/intel/compiler/brw_mesh.cpp
index b9527841482..e889fad3cc4 100644
--- a/src/intel/compiler/brw_mesh.cpp
+++ b/src/intel/compiler/brw_mesh.cpp
@@ -1429,7 +1429,7 @@ brw_pack_primitive_indices_instr(nir_builder *b, nir_intrinsic_instr *intrin,
nir_src *data_src = &intrin->src[1];
nir_def *data_def =
- nir_ssa_for_src(b, *data_src, vertices_per_primitive);
+ data_src->ssa;
nir_def *new_data =
nir_ior(b, nir_ishl_imm(b, nir_channel(b, data_def, 0), 0),
diff --git a/src/intel/compiler/brw_nir.c b/src/intel/compiler/brw_nir.c
index 21a024ea2ad..1f58b061e62 100644
--- a/src/intel/compiler/brw_nir.c
+++ b/src/intel/compiler/brw_nir.c
@@ -215,7 +215,7 @@ remap_patch_urb_offsets(nir_block *block, nir_builder *b,
/* Multiply by the number of per-vertex slots. */
nir_def *vertex_offset =
nir_imul(b,
- nir_ssa_for_src(b, *vertex, 1),
+ vertex->ssa,
nir_imm_int(b,
vue_map->num_per_vertex_slots));
@@ -223,7 +223,7 @@ remap_patch_urb_offsets(nir_block *block, nir_builder *b,
nir_src *offset = nir_get_io_offset_src(intrin);
nir_def *total_offset =
nir_iadd(b, vertex_offset,
- nir_ssa_for_src(b, *offset, 1));
+ offset->ssa);
nir_src_rewrite(offset, total_offset);
}
diff --git a/src/intel/compiler/brw_nir_lower_intersection_shader.c b/src/intel/compiler/brw_nir_lower_intersection_shader.c
index da80c0367ec..90e2f03684a 100644
--- a/src/intel/compiler/brw_nir_lower_intersection_shader.c
+++ b/src/intel/compiler/brw_nir_lower_intersection_shader.c
@@ -193,8 +193,8 @@ brw_nir_lower_intersection_shader(nir_shader *intersection,
switch (intrin->intrinsic) {
case nir_intrinsic_report_ray_intersection: {
b->cursor = nir_instr_remove(&intrin->instr);
- nir_def *hit_t = nir_ssa_for_src(b, intrin->src[0], 1);
- nir_def *hit_kind = nir_ssa_for_src(b, intrin->src[1], 1);
+ nir_def *hit_t = intrin->src[0].ssa;
+ nir_def *hit_kind = intrin->src[1].ssa;
nir_def *min_t = nir_load_ray_t_min(b);
nir_def *max_t = nir_load_global(b, t_addr, 4, 1, 32);
diff --git a/src/intel/compiler/brw_nir_lower_ray_queries.c b/src/intel/compiler/brw_nir_lower_ray_queries.c
index 5a3aaaecfef..bcade17e803 100644
--- a/src/intel/compiler/brw_nir_lower_ray_queries.c
+++ b/src/intel/compiler/brw_nir_lower_ray_queries.c
@@ -131,7 +131,7 @@ get_ray_query_shadow_addr(nir_builder *b,
nir_deref_instr **p = &path.path[1];
for (; *p; p++) {
if ((*p)->deref_type == nir_deref_type_array) {
- nir_def *index = nir_ssa_for_src(b, (*p)->arr.index, 1);
+ nir_def *index = (*p)->arr.index.ssa;
/**/
*out_state_deref = nir_build_deref_array(b, *out_state_deref, index);
diff --git a/src/intel/vulkan/anv_nir_apply_pipeline_layout.c b/src/intel/vulkan/anv_nir_apply_pipeline_layout.c
index b1cca1b2c0f..dc39affe05f 100644
--- a/src/intel/vulkan/anv_nir_apply_pipeline_layout.c
+++ b/src/intel/vulkan/anv_nir_apply_pipeline_layout.c
@@ -1496,7 +1496,7 @@ lower_load_constant(nir_builder *b, nir_intrinsic_instr *intrin,
* by constant folding.
*/
assert(!nir_src_is_const(intrin->src[0]));
- nir_def *offset = nir_iadd_imm(b, nir_ssa_for_src(b, intrin->src[0], 1),
+ nir_def *offset = nir_iadd_imm(b, intrin->src[0].ssa,
nir_intrinsic_base(intrin));
unsigned load_size = intrin->def.num_components *
@@ -1561,7 +1561,7 @@ lower_tex_deref(nir_builder *b, nir_tex_instr *tex,
if (deref->deref_type != nir_deref_type_var) {
assert(deref->deref_type == nir_deref_type_array);
- array_index = nir_ssa_for_src(b, deref->arr.index, 1);
+ array_index = deref->arr.index.ssa;
} else {
array_index = nir_imm_int(b, 0);
}
diff --git a/src/intel/vulkan_hasvk/anv_nir_apply_pipeline_layout.c b/src/intel/vulkan_hasvk/anv_nir_apply_pipeline_layout.c
index 57d458a8faf..7c38af818dc 100644
--- a/src/intel/vulkan_hasvk/anv_nir_apply_pipeline_layout.c
+++ b/src/intel/vulkan_hasvk/anv_nir_apply_pipeline_layout.c
@@ -878,7 +878,7 @@ lower_image_intrinsic(nir_builder *b, nir_intrinsic_instr *intrin,
nir_def *index = NULL;
if (deref->deref_type != nir_deref_type_var) {
assert(deref->deref_type == nir_deref_type_array);
- index = nir_ssa_for_src(b, deref->arr.index, 1);
+ index = deref->arr.index.ssa;
} else {
index = nir_imm_int(b, 0);
}
@@ -900,7 +900,7 @@ lower_load_constant(nir_builder *b, nir_intrinsic_instr *intrin,
* by constant folding.
*/
assert(!nir_src_is_const(intrin->src[0]));
- nir_def *offset = nir_iadd_imm(b, nir_ssa_for_src(b, intrin->src[0], 1),
+ nir_def *offset = nir_iadd_imm(b, intrin->src[0].ssa,
nir_intrinsic_base(intrin));
nir_def *data;
@@ -1036,7 +1036,7 @@ lower_tex_deref(nir_builder *b, nir_tex_instr *tex,
*/
assert(nir_tex_instr_src_index(tex, nir_tex_src_plane) == -1);
- index = nir_ssa_for_src(b, deref->arr.index, 1);
+ index = deref->arr.index.ssa;
}
}
}
diff --git a/src/mesa/state_tracker/st_atifs_to_nir.c b/src/mesa/state_tracker/st_atifs_to_nir.c
index 0ad01d8408d..8a1ebbdde97 100644
--- a/src/mesa/state_tracker/st_atifs_to_nir.c
+++ b/src/mesa/state_tracker/st_atifs_to_nir.c
@@ -516,7 +516,7 @@ st_nir_lower_atifs_samplers_instr(nir_builder *b, nir_instr *instr, void *data)
* accidentally enables a cube array).
*/
if (coord_components != tex->coord_components) {
- nir_def *coords = nir_ssa_for_src(b, tex->src[coords_idx].src, tex->coord_components);
+ nir_def *coords = tex->src[coords_idx].src.ssa;
nir_src_rewrite(&tex->src[coords_idx].src,
nir_resize_vector(b, coords, coord_components));
tex->coord_components = coord_components;
diff --git a/src/mesa/state_tracker/st_nir_lower_fog.c b/src/mesa/state_tracker/st_nir_lower_fog.c
index 674b6d69662..816f36ca9f0 100644
--- a/src/mesa/state_tracker/st_nir_lower_fog.c
+++ b/src/mesa/state_tracker/st_nir_lower_fog.c
@@ -94,7 +94,7 @@ st_nir_lower_fog_instr(nir_builder *b, nir_instr *instr, void *_state)
b->cursor = nir_before_instr(instr);
- nir_def *color = nir_ssa_for_src(b, intr->src[0], intr->num_components);
+ nir_def *color = intr->src[0].ssa;
color = nir_resize_vector(b, color, 4);
nir_def *fog = fog_result(b, color, state->fog_mode, state->paramList);
diff --git a/src/microsoft/clc/clc_compiler.c b/src/microsoft/clc/clc_compiler.c
index 7744c95d73b..2815ab42491 100644
--- a/src/microsoft/clc/clc_compiler.c
+++ b/src/microsoft/clc/clc_compiler.c
@@ -360,7 +360,7 @@ clc_lower_nonnormalized_samplers(nir_shader *nir,
int coords_idx = nir_tex_instr_src_index(tex, nir_tex_src_coord);
assert(coords_idx != -1);
nir_def *coords =
- nir_ssa_for_src(&b, tex->src[coords_idx].src, tex->coord_components);
+ tex->src[coords_idx].src.ssa;
nir_def *txs = nir_i2f32(&b, nir_get_texture_size(&b, tex));
diff --git a/src/microsoft/compiler/dxil_nir.c b/src/microsoft/compiler/dxil_nir.c
index 108d3b52463..e1a9d9d4717 100644
--- a/src/microsoft/compiler/dxil_nir.c
+++ b/src/microsoft/compiler/dxil_nir.c
@@ -1662,7 +1662,7 @@ lower_fquantize2f16(struct nir_builder *b, nir_instr *instr, void *data)
*/
nir_alu_instr *alu = nir_instr_as_alu(instr);
nir_def *src =
- nir_ssa_for_src(b, alu->src[0].src, nir_src_num_components(alu->src[0].src));
+ alu->src[0].src.ssa;
nir_def *neg_inf_cond =
nir_flt_imm(b, src, -65504.0f);
diff --git a/src/microsoft/spirv_to_dxil/dxil_spirv_nir.c b/src/microsoft/spirv_to_dxil/dxil_spirv_nir.c
index 20bbe1ec7d2..8b76fd4ebdd 100644
--- a/src/microsoft/spirv_to_dxil/dxil_spirv_nir.c
+++ b/src/microsoft/spirv_to_dxil/dxil_spirv_nir.c
@@ -335,7 +335,7 @@ lower_load_push_constant(struct nir_builder *builder, nir_instr *instr,
nir_address_format_bit_size(ubo_format),
index, .desc_type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER);
- nir_def *offset = nir_ssa_for_src(builder, intrin->src[0], 1);
+ nir_def *offset = intrin->src[0].ssa;
nir_def *load_data = nir_load_ubo(
builder,
intrin->def.num_components,
@@ -406,7 +406,7 @@ lower_yz_flip(struct nir_builder *builder, nir_instr *instr,
const struct dxil_spirv_runtime_conf *rt_conf = data->rt_conf;
- nir_def *pos = nir_ssa_for_src(builder, intrin->src[1], 4);
+ nir_def *pos = intrin->src[1].ssa;
nir_def *y_pos = nir_channel(builder, pos, 1);
nir_def *z_pos = nir_channel(builder, pos, 2);
nir_def *y_flip_mask = NULL, *z_flip_mask = NULL, *dyn_yz_flip_mask = NULL;
diff --git a/src/panfrost/compiler/bifrost_compile.c b/src/panfrost/compiler/bifrost_compile.c
index 735a1563bf5..72c65cb8d5c 100644
--- a/src/panfrost/compiler/bifrost_compile.c
+++ b/src/panfrost/compiler/bifrost_compile.c
@@ -4584,11 +4584,9 @@ bi_lower_sample_mask_writes(nir_builder *b, nir_intrinsic_instr *intr,
nir_def *orig = nir_load_sample_mask(b);
- nir_src_rewrite(
- &intr->src[0],
- nir_b32csel(b, nir_load_multisampled_pan(b),
- nir_iand(b, orig, nir_ssa_for_src(b, intr->src[0], 1)),
- orig));
+ nir_src_rewrite(&intr->src[0],
+ nir_b32csel(b, nir_load_multisampled_pan(b),
+ nir_iand(b, orig, intr->src[0].ssa), orig));
return true;
}
diff --git a/src/panfrost/midgard/midgard_errata_lod.c b/src/panfrost/midgard/midgard_errata_lod.c
index f871fc8734d..0fa2d5e50bd 100644
--- a/src/panfrost/midgard/midgard_errata_lod.c
+++ b/src/panfrost/midgard/midgard_errata_lod.c
@@ -67,7 +67,7 @@ nir_lod_errata_instr(nir_builder *b, nir_instr *instr, void *data)
if (tex->src[i].src_type != nir_tex_src_lod)
continue;
- nir_def *lod = nir_ssa_for_src(b, tex->src[i].src, 1);
+ nir_def *lod = tex->src[i].src.ssa;
nir_def *biased = nir_fadd(b, lod, lod_bias);
nir_def *clamped = nir_fmin(b, nir_fmax(b, biased, min_lod), max_lod);
diff --git a/src/panfrost/midgard/midgard_nir_lower_image_bitsize.c b/src/panfrost/midgard/midgard_nir_lower_image_bitsize.c
index e8498668bdb..1a902a09e08 100644
--- a/src/panfrost/midgard/midgard_nir_lower_image_bitsize.c
+++ b/src/panfrost/midgard/midgard_nir_lower_image_bitsize.c
@@ -46,8 +46,7 @@ nir_lower_image_bitsize(nir_builder *b, nir_intrinsic_instr *intr,
b->cursor = nir_before_instr(&intr->instr);
- nir_def *coord =
- nir_ssa_for_src(b, intr->src[1], nir_src_num_components(intr->src[1]));
+ nir_def *coord = intr->src[1].ssa;
nir_def *coord16 = nir_u2u16(b, coord);
diff --git a/src/panfrost/util/pan_lower_framebuffer.c b/src/panfrost/util/pan_lower_framebuffer.c
index 9bd24901fc8..96e24683b25 100644
--- a/src/panfrost/util/pan_lower_framebuffer.c
+++ b/src/panfrost/util/pan_lower_framebuffer.c
@@ -498,7 +498,7 @@ pan_lower_fb_store(nir_builder *b, nir_intrinsic_instr *intr,
bool reorder_comps, unsigned nr_samples)
{
/* For stores, add conversion before */
- nir_def *unpacked = nir_ssa_for_src(b, intr->src[0], intr->num_components);
+ nir_def *unpacked = intr->src[0].ssa;
unpacked = nir_pad_vec4(b, unpacked);
/* Re-order the components */