author    Tom Stellard <thomas.stellard@amd.com>  2014-10-01 17:15:17 +0000
committer Tom Stellard <thomas.stellard@amd.com>  2014-10-01 17:15:17 +0000
commit    56077f5796a966f952ced32423740b36b32779e4 (patch)
tree      b51120bb22035fcfdb58d4b24a32b748442ccff7 /test
parent    6a0fcf7f530a2b2842df233c3982b2d40799393f (diff)
R600: Call EmitFunctionHeader() in the AsmPrinter to populate the ELF symbol table
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@218776 91177308-0d34-0410-b5e6-96231b3b80d8
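The test updates below are largely mechanical: with EmitFunctionHeader() now called, each function's label (e.g. v4i32_kernel_arg:) is printed directly into the generated assembly, so the FileCheck patterns switch from matching the IR symbol name to matching the emitted label anchored at the start of a line. A representative before/after, taken from 128bit-kernel-args.ll and repeated throughout the patch (one test, v4f32_kernel_args, is additionally renamed to v4f32_kernel_arg so its name matches its label):

-; SI-CHECK: @v4i32_kernel_arg
+; SI-CHECK: {{^}}v4i32_kernel_arg: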
Diffstat (limited to 'test')
-rw-r--r--  test/CodeGen/R600/128bit-kernel-args.ll | 10
-rw-r--r--  test/CodeGen/R600/32-bit-local-address-space.ll | 24
-rw-r--r--  test/CodeGen/R600/64bit-kernel-args.ll | 2
-rw-r--r--  test/CodeGen/R600/add.ll | 16
-rw-r--r--  test/CodeGen/R600/add_i64.ll | 12
-rw-r--r--  test/CodeGen/R600/address-space.ll | 2
-rw-r--r--  test/CodeGen/R600/and.ll | 28
-rw-r--r--  test/CodeGen/R600/anyext.ll | 2
-rw-r--r--  test/CodeGen/R600/array-ptr-calc-i32.ll | 2
-rw-r--r--  test/CodeGen/R600/array-ptr-calc-i64.ll | 2
-rw-r--r--  test/CodeGen/R600/atomic_cmp_swap_local.ll | 10
-rw-r--r--  test/CodeGen/R600/atomic_load_add.ll | 8
-rw-r--r--  test/CodeGen/R600/atomic_load_sub.ll | 8
-rw-r--r--  test/CodeGen/R600/basic-branch.ll | 2
-rw-r--r--  test/CodeGen/R600/basic-loop.ll | 2
-rw-r--r--  test/CodeGen/R600/bfe_uint.ll | 4
-rw-r--r--  test/CodeGen/R600/bfi_int.ll | 6
-rw-r--r--  test/CodeGen/R600/bitcast.ll | 8
-rw-r--r--  test/CodeGen/R600/build_vector.ll | 8
-rw-r--r--  test/CodeGen/R600/call_fs.ll | 4
-rw-r--r--  test/CodeGen/R600/cayman-loop-bug.ll | 2
-rw-r--r--  test/CodeGen/R600/cf-stack-bug.ll | 8
-rw-r--r--  test/CodeGen/R600/codegen-prepare-addrmode-sext.ll | 7
-rw-r--r--  test/CodeGen/R600/combine_vloads.ll | 2
-rw-r--r--  test/CodeGen/R600/complex-folding.ll | 2
-rw-r--r--  test/CodeGen/R600/concat_vectors.ll | 62
-rw-r--r--  test/CodeGen/R600/copy-illegal-type.ll | 18
-rw-r--r--  test/CodeGen/R600/ctlz_zero_undef.ll | 8
-rw-r--r--  test/CodeGen/R600/ctpop.ll | 30
-rw-r--r--  test/CodeGen/R600/ctpop64.ll | 14
-rw-r--r--  test/CodeGen/R600/cttz_zero_undef.ll | 8
-rw-r--r--  test/CodeGen/R600/cvt_f32_ubyte.ll | 18
-rw-r--r--  test/CodeGen/R600/dagcombiner-bug-illegal-vec4-int-to-fp.ll | 4
-rw-r--r--  test/CodeGen/R600/default-fp-mode.ll | 2
-rw-r--r--  test/CodeGen/R600/disconnected-predset-break-bug.ll | 2
-rw-r--r--  test/CodeGen/R600/dot4-folding.ll | 2
-rw-r--r--  test/CodeGen/R600/ds-negative-offset-addressing-mode-loop.ll | 2
-rw-r--r--  test/CodeGen/R600/extload.ll | 20
-rw-r--r--  test/CodeGen/R600/extract_vector_elt_i16.ll | 4
-rw-r--r--  test/CodeGen/R600/fabs.f64.ll | 16
-rw-r--r--  test/CodeGen/R600/fabs.ll | 14
-rw-r--r--  test/CodeGen/R600/fadd.ll | 8
-rw-r--r--  test/CodeGen/R600/fadd64.ll | 2
-rw-r--r--  test/CodeGen/R600/fceil.ll | 12
-rw-r--r--  test/CodeGen/R600/fceil64.ll | 12
-rw-r--r--  test/CodeGen/R600/fcmp.ll | 4
-rw-r--r--  test/CodeGen/R600/fcmp64.ll | 12
-rw-r--r--  test/CodeGen/R600/fconst64.ll | 2
-rw-r--r--  test/CodeGen/R600/fcopysign.f32.ll | 6
-rw-r--r--  test/CodeGen/R600/fcopysign.f64.ll | 6
-rw-r--r--  test/CodeGen/R600/fdiv.ll | 6
-rw-r--r--  test/CodeGen/R600/fdiv64.ll | 2
-rw-r--r--  test/CodeGen/R600/fetch-limits.r600.ll | 2
-rw-r--r--  test/CodeGen/R600/fetch-limits.r700+.ll | 2
-rw-r--r--  test/CodeGen/R600/ffloor.ll | 12
-rw-r--r--  test/CodeGen/R600/flat-address-space.ll | 14
-rw-r--r--  test/CodeGen/R600/fma.f64.ll | 6
-rw-r--r--  test/CodeGen/R600/fma.ll | 6
-rw-r--r--  test/CodeGen/R600/fmul.ll | 10
-rw-r--r--  test/CodeGen/R600/fmul64.ll | 6
-rw-r--r--  test/CodeGen/R600/fmuladd.ll | 20
-rw-r--r--  test/CodeGen/R600/fneg-fabs.f64.ll | 14
-rw-r--r--  test/CodeGen/R600/fneg-fabs.ll | 16
-rw-r--r--  test/CodeGen/R600/fneg.f64.ll | 10
-rw-r--r--  test/CodeGen/R600/fneg.ll | 10
-rw-r--r--  test/CodeGen/R600/fp16_to_fp.ll | 4
-rw-r--r--  test/CodeGen/R600/fp32_to_fp16.ll | 2
-rw-r--r--  test/CodeGen/R600/fp64_to_sint.ll | 6
-rw-r--r--  test/CodeGen/R600/fp_to_sint.ll | 12
-rw-r--r--  test/CodeGen/R600/fp_to_uint.f64.ll | 2
-rw-r--r--  test/CodeGen/R600/fp_to_uint.ll | 12
-rw-r--r--  test/CodeGen/R600/fpext.ll | 2
-rw-r--r--  test/CodeGen/R600/fptrunc.ll | 2
-rw-r--r--  test/CodeGen/R600/frem.ll | 8
-rw-r--r--  test/CodeGen/R600/fsqrt.ll | 4
-rw-r--r--  test/CodeGen/R600/fsub.ll | 10
-rw-r--r--  test/CodeGen/R600/fsub64.ll | 2
-rw-r--r--  test/CodeGen/R600/ftrunc.ll | 12
-rw-r--r--  test/CodeGen/R600/gep-address-space.ll | 8
-rw-r--r--  test/CodeGen/R600/global_atomics.ll | 8
-rw-r--r--  test/CodeGen/R600/gv-const-addrspace-fail.ll | 8
-rw-r--r--  test/CodeGen/R600/gv-const-addrspace.ll | 8
-rw-r--r--  test/CodeGen/R600/half.ll | 12
-rw-r--r--  test/CodeGen/R600/icmp64.ll | 20
-rw-r--r--  test/CodeGen/R600/imm.ll | 42
-rw-r--r--  test/CodeGen/R600/indirect-private-64.ll | 8
-rw-r--r--  test/CodeGen/R600/infinite-loop.ll | 2
-rw-r--r--  test/CodeGen/R600/input-mods.ll | 4
-rw-r--r--  test/CodeGen/R600/insert_vector_elt.ll | 48
-rw-r--r--  test/CodeGen/R600/kcache-fold.ll | 4
-rw-r--r--  test/CodeGen/R600/kernel-args.ll | 116
-rw-r--r--  test/CodeGen/R600/lds-oqap-crash.ll | 2
-rw-r--r--  test/CodeGen/R600/lds-output-queue.ll | 4
-rw-r--r--  test/CodeGen/R600/lds-size.ll | 2
-rw-r--r--  test/CodeGen/R600/legalizedag-bug-expand-setcc.ll | 2
-rw-r--r--  test/CodeGen/R600/literals.ll | 8
-rw-r--r--  test/CodeGen/R600/llvm.AMDGPU.abs.ll | 6
-rw-r--r--  test/CodeGen/R600/llvm.AMDGPU.barrier.global.ll | 2
-rw-r--r--  test/CodeGen/R600/llvm.AMDGPU.barrier.local.ll | 2
-rw-r--r--  test/CodeGen/R600/llvm.AMDGPU.bfe.i32.ll | 72
-rw-r--r--  test/CodeGen/R600/llvm.AMDGPU.bfe.u32.ll | 88
-rw-r--r--  test/CodeGen/R600/llvm.AMDGPU.bfi.ll | 8
-rw-r--r--  test/CodeGen/R600/llvm.AMDGPU.bfm.ll | 8
-rw-r--r--  test/CodeGen/R600/llvm.AMDGPU.brev.ll | 4
-rw-r--r--  test/CodeGen/R600/llvm.AMDGPU.clamp.ll | 4
-rw-r--r--  test/CodeGen/R600/llvm.AMDGPU.cube.ll | 2
-rw-r--r--  test/CodeGen/R600/llvm.AMDGPU.cvt_f32_ubyte.ll | 8
-rw-r--r--  test/CodeGen/R600/llvm.AMDGPU.div_fixup.ll | 4
-rw-r--r--  test/CodeGen/R600/llvm.AMDGPU.div_fmas.ll | 4
-rw-r--r--  test/CodeGen/R600/llvm.AMDGPU.fract.ll | 4
-rw-r--r--  test/CodeGen/R600/llvm.AMDGPU.imad24.ll | 2
-rw-r--r--  test/CodeGen/R600/llvm.AMDGPU.imax.ll | 4
-rw-r--r--  test/CodeGen/R600/llvm.AMDGPU.imin.ll | 4
-rw-r--r--  test/CodeGen/R600/llvm.AMDGPU.imul24.ll | 2
-rw-r--r--  test/CodeGen/R600/llvm.AMDGPU.kill.ll | 2
-rw-r--r--  test/CodeGen/R600/llvm.AMDGPU.ldexp.ll | 4
-rw-r--r--  test/CodeGen/R600/llvm.AMDGPU.legacy.rsq.ll | 2
-rw-r--r--  test/CodeGen/R600/llvm.AMDGPU.rcp.f64.ll | 6
-rw-r--r--  test/CodeGen/R600/llvm.AMDGPU.rcp.ll | 6
-rw-r--r--  test/CodeGen/R600/llvm.AMDGPU.rsq.clamped.f64.ll | 2
-rw-r--r--  test/CodeGen/R600/llvm.AMDGPU.rsq.clamped.ll | 2
-rw-r--r--  test/CodeGen/R600/llvm.AMDGPU.rsq.ll | 6
-rw-r--r--  test/CodeGen/R600/llvm.AMDGPU.trig_preop.ll | 4
-rw-r--r--  test/CodeGen/R600/llvm.AMDGPU.trunc.ll | 4
-rw-r--r--  test/CodeGen/R600/llvm.AMDGPU.umad24.ll | 2
-rw-r--r--  test/CodeGen/R600/llvm.AMDGPU.umax.ll | 6
-rw-r--r--  test/CodeGen/R600/llvm.AMDGPU.umin.ll | 6
-rw-r--r--  test/CodeGen/R600/llvm.AMDGPU.umul24.ll | 2
-rw-r--r--  test/CodeGen/R600/llvm.SI.gather4.ll | 70
-rw-r--r--  test/CodeGen/R600/llvm.SI.getlod.ll | 6
-rw-r--r--  test/CodeGen/R600/llvm.SI.image.ll | 6
-rw-r--r--  test/CodeGen/R600/llvm.SI.image.sample.ll | 40
-rw-r--r--  test/CodeGen/R600/llvm.SI.image.sample.o.ll | 40
-rw-r--r--  test/CodeGen/R600/llvm.SI.load.dword.ll | 2
-rw-r--r--  test/CodeGen/R600/llvm.SI.sample-masked.ll | 14
-rw-r--r--  test/CodeGen/R600/llvm.SI.sample.ll | 2
-rw-r--r--  test/CodeGen/R600/llvm.SI.sendmsg.ll | 2
-rw-r--r--  test/CodeGen/R600/llvm.SI.tbuffer.store.ll | 8
-rw-r--r--  test/CodeGen/R600/llvm.amdgpu.kilp.ll | 2
-rw-r--r--  test/CodeGen/R600/llvm.amdgpu.lrp.ll | 2
-rw-r--r--  test/CodeGen/R600/llvm.exp2.ll | 6
-rw-r--r--  test/CodeGen/R600/llvm.floor.ll | 12
-rw-r--r--  test/CodeGen/R600/llvm.log2.ll | 6
-rw-r--r--  test/CodeGen/R600/llvm.memcpy.ll | 18
-rw-r--r--  test/CodeGen/R600/llvm.rint.f64.ll | 6
-rw-r--r--  test/CodeGen/R600/llvm.rint.ll | 8
-rw-r--r--  test/CodeGen/R600/llvm.round.ll | 2
-rw-r--r--  test/CodeGen/R600/llvm.sin.ll | 8
-rw-r--r--  test/CodeGen/R600/llvm.sqrt.ll | 12
-rw-r--r--  test/CodeGen/R600/llvm.trunc.ll | 2
-rw-r--r--  test/CodeGen/R600/load-i1.ll | 20
-rw-r--r--  test/CodeGen/R600/load.ll | 86
-rw-r--r--  test/CodeGen/R600/load.vec.ll | 8
-rw-r--r--  test/CodeGen/R600/load64.ll | 6
-rw-r--r--  test/CodeGen/R600/local-64.ll | 32
-rw-r--r--  test/CodeGen/R600/local-atomics.ll | 108
-rw-r--r--  test/CodeGen/R600/local-atomics64.ll | 100
-rw-r--r--  test/CodeGen/R600/local-memory-two-objects.ll | 2
-rw-r--r--  test/CodeGen/R600/local-memory.ll | 2
-rw-r--r--  test/CodeGen/R600/loop-idiom.ll | 12
-rw-r--r--  test/CodeGen/R600/mad-sub.ll | 18
-rw-r--r--  test/CodeGen/R600/mad_int24.ll | 2
-rw-r--r--  test/CodeGen/R600/mad_uint24.ll | 8
-rw-r--r--  test/CodeGen/R600/max-literals.ll | 4
-rw-r--r--  test/CodeGen/R600/missing-store.ll | 2
-rw-r--r--  test/CodeGen/R600/mubuf.ll | 24
-rw-r--r--  test/CodeGen/R600/mul.ll | 26
-rw-r--r--  test/CodeGen/R600/mul_int24.ll | 2
-rw-r--r--  test/CodeGen/R600/mul_uint24.ll | 8
-rw-r--r--  test/CodeGen/R600/no-initializer-constant-addrspace.ll | 4
-rw-r--r--  test/CodeGen/R600/operand-spacing.ll | 2
-rw-r--r--  test/CodeGen/R600/or.ll | 36
-rw-r--r--  test/CodeGen/R600/packetizer.ll | 2
-rw-r--r--  test/CodeGen/R600/predicate-dp4.ll | 2
-rw-r--r--  test/CodeGen/R600/predicates.ll | 8
-rw-r--r--  test/CodeGen/R600/private-memory.ll | 14
-rw-r--r--  test/CodeGen/R600/r600-encoding.ll | 4
-rw-r--r--  test/CodeGen/R600/register-count-comments.ll | 4
-rw-r--r--  test/CodeGen/R600/reorder-stores.ll | 8
-rw-r--r--  test/CodeGen/R600/rotl.i64.ll | 4
-rw-r--r--  test/CodeGen/R600/rotl.ll | 6
-rw-r--r--  test/CodeGen/R600/rotr.i64.ll | 8
-rw-r--r--  test/CodeGen/R600/rotr.ll | 6
-rw-r--r--  test/CodeGen/R600/rsq.ll | 6
-rw-r--r--  test/CodeGen/R600/saddo.ll | 10
-rw-r--r--  test/CodeGen/R600/salu-to-valu.ll | 10
-rw-r--r--  test/CodeGen/R600/scalar_to_vector.ll | 4
-rw-r--r--  test/CodeGen/R600/schedule-global-loads.ll | 4
-rw-r--r--  test/CodeGen/R600/schedule-kernel-arg-loads.ll | 2
-rw-r--r--  test/CodeGen/R600/schedule-vs-if-nested-loop-failure.ll | 2
-rw-r--r--  test/CodeGen/R600/sdiv.ll | 6
-rw-r--r--  test/CodeGen/R600/sdivrem24.ll | 24
-rw-r--r--  test/CodeGen/R600/select-i1.ll | 2
-rw-r--r--  test/CodeGen/R600/select-vectors.ll | 22
-rw-r--r--  test/CodeGen/R600/select.ll | 2
-rw-r--r--  test/CodeGen/R600/select64.ll | 8
-rw-r--r--  test/CodeGen/R600/selectcc-opt.ll | 8
-rw-r--r--  test/CodeGen/R600/selectcc.ll | 2
-rw-r--r--  test/CodeGen/R600/set-dx10.ll | 24
-rw-r--r--  test/CodeGen/R600/setcc-equivalent.ll | 4
-rw-r--r--  test/CodeGen/R600/setcc-opt.ll | 2
-rw-r--r--  test/CodeGen/R600/setcc.ll | 52
-rw-r--r--  test/CodeGen/R600/setcc64.ll | 48
-rw-r--r--  test/CodeGen/R600/seto.ll | 2
-rw-r--r--  test/CodeGen/R600/setuo.ll | 2
-rw-r--r--  test/CodeGen/R600/sext-in-reg.ll | 64
-rw-r--r--  test/CodeGen/R600/sgpr-control-flow.ll | 4
-rw-r--r--  test/CodeGen/R600/sgpr-copy-duplicate-operand.ll | 2
-rw-r--r--  test/CodeGen/R600/sgpr-copy.ll | 12
-rw-r--r--  test/CodeGen/R600/shared-op-cycle.ll | 2
-rw-r--r--  test/CodeGen/R600/shl.ll | 20
-rw-r--r--  test/CodeGen/R600/shl_add_constant.ll | 10
-rw-r--r--  test/CodeGen/R600/shl_add_ptr.ll | 32
-rw-r--r--  test/CodeGen/R600/si-annotate-cf-assertion.ll | 2
-rw-r--r--  test/CodeGen/R600/si-lod-bias.ll | 2
-rw-r--r--  test/CodeGen/R600/si-sgpr-spill.ll | 4
-rw-r--r--  test/CodeGen/R600/si-vector-hang.ll | 2
-rw-r--r--  test/CodeGen/R600/sign_extend.ll | 12
-rw-r--r--  test/CodeGen/R600/simplify-demanded-bits-build-pair.ll | 2
-rw-r--r--  test/CodeGen/R600/sint_to_fp.ll | 10
-rw-r--r--  test/CodeGen/R600/sint_to_fp64.ll | 6
-rw-r--r--  test/CodeGen/R600/smrd.ll | 14
-rw-r--r--  test/CodeGen/R600/split-scalar-i64-add.ll | 4
-rw-r--r--  test/CodeGen/R600/sra.ll | 24
-rw-r--r--  test/CodeGen/R600/srl.ll | 20
-rw-r--r--  test/CodeGen/R600/ssubo.ll | 10
-rw-r--r--  test/CodeGen/R600/store-v3i32.ll | 2
-rw-r--r--  test/CodeGen/R600/store-v3i64.ll | 8
-rw-r--r--  test/CodeGen/R600/store-vector-ptrs.ll | 2
-rw-r--r--  test/CodeGen/R600/store.ll | 98
-rw-r--r--  test/CodeGen/R600/store.r600.ll | 4
-rw-r--r--  test/CodeGen/R600/structurize.ll | 2
-rw-r--r--  test/CodeGen/R600/structurize1.ll | 2
-rw-r--r--  test/CodeGen/R600/sub.ll | 8
-rw-r--r--  test/CodeGen/R600/swizzle-export.ll | 4
-rw-r--r--  test/CodeGen/R600/trunc-store-i1.ll | 6
-rw-r--r--  test/CodeGen/R600/trunc-vector-store-assertion-failure.ll | 2
-rw-r--r--  test/CodeGen/R600/trunc.ll | 12
-rw-r--r--  test/CodeGen/R600/uaddo.ll | 10
-rw-r--r--  test/CodeGen/R600/udiv.ll | 10
-rw-r--r--  test/CodeGen/R600/udivrem.ll | 6
-rw-r--r--  test/CodeGen/R600/udivrem24.ll | 24
-rw-r--r--  test/CodeGen/R600/udivrem64.ll | 4
-rw-r--r--  test/CodeGen/R600/uint_to_fp.f64.ll | 6
-rw-r--r--  test/CodeGen/R600/uint_to_fp.ll | 10
-rw-r--r--  test/CodeGen/R600/unaligned-load-store.ll | 16
-rw-r--r--  test/CodeGen/R600/unhandled-loop-condition-assertion.ll | 6
-rw-r--r--  test/CodeGen/R600/unsupported-cc.ll | 20
-rw-r--r--  test/CodeGen/R600/urem.ll | 8
-rw-r--r--  test/CodeGen/R600/use-sgpr-multiple-times.ll | 16
-rw-r--r--  test/CodeGen/R600/usubo.ll | 10
-rw-r--r--  test/CodeGen/R600/v1i64-kernel-arg.ll | 4
-rw-r--r--  test/CodeGen/R600/v_cndmask.ll | 4
-rw-r--r--  test/CodeGen/R600/vector-alloca.ll | 6
-rw-r--r--  test/CodeGen/R600/vertex-fetch-encoding.ll | 6
-rw-r--r--  test/CodeGen/R600/vop-shrink.ll | 4
-rw-r--r--  test/CodeGen/R600/vselect.ll | 14
-rw-r--r--  test/CodeGen/R600/vselect64.ll | 2
-rw-r--r--  test/CodeGen/R600/vtx-fetch-branch.ll | 2
-rw-r--r--  test/CodeGen/R600/vtx-schedule.ll | 2
-rw-r--r--  test/CodeGen/R600/wait.ll | 2
-rw-r--r--  test/CodeGen/R600/work-item-intrinsics.ll | 30
-rw-r--r--  test/CodeGen/R600/wrong-transalu-pos-fix.ll | 2
-rw-r--r--  test/CodeGen/R600/xor.ll | 30
-rw-r--r--  test/CodeGen/R600/zero_extend.ll | 8
265 files changed, 1520 insertions, 1519 deletions
diff --git a/test/CodeGen/R600/128bit-kernel-args.ll b/test/CodeGen/R600/128bit-kernel-args.ll
index 3c4fcf740c3..27fae51ce76 100644
--- a/test/CodeGen/R600/128bit-kernel-args.ll
+++ b/test/CodeGen/R600/128bit-kernel-args.ll
@@ -1,12 +1,12 @@
; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s --check-prefix=R600-CHECK
; RUN: llc < %s -march=r600 -mcpu=SI -verify-machineinstrs | FileCheck %s --check-prefix=SI-CHECK
-; R600-CHECK: @v4i32_kernel_arg
+; R600-CHECK: {{^}}v4i32_kernel_arg:
; R600-CHECK-DAG: MOV {{[* ]*}}T[[GPR:[0-9]]].X, KC0[3].Y
; R600-CHECK-DAG: MOV {{[* ]*}}T[[GPR]].Y, KC0[3].Z
; R600-CHECK-DAG: MOV {{[* ]*}}T[[GPR]].Z, KC0[3].W
; R600-CHECK-DAG: MOV {{[* ]*}}T[[GPR]].W, KC0[4].X
-; SI-CHECK: @v4i32_kernel_arg
+; SI-CHECK: {{^}}v4i32_kernel_arg:
; SI-CHECK: BUFFER_STORE_DWORDX4
define void @v4i32_kernel_arg(<4 x i32> addrspace(1)* %out, <4 x i32> %in) {
entry:
@@ -14,14 +14,14 @@ entry:
ret void
}
-; R600-CHECK: @v4f32_kernel_arg
+; R600-CHECK: {{^}}v4f32_kernel_arg:
; R600-CHECK-DAG: MOV {{[* ]*}}T[[GPR:[0-9]]].X, KC0[3].Y
; R600-CHECK-DAG: MOV {{[* ]*}}T[[GPR]].Y, KC0[3].Z
; R600-CHECK-DAG: MOV {{[* ]*}}T[[GPR]].Z, KC0[3].W
; R600-CHECK-DAG: MOV {{[* ]*}}T[[GPR]].W, KC0[4].X
-; SI-CHECK: @v4f32_kernel_arg
+; SI-CHECK: {{^}}v4f32_kernel_arg:
; SI-CHECK: BUFFER_STORE_DWORDX4
-define void @v4f32_kernel_args(<4 x float> addrspace(1)* %out, <4 x float> %in) {
+define void @v4f32_kernel_arg(<4 x float> addrspace(1)* %out, <4 x float> %in) {
entry:
store <4 x float> %in, <4 x float> addrspace(1)* %out
ret void
diff --git a/test/CodeGen/R600/32-bit-local-address-space.ll b/test/CodeGen/R600/32-bit-local-address-space.ll
index e13d719f626..3618f8271b5 100644
--- a/test/CodeGen/R600/32-bit-local-address-space.ll
+++ b/test/CodeGen/R600/32-bit-local-address-space.ll
@@ -10,7 +10,7 @@
; Instructions with B32, U32, and I32 in their name take 32-bit operands, while
; instructions with B64, U64, and I64 take 64-bit operands.
-; FUNC-LABEL: @local_address_load
+; FUNC-LABEL: {{^}}local_address_load:
; CHECK: V_MOV_B32_e{{32|64}} [[PTR:v[0-9]]]
; CHECK: DS_READ_B32 v{{[0-9]+}}, [[PTR]]
define void @local_address_load(i32 addrspace(1)* %out, i32 addrspace(3)* %in) {
@@ -20,7 +20,7 @@ entry:
ret void
}
-; FUNC-LABEL: @local_address_gep
+; FUNC-LABEL: {{^}}local_address_gep:
; CHECK: S_ADD_I32 [[SPTR:s[0-9]]]
; CHECK: V_MOV_B32_e32 [[VPTR:v[0-9]+]], [[SPTR]]
; CHECK: DS_READ_B32 [[VPTR]]
@@ -32,7 +32,7 @@ entry:
ret void
}
-; FUNC-LABEL: @local_address_gep_const_offset
+; FUNC-LABEL: {{^}}local_address_gep_const_offset:
; CHECK: V_MOV_B32_e32 [[VPTR:v[0-9]+]], s{{[0-9]+}}
; CHECK: DS_READ_B32 v{{[0-9]+}}, [[VPTR]], 0x4,
define void @local_address_gep_const_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %in) {
@@ -44,7 +44,7 @@ entry:
}
; Offset too large, can't fold into 16-bit immediate offset.
-; FUNC-LABEL: @local_address_gep_large_const_offset
+; FUNC-LABEL: {{^}}local_address_gep_large_const_offset:
; CHECK: S_ADD_I32 [[SPTR:s[0-9]]], s{{[0-9]+}}, 0x10004
; CHECK: V_MOV_B32_e32 [[VPTR:v[0-9]+]], [[SPTR]]
; CHECK: DS_READ_B32 [[VPTR]]
@@ -56,7 +56,7 @@ entry:
ret void
}
-; FUNC-LABEL: @null_32bit_lds_ptr:
+; FUNC-LABEL: {{^}}null_32bit_lds_ptr:
; CHECK: V_CMP_NE_I32
; CHECK-NOT: V_CMP_NE_I32
; CHECK: V_CNDMASK_B32
@@ -67,7 +67,7 @@ define void @null_32bit_lds_ptr(i32 addrspace(1)* %out, i32 addrspace(3)* %lds)
ret void
}
-; FUNC-LABEL: @mul_32bit_ptr:
+; FUNC-LABEL: {{^}}mul_32bit_ptr:
; CHECK: V_MUL_LO_I32
; CHECK-NEXT: V_ADD_I32_e32
; CHECK-NEXT: DS_READ_B32
@@ -80,7 +80,7 @@ define void @mul_32bit_ptr(float addrspace(1)* %out, [3 x float] addrspace(3)* %
@g_lds = addrspace(3) global float zeroinitializer, align 4
-; FUNC-LABEL: @infer_ptr_alignment_global_offset:
+; FUNC-LABEL: {{^}}infer_ptr_alignment_global_offset:
; CHECK: V_MOV_B32_e32 [[REG:v[0-9]+]], 0
; CHECK: DS_READ_B32 v{{[0-9]+}}, [[REG]]
define void @infer_ptr_alignment_global_offset(float addrspace(1)* %out, i32 %tid) {
@@ -93,21 +93,21 @@ define void @infer_ptr_alignment_global_offset(float addrspace(1)* %out, i32 %ti
@ptr = addrspace(3) global i32 addrspace(3)* null
@dst = addrspace(3) global [16384 x i32] zeroinitializer
-; FUNC-LABEL: @global_ptr:
+; FUNC-LABEL: {{^}}global_ptr:
; CHECK: DS_WRITE_B32
define void @global_ptr() nounwind {
store i32 addrspace(3)* getelementptr ([16384 x i32] addrspace(3)* @dst, i32 0, i32 16), i32 addrspace(3)* addrspace(3)* @ptr
ret void
}
-; FUNC-LABEL: @local_address_store
+; FUNC-LABEL: {{^}}local_address_store:
; CHECK: DS_WRITE_B32
define void @local_address_store(i32 addrspace(3)* %out, i32 %val) {
store i32 %val, i32 addrspace(3)* %out
ret void
}
-; FUNC-LABEL: @local_address_gep_store
+; FUNC-LABEL: {{^}}local_address_gep_store:
; CHECK: S_ADD_I32 [[SADDR:s[0-9]+]],
; CHECK: V_MOV_B32_e32 [[ADDR:v[0-9]+]], [[SADDR]]
; CHECK: DS_WRITE_B32 [[ADDR]], v{{[0-9]+}},
@@ -117,7 +117,7 @@ define void @local_address_gep_store(i32 addrspace(3)* %out, i32, i32 %val, i32
ret void
}
-; FUNC-LABEL: @local_address_gep_const_offset_store
+; FUNC-LABEL: {{^}}local_address_gep_const_offset_store:
; CHECK: V_MOV_B32_e32 [[VPTR:v[0-9]+]], s{{[0-9]+}}
; CHECK: V_MOV_B32_e32 [[VAL:v[0-9]+]], s{{[0-9]+}}
; CHECK: DS_WRITE_B32 [[VPTR]], [[VAL]], 0x4
@@ -128,7 +128,7 @@ define void @local_address_gep_const_offset_store(i32 addrspace(3)* %out, i32 %v
}
; Offset too large, can't fold into 16-bit immediate offset.
-; FUNC-LABEL: @local_address_gep_large_const_offset_store
+; FUNC-LABEL: {{^}}local_address_gep_large_const_offset_store:
; CHECK: S_ADD_I32 [[SPTR:s[0-9]]], s{{[0-9]+}}, 0x10004
; CHECK: V_MOV_B32_e32 [[VPTR:v[0-9]+]], [[SPTR]]
; CHECK: DS_WRITE_B32 [[VPTR]], v{{[0-9]+}}, 0
diff --git a/test/CodeGen/R600/64bit-kernel-args.ll b/test/CodeGen/R600/64bit-kernel-args.ll
index 2d82c1e5391..9e1f02baab7 100644
--- a/test/CodeGen/R600/64bit-kernel-args.ll
+++ b/test/CodeGen/R600/64bit-kernel-args.ll
@@ -1,6 +1,6 @@
; RUN: llc < %s -march=r600 -mcpu=tahiti -verify-machineinstrs | FileCheck %s --check-prefix=SI-CHECK
-; SI-CHECK: @f64_kernel_arg
+; SI-CHECK: {{^}}f64_kernel_arg:
; SI-CHECK-DAG: S_LOAD_DWORDX2 s[{{[0-9]:[0-9]}}], s[0:1], 0x9
; SI-CHECK-DAG: S_LOAD_DWORDX2 s[{{[0-9]:[0-9]}}], s[0:1], 0xb
; SI-CHECK: BUFFER_STORE_DWORDX2
diff --git a/test/CodeGen/R600/add.ll b/test/CodeGen/R600/add.ll
index 8cf43d196ec..9bea9be8485 100644
--- a/test/CodeGen/R600/add.ll
+++ b/test/CodeGen/R600/add.ll
@@ -1,7 +1,7 @@
; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck --check-prefix=EG-CHECK --check-prefix=FUNC %s
; RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck --check-prefix=SI-CHECK --check-prefix=FUNC %s
-;FUNC-LABEL: @test1:
+;FUNC-LABEL: {{^}}test1:
;EG-CHECK: ADD_INT {{[* ]*}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
;SI-CHECK: V_ADD_I32_e32 [[REG:v[0-9]+]], {{v[0-9]+, v[0-9]+}}
@@ -16,7 +16,7 @@ define void @test1(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
ret void
}
-;FUNC-LABEL: @test2:
+;FUNC-LABEL: {{^}}test2:
;EG-CHECK: ADD_INT {{[* ]*}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
;EG-CHECK: ADD_INT {{[* ]*}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
@@ -32,7 +32,7 @@ define void @test2(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
ret void
}
-;FUNC-LABEL: @test4:
+;FUNC-LABEL: {{^}}test4:
;EG-CHECK: ADD_INT {{[* ]*}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
;EG-CHECK: ADD_INT {{[* ]*}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
;EG-CHECK: ADD_INT {{[* ]*}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
@@ -52,7 +52,7 @@ define void @test4(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
ret void
}
-; FUNC-LABEL: @test8
+; FUNC-LABEL: {{^}}test8:
; EG-CHECK: ADD_INT
; EG-CHECK: ADD_INT
; EG-CHECK: ADD_INT
@@ -76,7 +76,7 @@ entry:
ret void
}
-; FUNC-LABEL: @test16
+; FUNC-LABEL: {{^}}test16:
; EG-CHECK: ADD_INT
; EG-CHECK: ADD_INT
; EG-CHECK: ADD_INT
@@ -116,7 +116,7 @@ entry:
ret void
}
-; FUNC-LABEL: @add64
+; FUNC-LABEL: {{^}}add64:
; SI-CHECK: S_ADD_U32
; SI-CHECK: S_ADDC_U32
define void @add64(i64 addrspace(1)* %out, i64 %a, i64 %b) {
@@ -131,7 +131,7 @@ entry:
; %0 will be stored in a VGPR, so the comiler will be forced to copy %a
; to a VGPR before doing the add.
-; FUNC-LABEL: @add64_sgpr_vgpr
+; FUNC-LABEL: {{^}}add64_sgpr_vgpr:
; SI-CHECK-NOT: V_ADDC_U32_e32 s
define void @add64_sgpr_vgpr(i64 addrspace(1)* %out, i64 %a, i64 addrspace(1)* %in) {
entry:
@@ -142,7 +142,7 @@ entry:
}
; Test i64 add inside a branch.
-; FUNC-LABEL: @add64_in_branch
+; FUNC-LABEL: {{^}}add64_in_branch:
; SI-CHECK: S_ADD_U32
; SI-CHECK: S_ADDC_U32
define void @add64_in_branch(i64 addrspace(1)* %out, i64 addrspace(1)* %in, i64 %a, i64 %b, i64 %c) {
diff --git a/test/CodeGen/R600/add_i64.ll b/test/CodeGen/R600/add_i64.ll
index 5be969ce568..0e3789e4c02 100644
--- a/test/CodeGen/R600/add_i64.ll
+++ b/test/CodeGen/R600/add_i64.ll
@@ -3,7 +3,7 @@
declare i32 @llvm.r600.read.tidig.x() readnone
-; SI-LABEL: @test_i64_vreg:
+; SI-LABEL: {{^}}test_i64_vreg:
; SI: V_ADD_I32
; SI: V_ADDC_U32
define void @test_i64_vreg(i64 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %inA, i64 addrspace(1)* noalias %inB) {
@@ -18,7 +18,7 @@ define void @test_i64_vreg(i64 addrspace(1)* noalias %out, i64 addrspace(1)* noa
}
; Check that the SGPR add operand is correctly moved to a VGPR.
-; SI-LABEL: @sgpr_operand:
+; SI-LABEL: {{^}}sgpr_operand:
; SI: V_ADD_I32
; SI: V_ADDC_U32
define void @sgpr_operand(i64 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %in, i64 addrspace(1)* noalias %in_bar, i64 %a) {
@@ -31,7 +31,7 @@ define void @sgpr_operand(i64 addrspace(1)* noalias %out, i64 addrspace(1)* noal
; Swap the arguments. Check that the SGPR -> VGPR copy works with the
; SGPR as other operand.
;
-; SI-LABEL: @sgpr_operand_reversed:
+; SI-LABEL: {{^}}sgpr_operand_reversed:
; SI: V_ADD_I32
; SI: V_ADDC_U32
define void @sgpr_operand_reversed(i64 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %in, i64 %a) {
@@ -42,7 +42,7 @@ define void @sgpr_operand_reversed(i64 addrspace(1)* noalias %out, i64 addrspace
}
-; SI-LABEL: @test_v2i64_sreg:
+; SI-LABEL: {{^}}test_v2i64_sreg:
; SI: S_ADD_U32
; SI: S_ADDC_U32
; SI: S_ADD_U32
@@ -53,7 +53,7 @@ define void @test_v2i64_sreg(<2 x i64> addrspace(1)* noalias %out, <2 x i64> %a,
ret void
}
-; SI-LABEL: @test_v2i64_vreg:
+; SI-LABEL: {{^}}test_v2i64_vreg:
; SI: V_ADD_I32
; SI: V_ADDC_U32
; SI: V_ADD_I32
@@ -69,7 +69,7 @@ define void @test_v2i64_vreg(<2 x i64> addrspace(1)* noalias %out, <2 x i64> add
ret void
}
-; SI-LABEL: @trunc_i64_add_to_i32
+; SI-LABEL: {{^}}trunc_i64_add_to_i32:
; SI: S_LOAD_DWORD s[[SREG0:[0-9]+]]
; SI: S_LOAD_DWORD s[[SREG1:[0-9]+]]
; SI: S_ADD_I32 [[SRESULT:s[0-9]+]], s[[SREG1]], s[[SREG0]]
diff --git a/test/CodeGen/R600/address-space.ll b/test/CodeGen/R600/address-space.ll
index 7f52472c384..7efb5cd7df6 100644
--- a/test/CodeGen/R600/address-space.ll
+++ b/test/CodeGen/R600/address-space.ll
@@ -7,7 +7,7 @@
; FIXME: Extra V_MOV from SGPR to VGPR for second read. The address is
; already in a VGPR after the first read.
-; CHECK-LABEL: @do_as_ptr_calcs:
+; CHECK-LABEL: {{^}}do_as_ptr_calcs:
; CHECK: S_LOAD_DWORD [[SREG1:s[0-9]+]],
; CHECK: V_MOV_B32_e32 [[VREG1:v[0-9]+]], [[SREG1]]
; CHECK-DAG: DS_READ_B32 v{{[0-9]+}}, [[VREG1]], 0xc
diff --git a/test/CodeGen/R600/and.ll b/test/CodeGen/R600/and.ll
index ccb97e38f3b..9c0eb89e7df 100644
--- a/test/CodeGen/R600/and.ll
+++ b/test/CodeGen/R600/and.ll
@@ -1,7 +1,7 @@
; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
; RUN: llc -march=r600 -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-; FUNC-LABEL: @test2
+; FUNC-LABEL: {{^}}test2:
; EG: AND_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; EG: AND_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
@@ -17,7 +17,7 @@ define void @test2(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
ret void
}
-; FUNC-LABEL: @test4
+; FUNC-LABEL: {{^}}test4:
; EG: AND_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; EG: AND_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; EG: AND_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
@@ -37,7 +37,7 @@ define void @test4(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
ret void
}
-; FUNC-LABEL: @s_and_i32
+; FUNC-LABEL: {{^}}s_and_i32:
; SI: S_AND_B32
define void @s_and_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) {
%and = and i32 %a, %b
@@ -45,7 +45,7 @@ define void @s_and_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) {
ret void
}
-; FUNC-LABEL: @s_and_constant_i32
+; FUNC-LABEL: {{^}}s_and_constant_i32:
; SI: S_AND_B32 s{{[0-9]+}}, s{{[0-9]+}}, 0x12d687
define void @s_and_constant_i32(i32 addrspace(1)* %out, i32 %a) {
%and = and i32 %a, 1234567
@@ -53,7 +53,7 @@ define void @s_and_constant_i32(i32 addrspace(1)* %out, i32 %a) {
ret void
}
-; FUNC-LABEL: @v_and_i32
+; FUNC-LABEL: {{^}}v_and_i32:
; SI: V_AND_B32
define void @v_and_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr, i32 addrspace(1)* %bptr) {
%a = load i32 addrspace(1)* %aptr, align 4
@@ -63,7 +63,7 @@ define void @v_and_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr, i32 addr
ret void
}
-; FUNC-LABEL: @v_and_constant_i32
+; FUNC-LABEL: {{^}}v_and_constant_i32:
; SI: V_AND_B32
define void @v_and_constant_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr) {
%a = load i32 addrspace(1)* %aptr, align 4
@@ -72,7 +72,7 @@ define void @v_and_constant_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr)
ret void
}
-; FUNC-LABEL: @s_and_i64
+; FUNC-LABEL: {{^}}s_and_i64:
; SI: S_AND_B64
define void @s_and_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) {
%and = and i64 %a, %b
@@ -81,7 +81,7 @@ define void @s_and_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) {
}
; FIXME: Should use SGPRs
-; FUNC-LABEL: @s_and_i1
+; FUNC-LABEL: {{^}}s_and_i1:
; SI: V_AND_B32
define void @s_and_i1(i1 addrspace(1)* %out, i1 %a, i1 %b) {
%and = and i1 %a, %b
@@ -89,7 +89,7 @@ define void @s_and_i1(i1 addrspace(1)* %out, i1 %a, i1 %b) {
ret void
}
-; FUNC-LABEL: @s_and_constant_i64
+; FUNC-LABEL: {{^}}s_and_constant_i64:
; SI: S_AND_B64
define void @s_and_constant_i64(i64 addrspace(1)* %out, i64 %a) {
%and = and i64 %a, 281474976710655
@@ -97,7 +97,7 @@ define void @s_and_constant_i64(i64 addrspace(1)* %out, i64 %a) {
ret void
}
-; FUNC-LABEL: @v_and_i64
+; FUNC-LABEL: {{^}}v_and_i64:
; SI: V_AND_B32
; SI: V_AND_B32
define void @v_and_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr) {
@@ -108,7 +108,7 @@ define void @v_and_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 addr
ret void
}
-; FUNC-LABEL: @v_and_i64_br
+; FUNC-LABEL: {{^}}v_and_i64_br:
; SI: V_AND_B32
; SI: V_AND_B32
define void @v_and_i64_br(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr, i32 %cond) {
@@ -128,7 +128,7 @@ endif:
ret void
}
-; FUNC-LABEL: @v_and_constant_i64
+; FUNC-LABEL: {{^}}v_and_constant_i64:
; SI: V_AND_B32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
; SI: V_AND_B32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
define void @v_and_constant_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr) {
@@ -139,7 +139,7 @@ define void @v_and_constant_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr)
}
; FIXME: Replace and 0 with mov 0
-; FUNC-LABEL: @v_and_inline_imm_i64
+; FUNC-LABEL: {{^}}v_and_inline_imm_i64:
; SI: V_AND_B32_e32 {{v[0-9]+}}, 64, {{v[0-9]+}}
; SI: V_AND_B32_e32 {{v[0-9]+}}, 0, {{v[0-9]+}}
define void @v_and_inline_imm_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr) {
@@ -149,7 +149,7 @@ define void @v_and_inline_imm_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %apt
ret void
}
-; FUNC-LABEL: @s_and_inline_imm_i64
+; FUNC-LABEL: {{^}}s_and_inline_imm_i64:
; SI: S_AND_B64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 64
define void @s_and_inline_imm_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
%and = and i64 %a, 64
diff --git a/test/CodeGen/R600/anyext.ll b/test/CodeGen/R600/anyext.ll
index bbe5d0a393e..e120951d3c8 100644
--- a/test/CodeGen/R600/anyext.ll
+++ b/test/CodeGen/R600/anyext.ll
@@ -1,6 +1,6 @@
; RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck %s
-; CHECK-LABEL: @anyext_i1_i32
+; CHECK-LABEL: {{^}}anyext_i1_i32:
; CHECK: V_CNDMASK_B32_e64
define void @anyext_i1_i32(i32 addrspace(1)* %out, i32 %cond) {
entry:
diff --git a/test/CodeGen/R600/array-ptr-calc-i32.ll b/test/CodeGen/R600/array-ptr-calc-i32.ll
index fd5bd529c24..413aba3f17d 100644
--- a/test/CodeGen/R600/array-ptr-calc-i32.ll
+++ b/test/CodeGen/R600/array-ptr-calc-i32.ll
@@ -9,7 +9,7 @@ declare void @llvm.AMDGPU.barrier.local() nounwind noduplicate
; 64-bit pointer add. This should work since private pointers should
; be 32-bits.
-; SI-LABEL: @test_private_array_ptr_calc:
+; SI-LABEL: {{^}}test_private_array_ptr_calc:
; FIXME: We end up with zero argument for ADD, because
; SIRegisterInfo::eliminateFrameIndex() blindly replaces the frame index
diff --git a/test/CodeGen/R600/array-ptr-calc-i64.ll b/test/CodeGen/R600/array-ptr-calc-i64.ll
index 296247147ff..3ffc0b8c348 100644
--- a/test/CodeGen/R600/array-ptr-calc-i64.ll
+++ b/test/CodeGen/R600/array-ptr-calc-i64.ll
@@ -2,7 +2,7 @@
declare i32 @llvm.SI.tid() readnone
-; SI-LABEL: @test_array_ptr_calc
+; SI-LABEL: {{^}}test_array_ptr_calc:
; SI: V_MUL_LO_I32
; SI: V_MUL_HI_I32
define void @test_array_ptr_calc(i32 addrspace(1)* noalias %out, [1025 x i32] addrspace(1)* noalias %inA, i32 addrspace(1)* noalias %inB) {
diff --git a/test/CodeGen/R600/atomic_cmp_swap_local.ll b/test/CodeGen/R600/atomic_cmp_swap_local.ll
index 9f897743752..9377b9c1446 100644
--- a/test/CodeGen/R600/atomic_cmp_swap_local.ll
+++ b/test/CodeGen/R600/atomic_cmp_swap_local.ll
@@ -1,7 +1,7 @@
; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; RUN: llc -march=r600 -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -check-prefix=CI -check-prefix=FUNC %s
-; FUNC-LABEL: @lds_atomic_cmpxchg_ret_i32_offset:
+; FUNC-LABEL: {{^}}lds_atomic_cmpxchg_ret_i32_offset:
; SI: S_LOAD_DWORD [[PTR:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
; SI: S_LOAD_DWORD [[SWAP:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xc
; SI-DAG: V_MOV_B32_e32 [[VCMP:v[0-9]+]], 7
@@ -17,7 +17,7 @@ define void @lds_atomic_cmpxchg_ret_i32_offset(i32 addrspace(1)* %out, i32 addrs
ret void
}
-; FUNC-LABEL: @lds_atomic_cmpxchg_ret_i64_offset:
+; FUNC-LABEL: {{^}}lds_atomic_cmpxchg_ret_i64_offset:
; SI: S_LOAD_DWORD [[PTR:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
; SI: S_LOAD_DWORDX2 s{{\[}}[[LOSWAP:[0-9]+]]:[[HISWAP:[0-9]+]]{{\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0xd
; SI: S_MOV_B64 s{{\[}}[[LOSCMP:[0-9]+]]:[[HISCMP:[0-9]+]]{{\]}}, 7
@@ -37,7 +37,7 @@ define void @lds_atomic_cmpxchg_ret_i64_offset(i64 addrspace(1)* %out, i64 addrs
ret void
}
-; FUNC-LABEL: @lds_atomic_cmpxchg_ret_i32_bad_si_offset
+; FUNC-LABEL: {{^}}lds_atomic_cmpxchg_ret_i32_bad_si_offset:
; SI: DS_CMPST_RTN_B32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, 0x0
; CI: DS_CMPST_RTN_B32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, 0x10
; SI: S_ENDPGM
@@ -51,7 +51,7 @@ define void @lds_atomic_cmpxchg_ret_i32_bad_si_offset(i32 addrspace(1)* %out, i3
ret void
}
-; FUNC-LABEL: @lds_atomic_cmpxchg_noret_i32_offset:
+; FUNC-LABEL: {{^}}lds_atomic_cmpxchg_noret_i32_offset:
; SI: S_LOAD_DWORD [[PTR:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0x9
; SI: S_LOAD_DWORD [[SWAP:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xa
; SI-DAG: V_MOV_B32_e32 [[VCMP:v[0-9]+]], 7
@@ -66,7 +66,7 @@ define void @lds_atomic_cmpxchg_noret_i32_offset(i32 addrspace(3)* %ptr, i32 %sw
ret void
}
-; FUNC-LABEL: @lds_atomic_cmpxchg_noret_i64_offset:
+; FUNC-LABEL: {{^}}lds_atomic_cmpxchg_noret_i64_offset:
; SI: S_LOAD_DWORD [[PTR:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0x9
; SI: S_LOAD_DWORDX2 s{{\[}}[[LOSWAP:[0-9]+]]:[[HISWAP:[0-9]+]]{{\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0xb
; SI: S_MOV_B64 s{{\[}}[[LOSCMP:[0-9]+]]:[[HISCMP:[0-9]+]]{{\]}}, 7
diff --git a/test/CodeGen/R600/atomic_load_add.ll b/test/CodeGen/R600/atomic_load_add.ll
index 6dfb8c73079..8e75822d774 100644
--- a/test/CodeGen/R600/atomic_load_add.ll
+++ b/test/CodeGen/R600/atomic_load_add.ll
@@ -1,7 +1,7 @@
; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck %s -check-prefix=SI -check-prefix=FUNC
; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=R600 -check-prefix=FUNC %s
-; FUNC-LABEL: @atomic_add_local
+; FUNC-LABEL: {{^}}atomic_add_local:
; R600: LDS_ADD *
; SI: DS_ADD_U32
define void @atomic_add_local(i32 addrspace(3)* %local) {
@@ -9,7 +9,7 @@ define void @atomic_add_local(i32 addrspace(3)* %local) {
ret void
}
-; FUNC-LABEL: @atomic_add_local_const_offset
+; FUNC-LABEL: {{^}}atomic_add_local_const_offset:
; R600: LDS_ADD *
; SI: DS_ADD_U32 v{{[0-9]+}}, v{{[0-9]+}}, 0x10
define void @atomic_add_local_const_offset(i32 addrspace(3)* %local) {
@@ -18,7 +18,7 @@ define void @atomic_add_local_const_offset(i32 addrspace(3)* %local) {
ret void
}
-; FUNC-LABEL: @atomic_add_ret_local
+; FUNC-LABEL: {{^}}atomic_add_ret_local:
; R600: LDS_ADD_RET *
; SI: DS_ADD_RTN_U32
define void @atomic_add_ret_local(i32 addrspace(1)* %out, i32 addrspace(3)* %local) {
@@ -27,7 +27,7 @@ define void @atomic_add_ret_local(i32 addrspace(1)* %out, i32 addrspace(3)* %loc
ret void
}
-; FUNC-LABEL: @atomic_add_ret_local_const_offset
+; FUNC-LABEL: {{^}}atomic_add_ret_local_const_offset:
; R600: LDS_ADD_RET *
; SI: DS_ADD_RTN_U32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, 0x14
define void @atomic_add_ret_local_const_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %local) {
diff --git a/test/CodeGen/R600/atomic_load_sub.ll b/test/CodeGen/R600/atomic_load_sub.ll
index e5937f96676..7c5eb5b5297 100644
--- a/test/CodeGen/R600/atomic_load_sub.ll
+++ b/test/CodeGen/R600/atomic_load_sub.ll
@@ -1,7 +1,7 @@
; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=R600 -check-prefix=FUNC %s
; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-; FUNC-LABEL: @atomic_sub_local
+; FUNC-LABEL: {{^}}atomic_sub_local:
; R600: LDS_SUB *
; SI: DS_SUB_U32
define void @atomic_sub_local(i32 addrspace(3)* %local) {
@@ -9,7 +9,7 @@ define void @atomic_sub_local(i32 addrspace(3)* %local) {
ret void
}
-; FUNC-LABEL: @atomic_sub_local_const_offset
+; FUNC-LABEL: {{^}}atomic_sub_local_const_offset:
; R600: LDS_SUB *
; SI: DS_SUB_U32 v{{[0-9]+}}, v{{[0-9]+}}, 0x10
define void @atomic_sub_local_const_offset(i32 addrspace(3)* %local) {
@@ -18,7 +18,7 @@ define void @atomic_sub_local_const_offset(i32 addrspace(3)* %local) {
ret void
}
-; FUNC-LABEL: @atomic_sub_ret_local
+; FUNC-LABEL: {{^}}atomic_sub_ret_local:
; R600: LDS_SUB_RET *
; SI: DS_SUB_RTN_U32
define void @atomic_sub_ret_local(i32 addrspace(1)* %out, i32 addrspace(3)* %local) {
@@ -27,7 +27,7 @@ define void @atomic_sub_ret_local(i32 addrspace(1)* %out, i32 addrspace(3)* %loc
ret void
}
-; FUNC-LABEL: @atomic_sub_ret_local_const_offset
+; FUNC-LABEL: {{^}}atomic_sub_ret_local_const_offset:
; R600: LDS_SUB_RET *
; SI: DS_SUB_RTN_U32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, 0x14
define void @atomic_sub_ret_local_const_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %local) {
diff --git a/test/CodeGen/R600/basic-branch.ll b/test/CodeGen/R600/basic-branch.ll
index d084132d4fc..073ab796137 100644
--- a/test/CodeGen/R600/basic-branch.ll
+++ b/test/CodeGen/R600/basic-branch.ll
@@ -1,7 +1,7 @@
; XFAIL: *
; RUN: llc -O0 -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck %s
-; CHECK-LABEL: @test_branch(
+; CHECK-LABEL: {{^}}test_branch(
define void @test_branch(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in, i32 %val) nounwind {
%cmp = icmp ne i32 %val, 0
br i1 %cmp, label %store, label %end
diff --git a/test/CodeGen/R600/basic-loop.ll b/test/CodeGen/R600/basic-loop.ll
index 6d0ff0743b8..3cd60913518 100644
--- a/test/CodeGen/R600/basic-loop.ll
+++ b/test/CodeGen/R600/basic-loop.ll
@@ -1,7 +1,7 @@
; XFAIL: *
; RUN: llc -O0 -verify-machineinstrs -march=r600 -mcpu=SI < %s | FileCheck %s
-; CHECK-LABEL: @test_loop:
+; CHECK-LABEL: {{^}}test_loop:
define void @test_loop(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in, i32 %val) nounwind {
entry:
br label %loop.body
diff --git a/test/CodeGen/R600/bfe_uint.ll b/test/CodeGen/R600/bfe_uint.ll
index fe466e6ad5f..6fe23e91295 100644
--- a/test/CodeGen/R600/bfe_uint.ll
+++ b/test/CodeGen/R600/bfe_uint.ll
@@ -2,7 +2,7 @@
; XFAIL: *
-; CHECK: @bfe_def
+; CHECK: {{^}}bfe_def:
; CHECK: BFE_UINT
define void @bfe_def(i32 addrspace(1)* %out, i32 %x) {
entry:
@@ -17,7 +17,7 @@ entry:
; implmented with a LSHR instruction, which is better, because LSHR has less
; operands and requires less constants.
-; CHECK: @bfe_shift
+; CHECK: {{^}}bfe_shift:
; CHECK-NOT: BFE_UINT
define void @bfe_shift(i32 addrspace(1)* %out, i32 %x) {
entry:
diff --git a/test/CodeGen/R600/bfi_int.ll b/test/CodeGen/R600/bfi_int.ll
index 107158de54d..0514ae1da43 100644
--- a/test/CodeGen/R600/bfi_int.ll
+++ b/test/CodeGen/R600/bfi_int.ll
@@ -4,7 +4,7 @@
; BFI_INT Definition pattern from ISA docs
; (y & x) | (z & ~x)
;
-; R600-CHECK: @bfi_def
+; R600-CHECK: {{^}}bfi_def:
; R600-CHECK: BFI_INT
; SI-CHECK: @bfi_def
; SI-CHECK: V_BFI_B32
@@ -20,7 +20,7 @@ entry:
; SHA-256 Ch function
; z ^ (x & (y ^ z))
-; R600-CHECK: @bfi_sha256_ch
+; R600-CHECK: {{^}}bfi_sha256_ch:
; R600-CHECK: BFI_INT
; SI-CHECK: @bfi_sha256_ch
; SI-CHECK: V_BFI_B32
@@ -35,7 +35,7 @@ entry:
; SHA-256 Ma function
; ((x & z) | (y & (x | z)))
-; R600-CHECK: @bfi_sha256_ma
+; R600-CHECK: {{^}}bfi_sha256_ma:
; R600-CHECK: XOR_INT * [[DST:T[0-9]+\.[XYZW]]], KC0[2].Z, KC0[2].W
; R600-CHECK: BFI_INT * {{T[0-9]+\.[XYZW]}}, {{[[DST]]|PV\.[XYZW]}}, KC0[3].X, KC0[2].W
; SI-CHECK: V_XOR_B32_e32 [[DST:v[0-9]+]], {{s[0-9]+, v[0-9]+}}
diff --git a/test/CodeGen/R600/bitcast.ll b/test/CodeGen/R600/bitcast.ll
index 0be79e658f5..cd1e719a4c1 100644
--- a/test/CodeGen/R600/bitcast.ll
+++ b/test/CodeGen/R600/bitcast.ll
@@ -4,7 +4,7 @@
declare void @llvm.SI.export(i32, i32, i32, i32, i32, float, float, float, float)
-; FUNC-LABEL: @v32i8_to_v8i32
+; FUNC-LABEL: {{^}}v32i8_to_v8i32:
; SI: S_ENDPGM
define void @v32i8_to_v8i32(<32 x i8> addrspace(2)* inreg) #0 {
entry:
@@ -17,7 +17,7 @@ entry:
ret void
}
-; FUNC-LABEL: @i8ptr_v16i8ptr
+; FUNC-LABEL: {{^}}i8ptr_v16i8ptr:
; SI: S_ENDPGM
define void @i8ptr_v16i8ptr(<16 x i8> addrspace(1)* %out, i8 addrspace(1)* %in) {
entry:
@@ -55,7 +55,7 @@ define void @i32_to_v4i8(<4 x i8> addrspace(1)* %out, i32 addrspace(1)* %in) nou
ret void
}
-; FUNC-LABEL: @bitcast_v2i32_to_f64
+; FUNC-LABEL: {{^}}bitcast_v2i32_to_f64:
; SI: S_ENDPGM
define void @bitcast_v2i32_to_f64(double addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
%val = load <2 x i32> addrspace(1)* %in, align 8
@@ -65,7 +65,7 @@ define void @bitcast_v2i32_to_f64(double addrspace(1)* %out, <2 x i32> addrspace
ret void
}
-; FUNC-LABEL: @bitcast_f64_to_v2i32
+; FUNC-LABEL: {{^}}bitcast_f64_to_v2i32:
; SI: S_ENDPGM
define void @bitcast_f64_to_v2i32(<2 x i32> addrspace(1)* %out, double addrspace(1)* %in) {
%val = load double addrspace(1)* %in, align 8
diff --git a/test/CodeGen/R600/build_vector.ll b/test/CodeGen/R600/build_vector.ll
index 8179de13e86..d1709187577 100644
--- a/test/CodeGen/R600/build_vector.ll
+++ b/test/CodeGen/R600/build_vector.ll
@@ -1,11 +1,11 @@
; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s --check-prefix=R600-CHECK
; RUN: llc < %s -march=r600 -mcpu=SI -verify-machineinstrs | FileCheck %s --check-prefix=SI-CHECK
-; R600-CHECK: @build_vector2
+; R600-CHECK: {{^}}build_vector2:
; R600-CHECK: MOV
; R600-CHECK: MOV
; R600-CHECK-NOT: MOV
-; SI-CHECK: @build_vector2
+; SI-CHECK: {{^}}build_vector2:
; SI-CHECK-DAG: V_MOV_B32_e32 v[[X:[0-9]]], 5
; SI-CHECK-DAG: V_MOV_B32_e32 v[[Y:[0-9]]], 6
; SI-CHECK: BUFFER_STORE_DWORDX2 v{{\[}}[[X]]:[[Y]]{{\]}}
@@ -15,13 +15,13 @@ entry:
ret void
}
-; R600-CHECK: @build_vector4
+; R600-CHECK: {{^}}build_vector4:
; R600-CHECK: MOV
; R600-CHECK: MOV
; R600-CHECK: MOV
; R600-CHECK: MOV
; R600-CHECK-NOT: MOV
-; SI-CHECK: @build_vector4
+; SI-CHECK: {{^}}build_vector4:
; SI-CHECK-DAG: V_MOV_B32_e32 v[[X:[0-9]]], 5
; SI-CHECK-DAG: V_MOV_B32_e32 v[[Y:[0-9]]], 6
; SI-CHECK-DAG: V_MOV_B32_e32 v[[Z:[0-9]]], 7
diff --git a/test/CodeGen/R600/call_fs.ll b/test/CodeGen/R600/call_fs.ll
index f7c4e5b22cb..7df22402cff 100644
--- a/test/CodeGen/R600/call_fs.ll
+++ b/test/CodeGen/R600/call_fs.ll
@@ -2,10 +2,10 @@
; RUN: llc < %s -march=r600 -mcpu=redwood -show-mc-encoding -o - | FileCheck --check-prefix=EG-CHECK %s
; RUN: llc < %s -march=r600 -mcpu=rv710 -show-mc-encoding -o - | FileCheck --check-prefix=R600-CHECK %s
-; EG-CHECK: @call_fs
+; EG-CHECK: {{^}}call_fs:
; EG-CHECK: .long 257
; EG-CHECK: CALL_FS ; encoding: [0x00,0x00,0x00,0x00,0x00,0x00,0xc0,0x84]
-; R600-CHECK: @call_fs
+; R600-CHECK: {{^}}call_fs:
; R600-CHECK: .long 257
; R600-CHECK:CALL_FS ; encoding: [0x00,0x00,0x00,0x00,0x00,0x00,0x80,0x89]
diff --git a/test/CodeGen/R600/cayman-loop-bug.ll b/test/CodeGen/R600/cayman-loop-bug.ll
index a87352895eb..c7b8c403731 100644
--- a/test/CodeGen/R600/cayman-loop-bug.ll
+++ b/test/CodeGen/R600/cayman-loop-bug.ll
@@ -1,6 +1,6 @@
; RUN: llc < %s -march=r600 -mcpu=cayman | FileCheck %s
-; CHECK-LABEL: @main
+; CHECK-LABEL: {{^}}main:
; CHECK: LOOP_START_DX10
; CHECK: ALU_PUSH_BEFORE
; CHECK: LOOP_START_DX10
diff --git a/test/CodeGen/R600/cf-stack-bug.ll b/test/CodeGen/R600/cf-stack-bug.ll
index c3a4612e6ac..02c87d76bb2 100644
--- a/test/CodeGen/R600/cf-stack-bug.ll
+++ b/test/CodeGen/R600/cf-stack-bug.ll
@@ -17,7 +17,7 @@
; BUG64-NOT: Applying bug work-around
; BUG32-NOT: Applying bug work-around
; NOBUG-NOT: Applying bug work-around
-; FUNC-LABEL: @nested3
+; FUNC-LABEL: {{^}}nested3:
define void @nested3(i32 addrspace(1)* %out, i32 %cond) {
entry:
%0 = icmp sgt i32 %cond, 0
@@ -50,7 +50,7 @@ end:
; BUG64: Applying bug work-around
; BUG32-NOT: Applying bug work-around
; NOBUG-NOT: Applying bug work-around
-; FUNC-LABEL: @nested4
+; FUNC-LABEL: {{^}}nested4:
define void @nested4(i32 addrspace(1)* %out, i32 %cond) {
entry:
%0 = icmp sgt i32 %cond, 0
@@ -91,7 +91,7 @@ end:
; BUG64: Applying bug work-around
; BUG32-NOT: Applying bug work-around
; NOBUG-NOT: Applying bug work-around
-; FUNC-LABEL: @nested7
+; FUNC-LABEL: {{^}}nested7:
define void @nested7(i32 addrspace(1)* %out, i32 %cond) {
entry:
%0 = icmp sgt i32 %cond, 0
@@ -156,7 +156,7 @@ end:
; BUG64: Applying bug work-around
; BUG32: Applying bug work-around
; NOBUG-NOT: Applying bug work-around
-; FUNC-LABEL: @nested8
+; FUNC-LABEL: {{^}}nested8:
define void @nested8(i32 addrspace(1)* %out, i32 %cond) {
entry:
%0 = icmp sgt i32 %cond, 0
diff --git a/test/CodeGen/R600/codegen-prepare-addrmode-sext.ll b/test/CodeGen/R600/codegen-prepare-addrmode-sext.ll
index f5d731d0102..9c91d16dcb0 100644
--- a/test/CodeGen/R600/codegen-prepare-addrmode-sext.ll
+++ b/test/CodeGen/R600/codegen-prepare-addrmode-sext.ll
@@ -1,12 +1,13 @@
-; RUN: opt -codegenprepare -S -o - %s | FileCheck --check-prefix=OPT --check-prefix=FUNC %s
-; RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck --check-prefix=SI-LLC --check-prefix=FUNC %s
+; RUN: opt -codegenprepare -S -o - %s | FileCheck --check-prefix=OPT %s
+; RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck --check-prefix=SI-LLC %s
target datalayout = "e-p:32:32-p1:64:64-p2:64:64-p3:32:32-p4:32:32-p5:64:64-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64"
target triple = "r600--"
-; FUNC-LABEL: @test
+; OPT-LABEL: @test
; OPT: mul nsw i32
; OPT-NEXT: sext
+; SI-LLC-LABEL: {{^}}test:
; SI-LLC: S_MUL_I32
; SI-LLC-NOT: MUL
define void @test(i8 addrspace(1)* nocapture readonly %in, i32 %a, i8 %b) {
diff --git a/test/CodeGen/R600/combine_vloads.ll b/test/CodeGen/R600/combine_vloads.ll
index f8ec712c1ec..38420b25cba 100644
--- a/test/CodeGen/R600/combine_vloads.ll
+++ b/test/CodeGen/R600/combine_vloads.ll
@@ -9,7 +9,7 @@
; 128-bit loads instead of many 8-bit
-; EG-LABEL: @combine_vloads:
+; EG-LABEL: {{^}}combine_vloads:
; EG: VTX_READ_128
; EG: VTX_READ_128
define void @combine_vloads(<8 x i8> addrspace(1)* nocapture %src, <8 x i8> addrspace(1)* nocapture %result) nounwind {
diff --git a/test/CodeGen/R600/complex-folding.ll b/test/CodeGen/R600/complex-folding.ll
index 99f0d99b352..a5399a71324 100644
--- a/test/CodeGen/R600/complex-folding.ll
+++ b/test/CodeGen/R600/complex-folding.ll
@@ -1,6 +1,6 @@
;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
-; CHECK: @main
+; CHECK: {{^}}main:
; CHECK-NOT: MOV
define void @main(<4 x float> inreg %reg0) #0 {
entry:
diff --git a/test/CodeGen/R600/concat_vectors.ll b/test/CodeGen/R600/concat_vectors.ll
index 753282bbe82..8a17a04f3e0 100644
--- a/test/CodeGen/R600/concat_vectors.ll
+++ b/test/CodeGen/R600/concat_vectors.ll
@@ -1,6 +1,6 @@
; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-; FUNC-LABEL: @test_concat_v1i32
+; FUNC-LABEL: {{^}}test_concat_v1i32:
; 0x80f000 is the high 32 bits of the resource descriptor used by MUBUF
; instructions that access scratch memory. Bit 23, which is the add_tid_enable
; bit, is only set for scratch access, so we can check for the absence of this
@@ -13,7 +13,7 @@ define void @test_concat_v1i32(<2 x i32> addrspace(1)* %out, <1 x i32> %a, <1 x
ret void
}
-; FUNC-LABEL: @test_concat_v2i32
+; FUNC-LABEL: {{^}}test_concat_v2i32:
; SI-NOT: S_MOV_B32 s{{[0-9]}}, 0x80f000
; SI-NOT: MOVREL
define void @test_concat_v2i32(<4 x i32> addrspace(1)* %out, <2 x i32> %a, <2 x i32> %b) nounwind {
@@ -22,7 +22,7 @@ define void @test_concat_v2i32(<4 x i32> addrspace(1)* %out, <2 x i32> %a, <2 x
ret void
}
-; FUNC-LABEL: @test_concat_v4i32
+; FUNC-LABEL: {{^}}test_concat_v4i32:
; SI-NOT: S_MOV_B32 s{{[0-9]}}, 0x80f000
; SI-NOT: MOVREL
define void @test_concat_v4i32(<8 x i32> addrspace(1)* %out, <4 x i32> %a, <4 x i32> %b) nounwind {
@@ -31,7 +31,7 @@ define void @test_concat_v4i32(<8 x i32> addrspace(1)* %out, <4 x i32> %a, <4 x
ret void
}
-; FUNC-LABEL: @test_concat_v8i32
+; FUNC-LABEL: {{^}}test_concat_v8i32:
; SI-NOT: S_MOV_B32 s{{[0-9]}}, 0x80f000
; SI-NOT: MOVREL
define void @test_concat_v8i32(<16 x i32> addrspace(1)* %out, <8 x i32> %a, <8 x i32> %b) nounwind {
@@ -40,7 +40,7 @@ define void @test_concat_v8i32(<16 x i32> addrspace(1)* %out, <8 x i32> %a, <8 x
ret void
}
-; FUNC-LABEL: @test_concat_v16i32
+; FUNC-LABEL: {{^}}test_concat_v16i32:
; SI-NOT: S_MOV_B32 s{{[0-9]}}, 0x80f000
; SI-NOT: MOVREL
define void @test_concat_v16i32(<32 x i32> addrspace(1)* %out, <16 x i32> %a, <16 x i32> %b) nounwind {
@@ -49,7 +49,7 @@ define void @test_concat_v16i32(<32 x i32> addrspace(1)* %out, <16 x i32> %a, <1
ret void
}
-; FUNC-LABEL: @test_concat_v1f32
+; FUNC-LABEL: {{^}}test_concat_v1f32:
; SI-NOT: S_MOV_B32 s{{[0-9]}}, 0x80f000
; SI-NOT: MOVREL
define void @test_concat_v1f32(<2 x float> addrspace(1)* %out, <1 x float> %a, <1 x float> %b) nounwind {
@@ -58,7 +58,7 @@ define void @test_concat_v1f32(<2 x float> addrspace(1)* %out, <1 x float> %a, <
ret void
}
-; FUNC-LABEL: @test_concat_v2f32
+; FUNC-LABEL: {{^}}test_concat_v2f32:
; SI-NOT: S_MOV_B32 s{{[0-9]}}, 0x80f000
; SI-NOT: MOVREL
define void @test_concat_v2f32(<4 x float> addrspace(1)* %out, <2 x float> %a, <2 x float> %b) nounwind {
@@ -67,7 +67,7 @@ define void @test_concat_v2f32(<4 x float> addrspace(1)* %out, <2 x float> %a, <
ret void
}
-; FUNC-LABEL: @test_concat_v4f32
+; FUNC-LABEL: {{^}}test_concat_v4f32:
; SI-NOT: S_MOV_B32 s{{[0-9]}}, 0x80f000
; SI-NOT: MOVREL
define void @test_concat_v4f32(<8 x float> addrspace(1)* %out, <4 x float> %a, <4 x float> %b) nounwind {
@@ -76,7 +76,7 @@ define void @test_concat_v4f32(<8 x float> addrspace(1)* %out, <4 x float> %a, <
ret void
}
-; FUNC-LABEL: @test_concat_v8f32
+; FUNC-LABEL: {{^}}test_concat_v8f32:
; SI-NOT: S_MOV_B32 s{{[0-9]}}, 0x80f000
; SI-NOT: MOVREL
define void @test_concat_v8f32(<16 x float> addrspace(1)* %out, <8 x float> %a, <8 x float> %b) nounwind {
@@ -85,7 +85,7 @@ define void @test_concat_v8f32(<16 x float> addrspace(1)* %out, <8 x float> %a,
ret void
}
-; FUNC-LABEL: @test_concat_v16f32
+; FUNC-LABEL: {{^}}test_concat_v16f32:
; SI-NOT: S_MOV_B32 s{{[0-9]}}, 0x80f000
; SI-NOT: MOVREL
define void @test_concat_v16f32(<32 x float> addrspace(1)* %out, <16 x float> %a, <16 x float> %b) nounwind {
@@ -94,7 +94,7 @@ define void @test_concat_v16f32(<32 x float> addrspace(1)* %out, <16 x float> %a
ret void
}
-; FUNC-LABEL: @test_concat_v1i64
+; FUNC-LABEL: {{^}}test_concat_v1i64:
; SI-NOT: S_MOV_B32 s{{[0-9]}}, 0x80f000
; SI-NOT: MOVREL
define void @test_concat_v1i64(<2 x double> addrspace(1)* %out, <1 x double> %a, <1 x double> %b) nounwind {
@@ -103,7 +103,7 @@ define void @test_concat_v1i64(<2 x double> addrspace(1)* %out, <1 x double> %a,
ret void
}
-; FUNC-LABEL: @test_concat_v2i64
+; FUNC-LABEL: {{^}}test_concat_v2i64:
; SI-NOT: S_MOV_B32 s{{[0-9]}}, 0x80f000
; SI-NOT: MOVREL
define void @test_concat_v2i64(<4 x double> addrspace(1)* %out, <2 x double> %a, <2 x double> %b) nounwind {
@@ -112,7 +112,7 @@ define void @test_concat_v2i64(<4 x double> addrspace(1)* %out, <2 x double> %a,
ret void
}
-; FUNC-LABEL: @test_concat_v4i64
+; FUNC-LABEL: {{^}}test_concat_v4i64:
; SI-NOT: S_MOV_B32 s{{[0-9]}}, 0x80f000
; SI-NOT: MOVREL
define void @test_concat_v4i64(<8 x double> addrspace(1)* %out, <4 x double> %a, <4 x double> %b) nounwind {
@@ -121,7 +121,7 @@ define void @test_concat_v4i64(<8 x double> addrspace(1)* %out, <4 x double> %a,
ret void
}
-; FUNC-LABEL: @test_concat_v8i64
+; FUNC-LABEL: {{^}}test_concat_v8i64:
; SI-NOT: S_MOV_B32 s{{[0-9]}}, 0x80f000
; SI-NOT: MOVREL
define void @test_concat_v8i64(<16 x double> addrspace(1)* %out, <8 x double> %a, <8 x double> %b) nounwind {
@@ -130,7 +130,7 @@ define void @test_concat_v8i64(<16 x double> addrspace(1)* %out, <8 x double> %a
ret void
}
-; FUNC-LABEL: @test_concat_v16i64
+; FUNC-LABEL: {{^}}test_concat_v16i64:
; SI-NOT: S_MOV_B32 s{{[0-9]}}, 0x80f000
; SI-NOT: MOVREL
define void @test_concat_v16i64(<32 x double> addrspace(1)* %out, <16 x double> %a, <16 x double> %b) nounwind {
@@ -139,7 +139,7 @@ define void @test_concat_v16i64(<32 x double> addrspace(1)* %out, <16 x double>
ret void
}
-; FUNC-LABEL: @test_concat_v1f64
+; FUNC-LABEL: {{^}}test_concat_v1f64:
; SI-NOT: S_MOV_B32 s{{[0-9]}}, 0x80f000
; SI-NOT: MOVREL
define void @test_concat_v1f64(<2 x double> addrspace(1)* %out, <1 x double> %a, <1 x double> %b) nounwind {
@@ -148,7 +148,7 @@ define void @test_concat_v1f64(<2 x double> addrspace(1)* %out, <1 x double> %a,
ret void
}
-; FUNC-LABEL: @test_concat_v2f64
+; FUNC-LABEL: {{^}}test_concat_v2f64:
; SI-NOT: S_MOV_B32 s{{[0-9]}}, 0x80f000
; SI-NOT: MOVREL
define void @test_concat_v2f64(<4 x double> addrspace(1)* %out, <2 x double> %a, <2 x double> %b) nounwind {
@@ -157,7 +157,7 @@ define void @test_concat_v2f64(<4 x double> addrspace(1)* %out, <2 x double> %a,
ret void
}
-; FUNC-LABEL: @test_concat_v4f64
+; FUNC-LABEL: {{^}}test_concat_v4f64:
; SI-NOT: S_MOV_B32 s{{[0-9]}}, 0x80f000
; SI-NOT: MOVREL
define void @test_concat_v4f64(<8 x double> addrspace(1)* %out, <4 x double> %a, <4 x double> %b) nounwind {
@@ -166,7 +166,7 @@ define void @test_concat_v4f64(<8 x double> addrspace(1)* %out, <4 x double> %a,
ret void
}
-; FUNC-LABEL: @test_concat_v8f64
+; FUNC-LABEL: {{^}}test_concat_v8f64:
; SI-NOT: S_MOV_B32 s{{[0-9]}}, 0x80f000
; SI-NOT: MOVREL
define void @test_concat_v8f64(<16 x double> addrspace(1)* %out, <8 x double> %a, <8 x double> %b) nounwind {
@@ -175,7 +175,7 @@ define void @test_concat_v8f64(<16 x double> addrspace(1)* %out, <8 x double> %a
ret void
}
-; FUNC-LABEL: @test_concat_v16f64
+; FUNC-LABEL: {{^}}test_concat_v16f64:
; SI-NOT: S_MOV_B32 s{{[0-9]}}, 0x80f000
; SI-NOT: MOVREL
define void @test_concat_v16f64(<32 x double> addrspace(1)* %out, <16 x double> %a, <16 x double> %b) nounwind {
@@ -184,7 +184,7 @@ define void @test_concat_v16f64(<32 x double> addrspace(1)* %out, <16 x double>
ret void
}
-; FUNC-LABEL: @test_concat_v1i1
+; FUNC-LABEL: {{^}}test_concat_v1i1:
; SI-NOT: S_MOV_B32 s{{[0-9]}}, 0x80f000
; SI-NOT: MOVREL
define void @test_concat_v1i1(<2 x i1> addrspace(1)* %out, <1 x i1> %a, <1 x i1> %b) nounwind {
@@ -193,7 +193,7 @@ define void @test_concat_v1i1(<2 x i1> addrspace(1)* %out, <1 x i1> %a, <1 x i1>
ret void
}
-; FUNC-LABEL: @test_concat_v2i1
+; FUNC-LABEL: {{^}}test_concat_v2i1:
; SI-NOT: S_MOV_B32 s{{[0-9]}}, 0x80f000
; SI-NOT: MOVREL
define void @test_concat_v2i1(<4 x i1> addrspace(1)* %out, <2 x i1> %a, <2 x i1> %b) nounwind {
@@ -202,7 +202,7 @@ define void @test_concat_v2i1(<4 x i1> addrspace(1)* %out, <2 x i1> %a, <2 x i1>
ret void
}
-; FUNC-LABEL: @test_concat_v4i1
+; FUNC-LABEL: {{^}}test_concat_v4i1:
; SI-NOT: S_MOV_B32 s{{[0-9]}}, 0x80f000
; SI-NOT: MOVREL
define void @test_concat_v4i1(<8 x i1> addrspace(1)* %out, <4 x i1> %a, <4 x i1> %b) nounwind {
@@ -211,7 +211,7 @@ define void @test_concat_v4i1(<8 x i1> addrspace(1)* %out, <4 x i1> %a, <4 x i1>
ret void
}
-; FUNC-LABEL: @test_concat_v8i1
+; FUNC-LABEL: {{^}}test_concat_v8i1:
; SI-NOT: S_MOV_B32 s{{[0-9]}}, 0x80f000
; SI-NOT: MOVREL
define void @test_concat_v8i1(<16 x i1> addrspace(1)* %out, <8 x i1> %a, <8 x i1> %b) nounwind {
@@ -220,7 +220,7 @@ define void @test_concat_v8i1(<16 x i1> addrspace(1)* %out, <8 x i1> %a, <8 x i1
ret void
}
-; FUNC-LABEL: @test_concat_v16i1
+; FUNC-LABEL: {{^}}test_concat_v16i1:
; SI-NOT: S_MOV_B32 s{{[0-9]}}, 0x80f000
; SI-NOT: MOVREL
define void @test_concat_v16i1(<32 x i1> addrspace(1)* %out, <16 x i1> %a, <16 x i1> %b) nounwind {
@@ -229,7 +229,7 @@ define void @test_concat_v16i1(<32 x i1> addrspace(1)* %out, <16 x i1> %a, <16 x
ret void
}
-; FUNC-LABEL: @test_concat_v32i1
+; FUNC-LABEL: {{^}}test_concat_v32i1:
; SI-NOT: S_MOV_B32 s{{[0-9]}}, 0x80f000
; SI-NOT: MOVREL
define void @test_concat_v32i1(<64 x i1> addrspace(1)* %out, <32 x i1> %a, <32 x i1> %b) nounwind {
@@ -238,7 +238,7 @@ define void @test_concat_v32i1(<64 x i1> addrspace(1)* %out, <32 x i1> %a, <32 x
ret void
}
-; FUNC-LABEL: @test_concat_v1i16
+; FUNC-LABEL: {{^}}test_concat_v1i16:
; SI-NOT: S_MOV_B32 s{{[0-9]}}, 0x80f000
; SI-NOT: MOVREL
define void @test_concat_v1i16(<2 x i16> addrspace(1)* %out, <1 x i16> %a, <1 x i16> %b) nounwind {
@@ -247,7 +247,7 @@ define void @test_concat_v1i16(<2 x i16> addrspace(1)* %out, <1 x i16> %a, <1 x
ret void
}
-; FUNC-LABEL: @test_concat_v2i16
+; FUNC-LABEL: {{^}}test_concat_v2i16:
; SI-NOT: S_MOV_B32 s{{[0-9]}}, 0x80f000
; SI-NOT: MOVREL
define void @test_concat_v2i16(<4 x i16> addrspace(1)* %out, <2 x i16> %a, <2 x i16> %b) nounwind {
@@ -256,7 +256,7 @@ define void @test_concat_v2i16(<4 x i16> addrspace(1)* %out, <2 x i16> %a, <2 x
ret void
}
-; FUNC-LABEL: @test_concat_v4i16
+; FUNC-LABEL: {{^}}test_concat_v4i16:
; SI-NOT: S_MOV_B32 s{{[0-9]}}, 0x80f000
; SI-NOT: MOVREL
define void @test_concat_v4i16(<8 x i16> addrspace(1)* %out, <4 x i16> %a, <4 x i16> %b) nounwind {
@@ -265,7 +265,7 @@ define void @test_concat_v4i16(<8 x i16> addrspace(1)* %out, <4 x i16> %a, <4 x
ret void
}
-; FUNC-LABEL: @test_concat_v8i16
+; FUNC-LABEL: {{^}}test_concat_v8i16:
; SI-NOT: S_MOV_B32 s{{[0-9]}}, 0x80f000
; SI-NOT: MOVREL
define void @test_concat_v8i16(<16 x i16> addrspace(1)* %out, <8 x i16> %a, <8 x i16> %b) nounwind {
@@ -274,7 +274,7 @@ define void @test_concat_v8i16(<16 x i16> addrspace(1)* %out, <8 x i16> %a, <8 x
ret void
}
-; FUNC-LABEL: @test_concat_v16i16
+; FUNC-LABEL: {{^}}test_concat_v16i16:
; SI-NOT: S_MOV_B32 s{{[0-9]}}, 0x80f000
; SI-NOT: MOVREL
define void @test_concat_v16i16(<32 x i16> addrspace(1)* %out, <16 x i16> %a, <16 x i16> %b) nounwind {
diff --git a/test/CodeGen/R600/copy-illegal-type.ll b/test/CodeGen/R600/copy-illegal-type.ll
index f7c2321ae8f..264a570c556 100644
--- a/test/CodeGen/R600/copy-illegal-type.ll
+++ b/test/CodeGen/R600/copy-illegal-type.ll
@@ -1,6 +1,6 @@
; RUN: llc -march=r600 -mcpu=tahiti < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-; FUNC-LABEL: @test_copy_v4i8
+; FUNC-LABEL: {{^}}test_copy_v4i8:
; SI: BUFFER_LOAD_DWORD [[REG:v[0-9]+]]
; SI: BUFFER_STORE_DWORD [[REG]]
; SI: S_ENDPGM
@@ -10,7 +10,7 @@ define void @test_copy_v4i8(<4 x i8> addrspace(1)* %out, <4 x i8> addrspace(1)*
ret void
}
-; FUNC-LABEL: @test_copy_v4i8_x2
+; FUNC-LABEL: {{^}}test_copy_v4i8_x2:
; SI: BUFFER_LOAD_DWORD [[REG:v[0-9]+]]
; SI: BUFFER_STORE_DWORD [[REG]]
; SI: BUFFER_STORE_DWORD [[REG]]
@@ -22,7 +22,7 @@ define void @test_copy_v4i8_x2(<4 x i8> addrspace(1)* %out0, <4 x i8> addrspace(
ret void
}
-; FUNC-LABEL: @test_copy_v4i8_x3
+; FUNC-LABEL: {{^}}test_copy_v4i8_x3:
; SI: BUFFER_LOAD_DWORD [[REG:v[0-9]+]]
; SI: BUFFER_STORE_DWORD [[REG]]
; SI: BUFFER_STORE_DWORD [[REG]]
@@ -36,7 +36,7 @@ define void @test_copy_v4i8_x3(<4 x i8> addrspace(1)* %out0, <4 x i8> addrspace(
ret void
}
-; FUNC-LABEL: @test_copy_v4i8_x4
+; FUNC-LABEL: {{^}}test_copy_v4i8_x4:
; SI: BUFFER_LOAD_DWORD [[REG:v[0-9]+]]
; SI: BUFFER_STORE_DWORD [[REG]]
; SI: BUFFER_STORE_DWORD [[REG]]
@@ -52,7 +52,7 @@ define void @test_copy_v4i8_x4(<4 x i8> addrspace(1)* %out0, <4 x i8> addrspace(
ret void
}
-; FUNC-LABEL: @test_copy_v4i8_extra_use
+; FUNC-LABEL: {{^}}test_copy_v4i8_extra_use:
; SI: BUFFER_LOAD_UBYTE
; SI: BUFFER_LOAD_UBYTE
; SI: BUFFER_LOAD_UBYTE
@@ -88,7 +88,7 @@ define void @test_copy_v4i8_extra_use(<4 x i8> addrspace(1)* %out0, <4 x i8> add
ret void
}
-; FUNC-LABEL: @test_copy_v4i8_x2_extra_use
+; FUNC-LABEL: {{^}}test_copy_v4i8_x2_extra_use:
; SI: BUFFER_LOAD_UBYTE
; SI: BUFFER_LOAD_UBYTE
; SI: BUFFER_LOAD_UBYTE
@@ -127,7 +127,7 @@ define void @test_copy_v4i8_x2_extra_use(<4 x i8> addrspace(1)* %out0, <4 x i8>
ret void
}
-; FUNC-LABEL: @test_copy_v3i8
+; FUNC-LABEL: {{^}}test_copy_v3i8:
; SI-NOT: BFE
; SI-NOT: BFI
; SI: S_ENDPGM
@@ -137,7 +137,7 @@ define void @test_copy_v3i8(<3 x i8> addrspace(1)* %out, <3 x i8> addrspace(1)*
ret void
}
-; FUNC-LABEL: @test_copy_v4i8_volatile_load
+; FUNC-LABEL: {{^}}test_copy_v4i8_volatile_load:
; SI: BUFFER_LOAD_UBYTE
; SI: BUFFER_LOAD_UBYTE
; SI: BUFFER_LOAD_UBYTE
@@ -149,7 +149,7 @@ define void @test_copy_v4i8_volatile_load(<4 x i8> addrspace(1)* %out, <4 x i8>
ret void
}
-; FUNC-LABEL: @test_copy_v4i8_volatile_store
+; FUNC-LABEL: {{^}}test_copy_v4i8_volatile_store:
; SI: BUFFER_LOAD_UBYTE
; SI: BUFFER_LOAD_UBYTE
; SI: BUFFER_LOAD_UBYTE
diff --git a/test/CodeGen/R600/ctlz_zero_undef.ll b/test/CodeGen/R600/ctlz_zero_undef.ll
index 1340ef98c60..c7f91650729 100644
--- a/test/CodeGen/R600/ctlz_zero_undef.ll
+++ b/test/CodeGen/R600/ctlz_zero_undef.ll
@@ -5,7 +5,7 @@ declare i32 @llvm.ctlz.i32(i32, i1) nounwind readnone
declare <2 x i32> @llvm.ctlz.v2i32(<2 x i32>, i1) nounwind readnone
declare <4 x i32> @llvm.ctlz.v4i32(<4 x i32>, i1) nounwind readnone
-; FUNC-LABEL: @s_ctlz_zero_undef_i32:
+; FUNC-LABEL: {{^}}s_ctlz_zero_undef_i32:
; SI: S_LOAD_DWORD [[VAL:s[0-9]+]],
; SI: S_FLBIT_I32_B32 [[SRESULT:s[0-9]+]], [[VAL]]
; SI: V_MOV_B32_e32 [[VRESULT:v[0-9]+]], [[SRESULT]]
@@ -19,7 +19,7 @@ define void @s_ctlz_zero_undef_i32(i32 addrspace(1)* noalias %out, i32 %val) nou
ret void
}
-; FUNC-LABEL: @v_ctlz_zero_undef_i32:
+; FUNC-LABEL: {{^}}v_ctlz_zero_undef_i32:
; SI: BUFFER_LOAD_DWORD [[VAL:v[0-9]+]],
; SI: V_FFBH_U32_e32 [[RESULT:v[0-9]+]], [[VAL]]
; SI: BUFFER_STORE_DWORD [[RESULT]],
@@ -33,7 +33,7 @@ define void @v_ctlz_zero_undef_i32(i32 addrspace(1)* noalias %out, i32 addrspace
ret void
}
-; FUNC-LABEL: @v_ctlz_zero_undef_v2i32:
+; FUNC-LABEL: {{^}}v_ctlz_zero_undef_v2i32:
; SI: BUFFER_LOAD_DWORDX2
; SI: V_FFBH_U32_e32
; SI: V_FFBH_U32_e32
@@ -49,7 +49,7 @@ define void @v_ctlz_zero_undef_v2i32(<2 x i32> addrspace(1)* noalias %out, <2 x
ret void
}
-; FUNC-LABEL: @v_ctlz_zero_undef_v4i32:
+; FUNC-LABEL: {{^}}v_ctlz_zero_undef_v4i32:
; SI: BUFFER_LOAD_DWORDX4
; SI: V_FFBH_U32_e32
; SI: V_FFBH_U32_e32
diff --git a/test/CodeGen/R600/ctpop.ll b/test/CodeGen/R600/ctpop.ll
index 61d494367f6..1e9a1553595 100644
--- a/test/CodeGen/R600/ctpop.ll
+++ b/test/CodeGen/R600/ctpop.ll
@@ -7,7 +7,7 @@ declare <4 x i32> @llvm.ctpop.v4i32(<4 x i32>) nounwind readnone
declare <8 x i32> @llvm.ctpop.v8i32(<8 x i32>) nounwind readnone
declare <16 x i32> @llvm.ctpop.v16i32(<16 x i32>) nounwind readnone
-; FUNC-LABEL: @s_ctpop_i32:
+; FUNC-LABEL: {{^}}s_ctpop_i32:
; SI: S_LOAD_DWORD [[SVAL:s[0-9]+]],
; SI: S_BCNT1_I32_B32 [[SRESULT:s[0-9]+]], [[SVAL]]
; SI: V_MOV_B32_e32 [[VRESULT:v[0-9]+]], [[SRESULT]]
@@ -22,7 +22,7 @@ define void @s_ctpop_i32(i32 addrspace(1)* noalias %out, i32 %val) nounwind {
}
; XXX - Why 0 in register?
-; FUNC-LABEL: @v_ctpop_i32:
+; FUNC-LABEL: {{^}}v_ctpop_i32:
; SI: BUFFER_LOAD_DWORD [[VAL:v[0-9]+]],
; SI: V_MOV_B32_e32 [[VZERO:v[0-9]+]], 0
; SI: V_BCNT_U32_B32_e32 [[RESULT:v[0-9]+]], [[VAL]], [[VZERO]]
@@ -37,7 +37,7 @@ define void @v_ctpop_i32(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noali
ret void
}
-; FUNC-LABEL: @v_ctpop_add_chain_i32
+; FUNC-LABEL: {{^}}v_ctpop_add_chain_i32:
; SI: BUFFER_LOAD_DWORD [[VAL0:v[0-9]+]],
; SI: BUFFER_LOAD_DWORD [[VAL1:v[0-9]+]],
; SI: V_MOV_B32_e32 [[VZERO:v[0-9]+]], 0
@@ -58,7 +58,7 @@ define void @v_ctpop_add_chain_i32(i32 addrspace(1)* noalias %out, i32 addrspace
ret void
}
-; FUNC-LABEL: @v_ctpop_add_sgpr_i32
+; FUNC-LABEL: {{^}}v_ctpop_add_sgpr_i32:
; SI: BUFFER_LOAD_DWORD [[VAL0:v[0-9]+]],
; SI-NEXT: S_WAITCNT
; SI-NEXT: V_BCNT_U32_B32_e64 [[RESULT:v[0-9]+]], [[VAL0]], s{{[0-9]+}}
@@ -72,7 +72,7 @@ define void @v_ctpop_add_sgpr_i32(i32 addrspace(1)* noalias %out, i32 addrspace(
ret void
}
-; FUNC-LABEL: @v_ctpop_v2i32:
+; FUNC-LABEL: {{^}}v_ctpop_v2i32:
; SI: V_BCNT_U32_B32_e32
; SI: V_BCNT_U32_B32_e32
; SI: S_ENDPGM
@@ -86,7 +86,7 @@ define void @v_ctpop_v2i32(<2 x i32> addrspace(1)* noalias %out, <2 x i32> addrs
ret void
}
-; FUNC-LABEL: @v_ctpop_v4i32:
+; FUNC-LABEL: {{^}}v_ctpop_v4i32:
; SI: V_BCNT_U32_B32_e32
; SI: V_BCNT_U32_B32_e32
; SI: V_BCNT_U32_B32_e32
@@ -104,7 +104,7 @@ define void @v_ctpop_v4i32(<4 x i32> addrspace(1)* noalias %out, <4 x i32> addrs
ret void
}
-; FUNC-LABEL: @v_ctpop_v8i32:
+; FUNC-LABEL: {{^}}v_ctpop_v8i32:
; SI: V_BCNT_U32_B32_e32
; SI: V_BCNT_U32_B32_e32
; SI: V_BCNT_U32_B32_e32
@@ -130,7 +130,7 @@ define void @v_ctpop_v8i32(<8 x i32> addrspace(1)* noalias %out, <8 x i32> addrs
ret void
}
-; FUNC-LABEL: @v_ctpop_v16i32:
+; FUNC-LABEL: {{^}}v_ctpop_v16i32:
; SI: V_BCNT_U32_B32_e32
; SI: V_BCNT_U32_B32_e32
; SI: V_BCNT_U32_B32_e32
@@ -172,7 +172,7 @@ define void @v_ctpop_v16i32(<16 x i32> addrspace(1)* noalias %out, <16 x i32> ad
ret void
}
-; FUNC-LABEL: @v_ctpop_i32_add_inline_constant:
+; FUNC-LABEL: {{^}}v_ctpop_i32_add_inline_constant:
; SI: BUFFER_LOAD_DWORD [[VAL:v[0-9]+]],
; SI: V_BCNT_U32_B32_e64 [[RESULT:v[0-9]+]], [[VAL]], 4
; SI: BUFFER_STORE_DWORD [[RESULT]],
@@ -187,7 +187,7 @@ define void @v_ctpop_i32_add_inline_constant(i32 addrspace(1)* noalias %out, i32
ret void
}
-; FUNC-LABEL: @v_ctpop_i32_add_inline_constant_inv:
+; FUNC-LABEL: {{^}}v_ctpop_i32_add_inline_constant_inv:
; SI: BUFFER_LOAD_DWORD [[VAL:v[0-9]+]],
; SI: V_BCNT_U32_B32_e64 [[RESULT:v[0-9]+]], [[VAL]], 4
; SI: BUFFER_STORE_DWORD [[RESULT]],
@@ -202,7 +202,7 @@ define void @v_ctpop_i32_add_inline_constant_inv(i32 addrspace(1)* noalias %out,
ret void
}
-; FUNC-LABEL: @v_ctpop_i32_add_literal:
+; FUNC-LABEL: {{^}}v_ctpop_i32_add_literal:
; SI: BUFFER_LOAD_DWORD [[VAL:v[0-9]+]],
; SI: V_MOV_B32_e32 [[LIT:v[0-9]+]], 0x1869f
; SI: V_BCNT_U32_B32_e32 [[RESULT:v[0-9]+]], [[VAL]], [[LIT]]
@@ -216,7 +216,7 @@ define void @v_ctpop_i32_add_literal(i32 addrspace(1)* noalias %out, i32 addrspa
ret void
}
-; FUNC-LABEL: @v_ctpop_i32_add_var:
+; FUNC-LABEL: {{^}}v_ctpop_i32_add_var:
; SI-DAG: BUFFER_LOAD_DWORD [[VAL:v[0-9]+]],
; SI-DAG: S_LOAD_DWORD [[VAR:s[0-9]+]],
; SI: V_BCNT_U32_B32_e64 [[RESULT:v[0-9]+]], [[VAL]], [[VAR]]
@@ -232,7 +232,7 @@ define void @v_ctpop_i32_add_var(i32 addrspace(1)* noalias %out, i32 addrspace(1
ret void
}
-; FUNC-LABEL: @v_ctpop_i32_add_var_inv:
+; FUNC-LABEL: {{^}}v_ctpop_i32_add_var_inv:
; SI-DAG: BUFFER_LOAD_DWORD [[VAL:v[0-9]+]],
; SI-DAG: S_LOAD_DWORD [[VAR:s[0-9]+]],
; SI: V_BCNT_U32_B32_e64 [[RESULT:v[0-9]+]], [[VAL]], [[VAR]]
@@ -248,7 +248,7 @@ define void @v_ctpop_i32_add_var_inv(i32 addrspace(1)* noalias %out, i32 addrspa
ret void
}
-; FUNC-LABEL: @v_ctpop_i32_add_vvar_inv
+; FUNC-LABEL: {{^}}v_ctpop_i32_add_vvar_inv:
; SI-DAG: BUFFER_LOAD_DWORD [[VAL:v[0-9]+]], s[{{[0-9]+:[0-9]+}}], {{0$}}
; SI-DAG: BUFFER_LOAD_DWORD [[VAR:v[0-9]+]], s[{{[0-9]+:[0-9]+}}], 0 offset:0x10
; SI: V_BCNT_U32_B32_e32 [[RESULT:v[0-9]+]], [[VAL]], [[VAR]]
@@ -269,7 +269,7 @@ define void @v_ctpop_i32_add_vvar_inv(i32 addrspace(1)* noalias %out, i32 addrsp
; FIXME: We currently disallow SALU instructions in all branches,
; but there are some cases when they should be allowed.
-; FUNC-LABEL: @ctpop_i32_in_br
+; FUNC-LABEL: {{^}}ctpop_i32_in_br:
; SI: S_LOAD_DWORD [[VAL:s[0-9]+]], s[{{[0-9]+:[0-9]+}}], 0xd
; SI: S_BCNT1_I32_B32 [[SRESULT:s[0-9]+]], [[VAL]]
; SI: V_MOV_B32_e32 [[RESULT]], [[SRESULT]]
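A minimal IR sketch (hypothetical kernel, not taken from this patch) of the chained-popcount shape the v_ctpop_add_chain_i32 checks above describe: summing two @llvm.ctpop.i32 results lets the second V_BCNT_U32_B32 take the first count as its add operand.

declare i32 @llvm.ctpop.i32(i32) nounwind readnone

define void @ctpop_add_chain_sketch(i32 addrspace(1)* noalias %out, i32 %a, i32 %b) nounwind {
  %cnt0 = call i32 @llvm.ctpop.i32(i32 %a) nounwind readnone  ; first V_BCNT, added to 0
  %cnt1 = call i32 @llvm.ctpop.i32(i32 %b) nounwind readnone  ; second V_BCNT, can add %cnt0
  %sum = add i32 %cnt0, %cnt1
  store i32 %sum, i32 addrspace(1)* %out, align 4
  ret void
}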
diff --git a/test/CodeGen/R600/ctpop64.ll b/test/CodeGen/R600/ctpop64.ll
index 76091c522ec..05f2ccc3991 100644
--- a/test/CodeGen/R600/ctpop64.ll
+++ b/test/CodeGen/R600/ctpop64.ll
@@ -6,7 +6,7 @@ declare <4 x i64> @llvm.ctpop.v4i64(<4 x i64>) nounwind readnone
declare <8 x i64> @llvm.ctpop.v8i64(<8 x i64>) nounwind readnone
declare <16 x i64> @llvm.ctpop.v16i64(<16 x i64>) nounwind readnone
-; FUNC-LABEL: @s_ctpop_i64:
+; FUNC-LABEL: {{^}}s_ctpop_i64:
; SI: S_LOAD_DWORDX2 [[SVAL:s\[[0-9]+:[0-9]+\]]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
; SI: S_BCNT1_I32_B64 [[SRESULT:s[0-9]+]], [[SVAL]]
; SI: V_MOV_B32_e32 [[VRESULT:v[0-9]+]], [[SRESULT]]
@@ -19,7 +19,7 @@ define void @s_ctpop_i64(i32 addrspace(1)* noalias %out, i64 %val) nounwind {
ret void
}
-; FUNC-LABEL: @v_ctpop_i64:
+; FUNC-LABEL: {{^}}v_ctpop_i64:
; SI: BUFFER_LOAD_DWORDX2 v{{\[}}[[LOVAL:[0-9]+]]:[[HIVAL:[0-9]+]]{{\]}},
; SI: V_MOV_B32_e32 [[VZERO:v[0-9]+]], 0
; SI: V_BCNT_U32_B32_e32 [[MIDRESULT:v[0-9]+]], v[[LOVAL]], [[VZERO]]
@@ -34,7 +34,7 @@ define void @v_ctpop_i64(i32 addrspace(1)* noalias %out, i64 addrspace(1)* noali
ret void
}
-; FUNC-LABEL: @s_ctpop_v2i64:
+; FUNC-LABEL: {{^}}s_ctpop_v2i64:
; SI: S_BCNT1_I32_B64
; SI: S_BCNT1_I32_B64
; SI: S_ENDPGM
@@ -45,7 +45,7 @@ define void @s_ctpop_v2i64(<2 x i32> addrspace(1)* noalias %out, <2 x i64> %val)
ret void
}
-; FUNC-LABEL: @s_ctpop_v4i64:
+; FUNC-LABEL: {{^}}s_ctpop_v4i64:
; SI: S_BCNT1_I32_B64
; SI: S_BCNT1_I32_B64
; SI: S_BCNT1_I32_B64
@@ -58,7 +58,7 @@ define void @s_ctpop_v4i64(<4 x i32> addrspace(1)* noalias %out, <4 x i64> %val)
ret void
}
-; FUNC-LABEL: @v_ctpop_v2i64:
+; FUNC-LABEL: {{^}}v_ctpop_v2i64:
; SI: V_BCNT_U32_B32
; SI: V_BCNT_U32_B32
; SI: V_BCNT_U32_B32
@@ -72,7 +72,7 @@ define void @v_ctpop_v2i64(<2 x i32> addrspace(1)* noalias %out, <2 x i64> addrs
ret void
}
-; FUNC-LABEL: @v_ctpop_v4i64:
+; FUNC-LABEL: {{^}}v_ctpop_v4i64:
; SI: V_BCNT_U32_B32
; SI: V_BCNT_U32_B32
; SI: V_BCNT_U32_B32
@@ -93,7 +93,7 @@ define void @v_ctpop_v4i64(<4 x i32> addrspace(1)* noalias %out, <4 x i64> addrs
; FIXME: We currently disallow SALU instructions in all branches,
; but there are some cases when they should be allowed.
-; FUNC-LABEL: @ctpop_i64_in_br
+; FUNC-LABEL: {{^}}ctpop_i64_in_br:
; SI: S_LOAD_DWORDX2 s{{\[}}[[LOVAL:[0-9]+]]:[[HIVAL:[0-9]+]]{{\]}}, s[{{[0-9]+:[0-9]+}}], 0xd
; SI: S_BCNT1_I32_B64 [[RESULT:s[0-9]+]], {{s\[}}[[LOVAL]]:[[HIVAL]]{{\]}}
; SI: V_MOV_B32_e32 v[[VLO:[0-9]+]], [[RESULT]]
diff --git a/test/CodeGen/R600/cttz_zero_undef.ll b/test/CodeGen/R600/cttz_zero_undef.ll
index 9c4a3558d09..6e96828866a 100644
--- a/test/CodeGen/R600/cttz_zero_undef.ll
+++ b/test/CodeGen/R600/cttz_zero_undef.ll
@@ -5,7 +5,7 @@ declare i32 @llvm.cttz.i32(i32, i1) nounwind readnone
declare <2 x i32> @llvm.cttz.v2i32(<2 x i32>, i1) nounwind readnone
declare <4 x i32> @llvm.cttz.v4i32(<4 x i32>, i1) nounwind readnone
-; FUNC-LABEL: @s_cttz_zero_undef_i32:
+; FUNC-LABEL: {{^}}s_cttz_zero_undef_i32:
; SI: S_LOAD_DWORD [[VAL:s[0-9]+]],
; SI: S_FF1_I32_B32 [[SRESULT:s[0-9]+]], [[VAL]]
; SI: V_MOV_B32_e32 [[VRESULT:v[0-9]+]], [[SRESULT]]
@@ -19,7 +19,7 @@ define void @s_cttz_zero_undef_i32(i32 addrspace(1)* noalias %out, i32 %val) nou
ret void
}
-; FUNC-LABEL: @v_cttz_zero_undef_i32:
+; FUNC-LABEL: {{^}}v_cttz_zero_undef_i32:
; SI: BUFFER_LOAD_DWORD [[VAL:v[0-9]+]],
; SI: V_FFBL_B32_e32 [[RESULT:v[0-9]+]], [[VAL]]
; SI: BUFFER_STORE_DWORD [[RESULT]],
@@ -33,7 +33,7 @@ define void @v_cttz_zero_undef_i32(i32 addrspace(1)* noalias %out, i32 addrspace
ret void
}
-; FUNC-LABEL: @v_cttz_zero_undef_v2i32:
+; FUNC-LABEL: {{^}}v_cttz_zero_undef_v2i32:
; SI: BUFFER_LOAD_DWORDX2
; SI: V_FFBL_B32_e32
; SI: V_FFBL_B32_e32
@@ -49,7 +49,7 @@ define void @v_cttz_zero_undef_v2i32(<2 x i32> addrspace(1)* noalias %out, <2 x
ret void
}
-; FUNC-LABEL: @v_cttz_zero_undef_v4i32:
+; FUNC-LABEL: {{^}}v_cttz_zero_undef_v4i32:
; SI: BUFFER_LOAD_DWORDX4
; SI: V_FFBL_B32_e32
; SI: V_FFBL_B32_e32
diff --git a/test/CodeGen/R600/cvt_f32_ubyte.ll b/test/CodeGen/R600/cvt_f32_ubyte.ll
index 3e667b1822f..fa89f64d19e 100644
--- a/test/CodeGen/R600/cvt_f32_ubyte.ll
+++ b/test/CodeGen/R600/cvt_f32_ubyte.ll
@@ -1,6 +1,6 @@
; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
-; SI-LABEL: @load_i8_to_f32:
+; SI-LABEL: {{^}}load_i8_to_f32:
; SI: BUFFER_LOAD_UBYTE [[LOADREG:v[0-9]+]],
; SI-NOT: BFE
; SI-NOT: LSHR
@@ -13,7 +13,7 @@ define void @load_i8_to_f32(float addrspace(1)* noalias %out, i8 addrspace(1)* n
ret void
}
-; SI-LABEL: @load_v2i8_to_v2f32:
+; SI-LABEL: {{^}}load_v2i8_to_v2f32:
; SI: BUFFER_LOAD_USHORT [[LOADREG:v[0-9]+]],
; SI-NOT: BFE
; SI-NOT: LSHR
@@ -28,7 +28,7 @@ define void @load_v2i8_to_v2f32(<2 x float> addrspace(1)* noalias %out, <2 x i8>
ret void
}
-; SI-LABEL: @load_v3i8_to_v3f32:
+; SI-LABEL: {{^}}load_v3i8_to_v3f32:
; SI-NOT: BFE
; SI-NOT: V_CVT_F32_UBYTE3_e32
; SI-DAG: V_CVT_F32_UBYTE2_e32
@@ -42,7 +42,7 @@ define void @load_v3i8_to_v3f32(<3 x float> addrspace(1)* noalias %out, <3 x i8>
ret void
}
-; SI-LABEL: @load_v4i8_to_v4f32:
+; SI-LABEL: {{^}}load_v4i8_to_v4f32:
; We can't use BUFFER_LOAD_DWORD here, because the load is byte aligned, and
; BUFFER_LOAD_DWORD requires dword alignment.
; SI: BUFFER_LOAD_USHORT
@@ -66,7 +66,7 @@ define void @load_v4i8_to_v4f32(<4 x float> addrspace(1)* noalias %out, <4 x i8>
; for each component, but computeKnownBits doesn't handle vectors very
; well.
-; SI-LABEL: @load_v4i8_to_v4f32_2_uses:
+; SI-LABEL: {{^}}load_v4i8_to_v4f32_2_uses:
; SI: BUFFER_LOAD_UBYTE
; SI: BUFFER_LOAD_UBYTE
; SI: BUFFER_LOAD_UBYTE
@@ -93,7 +93,7 @@ define void @load_v4i8_to_v4f32_2_uses(<4 x float> addrspace(1)* noalias %out, <
}
; Make sure this doesn't crash.
-; SI-LABEL: @load_v7i8_to_v7f32:
+; SI-LABEL: {{^}}load_v7i8_to_v7f32:
; SI: S_ENDPGM
define void @load_v7i8_to_v7f32(<7 x float> addrspace(1)* noalias %out, <7 x i8> addrspace(1)* noalias %in) nounwind {
%load = load <7 x i8> addrspace(1)* %in, align 1
@@ -102,7 +102,7 @@ define void @load_v7i8_to_v7f32(<7 x float> addrspace(1)* noalias %out, <7 x i8>
ret void
}
-; SI-LABEL: @load_v8i8_to_v8f32:
+; SI-LABEL: {{^}}load_v8i8_to_v8f32:
; SI: BUFFER_LOAD_DWORDX2 v{{\[}}[[LOLOAD:[0-9]+]]:[[HILOAD:[0-9]+]]{{\]}},
; SI-NOT: BFE
; SI-NOT: LSHR
@@ -131,7 +131,7 @@ define void @load_v8i8_to_v8f32(<8 x float> addrspace(1)* noalias %out, <8 x i8>
ret void
}
-; SI-LABEL: @i8_zext_inreg_i32_to_f32:
+; SI-LABEL: {{^}}i8_zext_inreg_i32_to_f32:
; SI: BUFFER_LOAD_DWORD [[LOADREG:v[0-9]+]],
; SI: V_ADD_I32_e32 [[ADD:v[0-9]+]], 2, [[LOADREG]]
; SI-NEXT: V_CVT_F32_UBYTE0_e32 [[CONV:v[0-9]+]], [[ADD]]
@@ -145,7 +145,7 @@ define void @i8_zext_inreg_i32_to_f32(float addrspace(1)* noalias %out, i32 addr
ret void
}
-; SI-LABEL: @i8_zext_inreg_hi1_to_f32:
+; SI-LABEL: {{^}}i8_zext_inreg_hi1_to_f32:
define void @i8_zext_inreg_hi1_to_f32(float addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) nounwind {
%load = load i32 addrspace(1)* %in, align 4
%inreg = and i32 %load, 65280
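A minimal IR sketch (hypothetical function, mirroring the i8_zext_inreg_i32_to_f32 checks above, not taken from this patch) of the zext-in-register pattern: masking an i32 to its low byte before uitofp is intended to fold into a single V_CVT_F32_UBYTE0 that reads the byte directly.

define void @i8_zext_inreg_sketch(float addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) nounwind {
  %load = load i32 addrspace(1)* %in, align 4
  %byte = and i32 %load, 255          ; zext-in-register of the low byte
  %cvt = uitofp i32 %byte to float    ; intended to select V_CVT_F32_UBYTE0
  store float %cvt, float addrspace(1)* %out, align 4
  ret void
}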
diff --git a/test/CodeGen/R600/dagcombiner-bug-illegal-vec4-int-to-fp.ll b/test/CodeGen/R600/dagcombiner-bug-illegal-vec4-int-to-fp.ll
index 6607c1218b5..1e47bfa0c77 100644
--- a/test/CodeGen/R600/dagcombiner-bug-illegal-vec4-int-to-fp.ll
+++ b/test/CodeGen/R600/dagcombiner-bug-illegal-vec4-int-to-fp.ll
@@ -7,7 +7,7 @@
; ISD::UINT_TO_FP and ISD::SINT_TO_FP opcodes.
-; CHECK: @sint
+; CHECK: {{^}}sint:
; CHECK: INT_TO_FLT * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
define void @sint(<4 x float> addrspace(1)* %out, i32 addrspace(1)* %in) {
@@ -21,7 +21,7 @@ entry:
ret void
}
-;CHECK: @uint
+;CHECK: {{^}}uint:
;CHECK: UINT_TO_FLT * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
define void @uint(<4 x float> addrspace(1)* %out, i32 addrspace(1)* %in) {
diff --git a/test/CodeGen/R600/default-fp-mode.ll b/test/CodeGen/R600/default-fp-mode.ll
index b24a7a246fd..935bf97f337 100644
--- a/test/CodeGen/R600/default-fp-mode.ll
+++ b/test/CodeGen/R600/default-fp-mode.ll
@@ -6,7 +6,7 @@
; RUN: llc -march=r600 -mcpu=SI -mattr=-fp32-denormals < %s | FileCheck -check-prefix=DEFAULT -check-prefix=FUNC %s
; RUN: llc -march=r600 -mcpu=SI -mattr=+fp64-denormals < %s | FileCheck -check-prefix=DEFAULT -check-prefix=FUNC %s
-; FUNC-LABEL: @test_kernel
+; FUNC-LABEL: {{^}}test_kernel:
; DEFAULT: FloatMode: 192
; DEFAULT: IeeeMode: 0
diff --git a/test/CodeGen/R600/disconnected-predset-break-bug.ll b/test/CodeGen/R600/disconnected-predset-break-bug.ll
index 012c17b8fe4..858e4b98f3a 100644
--- a/test/CodeGen/R600/disconnected-predset-break-bug.ll
+++ b/test/CodeGen/R600/disconnected-predset-break-bug.ll
@@ -4,7 +4,7 @@
; result. This tests that there are no instructions between the PRED_SET*
; and the PREDICATE_BREAK in this loop.
-; CHECK: @loop_ge
+; CHECK: {{^}}loop_ge:
; CHECK: LOOP_START_DX10
; CHECK: ALU_PUSH_BEFORE
; CHECK-NEXT: JUMP
diff --git a/test/CodeGen/R600/dot4-folding.ll b/test/CodeGen/R600/dot4-folding.ll
index 3e8330f9b3e..dca6a59c6e6 100644
--- a/test/CodeGen/R600/dot4-folding.ll
+++ b/test/CodeGen/R600/dot4-folding.ll
@@ -2,7 +2,7 @@
; Exactly one constant vector can be folded into dot4, which means exactly
; 4 MOV instructions
-; CHECK: @main
+; CHECK: {{^}}main:
; CHECK: MOV
; CHECK: MOV
; CHECK: MOV
diff --git a/test/CodeGen/R600/ds-negative-offset-addressing-mode-loop.ll b/test/CodeGen/R600/ds-negative-offset-addressing-mode-loop.ll
index 231cf3d6883..67c6738a9a9 100644
--- a/test/CodeGen/R600/ds-negative-offset-addressing-mode-loop.ll
+++ b/test/CodeGen/R600/ds-negative-offset-addressing-mode-loop.ll
@@ -5,7 +5,7 @@ declare i32 @llvm.r600.read.tidig.x() #0
declare void @llvm.AMDGPU.barrier.local() #1
; Function Attrs: nounwind
-; CHECK-LABEL: @signed_ds_offset_addressing_loop
+; CHECK-LABEL: {{^}}signed_ds_offset_addressing_loop:
; CHECK: BB0_1:
; CHECK: V_ADD_I32_e32 [[VADDR:v[0-9]+]],
; SI-DAG: DS_READ_B32 v{{[0-9]+}}, [[VADDR]], 0x0
diff --git a/test/CodeGen/R600/extload.ll b/test/CodeGen/R600/extload.ll
index 9725bbfb709..c2c9e17dbda 100644
--- a/test/CodeGen/R600/extload.ll
+++ b/test/CodeGen/R600/extload.ll
@@ -1,7 +1,7 @@
; RUN: llc -march=r600 -mcpu=cypress < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs< %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-; FUNC-LABEL: @anyext_load_i8:
+; FUNC-LABEL: {{^}}anyext_load_i8:
; EG: AND_INT
; EG: 255
define void @anyext_load_i8(i8 addrspace(1)* nocapture noalias %out, i8 addrspace(1)* nocapture noalias %src) nounwind {
@@ -13,7 +13,7 @@ define void @anyext_load_i8(i8 addrspace(1)* nocapture noalias %out, i8 addrspac
ret void
}
-; FUNC-LABEL: @anyext_load_i16:
+; FUNC-LABEL: {{^}}anyext_load_i16:
; EG: AND_INT
; EG: AND_INT
; EG-DAG: 65535
@@ -27,7 +27,7 @@ define void @anyext_load_i16(i16 addrspace(1)* nocapture noalias %out, i16 addrs
ret void
}
-; FUNC-LABEL: @anyext_load_lds_i8:
+; FUNC-LABEL: {{^}}anyext_load_lds_i8:
; EG: AND_INT
; EG: 255
define void @anyext_load_lds_i8(i8 addrspace(3)* nocapture noalias %out, i8 addrspace(3)* nocapture noalias %src) nounwind {
@@ -39,7 +39,7 @@ define void @anyext_load_lds_i8(i8 addrspace(3)* nocapture noalias %out, i8 addr
ret void
}
-; FUNC-LABEL: @anyext_load_lds_i16:
+; FUNC-LABEL: {{^}}anyext_load_lds_i16:
; EG: AND_INT
; EG: AND_INT
; EG-DAG: 65535
@@ -53,7 +53,7 @@ define void @anyext_load_lds_i16(i16 addrspace(3)* nocapture noalias %out, i16 a
ret void
}
-; FUNC-LABEL: @sextload_global_i8_to_i64
+; FUNC-LABEL: {{^}}sextload_global_i8_to_i64:
; SI: BUFFER_LOAD_SBYTE [[LOAD:v[0-9]+]],
; SI: V_ASHRREV_I32_e32 v{{[0-9]+}}, 31, [[LOAD]]
; SI: BUFFER_STORE_DWORDX2
@@ -64,7 +64,7 @@ define void @sextload_global_i8_to_i64(i64 addrspace(1)* %out, i8 addrspace(1)*
ret void
}
-; FUNC-LABEL: @sextload_global_i16_to_i64
+; FUNC-LABEL: {{^}}sextload_global_i16_to_i64:
; SI: BUFFER_LOAD_SSHORT [[LOAD:v[0-9]+]],
; SI: V_ASHRREV_I32_e32 v{{[0-9]+}}, 31, [[LOAD]]
; SI: BUFFER_STORE_DWORDX2
@@ -75,7 +75,7 @@ define void @sextload_global_i16_to_i64(i64 addrspace(1)* %out, i16 addrspace(1)
ret void
}
-; FUNC-LABEL: @sextload_global_i32_to_i64
+; FUNC-LABEL: {{^}}sextload_global_i32_to_i64:
; SI: BUFFER_LOAD_DWORD [[LOAD:v[0-9]+]],
; SI: V_ASHRREV_I32_e32 v{{[0-9]+}}, 31, [[LOAD]]
; SI: BUFFER_STORE_DWORDX2
@@ -86,7 +86,7 @@ define void @sextload_global_i32_to_i64(i64 addrspace(1)* %out, i32 addrspace(1)
ret void
}
-; FUNC-LABEL: @zextload_global_i8_to_i64
+; FUNC-LABEL: {{^}}zextload_global_i8_to_i64:
; SI-DAG: S_MOV_B32 [[ZERO:s[0-9]+]], 0{{$}}
; SI-DAG: BUFFER_LOAD_UBYTE [[LOAD:v[0-9]+]],
; SI: V_MOV_B32_e32 {{v[0-9]+}}, [[ZERO]]
@@ -98,7 +98,7 @@ define void @zextload_global_i8_to_i64(i64 addrspace(1)* %out, i8 addrspace(1)*
ret void
}
-; FUNC-LABEL: @zextload_global_i16_to_i64
+; FUNC-LABEL: {{^}}zextload_global_i16_to_i64:
; SI-DAG: S_MOV_B32 [[ZERO:s[0-9]+]], 0{{$}}
; SI-DAG: BUFFER_LOAD_USHORT [[LOAD:v[0-9]+]],
; SI: V_MOV_B32_e32 {{v[0-9]+}}, [[ZERO]]
@@ -110,7 +110,7 @@ define void @zextload_global_i16_to_i64(i64 addrspace(1)* %out, i16 addrspace(1)
ret void
}
-; FUNC-LABEL: @zextload_global_i32_to_i64
+; FUNC-LABEL: {{^}}zextload_global_i32_to_i64:
; SI-DAG: S_MOV_B32 [[ZERO:s[0-9]+]], 0{{$}}
; SI-DAG: BUFFER_LOAD_DWORD [[LOAD:v[0-9]+]],
; SI: V_MOV_B32_e32 {{v[0-9]+}}, [[ZERO]]
diff --git a/test/CodeGen/R600/extract_vector_elt_i16.ll b/test/CodeGen/R600/extract_vector_elt_i16.ll
index 26b8f2c9320..fcdd1bd5c0c 100644
--- a/test/CodeGen/R600/extract_vector_elt_i16.ll
+++ b/test/CodeGen/R600/extract_vector_elt_i16.ll
@@ -1,6 +1,6 @@
; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs< %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-; FUNC-LABEL: @extract_vector_elt_v2i16
+; FUNC-LABEL: {{^}}extract_vector_elt_v2i16:
; SI: BUFFER_LOAD_USHORT
; SI: BUFFER_LOAD_USHORT
; SI: BUFFER_STORE_SHORT
@@ -14,7 +14,7 @@ define void @extract_vector_elt_v2i16(i16 addrspace(1)* %out, <2 x i16> %foo) no
ret void
}
-; FUNC-LABEL: @extract_vector_elt_v4i16
+; FUNC-LABEL: {{^}}extract_vector_elt_v4i16:
; SI: BUFFER_LOAD_USHORT
; SI: BUFFER_LOAD_USHORT
; SI: BUFFER_STORE_SHORT
diff --git a/test/CodeGen/R600/fabs.f64.ll b/test/CodeGen/R600/fabs.f64.ll
index 1e1bdf4f7e9..72941b3a466 100644
--- a/test/CodeGen/R600/fabs.f64.ll
+++ b/test/CodeGen/R600/fabs.f64.ll
@@ -7,7 +7,7 @@ declare double @llvm.fabs.f64(double) readnone
declare <2 x double> @llvm.fabs.v2f64(<2 x double>) readnone
declare <4 x double> @llvm.fabs.v4f64(<4 x double>) readnone
-; FUNC-LABEL: @v_fabs_f64
+; FUNC-LABEL: {{^}}v_fabs_f64:
; SI: V_AND_B32
; SI: S_ENDPGM
define void @v_fabs_f64(double addrspace(1)* %out, double addrspace(1)* %in) {
@@ -20,7 +20,7 @@ define void @v_fabs_f64(double addrspace(1)* %out, double addrspace(1)* %in) {
ret void
}
-; FUNC-LABEL: @fabs_f64
+; FUNC-LABEL: {{^}}fabs_f64:
; SI: V_AND_B32
; SI-NOT: V_AND_B32
; SI: S_ENDPGM
@@ -30,7 +30,7 @@ define void @fabs_f64(double addrspace(1)* %out, double %in) {
ret void
}
-; FUNC-LABEL: @fabs_v2f64
+; FUNC-LABEL: {{^}}fabs_v2f64:
; SI: V_AND_B32
; SI: V_AND_B32
; SI: S_ENDPGM
@@ -40,7 +40,7 @@ define void @fabs_v2f64(<2 x double> addrspace(1)* %out, <2 x double> %in) {
ret void
}
-; FUNC-LABEL: @fabs_v4f64
+; FUNC-LABEL: {{^}}fabs_v4f64:
; SI: V_AND_B32
; SI: V_AND_B32
; SI: V_AND_B32
@@ -52,7 +52,7 @@ define void @fabs_v4f64(<4 x double> addrspace(1)* %out, <4 x double> %in) {
ret void
}
-; SI-LABEL: @fabs_fold_f64
+; SI-LABEL: {{^}}fabs_fold_f64:
; SI: S_LOAD_DWORDX2 [[ABS_VALUE:s\[[0-9]+:[0-9]+\]]], {{s\[[0-9]+:[0-9]+\]}}, 0xb
; SI-NOT: AND
; SI: V_MUL_F64 {{v\[[0-9]+:[0-9]+\]}}, |[[ABS_VALUE]]|, {{v\[[0-9]+:[0-9]+\]}}
@@ -64,7 +64,7 @@ define void @fabs_fold_f64(double addrspace(1)* %out, double %in0, double %in1)
ret void
}
-; SI-LABEL: @fabs_fn_fold_f64
+; SI-LABEL: {{^}}fabs_fn_fold_f64:
; SI: S_LOAD_DWORDX2 [[ABS_VALUE:s\[[0-9]+:[0-9]+\]]], {{s\[[0-9]+:[0-9]+\]}}, 0xb
; SI-NOT: AND
; SI: V_MUL_F64 {{v\[[0-9]+:[0-9]+\]}}, |[[ABS_VALUE]]|, {{v\[[0-9]+:[0-9]+\]}}
@@ -76,7 +76,7 @@ define void @fabs_fn_fold_f64(double addrspace(1)* %out, double %in0, double %in
ret void
}
-; FUNC-LABEL: @fabs_free_f64
+; FUNC-LABEL: {{^}}fabs_free_f64:
; SI: V_AND_B32
; SI: S_ENDPGM
define void @fabs_free_f64(double addrspace(1)* %out, i64 %in) {
@@ -86,7 +86,7 @@ define void @fabs_free_f64(double addrspace(1)* %out, i64 %in) {
ret void
}
-; FUNC-LABEL: @fabs_fn_free_f64
+; FUNC-LABEL: {{^}}fabs_fn_free_f64:
; SI: V_AND_B32
; SI: S_ENDPGM
define void @fabs_fn_free_f64(double addrspace(1)* %out, i64 %in) {
diff --git a/test/CodeGen/R600/fabs.ll b/test/CodeGen/R600/fabs.ll
index 610c4197a22..b5bb92b2392 100644
--- a/test/CodeGen/R600/fabs.ll
+++ b/test/CodeGen/R600/fabs.ll
@@ -6,7 +6,7 @@
; (fabs (f32 bitcast (i32 a))) => (f32 bitcast (and (i32 a), 0x7FFFFFFF))
; unless isFabsFree returns true
-; FUNC-LABEL: @fabs_fn_free
+; FUNC-LABEL: {{^}}fabs_fn_free:
; R600-NOT: AND
; R600: |PV.{{[XYZW]}}|
@@ -19,7 +19,7 @@ define void @fabs_fn_free(float addrspace(1)* %out, i32 %in) {
ret void
}
-; FUNC-LABEL: @fabs_free
+; FUNC-LABEL: {{^}}fabs_free:
; R600-NOT: AND
; R600: |PV.{{[XYZW]}}|
@@ -32,7 +32,7 @@ define void @fabs_free(float addrspace(1)* %out, i32 %in) {
ret void
}
-; FUNC-LABEL: @fabs_f32
+; FUNC-LABEL: {{^}}fabs_f32:
; R600: |{{(PV|T[0-9])\.[XYZW]}}|
; SI: V_AND_B32
@@ -42,7 +42,7 @@ define void @fabs_f32(float addrspace(1)* %out, float %in) {
ret void
}
-; FUNC-LABEL: @fabs_v2f32
+; FUNC-LABEL: {{^}}fabs_v2f32:
; R600: |{{(PV|T[0-9])\.[XYZW]}}|
; R600: |{{(PV|T[0-9])\.[XYZW]}}|
@@ -54,7 +54,7 @@ define void @fabs_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %in) {
ret void
}
-; FUNC-LABEL: @fabs_v4
+; FUNC-LABEL: {{^}}fabs_v4f32:
; R600: |{{(PV|T[0-9])\.[XYZW]}}|
; R600: |{{(PV|T[0-9])\.[XYZW]}}|
; R600: |{{(PV|T[0-9])\.[XYZW]}}|
@@ -70,7 +70,7 @@ define void @fabs_v4f32(<4 x float> addrspace(1)* %out, <4 x float> %in) {
ret void
}
-; SI-LABEL: @fabs_fn_fold
+; SI-LABEL: {{^}}fabs_fn_fold:
; SI: S_LOAD_DWORD [[ABS_VALUE:s[0-9]+]], s[{{[0-9]+:[0-9]+}}], 0xb
; SI-NOT: AND
; SI: V_MUL_F32_e64 v{{[0-9]+}}, |[[ABS_VALUE]]|, v{{[0-9]+}}
@@ -81,7 +81,7 @@ define void @fabs_fn_fold(float addrspace(1)* %out, float %in0, float %in1) {
ret void
}
-; SI-LABEL: @fabs_fold
+; SI-LABEL: {{^}}fabs_fold:
; SI: S_LOAD_DWORD [[ABS_VALUE:s[0-9]+]], s[{{[0-9]+:[0-9]+}}], 0xb
; SI-NOT: AND
; SI: V_MUL_F32_e64 v{{[0-9]+}}, |[[ABS_VALUE]]|, v{{[0-9]+}}
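The comment at the top of fabs.ll states the lowering (fabs (f32 bitcast (i32 a))) => (f32 bitcast (and (i32 a), 0x7FFFFFFF)); a minimal IR sketch of that integer form (hypothetical function, not taken from this patch), which is what the SI V_AND_B32 checks correspond to:

define void @fabs_bitmask_sketch(float addrspace(1)* %out, i32 %in) {
  %masked = and i32 %in, 2147483647       ; 0x7fffffff clears the sign bit
  %absval = bitcast i32 %masked to float  ; reinterpret the masked bits as |x|
  store float %absval, float addrspace(1)* %out, align 4
  ret void
}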
diff --git a/test/CodeGen/R600/fadd.ll b/test/CodeGen/R600/fadd.ll
index 3a87c892d7b..08f78909d02 100644
--- a/test/CodeGen/R600/fadd.ll
+++ b/test/CodeGen/R600/fadd.ll
@@ -1,7 +1,7 @@
; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck %s -check-prefix=R600 -check-prefix=FUNC
; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck %s -check-prefix=SI -check-prefix=FUNC
-; FUNC-LABEL: @fadd_f32
+; FUNC-LABEL: {{^}}fadd_f32:
; R600: ADD {{\** *}}T{{[0-9]+\.[XYZW]}}, KC0[2].Z, KC0[2].W
; SI: V_ADD_F32
define void @fadd_f32(float addrspace(1)* %out, float %a, float %b) {
@@ -10,7 +10,7 @@ define void @fadd_f32(float addrspace(1)* %out, float %a, float %b) {
ret void
}
-; FUNC-LABEL: @fadd_v2f32
+; FUNC-LABEL: {{^}}fadd_v2f32:
; R600-DAG: ADD {{\** *}}T{{[0-9]\.[XYZW]}}, KC0[3].X, KC0[3].Z
; R600-DAG: ADD {{\** *}}T{{[0-9]\.[XYZW]}}, KC0[2].W, KC0[3].Y
; SI: V_ADD_F32
@@ -21,7 +21,7 @@ define void @fadd_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %a, <2 x flo
ret void
}
-; FUNC-LABEL: @fadd_v4f32
+; FUNC-LABEL: {{^}}fadd_v4f32:
; R600: ADD {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; R600: ADD {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; R600: ADD {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
@@ -39,7 +39,7 @@ define void @fadd_v4f32(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1)
ret void
}
-; FUNC-LABEL: @fadd_v8f32
+; FUNC-LABEL: {{^}}fadd_v8f32:
; R600: ADD
; R600: ADD
; R600: ADD
diff --git a/test/CodeGen/R600/fadd64.ll b/test/CodeGen/R600/fadd64.ll
index 48cd3cfc8df..108264c4d55 100644
--- a/test/CodeGen/R600/fadd64.ll
+++ b/test/CodeGen/R600/fadd64.ll
@@ -1,6 +1,6 @@
; RUN: llc < %s -march=r600 -mcpu=tahiti -verify-machineinstrs | FileCheck %s
-; CHECK: @fadd_f64
+; CHECK: {{^}}fadd_f64:
; CHECK: V_ADD_F64 {{v[[0-9]+:[0-9]+]}}, {{v[[0-9]+:[0-9]+]}}, {{v[[0-9]+:[0-9]+]}}
define void @fadd_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
diff --git a/test/CodeGen/R600/fceil.ll b/test/CodeGen/R600/fceil.ll
index 458363adc1e..ef89eedf5cd 100644
--- a/test/CodeGen/R600/fceil.ll
+++ b/test/CodeGen/R600/fceil.ll
@@ -8,7 +8,7 @@ declare <4 x float> @llvm.ceil.v4f32(<4 x float>) nounwind readnone
declare <8 x float> @llvm.ceil.v8f32(<8 x float>) nounwind readnone
declare <16 x float> @llvm.ceil.v16f32(<16 x float>) nounwind readnone
-; FUNC-LABEL: @fceil_f32:
+; FUNC-LABEL: {{^}}fceil_f32:
; SI: V_CEIL_F32_e32
; EG: MEM_RAT_CACHELESS STORE_RAW [[RESULT:T[0-9]+\.[XYZW]]]
; EG: CEIL {{\*? *}}[[RESULT]]
@@ -18,7 +18,7 @@ define void @fceil_f32(float addrspace(1)* %out, float %x) {
ret void
}
-; FUNC-LABEL: @fceil_v2f32:
+; FUNC-LABEL: {{^}}fceil_v2f32:
; SI: V_CEIL_F32_e32
; SI: V_CEIL_F32_e32
; EG: MEM_RAT_CACHELESS STORE_RAW [[RESULT:T[0-9]+]]{{\.[XYZW]}}
@@ -30,7 +30,7 @@ define void @fceil_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %x) {
ret void
}
-; FUNC-LABEL: @fceil_v3f32:
+; FUNC-LABEL: {{^}}fceil_v3f32:
; FIXME-SI: V_CEIL_F32_e32
; FIXME-SI: V_CEIL_F32_e32
; FIXME-SI: V_CEIL_F32_e32
@@ -46,7 +46,7 @@ define void @fceil_v3f32(<3 x float> addrspace(1)* %out, <3 x float> %x) {
ret void
}
-; FUNC-LABEL: @fceil_v4f32:
+; FUNC-LABEL: {{^}}fceil_v4f32:
; SI: V_CEIL_F32_e32
; SI: V_CEIL_F32_e32
; SI: V_CEIL_F32_e32
@@ -62,7 +62,7 @@ define void @fceil_v4f32(<4 x float> addrspace(1)* %out, <4 x float> %x) {
ret void
}
-; FUNC-LABEL: @fceil_v8f32:
+; FUNC-LABEL: {{^}}fceil_v8f32:
; SI: V_CEIL_F32_e32
; SI: V_CEIL_F32_e32
; SI: V_CEIL_F32_e32
@@ -87,7 +87,7 @@ define void @fceil_v8f32(<8 x float> addrspace(1)* %out, <8 x float> %x) {
ret void
}
-; FUNC-LABEL: @fceil_v16f32:
+; FUNC-LABEL: {{^}}fceil_v16f32:
; SI: V_CEIL_F32_e32
; SI: V_CEIL_F32_e32
; SI: V_CEIL_F32_e32
diff --git a/test/CodeGen/R600/fceil64.ll b/test/CodeGen/R600/fceil64.ll
index fa1740a4b9f..acdf8390db6 100644
--- a/test/CodeGen/R600/fceil64.ll
+++ b/test/CodeGen/R600/fceil64.ll
@@ -8,7 +8,7 @@ declare <4 x double> @llvm.ceil.v4f64(<4 x double>) nounwind readnone
declare <8 x double> @llvm.ceil.v8f64(<8 x double>) nounwind readnone
declare <16 x double> @llvm.ceil.v16f64(<16 x double>) nounwind readnone
-; FUNC-LABEL: @fceil_f64:
+; FUNC-LABEL: {{^}}fceil_f64:
; CI: V_CEIL_F64_e32
; SI: S_BFE_I32 [[SEXP:s[0-9]+]], {{s[0-9]+}}, 0xb0014
; SI: S_ADD_I32 s{{[0-9]+}}, [[SEXP]], 0xfffffc01
@@ -34,7 +34,7 @@ define void @fceil_f64(double addrspace(1)* %out, double %x) {
ret void
}
-; FUNC-LABEL: @fceil_v2f64:
+; FUNC-LABEL: {{^}}fceil_v2f64:
; CI: V_CEIL_F64_e32
; CI: V_CEIL_F64_e32
define void @fceil_v2f64(<2 x double> addrspace(1)* %out, <2 x double> %x) {
@@ -43,7 +43,7 @@ define void @fceil_v2f64(<2 x double> addrspace(1)* %out, <2 x double> %x) {
ret void
}
-; FIXME-FUNC-LABEL: @fceil_v3f64:
+; FIXME-FUNC-LABEL: {{^}}fceil_v3f64:
; FIXME-CI: V_CEIL_F64_e32
; FIXME-CI: V_CEIL_F64_e32
; FIXME-CI: V_CEIL_F64_e32
@@ -53,7 +53,7 @@ define void @fceil_v2f64(<2 x double> addrspace(1)* %out, <2 x double> %x) {
; ret void
; }
-; FUNC-LABEL: @fceil_v4f64:
+; FUNC-LABEL: {{^}}fceil_v4f64:
; CI: V_CEIL_F64_e32
; CI: V_CEIL_F64_e32
; CI: V_CEIL_F64_e32
@@ -64,7 +64,7 @@ define void @fceil_v4f64(<4 x double> addrspace(1)* %out, <4 x double> %x) {
ret void
}
-; FUNC-LABEL: @fceil_v8f64:
+; FUNC-LABEL: {{^}}fceil_v8f64:
; CI: V_CEIL_F64_e32
; CI: V_CEIL_F64_e32
; CI: V_CEIL_F64_e32
@@ -79,7 +79,7 @@ define void @fceil_v8f64(<8 x double> addrspace(1)* %out, <8 x double> %x) {
ret void
}
-; FUNC-LABEL: @fceil_v16f64:
+; FUNC-LABEL: {{^}}fceil_v16f64:
; CI: V_CEIL_F64_e32
; CI: V_CEIL_F64_e32
; CI: V_CEIL_F64_e32
diff --git a/test/CodeGen/R600/fcmp.ll b/test/CodeGen/R600/fcmp.ll
index c76a7587656..33992181e0d 100644
--- a/test/CodeGen/R600/fcmp.ll
+++ b/test/CodeGen/R600/fcmp.ll
@@ -1,6 +1,6 @@
; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
-; CHECK: @fcmp_sext
+; CHECK: {{^}}fcmp_sext:
; CHECK: SETE_DX10 T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
define void @fcmp_sext(i32 addrspace(1)* %out, float addrspace(1)* %in) {
@@ -18,7 +18,7 @@ entry:
; SET*_DX10 instruction. Previously we were lowering this to:
; SET* + FP_TO_SINT
-; CHECK: @fcmp_br
+; CHECK: {{^}}fcmp_br:
; CHECK: SET{{[N]*}}E_DX10 * T{{[0-9]+\.[XYZW],}}
; CHECK-NEXT {{[0-9]+(5.0}}
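The comment above notes that an fcmp whose i1 result is sign-extended to i32 should lower to a single SET*_DX10 rather than SET* plus FP_TO_SINT; a minimal IR sketch of that pattern (hypothetical function with scalar arguments, not taken from this patch):

define void @fcmp_sext_sketch(i32 addrspace(1)* %out, float %a, float %b) {
  %cmp = fcmp oeq float %a, %b
  %ext = sext i1 %cmp to i32     ; all-ones/zero result, matching SETE_DX10 semantics
  store i32 %ext, i32 addrspace(1)* %out, align 4
  ret void
}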
diff --git a/test/CodeGen/R600/fcmp64.ll b/test/CodeGen/R600/fcmp64.ll
index 8cbe9f68664..dcb474087e0 100644
--- a/test/CodeGen/R600/fcmp64.ll
+++ b/test/CodeGen/R600/fcmp64.ll
@@ -1,6 +1,6 @@
; RUN: llc < %s -march=r600 -mcpu=tahiti -verify-machineinstrs | FileCheck %s
-; CHECK: @flt_f64
+; CHECK: {{^}}flt_f64:
; CHECK: V_CMP_LT_F64_e64 {{s[[0-9]+:[0-9]+], v[[0-9]+:[0-9]+], v[[0-9]+:[0-9]+]}}
define void @flt_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
@@ -13,7 +13,7 @@ define void @flt_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
ret void
}
-; CHECK: @fle_f64
+; CHECK: {{^}}fle_f64:
; CHECK: V_CMP_LE_F64_e64 {{s[[0-9]+:[0-9]+], v[[0-9]+:[0-9]+], v[[0-9]+:[0-9]+]}}
define void @fle_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
@@ -26,7 +26,7 @@ define void @fle_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
ret void
}
-; CHECK: @fgt_f64
+; CHECK: {{^}}fgt_f64:
; CHECK: V_CMP_GT_F64_e64 {{s[[0-9]+:[0-9]+], v[[0-9]+:[0-9]+], v[[0-9]+:[0-9]+]}}
define void @fgt_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
@@ -39,7 +39,7 @@ define void @fgt_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
ret void
}
-; CHECK: @fge_f64
+; CHECK: {{^}}fge_f64:
; CHECK: V_CMP_GE_F64_e64 {{s[[0-9]+:[0-9]+], v[[0-9]+:[0-9]+], v[[0-9]+:[0-9]+]}}
define void @fge_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
@@ -52,7 +52,7 @@ define void @fge_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
ret void
}
-; CHECK: @fne_f64
+; CHECK: {{^}}fne_f64:
; CHECK: V_CMP_NEQ_F64_e32 vcc, {{v[[0-9]+:[0-9]+], v[[0-9]+:[0-9]+]}}
define void @fne_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
@@ -65,7 +65,7 @@ define void @fne_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
ret void
}
-; CHECK: @feq_f64
+; CHECK: {{^}}feq_f64:
; CHECK: V_CMP_EQ_F64_e64 {{s[[0-9]+:[0-9]+], v[[0-9]+:[0-9]+], v[[0-9]+:[0-9]+]}}
define void @feq_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
diff --git a/test/CodeGen/R600/fconst64.ll b/test/CodeGen/R600/fconst64.ll
index 9c3a7e3d2e9..0e23d492ab6 100644
--- a/test/CodeGen/R600/fconst64.ll
+++ b/test/CodeGen/R600/fconst64.ll
@@ -1,6 +1,6 @@
; RUN: llc < %s -march=r600 -mcpu=tahiti -verify-machineinstrs | FileCheck %s
-; CHECK: @fconst_f64
+; CHECK: {{^}}fconst_f64:
; CHECK-DAG: S_MOV_B32 {{s[0-9]+}}, 0x40140000
; CHECK-DAG: S_MOV_B32 {{s[0-9]+}}, 0
diff --git a/test/CodeGen/R600/fcopysign.f32.ll b/test/CodeGen/R600/fcopysign.f32.ll
index 15ee40ec91a..8c48272026f 100644
--- a/test/CodeGen/R600/fcopysign.f32.ll
+++ b/test/CodeGen/R600/fcopysign.f32.ll
@@ -7,7 +7,7 @@ declare <2 x float> @llvm.copysign.v2f32(<2 x float>, <2 x float>) nounwind read
declare <4 x float> @llvm.copysign.v4f32(<4 x float>, <4 x float>) nounwind readnone
; Try to identify arg based on higher address.
-; FUNC-LABEL: @test_copysign_f32:
+; FUNC-LABEL: {{^}}test_copysign_f32:
; SI: S_LOAD_DWORD [[SMAG:s[0-9]+]], {{.*}} 0xb
; SI: S_LOAD_DWORD [[SSIGN:s[0-9]+]], {{.*}} 0xc
; SI-DAG: V_MOV_B32_e32 [[VSIGN:v[0-9]+]], [[SSIGN]]
@@ -24,7 +24,7 @@ define void @test_copysign_f32(float addrspace(1)* %out, float %mag, float %sign
ret void
}
-; FUNC-LABEL: @test_copysign_v2f32:
+; FUNC-LABEL: {{^}}test_copysign_v2f32:
; SI: S_ENDPGM
; EG: BFI_INT
@@ -35,7 +35,7 @@ define void @test_copysign_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %ma
ret void
}
-; FUNC-LABEL: @test_copysign_v4f32:
+; FUNC-LABEL: {{^}}test_copysign_v4f32:
; SI: S_ENDPGM
; EG: BFI_INT
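The EG: BFI_INT checks above reflect that copysign is just a bitfield insert of the sign bit; a minimal IR sketch of the equivalent integer form (hypothetical function, not taken from this patch):

define void @copysign_bits_sketch(float addrspace(1)* %out, i32 %mag, i32 %sign) {
  %magbits = and i32 %mag, 2147483647     ; keep magnitude bits (0x7fffffff)
  %signbit = and i32 %sign, -2147483648   ; keep only the sign bit (0x80000000)
  %bits = or i32 %magbits, %signbit
  %result = bitcast i32 %bits to float
  store float %result, float addrspace(1)* %out, align 4
  ret void
}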
diff --git a/test/CodeGen/R600/fcopysign.f64.ll b/test/CodeGen/R600/fcopysign.f64.ll
index c72329ca012..c0150bc4101 100644
--- a/test/CodeGen/R600/fcopysign.f64.ll
+++ b/test/CodeGen/R600/fcopysign.f64.ll
@@ -4,7 +4,7 @@ declare double @llvm.copysign.f64(double, double) nounwind readnone
declare <2 x double> @llvm.copysign.v2f64(<2 x double>, <2 x double>) nounwind readnone
declare <4 x double> @llvm.copysign.v4f64(<4 x double>, <4 x double>) nounwind readnone
-; FUNC-LABEL: @test_copysign_f64:
+; FUNC-LABEL: {{^}}test_copysign_f64:
; SI-DAG: S_LOAD_DWORDX2 s{{\[}}[[SMAG_LO:[0-9]+]]:[[SMAG_HI:[0-9]+]]{{\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0xb
; SI-DAG: S_LOAD_DWORDX2 s{{\[}}[[SSIGN_LO:[0-9]+]]:[[SSIGN_HI:[0-9]+]]{{\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0xd
; SI-DAG: V_MOV_B32_e32 v[[VSIGN_HI:[0-9]+]], s[[SSIGN_HI]]
@@ -20,7 +20,7 @@ define void @test_copysign_f64(double addrspace(1)* %out, double %mag, double %s
ret void
}
-; FUNC-LABEL: @test_copysign_v2f64:
+; FUNC-LABEL: {{^}}test_copysign_v2f64:
; SI: S_ENDPGM
define void @test_copysign_v2f64(<2 x double> addrspace(1)* %out, <2 x double> %mag, <2 x double> %sign) nounwind {
%result = call <2 x double> @llvm.copysign.v2f64(<2 x double> %mag, <2 x double> %sign)
@@ -28,7 +28,7 @@ define void @test_copysign_v2f64(<2 x double> addrspace(1)* %out, <2 x double> %
ret void
}
-; FUNC-LABEL: @test_copysign_v4f64:
+; FUNC-LABEL: {{^}}test_copysign_v4f64:
; SI: S_ENDPGM
define void @test_copysign_v4f64(<4 x double> addrspace(1)* %out, <4 x double> %mag, <4 x double> %sign) nounwind {
%result = call <4 x double> @llvm.copysign.v4f64(<4 x double> %mag, <4 x double> %sign)
diff --git a/test/CodeGen/R600/fdiv.ll b/test/CodeGen/R600/fdiv.ll
index 20db65c5eb6..d00217799f1 100644
--- a/test/CodeGen/R600/fdiv.ll
+++ b/test/CodeGen/R600/fdiv.ll
@@ -5,7 +5,7 @@
; scheduler is scheduling the RECIP_IEEE and MUL_IEEE instructions in separate
; instruction groups.
-; FUNC-LABEL: @fdiv_f32
+; FUNC-LABEL: {{^}}fdiv_f32:
; R600-DAG: RECIP_IEEE * T{{[0-9]+\.[XYZW]}}, KC0[3].Z
; R600-DAG: RECIP_IEEE * T{{[0-9]+\.[XYZW]}}, KC0[3].Y
; R600-DAG: MUL_IEEE {{\** *}}T{{[0-9]+\.[XYZW]}}, KC0[3].X, PS
@@ -22,7 +22,7 @@ entry:
-; FUNC-LABEL: @fdiv_v2f32
+; FUNC-LABEL: {{^}}fdiv_v2f32:
; R600-DAG: RECIP_IEEE * T{{[0-9]+\.[XYZW]}}, KC0[3].Z
; R600-DAG: RECIP_IEEE * T{{[0-9]+\.[XYZW]}}, KC0[3].Y
; R600-DAG: MUL_IEEE {{\** *}}T{{[0-9]+\.[XYZW]}}, KC0[3].X, PS
@@ -39,7 +39,7 @@ entry:
ret void
}
-; FUNC-LABEL: @fdiv_v4f32
+; FUNC-LABEL: {{^}}fdiv_v4f32:
; R600-DAG: RECIP_IEEE * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; R600-DAG: RECIP_IEEE * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; R600-DAG: RECIP_IEEE * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
diff --git a/test/CodeGen/R600/fdiv64.ll b/test/CodeGen/R600/fdiv64.ll
index 79b5c8bb96e..312870bbdff 100644
--- a/test/CodeGen/R600/fdiv64.ll
+++ b/test/CodeGen/R600/fdiv64.ll
@@ -1,6 +1,6 @@
; RUN: llc < %s -march=r600 -mcpu=tahiti -verify-machineinstrs | FileCheck %s
-; CHECK: @fdiv_f64
+; CHECK: {{^}}fdiv_f64:
; CHECK: V_RCP_F64_e32 {{v\[[0-9]+:[0-9]+\]}}
; CHECK: V_MUL_F64 {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}
diff --git a/test/CodeGen/R600/fetch-limits.r600.ll b/test/CodeGen/R600/fetch-limits.r600.ll
index f78d1d968e5..d35573e818d 100644
--- a/test/CodeGen/R600/fetch-limits.r600.ll
+++ b/test/CodeGen/R600/fetch-limits.r600.ll
@@ -3,7 +3,7 @@
; RUN: llc < %s -march=r600 -mcpu=rv670 | FileCheck %s
; R600 supports 8 fetches in a clause
-; CHECK: @fetch_limits_r600
+; CHECK: {{^}}fetch_limits_r600:
; CHECK: Fetch clause
; CHECK: Fetch clause
diff --git a/test/CodeGen/R600/fetch-limits.r700+.ll b/test/CodeGen/R600/fetch-limits.r700+.ll
index 1a8a43fccc7..17760a05caa 100644
--- a/test/CodeGen/R600/fetch-limits.r700+.ll
+++ b/test/CodeGen/R600/fetch-limits.r700+.ll
@@ -12,7 +12,7 @@
; RUN: llc < %s -march=r600 -mcpu=cayman | FileCheck %s
; r700+ supports 16 fetches in a clause
-; CHECK: @fetch_limits_r700
+; CHECK: {{^}}fetch_limits_r700:
; CHECK: Fetch clause
; CHECK: Fetch clause
diff --git a/test/CodeGen/R600/ffloor.ll b/test/CodeGen/R600/ffloor.ll
index 1a98b75dd15..7a4aeaa16e8 100644
--- a/test/CodeGen/R600/ffloor.ll
+++ b/test/CodeGen/R600/ffloor.ll
@@ -8,7 +8,7 @@ declare <4 x double> @llvm.floor.v4f64(<4 x double>) nounwind readnone
declare <8 x double> @llvm.floor.v8f64(<8 x double>) nounwind readnone
declare <16 x double> @llvm.floor.v16f64(<16 x double>) nounwind readnone
-; FUNC-LABEL: @ffloor_f64:
+; FUNC-LABEL: {{^}}ffloor_f64:
; CI: V_FLOOR_F64_e32
; SI: S_BFE_I32 [[SEXP:s[0-9]+]], {{s[0-9]+}}, 0xb0014
@@ -35,7 +35,7 @@ define void @ffloor_f64(double addrspace(1)* %out, double %x) {
ret void
}
-; FUNC-LABEL: @ffloor_v2f64:
+; FUNC-LABEL: {{^}}ffloor_v2f64:
; CI: V_FLOOR_F64_e32
; CI: V_FLOOR_F64_e32
define void @ffloor_v2f64(<2 x double> addrspace(1)* %out, <2 x double> %x) {
@@ -44,7 +44,7 @@ define void @ffloor_v2f64(<2 x double> addrspace(1)* %out, <2 x double> %x) {
ret void
}
-; FIXME-FUNC-LABEL: @ffloor_v3f64:
+; FIXME-FUNC-LABEL: {{^}}ffloor_v3f64:
; FIXME-CI: V_FLOOR_F64_e32
; FIXME-CI: V_FLOOR_F64_e32
; FIXME-CI: V_FLOOR_F64_e32
@@ -54,7 +54,7 @@ define void @ffloor_v2f64(<2 x double> addrspace(1)* %out, <2 x double> %x) {
; ret void
; }
-; FUNC-LABEL: @ffloor_v4f64:
+; FUNC-LABEL: {{^}}ffloor_v4f64:
; CI: V_FLOOR_F64_e32
; CI: V_FLOOR_F64_e32
; CI: V_FLOOR_F64_e32
@@ -65,7 +65,7 @@ define void @ffloor_v4f64(<4 x double> addrspace(1)* %out, <4 x double> %x) {
ret void
}
-; FUNC-LABEL: @ffloor_v8f64:
+; FUNC-LABEL: {{^}}ffloor_v8f64:
; CI: V_FLOOR_F64_e32
; CI: V_FLOOR_F64_e32
; CI: V_FLOOR_F64_e32
@@ -80,7 +80,7 @@ define void @ffloor_v8f64(<8 x double> addrspace(1)* %out, <8 x double> %x) {
ret void
}
-; FUNC-LABEL: @ffloor_v16f64:
+; FUNC-LABEL: {{^}}ffloor_v16f64:
; CI: V_FLOOR_F64_e32
; CI: V_FLOOR_F64_e32
; CI: V_FLOOR_F64_e32
diff --git a/test/CodeGen/R600/flat-address-space.ll b/test/CodeGen/R600/flat-address-space.ll
index 3f32e4dbc8b..ff9bd7312f7 100644
--- a/test/CodeGen/R600/flat-address-space.ll
+++ b/test/CodeGen/R600/flat-address-space.ll
@@ -5,7 +5,7 @@
; specialize away generic pointer accesses.
-; CHECK-LABEL: @branch_use_flat_i32:
+; CHECK-LABEL: {{^}}branch_use_flat_i32:
; CHECK: FLAT_STORE_DWORD {{v[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}}, [M0, FLAT_SCRATCH]
; CHECK: S_ENDPGM
define void @branch_use_flat_i32(i32 addrspace(1)* noalias %out, i32 addrspace(1)* %gptr, i32 addrspace(3)* %lptr, i32 %x, i32 %c) #0 {
@@ -34,7 +34,7 @@ end:
; These testcases might become useless when there are optimizations to
; remove generic pointers.
-; CHECK-LABEL: @store_flat_i32:
+; CHECK-LABEL: {{^}}store_flat_i32:
; CHECK: V_MOV_B32_e32 v[[DATA:[0-9]+]], {{s[0-9]+}}
; CHECK: V_MOV_B32_e32 v[[LO_VREG:[0-9]+]], {{s[0-9]+}}
; CHECK: V_MOV_B32_e32 v[[HI_VREG:[0-9]+]], {{s[0-9]+}}
@@ -45,7 +45,7 @@ define void @store_flat_i32(i32 addrspace(1)* %gptr, i32 %x) #0 {
ret void
}
-; CHECK-LABEL: @store_flat_i64:
+; CHECK-LABEL: {{^}}store_flat_i64:
; CHECK: FLAT_STORE_DWORDX2
define void @store_flat_i64(i64 addrspace(1)* %gptr, i64 %x) #0 {
%fptr = addrspacecast i64 addrspace(1)* %gptr to i64 addrspace(4)*
@@ -53,7 +53,7 @@ define void @store_flat_i64(i64 addrspace(1)* %gptr, i64 %x) #0 {
ret void
}
-; CHECK-LABEL: @store_flat_v4i32:
+; CHECK-LABEL: {{^}}store_flat_v4i32:
; CHECK: FLAT_STORE_DWORDX4
define void @store_flat_v4i32(<4 x i32> addrspace(1)* %gptr, <4 x i32> %x) #0 {
%fptr = addrspacecast <4 x i32> addrspace(1)* %gptr to <4 x i32> addrspace(4)*
@@ -61,7 +61,7 @@ define void @store_flat_v4i32(<4 x i32> addrspace(1)* %gptr, <4 x i32> %x) #0 {
ret void
}
-; CHECK-LABEL: @store_flat_trunc_i16:
+; CHECK-LABEL: {{^}}store_flat_trunc_i16:
; CHECK: FLAT_STORE_SHORT
define void @store_flat_trunc_i16(i16 addrspace(1)* %gptr, i32 %x) #0 {
%fptr = addrspacecast i16 addrspace(1)* %gptr to i16 addrspace(4)*
@@ -70,7 +70,7 @@ define void @store_flat_trunc_i16(i16 addrspace(1)* %gptr, i32 %x) #0 {
ret void
}
-; CHECK-LABEL: @store_flat_trunc_i8:
+; CHECK-LABEL: {{^}}store_flat_trunc_i8:
; CHECK: FLAT_STORE_BYTE
define void @store_flat_trunc_i8(i8 addrspace(1)* %gptr, i32 %x) #0 {
%fptr = addrspacecast i8 addrspace(1)* %gptr to i8 addrspace(4)*
@@ -154,7 +154,7 @@ define void @zextload_flat_i16(i32 addrspace(1)* noalias %out, i16 addrspace(1)*
; scratch allocations again.
; Check for prologue initializing special SGPRs pointing to scratch.
-; CHECK-LABEL: @store_flat_scratch:
+; CHECK-LABEL: {{^}}store_flat_scratch:
; CHECK: S_MOVK_I32 flat_scratch_lo, 0
; CHECK-NO-PROMOTE: S_MOVK_I32 flat_scratch_hi, 40
; CHECK-PROMOTE: S_MOVK_I32 flat_scratch_hi, 0
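A minimal IR sketch (hypothetical i32 variant mirroring the i64 and v4i32 cases above, not taken from this patch) of the addrspacecast-to-flat pattern these tests exercise; the store through the addrspace(4) pointer is what selects a FLAT_STORE instruction:

define void @store_flat_i32_sketch(i32 addrspace(1)* %gptr, i32 %x) {
  %fptr = addrspacecast i32 addrspace(1)* %gptr to i32 addrspace(4)*  ; global -> flat
  store i32 %x, i32 addrspace(4)* %fptr, align 4                      ; expected to select FLAT_STORE_DWORD
  ret void
}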
diff --git a/test/CodeGen/R600/fma.f64.ll b/test/CodeGen/R600/fma.f64.ll
index b4b9450e2f7..e4fb441bbfc 100644
--- a/test/CodeGen/R600/fma.f64.ll
+++ b/test/CodeGen/R600/fma.f64.ll
@@ -5,7 +5,7 @@ declare <2 x double> @llvm.fma.v2f64(<2 x double>, <2 x double>, <2 x double>) n
declare <4 x double> @llvm.fma.v4f64(<4 x double>, <4 x double>, <4 x double>) nounwind readnone
-; FUNC-LABEL: @fma_f64
+; FUNC-LABEL: {{^}}fma_f64:
; SI: V_FMA_F64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\]}}
define void @fma_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
double addrspace(1)* %in2, double addrspace(1)* %in3) {
@@ -17,7 +17,7 @@ define void @fma_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
ret void
}
-; FUNC-LABEL: @fma_v2f64
+; FUNC-LABEL: {{^}}fma_v2f64:
; SI: V_FMA_F64
; SI: V_FMA_F64
define void @fma_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace(1)* %in1,
@@ -30,7 +30,7 @@ define void @fma_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace(1
ret void
}
-; FUNC-LABEL: @fma_v4f64
+; FUNC-LABEL: {{^}}fma_v4f64:
; SI: V_FMA_F64
; SI: V_FMA_F64
; SI: V_FMA_F64
diff --git a/test/CodeGen/R600/fma.ll b/test/CodeGen/R600/fma.ll
index eec8eb7f1c5..58ecf6a83ce 100644
--- a/test/CodeGen/R600/fma.ll
+++ b/test/CodeGen/R600/fma.ll
@@ -5,7 +5,7 @@ declare float @llvm.fma.f32(float, float, float) nounwind readnone
declare <2 x float> @llvm.fma.v2f32(<2 x float>, <2 x float>, <2 x float>) nounwind readnone
declare <4 x float> @llvm.fma.v4f32(<4 x float>, <4 x float>, <4 x float>) nounwind readnone
-; FUNC-LABEL: @fma_f32
+; FUNC-LABEL: {{^}}fma_f32:
; SI: V_FMA_F32 {{v[0-9]+, v[0-9]+, v[0-9]+, v[0-9]+}}
define void @fma_f32(float addrspace(1)* %out, float addrspace(1)* %in1,
float addrspace(1)* %in2, float addrspace(1)* %in3) {
@@ -17,7 +17,7 @@ define void @fma_f32(float addrspace(1)* %out, float addrspace(1)* %in1,
ret void
}
-; FUNC-LABEL: @fma_v2f32
+; FUNC-LABEL: {{^}}fma_v2f32:
; SI: V_FMA_F32
; SI: V_FMA_F32
define void @fma_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)* %in1,
@@ -30,7 +30,7 @@ define void @fma_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)*
ret void
}
-; FUNC-LABEL: @fma_v4f32
+; FUNC-LABEL: {{^}}fma_v4f32:
; SI: V_FMA_F32
; SI: V_FMA_F32
; SI: V_FMA_F32
diff --git a/test/CodeGen/R600/fmul.ll b/test/CodeGen/R600/fmul.ll
index 3c88bea0bed..814ed59340a 100644
--- a/test/CodeGen/R600/fmul.ll
+++ b/test/CodeGen/R600/fmul.ll
@@ -2,7 +2,7 @@
; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=R600 -check-prefix=FUNC %s
-; FUNC-LABEL: @fmul_f32
+; FUNC-LABEL: {{^}}fmul_f32:
; R600: MUL_IEEE {{\** *}}{{T[0-9]+\.[XYZW]}}, KC0[2].Z, KC0[2].W
; SI: V_MUL_F32
@@ -17,7 +17,7 @@ declare float @llvm.R600.load.input(i32) readnone
declare void @llvm.AMDGPU.store.output(float, i32)
-; FUNC-LABEL: @fmul_v2f32
+; FUNC-LABEL: {{^}}fmul_v2f32:
; R600: MUL_IEEE {{\** *}}T{{[0-9]+\.[XYZW]}}
; R600: MUL_IEEE {{\** *}}T{{[0-9]+\.[XYZW]}}
@@ -30,7 +30,7 @@ entry:
ret void
}
-; FUNC-LABEL: @fmul_v4f32
+; FUNC-LABEL: {{^}}fmul_v4f32:
; R600: MUL_IEEE {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; R600: MUL_IEEE {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; R600: MUL_IEEE {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
@@ -49,7 +49,7 @@ define void @fmul_v4f32(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1)
ret void
}
-; FUNC-LABEL: @test_mul_2_k
+; FUNC-LABEL: {{^}}test_mul_2_k:
; SI: V_MUL_F32
; SI-NOT: V_MUL_F32
; SI: S_ENDPGM
@@ -60,7 +60,7 @@ define void @test_mul_2_k(float addrspace(1)* %out, float %x) #0 {
ret void
}
-; FUNC-LABEL: @test_mul_2_k_inv
+; FUNC-LABEL: {{^}}test_mul_2_k_inv:
; SI: V_MUL_F32
; SI-NOT: V_MUL_F32
; SI-NOT: V_MAD_F32
diff --git a/test/CodeGen/R600/fmul64.ll b/test/CodeGen/R600/fmul64.ll
index e8491f9fb49..c35f5f3b408 100644
--- a/test/CodeGen/R600/fmul64.ll
+++ b/test/CodeGen/R600/fmul64.ll
@@ -1,6 +1,6 @@
; RUN: llc -march=r600 -mcpu=tahiti -verify-machineinstrs < %s | FileCheck -check-prefix=FUNC -check-prefix=SI %s
-; FUNC-LABEL: @fmul_f64
+; FUNC-LABEL: {{^}}fmul_f64:
; SI: V_MUL_F64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\]}}
define void @fmul_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
double addrspace(1)* %in2) {
@@ -11,7 +11,7 @@ define void @fmul_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
ret void
}
-; FUNC-LABEL: @fmul_v2f64
+; FUNC-LABEL: {{^}}fmul_v2f64:
; SI: V_MUL_F64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\]}}
; SI: V_MUL_F64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\]}}
define void @fmul_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace(1)* %in1,
@@ -23,7 +23,7 @@ define void @fmul_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace(
ret void
}
-; FUNC-LABEL: @fmul_v4f64
+; FUNC-LABEL: {{^}}fmul_v4f64:
; SI: V_MUL_F64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\]}}
; SI: V_MUL_F64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\]}}
; SI: V_MUL_F64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\]}}
diff --git a/test/CodeGen/R600/fmuladd.ll b/test/CodeGen/R600/fmuladd.ll
index ea559c3ddc6..c2360e6ec98 100644
--- a/test/CodeGen/R600/fmuladd.ll
+++ b/test/CodeGen/R600/fmuladd.ll
@@ -5,7 +5,7 @@ declare double @llvm.fmuladd.f64(double, double, double)
declare i32 @llvm.r600.read.tidig.x() nounwind readnone
declare float @llvm.fabs.f32(float) nounwind readnone
-; CHECK-LABEL: @fmuladd_f32
+; CHECK-LABEL: {{^}}fmuladd_f32:
; CHECK: V_MAD_F32 {{v[0-9]+, v[0-9]+, v[0-9]+, v[0-9]+}}
define void @fmuladd_f32(float addrspace(1)* %out, float addrspace(1)* %in1,
@@ -18,7 +18,7 @@ define void @fmuladd_f32(float addrspace(1)* %out, float addrspace(1)* %in1,
ret void
}
-; CHECK-LABEL: @fmuladd_f64
+; CHECK-LABEL: {{^}}fmuladd_f64:
; CHECK: V_FMA_F64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\]}}
define void @fmuladd_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
@@ -31,7 +31,7 @@ define void @fmuladd_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
ret void
}
-; CHECK-LABEL: @fmuladd_2.0_a_b_f32
+; CHECK-LABEL: {{^}}fmuladd_2.0_a_b_f32:
; CHECK-DAG: BUFFER_LOAD_DWORD [[R1:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
; CHECK-DAG: BUFFER_LOAD_DWORD [[R2:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:0x4
; CHECK: V_MAD_F32 [[RESULT:v[0-9]+]], 2.0, [[R1]], [[R2]]
@@ -50,7 +50,7 @@ define void @fmuladd_2.0_a_b_f32(float addrspace(1)* %out, float addrspace(1)* %
ret void
}
-; CHECK-LABEL: @fmuladd_a_2.0_b_f32
+; CHECK-LABEL: {{^}}fmuladd_a_2.0_b_f32:
; CHECK-DAG: BUFFER_LOAD_DWORD [[R1:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
; CHECK-DAG: BUFFER_LOAD_DWORD [[R2:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:0x4
; CHECK: V_MAD_F32 [[RESULT:v[0-9]+]], 2.0, [[R1]], [[R2]]
@@ -69,7 +69,7 @@ define void @fmuladd_a_2.0_b_f32(float addrspace(1)* %out, float addrspace(1)* %
ret void
}
-; CHECK-LABEL: @fadd_a_a_b_f32
+; CHECK-LABEL: {{^}}fadd_a_a_b_f32:
; CHECK-DAG: BUFFER_LOAD_DWORD [[R1:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
; CHECK-DAG: BUFFER_LOAD_DWORD [[R2:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:0x4
; CHECK: V_MAD_F32 [[RESULT:v[0-9]+]], 2.0, [[R1]], [[R2]]
@@ -91,7 +91,7 @@ define void @fadd_a_a_b_f32(float addrspace(1)* %out,
ret void
}
-; CHECK-LABEL: @fadd_b_a_a_f32
+; CHECK-LABEL: {{^}}fadd_b_a_a_f32:
; CHECK-DAG: BUFFER_LOAD_DWORD [[R1:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
; CHECK-DAG: BUFFER_LOAD_DWORD [[R2:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:0x4
; CHECK: V_MAD_F32 [[RESULT:v[0-9]+]], 2.0, [[R1]], [[R2]]
@@ -113,7 +113,7 @@ define void @fadd_b_a_a_f32(float addrspace(1)* %out,
ret void
}
-; CHECK-LABEL: @fmuladd_neg_2.0_a_b_f32
+; CHECK-LABEL: {{^}}fmuladd_neg_2.0_a_b_f32:
; CHECK-DAG: BUFFER_LOAD_DWORD [[R1:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
; CHECK-DAG: BUFFER_LOAD_DWORD [[R2:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:0x4
; CHECK: V_MAD_F32 [[RESULT:v[0-9]+]], [[R1]], -2.0, [[R2]]
@@ -133,7 +133,7 @@ define void @fmuladd_neg_2.0_a_b_f32(float addrspace(1)* %out, float addrspace(1
}
-; CHECK-LABEL: @fmuladd_neg_2.0_neg_a_b_f32
+; CHECK-LABEL: {{^}}fmuladd_neg_2.0_neg_a_b_f32:
; CHECK-DAG: BUFFER_LOAD_DWORD [[R1:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
; CHECK-DAG: BUFFER_LOAD_DWORD [[R2:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:0x4
; CHECK: V_MAD_F32 [[RESULT:v[0-9]+]], 2.0, [[R1]], [[R2]]
@@ -155,7 +155,7 @@ define void @fmuladd_neg_2.0_neg_a_b_f32(float addrspace(1)* %out, float addrspa
}
-; CHECK-LABEL: @fmuladd_2.0_neg_a_b_f32
+; CHECK-LABEL: {{^}}fmuladd_2.0_neg_a_b_f32:
; CHECK-DAG: BUFFER_LOAD_DWORD [[R1:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
; CHECK-DAG: BUFFER_LOAD_DWORD [[R2:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:0x4
; CHECK: V_MAD_F32 [[RESULT:v[0-9]+]], [[R1]], -2.0, [[R2]]
@@ -177,7 +177,7 @@ define void @fmuladd_2.0_neg_a_b_f32(float addrspace(1)* %out, float addrspace(1
}
-; CHECK-LABEL: @fmuladd_2.0_a_neg_b_f32
+; CHECK-LABEL: {{^}}fmuladd_2.0_a_neg_b_f32:
; CHECK-DAG: BUFFER_LOAD_DWORD [[R1:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
; CHECK-DAG: BUFFER_LOAD_DWORD [[R2:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:0x4
; CHECK: V_MAD_F32 [[RESULT:v[0-9]+]], 2.0, [[R1]], -[[R2]]
diff --git a/test/CodeGen/R600/fneg-fabs.f64.ll b/test/CodeGen/R600/fneg-fabs.f64.ll
index 1442ca4804a..53d032c1b47 100644
--- a/test/CodeGen/R600/fneg-fabs.f64.ll
+++ b/test/CodeGen/R600/fneg-fabs.f64.ll
@@ -3,7 +3,7 @@
; FIXME: Check something here. Currently it seems fabs + fneg aren't folded
; into 2 modifiers, although theoretically that should work.
-; FUNC-LABEL: @fneg_fabs_fadd_f64
+; FUNC-LABEL: {{^}}fneg_fabs_fadd_f64:
; SI: V_MOV_B32_e32 [[IMMREG:v[0-9]+]], 0x7fffffff
; SI: V_AND_B32_e32 v[[FABS:[0-9]+]], {{s[0-9]+}}, [[IMMREG]]
; SI: V_ADD_F64 {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, -v{{\[[0-9]+}}:[[FABS]]{{\]}}
@@ -25,7 +25,7 @@ define void @v_fneg_fabs_fadd_f64(double addrspace(1)* %out, double addrspace(1)
ret void
}
-; FUNC-LABEL: @fneg_fabs_fmul_f64
+; FUNC-LABEL: {{^}}fneg_fabs_fmul_f64:
; SI: V_MUL_F64 {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, -|{{v\[[0-9]+:[0-9]+\]}}|
define void @fneg_fabs_fmul_f64(double addrspace(1)* %out, double %x, double %y) {
%fabs = call double @llvm.fabs.f64(double %x)
@@ -35,7 +35,7 @@ define void @fneg_fabs_fmul_f64(double addrspace(1)* %out, double %x, double %y)
ret void
}
-; FUNC-LABEL: @fneg_fabs_free_f64
+; FUNC-LABEL: {{^}}fneg_fabs_free_f64:
define void @fneg_fabs_free_f64(double addrspace(1)* %out, i64 %in) {
%bc = bitcast i64 %in to double
%fabs = call double @llvm.fabs.f64(double %bc)
@@ -44,7 +44,7 @@ define void @fneg_fabs_free_f64(double addrspace(1)* %out, i64 %in) {
ret void
}
-; FUNC-LABEL: @fneg_fabs_fn_free_f64
+; FUNC-LABEL: {{^}}fneg_fabs_fn_free_f64:
; SI: V_MOV_B32_e32 [[IMMREG:v[0-9]+]], 0x80000000
; SI: V_OR_B32_e32 v{{[0-9]+}}, s{{[0-9]+}}, [[IMMREG]]
define void @fneg_fabs_fn_free_f64(double addrspace(1)* %out, i64 %in) {
@@ -55,7 +55,7 @@ define void @fneg_fabs_fn_free_f64(double addrspace(1)* %out, i64 %in) {
ret void
}
-; FUNC-LABEL: @fneg_fabs_f64
+; FUNC-LABEL: {{^}}fneg_fabs_f64:
; SI: S_LOAD_DWORDX2
; SI: S_LOAD_DWORDX2 s{{\[}}[[LO_X:[0-9]+]]:[[HI_X:[0-9]+]]{{\]}}
; SI: V_MOV_B32_e32 [[IMMREG:v[0-9]+]], 0x80000000
@@ -69,7 +69,7 @@ define void @fneg_fabs_f64(double addrspace(1)* %out, double %in) {
ret void
}
-; FUNC-LABEL: @fneg_fabs_v2f64
+; FUNC-LABEL: {{^}}fneg_fabs_v2f64:
; SI: V_MOV_B32_e32 [[IMMREG:v[0-9]+]], 0x80000000
; SI-NOT: 0x80000000
; SI: V_OR_B32_e32 v{{[0-9]+}}, s{{[0-9]+}}, [[IMMREG]]
@@ -81,7 +81,7 @@ define void @fneg_fabs_v2f64(<2 x double> addrspace(1)* %out, <2 x double> %in)
ret void
}
-; FUNC-LABEL: @fneg_fabs_v4f64
+; FUNC-LABEL: {{^}}fneg_fabs_v4f64:
; SI: V_MOV_B32_e32 [[IMMREG:v[0-9]+]], 0x80000000
; SI-NOT: 0x80000000
; SI: V_OR_B32_e32 v{{[0-9]+}}, s{{[0-9]+}}, [[IMMREG]]
diff --git a/test/CodeGen/R600/fneg-fabs.ll b/test/CodeGen/R600/fneg-fabs.ll
index 28cdd066387..bedc659ff9c 100644
--- a/test/CodeGen/R600/fneg-fabs.ll
+++ b/test/CodeGen/R600/fneg-fabs.ll
@@ -1,7 +1,7 @@
; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=R600 -check-prefix=FUNC %s
-; FUNC-LABEL: @fneg_fabs_fadd_f32
+; FUNC-LABEL: {{^}}fneg_fabs_fadd_f32:
; SI-NOT: AND
; SI: V_SUB_F32_e64 {{v[0-9]+}}, {{s[0-9]+}}, |{{v[0-9]+}}|
define void @fneg_fabs_fadd_f32(float addrspace(1)* %out, float %x, float %y) {
@@ -12,7 +12,7 @@ define void @fneg_fabs_fadd_f32(float addrspace(1)* %out, float %x, float %y) {
ret void
}
-; FUNC-LABEL: @fneg_fabs_fmul_f32
+; FUNC-LABEL: {{^}}fneg_fabs_fmul_f32:
; SI-NOT: AND
; SI: V_MUL_F32_e64 {{v[0-9]+}}, {{s[0-9]+}}, -|{{v[0-9]+}}|
; SI-NOT: AND
@@ -28,7 +28,7 @@ define void @fneg_fabs_fmul_f32(float addrspace(1)* %out, float %x, float %y) {
; (fabs (f32 bitcast (i32 a))) => (f32 bitcast (and (i32 a), 0x7FFFFFFF))
; unless isFabsFree returns true
-; FUNC-LABEL: @fneg_fabs_free_f32
+; FUNC-LABEL: {{^}}fneg_fabs_free_f32:
; R600-NOT: AND
; R600: |PV.{{[XYZW]}}|
; R600: -PV
@@ -43,7 +43,7 @@ define void @fneg_fabs_free_f32(float addrspace(1)* %out, i32 %in) {
ret void
}
-; FUNC-LABEL: @fneg_fabs_fn_free_f32
+; FUNC-LABEL: {{^}}fneg_fabs_fn_free_f32:
; R600-NOT: AND
; R600: |PV.{{[XYZW]}}|
; R600: -PV
@@ -58,7 +58,7 @@ define void @fneg_fabs_fn_free_f32(float addrspace(1)* %out, i32 %in) {
ret void
}
-; FUNC-LABEL: @fneg_fabs_f32
+; FUNC-LABEL: {{^}}fneg_fabs_f32:
; SI: V_MOV_B32_e32 [[IMMREG:v[0-9]+]], 0x80000000
; SI: V_OR_B32_e32 v{{[0-9]+}}, s{{[0-9]+}}, [[IMMREG]]
define void @fneg_fabs_f32(float addrspace(1)* %out, float %in) {
@@ -68,7 +68,7 @@ define void @fneg_fabs_f32(float addrspace(1)* %out, float %in) {
ret void
}
-; FUNC-LABEL: @v_fneg_fabs_f32
+; FUNC-LABEL: {{^}}v_fneg_fabs_f32:
; SI: V_OR_B32_e32 v{{[0-9]+}}, 0x80000000, v{{[0-9]+}}
define void @v_fneg_fabs_f32(float addrspace(1)* %out, float addrspace(1)* %in) {
%val = load float addrspace(1)* %in, align 4
@@ -78,7 +78,7 @@ define void @v_fneg_fabs_f32(float addrspace(1)* %out, float addrspace(1)* %in)
ret void
}
-; FUNC-LABEL: @fneg_fabs_v2f32
+; FUNC-LABEL: {{^}}fneg_fabs_v2f32:
; R600: |{{(PV|T[0-9])\.[XYZW]}}|
; R600: -PV
; R600: |{{(PV|T[0-9])\.[XYZW]}}|
@@ -97,7 +97,7 @@ define void @fneg_fabs_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %in) {
}
; FIXME: SGPR should be used directly for first src operand.
-; FUNC-LABEL: @fneg_fabs_v4f32
+; FUNC-LABEL: {{^}}fneg_fabs_v4f32:
; SI: V_MOV_B32_e32 [[IMMREG:v[0-9]+]], 0x80000000
; SI-NOT: 0x80000000
; SI: V_OR_B32_e32 v{{[0-9]+}}, v{{[0-9]+}}, [[IMMREG]]
diff --git a/test/CodeGen/R600/fneg.f64.ll b/test/CodeGen/R600/fneg.f64.ll
index b8a4a9fee83..68a1c0f2320 100644
--- a/test/CodeGen/R600/fneg.f64.ll
+++ b/test/CodeGen/R600/fneg.f64.ll
@@ -1,6 +1,6 @@
; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-; FUNC-LABEL: @fneg_f64
+; FUNC-LABEL: {{^}}fneg_f64:
; SI: V_XOR_B32
define void @fneg_f64(double addrspace(1)* %out, double %in) {
%fneg = fsub double -0.000000e+00, %in
@@ -8,7 +8,7 @@ define void @fneg_f64(double addrspace(1)* %out, double %in) {
ret void
}
-; FUNC-LABEL: @fneg_v2f64
+; FUNC-LABEL: {{^}}fneg_v2f64:
; SI: V_XOR_B32
; SI: V_XOR_B32
define void @fneg_v2f64(<2 x double> addrspace(1)* nocapture %out, <2 x double> %in) {
@@ -17,7 +17,7 @@ define void @fneg_v2f64(<2 x double> addrspace(1)* nocapture %out, <2 x double>
ret void
}
-; FUNC-LABEL: @fneg_v4f64
+; FUNC-LABEL: {{^}}fneg_v4f64:
; R600: -PV
; R600: -T
; R600: -PV
@@ -37,7 +37,7 @@ define void @fneg_v4f64(<4 x double> addrspace(1)* nocapture %out, <4 x double>
; (fneg (f64 bitcast (i64 a))) => (f64 bitcast (xor (i64 a), 0x8000000000000000))
; unless the target returns true for isNegFree()
-; FUNC-LABEL: @fneg_free_f64
+; FUNC-LABEL: {{^}}fneg_free_f64:
; FIXME: Unnecessary copy to VGPRs
; SI: V_ADD_F64 {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, -{{v\[[0-9]+:[0-9]+\]$}}
define void @fneg_free_f64(double addrspace(1)* %out, i64 %in) {
@@ -47,7 +47,7 @@ define void @fneg_free_f64(double addrspace(1)* %out, i64 %in) {
ret void
}
-; SI-LABEL: @fneg_fold
+; SI-LABEL: {{^}}fneg_fold_f64:
; SI: S_LOAD_DWORDX2 [[NEG_VALUE:s\[[0-9]+:[0-9]+\]]], {{s\[[0-9]+:[0-9]+\]}}, 0xb
; SI-NOT: XOR
; SI: V_MUL_F64 {{v\[[0-9]+:[0-9]+\]}}, -[[NEG_VALUE]], [[NEG_VALUE]]
diff --git a/test/CodeGen/R600/fneg.ll b/test/CodeGen/R600/fneg.ll
index 4fd2239dddd..638983ab8f2 100644
--- a/test/CodeGen/R600/fneg.ll
+++ b/test/CodeGen/R600/fneg.ll
@@ -1,7 +1,7 @@
; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=R600 -check-prefix=FUNC %s
-; FUNC-LABEL: @fneg_f32
+; FUNC-LABEL: {{^}}fneg_f32:
; R600: -PV
; SI: V_XOR_B32
@@ -11,7 +11,7 @@ define void @fneg_f32(float addrspace(1)* %out, float %in) {
ret void
}
-; FUNC-LABEL: @fneg_v2f32
+; FUNC-LABEL: {{^}}fneg_v2f32:
; R600: -PV
; R600: -PV
@@ -23,7 +23,7 @@ define void @fneg_v2f32(<2 x float> addrspace(1)* nocapture %out, <2 x float> %i
ret void
}
-; FUNC-LABEL: @fneg_v4f32
+; FUNC-LABEL: {{^}}fneg_v4f32:
; R600: -PV
; R600: -T
; R600: -PV
@@ -43,7 +43,7 @@ define void @fneg_v4f32(<4 x float> addrspace(1)* nocapture %out, <4 x float> %i
; (fneg (f32 bitcast (i32 a))) => (f32 bitcast (xor (i32 a), 0x80000000))
; unless the target returns true for isNegFree()
-; FUNC-LABEL: @fneg_free_f32
+; FUNC-LABEL: {{^}}fneg_free_f32:
; R600-NOT: XOR
; R600: -KC0[2].Z
@@ -56,7 +56,7 @@ define void @fneg_free_f32(float addrspace(1)* %out, i32 %in) {
ret void
}
-; FUNC-LABEL: @fneg_fold
+; FUNC-LABEL: {{^}}fneg_fold_f32:
; SI: S_LOAD_DWORD [[NEG_VALUE:s[0-9]+]], s[{{[0-9]+:[0-9]+}}], 0xb
; SI-NOT: XOR
; SI: V_MUL_F32_e64 v{{[0-9]+}}, -[[NEG_VALUE]], [[NEG_VALUE]]
diff --git a/test/CodeGen/R600/fp16_to_fp.ll b/test/CodeGen/R600/fp16_to_fp.ll
index 777eadc34ea..79fff1e1aa9 100644
--- a/test/CodeGen/R600/fp16_to_fp.ll
+++ b/test/CodeGen/R600/fp16_to_fp.ll
@@ -3,7 +3,7 @@
declare float @llvm.convert.from.fp16.f32(i16) nounwind readnone
declare double @llvm.convert.from.fp16.f64(i16) nounwind readnone
-; SI-LABEL: @test_convert_fp16_to_fp32:
+; SI-LABEL: {{^}}test_convert_fp16_to_fp32:
; SI: BUFFER_LOAD_USHORT [[VAL:v[0-9]+]]
; SI: V_CVT_F32_F16_e32 [[RESULT:v[0-9]+]], [[VAL]]
; SI: BUFFER_STORE_DWORD [[RESULT]]
@@ -15,7 +15,7 @@ define void @test_convert_fp16_to_fp32(float addrspace(1)* noalias %out, i16 add
}
-; SI-LABEL: @test_convert_fp16_to_fp64:
+; SI-LABEL: {{^}}test_convert_fp16_to_fp64:
; SI: BUFFER_LOAD_USHORT [[VAL:v[0-9]+]]
; SI: V_CVT_F32_F16_e32 [[RESULT32:v[0-9]+]], [[VAL]]
; SI: V_CVT_F64_F32_e32 [[RESULT:v\[[0-9]+:[0-9]+\]]], [[RESULT32]]
diff --git a/test/CodeGen/R600/fp32_to_fp16.ll b/test/CodeGen/R600/fp32_to_fp16.ll
index 6b5ff00b5f6..802cca88877 100644
--- a/test/CodeGen/R600/fp32_to_fp16.ll
+++ b/test/CodeGen/R600/fp32_to_fp16.ll
@@ -2,7 +2,7 @@
declare i16 @llvm.convert.to.fp16.f32(float) nounwind readnone
-; SI-LABEL: @test_convert_fp32_to_fp16:
+; SI-LABEL: {{^}}test_convert_fp32_to_fp16:
; SI: BUFFER_LOAD_DWORD [[VAL:v[0-9]+]]
; SI: V_CVT_F16_F32_e32 [[RESULT:v[0-9]+]], [[VAL]]
; SI: BUFFER_STORE_SHORT [[RESULT]]
diff --git a/test/CodeGen/R600/fp64_to_sint.ll b/test/CodeGen/R600/fp64_to_sint.ll
index 221b32d7f65..b8b624da233 100644
--- a/test/CodeGen/R600/fp64_to_sint.ll
+++ b/test/CodeGen/R600/fp64_to_sint.ll
@@ -1,6 +1,6 @@
; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-; FUNC-LABEL: @fp_to_sint_f64_i32
+; FUNC-LABEL: {{^}}fp_to_sint_f64_i32:
; SI: V_CVT_I32_F64_e32
define void @fp_to_sint_f64_i32(i32 addrspace(1)* %out, double %in) {
%result = fptosi double %in to i32
@@ -8,7 +8,7 @@ define void @fp_to_sint_f64_i32(i32 addrspace(1)* %out, double %in) {
ret void
}
-; FUNC-LABEL: @fp_to_sint_v2f64_v2i32
+; FUNC-LABEL: {{^}}fp_to_sint_v2f64_v2i32:
; SI: V_CVT_I32_F64_e32
; SI: V_CVT_I32_F64_e32
define void @fp_to_sint_v2f64_v2i32(<2 x i32> addrspace(1)* %out, <2 x double> %in) {
@@ -17,7 +17,7 @@ define void @fp_to_sint_v2f64_v2i32(<2 x i32> addrspace(1)* %out, <2 x double> %
ret void
}
-; FUNC-LABEL: @fp_to_sint_v4f64_v4i32
+; FUNC-LABEL: {{^}}fp_to_sint_v4f64_v4i32:
; SI: V_CVT_I32_F64_e32
; SI: V_CVT_I32_F64_e32
; SI: V_CVT_I32_F64_e32
diff --git a/test/CodeGen/R600/fp_to_sint.ll b/test/CodeGen/R600/fp_to_sint.ll
index 322a55bf6e4..f71b8515ef0 100644
--- a/test/CodeGen/R600/fp_to_sint.ll
+++ b/test/CodeGen/R600/fp_to_sint.ll
@@ -1,7 +1,7 @@
; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck %s --check-prefix=EG --check-prefix=FUNC
; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck %s --check-prefix=SI --check-prefix=FUNC
-; FUNC-LABEL: @fp_to_sint_i32
+; FUNC-LABEL: {{^}}fp_to_sint_i32:
; EG: FLT_TO_INT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW]}}
; SI: V_CVT_I32_F32_e32
; SI: S_ENDPGM
@@ -11,7 +11,7 @@ define void @fp_to_sint_i32 (i32 addrspace(1)* %out, float %in) {
ret void
}
-; FUNC-LABEL: @fp_to_sint_v2i32
+; FUNC-LABEL: {{^}}fp_to_sint_v2i32:
; EG: FLT_TO_INT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW]}}
; EG: FLT_TO_INT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW]}}
; SI: V_CVT_I32_F32_e32
@@ -22,7 +22,7 @@ define void @fp_to_sint_v2i32(<2 x i32> addrspace(1)* %out, <2 x float> %in) {
ret void
}
-; FUNC-LABEL: @fp_to_sint_v4i32
+; FUNC-LABEL: {{^}}fp_to_sint_v4i32:
; EG: FLT_TO_INT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW]}}
; EG: FLT_TO_INT {{\** *}}T{{[0-9]+\.[XYZW]}}
; EG: FLT_TO_INT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW]}}
@@ -38,7 +38,7 @@ define void @fp_to_sint_v4i32(<4 x i32> addrspace(1)* %out, <4 x float> addrspac
ret void
}
-; FUNC-LABEL: @fp_to_sint_i64
+; FUNC-LABEL: {{^}}fp_to_sint_i64:
; EG-DAG: AND_INT
; EG-DAG: LSHR
@@ -71,7 +71,7 @@ entry:
ret void
}
-; FUNC: @fp_to_sint_v2i64
+; FUNC: {{^}}fp_to_sint_v2i64:
; EG-DAG: AND_INT
; EG-DAG: LSHR
; EG-DAG: SUB_INT
@@ -122,7 +122,7 @@ define void @fp_to_sint_v2i64(<2 x i64> addrspace(1)* %out, <2 x float> %x) {
ret void
}
-; FUNC: @fp_to_sint_v4i64
+; FUNC: {{^}}fp_to_sint_v4i64:
; EG-DAG: AND_INT
; EG-DAG: LSHR
; EG-DAG: SUB_INT
diff --git a/test/CodeGen/R600/fp_to_uint.f64.ll b/test/CodeGen/R600/fp_to_uint.f64.ll
index bf607cef088..5b1fe220af7 100644
--- a/test/CodeGen/R600/fp_to_uint.f64.ll
+++ b/test/CodeGen/R600/fp_to_uint.f64.ll
@@ -1,6 +1,6 @@
; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
-; SI-LABEL: @fp_to_uint_i32_f64
+; SI-LABEL: {{^}}fp_to_uint_i32_f64:
; SI: V_CVT_U32_F64_e32
define void @fp_to_uint_i32_f64(i32 addrspace(1)* %out, double %in) {
%cast = fptoui double %in to i32
diff --git a/test/CodeGen/R600/fp_to_uint.ll b/test/CodeGen/R600/fp_to_uint.ll
index 6e71823a132..e92c2c651a3 100644
--- a/test/CodeGen/R600/fp_to_uint.ll
+++ b/test/CodeGen/R600/fp_to_uint.ll
@@ -1,7 +1,7 @@
; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck %s --check-prefix=EG --check-prefix=FUNC
; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck %s --check-prefix=SI --check-prefix=FUNC
-; FUNC-LABEL: @fp_to_uint_i32
+; FUNC-LABEL: {{^}}fp_to_uint_i32:
; EG: FLT_TO_UINT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW]}}
; SI: V_CVT_U32_F32_e32
; SI: S_ENDPGM
@@ -11,7 +11,7 @@ define void @fp_to_uint_i32 (i32 addrspace(1)* %out, float %in) {
ret void
}
-; FUNC-LABEL: @fp_to_uint_v2i32
+; FUNC-LABEL: {{^}}fp_to_uint_v2i32:
; EG: FLT_TO_UINT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW]}}
; EG: FLT_TO_UINT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; SI: V_CVT_U32_F32_e32
@@ -23,7 +23,7 @@ define void @fp_to_uint_v2i32(<2 x i32> addrspace(1)* %out, <2 x float> %in) {
ret void
}
-; FUNC-LABEL: @fp_to_uint_v4i32
+; FUNC-LABEL: {{^}}fp_to_uint_v4i32:
; EG: FLT_TO_UINT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW]}}
; EG: FLT_TO_UINT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; EG: FLT_TO_UINT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW]}}
@@ -40,7 +40,7 @@ define void @fp_to_uint_v4i32(<4 x i32> addrspace(1)* %out, <4 x float> addrspac
ret void
}
-; FUNC: @fp_to_uint_i64
+; FUNC: {{^}}fp_to_uint_i64:
; EG-DAG: AND_INT
; EG-DAG: LSHR
; EG-DAG: SUB_INT
@@ -70,7 +70,7 @@ define void @fp_to_uint_i64(i64 addrspace(1)* %out, float %x) {
ret void
}
-; FUNC: @fp_to_uint_v2i64
+; FUNC: {{^}}fp_to_uint_v2i64:
; EG-DAG: AND_INT
; EG-DAG: LSHR
; EG-DAG: SUB_INT
@@ -121,7 +121,7 @@ define void @fp_to_uint_v2i64(<2 x i64> addrspace(1)* %out, <2 x float> %x) {
ret void
}
-; FUNC: @fp_to_uint_v4i64
+; FUNC: {{^}}fp_to_uint_v4i64:
; EG-DAG: AND_INT
; EG-DAG: LSHR
; EG-DAG: SUB_INT
diff --git a/test/CodeGen/R600/fpext.ll b/test/CodeGen/R600/fpext.ll
index 143ee79fa15..4bad8e913b9 100644
--- a/test/CodeGen/R600/fpext.ll
+++ b/test/CodeGen/R600/fpext.ll
@@ -1,6 +1,6 @@
; RUN: llc < %s -march=r600 -mcpu=SI -verify-machineinstrs | FileCheck %s --check-prefix=CHECK
-; CHECK: @fpext
+; CHECK: {{^}}fpext:
; CHECK: V_CVT_F64_F32_e32
define void @fpext(double addrspace(1)* %out, float %in) {
%result = fpext float %in to double
diff --git a/test/CodeGen/R600/fptrunc.ll b/test/CodeGen/R600/fptrunc.ll
index 20a8c00ba49..05452d1e86e 100644
--- a/test/CodeGen/R600/fptrunc.ll
+++ b/test/CodeGen/R600/fptrunc.ll
@@ -1,6 +1,6 @@
; RUN: llc < %s -march=r600 -mcpu=SI -verify-machineinstrs | FileCheck %s --check-prefix=CHECK
-; CHECK: @fptrunc
+; CHECK: {{^}}fptrunc:
; CHECK: V_CVT_F32_F64_e32
define void @fptrunc(float addrspace(1)* %out, double %in) {
%result = fptrunc double %in to float
diff --git a/test/CodeGen/R600/frem.ll b/test/CodeGen/R600/frem.ll
index de2a5bcf419..c005ff890cf 100644
--- a/test/CodeGen/R600/frem.ll
+++ b/test/CodeGen/R600/frem.ll
@@ -1,6 +1,6 @@
; RUN: llc -march=r600 -mcpu=SI -enable-misched < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-; FUNC-LABEL: @frem_f32:
+; FUNC-LABEL: {{^}}frem_f32:
; SI-DAG: BUFFER_LOAD_DWORD [[X:v[0-9]+]], {{.*$}}
; SI-DAG: BUFFER_LOAD_DWORD [[Y:v[0-9]+]], {{.*}} offset:0x10
; SI-DAG: V_CMP
@@ -21,7 +21,7 @@ define void @frem_f32(float addrspace(1)* %out, float addrspace(1)* %in1,
ret void
}
-; FUNC-LABEL: @unsafe_frem_f32:
+; FUNC-LABEL: {{^}}unsafe_frem_f32:
; SI: BUFFER_LOAD_DWORD [[Y:v[0-9]+]], {{.*}} offset:0x10
; SI: BUFFER_LOAD_DWORD [[X:v[0-9]+]], {{.*}}
; SI: V_RCP_F32_e32 [[INVY:v[0-9]+]], [[Y]]
@@ -43,7 +43,7 @@ define void @unsafe_frem_f32(float addrspace(1)* %out, float addrspace(1)* %in1,
; TODO: This should check something when f64 fdiv is implemented
; correctly
-; FUNC-LABEL: @frem_f64:
+; FUNC-LABEL: {{^}}frem_f64:
; SI: S_ENDPGM
define void @frem_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
double addrspace(1)* %in2) #0 {
@@ -54,7 +54,7 @@ define void @frem_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
ret void
}
-; FUNC-LABEL: @unsafe_frem_f64:
+; FUNC-LABEL: {{^}}unsafe_frem_f64:
; SI: V_RCP_F64_e32
; SI: V_MUL_F64
; SI: V_BFE_I32
diff --git a/test/CodeGen/R600/fsqrt.ll b/test/CodeGen/R600/fsqrt.ll
index ae50b17d38f..c8eb3f1dbcb 100644
--- a/test/CodeGen/R600/fsqrt.ll
+++ b/test/CodeGen/R600/fsqrt.ll
@@ -1,6 +1,6 @@
; RUN: llc < %s -march=r600 -mcpu=tahiti -verify-machineinstrs | FileCheck %s
-; CHECK: @fsqrt_f32
+; CHECK: {{^}}fsqrt_f32:
; CHECK: V_SQRT_F32_e32 {{v[0-9]+, v[0-9]+}}
define void @fsqrt_f32(float addrspace(1)* %out, float addrspace(1)* %in) {
@@ -10,7 +10,7 @@ define void @fsqrt_f32(float addrspace(1)* %out, float addrspace(1)* %in) {
ret void
}
-; CHECK: @fsqrt_f64
+; CHECK: {{^}}fsqrt_f64:
; CHECK: V_SQRT_F64_e32 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\]}}
define void @fsqrt_f64(double addrspace(1)* %out, double addrspace(1)* %in) {
diff --git a/test/CodeGen/R600/fsub.ll b/test/CodeGen/R600/fsub.ll
index 22c6268e296..68f44881b66 100644
--- a/test/CodeGen/R600/fsub.ll
+++ b/test/CodeGen/R600/fsub.ll
@@ -2,7 +2,7 @@
; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-; FUNC-LABEL: @v_fsub_f32
+; FUNC-LABEL: {{^}}v_fsub_f32:
; SI: V_SUBREV_F32_e32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
define void @v_fsub_f32(float addrspace(1)* %out, float addrspace(1)* %in) {
%b_ptr = getelementptr float addrspace(1)* %in, i32 1
@@ -13,7 +13,7 @@ define void @v_fsub_f32(float addrspace(1)* %out, float addrspace(1)* %in) {
ret void
}
-; FUNC-LABEL: @s_fsub_f32
+; FUNC-LABEL: {{^}}s_fsub_f32:
; R600: ADD {{\** *}}T{{[0-9]+\.[XYZW]}}, KC0[2].Z, -KC0[2].W
; SI: V_SUB_F32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
@@ -27,7 +27,7 @@ declare float @llvm.R600.load.input(i32) readnone
declare void @llvm.AMDGPU.store.output(float, i32)
-; FUNC-LABEL: @fsub_v2f32
+; FUNC-LABEL: {{^}}fsub_v2f32:
; R600-DAG: ADD {{\** *}}T{{[0-9]+\.[XYZW]}}, KC0[3].X, -KC0[3].Z
; R600-DAG: ADD {{\** *}}T{{[0-9]+\.[XYZW]}}, KC0[2].W, -KC0[3].Y
@@ -40,7 +40,7 @@ define void @fsub_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %a, <2 x flo
ret void
}
-; FUNC-LABEL: @v_fsub_v4f32
+; FUNC-LABEL: {{^}}v_fsub_v4f32:
; R600: ADD {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], -T[0-9]+\.[XYZW]}}
; R600: ADD {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], -T[0-9]+\.[XYZW]}}
; R600: ADD {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], -T[0-9]+\.[XYZW]}}
@@ -61,7 +61,7 @@ define void @v_fsub_v4f32(<4 x float> addrspace(1)* %out, <4 x float> addrspace(
; FIXME: Should be using SGPR directly for first operand
-; FUNC-LABEL: @s_fsub_v4f32
+; FUNC-LABEL: {{^}}s_fsub_v4f32:
; SI: V_SUBREV_F32_e32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
; SI: V_SUBREV_F32_e32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
; SI: V_SUBREV_F32_e32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
diff --git a/test/CodeGen/R600/fsub64.ll b/test/CodeGen/R600/fsub64.ll
index f5e5708f1b4..b9bee2f6aff 100644
--- a/test/CodeGen/R600/fsub64.ll
+++ b/test/CodeGen/R600/fsub64.ll
@@ -1,6 +1,6 @@
; RUN: llc -march=r600 -mcpu=tahiti -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
-; SI-LABEL: @fsub_f64:
+; SI-LABEL: {{^}}fsub_f64:
; SI: V_ADD_F64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], -v\[[0-9]+:[0-9]+\]}}
define void @fsub_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
double addrspace(1)* %in2) {
diff --git a/test/CodeGen/R600/ftrunc.ll b/test/CodeGen/R600/ftrunc.ll
index 0d7d4679fe3..93e32bfac84 100644
--- a/test/CodeGen/R600/ftrunc.ll
+++ b/test/CodeGen/R600/ftrunc.ll
@@ -8,7 +8,7 @@ declare <4 x float> @llvm.trunc.v4f32(<4 x float>) nounwind readnone
declare <8 x float> @llvm.trunc.v8f32(<8 x float>) nounwind readnone
declare <16 x float> @llvm.trunc.v16f32(<16 x float>) nounwind readnone
-; FUNC-LABEL: @ftrunc_f32:
+; FUNC-LABEL: {{^}}ftrunc_f32:
; EG: TRUNC
; SI: V_TRUNC_F32_e32
define void @ftrunc_f32(float addrspace(1)* %out, float %x) {
@@ -17,7 +17,7 @@ define void @ftrunc_f32(float addrspace(1)* %out, float %x) {
ret void
}
-; FUNC-LABEL: @ftrunc_v2f32:
+; FUNC-LABEL: {{^}}ftrunc_v2f32:
; EG: TRUNC
; EG: TRUNC
; SI: V_TRUNC_F32_e32
@@ -28,7 +28,7 @@ define void @ftrunc_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %x) {
ret void
}
-; FIXME-FUNC-LABEL: @ftrunc_v3f32:
+; FIXME-FUNC-LABEL: {{^}}ftrunc_v3f32:
; FIXME-EG: TRUNC
; FIXME-EG: TRUNC
; FIXME-EG: TRUNC
@@ -41,7 +41,7 @@ define void @ftrunc_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %x) {
; ret void
; }
-; FUNC-LABEL: @ftrunc_v4f32:
+; FUNC-LABEL: {{^}}ftrunc_v4f32:
; EG: TRUNC
; EG: TRUNC
; EG: TRUNC
@@ -56,7 +56,7 @@ define void @ftrunc_v4f32(<4 x float> addrspace(1)* %out, <4 x float> %x) {
ret void
}
-; FUNC-LABEL: @ftrunc_v8f32:
+; FUNC-LABEL: {{^}}ftrunc_v8f32:
; EG: TRUNC
; EG: TRUNC
; EG: TRUNC
@@ -79,7 +79,7 @@ define void @ftrunc_v8f32(<8 x float> addrspace(1)* %out, <8 x float> %x) {
ret void
}
-; FUNC-LABEL: @ftrunc_v16f32:
+; FUNC-LABEL: {{^}}ftrunc_v16f32:
; EG: TRUNC
; EG: TRUNC
; EG: TRUNC
diff --git a/test/CodeGen/R600/gep-address-space.ll b/test/CodeGen/R600/gep-address-space.ll
index cd698f4d6b4..c1e511d1bb1 100644
--- a/test/CodeGen/R600/gep-address-space.ll
+++ b/test/CodeGen/R600/gep-address-space.ll
@@ -2,7 +2,7 @@
; RUN: llc -march=r600 -mcpu=bonaire -verify-machineinstrs< %s | FileCheck --check-prefix=CI --check-prefix=CHECK %s
define void @use_gep_address_space([1024 x i32] addrspace(3)* %array) nounwind {
-; CHECK-LABEL: @use_gep_address_space:
+; CHECK-LABEL: {{^}}use_gep_address_space:
; CHECK: V_MOV_B32_e32 [[PTR:v[0-9]+]], s{{[0-9]+}}
; CHECK: DS_WRITE_B32 [[PTR]], v{{[0-9]+}}, 0x40
%p = getelementptr [1024 x i32] addrspace(3)* %array, i16 0, i16 16
@@ -11,7 +11,7 @@ define void @use_gep_address_space([1024 x i32] addrspace(3)* %array) nounwind {
}
define void @use_gep_address_space_large_offset([1024 x i32] addrspace(3)* %array) nounwind {
-; CHECK-LABEL: @use_gep_address_space_large_offset:
+; CHECK-LABEL: {{^}}use_gep_address_space_large_offset:
; The LDS offset will be 65536 bytes, which is larger than the size of LDS on
; SI, which is why it is being OR'd with the base pointer.
; SI: S_OR_B32
@@ -23,7 +23,7 @@ define void @use_gep_address_space_large_offset([1024 x i32] addrspace(3)* %arra
}
define void @gep_as_vector_v4(<4 x [1024 x i32] addrspace(3)*> %array) nounwind {
-; CHECK-LABEL: @gep_as_vector_v4:
+; CHECK-LABEL: {{^}}gep_as_vector_v4:
; CHECK: S_ADD_I32
; CHECK: S_ADD_I32
; CHECK: S_ADD_I32
@@ -41,7 +41,7 @@ define void @gep_as_vector_v4(<4 x [1024 x i32] addrspace(3)*> %array) nounwind
}
define void @gep_as_vector_v2(<2 x [1024 x i32] addrspace(3)*> %array) nounwind {
-; CHECK-LABEL: @gep_as_vector_v2:
+; CHECK-LABEL: {{^}}gep_as_vector_v2:
; CHECK: S_ADD_I32
; CHECK: S_ADD_I32
%p = getelementptr <2 x [1024 x i32] addrspace(3)*> %array, <2 x i16> zeroinitializer, <2 x i16> <i16 16, i16 16>
diff --git a/test/CodeGen/R600/global_atomics.ll b/test/CodeGen/R600/global_atomics.ll
index 665913fbdff..5095d487de6 100644
--- a/test/CodeGen/R600/global_atomics.ll
+++ b/test/CodeGen/R600/global_atomics.ll
@@ -1,6 +1,6 @@
; RUN: llc < %s -march=r600 -mcpu=SI -verify-machineinstrs | FileCheck --check-prefix=SI --check-prefix=FUNC %s
-; FUNC-LABEL: @atomic_load_i32_offset
+; FUNC-LABEL: {{^}}atomic_load_i32_offset:
; SI: BUFFER_ATOMIC_ADD v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
define void @atomic_load_i32_offset(i32 addrspace(1)* %out, i32 %in) {
entry:
@@ -8,7 +8,7 @@ entry:
ret void
}
-; FUNC-LABEL: @atomic_load_i32_ret_offset
+; FUNC-LABEL: {{^}}atomic_load_i32_ret_offset:
; SI: BUFFER_ATOMIC_ADD [[RET:v[0-9]+]], s[{{[0-9]+}}:{{[0-9]+}}], 0 glc
; SI: BUFFER_STORE_DWORD [[RET]]
define void @atomic_load_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
@@ -18,7 +18,7 @@ entry:
ret void
}
-; FUNC-LABEL: @atomic_load_i32_addr64
+; FUNC-LABEL: {{^}}atomic_load_i32_addr64:
; SI: BUFFER_ATOMIC_ADD v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64{{$}}
define void @atomic_load_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
@@ -27,7 +27,7 @@ entry:
ret void
}
-; FUNC-LABEL: @atomic_load_i32_ret_addr64
+; FUNC-LABEL: {{^}}atomic_load_i32_ret_addr64:
; SI: BUFFER_ATOMIC_ADD [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 glc{{$}}
; SI: BUFFER_STORE_DWORD [[RET]]
define void @atomic_load_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
diff --git a/test/CodeGen/R600/gv-const-addrspace-fail.ll b/test/CodeGen/R600/gv-const-addrspace-fail.ll
index 7ad55ed2b40..c377670a12f 100644
--- a/test/CodeGen/R600/gv-const-addrspace-fail.ll
+++ b/test/CodeGen/R600/gv-const-addrspace-fail.ll
@@ -4,7 +4,7 @@
@a = internal addrspace(2) constant [1 x i8] [ i8 7 ], align 1
-; FUNC-LABEL: @test_i8
+; FUNC-LABEL: {{^}}test_i8:
; EG: CF_END
; SI: BUFFER_STORE_BYTE
; SI: S_ENDPGM
@@ -17,7 +17,7 @@ define void @test_i8( i32 %s, i8 addrspace(1)* %out) #3 {
@b = internal addrspace(2) constant [1 x i16] [ i16 7 ], align 2
-; FUNC-LABEL: @test_i16
+; FUNC-LABEL: {{^}}test_i16:
; EG: CF_END
; SI: BUFFER_STORE_SHORT
; SI: S_ENDPGM
@@ -33,7 +33,7 @@ define void @test_i16( i32 %s, i16 addrspace(1)* %out) #3 {
; The illegal i8s aren't handled
@struct_bar_gv = internal addrspace(2) constant [1 x %struct.bar] [ %struct.bar { float 16.0, [5 x i8] [i8 0, i8 1, i8 2, i8 3, i8 4] } ]
-; FUNC-LABEL: @struct_bar_gv_load
+; FUNC-LABEL: {{^}}struct_bar_gv_load:
define void @struct_bar_gv_load(i8 addrspace(1)* %out, i32 %index) {
%gep = getelementptr inbounds [1 x %struct.bar] addrspace(2)* @struct_bar_gv, i32 0, i32 0, i32 1, i32 %index
%load = load i8 addrspace(2)* %gep, align 1
@@ -48,7 +48,7 @@ define void @struct_bar_gv_load(i8 addrspace(1)* %out, i32 %index) {
<4 x i32> <i32 9, i32 10, i32 11, i32 12>,
<4 x i32> <i32 13, i32 14, i32 15, i32 16> ]
-; FUNC-LABEL: @array_vector_gv_load
+; FUNC-LABEL: {{^}}array_vector_gv_load:
define void @array_vector_gv_load(<4 x i32> addrspace(1)* %out, i32 %index) {
%gep = getelementptr inbounds [4 x <4 x i32>] addrspace(2)* @array_vector_gv, i32 0, i32 %index
%load = load <4 x i32> addrspace(2)* %gep, align 16
diff --git a/test/CodeGen/R600/gv-const-addrspace.ll b/test/CodeGen/R600/gv-const-addrspace.ll
index e0ac317f998..02c1ea16d7e 100644
--- a/test/CodeGen/R600/gv-const-addrspace.ll
+++ b/test/CodeGen/R600/gv-const-addrspace.ll
@@ -6,7 +6,7 @@
@float_gv = internal unnamed_addr addrspace(2) constant [5 x float] [float 0.0, float 1.0, float 2.0, float 3.0, float 4.0], align 4
-; FUNC-LABEL: @float
+; FUNC-LABEL: {{^}}float:
; FIXME: We should be using S_LOAD_DWORD here.
; SI: BUFFER_LOAD_DWORD
@@ -27,7 +27,7 @@ entry:
@i32_gv = internal unnamed_addr addrspace(2) constant [5 x i32] [i32 0, i32 1, i32 2, i32 3, i32 4], align 4
-; FUNC-LABEL: @i32
+; FUNC-LABEL: {{^}}i32:
; FIXME: We should be using S_LOAD_DWORD here.
; SI: BUFFER_LOAD_DWORD
@@ -52,7 +52,7 @@ entry:
@struct_foo_gv = internal unnamed_addr addrspace(2) constant [1 x %struct.foo] [ %struct.foo { float 16.0, [5 x i32] [i32 0, i32 1, i32 2, i32 3, i32 4] } ]
-; FUNC-LABEL: @struct_foo_gv_load
+; FUNC-LABEL: {{^}}struct_foo_gv_load:
; SI: S_LOAD_DWORD
define void @struct_foo_gv_load(i32 addrspace(1)* %out, i32 %index) {
@@ -67,7 +67,7 @@ define void @struct_foo_gv_load(i32 addrspace(1)* %out, i32 %index) {
<1 x i32> <i32 3>,
<1 x i32> <i32 4> ]
-; FUNC-LABEL: @array_v1_gv_load
+; FUNC-LABEL: {{^}}array_v1_gv_load:
; FIXME: We should be using S_LOAD_DWORD here.
; SI: BUFFER_LOAD_DWORD
define void @array_v1_gv_load(<1 x i32> addrspace(1)* %out, i32 %index) {
diff --git a/test/CodeGen/R600/half.ll b/test/CodeGen/R600/half.ll
index 42aa4faa99f..ce3a1d0d71c 100644
--- a/test/CodeGen/R600/half.ll
+++ b/test/CodeGen/R600/half.ll
@@ -1,7 +1,7 @@
; RUN: llc < %s -march=r600 -mcpu=SI | FileCheck %s
define void @test_load_store(half addrspace(1)* %in, half addrspace(1)* %out) {
-; CHECK-LABEL: @test_load_store
+; CHECK-LABEL: {{^}}test_load_store:
; CHECK: BUFFER_LOAD_USHORT [[TMP:v[0-9]+]]
; CHECK: BUFFER_STORE_SHORT [[TMP]]
%val = load half addrspace(1)* %in
@@ -10,7 +10,7 @@ define void @test_load_store(half addrspace(1)* %in, half addrspace(1)* %out) {
}
define void @test_bitcast_from_half(half addrspace(1)* %in, i16 addrspace(1)* %out) {
-; CHECK-LABEL: @test_bitcast_from_half
+; CHECK-LABEL: {{^}}test_bitcast_from_half:
; CHECK: BUFFER_LOAD_USHORT [[TMP:v[0-9]+]]
; CHECK: BUFFER_STORE_SHORT [[TMP]]
%val = load half addrspace(1) * %in
@@ -20,7 +20,7 @@ define void @test_bitcast_from_half(half addrspace(1)* %in, i16 addrspace(1)* %o
}
define void @test_bitcast_to_half(half addrspace(1)* %out, i16 addrspace(1)* %in) {
-; CHECK-LABEL: @test_bitcast_to_half
+; CHECK-LABEL: {{^}}test_bitcast_to_half:
; CHECK: BUFFER_LOAD_USHORT [[TMP:v[0-9]+]]
; CHECK: BUFFER_STORE_SHORT [[TMP]]
%val = load i16 addrspace(1)* %in
@@ -30,7 +30,7 @@ define void @test_bitcast_to_half(half addrspace(1)* %out, i16 addrspace(1)* %in
}
define void @test_extend32(half addrspace(1)* %in, float addrspace(1)* %out) {
-; CHECK-LABEL: @test_extend32
+; CHECK-LABEL: {{^}}test_extend32:
; CHECK: V_CVT_F32_F16_e32
%val16 = load half addrspace(1)* %in
@@ -40,7 +40,7 @@ define void @test_extend32(half addrspace(1)* %in, float addrspace(1)* %out) {
}
define void @test_extend64(half addrspace(1)* %in, double addrspace(1)* %out) {
-; CHECK-LABEL: @test_extend64
+; CHECK-LABEL: {{^}}test_extend64:
; CHECK: V_CVT_F32_F16_e32
; CHECK: V_CVT_F64_F32_e32
@@ -51,7 +51,7 @@ define void @test_extend64(half addrspace(1)* %in, double addrspace(1)* %out) {
}
define void @test_trunc32(float addrspace(1)* %in, half addrspace(1)* %out) {
-; CHECK-LABEL: @test_trunc32
+; CHECK-LABEL: {{^}}test_trunc32:
; CHECK: V_CVT_F16_F32_e32
%val32 = load float addrspace(1)* %in
diff --git a/test/CodeGen/R600/icmp64.ll b/test/CodeGen/R600/icmp64.ll
index c9e62ff934e..aec88ba2eea 100644
--- a/test/CodeGen/R600/icmp64.ll
+++ b/test/CodeGen/R600/icmp64.ll
@@ -1,6 +1,6 @@
; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
-; SI-LABEL: @test_i64_eq:
+; SI-LABEL: {{^}}test_i64_eq:
; SI: V_CMP_EQ_I64
define void @test_i64_eq(i32 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
%cmp = icmp eq i64 %a, %b
@@ -9,7 +9,7 @@ define void @test_i64_eq(i32 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
ret void
}
-; SI-LABEL: @test_i64_ne:
+; SI-LABEL: {{^}}test_i64_ne:
; SI: V_CMP_NE_I64
define void @test_i64_ne(i32 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
%cmp = icmp ne i64 %a, %b
@@ -18,7 +18,7 @@ define void @test_i64_ne(i32 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
ret void
}
-; SI-LABEL: @test_i64_slt:
+; SI-LABEL: {{^}}test_i64_slt:
; SI: V_CMP_LT_I64
define void @test_i64_slt(i32 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
%cmp = icmp slt i64 %a, %b
@@ -27,7 +27,7 @@ define void @test_i64_slt(i32 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
ret void
}
-; SI-LABEL: @test_i64_ult:
+; SI-LABEL: {{^}}test_i64_ult:
; SI: V_CMP_LT_U64
define void @test_i64_ult(i32 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
%cmp = icmp ult i64 %a, %b
@@ -36,7 +36,7 @@ define void @test_i64_ult(i32 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
ret void
}
-; SI-LABEL: @test_i64_sle:
+; SI-LABEL: {{^}}test_i64_sle:
; SI: V_CMP_LE_I64
define void @test_i64_sle(i32 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
%cmp = icmp sle i64 %a, %b
@@ -45,7 +45,7 @@ define void @test_i64_sle(i32 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
ret void
}
-; SI-LABEL: @test_i64_ule:
+; SI-LABEL: {{^}}test_i64_ule:
; SI: V_CMP_LE_U64
define void @test_i64_ule(i32 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
%cmp = icmp ule i64 %a, %b
@@ -54,7 +54,7 @@ define void @test_i64_ule(i32 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
ret void
}
-; SI-LABEL: @test_i64_sgt:
+; SI-LABEL: {{^}}test_i64_sgt:
; SI: V_CMP_GT_I64
define void @test_i64_sgt(i32 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
%cmp = icmp sgt i64 %a, %b
@@ -63,7 +63,7 @@ define void @test_i64_sgt(i32 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
ret void
}
-; SI-LABEL: @test_i64_ugt:
+; SI-LABEL: {{^}}test_i64_ugt:
; SI: V_CMP_GT_U64
define void @test_i64_ugt(i32 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
%cmp = icmp ugt i64 %a, %b
@@ -72,7 +72,7 @@ define void @test_i64_ugt(i32 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
ret void
}
-; SI-LABEL: @test_i64_sge:
+; SI-LABEL: {{^}}test_i64_sge:
; SI: V_CMP_GE_I64
define void @test_i64_sge(i32 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
%cmp = icmp sge i64 %a, %b
@@ -81,7 +81,7 @@ define void @test_i64_sge(i32 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
ret void
}
-; SI-LABEL: @test_i64_uge:
+; SI-LABEL: {{^}}test_i64_uge:
; SI: V_CMP_GE_U64
define void @test_i64_uge(i32 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
%cmp = icmp uge i64 %a, %b
diff --git a/test/CodeGen/R600/imm.ll b/test/CodeGen/R600/imm.ll
index 5eecb035f6a..6c135730ab2 100644
--- a/test/CodeGen/R600/imm.ll
+++ b/test/CodeGen/R600/imm.ll
@@ -1,7 +1,7 @@
; RUN: llc -march=r600 -mcpu=verde -verify-machineinstrs < %s | FileCheck %s
; Use a 64-bit value with lo bits that can be represented as an inline constant
-; CHECK-LABEL: @i64_imm_inline_lo
+; CHECK-LABEL: {{^}}i64_imm_inline_lo:
; CHECK: S_MOV_B32 [[LO:s[0-9]+]], 5
; CHECK: V_MOV_B32_e32 v[[LO_VGPR:[0-9]+]], [[LO]]
; CHECK: BUFFER_STORE_DWORDX2 v{{\[}}[[LO_VGPR]]:
@@ -12,7 +12,7 @@ entry:
}
; Use a 64-bit value with hi bits that can be represented as an inline constant
-; CHECK-LABEL: @i64_imm_inline_hi
+; CHECK-LABEL: {{^}}i64_imm_inline_hi:
; CHECK: S_MOV_B32 [[HI:s[0-9]+]], 5
; CHECK: V_MOV_B32_e32 v[[HI_VGPR:[0-9]+]], [[HI]]
; CHECK: BUFFER_STORE_DWORDX2 v{{\[[0-9]+:}}[[HI_VGPR]]
@@ -22,7 +22,7 @@ entry:
ret void
}
-; CHECK-LABEL: @store_inline_imm_0.0_f32
+; CHECK-LABEL: {{^}}store_inline_imm_0.0_f32
; CHECK: V_MOV_B32_e32 [[REG:v[0-9]+]], 0{{$}}
; CHECK-NEXT: BUFFER_STORE_DWORD [[REG]]
define void @store_inline_imm_0.0_f32(float addrspace(1)* %out) {
@@ -30,7 +30,7 @@ define void @store_inline_imm_0.0_f32(float addrspace(1)* %out) {
ret void
}
-; CHECK-LABEL: @store_inline_imm_0.5_f32
+; CHECK-LABEL: {{^}}store_inline_imm_0.5_f32
; CHECK: V_MOV_B32_e32 [[REG:v[0-9]+]], 0.5{{$}}
; CHECK-NEXT: BUFFER_STORE_DWORD [[REG]]
define void @store_inline_imm_0.5_f32(float addrspace(1)* %out) {
@@ -38,7 +38,7 @@ define void @store_inline_imm_0.5_f32(float addrspace(1)* %out) {
ret void
}
-; CHECK-LABEL: @store_inline_imm_m_0.5_f32
+; CHECK-LABEL: {{^}}store_inline_imm_m_0.5_f32
; CHECK: V_MOV_B32_e32 [[REG:v[0-9]+]], -0.5{{$}}
; CHECK-NEXT: BUFFER_STORE_DWORD [[REG]]
define void @store_inline_imm_m_0.5_f32(float addrspace(1)* %out) {
@@ -46,7 +46,7 @@ define void @store_inline_imm_m_0.5_f32(float addrspace(1)* %out) {
ret void
}
-; CHECK-LABEL: @store_inline_imm_1.0_f32
+; CHECK-LABEL: {{^}}store_inline_imm_1.0_f32
; CHECK: V_MOV_B32_e32 [[REG:v[0-9]+]], 1.0{{$}}
; CHECK-NEXT: BUFFER_STORE_DWORD [[REG]]
define void @store_inline_imm_1.0_f32(float addrspace(1)* %out) {
@@ -54,7 +54,7 @@ define void @store_inline_imm_1.0_f32(float addrspace(1)* %out) {
ret void
}
-; CHECK-LABEL: @store_inline_imm_m_1.0_f32
+; CHECK-LABEL: {{^}}store_inline_imm_m_1.0_f32
; CHECK: V_MOV_B32_e32 [[REG:v[0-9]+]], -1.0{{$}}
; CHECK-NEXT: BUFFER_STORE_DWORD [[REG]]
define void @store_inline_imm_m_1.0_f32(float addrspace(1)* %out) {
@@ -62,7 +62,7 @@ define void @store_inline_imm_m_1.0_f32(float addrspace(1)* %out) {
ret void
}
-; CHECK-LABEL: @store_inline_imm_2.0_f32
+; CHECK-LABEL: {{^}}store_inline_imm_2.0_f32
; CHECK: V_MOV_B32_e32 [[REG:v[0-9]+]], 2.0{{$}}
; CHECK-NEXT: BUFFER_STORE_DWORD [[REG]]
define void @store_inline_imm_2.0_f32(float addrspace(1)* %out) {
@@ -70,7 +70,7 @@ define void @store_inline_imm_2.0_f32(float addrspace(1)* %out) {
ret void
}
-; CHECK-LABEL: @store_inline_imm_m_2.0_f32
+; CHECK-LABEL: {{^}}store_inline_imm_m_2.0_f32
; CHECK: V_MOV_B32_e32 [[REG:v[0-9]+]], -2.0{{$}}
; CHECK-NEXT: BUFFER_STORE_DWORD [[REG]]
define void @store_inline_imm_m_2.0_f32(float addrspace(1)* %out) {
@@ -78,7 +78,7 @@ define void @store_inline_imm_m_2.0_f32(float addrspace(1)* %out) {
ret void
}
-; CHECK-LABEL: @store_inline_imm_4.0_f32
+; CHECK-LABEL: {{^}}store_inline_imm_4.0_f32
; CHECK: V_MOV_B32_e32 [[REG:v[0-9]+]], 4.0{{$}}
; CHECK-NEXT: BUFFER_STORE_DWORD [[REG]]
define void @store_inline_imm_4.0_f32(float addrspace(1)* %out) {
@@ -86,7 +86,7 @@ define void @store_inline_imm_4.0_f32(float addrspace(1)* %out) {
ret void
}
-; CHECK-LABEL: @store_inline_imm_m_4.0_f32
+; CHECK-LABEL: {{^}}store_inline_imm_m_4.0_f32
; CHECK: V_MOV_B32_e32 [[REG:v[0-9]+]], -4.0{{$}}
; CHECK-NEXT: BUFFER_STORE_DWORD [[REG]]
define void @store_inline_imm_m_4.0_f32(float addrspace(1)* %out) {
@@ -94,7 +94,7 @@ define void @store_inline_imm_m_4.0_f32(float addrspace(1)* %out) {
ret void
}
-; CHECK-LABEL: @store_literal_imm_f32
+; CHECK-LABEL: {{^}}store_literal_imm_f32:
; CHECK: V_MOV_B32_e32 [[REG:v[0-9]+]], 0x45800000
; CHECK-NEXT: BUFFER_STORE_DWORD [[REG]]
define void @store_literal_imm_f32(float addrspace(1)* %out) {
@@ -102,7 +102,7 @@ define void @store_literal_imm_f32(float addrspace(1)* %out) {
ret void
}
-; CHECK-LABEL: @add_inline_imm_0.0_f32
+; CHECK-LABEL: {{^}}add_inline_imm_0.0_f32
; CHECK: S_LOAD_DWORD [[VAL:s[0-9]+]]
; CHECK: V_ADD_F32_e64 [[REG:v[0-9]+]], [[VAL]], 0.0{{$}}
; CHECK-NEXT: BUFFER_STORE_DWORD [[REG]]
@@ -112,7 +112,7 @@ define void @add_inline_imm_0.0_f32(float addrspace(1)* %out, float %x) {
ret void
}
-; CHECK-LABEL: @add_inline_imm_0.5_f32
+; CHECK-LABEL: {{^}}add_inline_imm_0.5_f32
; CHECK: S_LOAD_DWORD [[VAL:s[0-9]+]]
; CHECK: V_ADD_F32_e64 [[REG:v[0-9]+]], [[VAL]], 0.5{{$}}
; CHECK-NEXT: BUFFER_STORE_DWORD [[REG]]
@@ -122,7 +122,7 @@ define void @add_inline_imm_0.5_f32(float addrspace(1)* %out, float %x) {
ret void
}
-; CHECK-LABEL: @add_inline_imm_neg_0.5_f32
+; CHECK-LABEL: {{^}}add_inline_imm_neg_0.5_f32
; CHECK: S_LOAD_DWORD [[VAL:s[0-9]+]]
; CHECK: V_ADD_F32_e64 [[REG:v[0-9]+]], [[VAL]], -0.5{{$}}
; CHECK-NEXT: BUFFER_STORE_DWORD [[REG]]
@@ -132,7 +132,7 @@ define void @add_inline_imm_neg_0.5_f32(float addrspace(1)* %out, float %x) {
ret void
}
-; CHECK-LABEL: @add_inline_imm_1.0_f32
+; CHECK-LABEL: {{^}}add_inline_imm_1.0_f32
; CHECK: S_LOAD_DWORD [[VAL:s[0-9]+]]
; CHECK: V_ADD_F32_e64 [[REG:v[0-9]+]], [[VAL]], 1.0{{$}}
; CHECK-NEXT: BUFFER_STORE_DWORD [[REG]]
@@ -142,7 +142,7 @@ define void @add_inline_imm_1.0_f32(float addrspace(1)* %out, float %x) {
ret void
}
-; CHECK-LABEL: @add_inline_imm_neg_1.0_f32
+; CHECK-LABEL: {{^}}add_inline_imm_neg_1.0_f32
; CHECK: S_LOAD_DWORD [[VAL:s[0-9]+]]
; CHECK: V_ADD_F32_e64 [[REG:v[0-9]+]], [[VAL]], -1.0{{$}}
; CHECK-NEXT: BUFFER_STORE_DWORD [[REG]]
@@ -152,7 +152,7 @@ define void @add_inline_imm_neg_1.0_f32(float addrspace(1)* %out, float %x) {
ret void
}
-; CHECK-LABEL: @add_inline_imm_2.0_f32
+; CHECK-LABEL: {{^}}add_inline_imm_2.0_f32
; CHECK: S_LOAD_DWORD [[VAL:s[0-9]+]]
; CHECK: V_ADD_F32_e64 [[REG:v[0-9]+]], [[VAL]], 2.0{{$}}
; CHECK-NEXT: BUFFER_STORE_DWORD [[REG]]
@@ -162,7 +162,7 @@ define void @add_inline_imm_2.0_f32(float addrspace(1)* %out, float %x) {
ret void
}
-; CHECK-LABEL: @add_inline_imm_neg_2.0_f32
+; CHECK-LABEL: {{^}}add_inline_imm_neg_2.0_f32
; CHECK: S_LOAD_DWORD [[VAL:s[0-9]+]]
; CHECK: V_ADD_F32_e64 [[REG:v[0-9]+]], [[VAL]], -2.0{{$}}
; CHECK-NEXT: BUFFER_STORE_DWORD [[REG]]
@@ -172,7 +172,7 @@ define void @add_inline_imm_neg_2.0_f32(float addrspace(1)* %out, float %x) {
ret void
}
-; CHECK-LABEL: @add_inline_imm_4.0_f32
+; CHECK-LABEL: {{^}}add_inline_imm_4.0_f32
; CHECK: S_LOAD_DWORD [[VAL:s[0-9]+]]
; CHECK: V_ADD_F32_e64 [[REG:v[0-9]+]], [[VAL]], 4.0{{$}}
; CHECK-NEXT: BUFFER_STORE_DWORD [[REG]]
@@ -182,7 +182,7 @@ define void @add_inline_imm_4.0_f32(float addrspace(1)* %out, float %x) {
ret void
}
-; CHECK-LABEL: @add_inline_imm_neg_4.0_f32
+; CHECK-LABEL: {{^}}add_inline_imm_neg_4.0_f32
; CHECK: S_LOAD_DWORD [[VAL:s[0-9]+]]
; CHECK: V_ADD_F32_e64 [[REG:v[0-9]+]], [[VAL]], -4.0{{$}}
; CHECK-NEXT: BUFFER_STORE_DWORD [[REG]]
diff --git a/test/CodeGen/R600/indirect-private-64.ll b/test/CodeGen/R600/indirect-private-64.ll
index 5747434935b..afae1e7cb58 100644
--- a/test/CodeGen/R600/indirect-private-64.ll
+++ b/test/CodeGen/R600/indirect-private-64.ll
@@ -4,7 +4,7 @@
declare void @llvm.AMDGPU.barrier.local() noduplicate nounwind
-; SI-LABEL: @private_access_f64_alloca:
+; SI-LABEL: {{^}}private_access_f64_alloca:
; SI-ALLOCA: BUFFER_STORE_DWORDX2
; SI-ALLOCA: BUFFER_LOAD_DWORDX2
@@ -22,7 +22,7 @@ define void @private_access_f64_alloca(double addrspace(1)* noalias %out, double
ret void
}
-; SI-LABEL: @private_access_v2f64_alloca:
+; SI-LABEL: {{^}}private_access_v2f64_alloca:
; SI-ALLOCA: BUFFER_STORE_DWORDX4
; SI-ALLOCA: BUFFER_LOAD_DWORDX4
@@ -46,7 +46,7 @@ define void @private_access_v2f64_alloca(<2 x double> addrspace(1)* noalias %out
ret void
}
-; SI-LABEL: @private_access_i64_alloca:
+; SI-LABEL: {{^}}private_access_i64_alloca:
; SI-ALLOCA: BUFFER_STORE_DWORDX2
; SI-ALLOCA: BUFFER_LOAD_DWORDX2
@@ -64,7 +64,7 @@ define void @private_access_i64_alloca(i64 addrspace(1)* noalias %out, i64 addrs
ret void
}
-; SI-LABEL: @private_access_v2i64_alloca:
+; SI-LABEL: {{^}}private_access_v2i64_alloca:
; SI-ALLOCA: BUFFER_STORE_DWORDX4
; SI-ALLOCA: BUFFER_LOAD_DWORDX4
diff --git a/test/CodeGen/R600/infinite-loop.ll b/test/CodeGen/R600/infinite-loop.ll
index 68ffaae1c42..7bce2f32c24 100644
--- a/test/CodeGen/R600/infinite-loop.ll
+++ b/test/CodeGen/R600/infinite-loop.ll
@@ -1,6 +1,6 @@
; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
-; SI-LABEL: @infinite_loop:
+; SI-LABEL: {{^}}infinite_loop:
; SI: V_MOV_B32_e32 [[REG:v[0-9]+]], 0x3e7
; SI: BB0_1:
; SI: BUFFER_STORE_DWORD [[REG]]
diff --git a/test/CodeGen/R600/input-mods.ll b/test/CodeGen/R600/input-mods.ll
index 13bfbab8569..e3e94995fc9 100644
--- a/test/CodeGen/R600/input-mods.ll
+++ b/test/CodeGen/R600/input-mods.ll
@@ -1,9 +1,9 @@
;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s --check-prefix=EG-CHECK
;RUN: llc < %s -march=r600 -mcpu=cayman | FileCheck %s --check-prefix=CM-CHECK
-;EG-CHECK-LABEL: @test
+;EG-CHECK-LABEL: {{^}}test:
;EG-CHECK: EXP_IEEE *
-;CM-CHECK-LABEL: @test
+;CM-CHECK-LABEL: {{^}}test:
;CM-CHECK: EXP_IEEE T{{[0-9]+}}.X, -|T{{[0-9]+}}.X|
;CM-CHECK: EXP_IEEE T{{[0-9]+}}.Y (MASKED), -|T{{[0-9]+}}.X|
;CM-CHECK: EXP_IEEE T{{[0-9]+}}.Z (MASKED), -|T{{[0-9]+}}.X|
diff --git a/test/CodeGen/R600/insert_vector_elt.ll b/test/CodeGen/R600/insert_vector_elt.ll
index 18769eddb3f..2f9730bdcc6 100644
--- a/test/CodeGen/R600/insert_vector_elt.ll
+++ b/test/CodeGen/R600/insert_vector_elt.ll
@@ -8,7 +8,7 @@
; FIXME: Why is the constant moved into the intermediate register and
; not just directly into the vector component?
-; SI-LABEL: @insertelement_v4f32_0:
+; SI-LABEL: {{^}}insertelement_v4f32_0:
; S_LOAD_DWORDX4 s{{[}}[[LOW_REG:[0-9]+]]:
; V_MOV_B32_e32
; V_MOV_B32_e32 [[CONSTREG:v[0-9]+]], 5.000000e+00
@@ -20,35 +20,35 @@ define void @insertelement_v4f32_0(<4 x float> addrspace(1)* %out, <4 x float> %
ret void
}
-; SI-LABEL: @insertelement_v4f32_1:
+; SI-LABEL: {{^}}insertelement_v4f32_1:
define void @insertelement_v4f32_1(<4 x float> addrspace(1)* %out, <4 x float> %a) nounwind {
%vecins = insertelement <4 x float> %a, float 5.000000e+00, i32 1
store <4 x float> %vecins, <4 x float> addrspace(1)* %out, align 16
ret void
}
-; SI-LABEL: @insertelement_v4f32_2:
+; SI-LABEL: {{^}}insertelement_v4f32_2:
define void @insertelement_v4f32_2(<4 x float> addrspace(1)* %out, <4 x float> %a) nounwind {
%vecins = insertelement <4 x float> %a, float 5.000000e+00, i32 2
store <4 x float> %vecins, <4 x float> addrspace(1)* %out, align 16
ret void
}
-; SI-LABEL: @insertelement_v4f32_3:
+; SI-LABEL: {{^}}insertelement_v4f32_3:
define void @insertelement_v4f32_3(<4 x float> addrspace(1)* %out, <4 x float> %a) nounwind {
%vecins = insertelement <4 x float> %a, float 5.000000e+00, i32 3
store <4 x float> %vecins, <4 x float> addrspace(1)* %out, align 16
ret void
}
-; SI-LABEL: @insertelement_v4i32_0:
+; SI-LABEL: {{^}}insertelement_v4i32_0:
define void @insertelement_v4i32_0(<4 x i32> addrspace(1)* %out, <4 x i32> %a) nounwind {
%vecins = insertelement <4 x i32> %a, i32 999, i32 0
store <4 x i32> %vecins, <4 x i32> addrspace(1)* %out, align 16
ret void
}
-; SI-LABEL: @dynamic_insertelement_v2f32:
+; SI-LABEL: {{^}}dynamic_insertelement_v2f32:
; SI: V_MOV_B32_e32 [[CONST:v[0-9]+]], 0x40a00000
; SI: V_MOVRELD_B32_e32 v[[LOW_RESULT_REG:[0-9]+]], [[CONST]]
; SI: BUFFER_STORE_DWORDX2 {{v\[}}[[LOW_RESULT_REG]]:
@@ -58,7 +58,7 @@ define void @dynamic_insertelement_v2f32(<2 x float> addrspace(1)* %out, <2 x fl
ret void
}
-; SI-LABEL: @dynamic_insertelement_v4f32:
+; SI-LABEL: {{^}}dynamic_insertelement_v4f32:
; SI: V_MOV_B32_e32 [[CONST:v[0-9]+]], 0x40a00000
; SI: V_MOVRELD_B32_e32 v[[LOW_RESULT_REG:[0-9]+]], [[CONST]]
; SI: BUFFER_STORE_DWORDX4 {{v\[}}[[LOW_RESULT_REG]]:
@@ -68,7 +68,7 @@ define void @dynamic_insertelement_v4f32(<4 x float> addrspace(1)* %out, <4 x fl
ret void
}
-; SI-LABEL: @dynamic_insertelement_v8f32:
+; SI-LABEL: {{^}}dynamic_insertelement_v8f32:
; FIXMESI: BUFFER_STORE_DWORDX4
; FIXMESI: BUFFER_STORE_DWORDX4
define void @dynamic_insertelement_v8f32(<8 x float> addrspace(1)* %out, <8 x float> %a, i32 %b) nounwind {
@@ -77,7 +77,7 @@ define void @dynamic_insertelement_v8f32(<8 x float> addrspace(1)* %out, <8 x fl
ret void
}
-; SI-LABEL: @dynamic_insertelement_v16f32:
+; SI-LABEL: {{^}}dynamic_insertelement_v16f32:
; FIXMESI: BUFFER_STORE_DWORDX4
; FIXMESI: BUFFER_STORE_DWORDX4
; FIXMESI: BUFFER_STORE_DWORDX4
@@ -88,7 +88,7 @@ define void @dynamic_insertelement_v16f32(<16 x float> addrspace(1)* %out, <16 x
ret void
}
-; SI-LABEL: @dynamic_insertelement_v2i32:
+; SI-LABEL: {{^}}dynamic_insertelement_v2i32:
; SI: BUFFER_STORE_DWORDX2
define void @dynamic_insertelement_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> %a, i32 %b) nounwind {
%vecins = insertelement <2 x i32> %a, i32 5, i32 %b
@@ -96,7 +96,7 @@ define void @dynamic_insertelement_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32>
ret void
}
-; SI-LABEL: @dynamic_insertelement_v4i32:
+; SI-LABEL: {{^}}dynamic_insertelement_v4i32:
; SI: BUFFER_STORE_DWORDX4
define void @dynamic_insertelement_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> %a, i32 %b) nounwind {
%vecins = insertelement <4 x i32> %a, i32 5, i32 %b
@@ -104,7 +104,7 @@ define void @dynamic_insertelement_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32>
ret void
}
-; SI-LABEL: @dynamic_insertelement_v8i32:
+; SI-LABEL: {{^}}dynamic_insertelement_v8i32:
; FIXMESI: BUFFER_STORE_DWORDX4
; FIXMESI: BUFFER_STORE_DWORDX4
define void @dynamic_insertelement_v8i32(<8 x i32> addrspace(1)* %out, <8 x i32> %a, i32 %b) nounwind {
@@ -113,7 +113,7 @@ define void @dynamic_insertelement_v8i32(<8 x i32> addrspace(1)* %out, <8 x i32>
ret void
}
-; SI-LABEL: @dynamic_insertelement_v16i32:
+; SI-LABEL: {{^}}dynamic_insertelement_v16i32:
; FIXMESI: BUFFER_STORE_DWORDX4
; FIXMESI: BUFFER_STORE_DWORDX4
; FIXMESI: BUFFER_STORE_DWORDX4
@@ -125,7 +125,7 @@ define void @dynamic_insertelement_v16i32(<16 x i32> addrspace(1)* %out, <16 x i
}
-; SI-LABEL: @dynamic_insertelement_v2i16:
+; SI-LABEL: {{^}}dynamic_insertelement_v2i16:
; FIXMESI: BUFFER_STORE_DWORDX2
define void @dynamic_insertelement_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> %a, i32 %b) nounwind {
%vecins = insertelement <2 x i16> %a, i16 5, i32 %b
@@ -133,7 +133,7 @@ define void @dynamic_insertelement_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16>
ret void
}
-; SI-LABEL: @dynamic_insertelement_v4i16:
+; SI-LABEL: {{^}}dynamic_insertelement_v4i16:
; FIXMESI: BUFFER_STORE_DWORDX4
define void @dynamic_insertelement_v4i16(<4 x i16> addrspace(1)* %out, <4 x i16> %a, i32 %b) nounwind {
%vecins = insertelement <4 x i16> %a, i16 5, i32 %b
@@ -142,7 +142,7 @@ define void @dynamic_insertelement_v4i16(<4 x i16> addrspace(1)* %out, <4 x i16>
}
-; SI-LABEL: @dynamic_insertelement_v2i8:
+; SI-LABEL: {{^}}dynamic_insertelement_v2i8:
; FIXMESI: BUFFER_STORE_USHORT
define void @dynamic_insertelement_v2i8(<2 x i8> addrspace(1)* %out, <2 x i8> %a, i32 %b) nounwind {
%vecins = insertelement <2 x i8> %a, i8 5, i32 %b
@@ -150,7 +150,7 @@ define void @dynamic_insertelement_v2i8(<2 x i8> addrspace(1)* %out, <2 x i8> %a
ret void
}
-; SI-LABEL: @dynamic_insertelement_v4i8:
+; SI-LABEL: {{^}}dynamic_insertelement_v4i8:
; FIXMESI: BUFFER_STORE_DWORD
define void @dynamic_insertelement_v4i8(<4 x i8> addrspace(1)* %out, <4 x i8> %a, i32 %b) nounwind {
%vecins = insertelement <4 x i8> %a, i8 5, i32 %b
@@ -158,7 +158,7 @@ define void @dynamic_insertelement_v4i8(<4 x i8> addrspace(1)* %out, <4 x i8> %a
ret void
}
-; SI-LABEL: @dynamic_insertelement_v8i8:
+; SI-LABEL: {{^}}dynamic_insertelement_v8i8:
; FIXMESI: BUFFER_STORE_DWORDX2
define void @dynamic_insertelement_v8i8(<8 x i8> addrspace(1)* %out, <8 x i8> %a, i32 %b) nounwind {
%vecins = insertelement <8 x i8> %a, i8 5, i32 %b
@@ -166,7 +166,7 @@ define void @dynamic_insertelement_v8i8(<8 x i8> addrspace(1)* %out, <8 x i8> %a
ret void
}
-; SI-LABEL: @dynamic_insertelement_v16i8:
+; SI-LABEL: {{^}}dynamic_insertelement_v16i8:
; FIXMESI: BUFFER_STORE_DWORDX4
define void @dynamic_insertelement_v16i8(<16 x i8> addrspace(1)* %out, <16 x i8> %a, i32 %b) nounwind {
%vecins = insertelement <16 x i8> %a, i8 5, i32 %b
@@ -176,7 +176,7 @@ define void @dynamic_insertelement_v16i8(<16 x i8> addrspace(1)* %out, <16 x i8>
; This test requires handling INSERT_SUBREG in SIFixSGPRCopies. Check that
; the compiler doesn't crash.
-; SI-LABEL: @insert_split_bb
+; SI-LABEL: {{^}}insert_split_bb:
define void @insert_split_bb(<2 x i32> addrspace(1)* %out, i32 addrspace(1)* %in, i32 %a, i32 %b) {
entry:
%0 = insertelement <2 x i32> undef, i32 %a, i32 0
@@ -200,7 +200,7 @@ endif:
ret void
}
-; SI-LABEL: @dynamic_insertelement_v2f64:
+; SI-LABEL: {{^}}dynamic_insertelement_v2f64:
; SI: BUFFER_STORE_DWORDX2
; SI: BUFFER_STORE_DWORDX2
; SI: BUFFER_STORE_DWORDX2
@@ -212,7 +212,7 @@ define void @dynamic_insertelement_v2f64(<2 x double> addrspace(1)* %out, <2 x d
ret void
}
-; SI-LABEL: @dynamic_insertelement_v2i64:
+; SI-LABEL: {{^}}dynamic_insertelement_v2i64:
; SI: BUFFER_STORE_DWORDX2
; SI: BUFFER_STORE_DWORDX2
; SI: S_ENDPGM
@@ -222,7 +222,7 @@ define void @dynamic_insertelement_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64>
ret void
}
-; SI-LABEL: @dynamic_insertelement_v4f64:
+; SI-LABEL: {{^}}dynamic_insertelement_v4f64:
; SI: BUFFER_STORE_DWORDX2
; SI: BUFFER_STORE_DWORDX2
; SI: BUFFER_STORE_DWORDX2
@@ -234,7 +234,7 @@ define void @dynamic_insertelement_v4f64(<4 x double> addrspace(1)* %out, <4 x d
ret void
}
-; SI-LABEL: @dynamic_insertelement_v8f64:
+; SI-LABEL: {{^}}dynamic_insertelement_v8f64:
; SI: BUFFER_STORE_DWORDX2
; SI: BUFFER_STORE_DWORDX2
; SI: BUFFER_STORE_DWORDX2
diff --git a/test/CodeGen/R600/kcache-fold.ll b/test/CodeGen/R600/kcache-fold.ll
index 0baa3cd3e1a..27840b2e160 100644
--- a/test/CodeGen/R600/kcache-fold.ll
+++ b/test/CodeGen/R600/kcache-fold.ll
@@ -1,6 +1,6 @@
;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
-; CHECK: @main1
+; CHECK: {{^}}main1:
; CHECK: MOV * T{{[0-9]+\.[XYZW], KC0}}
define void @main1() {
main_body:
@@ -48,7 +48,7 @@ main_body:
ret void
}
-; CHECK: @main2
+; CHECK: {{^}}main2:
; CHECK-NOT: MOV
define void @main2() {
main_body:
diff --git a/test/CodeGen/R600/kernel-args.ll b/test/CodeGen/R600/kernel-args.ll
index d0750de8111..a229534134a 100644
--- a/test/CodeGen/R600/kernel-args.ll
+++ b/test/CodeGen/R600/kernel-args.ll
@@ -2,9 +2,9 @@
; RUN: llc < %s -march=r600 -mcpu=cayman | FileCheck %s --check-prefix=EG-CHECK
; RUN: llc < %s -march=r600 -mcpu=SI -verify-machineinstrs | FileCheck %s --check-prefix=SI-CHECK
-; EG-CHECK-LABEL: @i8_arg
+; EG-CHECK-LABEL: {{^}}i8_arg:
; EG-CHECK: MOV {{[ *]*}}T{{[0-9]+\.[XYZW]}}, KC0[2].Z
-; SI-CHECK-LABEL: @i8_arg
+; SI-CHECK-LABEL: {{^}}i8_arg:
; SI-CHECK: BUFFER_LOAD_UBYTE
define void @i8_arg(i32 addrspace(1)* nocapture %out, i8 %in) nounwind {
@@ -14,9 +14,9 @@ entry:
ret void
}
-; EG-CHECK-LABEL: @i8_zext_arg
+; EG-CHECK-LABEL: {{^}}i8_zext_arg:
; EG-CHECK: MOV {{[ *]*}}T{{[0-9]+\.[XYZW]}}, KC0[2].Z
-; SI-CHECK-LABEL: @i8_zext_arg
+; SI-CHECK-LABEL: {{^}}i8_zext_arg:
; SI-CHECK: S_LOAD_DWORD s{{[0-9]}}, s[0:1], 0xb
define void @i8_zext_arg(i32 addrspace(1)* nocapture %out, i8 zeroext %in) nounwind {
@@ -26,9 +26,9 @@ entry:
ret void
}
-; EG-CHECK-LABEL: @i8_sext_arg
+; EG-CHECK-LABEL: {{^}}i8_sext_arg:
; EG-CHECK: MOV {{[ *]*}}T{{[0-9]+\.[XYZW]}}, KC0[2].Z
-; SI-CHECK-LABEL: @i8_sext_arg
+; SI-CHECK-LABEL: {{^}}i8_sext_arg:
; SI-CHECK: S_LOAD_DWORD s{{[0-9]}}, s[0:1], 0xb
define void @i8_sext_arg(i32 addrspace(1)* nocapture %out, i8 signext %in) nounwind {
@@ -38,9 +38,9 @@ entry:
ret void
}
-; EG-CHECK-LABEL: @i16_arg
+; EG-CHECK-LABEL: {{^}}i16_arg:
; EG-CHECK: MOV {{[ *]*}}T{{[0-9]+\.[XYZW]}}, KC0[2].Z
-; SI-CHECK-LABEL: @i16_arg
+; SI-CHECK-LABEL: {{^}}i16_arg:
; SI-CHECK: BUFFER_LOAD_USHORT
define void @i16_arg(i32 addrspace(1)* nocapture %out, i16 %in) nounwind {
@@ -50,9 +50,9 @@ entry:
ret void
}
-; EG-CHECK-LABEL: @i16_zext_arg
+; EG-CHECK-LABEL: {{^}}i16_zext_arg:
; EG-CHECK: MOV {{[ *]*}}T{{[0-9]+\.[XYZW]}}, KC0[2].Z
-; SI-CHECK-LABEL: @i16_zext_arg
+; SI-CHECK-LABEL: {{^}}i16_zext_arg:
; SI-CHECK: S_LOAD_DWORD s{{[0-9]}}, s[0:1], 0xb
define void @i16_zext_arg(i32 addrspace(1)* nocapture %out, i16 zeroext %in) nounwind {
@@ -62,9 +62,9 @@ entry:
ret void
}
-; EG-CHECK-LABEL: @i16_sext_arg
+; EG-CHECK-LABEL: {{^}}i16_sext_arg:
; EG-CHECK: MOV {{[ *]*}}T{{[0-9]+\.[XYZW]}}, KC0[2].Z
-; SI-CHECK-LABEL: @i16_sext_arg
+; SI-CHECK-LABEL: {{^}}i16_sext_arg:
; SI-CHECK: S_LOAD_DWORD s{{[0-9]}}, s[0:1], 0xb
define void @i16_sext_arg(i32 addrspace(1)* nocapture %out, i16 signext %in) nounwind {
@@ -74,9 +74,9 @@ entry:
ret void
}
-; EG-CHECK-LABEL: @i32_arg
+; EG-CHECK-LABEL: {{^}}i32_arg:
; EG-CHECK: T{{[0-9]\.[XYZW]}}, KC0[2].Z
-; SI-CHECK-LABEL: @i32_arg
+; SI-CHECK-LABEL: {{^}}i32_arg:
; S_LOAD_DWORD s{{[0-9]}}, s[0:1], 0xb
define void @i32_arg(i32 addrspace(1)* nocapture %out, i32 %in) nounwind {
entry:
@@ -84,9 +84,9 @@ entry:
ret void
}
-; EG-CHECK-LABEL: @f32_arg
+; EG-CHECK-LABEL: {{^}}f32_arg:
; EG-CHECK: T{{[0-9]\.[XYZW]}}, KC0[2].Z
-; SI-CHECK-LABEL: @f32_arg
+; SI-CHECK-LABEL: {{^}}f32_arg:
; S_LOAD_DWORD s{{[0-9]}}, s[0:1], 0xb
define void @f32_arg(float addrspace(1)* nocapture %out, float %in) nounwind {
entry:
@@ -94,10 +94,10 @@ entry:
ret void
}
-; EG-CHECK-LABEL: @v2i8_arg
+; EG-CHECK-LABEL: {{^}}v2i8_arg:
; EG-CHECK: VTX_READ_8
; EG-CHECK: VTX_READ_8
-; SI-CHECK-LABEL: @v2i8_arg
+; SI-CHECK-LABEL: {{^}}v2i8_arg:
; SI-CHECK: BUFFER_LOAD_UBYTE
; SI-CHECK: BUFFER_LOAD_UBYTE
define void @v2i8_arg(<2 x i8> addrspace(1)* %out, <2 x i8> %in) {
@@ -106,10 +106,10 @@ entry:
ret void
}
-; EG-CHECK-LABEL: @v2i16_arg
+; EG-CHECK-LABEL: {{^}}v2i16_arg:
; EG-CHECK: VTX_READ_16
; EG-CHECK: VTX_READ_16
-; SI-CHECK-LABEL: @v2i16_arg
+; SI-CHECK-LABEL: {{^}}v2i16_arg:
; SI-CHECK-DAG: BUFFER_LOAD_USHORT
; SI-CHECK-DAG: BUFFER_LOAD_USHORT
define void @v2i16_arg(<2 x i16> addrspace(1)* %out, <2 x i16> %in) {
@@ -118,10 +118,10 @@ entry:
ret void
}
-; EG-CHECK-LABEL: @v2i32_arg
+; EG-CHECK-LABEL: {{^}}v2i32_arg:
; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[3].X
; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[2].W
-; SI-CHECK-LABEL: @v2i32_arg
+; SI-CHECK-LABEL: {{^}}v2i32_arg:
; SI-CHECK: S_LOAD_DWORDX2 s{{\[[0-9]:[0-9]\]}}, s[0:1], 0xb
define void @v2i32_arg(<2 x i32> addrspace(1)* nocapture %out, <2 x i32> %in) nounwind {
entry:
@@ -129,10 +129,10 @@ entry:
ret void
}
-; EG-CHECK-LABEL: @v2f32_arg
+; EG-CHECK-LABEL: {{^}}v2f32_arg:
; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[3].X
; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[2].W
-; SI-CHECK-LABEL: @v2f32_arg
+; SI-CHECK-LABEL: {{^}}v2f32_arg:
; SI-CHECK: S_LOAD_DWORDX2 s{{\[[0-9]:[0-9]\]}}, s[0:1], 0xb
define void @v2f32_arg(<2 x float> addrspace(1)* nocapture %out, <2 x float> %in) nounwind {
entry:
@@ -140,32 +140,32 @@ entry:
ret void
}
-; EG-CHECK-LABEL: @v3i8_arg
+; EG-CHECK-LABEL: {{^}}v3i8_arg:
; VTX_READ_8 T{{[0-9]}}.X, T{{[0-9]}}.X, 40
; VTX_READ_8 T{{[0-9]}}.X, T{{[0-9]}}.X, 41
; VTX_READ_8 T{{[0-9]}}.X, T{{[0-9]}}.X, 42
-; SI-CHECK-LABEL: @v3i8_arg
+; SI-CHECK-LABEL: {{^}}v3i8_arg:
define void @v3i8_arg(<3 x i8> addrspace(1)* nocapture %out, <3 x i8> %in) nounwind {
entry:
store <3 x i8> %in, <3 x i8> addrspace(1)* %out, align 4
ret void
}
-; EG-CHECK-LABEL: @v3i16_arg
+; EG-CHECK-LABEL: {{^}}v3i16_arg:
; VTX_READ_16 T{{[0-9]}}.X, T{{[0-9]}}.X, 44
; VTX_READ_16 T{{[0-9]}}.X, T{{[0-9]}}.X, 46
; VTX_READ_16 T{{[0-9]}}.X, T{{[0-9]}}.X, 48
-; SI-CHECK-LABEL: @v3i16_arg
+; SI-CHECK-LABEL: {{^}}v3i16_arg:
define void @v3i16_arg(<3 x i16> addrspace(1)* nocapture %out, <3 x i16> %in) nounwind {
entry:
store <3 x i16> %in, <3 x i16> addrspace(1)* %out, align 4
ret void
}
-; EG-CHECK-LABEL: @v3i32_arg
+; EG-CHECK-LABEL: {{^}}v3i32_arg:
; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[3].Y
; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[3].Z
; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[3].W
-; SI-CHECK-LABEL: @v3i32_arg
+; SI-CHECK-LABEL: {{^}}v3i32_arg:
; SI-CHECK: S_LOAD_DWORDX4 s{{\[[0-9]:[0-9]+\]}}, s[0:1], 0xd
define void @v3i32_arg(<3 x i32> addrspace(1)* nocapture %out, <3 x i32> %in) nounwind {
entry:
@@ -173,11 +173,11 @@ entry:
ret void
}
-; EG-CHECK-LABEL: @v3f32_arg
+; EG-CHECK-LABEL: {{^}}v3f32_arg:
; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[3].Y
; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[3].Z
; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[3].W
-; SI-CHECK-LABEL: @v3f32_arg
+; SI-CHECK-LABEL: {{^}}v3f32_arg:
; SI-CHECK: S_LOAD_DWORDX4 s{{\[[0-9]:[0-9]+\]}}, s[0:1], 0xd
define void @v3f32_arg(<3 x float> addrspace(1)* nocapture %out, <3 x float> %in) nounwind {
entry:
@@ -185,12 +185,12 @@ entry:
ret void
}
-; EG-CHECK-LABEL: @v4i8_arg
+; EG-CHECK-LABEL: {{^}}v4i8_arg:
; EG-CHECK: VTX_READ_8
; EG-CHECK: VTX_READ_8
; EG-CHECK: VTX_READ_8
; EG-CHECK: VTX_READ_8
-; SI-CHECK-LABEL: @v4i8_arg
+; SI-CHECK-LABEL: {{^}}v4i8_arg:
; SI-CHECK: BUFFER_LOAD_UBYTE
; SI-CHECK: BUFFER_LOAD_UBYTE
; SI-CHECK: BUFFER_LOAD_UBYTE
@@ -201,12 +201,12 @@ entry:
ret void
}
-; EG-CHECK-LABEL: @v4i16_arg
+; EG-CHECK-LABEL: {{^}}v4i16_arg:
; EG-CHECK: VTX_READ_16
; EG-CHECK: VTX_READ_16
; EG-CHECK: VTX_READ_16
; EG-CHECK: VTX_READ_16
-; SI-CHECK-LABEL: @v4i16_arg
+; SI-CHECK-LABEL: {{^}}v4i16_arg:
; SI-CHECK: BUFFER_LOAD_USHORT
; SI-CHECK: BUFFER_LOAD_USHORT
; SI-CHECK: BUFFER_LOAD_USHORT
@@ -217,12 +217,12 @@ entry:
ret void
}
-; EG-CHECK-LABEL: @v4i32_arg
+; EG-CHECK-LABEL: {{^}}v4i32_arg:
; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[3].Y
; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[3].Z
; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[3].W
; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[4].X
-; SI-CHECK-LABEL: @v4i32_arg
+; SI-CHECK-LABEL: {{^}}v4i32_arg:
; SI-CHECK: S_LOAD_DWORDX4 s{{\[[0-9]:[0-9]\]}}, s[0:1], 0xd
define void @v4i32_arg(<4 x i32> addrspace(1)* nocapture %out, <4 x i32> %in) nounwind {
entry:
@@ -230,12 +230,12 @@ entry:
ret void
}
-; EG-CHECK-LABEL: @v4f32_arg
+; EG-CHECK-LABEL: {{^}}v4f32_arg:
; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[3].Y
; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[3].Z
; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[3].W
; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[4].X
-; SI-CHECK-LABEL: @v4f32_arg
+; SI-CHECK-LABEL: {{^}}v4f32_arg:
; SI-CHECK: S_LOAD_DWORDX4 s{{\[[0-9]:[0-9]\]}}, s[0:1], 0xd
define void @v4f32_arg(<4 x float> addrspace(1)* nocapture %out, <4 x float> %in) nounwind {
entry:
@@ -243,7 +243,7 @@ entry:
ret void
}
-; EG-CHECK-LABEL: @v8i8_arg
+; EG-CHECK-LABEL: {{^}}v8i8_arg:
; EG-CHECK: VTX_READ_8
; EG-CHECK: VTX_READ_8
; EG-CHECK: VTX_READ_8
@@ -252,7 +252,7 @@ entry:
; EG-CHECK: VTX_READ_8
; EG-CHECK: VTX_READ_8
; EG-CHECK: VTX_READ_8
-; SI-CHECK-LABEL: @v8i8_arg
+; SI-CHECK-LABEL: {{^}}v8i8_arg:
; SI-CHECK: BUFFER_LOAD_UBYTE
; SI-CHECK: BUFFER_LOAD_UBYTE
; SI-CHECK: BUFFER_LOAD_UBYTE
@@ -266,7 +266,7 @@ entry:
ret void
}
-; EG-CHECK-LABEL: @v8i16_arg
+; EG-CHECK-LABEL: {{^}}v8i16_arg:
; EG-CHECK: VTX_READ_16
; EG-CHECK: VTX_READ_16
; EG-CHECK: VTX_READ_16
@@ -275,7 +275,7 @@ entry:
; EG-CHECK: VTX_READ_16
; EG-CHECK: VTX_READ_16
; EG-CHECK: VTX_READ_16
-; SI-CHECK-LABEL: @v8i16_arg
+; SI-CHECK-LABEL: {{^}}v8i16_arg:
; SI-CHECK: BUFFER_LOAD_USHORT
; SI-CHECK: BUFFER_LOAD_USHORT
; SI-CHECK: BUFFER_LOAD_USHORT
@@ -290,7 +290,7 @@ entry:
ret void
}
-; EG-CHECK-LABEL: @v8i32_arg
+; EG-CHECK-LABEL: {{^}}v8i32_arg:
; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[4].Y
; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[4].Z
; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[4].W
@@ -299,7 +299,7 @@ entry:
; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[5].Z
; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[5].W
; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[6].X
-; SI-CHECK-LABEL: @v8i32_arg
+; SI-CHECK-LABEL: {{^}}v8i32_arg:
; SI-CHECK: S_LOAD_DWORDX8 s{{\[[0-9]:[0-9]+\]}}, s[0:1], 0x11
define void @v8i32_arg(<8 x i32> addrspace(1)* nocapture %out, <8 x i32> %in) nounwind {
entry:
@@ -307,7 +307,7 @@ entry:
ret void
}
-; EG-CHECK-LABEL: @v8f32_arg
+; EG-CHECK-LABEL: {{^}}v8f32_arg:
; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[4].Y
; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[4].Z
; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[4].W
@@ -316,7 +316,7 @@ entry:
; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[5].Z
; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[5].W
; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[6].X
-; SI-CHECK-LABEL: @v8f32_arg
+; SI-CHECK-LABEL: {{^}}v8f32_arg:
; SI-CHECK: S_LOAD_DWORDX8 s{{\[[0-9]:[0-9]+\]}}, s[0:1], 0x11
define void @v8f32_arg(<8 x float> addrspace(1)* nocapture %out, <8 x float> %in) nounwind {
entry:
@@ -324,7 +324,7 @@ entry:
ret void
}
-; EG-CHECK-LABEL: @v16i8_arg
+; EG-CHECK-LABEL: {{^}}v16i8_arg:
; EG-CHECK: VTX_READ_8
; EG-CHECK: VTX_READ_8
; EG-CHECK: VTX_READ_8
@@ -341,7 +341,7 @@ entry:
; EG-CHECK: VTX_READ_8
; EG-CHECK: VTX_READ_8
; EG-CHECK: VTX_READ_8
-; SI-CHECK-LABEL: @v16i8_arg
+; SI-CHECK-LABEL: {{^}}v16i8_arg:
; SI-CHECK: BUFFER_LOAD_UBYTE
; SI-CHECK: BUFFER_LOAD_UBYTE
; SI-CHECK: BUFFER_LOAD_UBYTE
@@ -364,7 +364,7 @@ entry:
ret void
}
-; EG-CHECK-LABEL: @v16i16_arg
+; EG-CHECK-LABEL: {{^}}v16i16_arg:
; EG-CHECK: VTX_READ_16
; EG-CHECK: VTX_READ_16
; EG-CHECK: VTX_READ_16
@@ -381,7 +381,7 @@ entry:
; EG-CHECK: VTX_READ_16
; EG-CHECK: VTX_READ_16
; EG-CHECK: VTX_READ_16
-; SI-CHECK-LABEL: @v16i16_arg
+; SI-CHECK-LABEL: {{^}}v16i16_arg:
; SI-CHECK: BUFFER_LOAD_USHORT
; SI-CHECK: BUFFER_LOAD_USHORT
; SI-CHECK: BUFFER_LOAD_USHORT
@@ -404,7 +404,7 @@ entry:
ret void
}
-; EG-CHECK-LABEL: @v16i32_arg
+; EG-CHECK-LABEL: {{^}}v16i32_arg:
; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[6].Y
; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[6].Z
; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[6].W
@@ -421,7 +421,7 @@ entry:
; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[9].Z
; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[9].W
; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[10].X
-; SI-CHECK-LABEL: @v16i32_arg
+; SI-CHECK-LABEL: {{^}}v16i32_arg:
; SI-CHECK: S_LOAD_DWORDX16 s{{\[[0-9]:[0-9]+\]}}, s[0:1], 0x19
define void @v16i32_arg(<16 x i32> addrspace(1)* nocapture %out, <16 x i32> %in) nounwind {
entry:
@@ -429,7 +429,7 @@ entry:
ret void
}
-; EG-CHECK-LABEL: @v16f32_arg
+; EG-CHECK-LABEL: {{^}}v16f32_arg:
; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[6].Y
; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[6].Z
; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[6].W
@@ -446,7 +446,7 @@ entry:
; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[9].Z
; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[9].W
; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[10].X
-; SI-CHECK-LABEL: @v16f32_arg
+; SI-CHECK-LABEL: {{^}}v16f32_arg:
; SI-CHECK: S_LOAD_DWORDX16 s{{\[[0-9]:[0-9]+\]}}, s[0:1], 0x19
define void @v16f32_arg(<16 x float> addrspace(1)* nocapture %out, <16 x float> %in) nounwind {
entry:
@@ -454,7 +454,7 @@ entry:
ret void
}
-; FUNC-LABEL: @kernel_arg_i64
+; FUNC-LABEL: {{^}}kernel_arg_i64:
; SI: S_LOAD_DWORDX2
; SI: S_LOAD_DWORDX2
; SI: BUFFER_STORE_DWORDX2
@@ -463,7 +463,7 @@ define void @kernel_arg_i64(i64 addrspace(1)* %out, i64 %a) nounwind {
ret void
}
-; XFUNC-LABEL: @kernel_arg_v1i64
+; XFUNC-LABEL: {{^}}kernel_arg_v1i64:
; XSI: S_LOAD_DWORDX2
; XSI: S_LOAD_DWORDX2
; XSI: BUFFER_STORE_DWORDX2
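As a brief aside for readers unfamiliar with the FileCheck syntax being substituted throughout this patch: {{...}} embeds a regular expression, so {{^}} anchors the match at the start of a line, and the trailing ":" matches the emitted function label itself rather than some other occurrence of the name in the output. A minimal sketch, reusing a label and check from the hunks above (the assembly line is a hypothetical illustration, not taken from the patch):

    ; SI-CHECK-LABEL: {{^}}v2i32_arg:
    ; SI-CHECK: S_LOAD_DWORDX2 s{{\[[0-9]:[0-9]\]}}, s[0:1], 0xb

    v2i32_arg:          ; the start-of-line label the anchored pattern is meant to match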
diff --git a/test/CodeGen/R600/lds-oqap-crash.ll b/test/CodeGen/R600/lds-oqap-crash.ll
index 79591506e8c..fbcd778de2c 100644
--- a/test/CodeGen/R600/lds-oqap-crash.ll
+++ b/test/CodeGen/R600/lds-oqap-crash.ll
@@ -9,7 +9,7 @@
; because the LDS instructions are pseudo instructions and the OQAP
; reads and writes are bundled together in the same instruction.
-; CHECK: @lds_crash
+; CHECK: {{^}}lds_crash:
define void @lds_crash(i32 addrspace(1)* %out, i32 addrspace(3)* %in, i32 %a, i32 %b, i32 %c) {
entry:
%0 = load i32 addrspace(3)* %in
diff --git a/test/CodeGen/R600/lds-output-queue.ll b/test/CodeGen/R600/lds-output-queue.ll
index d5dc061964e..a17951b872a 100644
--- a/test/CodeGen/R600/lds-output-queue.ll
+++ b/test/CodeGen/R600/lds-output-queue.ll
@@ -3,7 +3,7 @@
; This test checks that the lds input queue will be empty at the end of
; the ALU clause.
-; CHECK-LABEL: @lds_input_queue
+; CHECK-LABEL: {{^}}lds_input_queue:
; CHECK: LDS_READ_RET * OQAP
; CHECK-NOT: ALU clause
; CHECK: MOV * T{{[0-9]\.[XYZW]}}, OQAP
@@ -84,7 +84,7 @@ declare void @llvm.AMDGPU.barrier.local()
; analysis, we should be able to keep these instructions separate before
; scheduling.
;
-; CHECK-LABEL: @local_global_alias
+; CHECK-LABEL: {{^}}local_global_alias:
; CHECK: LDS_READ_RET
; CHECK-NOT: ALU clause
; CHECK: MOV * T{{[0-9]\.[XYZW]}}, OQAP
diff --git a/test/CodeGen/R600/lds-size.ll b/test/CodeGen/R600/lds-size.ll
index 9182e256150..e9771d9d323 100644
--- a/test/CodeGen/R600/lds-size.ll
+++ b/test/CodeGen/R600/lds-size.ll
@@ -3,7 +3,7 @@
; This test makes sure we do not double count global values when they are
; used in different basic blocks.
-; CHECK-LABEL: @test
+; CHECK-LABEL: {{^}}test:
; CHECK: .long 166120
; CHECK-NEXT: .long 1
@lds = internal unnamed_addr addrspace(3) global i32 zeroinitializer, align 4
diff --git a/test/CodeGen/R600/legalizedag-bug-expand-setcc.ll b/test/CodeGen/R600/legalizedag-bug-expand-setcc.ll
index 1aae7f9f91f..b9fa8e938ae 100644
--- a/test/CodeGen/R600/legalizedag-bug-expand-setcc.ll
+++ b/test/CodeGen/R600/legalizedag-bug-expand-setcc.ll
@@ -8,7 +8,7 @@
; instructions, when only one is needed.
;
-; CHECK: @setcc_expand
+; CHECK: {{^}}setcc_expand:
; CHECK: SET
; CHECK-NOT: CND
define void @setcc_expand(i32 addrspace(1)* %out, i32 %in) {
diff --git a/test/CodeGen/R600/literals.ll b/test/CodeGen/R600/literals.ll
index 47191e0a27f..cff1c24f89d 100644
--- a/test/CodeGen/R600/literals.ll
+++ b/test/CodeGen/R600/literals.ll
@@ -6,7 +6,7 @@
; or
; ADD_INT literal.x KC0[2].Z, 5
-; CHECK: @i32_literal
+; CHECK: {{^}}i32_literal:
; CHECK: ADD_INT {{\** *}}T{{[0-9]\.[XYZW]}}, KC0[2].Z, literal.x
; CHECK-NEXT: LSHR
; CHECK-NEXT: 5
@@ -23,7 +23,7 @@ entry:
; or
; ADD literal.x KC0[2].Z, 5.0
-; CHECK: @float_literal
+; CHECK: {{^}}float_literal:
; CHECK: ADD {{\** *}}T{{[0-9]\.[XYZW]}}, KC0[2].Z, literal.x
; CHECK-NEXT: LSHR
; CHECK-NEXT: 1084227584(5.0
@@ -35,7 +35,7 @@ entry:
}
; Make sure inline literals are folded into REG_SEQUENCE instructions.
-; CHECK: @inline_literal_reg_sequence
+; CHECK: {{^}}inline_literal_reg_sequence:
; CHECK: MOV {{\** *}}T[[GPR:[0-9]]].X, 0.0
; CHECK-NEXT: MOV {{\** *}}T[[GPR]].Y, 0.0
; CHECK-NEXT: MOV {{\** *}}T[[GPR]].Z, 0.0
@@ -47,7 +47,7 @@ entry:
ret void
}
-; CHECK: @inline_literal_dot4
+; CHECK: {{^}}inline_literal_dot4:
; CHECK: DOT4 T[[GPR:[0-9]]].X, 1.0
; CHECK-NEXT: DOT4 T[[GPR]].Y (MASKED), 1.0
; CHECK-NEXT: DOT4 T[[GPR]].Z (MASKED), 1.0
diff --git a/test/CodeGen/R600/llvm.AMDGPU.abs.ll b/test/CodeGen/R600/llvm.AMDGPU.abs.ll
index a0a47b7c470..2a0d8f9ec28 100644
--- a/test/CodeGen/R600/llvm.AMDGPU.abs.ll
+++ b/test/CodeGen/R600/llvm.AMDGPU.abs.ll
@@ -6,7 +6,7 @@ declare i32 @llvm.AMDGPU.abs(i32) nounwind readnone
; Legacy name
declare i32 @llvm.AMDIL.abs.i32(i32) nounwind readnone
-; FUNC-LABEL: @s_abs_i32
+; FUNC-LABEL: {{^}}s_abs_i32:
; SI: S_SUB_I32
; SI: S_MAX_I32
; SI: S_ENDPGM
@@ -19,7 +19,7 @@ define void @s_abs_i32(i32 addrspace(1)* %out, i32 %src) nounwind {
ret void
}
-; FUNC-LABEL: @v_abs_i32
+; FUNC-LABEL: {{^}}v_abs_i32:
; SI: V_SUB_I32_e32
; SI: V_MAX_I32_e32
; SI: S_ENDPGM
@@ -33,7 +33,7 @@ define void @v_abs_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %src) nounwind
ret void
}
-; FUNC-LABEL: @abs_i32_legacy_amdil
+; FUNC-LABEL: {{^}}abs_i32_legacy_amdil:
; SI: V_SUB_I32_e32
; SI: V_MAX_I32_e32
; SI: S_ENDPGM
diff --git a/test/CodeGen/R600/llvm.AMDGPU.barrier.global.ll b/test/CodeGen/R600/llvm.AMDGPU.barrier.global.ll
index 47f5255e501..dd5b8fdd895 100644
--- a/test/CodeGen/R600/llvm.AMDGPU.barrier.global.ll
+++ b/test/CodeGen/R600/llvm.AMDGPU.barrier.global.ll
@@ -1,7 +1,7 @@
; RUN: llc -march=r600 -mcpu=SI < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
-; FUNC-LABEL: @test_barrier_global
+; FUNC-LABEL: {{^}}test_barrier_global:
; EG: GROUP_BARRIER
; SI: S_BARRIER
diff --git a/test/CodeGen/R600/llvm.AMDGPU.barrier.local.ll b/test/CodeGen/R600/llvm.AMDGPU.barrier.local.ll
index 7203675bb47..c5cd3b7b239 100644
--- a/test/CodeGen/R600/llvm.AMDGPU.barrier.local.ll
+++ b/test/CodeGen/R600/llvm.AMDGPU.barrier.local.ll
@@ -1,7 +1,7 @@
; RUN: llc -march=r600 -mcpu=SI < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
-; FUNC-LABEL: @test_barrier_local
+; FUNC-LABEL: {{^}}test_barrier_local:
; EG: GROUP_BARRIER
; SI: S_BARRIER
diff --git a/test/CodeGen/R600/llvm.AMDGPU.bfe.i32.ll b/test/CodeGen/R600/llvm.AMDGPU.bfe.i32.ll
index eb509423282..7c9d3e8bb20 100644
--- a/test/CodeGen/R600/llvm.AMDGPU.bfe.i32.ll
+++ b/test/CodeGen/R600/llvm.AMDGPU.bfe.i32.ll
@@ -3,7 +3,7 @@
declare i32 @llvm.AMDGPU.bfe.i32(i32, i32, i32) nounwind readnone
-; FUNC-LABEL: @bfe_i32_arg_arg_arg
+; FUNC-LABEL: {{^}}bfe_i32_arg_arg_arg:
; SI: V_BFE_I32
; EG: BFE_INT
; EG: encoding: [{{[x0-9a-f]+,[x0-9a-f]+,[x0-9a-f]+,[x0-9a-f]+,[x0-9a-f]+}},0xac
@@ -13,7 +13,7 @@ define void @bfe_i32_arg_arg_arg(i32 addrspace(1)* %out, i32 %src0, i32 %src1, i
ret void
}
-; FUNC-LABEL: @bfe_i32_arg_arg_imm
+; FUNC-LABEL: {{^}}bfe_i32_arg_arg_imm:
; SI: V_BFE_I32
; EG: BFE_INT
define void @bfe_i32_arg_arg_imm(i32 addrspace(1)* %out, i32 %src0, i32 %src1) nounwind {
@@ -22,7 +22,7 @@ define void @bfe_i32_arg_arg_imm(i32 addrspace(1)* %out, i32 %src0, i32 %src1) n
ret void
}
-; FUNC-LABEL: @bfe_i32_arg_imm_arg
+; FUNC-LABEL: {{^}}bfe_i32_arg_imm_arg:
; SI: V_BFE_I32
; EG: BFE_INT
define void @bfe_i32_arg_imm_arg(i32 addrspace(1)* %out, i32 %src0, i32 %src2) nounwind {
@@ -31,7 +31,7 @@ define void @bfe_i32_arg_imm_arg(i32 addrspace(1)* %out, i32 %src0, i32 %src2) n
ret void
}
-; FUNC-LABEL: @bfe_i32_imm_arg_arg
+; FUNC-LABEL: {{^}}bfe_i32_imm_arg_arg:
; SI: V_BFE_I32
; EG: BFE_INT
define void @bfe_i32_imm_arg_arg(i32 addrspace(1)* %out, i32 %src1, i32 %src2) nounwind {
@@ -40,7 +40,7 @@ define void @bfe_i32_imm_arg_arg(i32 addrspace(1)* %out, i32 %src1, i32 %src2) n
ret void
}
-; FUNC-LABEL: @v_bfe_print_arg
+; FUNC-LABEL: {{^}}v_bfe_print_arg:
; SI: V_BFE_I32 v{{[0-9]+}}, v{{[0-9]+}}, 2, 8
define void @v_bfe_print_arg(i32 addrspace(1)* %out, i32 addrspace(1)* %src0) nounwind {
%load = load i32 addrspace(1)* %src0, align 4
@@ -49,7 +49,7 @@ define void @v_bfe_print_arg(i32 addrspace(1)* %out, i32 addrspace(1)* %src0) no
ret void
}
-; FUNC-LABEL: @bfe_i32_arg_0_width_reg_offset
+; FUNC-LABEL: {{^}}bfe_i32_arg_0_width_reg_offset:
; SI-NOT: BFE
; SI: S_ENDPGM
; EG-NOT: BFE
@@ -59,7 +59,7 @@ define void @bfe_i32_arg_0_width_reg_offset(i32 addrspace(1)* %out, i32 %src0, i
ret void
}
-; FUNC-LABEL: @bfe_i32_arg_0_width_imm_offset
+; FUNC-LABEL: {{^}}bfe_i32_arg_0_width_imm_offset:
; SI-NOT: BFE
; SI: S_ENDPGM
; EG-NOT: BFE
@@ -69,7 +69,7 @@ define void @bfe_i32_arg_0_width_imm_offset(i32 addrspace(1)* %out, i32 %src0, i
ret void
}
-; FUNC-LABEL: @bfe_i32_test_6
+; FUNC-LABEL: {{^}}bfe_i32_test_6:
; SI: V_LSHLREV_B32_e32 v{{[0-9]+}}, 31, v{{[0-9]+}}
; SI: V_ASHRREV_I32_e32 v{{[0-9]+}}, 1, v{{[0-9]+}}
; SI: S_ENDPGM
@@ -81,7 +81,7 @@ define void @bfe_i32_test_6(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounw
ret void
}
-; FUNC-LABEL: @bfe_i32_test_7
+; FUNC-LABEL: {{^}}bfe_i32_test_7:
; SI-NOT: SHL
; SI-NOT: BFE
; SI: V_MOV_B32_e32 [[VREG:v[0-9]+]], 0
@@ -96,7 +96,7 @@ define void @bfe_i32_test_7(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounw
}
; FIXME: The shifts should be 1 BFE
-; FUNC-LABEL: @bfe_i32_test_8
+; FUNC-LABEL: {{^}}bfe_i32_test_8:
; SI: BUFFER_LOAD_DWORD
; SI: V_BFE_I32 v{{[0-9]+}}, v{{[0-9]+}}, 0, 1
; SI: S_ENDPGM
@@ -108,7 +108,7 @@ define void @bfe_i32_test_8(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounw
ret void
}
-; FUNC-LABEL: @bfe_i32_test_9
+; FUNC-LABEL: {{^}}bfe_i32_test_9:
; SI-NOT: BFE
; SI: V_ASHRREV_I32_e32 v{{[0-9]+}}, 31, v{{[0-9]+}}
; SI-NOT: BFE
@@ -120,7 +120,7 @@ define void @bfe_i32_test_9(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounw
ret void
}
-; FUNC-LABEL: @bfe_i32_test_10
+; FUNC-LABEL: {{^}}bfe_i32_test_10:
; SI-NOT: BFE
; SI: V_ASHRREV_I32_e32 v{{[0-9]+}}, 1, v{{[0-9]+}}
; SI-NOT: BFE
@@ -132,7 +132,7 @@ define void @bfe_i32_test_10(i32 addrspace(1)* %out, i32 addrspace(1)* %in) noun
ret void
}
-; FUNC-LABEL: @bfe_i32_test_11
+; FUNC-LABEL: {{^}}bfe_i32_test_11:
; SI-NOT: BFE
; SI: V_ASHRREV_I32_e32 v{{[0-9]+}}, 8, v{{[0-9]+}}
; SI-NOT: BFE
@@ -144,7 +144,7 @@ define void @bfe_i32_test_11(i32 addrspace(1)* %out, i32 addrspace(1)* %in) noun
ret void
}
-; FUNC-LABEL: @bfe_i32_test_12
+; FUNC-LABEL: {{^}}bfe_i32_test_12:
; SI-NOT: BFE
; SI: V_ASHRREV_I32_e32 v{{[0-9]+}}, 24, v{{[0-9]+}}
; SI-NOT: BFE
@@ -156,7 +156,7 @@ define void @bfe_i32_test_12(i32 addrspace(1)* %out, i32 addrspace(1)* %in) noun
ret void
}
-; FUNC-LABEL: @bfe_i32_test_13
+; FUNC-LABEL: {{^}}bfe_i32_test_13:
; SI: V_ASHRREV_I32_e32 {{v[0-9]+}}, 31, {{v[0-9]+}}
; SI-NOT: BFE
; SI: S_ENDPGM
@@ -167,7 +167,7 @@ define void @bfe_i32_test_13(i32 addrspace(1)* %out, i32 addrspace(1)* %in) noun
store i32 %bfe, i32 addrspace(1)* %out, align 4
ret void
}
-; FUNC-LABEL: @bfe_i32_test_14
+; FUNC-LABEL: {{^}}bfe_i32_test_14:
; SI-NOT: LSHR
; SI-NOT: BFE
; SI: S_ENDPGM
@@ -178,7 +178,7 @@ define void @bfe_i32_test_14(i32 addrspace(1)* %out, i32 addrspace(1)* %in) noun
store i32 %bfe, i32 addrspace(1)* %out, align 4
ret void
}
-; FUNC-LABEL: @bfe_i32_constant_fold_test_0
+; FUNC-LABEL: {{^}}bfe_i32_constant_fold_test_0:
; SI-NOT: BFE
; SI: V_MOV_B32_e32 [[VREG:v[0-9]+]], 0
; SI: BUFFER_STORE_DWORD [[VREG]],
@@ -190,7 +190,7 @@ define void @bfe_i32_constant_fold_test_0(i32 addrspace(1)* %out) nounwind {
ret void
}
-; FUNC-LABEL: @bfe_i32_constant_fold_test_1
+; FUNC-LABEL: {{^}}bfe_i32_constant_fold_test_1:
; SI-NOT: BFE
; SI: V_MOV_B32_e32 [[VREG:v[0-9]+]], 0
; SI: BUFFER_STORE_DWORD [[VREG]],
@@ -202,7 +202,7 @@ define void @bfe_i32_constant_fold_test_1(i32 addrspace(1)* %out) nounwind {
ret void
}
-; FUNC-LABEL: @bfe_i32_constant_fold_test_2
+; FUNC-LABEL: {{^}}bfe_i32_constant_fold_test_2:
; SI-NOT: BFE
; SI: V_MOV_B32_e32 [[VREG:v[0-9]+]], 0
; SI: BUFFER_STORE_DWORD [[VREG]],
@@ -214,7 +214,7 @@ define void @bfe_i32_constant_fold_test_2(i32 addrspace(1)* %out) nounwind {
ret void
}
-; FUNC-LABEL: @bfe_i32_constant_fold_test_3
+; FUNC-LABEL: {{^}}bfe_i32_constant_fold_test_3:
; SI-NOT: BFE
; SI: V_MOV_B32_e32 [[VREG:v[0-9]+]], -1
; SI: BUFFER_STORE_DWORD [[VREG]],
@@ -226,7 +226,7 @@ define void @bfe_i32_constant_fold_test_3(i32 addrspace(1)* %out) nounwind {
ret void
}
-; FUNC-LABEL: @bfe_i32_constant_fold_test_4
+; FUNC-LABEL: {{^}}bfe_i32_constant_fold_test_4:
; SI-NOT: BFE
; SI: V_MOV_B32_e32 [[VREG:v[0-9]+]], -1
; SI: BUFFER_STORE_DWORD [[VREG]],
@@ -238,7 +238,7 @@ define void @bfe_i32_constant_fold_test_4(i32 addrspace(1)* %out) nounwind {
ret void
}
-; FUNC-LABEL: @bfe_i32_constant_fold_test_5
+; FUNC-LABEL: {{^}}bfe_i32_constant_fold_test_5:
; SI-NOT: BFE
; SI: V_MOV_B32_e32 [[VREG:v[0-9]+]], -1
; SI: BUFFER_STORE_DWORD [[VREG]],
@@ -250,7 +250,7 @@ define void @bfe_i32_constant_fold_test_5(i32 addrspace(1)* %out) nounwind {
ret void
}
-; FUNC-LABEL: @bfe_i32_constant_fold_test_6
+; FUNC-LABEL: {{^}}bfe_i32_constant_fold_test_6:
; SI-NOT: BFE
; SI: V_MOV_B32_e32 [[VREG:v[0-9]+]], 0xffffff80
; SI: BUFFER_STORE_DWORD [[VREG]],
@@ -262,7 +262,7 @@ define void @bfe_i32_constant_fold_test_6(i32 addrspace(1)* %out) nounwind {
ret void
}
-; FUNC-LABEL: @bfe_i32_constant_fold_test_7
+; FUNC-LABEL: {{^}}bfe_i32_constant_fold_test_7:
; SI-NOT: BFE
; SI: V_MOV_B32_e32 [[VREG:v[0-9]+]], 0x7f
; SI: BUFFER_STORE_DWORD [[VREG]],
@@ -274,7 +274,7 @@ define void @bfe_i32_constant_fold_test_7(i32 addrspace(1)* %out) nounwind {
ret void
}
-; FUNC-LABEL: @bfe_i32_constant_fold_test_8
+; FUNC-LABEL: {{^}}bfe_i32_constant_fold_test_8:
; SI-NOT: BFE
; SI: V_MOV_B32_e32 [[VREG:v[0-9]+]], 1
; SI: BUFFER_STORE_DWORD [[VREG]],
@@ -286,7 +286,7 @@ define void @bfe_i32_constant_fold_test_8(i32 addrspace(1)* %out) nounwind {
ret void
}
-; FUNC-LABEL: @bfe_i32_constant_fold_test_9
+; FUNC-LABEL: {{^}}bfe_i32_constant_fold_test_9:
; SI-NOT: BFE
; SI: V_MOV_B32_e32 [[VREG:v[0-9]+]], 1
; SI: BUFFER_STORE_DWORD [[VREG]],
@@ -298,7 +298,7 @@ define void @bfe_i32_constant_fold_test_9(i32 addrspace(1)* %out) nounwind {
ret void
}
-; FUNC-LABEL: @bfe_i32_constant_fold_test_10
+; FUNC-LABEL: {{^}}bfe_i32_constant_fold_test_10:
; SI-NOT: BFE
; SI: V_MOV_B32_e32 [[VREG:v[0-9]+]], 0
; SI: BUFFER_STORE_DWORD [[VREG]],
@@ -310,7 +310,7 @@ define void @bfe_i32_constant_fold_test_10(i32 addrspace(1)* %out) nounwind {
ret void
}
-; FUNC-LABEL: @bfe_i32_constant_fold_test_11
+; FUNC-LABEL: {{^}}bfe_i32_constant_fold_test_11:
; SI-NOT: BFE
; SI: V_MOV_B32_e32 [[VREG:v[0-9]+]], -6
; SI: BUFFER_STORE_DWORD [[VREG]],
@@ -322,7 +322,7 @@ define void @bfe_i32_constant_fold_test_11(i32 addrspace(1)* %out) nounwind {
ret void
}
-; FUNC-LABEL: @bfe_i32_constant_fold_test_12
+; FUNC-LABEL: {{^}}bfe_i32_constant_fold_test_12:
; SI-NOT: BFE
; SI: V_MOV_B32_e32 [[VREG:v[0-9]+]], 0
; SI: BUFFER_STORE_DWORD [[VREG]],
@@ -334,7 +334,7 @@ define void @bfe_i32_constant_fold_test_12(i32 addrspace(1)* %out) nounwind {
ret void
}
-; FUNC-LABEL: @bfe_i32_constant_fold_test_13
+; FUNC-LABEL: {{^}}bfe_i32_constant_fold_test_13:
; SI-NOT: BFE
; SI: V_MOV_B32_e32 [[VREG:v[0-9]+]], 1
; SI: BUFFER_STORE_DWORD [[VREG]],
@@ -346,7 +346,7 @@ define void @bfe_i32_constant_fold_test_13(i32 addrspace(1)* %out) nounwind {
ret void
}
-; FUNC-LABEL: @bfe_i32_constant_fold_test_14
+; FUNC-LABEL: {{^}}bfe_i32_constant_fold_test_14:
; SI-NOT: BFE
; SI: V_MOV_B32_e32 [[VREG:v[0-9]+]], 40
; SI: BUFFER_STORE_DWORD [[VREG]],
@@ -358,7 +358,7 @@ define void @bfe_i32_constant_fold_test_14(i32 addrspace(1)* %out) nounwind {
ret void
}
-; FUNC-LABEL: @bfe_i32_constant_fold_test_15
+; FUNC-LABEL: {{^}}bfe_i32_constant_fold_test_15:
; SI-NOT: BFE
; SI: V_MOV_B32_e32 [[VREG:v[0-9]+]], 10
; SI: BUFFER_STORE_DWORD [[VREG]],
@@ -370,7 +370,7 @@ define void @bfe_i32_constant_fold_test_15(i32 addrspace(1)* %out) nounwind {
ret void
}
-; FUNC-LABEL: @bfe_i32_constant_fold_test_16
+; FUNC-LABEL: {{^}}bfe_i32_constant_fold_test_16:
; SI-NOT: BFE
; SI: V_MOV_B32_e32 [[VREG:v[0-9]+]], -1
; SI: BUFFER_STORE_DWORD [[VREG]],
@@ -382,7 +382,7 @@ define void @bfe_i32_constant_fold_test_16(i32 addrspace(1)* %out) nounwind {
ret void
}
-; FUNC-LABEL: @bfe_i32_constant_fold_test_17
+; FUNC-LABEL: {{^}}bfe_i32_constant_fold_test_17:
; SI-NOT: BFE
; SI: V_MOV_B32_e32 [[VREG:v[0-9]+]], 0x7f
; SI: BUFFER_STORE_DWORD [[VREG]],
@@ -394,7 +394,7 @@ define void @bfe_i32_constant_fold_test_17(i32 addrspace(1)* %out) nounwind {
ret void
}
-; FUNC-LABEL: @bfe_i32_constant_fold_test_18
+; FUNC-LABEL: {{^}}bfe_i32_constant_fold_test_18:
; SI-NOT: BFE
; SI: V_MOV_B32_e32 [[VREG:v[0-9]+]], 0
; SI: BUFFER_STORE_DWORD [[VREG]],
@@ -408,7 +408,7 @@ define void @bfe_i32_constant_fold_test_18(i32 addrspace(1)* %out) nounwind {
; XXX - This should really be a single BFE, but the sext_inreg of the
; extended type i24 is never custom lowered.
-; FUNC-LABEL: @bfe_sext_in_reg_i24
+; FUNC-LABEL: {{^}}bfe_sext_in_reg_i24:
; SI: BUFFER_LOAD_DWORD [[LOAD:v[0-9]+]],
; SI: V_LSHLREV_B32_e32 {{v[0-9]+}}, 8, {{v[0-9]+}}
; SI: V_ASHRREV_I32_e32 {{v[0-9]+}}, 8, {{v[0-9]+}}
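For context on the shift pair these checks accept (a sketch for illustration only, not part of the patch; the value names %x, %shl and %sext are hypothetical): sign-extending the low 24 bits of an i32 is equivalent to shifting left by 8 and then arithmetic-shifting right by 8, which is the two-instruction form the test tolerates until a single BFE is emitted.

    %shl  = shl i32 %x, 8        ; move bit 23 of %x into the sign bit
    %sext = ashr i32 %shl, 8     ; shift back arithmetically: sign-extend from bit 23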
diff --git a/test/CodeGen/R600/llvm.AMDGPU.bfe.u32.ll b/test/CodeGen/R600/llvm.AMDGPU.bfe.u32.ll
index 1a62253eeb7..d065c2384f1 100644
--- a/test/CodeGen/R600/llvm.AMDGPU.bfe.u32.ll
+++ b/test/CodeGen/R600/llvm.AMDGPU.bfe.u32.ll
@@ -3,7 +3,7 @@
declare i32 @llvm.AMDGPU.bfe.u32(i32, i32, i32) nounwind readnone
-; FUNC-LABEL: @bfe_u32_arg_arg_arg
+; FUNC-LABEL: {{^}}bfe_u32_arg_arg_arg:
; SI: V_BFE_U32
; EG: BFE_UINT
define void @bfe_u32_arg_arg_arg(i32 addrspace(1)* %out, i32 %src0, i32 %src1, i32 %src2) nounwind {
@@ -12,7 +12,7 @@ define void @bfe_u32_arg_arg_arg(i32 addrspace(1)* %out, i32 %src0, i32 %src1, i
ret void
}
-; FUNC-LABEL: @bfe_u32_arg_arg_imm
+; FUNC-LABEL: {{^}}bfe_u32_arg_arg_imm:
; SI: V_BFE_U32
; EG: BFE_UINT
define void @bfe_u32_arg_arg_imm(i32 addrspace(1)* %out, i32 %src0, i32 %src1) nounwind {
@@ -21,7 +21,7 @@ define void @bfe_u32_arg_arg_imm(i32 addrspace(1)* %out, i32 %src0, i32 %src1) n
ret void
}
-; FUNC-LABEL: @bfe_u32_arg_imm_arg
+; FUNC-LABEL: {{^}}bfe_u32_arg_imm_arg:
; SI: V_BFE_U32
; EG: BFE_UINT
define void @bfe_u32_arg_imm_arg(i32 addrspace(1)* %out, i32 %src0, i32 %src2) nounwind {
@@ -30,7 +30,7 @@ define void @bfe_u32_arg_imm_arg(i32 addrspace(1)* %out, i32 %src0, i32 %src2) n
ret void
}
-; FUNC-LABEL: @bfe_u32_imm_arg_arg
+; FUNC-LABEL: {{^}}bfe_u32_imm_arg_arg:
; SI: V_BFE_U32
; EG: BFE_UINT
define void @bfe_u32_imm_arg_arg(i32 addrspace(1)* %out, i32 %src1, i32 %src2) nounwind {
@@ -39,7 +39,7 @@ define void @bfe_u32_imm_arg_arg(i32 addrspace(1)* %out, i32 %src1, i32 %src2) n
ret void
}
-; FUNC-LABEL: @bfe_u32_arg_0_width_reg_offset
+; FUNC-LABEL: {{^}}bfe_u32_arg_0_width_reg_offset:
; SI-NOT: BFE
; SI: S_ENDPGM
; EG-NOT: BFE
@@ -49,7 +49,7 @@ define void @bfe_u32_arg_0_width_reg_offset(i32 addrspace(1)* %out, i32 %src0, i
ret void
}
-; FUNC-LABEL: @bfe_u32_arg_0_width_imm_offset
+; FUNC-LABEL: {{^}}bfe_u32_arg_0_width_imm_offset:
; SI-NOT: BFE
; SI: S_ENDPGM
; EG-NOT: BFE
@@ -59,7 +59,7 @@ define void @bfe_u32_arg_0_width_imm_offset(i32 addrspace(1)* %out, i32 %src0, i
ret void
}
-; FUNC-LABEL: @bfe_u32_zextload_i8
+; FUNC-LABEL: {{^}}bfe_u32_zextload_i8:
; SI: BUFFER_LOAD_UBYTE
; SI-NOT: BFE
; SI: S_ENDPGM
@@ -71,7 +71,7 @@ define void @bfe_u32_zextload_i8(i32 addrspace(1)* %out, i8 addrspace(1)* %in) n
ret void
}
-; FUNC-LABEL: @bfe_u32_zext_in_reg_i8
+; FUNC-LABEL: {{^}}bfe_u32_zext_in_reg_i8:
; SI: BUFFER_LOAD_DWORD
; SI: V_ADD_I32
; SI-NEXT: V_AND_B32_e32
@@ -86,7 +86,7 @@ define void @bfe_u32_zext_in_reg_i8(i32 addrspace(1)* %out, i32 addrspace(1)* %i
ret void
}
-; FUNC-LABEL: @bfe_u32_zext_in_reg_i16
+; FUNC-LABEL: {{^}}bfe_u32_zext_in_reg_i16:
; SI: BUFFER_LOAD_DWORD
; SI: V_ADD_I32
; SI-NEXT: V_AND_B32_e32
@@ -101,7 +101,7 @@ define void @bfe_u32_zext_in_reg_i16(i32 addrspace(1)* %out, i32 addrspace(1)* %
ret void
}
-; FUNC-LABEL: @bfe_u32_zext_in_reg_i8_offset_1
+; FUNC-LABEL: {{^}}bfe_u32_zext_in_reg_i8_offset_1:
; SI: BUFFER_LOAD_DWORD
; SI: V_ADD_I32
; SI: BFE
@@ -115,7 +115,7 @@ define void @bfe_u32_zext_in_reg_i8_offset_1(i32 addrspace(1)* %out, i32 addrspa
ret void
}
-; FUNC-LABEL: @bfe_u32_zext_in_reg_i8_offset_3
+; FUNC-LABEL: {{^}}bfe_u32_zext_in_reg_i8_offset_3:
; SI: BUFFER_LOAD_DWORD
; SI: V_ADD_I32
; SI-NEXT: V_AND_B32_e32 {{v[0-9]+}}, 0xf8
@@ -130,7 +130,7 @@ define void @bfe_u32_zext_in_reg_i8_offset_3(i32 addrspace(1)* %out, i32 addrspa
ret void
}
-; FUNC-LABEL: @bfe_u32_zext_in_reg_i8_offset_7
+; FUNC-LABEL: {{^}}bfe_u32_zext_in_reg_i8_offset_7:
; SI: BUFFER_LOAD_DWORD
; SI: V_ADD_I32
; SI-NEXT: V_AND_B32_e32 {{v[0-9]+}}, 0x80
@@ -145,7 +145,7 @@ define void @bfe_u32_zext_in_reg_i8_offset_7(i32 addrspace(1)* %out, i32 addrspa
ret void
}
-; FUNC-LABEL: @bfe_u32_zext_in_reg_i16_offset_8
+; FUNC-LABEL: {{^}}bfe_u32_zext_in_reg_i16_offset_8:
; SI: BUFFER_LOAD_DWORD
; SI: V_ADD_I32
; SI-NEXT: BFE
@@ -159,7 +159,7 @@ define void @bfe_u32_zext_in_reg_i16_offset_8(i32 addrspace(1)* %out, i32 addrsp
ret void
}
-; FUNC-LABEL: @bfe_u32_test_1
+; FUNC-LABEL: {{^}}bfe_u32_test_1:
; SI: BUFFER_LOAD_DWORD
; SI: V_AND_B32_e32 {{v[0-9]+}}, 1, {{v[0-9]+}}
; SI: S_ENDPGM
@@ -187,7 +187,7 @@ define void @bfe_u32_test_3(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounw
ret void
}
-; FUNC-LABEL: @bfe_u32_test_4
+; FUNC-LABEL: {{^}}bfe_u32_test_4:
; SI-NOT: LSHL
; SI-NOT: SHR
; SI-NOT: BFE
@@ -203,7 +203,7 @@ define void @bfe_u32_test_4(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounw
ret void
}
-; FUNC-LABEL: @bfe_u32_test_5
+; FUNC-LABEL: {{^}}bfe_u32_test_5:
; SI: BUFFER_LOAD_DWORD
; SI-NOT: LSHL
; SI-NOT: SHR
@@ -218,7 +218,7 @@ define void @bfe_u32_test_5(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounw
ret void
}
-; FUNC-LABEL: @bfe_u32_test_6
+; FUNC-LABEL: {{^}}bfe_u32_test_6:
; SI: V_LSHLREV_B32_e32 v{{[0-9]+}}, 31, v{{[0-9]+}}
; SI: V_LSHRREV_B32_e32 v{{[0-9]+}}, 1, v{{[0-9]+}}
; SI: S_ENDPGM
@@ -230,7 +230,7 @@ define void @bfe_u32_test_6(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounw
ret void
}
-; FUNC-LABEL: @bfe_u32_test_7
+; FUNC-LABEL: {{^}}bfe_u32_test_7:
; SI: V_LSHLREV_B32_e32 v{{[0-9]+}}, 31, v{{[0-9]+}}
; SI-NOT: BFE
; SI: S_ENDPGM
@@ -242,7 +242,7 @@ define void @bfe_u32_test_7(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounw
ret void
}
-; FUNC-LABEL: @bfe_u32_test_8
+; FUNC-LABEL: {{^}}bfe_u32_test_8:
; SI-NOT: BFE
; SI: V_AND_B32_e32 {{v[0-9]+}}, 1, {{v[0-9]+}}
; SI-NOT: BFE
@@ -255,7 +255,7 @@ define void @bfe_u32_test_8(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounw
ret void
}
-; FUNC-LABEL: @bfe_u32_test_9
+; FUNC-LABEL: {{^}}bfe_u32_test_9:
; SI-NOT: BFE
; SI: V_LSHRREV_B32_e32 v{{[0-9]+}}, 31, v{{[0-9]+}}
; SI-NOT: BFE
@@ -267,7 +267,7 @@ define void @bfe_u32_test_9(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounw
ret void
}
-; FUNC-LABEL: @bfe_u32_test_10
+; FUNC-LABEL: {{^}}bfe_u32_test_10:
; SI-NOT: BFE
; SI: V_LSHRREV_B32_e32 v{{[0-9]+}}, 1, v{{[0-9]+}}
; SI-NOT: BFE
@@ -279,7 +279,7 @@ define void @bfe_u32_test_10(i32 addrspace(1)* %out, i32 addrspace(1)* %in) noun
ret void
}
-; FUNC-LABEL: @bfe_u32_test_11
+; FUNC-LABEL: {{^}}bfe_u32_test_11:
; SI-NOT: BFE
; SI: V_LSHRREV_B32_e32 v{{[0-9]+}}, 8, v{{[0-9]+}}
; SI-NOT: BFE
@@ -291,7 +291,7 @@ define void @bfe_u32_test_11(i32 addrspace(1)* %out, i32 addrspace(1)* %in) noun
ret void
}
-; FUNC-LABEL: @bfe_u32_test_12
+; FUNC-LABEL: {{^}}bfe_u32_test_12:
; SI-NOT: BFE
; SI: V_LSHRREV_B32_e32 v{{[0-9]+}}, 24, v{{[0-9]+}}
; SI-NOT: BFE
@@ -303,7 +303,7 @@ define void @bfe_u32_test_12(i32 addrspace(1)* %out, i32 addrspace(1)* %in) noun
ret void
}
-; FUNC-LABEL: @bfe_u32_test_13
+; FUNC-LABEL: {{^}}bfe_u32_test_13:
; V_ASHRREV_U32_e32 {{v[0-9]+}}, 31, {{v[0-9]+}}
; SI-NOT: BFE
; SI: S_ENDPGM
@@ -314,7 +314,7 @@ define void @bfe_u32_test_13(i32 addrspace(1)* %out, i32 addrspace(1)* %in) noun
store i32 %bfe, i32 addrspace(1)* %out, align 4
ret void
}
-; FUNC-LABEL: @bfe_u32_test_14
+; FUNC-LABEL: {{^}}bfe_u32_test_14:
; SI-NOT: LSHR
; SI-NOT: BFE
; SI: S_ENDPGM
@@ -325,7 +325,7 @@ define void @bfe_u32_test_14(i32 addrspace(1)* %out, i32 addrspace(1)* %in) noun
store i32 %bfe, i32 addrspace(1)* %out, align 4
ret void
}
-; FUNC-LABEL: @bfe_u32_constant_fold_test_0
+; FUNC-LABEL: {{^}}bfe_u32_constant_fold_test_0:
; SI-NOT: BFE
; SI: V_MOV_B32_e32 [[VREG:v[0-9]+]], 0
; SI: BUFFER_STORE_DWORD [[VREG]],
@@ -337,7 +337,7 @@ define void @bfe_u32_constant_fold_test_0(i32 addrspace(1)* %out) nounwind {
ret void
}
-; FUNC-LABEL: @bfe_u32_constant_fold_test_1
+; FUNC-LABEL: {{^}}bfe_u32_constant_fold_test_1:
; SI-NOT: BFE
; SI: V_MOV_B32_e32 [[VREG:v[0-9]+]], 0
; SI: BUFFER_STORE_DWORD [[VREG]],
@@ -349,7 +349,7 @@ define void @bfe_u32_constant_fold_test_1(i32 addrspace(1)* %out) nounwind {
ret void
}
-; FUNC-LABEL: @bfe_u32_constant_fold_test_2
+; FUNC-LABEL: {{^}}bfe_u32_constant_fold_test_2:
; SI-NOT: BFE
; SI: V_MOV_B32_e32 [[VREG:v[0-9]+]], 0
; SI: BUFFER_STORE_DWORD [[VREG]],
@@ -361,7 +361,7 @@ define void @bfe_u32_constant_fold_test_2(i32 addrspace(1)* %out) nounwind {
ret void
}
-; FUNC-LABEL: @bfe_u32_constant_fold_test_3
+; FUNC-LABEL: {{^}}bfe_u32_constant_fold_test_3:
; SI-NOT: BFE
; SI: V_MOV_B32_e32 [[VREG:v[0-9]+]], 1
; SI: BUFFER_STORE_DWORD [[VREG]],
@@ -373,7 +373,7 @@ define void @bfe_u32_constant_fold_test_3(i32 addrspace(1)* %out) nounwind {
ret void
}
-; FUNC-LABEL: @bfe_u32_constant_fold_test_4
+; FUNC-LABEL: {{^}}bfe_u32_constant_fold_test_4:
; SI-NOT: BFE
; SI: V_MOV_B32_e32 [[VREG:v[0-9]+]], -1
; SI: BUFFER_STORE_DWORD [[VREG]],
@@ -385,7 +385,7 @@ define void @bfe_u32_constant_fold_test_4(i32 addrspace(1)* %out) nounwind {
ret void
}
-; FUNC-LABEL: @bfe_u32_constant_fold_test_5
+; FUNC-LABEL: {{^}}bfe_u32_constant_fold_test_5:
; SI-NOT: BFE
; SI: V_MOV_B32_e32 [[VREG:v[0-9]+]], 1
; SI: BUFFER_STORE_DWORD [[VREG]],
@@ -397,7 +397,7 @@ define void @bfe_u32_constant_fold_test_5(i32 addrspace(1)* %out) nounwind {
ret void
}
-; FUNC-LABEL: @bfe_u32_constant_fold_test_6
+; FUNC-LABEL: {{^}}bfe_u32_constant_fold_test_6:
; SI-NOT: BFE
; SI: V_MOV_B32_e32 [[VREG:v[0-9]+]], 0x80
; SI: BUFFER_STORE_DWORD [[VREG]],
@@ -409,7 +409,7 @@ define void @bfe_u32_constant_fold_test_6(i32 addrspace(1)* %out) nounwind {
ret void
}
-; FUNC-LABEL: @bfe_u32_constant_fold_test_7
+; FUNC-LABEL: {{^}}bfe_u32_constant_fold_test_7:
; SI-NOT: BFE
; SI: V_MOV_B32_e32 [[VREG:v[0-9]+]], 0x7f
; SI: BUFFER_STORE_DWORD [[VREG]],
@@ -421,7 +421,7 @@ define void @bfe_u32_constant_fold_test_7(i32 addrspace(1)* %out) nounwind {
ret void
}
-; FUNC-LABEL: @bfe_u32_constant_fold_test_8
+; FUNC-LABEL: {{^}}bfe_u32_constant_fold_test_8:
; SI-NOT: BFE
; SI: V_MOV_B32_e32 [[VREG:v[0-9]+]], 1
; SI: BUFFER_STORE_DWORD [[VREG]],
@@ -433,7 +433,7 @@ define void @bfe_u32_constant_fold_test_8(i32 addrspace(1)* %out) nounwind {
ret void
}
-; FUNC-LABEL: @bfe_u32_constant_fold_test_9
+; FUNC-LABEL: {{^}}bfe_u32_constant_fold_test_9:
; SI-NOT: BFE
; SI: V_MOV_B32_e32 [[VREG:v[0-9]+]], 1
; SI: BUFFER_STORE_DWORD [[VREG]],
@@ -445,7 +445,7 @@ define void @bfe_u32_constant_fold_test_9(i32 addrspace(1)* %out) nounwind {
ret void
}
-; FUNC-LABEL: @bfe_u32_constant_fold_test_10
+; FUNC-LABEL: {{^}}bfe_u32_constant_fold_test_10:
; SI-NOT: BFE
; SI: V_MOV_B32_e32 [[VREG:v[0-9]+]], 0
; SI: BUFFER_STORE_DWORD [[VREG]],
@@ -457,7 +457,7 @@ define void @bfe_u32_constant_fold_test_10(i32 addrspace(1)* %out) nounwind {
ret void
}
-; FUNC-LABEL: @bfe_u32_constant_fold_test_11
+; FUNC-LABEL: {{^}}bfe_u32_constant_fold_test_11:
; SI-NOT: BFE
; SI: V_MOV_B32_e32 [[VREG:v[0-9]+]], 10
; SI: BUFFER_STORE_DWORD [[VREG]],
@@ -469,7 +469,7 @@ define void @bfe_u32_constant_fold_test_11(i32 addrspace(1)* %out) nounwind {
ret void
}
-; FUNC-LABEL: @bfe_u32_constant_fold_test_12
+; FUNC-LABEL: {{^}}bfe_u32_constant_fold_test_12:
; SI-NOT: BFE
; SI: V_MOV_B32_e32 [[VREG:v[0-9]+]], 0
; SI: BUFFER_STORE_DWORD [[VREG]],
@@ -481,7 +481,7 @@ define void @bfe_u32_constant_fold_test_12(i32 addrspace(1)* %out) nounwind {
ret void
}
-; FUNC-LABEL: @bfe_u32_constant_fold_test_13
+; FUNC-LABEL: {{^}}bfe_u32_constant_fold_test_13:
; SI-NOT: BFE
; SI: V_MOV_B32_e32 [[VREG:v[0-9]+]], 1
; SI: BUFFER_STORE_DWORD [[VREG]],
@@ -493,7 +493,7 @@ define void @bfe_u32_constant_fold_test_13(i32 addrspace(1)* %out) nounwind {
ret void
}
-; FUNC-LABEL: @bfe_u32_constant_fold_test_14
+; FUNC-LABEL: {{^}}bfe_u32_constant_fold_test_14:
; SI-NOT: BFE
; SI: V_MOV_B32_e32 [[VREG:v[0-9]+]], 40
; SI: BUFFER_STORE_DWORD [[VREG]],
@@ -505,7 +505,7 @@ define void @bfe_u32_constant_fold_test_14(i32 addrspace(1)* %out) nounwind {
ret void
}
-; FUNC-LABEL: @bfe_u32_constant_fold_test_15
+; FUNC-LABEL: {{^}}bfe_u32_constant_fold_test_15:
; SI-NOT: BFE
; SI: V_MOV_B32_e32 [[VREG:v[0-9]+]], 10
; SI: BUFFER_STORE_DWORD [[VREG]],
@@ -517,7 +517,7 @@ define void @bfe_u32_constant_fold_test_15(i32 addrspace(1)* %out) nounwind {
ret void
}
-; FUNC-LABEL: @bfe_u32_constant_fold_test_16
+; FUNC-LABEL: {{^}}bfe_u32_constant_fold_test_16:
; SI-NOT: BFE
; SI: V_MOV_B32_e32 [[VREG:v[0-9]+]], 0x7f
; SI: BUFFER_STORE_DWORD [[VREG]],
@@ -529,7 +529,7 @@ define void @bfe_u32_constant_fold_test_16(i32 addrspace(1)* %out) nounwind {
ret void
}
-; FUNC-LABEL: @bfe_u32_constant_fold_test_17
+; FUNC-LABEL: {{^}}bfe_u32_constant_fold_test_17:
; SI-NOT: BFE
; SI: V_MOV_B32_e32 [[VREG:v[0-9]+]], 0x7f
; SI: BUFFER_STORE_DWORD [[VREG]],
@@ -541,7 +541,7 @@ define void @bfe_u32_constant_fold_test_17(i32 addrspace(1)* %out) nounwind {
ret void
}
-; FUNC-LABEL: @bfe_u32_constant_fold_test_18
+; FUNC-LABEL: {{^}}bfe_u32_constant_fold_test_18:
; SI-NOT: BFE
; SI: V_MOV_B32_e32 [[VREG:v[0-9]+]], 0
; SI: BUFFER_STORE_DWORD [[VREG]],
diff --git a/test/CodeGen/R600/llvm.AMDGPU.bfi.ll b/test/CodeGen/R600/llvm.AMDGPU.bfi.ll
index e1de45b4ba2..698f374872e 100644
--- a/test/CodeGen/R600/llvm.AMDGPU.bfi.ll
+++ b/test/CodeGen/R600/llvm.AMDGPU.bfi.ll
@@ -3,7 +3,7 @@
declare i32 @llvm.AMDGPU.bfi(i32, i32, i32) nounwind readnone
-; FUNC-LABEL: @bfi_arg_arg_arg
+; FUNC-LABEL: {{^}}bfi_arg_arg_arg:
; SI: V_BFI_B32
; EG: BFI_INT
define void @bfi_arg_arg_arg(i32 addrspace(1)* %out, i32 %src0, i32 %src1, i32 %src2) nounwind {
@@ -12,7 +12,7 @@ define void @bfi_arg_arg_arg(i32 addrspace(1)* %out, i32 %src0, i32 %src1, i32 %
ret void
}
-; FUNC-LABEL: @bfi_arg_arg_imm
+; FUNC-LABEL: {{^}}bfi_arg_arg_imm:
; SI: V_BFI_B32
; EG: BFI_INT
define void @bfi_arg_arg_imm(i32 addrspace(1)* %out, i32 %src0, i32 %src1) nounwind {
@@ -21,7 +21,7 @@ define void @bfi_arg_arg_imm(i32 addrspace(1)* %out, i32 %src0, i32 %src1) nounw
ret void
}
-; FUNC-LABEL: @bfi_arg_imm_arg
+; FUNC-LABEL: {{^}}bfi_arg_imm_arg:
; SI: V_BFI_B32
; EG: BFI_INT
define void @bfi_arg_imm_arg(i32 addrspace(1)* %out, i32 %src0, i32 %src2) nounwind {
@@ -30,7 +30,7 @@ define void @bfi_arg_imm_arg(i32 addrspace(1)* %out, i32 %src0, i32 %src2) nounw
ret void
}
-; FUNC-LABEL: @bfi_imm_arg_arg
+; FUNC-LABEL: {{^}}bfi_imm_arg_arg:
; SI: V_BFI_B32
; EG: BFI_INT
define void @bfi_imm_arg_arg(i32 addrspace(1)* %out, i32 %src1, i32 %src2) nounwind {
diff --git a/test/CodeGen/R600/llvm.AMDGPU.bfm.ll b/test/CodeGen/R600/llvm.AMDGPU.bfm.ll
index ef8721e4a97..d1b0b3ee41a 100644
--- a/test/CodeGen/R600/llvm.AMDGPU.bfm.ll
+++ b/test/CodeGen/R600/llvm.AMDGPU.bfm.ll
@@ -3,7 +3,7 @@
declare i32 @llvm.AMDGPU.bfm(i32, i32) nounwind readnone
-; FUNC-LABEL: @bfm_arg_arg
+; FUNC-LABEL: {{^}}bfm_arg_arg:
; SI: V_BFM
; EG: BFM_INT
define void @bfm_arg_arg(i32 addrspace(1)* %out, i32 %src0, i32 %src1) nounwind {
@@ -12,7 +12,7 @@ define void @bfm_arg_arg(i32 addrspace(1)* %out, i32 %src0, i32 %src1) nounwind
ret void
}
-; FUNC-LABEL: @bfm_arg_imm
+; FUNC-LABEL: {{^}}bfm_arg_imm:
; SI: V_BFM
; EG: BFM_INT
define void @bfm_arg_imm(i32 addrspace(1)* %out, i32 %src0) nounwind {
@@ -21,7 +21,7 @@ define void @bfm_arg_imm(i32 addrspace(1)* %out, i32 %src0) nounwind {
ret void
}
-; FUNC-LABEL: @bfm_imm_arg
+; FUNC-LABEL: {{^}}bfm_imm_arg:
; SI: V_BFM
; EG: BFM_INT
define void @bfm_imm_arg(i32 addrspace(1)* %out, i32 %src1) nounwind {
@@ -30,7 +30,7 @@ define void @bfm_imm_arg(i32 addrspace(1)* %out, i32 %src1) nounwind {
ret void
}
-; FUNC-LABEL: @bfm_imm_imm
+; FUNC-LABEL: {{^}}bfm_imm_imm:
; SI: V_BFM
; EG: BFM_INT
define void @bfm_imm_imm(i32 addrspace(1)* %out) nounwind {
diff --git a/test/CodeGen/R600/llvm.AMDGPU.brev.ll b/test/CodeGen/R600/llvm.AMDGPU.brev.ll
index 68a5ad0649c..87f6cb4230b 100644
--- a/test/CodeGen/R600/llvm.AMDGPU.brev.ll
+++ b/test/CodeGen/R600/llvm.AMDGPU.brev.ll
@@ -2,7 +2,7 @@
declare i32 @llvm.AMDGPU.brev(i32) nounwind readnone
-; FUNC-LABEL: @s_brev_i32:
+; FUNC-LABEL: {{^}}s_brev_i32:
; SI: S_LOAD_DWORD [[VAL:s[0-9]+]],
; SI: S_BREV_B32 [[SRESULT:s[0-9]+]], [[VAL]]
; SI: V_MOV_B32_e32 [[VRESULT:v[0-9]+]], [[SRESULT]]
@@ -14,7 +14,7 @@ define void @s_brev_i32(i32 addrspace(1)* noalias %out, i32 %val) nounwind {
ret void
}
-; FUNC-LABEL: @v_brev_i32:
+; FUNC-LABEL: {{^}}v_brev_i32:
; SI: BUFFER_LOAD_DWORD [[VAL:v[0-9]+]],
; SI: V_BFREV_B32_e32 [[RESULT:v[0-9]+]], [[VAL]]
; SI: BUFFER_STORE_DWORD [[RESULT]],
diff --git a/test/CodeGen/R600/llvm.AMDGPU.clamp.ll b/test/CodeGen/R600/llvm.AMDGPU.clamp.ll
index c12732c6004..11c887f204b 100644
--- a/test/CodeGen/R600/llvm.AMDGPU.clamp.ll
+++ b/test/CodeGen/R600/llvm.AMDGPU.clamp.ll
@@ -4,7 +4,7 @@
declare float @llvm.AMDGPU.clamp.f32(float, float, float) nounwind readnone
declare float @llvm.AMDIL.clamp.f32(float, float, float) nounwind readnone
-; FUNC-LABEL: @clamp_0_1_f32
+; FUNC-LABEL: {{^}}clamp_0_1_f32:
; SI: S_LOAD_DWORD [[ARG:s[0-9]+]],
; SI: V_ADD_F32_e64 [[RESULT:v[0-9]+]], 0, [[ARG]] clamp{{$}}
; SI: BUFFER_STORE_DWORD [[RESULT]]
@@ -17,7 +17,7 @@ define void @clamp_0_1_f32(float addrspace(1)* %out, float %src) nounwind {
ret void
}
-; FUNC-LABEL: @clamp_0_1_amdil_legacy_f32
+; FUNC-LABEL: {{^}}clamp_0_1_amdil_legacy_f32:
; SI: S_LOAD_DWORD [[ARG:s[0-9]+]],
; SI: V_ADD_F32_e64 [[RESULT:v[0-9]+]], 0, [[ARG]] clamp{{$}}
; SI: BUFFER_STORE_DWORD [[RESULT]]
diff --git a/test/CodeGen/R600/llvm.AMDGPU.cube.ll b/test/CodeGen/R600/llvm.AMDGPU.cube.ll
index 110bbfde68b..aa07afdebea 100644
--- a/test/CodeGen/R600/llvm.AMDGPU.cube.ll
+++ b/test/CodeGen/R600/llvm.AMDGPU.cube.ll
@@ -1,7 +1,7 @@
; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
-; CHECK: @cube
+; CHECK: {{^}}cube:
; CHECK: CUBE T{{[0-9]}}.X
; CHECK: CUBE T{{[0-9]}}.Y
; CHECK: CUBE T{{[0-9]}}.Z
diff --git a/test/CodeGen/R600/llvm.AMDGPU.cvt_f32_ubyte.ll b/test/CodeGen/R600/llvm.AMDGPU.cvt_f32_ubyte.ll
index 6facb4782e9..4295d24d8bd 100644
--- a/test/CodeGen/R600/llvm.AMDGPU.cvt_f32_ubyte.ll
+++ b/test/CodeGen/R600/llvm.AMDGPU.cvt_f32_ubyte.ll
@@ -5,7 +5,7 @@ declare float @llvm.AMDGPU.cvt.f32.ubyte1(i32) nounwind readnone
declare float @llvm.AMDGPU.cvt.f32.ubyte2(i32) nounwind readnone
declare float @llvm.AMDGPU.cvt.f32.ubyte3(i32) nounwind readnone
-; SI-LABEL: @test_unpack_byte0_to_float:
+; SI-LABEL: {{^}}test_unpack_byte0_to_float:
; SI: V_CVT_F32_UBYTE0
define void @test_unpack_byte0_to_float(float addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
%val = load i32 addrspace(1)* %in, align 4
@@ -14,7 +14,7 @@ define void @test_unpack_byte0_to_float(float addrspace(1)* %out, i32 addrspace(
ret void
}
-; SI-LABEL: @test_unpack_byte1_to_float:
+; SI-LABEL: {{^}}test_unpack_byte1_to_float:
; SI: V_CVT_F32_UBYTE1
define void @test_unpack_byte1_to_float(float addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
%val = load i32 addrspace(1)* %in, align 4
@@ -23,7 +23,7 @@ define void @test_unpack_byte1_to_float(float addrspace(1)* %out, i32 addrspace(
ret void
}
-; SI-LABEL: @test_unpack_byte2_to_float:
+; SI-LABEL: {{^}}test_unpack_byte2_to_float:
; SI: V_CVT_F32_UBYTE2
define void @test_unpack_byte2_to_float(float addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
%val = load i32 addrspace(1)* %in, align 4
@@ -32,7 +32,7 @@ define void @test_unpack_byte2_to_float(float addrspace(1)* %out, i32 addrspace(
ret void
}
-; SI-LABEL: @test_unpack_byte3_to_float:
+; SI-LABEL: {{^}}test_unpack_byte3_to_float:
; SI: V_CVT_F32_UBYTE3
define void @test_unpack_byte3_to_float(float addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
%val = load i32 addrspace(1)* %in, align 4
diff --git a/test/CodeGen/R600/llvm.AMDGPU.div_fixup.ll b/test/CodeGen/R600/llvm.AMDGPU.div_fixup.ll
index d0eeeab09c5..7fbbc2104f8 100644
--- a/test/CodeGen/R600/llvm.AMDGPU.div_fixup.ll
+++ b/test/CodeGen/R600/llvm.AMDGPU.div_fixup.ll
@@ -3,7 +3,7 @@
declare float @llvm.AMDGPU.div.fixup.f32(float, float, float) nounwind readnone
declare double @llvm.AMDGPU.div.fixup.f64(double, double, double) nounwind readnone
-; SI-LABEL: @test_div_fixup_f32:
+; SI-LABEL: {{^}}test_div_fixup_f32:
; SI-DAG: S_LOAD_DWORD [[SA:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
; SI-DAG: S_LOAD_DWORD [[SC:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xd
; SI-DAG: S_LOAD_DWORD [[SB:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xc
@@ -18,7 +18,7 @@ define void @test_div_fixup_f32(float addrspace(1)* %out, float %a, float %b, fl
ret void
}
-; SI-LABEL: @test_div_fixup_f64:
+; SI-LABEL: {{^}}test_div_fixup_f64:
; SI: V_DIV_FIXUP_F64
define void @test_div_fixup_f64(double addrspace(1)* %out, double %a, double %b, double %c) nounwind {
%result = call double @llvm.AMDGPU.div.fixup.f64(double %a, double %b, double %c) nounwind readnone
diff --git a/test/CodeGen/R600/llvm.AMDGPU.div_fmas.ll b/test/CodeGen/R600/llvm.AMDGPU.div_fmas.ll
index 40b441df86b..8d76f1e662b 100644
--- a/test/CodeGen/R600/llvm.AMDGPU.div_fmas.ll
+++ b/test/CodeGen/R600/llvm.AMDGPU.div_fmas.ll
@@ -3,7 +3,7 @@
declare float @llvm.AMDGPU.div.fmas.f32(float, float, float) nounwind readnone
declare double @llvm.AMDGPU.div.fmas.f64(double, double, double) nounwind readnone
-; SI-LABEL: @test_div_fmas_f32:
+; SI-LABEL: {{^}}test_div_fmas_f32:
; SI-DAG: S_LOAD_DWORD [[SA:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
; SI-DAG: S_LOAD_DWORD [[SC:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xd
; SI-DAG: S_LOAD_DWORD [[SB:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xc
@@ -18,7 +18,7 @@ define void @test_div_fmas_f32(float addrspace(1)* %out, float %a, float %b, flo
ret void
}
-; SI-LABEL: @test_div_fmas_f64:
+; SI-LABEL: {{^}}test_div_fmas_f64:
; SI: V_DIV_FMAS_F64
define void @test_div_fmas_f64(double addrspace(1)* %out, double %a, double %b, double %c) nounwind {
%result = call double @llvm.AMDGPU.div.fmas.f64(double %a, double %b, double %c) nounwind readnone
diff --git a/test/CodeGen/R600/llvm.AMDGPU.fract.ll b/test/CodeGen/R600/llvm.AMDGPU.fract.ll
index 72ec1c57571..7463b942dde 100644
--- a/test/CodeGen/R600/llvm.AMDGPU.fract.ll
+++ b/test/CodeGen/R600/llvm.AMDGPU.fract.ll
@@ -6,7 +6,7 @@ declare float @llvm.AMDGPU.fract.f32(float) nounwind readnone
; Legacy name
declare float @llvm.AMDIL.fraction.f32(float) nounwind readnone
-; FUNC-LABEL: @fract_f32
+; FUNC-LABEL: {{^}}fract_f32:
; SI: V_FRACT_F32
; EG: FRACT
define void @fract_f32(float addrspace(1)* %out, float addrspace(1)* %src) nounwind {
@@ -16,7 +16,7 @@ define void @fract_f32(float addrspace(1)* %out, float addrspace(1)* %src) nounw
ret void
}
-; FUNC-LABEL: @fract_f32_legacy_amdil
+; FUNC-LABEL: {{^}}fract_f32_legacy_amdil:
; SI: V_FRACT_F32
; EG: FRACT
define void @fract_f32_legacy_amdil(float addrspace(1)* %out, float addrspace(1)* %src) nounwind {
diff --git a/test/CodeGen/R600/llvm.AMDGPU.imad24.ll b/test/CodeGen/R600/llvm.AMDGPU.imad24.ll
index 95795ea63b9..e1b41c4ac8d 100644
--- a/test/CodeGen/R600/llvm.AMDGPU.imad24.ll
+++ b/test/CodeGen/R600/llvm.AMDGPU.imad24.ll
@@ -8,7 +8,7 @@
declare i32 @llvm.AMDGPU.imad24(i32, i32, i32) nounwind readnone
-; FUNC-LABEL: @test_imad24
+; FUNC-LABEL: {{^}}test_imad24:
; SI: V_MAD_I32_I24
; CM: MULADD_INT24
; R600: MULLO_INT
diff --git a/test/CodeGen/R600/llvm.AMDGPU.imax.ll b/test/CodeGen/R600/llvm.AMDGPU.imax.ll
index 01c9f435b9f..36f5ac7766d 100644
--- a/test/CodeGen/R600/llvm.AMDGPU.imax.ll
+++ b/test/CodeGen/R600/llvm.AMDGPU.imax.ll
@@ -1,6 +1,6 @@
; RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck -check-prefix=SI %s
-; SI-LABEL: @vector_imax
+; SI-LABEL: {{^}}vector_imax:
; SI: V_MAX_I32_e32
define void @vector_imax(i32 %p0, i32 %p1, i32 addrspace(1)* %in) #0 {
main_body:
@@ -11,7 +11,7 @@ main_body:
ret void
}
-; SI-LABEL: @scalar_imax
+; SI-LABEL: {{^}}scalar_imax:
; SI: S_MAX_I32
define void @scalar_imax(i32 %p0, i32 %p1) #0 {
entry:
diff --git a/test/CodeGen/R600/llvm.AMDGPU.imin.ll b/test/CodeGen/R600/llvm.AMDGPU.imin.ll
index 565bf344408..e7ae7bd5f0d 100644
--- a/test/CodeGen/R600/llvm.AMDGPU.imin.ll
+++ b/test/CodeGen/R600/llvm.AMDGPU.imin.ll
@@ -1,6 +1,6 @@
; RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck -check-prefix=SI %s
-; SI-LABEL: @vector_imin
+; SI-LABEL: {{^}}vector_imin:
; SI: V_MIN_I32_e32
define void @vector_imin(i32 %p0, i32 %p1, i32 addrspace(1)* %in) #0 {
main_body:
@@ -11,7 +11,7 @@ main_body:
ret void
}
-; SI-LABEL: @scalar_imin
+; SI-LABEL: {{^}}scalar_imin:
; SI: S_MIN_I32
define void @scalar_imin(i32 %p0, i32 %p1) #0 {
entry:
diff --git a/test/CodeGen/R600/llvm.AMDGPU.imul24.ll b/test/CodeGen/R600/llvm.AMDGPU.imul24.ll
index 8ee3520daea..2b0d87601a0 100644
--- a/test/CodeGen/R600/llvm.AMDGPU.imul24.ll
+++ b/test/CodeGen/R600/llvm.AMDGPU.imul24.ll
@@ -4,7 +4,7 @@
declare i32 @llvm.AMDGPU.imul24(i32, i32) nounwind readnone
-; FUNC-LABEL: @test_imul24
+; FUNC-LABEL: {{^}}test_imul24:
; SI: V_MUL_I32_I24
; CM: MUL_INT24
; R600: MULLO_INT
diff --git a/test/CodeGen/R600/llvm.AMDGPU.kill.ll b/test/CodeGen/R600/llvm.AMDGPU.kill.ll
index 1f82ffb53f1..0d61eb637ed 100644
--- a/test/CodeGen/R600/llvm.AMDGPU.kill.ll
+++ b/test/CodeGen/R600/llvm.AMDGPU.kill.ll
@@ -1,6 +1,6 @@
; RUN: llc -march=r600 -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
-; SI-LABEL: @kill_gs_const
+; SI-LABEL: {{^}}kill_gs_const:
; SI-NOT: V_CMPX_LE_F32
; SI: S_MOV_B64 exec, 0
diff --git a/test/CodeGen/R600/llvm.AMDGPU.ldexp.ll b/test/CodeGen/R600/llvm.AMDGPU.ldexp.ll
index d59426c6754..473b95142d2 100644
--- a/test/CodeGen/R600/llvm.AMDGPU.ldexp.ll
+++ b/test/CodeGen/R600/llvm.AMDGPU.ldexp.ll
@@ -3,7 +3,7 @@
declare float @llvm.AMDGPU.ldexp.f32(float, i32) nounwind readnone
declare double @llvm.AMDGPU.ldexp.f64(double, i32) nounwind readnone
-; SI-LABEL: @test_ldexp_f32:
+; SI-LABEL: {{^}}test_ldexp_f32:
; SI: V_LDEXP_F32
; SI: S_ENDPGM
define void @test_ldexp_f32(float addrspace(1)* %out, float %a, i32 %b) nounwind {
@@ -12,7 +12,7 @@ define void @test_ldexp_f32(float addrspace(1)* %out, float %a, i32 %b) nounwind
ret void
}
-; SI-LABEL: @test_ldexp_f64:
+; SI-LABEL: {{^}}test_ldexp_f64:
; SI: V_LDEXP_F64
; SI: S_ENDPGM
define void @test_ldexp_f64(double addrspace(1)* %out, double %a, i32 %b) nounwind {
diff --git a/test/CodeGen/R600/llvm.AMDGPU.legacy.rsq.ll b/test/CodeGen/R600/llvm.AMDGPU.legacy.rsq.ll
index 51964eefa64..ac6e8fd4a5e 100644
--- a/test/CodeGen/R600/llvm.AMDGPU.legacy.rsq.ll
+++ b/test/CodeGen/R600/llvm.AMDGPU.legacy.rsq.ll
@@ -3,7 +3,7 @@
declare float @llvm.AMDGPU.legacy.rsq(float) nounwind readnone
-; FUNC-LABEL: @rsq_legacy_f32
+; FUNC-LABEL: {{^}}rsq_legacy_f32:
; SI: V_RSQ_LEGACY_F32_e32
; EG: RECIPSQRT_IEEE
define void @rsq_legacy_f32(float addrspace(1)* %out, float %src) nounwind {
diff --git a/test/CodeGen/R600/llvm.AMDGPU.rcp.f64.ll b/test/CodeGen/R600/llvm.AMDGPU.rcp.f64.ll
index b5dda0ce81f..85c902c0d4f 100644
--- a/test/CodeGen/R600/llvm.AMDGPU.rcp.f64.ll
+++ b/test/CodeGen/R600/llvm.AMDGPU.rcp.f64.ll
@@ -3,7 +3,7 @@
declare double @llvm.AMDGPU.rcp.f64(double) nounwind readnone
declare double @llvm.sqrt.f64(double) nounwind readnone
-; FUNC-LABEL: @rcp_f64
+; FUNC-LABEL: {{^}}rcp_f64:
; SI: V_RCP_F64_e32
define void @rcp_f64(double addrspace(1)* %out, double %src) nounwind {
%rcp = call double @llvm.AMDGPU.rcp.f64(double %src) nounwind readnone
@@ -11,7 +11,7 @@ define void @rcp_f64(double addrspace(1)* %out, double %src) nounwind {
ret void
}
-; FUNC-LABEL: @rcp_pat_f64
+; FUNC-LABEL: {{^}}rcp_pat_f64:
; SI: V_RCP_F64_e32
define void @rcp_pat_f64(double addrspace(1)* %out, double %src) nounwind {
%rcp = fdiv double 1.0, %src
@@ -19,7 +19,7 @@ define void @rcp_pat_f64(double addrspace(1)* %out, double %src) nounwind {
ret void
}
-; FUNC-LABEL: @rsq_rcp_pat_f64
+; FUNC-LABEL: {{^}}rsq_rcp_pat_f64:
; SI-UNSAFE: V_RSQ_F64_e32
; SI-SAFE-NOT: V_RSQ_F64_e32
define void @rsq_rcp_pat_f64(double addrspace(1)* %out, double %src) nounwind {
diff --git a/test/CodeGen/R600/llvm.AMDGPU.rcp.ll b/test/CodeGen/R600/llvm.AMDGPU.rcp.ll
index df6c3bb6a2c..dcfa07f8fa2 100644
--- a/test/CodeGen/R600/llvm.AMDGPU.rcp.ll
+++ b/test/CodeGen/R600/llvm.AMDGPU.rcp.ll
@@ -10,7 +10,7 @@ declare double @llvm.AMDGPU.rcp.f64(double) nounwind readnone
declare float @llvm.sqrt.f32(float) nounwind readnone
-; FUNC-LABEL: @rcp_f32
+; FUNC-LABEL: {{^}}rcp_f32:
; SI: V_RCP_F32_e32
; EG: RECIP_IEEE
define void @rcp_f32(float addrspace(1)* %out, float %src) nounwind {
@@ -20,7 +20,7 @@ define void @rcp_f32(float addrspace(1)* %out, float %src) nounwind {
}
; FIXME: Evergreen only ever does unsafe fp math.
-; FUNC-LABEL: @rcp_pat_f32
+; FUNC-LABEL: {{^}}rcp_pat_f32:
; SI-SAFE: V_RCP_F32_e32
; XSI-SAFE-SPDENORM-NOT: V_RCP_F32_e32
@@ -33,7 +33,7 @@ define void @rcp_pat_f32(float addrspace(1)* %out, float %src) nounwind {
ret void
}
-; FUNC-LABEL: @rsq_rcp_pat_f32
+; FUNC-LABEL: {{^}}rsq_rcp_pat_f32:
; SI-UNSAFE: V_RSQ_F32_e32
; SI-SAFE: V_SQRT_F32_e32
; SI-SAFE: V_RCP_F32_e32
diff --git a/test/CodeGen/R600/llvm.AMDGPU.rsq.clamped.f64.ll b/test/CodeGen/R600/llvm.AMDGPU.rsq.clamped.f64.ll
index 100d6ff7770..7ca5e8a9077 100644
--- a/test/CodeGen/R600/llvm.AMDGPU.rsq.clamped.f64.ll
+++ b/test/CodeGen/R600/llvm.AMDGPU.rsq.clamped.f64.ll
@@ -2,7 +2,7 @@
declare double @llvm.AMDGPU.rsq.clamped.f64(double) nounwind readnone
-; FUNC-LABEL: @rsq_clamped_f64
+; FUNC-LABEL: {{^}}rsq_clamped_f64:
; SI: V_RSQ_CLAMP_F64_e32
define void @rsq_clamped_f64(double addrspace(1)* %out, double %src) nounwind {
%rsq_clamped = call double @llvm.AMDGPU.rsq.clamped.f64(double %src) nounwind readnone
diff --git a/test/CodeGen/R600/llvm.AMDGPU.rsq.clamped.ll b/test/CodeGen/R600/llvm.AMDGPU.rsq.clamped.ll
index 683df7355ac..ace9653e44d 100644
--- a/test/CodeGen/R600/llvm.AMDGPU.rsq.clamped.ll
+++ b/test/CodeGen/R600/llvm.AMDGPU.rsq.clamped.ll
@@ -4,7 +4,7 @@
declare float @llvm.AMDGPU.rsq.clamped.f32(float) nounwind readnone
-; FUNC-LABEL: @rsq_clamped_f32
+; FUNC-LABEL: {{^}}rsq_clamped_f32:
; SI: V_RSQ_CLAMP_F32_e32
; EG: RECIPSQRT_CLAMPED
define void @rsq_clamped_f32(float addrspace(1)* %out, float %src) nounwind {
diff --git a/test/CodeGen/R600/llvm.AMDGPU.rsq.ll b/test/CodeGen/R600/llvm.AMDGPU.rsq.ll
index 449b3afc381..709d445ebe2 100644
--- a/test/CodeGen/R600/llvm.AMDGPU.rsq.ll
+++ b/test/CodeGen/R600/llvm.AMDGPU.rsq.ll
@@ -3,7 +3,7 @@
declare float @llvm.AMDGPU.rsq.f32(float) nounwind readnone
-; FUNC-LABEL: @rsq_f32
+; FUNC-LABEL: {{^}}rsq_f32:
; SI: V_RSQ_F32_e32 {{v[0-9]+}}, {{s[0-9]+}}
; EG: RECIPSQRT_IEEE
define void @rsq_f32(float addrspace(1)* %out, float %src) nounwind {
@@ -13,7 +13,7 @@ define void @rsq_f32(float addrspace(1)* %out, float %src) nounwind {
}
; TODO: Really these should be constant folded
-; FUNC-LABEL: @rsq_f32_constant_4.0
+; FUNC-LABEL: {{^}}rsq_f32_constant_4.0
; SI: V_RSQ_F32_e32 {{v[0-9]+}}, 4.0
; EG: RECIPSQRT_IEEE
define void @rsq_f32_constant_4.0(float addrspace(1)* %out) nounwind {
@@ -22,7 +22,7 @@ define void @rsq_f32_constant_4.0(float addrspace(1)* %out) nounwind {
ret void
}
-; FUNC-LABEL: @rsq_f32_constant_100.0
+; FUNC-LABEL: {{^}}rsq_f32_constant_100.0
; SI: V_RSQ_F32_e32 {{v[0-9]+}}, 0x42c80000
; EG: RECIPSQRT_IEEE
define void @rsq_f32_constant_100.0(float addrspace(1)* %out) nounwind {
diff --git a/test/CodeGen/R600/llvm.AMDGPU.trig_preop.ll b/test/CodeGen/R600/llvm.AMDGPU.trig_preop.ll
index 1c736d447ea..8eecd6eaef1 100644
--- a/test/CodeGen/R600/llvm.AMDGPU.trig_preop.ll
+++ b/test/CodeGen/R600/llvm.AMDGPU.trig_preop.ll
@@ -2,7 +2,7 @@
declare double @llvm.AMDGPU.trig.preop.f64(double, i32) nounwind readnone
-; SI-LABEL: @test_trig_preop_f64:
+; SI-LABEL: {{^}}test_trig_preop_f64:
; SI-DAG: BUFFER_LOAD_DWORD [[SEG:v[0-9]+]]
; SI-DAG: BUFFER_LOAD_DWORDX2 [[SRC:v\[[0-9]+:[0-9]+\]]],
; SI: V_TRIG_PREOP_F64 [[RESULT:v\[[0-9]+:[0-9]+\]]], [[SRC]], [[SEG]]
@@ -16,7 +16,7 @@ define void @test_trig_preop_f64(double addrspace(1)* %out, double addrspace(1)*
ret void
}
-; SI-LABEL: @test_trig_preop_f64_imm_segment:
+; SI-LABEL: {{^}}test_trig_preop_f64_imm_segment:
; SI: BUFFER_LOAD_DWORDX2 [[SRC:v\[[0-9]+:[0-9]+\]]],
; SI: V_TRIG_PREOP_F64 [[RESULT:v\[[0-9]+:[0-9]+\]]], [[SRC]], 7
; SI: BUFFER_STORE_DWORDX2 [[RESULT]],
diff --git a/test/CodeGen/R600/llvm.AMDGPU.trunc.ll b/test/CodeGen/R600/llvm.AMDGPU.trunc.ll
index e6bb2c4e945..8f85a845ec1 100644
--- a/test/CodeGen/R600/llvm.AMDGPU.trunc.ll
+++ b/test/CodeGen/R600/llvm.AMDGPU.trunc.ll
@@ -1,9 +1,9 @@
; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck --check-prefix=R600-CHECK %s
; RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck --check-prefix=SI-CHECK %s
-; R600-CHECK: @amdgpu_trunc
+; R600-CHECK: {{^}}amdgpu_trunc:
; R600-CHECK: TRUNC T{{[0-9]+\.[XYZW]}}, KC0[2].Z
-; SI-CHECK: @amdgpu_trunc
+; SI-CHECK: {{^}}amdgpu_trunc:
; SI-CHECK: V_TRUNC_F32
define void @amdgpu_trunc(float addrspace(1)* %out, float %x) {
diff --git a/test/CodeGen/R600/llvm.AMDGPU.umad24.ll b/test/CodeGen/R600/llvm.AMDGPU.umad24.ll
index afdfb18a563..3331ba488f2 100644
--- a/test/CodeGen/R600/llvm.AMDGPU.umad24.ll
+++ b/test/CodeGen/R600/llvm.AMDGPU.umad24.ll
@@ -6,7 +6,7 @@
declare i32 @llvm.AMDGPU.umad24(i32, i32, i32) nounwind readnone
-; FUNC-LABEL: @test_umad24
+; FUNC-LABEL: {{^}}test_umad24:
; SI: V_MAD_U32_U24
; EG: MULADD_UINT24
; R600: MULLO_UINT
diff --git a/test/CodeGen/R600/llvm.AMDGPU.umax.ll b/test/CodeGen/R600/llvm.AMDGPU.umax.ll
index 1b8da2e1553..83c6b06ec70 100644
--- a/test/CodeGen/R600/llvm.AMDGPU.umax.ll
+++ b/test/CodeGen/R600/llvm.AMDGPU.umax.ll
@@ -1,6 +1,6 @@
; RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck -check-prefix=SI %s
-; SI-LABEL: @vector_umax
+; SI-LABEL: {{^}}vector_umax:
; SI: V_MAX_U32_e32
define void @vector_umax(i32 %p0, i32 %p1, i32 addrspace(1)* %in) #0 {
main_body:
@@ -11,7 +11,7 @@ main_body:
ret void
}
-; SI-LABEL: @scalar_umax
+; SI-LABEL: {{^}}scalar_umax:
; SI: S_MAX_U32
define void @scalar_umax(i32 %p0, i32 %p1) #0 {
entry:
@@ -21,7 +21,7 @@ entry:
ret void
}
-; SI-LABEL: @trunc_zext_umax
+; SI-LABEL: {{^}}trunc_zext_umax:
; SI: BUFFER_LOAD_UBYTE [[VREG:v[0-9]+]],
; SI: V_MAX_U32_e32 [[RESULT:v[0-9]+]], 0, [[VREG]]
; SI-NOT: AND
diff --git a/test/CodeGen/R600/llvm.AMDGPU.umin.ll b/test/CodeGen/R600/llvm.AMDGPU.umin.ll
index 08397f8356c..6b76a2a028e 100644
--- a/test/CodeGen/R600/llvm.AMDGPU.umin.ll
+++ b/test/CodeGen/R600/llvm.AMDGPU.umin.ll
@@ -1,6 +1,6 @@
; RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck -check-prefix=SI %s
-; SI-LABEL: @vector_umin
+; SI-LABEL: {{^}}vector_umin:
; SI: V_MIN_U32_e32
define void @vector_umin(i32 %p0, i32 %p1, i32 addrspace(1)* %in) #0 {
main_body:
@@ -11,7 +11,7 @@ main_body:
ret void
}
-; SI-LABEL: @scalar_umin
+; SI-LABEL: {{^}}scalar_umin:
; SI: S_MIN_U32
define void @scalar_umin(i32 %p0, i32 %p1) #0 {
entry:
@@ -21,7 +21,7 @@ entry:
ret void
}
-; SI-LABEL: @trunc_zext_umin
+; SI-LABEL: {{^}}trunc_zext_umin:
; SI: BUFFER_LOAD_UBYTE [[VREG:v[0-9]+]],
; SI: V_MIN_U32_e32 [[RESULT:v[0-9]+]], 0, [[VREG]]
; SI-NOT: AND
diff --git a/test/CodeGen/R600/llvm.AMDGPU.umul24.ll b/test/CodeGen/R600/llvm.AMDGPU.umul24.ll
index 72a36029fb3..18bc7033f2e 100644
--- a/test/CodeGen/R600/llvm.AMDGPU.umul24.ll
+++ b/test/CodeGen/R600/llvm.AMDGPU.umul24.ll
@@ -6,7 +6,7 @@
declare i32 @llvm.AMDGPU.umul24(i32, i32) nounwind readnone
-; FUNC-LABEL: @test_umul24
+; FUNC-LABEL: {{^}}test_umul24:
; SI: V_MUL_U32_U24
; R600: MUL_UINT24
; R600: MULLO_UINT
diff --git a/test/CodeGen/R600/llvm.SI.gather4.ll b/test/CodeGen/R600/llvm.SI.gather4.ll
index 8402faaa4dc..ad9789bd9b6 100644
--- a/test/CodeGen/R600/llvm.SI.gather4.ll
+++ b/test/CodeGen/R600/llvm.SI.gather4.ll
@@ -1,6 +1,6 @@
;RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck %s
-;CHECK-LABEL: @gather4_v2
+;CHECK-LABEL: {{^}}gather4_v2:
;CHECK: IMAGE_GATHER4 {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @gather4_v2() #0 {
main_body:
@@ -13,7 +13,7 @@ main_body:
ret void
}
-;CHECK-LABEL: @gather4
+;CHECK-LABEL: {{^}}gather4:
;CHECK: IMAGE_GATHER4 {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @gather4() #0 {
main_body:
@@ -26,7 +26,7 @@ main_body:
ret void
}
-;CHECK-LABEL: @gather4_cl
+;CHECK-LABEL: {{^}}gather4_cl:
;CHECK: IMAGE_GATHER4_CL {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @gather4_cl() #0 {
main_body:
@@ -39,7 +39,7 @@ main_body:
ret void
}
-;CHECK-LABEL: @gather4_l
+;CHECK-LABEL: {{^}}gather4_l:
;CHECK: IMAGE_GATHER4_L {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @gather4_l() #0 {
main_body:
@@ -52,7 +52,7 @@ main_body:
ret void
}
-;CHECK-LABEL: @gather4_b
+;CHECK-LABEL: {{^}}gather4_b:
;CHECK: IMAGE_GATHER4_B {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @gather4_b() #0 {
main_body:
@@ -65,7 +65,7 @@ main_body:
ret void
}
-;CHECK-LABEL: @gather4_b_cl
+;CHECK-LABEL: {{^}}gather4_b_cl:
;CHECK: IMAGE_GATHER4_B_CL {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @gather4_b_cl() #0 {
main_body:
@@ -78,7 +78,7 @@ main_body:
ret void
}
-;CHECK-LABEL: @gather4_b_cl_v8
+;CHECK-LABEL: {{^}}gather4_b_cl_v8:
;CHECK: IMAGE_GATHER4_B_CL {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @gather4_b_cl_v8() #0 {
main_body:
@@ -91,7 +91,7 @@ main_body:
ret void
}
-;CHECK-LABEL: @gather4_lz_v2
+;CHECK-LABEL: {{^}}gather4_lz_v2:
;CHECK: IMAGE_GATHER4_LZ {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @gather4_lz_v2() #0 {
main_body:
@@ -104,7 +104,7 @@ main_body:
ret void
}
-;CHECK-LABEL: @gather4_lz
+;CHECK-LABEL: {{^}}gather4_lz:
;CHECK: IMAGE_GATHER4_LZ {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @gather4_lz() #0 {
main_body:
@@ -119,7 +119,7 @@ main_body:
-;CHECK-LABEL: @gather4_o
+;CHECK-LABEL: {{^}}gather4_o:
;CHECK: IMAGE_GATHER4_O {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @gather4_o() #0 {
main_body:
@@ -132,7 +132,7 @@ main_body:
ret void
}
-;CHECK-LABEL: @gather4_cl_o
+;CHECK-LABEL: {{^}}gather4_cl_o:
;CHECK: IMAGE_GATHER4_CL_O {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @gather4_cl_o() #0 {
main_body:
@@ -145,7 +145,7 @@ main_body:
ret void
}
-;CHECK-LABEL: @gather4_cl_o_v8
+;CHECK-LABEL: {{^}}gather4_cl_o_v8:
;CHECK: IMAGE_GATHER4_CL_O {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @gather4_cl_o_v8() #0 {
main_body:
@@ -158,7 +158,7 @@ main_body:
ret void
}
-;CHECK-LABEL: @gather4_l_o
+;CHECK-LABEL: {{^}}gather4_l_o:
;CHECK: IMAGE_GATHER4_L_O {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @gather4_l_o() #0 {
main_body:
@@ -171,7 +171,7 @@ main_body:
ret void
}
-;CHECK-LABEL: @gather4_l_o_v8
+;CHECK-LABEL: {{^}}gather4_l_o_v8:
;CHECK: IMAGE_GATHER4_L_O {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @gather4_l_o_v8() #0 {
main_body:
@@ -184,7 +184,7 @@ main_body:
ret void
}
-;CHECK-LABEL: @gather4_b_o
+;CHECK-LABEL: {{^}}gather4_b_o:
;CHECK: IMAGE_GATHER4_B_O {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @gather4_b_o() #0 {
main_body:
@@ -197,7 +197,7 @@ main_body:
ret void
}
-;CHECK-LABEL: @gather4_b_o_v8
+;CHECK-LABEL: {{^}}gather4_b_o_v8:
;CHECK: IMAGE_GATHER4_B_O {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @gather4_b_o_v8() #0 {
main_body:
@@ -210,7 +210,7 @@ main_body:
ret void
}
-;CHECK-LABEL: @gather4_b_cl_o
+;CHECK-LABEL: {{^}}gather4_b_cl_o:
;CHECK: IMAGE_GATHER4_B_CL_O {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @gather4_b_cl_o() #0 {
main_body:
@@ -223,7 +223,7 @@ main_body:
ret void
}
-;CHECK-LABEL: @gather4_lz_o
+;CHECK-LABEL: {{^}}gather4_lz_o:
;CHECK: IMAGE_GATHER4_LZ_O {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @gather4_lz_o() #0 {
main_body:
@@ -238,7 +238,7 @@ main_body:
-;CHECK-LABEL: @gather4_c
+;CHECK-LABEL: {{^}}gather4_c:
;CHECK: IMAGE_GATHER4_C {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @gather4_c() #0 {
main_body:
@@ -251,7 +251,7 @@ main_body:
ret void
}
-;CHECK-LABEL: @gather4_c_cl
+;CHECK-LABEL: {{^}}gather4_c_cl:
;CHECK: IMAGE_GATHER4_C_CL {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @gather4_c_cl() #0 {
main_body:
@@ -264,7 +264,7 @@ main_body:
ret void
}
-;CHECK-LABEL: @gather4_c_cl_v8
+;CHECK-LABEL: {{^}}gather4_c_cl_v8:
;CHECK: IMAGE_GATHER4_C_CL {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @gather4_c_cl_v8() #0 {
main_body:
@@ -277,7 +277,7 @@ main_body:
ret void
}
-;CHECK-LABEL: @gather4_c_l
+;CHECK-LABEL: {{^}}gather4_c_l:
;CHECK: IMAGE_GATHER4_C_L {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @gather4_c_l() #0 {
main_body:
@@ -290,7 +290,7 @@ main_body:
ret void
}
-;CHECK-LABEL: @gather4_c_l_v8
+;CHECK-LABEL: {{^}}gather4_c_l_v8:
;CHECK: IMAGE_GATHER4_C_L {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @gather4_c_l_v8() #0 {
main_body:
@@ -303,7 +303,7 @@ main_body:
ret void
}
-;CHECK-LABEL: @gather4_c_b
+;CHECK-LABEL: {{^}}gather4_c_b:
;CHECK: IMAGE_GATHER4_C_B {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @gather4_c_b() #0 {
main_body:
@@ -316,7 +316,7 @@ main_body:
ret void
}
-;CHECK-LABEL: @gather4_c_b_v8
+;CHECK-LABEL: {{^}}gather4_c_b_v8:
;CHECK: IMAGE_GATHER4_C_B {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @gather4_c_b_v8() #0 {
main_body:
@@ -329,7 +329,7 @@ main_body:
ret void
}
-;CHECK-LABEL: @gather4_c_b_cl
+;CHECK-LABEL: {{^}}gather4_c_b_cl:
;CHECK: IMAGE_GATHER4_C_B_CL {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @gather4_c_b_cl() #0 {
main_body:
@@ -342,7 +342,7 @@ main_body:
ret void
}
-;CHECK-LABEL: @gather4_c_lz
+;CHECK-LABEL: {{^}}gather4_c_lz:
;CHECK: IMAGE_GATHER4_C_LZ {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @gather4_c_lz() #0 {
main_body:
@@ -357,7 +357,7 @@ main_body:
-;CHECK-LABEL: @gather4_c_o
+;CHECK-LABEL: {{^}}gather4_c_o:
;CHECK: IMAGE_GATHER4_C_O {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @gather4_c_o() #0 {
main_body:
@@ -370,7 +370,7 @@ main_body:
ret void
}
-;CHECK-LABEL: @gather4_c_o_v8
+;CHECK-LABEL: {{^}}gather4_c_o_v8:
;CHECK: IMAGE_GATHER4_C_O {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @gather4_c_o_v8() #0 {
main_body:
@@ -383,7 +383,7 @@ main_body:
ret void
}
-;CHECK-LABEL: @gather4_c_cl_o
+;CHECK-LABEL: {{^}}gather4_c_cl_o:
;CHECK: IMAGE_GATHER4_C_CL_O {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @gather4_c_cl_o() #0 {
main_body:
@@ -396,7 +396,7 @@ main_body:
ret void
}
-;CHECK-LABEL: @gather4_c_l_o
+;CHECK-LABEL: {{^}}gather4_c_l_o:
;CHECK: IMAGE_GATHER4_C_L_O {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @gather4_c_l_o() #0 {
main_body:
@@ -409,7 +409,7 @@ main_body:
ret void
}
-;CHECK-LABEL: @gather4_c_b_o
+;CHECK-LABEL: {{^}}gather4_c_b_o:
;CHECK: IMAGE_GATHER4_C_B_O {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @gather4_c_b_o() #0 {
main_body:
@@ -422,7 +422,7 @@ main_body:
ret void
}
-;CHECK-LABEL: @gather4_c_b_cl_o
+;CHECK-LABEL: {{^}}gather4_c_b_cl_o:
;CHECK: IMAGE_GATHER4_C_B_CL_O {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @gather4_c_b_cl_o() #0 {
main_body:
@@ -435,7 +435,7 @@ main_body:
ret void
}
-;CHECK-LABEL: @gather4_c_lz_o
+;CHECK-LABEL: {{^}}gather4_c_lz_o:
;CHECK: IMAGE_GATHER4_C_LZ_O {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @gather4_c_lz_o() #0 {
main_body:
@@ -448,7 +448,7 @@ main_body:
ret void
}
-;CHECK-LABEL: @gather4_c_lz_o_v8
+;CHECK-LABEL: {{^}}gather4_c_lz_o_v8:
;CHECK: IMAGE_GATHER4_C_LZ_O {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @gather4_c_lz_o_v8() #0 {
main_body:
diff --git a/test/CodeGen/R600/llvm.SI.getlod.ll b/test/CodeGen/R600/llvm.SI.getlod.ll
index a7a17ec3fff..225a72428ee 100644
--- a/test/CodeGen/R600/llvm.SI.getlod.ll
+++ b/test/CodeGen/R600/llvm.SI.getlod.ll
@@ -1,6 +1,6 @@
;RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck %s
-;CHECK-LABEL: @getlod
+;CHECK-LABEL: {{^}}getlod:
;CHECK: IMAGE_GET_LOD {{v\[[0-9]+:[0-9]+\]}}, 3, 0, 0, -1, 0, 0, 0, 0, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @getlod() #0 {
main_body:
@@ -11,7 +11,7 @@ main_body:
ret void
}
-;CHECK-LABEL: @getlod_v2
+;CHECK-LABEL: {{^}}getlod_v2:
;CHECK: IMAGE_GET_LOD {{v\[[0-9]+:[0-9]+\]}}, 3, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @getlod_v2() #0 {
main_body:
@@ -22,7 +22,7 @@ main_body:
ret void
}
-;CHECK-LABEL: @getlod_v4
+;CHECK-LABEL: {{^}}getlod_v4:
;CHECK: IMAGE_GET_LOD {{v\[[0-9]+:[0-9]+\]}}, 3, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @getlod_v4() #0 {
main_body:
diff --git a/test/CodeGen/R600/llvm.SI.image.ll b/test/CodeGen/R600/llvm.SI.image.ll
index eac0b8eead3..d8fc748a810 100644
--- a/test/CodeGen/R600/llvm.SI.image.ll
+++ b/test/CodeGen/R600/llvm.SI.image.ll
@@ -1,6 +1,6 @@
;RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck %s
-;CHECK-LABEL: @image_load
+;CHECK-LABEL: {{^}}image_load:
;CHECK: IMAGE_LOAD {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @image_load() #0 {
main_body:
@@ -13,7 +13,7 @@ main_body:
ret void
}
-;CHECK-LABEL: @image_load_mip
+;CHECK-LABEL: {{^}}image_load_mip:
;CHECK: IMAGE_LOAD_MIP {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @image_load_mip() #0 {
main_body:
@@ -26,7 +26,7 @@ main_body:
ret void
}
-;CHECK-LABEL: @getresinfo
+;CHECK-LABEL: {{^}}getresinfo:
;CHECK: IMAGE_GET_RESINFO {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}
define void @getresinfo() #0 {
main_body:
diff --git a/test/CodeGen/R600/llvm.SI.image.sample.ll b/test/CodeGen/R600/llvm.SI.image.sample.ll
index 14dff7eb5fe..934bf425f48 100644
--- a/test/CodeGen/R600/llvm.SI.image.sample.ll
+++ b/test/CodeGen/R600/llvm.SI.image.sample.ll
@@ -1,6 +1,6 @@
;RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck %s
-;CHECK-LABEL: @sample
+;CHECK-LABEL: {{^}}sample:
;CHECK: IMAGE_SAMPLE {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @sample() #0 {
main_body:
@@ -13,7 +13,7 @@ main_body:
ret void
}
-;CHECK-LABEL: @sample_cl
+;CHECK-LABEL: {{^}}sample_cl:
;CHECK: IMAGE_SAMPLE_CL {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @sample_cl() #0 {
main_body:
@@ -26,7 +26,7 @@ main_body:
ret void
}
-;CHECK-LABEL: @sample_d
+;CHECK-LABEL: {{^}}sample_d:
;CHECK: IMAGE_SAMPLE_D {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @sample_d() #0 {
main_body:
@@ -39,7 +39,7 @@ main_body:
ret void
}
-;CHECK-LABEL: @sample_d_cl
+;CHECK-LABEL: {{^}}sample_d_cl:
;CHECK: IMAGE_SAMPLE_D_CL {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @sample_d_cl() #0 {
main_body:
@@ -52,7 +52,7 @@ main_body:
ret void
}
-;CHECK-LABEL: @sample_l
+;CHECK-LABEL: {{^}}sample_l:
;CHECK: IMAGE_SAMPLE_L {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @sample_l() #0 {
main_body:
@@ -65,7 +65,7 @@ main_body:
ret void
}
-;CHECK-LABEL: @sample_b
+;CHECK-LABEL: {{^}}sample_b:
;CHECK: IMAGE_SAMPLE_B {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @sample_b() #0 {
main_body:
@@ -78,7 +78,7 @@ main_body:
ret void
}
-;CHECK-LABEL: @sample_b_cl
+;CHECK-LABEL: {{^}}sample_b_cl:
;CHECK: IMAGE_SAMPLE_B_CL {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @sample_b_cl() #0 {
main_body:
@@ -91,7 +91,7 @@ main_body:
ret void
}
-;CHECK-LABEL: @sample_lz
+;CHECK-LABEL: {{^}}sample_lz:
;CHECK: IMAGE_SAMPLE_LZ {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @sample_lz() #0 {
main_body:
@@ -104,7 +104,7 @@ main_body:
ret void
}
-;CHECK-LABEL: @sample_cd
+;CHECK-LABEL: {{^}}sample_cd:
;CHECK: IMAGE_SAMPLE_CD {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @sample_cd() #0 {
main_body:
@@ -117,7 +117,7 @@ main_body:
ret void
}
-;CHECK-LABEL: @sample_cd_cl
+;CHECK-LABEL: {{^}}sample_cd_cl:
;CHECK: IMAGE_SAMPLE_CD_CL {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @sample_cd_cl() #0 {
main_body:
@@ -130,7 +130,7 @@ main_body:
ret void
}
-;CHECK-LABEL: @sample_c
+;CHECK-LABEL: {{^}}sample_c:
;CHECK: IMAGE_SAMPLE_C {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @sample_c() #0 {
main_body:
@@ -143,7 +143,7 @@ main_body:
ret void
}
-;CHECK-LABEL: @sample_c_cl
+;CHECK-LABEL: {{^}}sample_c_cl:
;CHECK: IMAGE_SAMPLE_C_CL {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @sample_c_cl() #0 {
main_body:
@@ -156,7 +156,7 @@ main_body:
ret void
}
-;CHECK-LABEL: @sample_c_d
+;CHECK-LABEL: {{^}}sample_c_d:
;CHECK: IMAGE_SAMPLE_C_D {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @sample_c_d() #0 {
main_body:
@@ -169,7 +169,7 @@ main_body:
ret void
}
-;CHECK-LABEL: @sample_c_d_cl
+;CHECK-LABEL: {{^}}sample_c_d_cl:
;CHECK: IMAGE_SAMPLE_C_D_CL {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @sample_c_d_cl() #0 {
main_body:
@@ -182,7 +182,7 @@ main_body:
ret void
}
-;CHECK-LABEL: @sample_c_l
+;CHECK-LABEL: {{^}}sample_c_l:
;CHECK: IMAGE_SAMPLE_C_L {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @sample_c_l() #0 {
main_body:
@@ -195,7 +195,7 @@ main_body:
ret void
}
-;CHECK-LABEL: @sample_c_b
+;CHECK-LABEL: {{^}}sample_c_b:
;CHECK: IMAGE_SAMPLE_C_B {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @sample_c_b() #0 {
main_body:
@@ -208,7 +208,7 @@ main_body:
ret void
}
-;CHECK-LABEL: @sample_c_b_cl
+;CHECK-LABEL: {{^}}sample_c_b_cl:
;CHECK: IMAGE_SAMPLE_C_B_CL {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @sample_c_b_cl() #0 {
main_body:
@@ -221,7 +221,7 @@ main_body:
ret void
}
-;CHECK-LABEL: @sample_c_lz
+;CHECK-LABEL: {{^}}sample_c_lz:
;CHECK: IMAGE_SAMPLE_C_LZ {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @sample_c_lz() #0 {
main_body:
@@ -234,7 +234,7 @@ main_body:
ret void
}
-;CHECK-LABEL: @sample_c_cd
+;CHECK-LABEL: {{^}}sample_c_cd:
;CHECK: IMAGE_SAMPLE_C_CD {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @sample_c_cd() #0 {
main_body:
@@ -247,7 +247,7 @@ main_body:
ret void
}
-;CHECK-LABEL: @sample_c_cd_cl
+;CHECK-LABEL: {{^}}sample_c_cd_cl:
;CHECK: IMAGE_SAMPLE_C_CD_CL {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @sample_c_cd_cl() #0 {
main_body:
diff --git a/test/CodeGen/R600/llvm.SI.image.sample.o.ll b/test/CodeGen/R600/llvm.SI.image.sample.o.ll
index ed3ef914014..71c2107e36f 100644
--- a/test/CodeGen/R600/llvm.SI.image.sample.o.ll
+++ b/test/CodeGen/R600/llvm.SI.image.sample.o.ll
@@ -1,6 +1,6 @@
;RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck %s
-;CHECK-LABEL: @sample
+;CHECK-LABEL: {{^}}sample:
;CHECK: IMAGE_SAMPLE_O {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @sample() #0 {
main_body:
@@ -13,7 +13,7 @@ main_body:
ret void
}
-;CHECK-LABEL: @sample_cl
+;CHECK-LABEL: {{^}}sample_cl:
;CHECK: IMAGE_SAMPLE_CL_O {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @sample_cl() #0 {
main_body:
@@ -26,7 +26,7 @@ main_body:
ret void
}
-;CHECK-LABEL: @sample_d
+;CHECK-LABEL: {{^}}sample_d:
;CHECK: IMAGE_SAMPLE_D_O {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @sample_d() #0 {
main_body:
@@ -39,7 +39,7 @@ main_body:
ret void
}
-;CHECK-LABEL: @sample_d_cl
+;CHECK-LABEL: {{^}}sample_d_cl:
;CHECK: IMAGE_SAMPLE_D_CL_O {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @sample_d_cl() #0 {
main_body:
@@ -52,7 +52,7 @@ main_body:
ret void
}
-;CHECK-LABEL: @sample_l
+;CHECK-LABEL: {{^}}sample_l:
;CHECK: IMAGE_SAMPLE_L_O {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @sample_l() #0 {
main_body:
@@ -65,7 +65,7 @@ main_body:
ret void
}
-;CHECK-LABEL: @sample_b
+;CHECK-LABEL: {{^}}sample_b:
;CHECK: IMAGE_SAMPLE_B_O {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @sample_b() #0 {
main_body:
@@ -78,7 +78,7 @@ main_body:
ret void
}
-;CHECK-LABEL: @sample_b_cl
+;CHECK-LABEL: {{^}}sample_b_cl:
;CHECK: IMAGE_SAMPLE_B_CL_O {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @sample_b_cl() #0 {
main_body:
@@ -91,7 +91,7 @@ main_body:
ret void
}
-;CHECK-LABEL: @sample_lz
+;CHECK-LABEL: {{^}}sample_lz:
;CHECK: IMAGE_SAMPLE_LZ_O {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @sample_lz() #0 {
main_body:
@@ -104,7 +104,7 @@ main_body:
ret void
}
-;CHECK-LABEL: @sample_cd
+;CHECK-LABEL: {{^}}sample_cd:
;CHECK: IMAGE_SAMPLE_CD_O {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @sample_cd() #0 {
main_body:
@@ -117,7 +117,7 @@ main_body:
ret void
}
-;CHECK-LABEL: @sample_cd_cl
+;CHECK-LABEL: {{^}}sample_cd_cl:
;CHECK: IMAGE_SAMPLE_CD_CL_O {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @sample_cd_cl() #0 {
main_body:
@@ -130,7 +130,7 @@ main_body:
ret void
}
-;CHECK-LABEL: @sample_c
+;CHECK-LABEL: {{^}}sample_c:
;CHECK: IMAGE_SAMPLE_C_O {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @sample_c() #0 {
main_body:
@@ -143,7 +143,7 @@ main_body:
ret void
}
-;CHECK-LABEL: @sample_c_cl
+;CHECK-LABEL: {{^}}sample_c_cl:
;CHECK: IMAGE_SAMPLE_C_CL_O {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @sample_c_cl() #0 {
main_body:
@@ -156,7 +156,7 @@ main_body:
ret void
}
-;CHECK-LABEL: @sample_c_d
+;CHECK-LABEL: {{^}}sample_c_d:
;CHECK: IMAGE_SAMPLE_C_D_O {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @sample_c_d() #0 {
main_body:
@@ -169,7 +169,7 @@ main_body:
ret void
}
-;CHECK-LABEL: @sample_c_d_cl
+;CHECK-LABEL: {{^}}sample_c_d_cl:
;CHECK: IMAGE_SAMPLE_C_D_CL_O {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @sample_c_d_cl() #0 {
main_body:
@@ -182,7 +182,7 @@ main_body:
ret void
}
-;CHECK-LABEL: @sample_c_l
+;CHECK-LABEL: {{^}}sample_c_l:
;CHECK: IMAGE_SAMPLE_C_L_O {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @sample_c_l() #0 {
main_body:
@@ -195,7 +195,7 @@ main_body:
ret void
}
-;CHECK-LABEL: @sample_c_b
+;CHECK-LABEL: {{^}}sample_c_b:
;CHECK: IMAGE_SAMPLE_C_B_O {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @sample_c_b() #0 {
main_body:
@@ -208,7 +208,7 @@ main_body:
ret void
}
-;CHECK-LABEL: @sample_c_b_cl
+;CHECK-LABEL: {{^}}sample_c_b_cl:
;CHECK: IMAGE_SAMPLE_C_B_CL_O {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @sample_c_b_cl() #0 {
main_body:
@@ -221,7 +221,7 @@ main_body:
ret void
}
-;CHECK-LABEL: @sample_c_lz
+;CHECK-LABEL: {{^}}sample_c_lz:
;CHECK: IMAGE_SAMPLE_C_LZ_O {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @sample_c_lz() #0 {
main_body:
@@ -234,7 +234,7 @@ main_body:
ret void
}
-;CHECK-LABEL: @sample_c_cd
+;CHECK-LABEL: {{^}}sample_c_cd:
;CHECK: IMAGE_SAMPLE_C_CD_O {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @sample_c_cd() #0 {
main_body:
@@ -247,7 +247,7 @@ main_body:
ret void
}
-;CHECK-LABEL: @sample_c_cd_cl
+;CHECK-LABEL: {{^}}sample_c_cd_cl:
;CHECK: IMAGE_SAMPLE_C_CD_CL_O {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @sample_c_cd_cl() #0 {
main_body:
diff --git a/test/CodeGen/R600/llvm.SI.load.dword.ll b/test/CodeGen/R600/llvm.SI.load.dword.ll
index a6227755b72..524040046af 100644
--- a/test/CodeGen/R600/llvm.SI.load.dword.ll
+++ b/test/CodeGen/R600/llvm.SI.load.dword.ll
@@ -3,7 +3,7 @@
; Example of a simple geometry shader loading vertex attributes from the
; ESGS ring buffer
-; CHECK-LABEL: @main
+; CHECK-LABEL: {{^}}main:
; CHECK: BUFFER_LOAD_DWORD
; CHECK: BUFFER_LOAD_DWORD
; CHECK: BUFFER_LOAD_DWORD
diff --git a/test/CodeGen/R600/llvm.SI.sample-masked.ll b/test/CodeGen/R600/llvm.SI.sample-masked.ll
index 445359a4ced..2c511447277 100644
--- a/test/CodeGen/R600/llvm.SI.sample-masked.ll
+++ b/test/CodeGen/R600/llvm.SI.sample-masked.ll
@@ -1,6 +1,6 @@
;RUN: llc < %s -march=r600 -mcpu=verde | FileCheck %s
-; CHECK-LABEL: @v1
+; CHECK-LABEL: {{^}}v1:
; CHECK: IMAGE_SAMPLE {{v\[[0-9]+:[0-9]+\]}}, 13
define void @v1(i32 %a1) #0 {
entry:
@@ -13,7 +13,7 @@ entry:
ret void
}
-; CHECK-LABEL: @v2
+; CHECK-LABEL: {{^}}v2:
; CHECK: IMAGE_SAMPLE {{v\[[0-9]+:[0-9]+\]}}, 11
define void @v2(i32 %a1) #0 {
entry:
@@ -26,7 +26,7 @@ entry:
ret void
}
-; CHECK-LABEL: @v3
+; CHECK-LABEL: {{^}}v3:
; CHECK: IMAGE_SAMPLE {{v\[[0-9]+:[0-9]+\]}}, 14
define void @v3(i32 %a1) #0 {
entry:
@@ -39,7 +39,7 @@ entry:
ret void
}
-; CHECK-LABEL: @v4
+; CHECK-LABEL: {{^}}v4:
; CHECK: IMAGE_SAMPLE {{v\[[0-9]+:[0-9]+\]}}, 7
define void @v4(i32 %a1) #0 {
entry:
@@ -52,7 +52,7 @@ entry:
ret void
}
-; CHECK-LABEL: @v5
+; CHECK-LABEL: {{^}}v5:
; CHECK: IMAGE_SAMPLE {{v\[[0-9]+:[0-9]+\]}}, 10
define void @v5(i32 %a1) #0 {
entry:
@@ -64,7 +64,7 @@ entry:
ret void
}
-; CHECK-LABEL: @v6
+; CHECK-LABEL: {{^}}v6:
; CHECK: IMAGE_SAMPLE {{v\[[0-9]+:[0-9]+\]}}, 6
define void @v6(i32 %a1) #0 {
entry:
@@ -76,7 +76,7 @@ entry:
ret void
}
-; CHECK-LABEL: @v7
+; CHECK-LABEL: {{^}}v7:
; CHECK: IMAGE_SAMPLE {{v\[[0-9]+:[0-9]+\]}}, 9
define void @v7(i32 %a1) #0 {
entry:
diff --git a/test/CodeGen/R600/llvm.SI.sample.ll b/test/CodeGen/R600/llvm.SI.sample.ll
index 24e8f640d90..951e9ea85bc 100644
--- a/test/CodeGen/R600/llvm.SI.sample.ll
+++ b/test/CodeGen/R600/llvm.SI.sample.ll
@@ -135,7 +135,7 @@ define void @test(i32 %a1, i32 %a2, i32 %a3, i32 %a4) #0 {
ret void
}
-; CHECK: @v1
+; CHECK: {{^}}v1:
; CHECK: IMAGE_SAMPLE {{v\[[0-9]+:[0-9]+\]}}, 15
define void @v1(i32 %a1) #0 {
entry:
diff --git a/test/CodeGen/R600/llvm.SI.sendmsg.ll b/test/CodeGen/R600/llvm.SI.sendmsg.ll
index 581d422b095..9763f882665 100644
--- a/test/CodeGen/R600/llvm.SI.sendmsg.ll
+++ b/test/CodeGen/R600/llvm.SI.sendmsg.ll
@@ -1,6 +1,6 @@
;RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck %s
-; CHECK-LABEL: @main
+; CHECK-LABEL: {{^}}main:
; CHECK: S_SENDMSG Gs(emit stream 0)
; CHECK: S_SENDMSG Gs(cut stream 1)
; CHECK: S_SENDMSG Gs(emit-cut stream 2)
diff --git a/test/CodeGen/R600/llvm.SI.tbuffer.store.ll b/test/CodeGen/R600/llvm.SI.tbuffer.store.ll
index 740581a6966..44472790b37 100644
--- a/test/CodeGen/R600/llvm.SI.tbuffer.store.ll
+++ b/test/CodeGen/R600/llvm.SI.tbuffer.store.ll
@@ -1,6 +1,6 @@
;RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck %s
-;CHECK-LABEL: @test1
+;CHECK-LABEL: {{^}}test1:
;CHECK: TBUFFER_STORE_FORMAT_XYZW {{v\[[0-9]+:[0-9]+\]}}, 0x20, -1, 0, -1, 0, 14, 4, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, -1, 0, 0
define void @test1(i32 %a1, i32 %vaddr) #0 {
%vdata = insertelement <4 x i32> undef, i32 %a1, i32 0
@@ -10,7 +10,7 @@ define void @test1(i32 %a1, i32 %vaddr) #0 {
ret void
}
-;CHECK-LABEL: @test2
+;CHECK-LABEL: {{^}}test2:
;CHECK: TBUFFER_STORE_FORMAT_XYZ {{v\[[0-9]+:[0-9]+\]}}, 0x18, -1, 0, -1, 0, 13, 4, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, -1, 0, 0
define void @test2(i32 %a1, i32 %vaddr) #0 {
%vdata = insertelement <4 x i32> undef, i32 %a1, i32 0
@@ -20,7 +20,7 @@ define void @test2(i32 %a1, i32 %vaddr) #0 {
ret void
}
-;CHECK-LABEL: @test3
+;CHECK-LABEL: {{^}}test3:
;CHECK: TBUFFER_STORE_FORMAT_XY {{v\[[0-9]+:[0-9]+\]}}, 0x10, -1, 0, -1, 0, 11, 4, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, -1, 0, 0
define void @test3(i32 %a1, i32 %vaddr) #0 {
%vdata = insertelement <2 x i32> undef, i32 %a1, i32 0
@@ -30,7 +30,7 @@ define void @test3(i32 %a1, i32 %vaddr) #0 {
ret void
}
-;CHECK-LABEL: @test4
+;CHECK-LABEL: {{^}}test4:
;CHECK: TBUFFER_STORE_FORMAT_X {{v[0-9]+}}, 0x8, -1, 0, -1, 0, 4, 4, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, -1, 0, 0
define void @test4(i32 %vdata, i32 %vaddr) #0 {
call void @llvm.SI.tbuffer.store.i32(<16 x i8> undef, i32 %vdata,
diff --git a/test/CodeGen/R600/llvm.amdgpu.kilp.ll b/test/CodeGen/R600/llvm.amdgpu.kilp.ll
index 1b8b1bfd208..d2e2b6870df 100644
--- a/test/CodeGen/R600/llvm.amdgpu.kilp.ll
+++ b/test/CodeGen/R600/llvm.amdgpu.kilp.ll
@@ -1,6 +1,6 @@
; RUN: llc -march=r600 -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
-; SI-LABEL: @kilp_gs_const
+; SI-LABEL: {{^}}kilp_gs_const:
; SI: S_MOV_B64 exec, 0
define void @kilp_gs_const() #0 {
main_body:
diff --git a/test/CodeGen/R600/llvm.amdgpu.lrp.ll b/test/CodeGen/R600/llvm.amdgpu.lrp.ll
index c493a016e33..b360baca3c9 100644
--- a/test/CodeGen/R600/llvm.amdgpu.lrp.ll
+++ b/test/CodeGen/R600/llvm.amdgpu.lrp.ll
@@ -2,7 +2,7 @@
declare float @llvm.AMDGPU.lrp(float, float, float) nounwind readnone
-; FUNC-LABEL: @test_lrp
+; FUNC-LABEL: {{^}}test_lrp:
; SI: V_SUB_F32
; SI: V_MAD_F32
define void @test_lrp(float addrspace(1)* %out, float %src0, float %src1, float %src2) nounwind {
diff --git a/test/CodeGen/R600/llvm.exp2.ll b/test/CodeGen/R600/llvm.exp2.ll
index 119d5ef49a5..db8419eb269 100644
--- a/test/CodeGen/R600/llvm.exp2.ll
+++ b/test/CodeGen/R600/llvm.exp2.ll
@@ -2,7 +2,7 @@
;RUN: llc < %s -march=r600 -mcpu=cayman | FileCheck %s --check-prefix=CM-CHECK --check-prefix=FUNC
;RUN: llc < %s -march=r600 -mcpu=SI | FileCheck %s --check-prefix=SI-CHECK --check-prefix=FUNC
-;FUNC-LABEL: @test
+;FUNC-LABEL: {{^}}test:
;EG-CHECK: EXP_IEEE
;CM-CHECK-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
;CM-CHECK-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
@@ -17,7 +17,7 @@ entry:
ret void
}
-;FUNC-LABEL: @testv2
+;FUNC-LABEL: {{^}}testv2:
;EG-CHECK: EXP_IEEE
;EG-CHECK: EXP_IEEE
; FIXME: We should be able to merge these packets together on Cayman so we
@@ -40,7 +40,7 @@ entry:
ret void
}
-;FUNC-LABEL: @testv4
+;FUNC-LABEL: {{^}}testv4:
;EG-CHECK: EXP_IEEE
;EG-CHECK: EXP_IEEE
;EG-CHECK: EXP_IEEE
diff --git a/test/CodeGen/R600/llvm.floor.ll b/test/CodeGen/R600/llvm.floor.ll
index f7071cd9b87..fd5682c7e04 100644
--- a/test/CodeGen/R600/llvm.floor.ll
+++ b/test/CodeGen/R600/llvm.floor.ll
@@ -1,9 +1,9 @@
; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s --check-prefix=R600-CHECK
; RUN: llc < %s -march=r600 -mcpu=SI -verify-machineinstrs | FileCheck %s --check-prefix=SI-CHECK
-; R600-CHECK: @f32
+; R600-CHECK: {{^}}f32:
; R600-CHECK: FLOOR
-; SI-CHECK: @f32
+; SI-CHECK: {{^}}f32:
; SI-CHECK: V_FLOOR_F32_e32
define void @f32(float addrspace(1)* %out, float %in) {
entry:
@@ -12,10 +12,10 @@ entry:
ret void
}
-; R600-CHECK: @v2f32
+; R600-CHECK: {{^}}v2f32:
; R600-CHECK: FLOOR
; R600-CHECK: FLOOR
-; SI-CHECK: @v2f32
+; SI-CHECK: {{^}}v2f32:
; SI-CHECK: V_FLOOR_F32_e32
; SI-CHECK: V_FLOOR_F32_e32
define void @v2f32(<2 x float> addrspace(1)* %out, <2 x float> %in) {
@@ -25,12 +25,12 @@ entry:
ret void
}
-; R600-CHECK: @v4f32
+; R600-CHECK: {{^}}v4f32:
; R600-CHECK: FLOOR
; R600-CHECK: FLOOR
; R600-CHECK: FLOOR
; R600-CHECK: FLOOR
-; SI-CHECK: @v4f32
+; SI-CHECK: {{^}}v4f32:
; SI-CHECK: V_FLOOR_F32_e32
; SI-CHECK: V_FLOOR_F32_e32
; SI-CHECK: V_FLOOR_F32_e32
diff --git a/test/CodeGen/R600/llvm.log2.ll b/test/CodeGen/R600/llvm.log2.ll
index 4cba2d44a5c..954a70edb3e 100644
--- a/test/CodeGen/R600/llvm.log2.ll
+++ b/test/CodeGen/R600/llvm.log2.ll
@@ -2,7 +2,7 @@
;RUN: llc < %s -march=r600 -mcpu=cayman | FileCheck %s --check-prefix=CM-CHECK --check-prefix=FUNC
;RUN: llc < %s -march=r600 -mcpu=SI | FileCheck %s --check-prefix=SI-CHECK --check-prefix=FUNC
-;FUNC-LABEL: @test
+;FUNC-LABEL: {{^}}test:
;EG-CHECK: LOG_IEEE
;CM-CHECK-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
;CM-CHECK-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
@@ -17,7 +17,7 @@ entry:
ret void
}
-;FUNC-LABEL: @testv2
+;FUNC-LABEL: {{^}}testv2:
;EG-CHECK: LOG_IEEE
;EG-CHECK: LOG_IEEE
; FIXME: We should be able to merge these packets together on Cayman so we
@@ -40,7 +40,7 @@ entry:
ret void
}
-;FUNC-LABEL: @testv4
+;FUNC-LABEL: {{^}}testv4:
;EG-CHECK: LOG_IEEE
;EG-CHECK: LOG_IEEE
;EG-CHECK: LOG_IEEE
diff --git a/test/CodeGen/R600/llvm.memcpy.ll b/test/CodeGen/R600/llvm.memcpy.ll
index 68a4050ce37..5cf388daea6 100644
--- a/test/CodeGen/R600/llvm.memcpy.ll
+++ b/test/CodeGen/R600/llvm.memcpy.ll
@@ -4,7 +4,7 @@ declare void @llvm.memcpy.p3i8.p3i8.i32(i8 addrspace(3)* nocapture, i8 addrspace
declare void @llvm.memcpy.p1i8.p1i8.i64(i8 addrspace(1)* nocapture, i8 addrspace(1)* nocapture, i64, i32, i1) nounwind
-; FUNC-LABEL: @test_small_memcpy_i64_lds_to_lds_align1
+; FUNC-LABEL: {{^}}test_small_memcpy_i64_lds_to_lds_align1:
; SI: DS_READ_U8
; SI: DS_WRITE_B8
; SI: DS_READ_U8
@@ -85,7 +85,7 @@ define void @test_small_memcpy_i64_lds_to_lds_align1(i64 addrspace(3)* noalias %
ret void
}
-; FUNC-LABEL: @test_small_memcpy_i64_lds_to_lds_align2
+; FUNC-LABEL: {{^}}test_small_memcpy_i64_lds_to_lds_align2:
; SI: DS_READ_U16
; SI: DS_READ_U16
; SI: DS_READ_U16
@@ -130,7 +130,7 @@ define void @test_small_memcpy_i64_lds_to_lds_align2(i64 addrspace(3)* noalias %
ret void
}
-; FUNC-LABEL: @test_small_memcpy_i64_lds_to_lds_align4
+; FUNC-LABEL: {{^}}test_small_memcpy_i64_lds_to_lds_align4:
; SI-DAG: DS_READ_B32
; SI-DAG: DS_WRITE_B32
@@ -167,7 +167,7 @@ define void @test_small_memcpy_i64_lds_to_lds_align4(i64 addrspace(3)* noalias %
}
; FIXME: Use 64-bit ops
-; FUNC-LABEL: @test_small_memcpy_i64_lds_to_lds_align8
+; FUNC-LABEL: {{^}}test_small_memcpy_i64_lds_to_lds_align8:
; SI-DAG: DS_READ_B32
; SI-DAG: DS_WRITE_B32
@@ -204,7 +204,7 @@ define void @test_small_memcpy_i64_lds_to_lds_align8(i64 addrspace(3)* noalias %
ret void
}
-; FUNC-LABEL: @test_small_memcpy_i64_global_to_global_align1
+; FUNC-LABEL: {{^}}test_small_memcpy_i64_global_to_global_align1:
; SI-DAG: BUFFER_LOAD_UBYTE
; SI-DAG: BUFFER_STORE_BYTE
; SI-DAG: BUFFER_LOAD_UBYTE
@@ -281,7 +281,7 @@ define void @test_small_memcpy_i64_global_to_global_align1(i64 addrspace(1)* noa
ret void
}
-; FUNC-LABEL: @test_small_memcpy_i64_global_to_global_align2
+; FUNC-LABEL: {{^}}test_small_memcpy_i64_global_to_global_align2:
; SI-DAG: BUFFER_LOAD_USHORT
; SI-DAG: BUFFER_LOAD_USHORT
; SI-DAG: BUFFER_LOAD_USHORT
@@ -324,7 +324,7 @@ define void @test_small_memcpy_i64_global_to_global_align2(i64 addrspace(1)* noa
ret void
}
-; FUNC-LABEL: @test_small_memcpy_i64_global_to_global_align4
+; FUNC-LABEL: {{^}}test_small_memcpy_i64_global_to_global_align4:
; SI: BUFFER_LOAD_DWORDX4
; SI: BUFFER_LOAD_DWORDX4
; SI: BUFFER_STORE_DWORDX4
@@ -337,7 +337,7 @@ define void @test_small_memcpy_i64_global_to_global_align4(i64 addrspace(1)* noa
ret void
}
-; FUNC-LABEL: @test_small_memcpy_i64_global_to_global_align8
+; FUNC-LABEL: {{^}}test_small_memcpy_i64_global_to_global_align8:
; SI: BUFFER_LOAD_DWORDX4
; SI: BUFFER_LOAD_DWORDX4
; SI: BUFFER_STORE_DWORDX4
@@ -350,7 +350,7 @@ define void @test_small_memcpy_i64_global_to_global_align8(i64 addrspace(1)* noa
ret void
}
-; FUNC-LABEL: @test_small_memcpy_i64_global_to_global_align16
+; FUNC-LABEL: {{^}}test_small_memcpy_i64_global_to_global_align16:
; SI: BUFFER_LOAD_DWORDX4
; SI: BUFFER_LOAD_DWORDX4
; SI: BUFFER_STORE_DWORDX4
diff --git a/test/CodeGen/R600/llvm.rint.f64.ll b/test/CodeGen/R600/llvm.rint.f64.ll
index 3e2884b7ce0..ec82c91260c 100644
--- a/test/CodeGen/R600/llvm.rint.f64.ll
+++ b/test/CodeGen/R600/llvm.rint.f64.ll
@@ -1,7 +1,7 @@
; RUN: llc -march=r600 -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -check-prefix=CI -check-prefix=FUNC %s
; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-; FUNC-LABEL: @rint_f64
+; FUNC-LABEL: {{^}}rint_f64:
; CI: V_RNDNE_F64_e32
; SI-DAG: V_ADD_F64
@@ -17,7 +17,7 @@ entry:
ret void
}
-; FUNC-LABEL: @rint_v2f64
+; FUNC-LABEL: {{^}}rint_v2f64:
; CI: V_RNDNE_F64_e32
; CI: V_RNDNE_F64_e32
define void @rint_v2f64(<2 x double> addrspace(1)* %out, <2 x double> %in) {
@@ -27,7 +27,7 @@ entry:
ret void
}
-; FUNC-LABEL: @rint_v4f64
+; FUNC-LABEL: {{^}}rint_v4f64:
; CI: V_RNDNE_F64_e32
; CI: V_RNDNE_F64_e32
; CI: V_RNDNE_F64_e32
diff --git a/test/CodeGen/R600/llvm.rint.ll b/test/CodeGen/R600/llvm.rint.ll
index 209bb4358fd..ddad5c8dbc0 100644
--- a/test/CodeGen/R600/llvm.rint.ll
+++ b/test/CodeGen/R600/llvm.rint.ll
@@ -1,7 +1,7 @@
; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck %s -check-prefix=R600 -check-prefix=FUNC
; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-; FUNC-LABEL: @rint_f32
+; FUNC-LABEL: {{^}}rint_f32:
; R600: RNDNE
; SI: V_RNDNE_F32_e32
@@ -12,7 +12,7 @@ entry:
ret void
}
-; FUNC-LABEL: @rint_v2f32
+; FUNC-LABEL: {{^}}rint_v2f32:
; R600: RNDNE
; R600: RNDNE
@@ -25,7 +25,7 @@ entry:
ret void
}
-; FUNC-LABEL: @rint_v4f32
+; FUNC-LABEL: {{^}}rint_v4f32:
; R600: RNDNE
; R600: RNDNE
; R600: RNDNE
@@ -42,7 +42,7 @@ entry:
ret void
}
-; FUNC-LABEL: @legacy_amdil_round_nearest_f32
+; FUNC-LABEL: {{^}}legacy_amdil_round_nearest_f32:
; R600: RNDNE
; SI: V_RNDNE_F32_e32
diff --git a/test/CodeGen/R600/llvm.round.ll b/test/CodeGen/R600/llvm.round.ll
index 5700427ee89..bedf4ba72ae 100644
--- a/test/CodeGen/R600/llvm.round.ll
+++ b/test/CodeGen/R600/llvm.round.ll
@@ -1,6 +1,6 @@
; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck %s --check-prefix=R600 --check-prefix=FUNC
-; FUNC-LABEL: @f32
+; FUNC-LABEL: {{^}}f32:
; R600: FRACT {{.*}}, [[ARG:KC[0-9]\[[0-9]+\]\.[XYZW]]]
; R600-DAG: ADD {{.*}}, -0.5
; R600-DAG: CEIL {{.*}} [[ARG]]
diff --git a/test/CodeGen/R600/llvm.sin.ll b/test/CodeGen/R600/llvm.sin.ll
index 50ddb147462..a365ecb0ccb 100644
--- a/test/CodeGen/R600/llvm.sin.ll
+++ b/test/CodeGen/R600/llvm.sin.ll
@@ -19,7 +19,7 @@ define void @sin_f32(float addrspace(1)* %out, float %x) #1 {
ret void
}
-; FUNC-LABEL: @sin_3x_f32
+; FUNC-LABEL: {{^}}sin_3x_f32:
; SI-UNSAFE-NOT: V_ADD_F32
; SI-UNSAFE: 0x3ef47644
; SI-UNSAFE: V_MUL_F32
@@ -35,7 +35,7 @@ define void @sin_3x_f32(float addrspace(1)* %out, float %x) #1 {
ret void
}
-; FUNC-LABEL: @sin_2x_f32
+; FUNC-LABEL: {{^}}sin_2x_f32:
; SI-UNSAFE-NOT: V_ADD_F32
; SI-UNSAFE: 0x3ea2f983
; SI-UNSAFE: V_MUL_F32
@@ -51,7 +51,7 @@ define void @sin_2x_f32(float addrspace(1)* %out, float %x) #1 {
ret void
}
-; FUNC-LABEL: @test_2sin_f32
+; FUNC-LABEL: {{^}}test_2sin_f32:
; SI-UNSAFE: 0x3ea2f983
; SI-UNSAFE: V_MUL_F32
; SI-SAFE: V_ADD_F32
@@ -66,7 +66,7 @@ define void @test_2sin_f32(float addrspace(1)* %out, float %x) #1 {
ret void
}
-; FUNC-LABEL: @sin_v4f32
+; FUNC-LABEL: {{^}}sin_v4f32:
; EG: SIN * T{{[0-9]+\.[XYZW], PV\.[XYZW]}}
; EG: SIN * T{{[0-9]+\.[XYZW], PV\.[XYZW]}}
; EG: SIN * T{{[0-9]+\.[XYZW], PV\.[XYZW]}}
diff --git a/test/CodeGen/R600/llvm.sqrt.ll b/test/CodeGen/R600/llvm.sqrt.ll
index 4eee37ffbe2..40d6b460e7b 100644
--- a/test/CodeGen/R600/llvm.sqrt.ll
+++ b/test/CodeGen/R600/llvm.sqrt.ll
@@ -1,10 +1,10 @@
; RUN: llc < %s -march=r600 --mcpu=redwood | FileCheck %s --check-prefix=R600-CHECK
; RUN: llc < %s -march=r600 --mcpu=SI -verify-machineinstrs| FileCheck %s --check-prefix=SI-CHECK
-; R600-CHECK-LABEL: @sqrt_f32
+; R600-CHECK-LABEL: {{^}}sqrt_f32:
; R600-CHECK: RECIPSQRT_CLAMPED * T{{[0-9]\.[XYZW]}}, KC0[2].Z
; R600-CHECK: MUL NON-IEEE T{{[0-9]\.[XYZW]}}, KC0[2].Z, PS
-; SI-CHECK-LABEL: @sqrt_f32
+; SI-CHECK-LABEL: {{^}}sqrt_f32:
; SI-CHECK: V_SQRT_F32_e32
define void @sqrt_f32(float addrspace(1)* %out, float %in) {
entry:
@@ -13,12 +13,12 @@ entry:
ret void
}
-; R600-CHECK-LABEL: @sqrt_v2f32
+; R600-CHECK-LABEL: {{^}}sqrt_v2f32:
; R600-CHECK-DAG: RECIPSQRT_CLAMPED * T{{[0-9]\.[XYZW]}}, KC0[2].W
; R600-CHECK-DAG: MUL NON-IEEE T{{[0-9]\.[XYZW]}}, KC0[2].W, PS
; R600-CHECK-DAG: RECIPSQRT_CLAMPED * T{{[0-9]\.[XYZW]}}, KC0[3].X
; R600-CHECK-DAG: MUL NON-IEEE T{{[0-9]\.[XYZW]}}, KC0[3].X, PS
-; SI-CHECK-LABEL: @sqrt_v2f32
+; SI-CHECK-LABEL: {{^}}sqrt_v2f32:
; SI-CHECK: V_SQRT_F32_e32
; SI-CHECK: V_SQRT_F32_e32
define void @sqrt_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %in) {
@@ -28,7 +28,7 @@ entry:
ret void
}
-; R600-CHECK-LABEL: @sqrt_v4f32
+; R600-CHECK-LABEL: {{^}}sqrt_v4f32:
; R600-CHECK-DAG: RECIPSQRT_CLAMPED * T{{[0-9]\.[XYZW]}}, KC0[3].Y
; R600-CHECK-DAG: MUL NON-IEEE T{{[0-9]\.[XYZW]}}, KC0[3].Y, PS
; R600-CHECK-DAG: RECIPSQRT_CLAMPED * T{{[0-9]\.[XYZW]}}, KC0[3].Z
@@ -37,7 +37,7 @@ entry:
; R600-CHECK-DAG: MUL NON-IEEE T{{[0-9]\.[XYZW]}}, KC0[3].W, PS
; R600-CHECK-DAG: RECIPSQRT_CLAMPED * T{{[0-9]\.[XYZW]}}, KC0[4].X
; R600-CHECK-DAG: MUL NON-IEEE T{{[0-9]\.[XYZW]}}, KC0[4].X, PS
-; SI-CHECK-LABEL: @sqrt_v4f32
+; SI-CHECK-LABEL: {{^}}sqrt_v4f32:
; SI-CHECK: V_SQRT_F32_e32
; SI-CHECK: V_SQRT_F32_e32
; SI-CHECK: V_SQRT_F32_e32
diff --git a/test/CodeGen/R600/llvm.trunc.ll b/test/CodeGen/R600/llvm.trunc.ll
index fa6fb9906dd..5585477ef29 100644
--- a/test/CodeGen/R600/llvm.trunc.ll
+++ b/test/CodeGen/R600/llvm.trunc.ll
@@ -1,6 +1,6 @@
; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
-; CHECK-LABEL: @trunc_f32
+; CHECK-LABEL: {{^}}trunc_f32:
; CHECK: TRUNC
define void @trunc_f32(float addrspace(1)* %out, float %in) {
diff --git a/test/CodeGen/R600/load-i1.ll b/test/CodeGen/R600/load-i1.ll
index 9ba81b85f59..2408f70cff2 100644
--- a/test/CodeGen/R600/load-i1.ll
+++ b/test/CodeGen/R600/load-i1.ll
@@ -1,7 +1,7 @@
; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs< %s | FileCheck -check-prefix=SI %s
-; SI-LABEL: @global_copy_i1_to_i1
+; SI-LABEL: {{^}}global_copy_i1_to_i1:
; SI: BUFFER_LOAD_UBYTE
; SI: V_AND_B32_e32 v{{[0-9]+}}, 1
; SI: BUFFER_STORE_BYTE
@@ -12,7 +12,7 @@ define void @global_copy_i1_to_i1(i1 addrspace(1)* %out, i1 addrspace(1)* %in) n
ret void
}
-; SI-LABEL: @global_sextload_i1_to_i32
+; SI-LABEL: {{^}}global_sextload_i1_to_i32:
; XSI: BUFFER_LOAD_BYTE
; SI: BUFFER_STORE_DWORD
; SI: S_ENDPGM
@@ -23,7 +23,7 @@ define void @global_sextload_i1_to_i32(i32 addrspace(1)* %out, i1 addrspace(1)*
ret void
}
-; SI-LABEL: @global_zextload_i1_to_i32
+; SI-LABEL: {{^}}global_zextload_i1_to_i32:
; SI: BUFFER_LOAD_UBYTE
; SI: BUFFER_STORE_DWORD
; SI: S_ENDPGM
@@ -34,7 +34,7 @@ define void @global_zextload_i1_to_i32(i32 addrspace(1)* %out, i1 addrspace(1)*
ret void
}
-; SI-LABEL: @global_sextload_i1_to_i64
+; SI-LABEL: {{^}}global_sextload_i1_to_i64:
; XSI: BUFFER_LOAD_BYTE
; SI: BUFFER_STORE_DWORDX2
; SI: S_ENDPGM
@@ -45,7 +45,7 @@ define void @global_sextload_i1_to_i64(i64 addrspace(1)* %out, i1 addrspace(1)*
ret void
}
-; SI-LABEL: @global_zextload_i1_to_i64
+; SI-LABEL: {{^}}global_zextload_i1_to_i64:
; SI: BUFFER_LOAD_UBYTE
; SI: BUFFER_STORE_DWORDX2
; SI: S_ENDPGM
@@ -56,7 +56,7 @@ define void @global_zextload_i1_to_i64(i64 addrspace(1)* %out, i1 addrspace(1)*
ret void
}
-; SI-LABEL: @i1_arg
+; SI-LABEL: {{^}}i1_arg:
; SI: BUFFER_LOAD_UBYTE
; SI: V_AND_B32_e32
; SI: BUFFER_STORE_BYTE
@@ -66,7 +66,7 @@ define void @i1_arg(i1 addrspace(1)* %out, i1 %x) nounwind {
ret void
}
-; SI-LABEL: @i1_arg_zext_i32
+; SI-LABEL: {{^}}i1_arg_zext_i32:
; SI: BUFFER_LOAD_UBYTE
; SI: BUFFER_STORE_DWORD
; SI: S_ENDPGM
@@ -76,7 +76,7 @@ define void @i1_arg_zext_i32(i32 addrspace(1)* %out, i1 %x) nounwind {
ret void
}
-; SI-LABEL: @i1_arg_zext_i64
+; SI-LABEL: {{^}}i1_arg_zext_i64:
; SI: BUFFER_LOAD_UBYTE
; SI: BUFFER_STORE_DWORDX2
; SI: S_ENDPGM
@@ -86,7 +86,7 @@ define void @i1_arg_zext_i64(i64 addrspace(1)* %out, i1 %x) nounwind {
ret void
}
-; SI-LABEL: @i1_arg_sext_i32
+; SI-LABEL: {{^}}i1_arg_sext_i32:
; XSI: BUFFER_LOAD_BYTE
; SI: BUFFER_STORE_DWORD
; SI: S_ENDPGM
@@ -96,7 +96,7 @@ define void @i1_arg_sext_i32(i32 addrspace(1)* %out, i1 %x) nounwind {
ret void
}
-; SI-LABEL: @i1_arg_sext_i64
+; SI-LABEL: {{^}}i1_arg_sext_i64:
; XSI: BUFFER_LOAD_BYTE
; SI: BUFFER_STORE_DWORDX2
; SI: S_ENDPGM
diff --git a/test/CodeGen/R600/load.ll b/test/CodeGen/R600/load.ll
index 8905fbd3aeb..d92ade1cfa1 100644
--- a/test/CodeGen/R600/load.ll
+++ b/test/CodeGen/R600/load.ll
@@ -7,7 +7,7 @@
;===------------------------------------------------------------------------===;
; Load an i8 value from the global address space.
-; FUNC-LABEL: @load_i8
+; FUNC-LABEL: {{^}}load_i8:
; R600-CHECK: VTX_READ_8 T{{[0-9]+\.X, T[0-9]+\.X}}
; SI-CHECK: BUFFER_LOAD_UBYTE v{{[0-9]+}},
@@ -18,7 +18,7 @@ define void @load_i8(i32 addrspace(1)* %out, i8 addrspace(1)* %in) {
ret void
}
-; FUNC-LABEL: @load_i8_sext
+; FUNC-LABEL: {{^}}load_i8_sext:
; R600-CHECK: VTX_READ_8 [[DST:T[0-9]\.[XYZW]]], [[DST]]
; R600-CHECK: LSHL {{[* ]*}}T{{[0-9]}}.[[LSHL_CHAN:[XYZW]]], [[DST]]
; R600-CHECK: 24
@@ -33,7 +33,7 @@ entry:
ret void
}
-; FUNC-LABEL: @load_v2i8
+; FUNC-LABEL: {{^}}load_v2i8:
; R600-CHECK: VTX_READ_8
; R600-CHECK: VTX_READ_8
; SI-CHECK: BUFFER_LOAD_UBYTE
@@ -46,7 +46,7 @@ entry:
ret void
}
-; FUNC-LABEL: @load_v2i8_sext
+; FUNC-LABEL: {{^}}load_v2i8_sext:
; R600-CHECK-DAG: VTX_READ_8 [[DST_X:T[0-9]\.[XYZW]]], [[DST_X]]
; R600-CHECK-DAG: VTX_READ_8 [[DST_Y:T[0-9]\.[XYZW]]], [[DST_Y]]
; R600-CHECK-DAG: LSHL {{[* ]*}}T{{[0-9]}}.[[LSHL_X_CHAN:[XYZW]]], [[DST_X]]
@@ -67,7 +67,7 @@ entry:
ret void
}
-; FUNC-LABEL: @load_v4i8
+; FUNC-LABEL: {{^}}load_v4i8:
; R600-CHECK: VTX_READ_8
; R600-CHECK: VTX_READ_8
; R600-CHECK: VTX_READ_8
@@ -84,7 +84,7 @@ entry:
ret void
}
-; FUNC-LABEL: @load_v4i8_sext
+; FUNC-LABEL: {{^}}load_v4i8_sext:
; R600-CHECK-DAG: VTX_READ_8 [[DST_X:T[0-9]\.[XYZW]]], [[DST_X]]
; R600-CHECK-DAG: VTX_READ_8 [[DST_Y:T[0-9]\.[XYZW]]], [[DST_Y]]
; R600-CHECK-DAG: VTX_READ_8 [[DST_Z:T[0-9]\.[XYZW]]], [[DST_Z]]
@@ -118,7 +118,7 @@ entry:
}
; Load an i16 value from the global address space.
-; FUNC-LABEL: @load_i16
+; FUNC-LABEL: {{^}}load_i16:
; R600-CHECK: VTX_READ_16 T{{[0-9]+\.X, T[0-9]+\.X}}
; SI-CHECK: BUFFER_LOAD_USHORT
define void @load_i16(i32 addrspace(1)* %out, i16 addrspace(1)* %in) {
@@ -129,7 +129,7 @@ entry:
ret void
}
-; FUNC-LABEL: @load_i16_sext
+; FUNC-LABEL: {{^}}load_i16_sext:
; R600-CHECK: VTX_READ_16 [[DST:T[0-9]\.[XYZW]]], [[DST]]
; R600-CHECK: LSHL {{[* ]*}}T{{[0-9]}}.[[LSHL_CHAN:[XYZW]]], [[DST]]
; R600-CHECK: 16
@@ -144,7 +144,7 @@ entry:
ret void
}
-; FUNC-LABEL: @load_v2i16
+; FUNC-LABEL: {{^}}load_v2i16:
; R600-CHECK: VTX_READ_16
; R600-CHECK: VTX_READ_16
; SI-CHECK: BUFFER_LOAD_USHORT
@@ -157,7 +157,7 @@ entry:
ret void
}
-; FUNC-LABEL: @load_v2i16_sext
+; FUNC-LABEL: {{^}}load_v2i16_sext:
; R600-CHECK-DAG: VTX_READ_16 [[DST_X:T[0-9]\.[XYZW]]], [[DST_X]]
; R600-CHECK-DAG: VTX_READ_16 [[DST_Y:T[0-9]\.[XYZW]]], [[DST_Y]]
; R600-CHECK-DAG: LSHL {{[* ]*}}T{{[0-9]}}.[[LSHL_X_CHAN:[XYZW]]], [[DST_X]]
@@ -178,7 +178,7 @@ entry:
ret void
}
-; FUNC-LABEL: @load_v4i16
+; FUNC-LABEL: {{^}}load_v4i16:
; R600-CHECK: VTX_READ_16
; R600-CHECK: VTX_READ_16
; R600-CHECK: VTX_READ_16
@@ -195,7 +195,7 @@ entry:
ret void
}
-; FUNC-LABEL: @load_v4i16_sext
+; FUNC-LABEL: {{^}}load_v4i16_sext:
; R600-CHECK-DAG: VTX_READ_16 [[DST_X:T[0-9]\.[XYZW]]], [[DST_X]]
; R600-CHECK-DAG: VTX_READ_16 [[DST_Y:T[0-9]\.[XYZW]]], [[DST_Y]]
; R600-CHECK-DAG: VTX_READ_16 [[DST_Z:T[0-9]\.[XYZW]]], [[DST_Z]]
@@ -229,7 +229,7 @@ entry:
}
; load an i32 value from the global address space.
-; FUNC-LABEL: @load_i32
+; FUNC-LABEL: {{^}}load_i32:
; R600-CHECK: VTX_READ_32 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0
; SI-CHECK: BUFFER_LOAD_DWORD v{{[0-9]+}}
@@ -241,7 +241,7 @@ entry:
}
; load a f32 value from the global address space.
-; FUNC-LABEL: @load_f32
+; FUNC-LABEL: {{^}}load_f32:
; R600-CHECK: VTX_READ_32 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0
; SI-CHECK: BUFFER_LOAD_DWORD v{{[0-9]+}}
@@ -253,7 +253,7 @@ entry:
}
; load a v2f32 value from the global address space
-; FUNC-LABEL: @load_v2f32
+; FUNC-LABEL: {{^}}load_v2f32:
; R600-CHECK: MEM_RAT
; R600-CHECK: VTX_READ_64
; SI-CHECK: BUFFER_LOAD_DWORDX2
@@ -264,7 +264,7 @@ entry:
ret void
}
-; FUNC-LABEL: @load_i64
+; FUNC-LABEL: {{^}}load_i64:
; R600-CHECK: VTX_READ_64
; SI-CHECK: BUFFER_LOAD_DWORDX2
define void @load_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
@@ -274,7 +274,7 @@ entry:
ret void
}
-; FUNC-LABEL: @load_i64_sext
+; FUNC-LABEL: {{^}}load_i64_sext:
; R600-CHECK: MEM_RAT
; R600-CHECK: MEM_RAT
; R600-CHECK: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, T{{[0-9]\.[XYZW]}}, literal.x
@@ -289,7 +289,7 @@ entry:
ret void
}
-; FUNC-LABEL: @load_i64_zext
+; FUNC-LABEL: {{^}}load_i64_zext:
; R600-CHECK: MEM_RAT
; R600-CHECK: MEM_RAT
define void @load_i64_zext(i64 addrspace(1)* %out, i32 addrspace(1)* %in) {
@@ -300,7 +300,7 @@ entry:
ret void
}
-; FUNC-LABEL: @load_v8i32
+; FUNC-LABEL: {{^}}load_v8i32:
; R600-CHECK: VTX_READ_128
; R600-CHECK: VTX_READ_128
; XXX: We should be using DWORDX4 instructions on SI.
@@ -319,7 +319,7 @@ entry:
ret void
}
-; FUNC-LABEL: @load_v16i32
+; FUNC-LABEL: {{^}}load_v16i32:
; R600-CHECK: VTX_READ_128
; R600-CHECK: VTX_READ_128
; R600-CHECK: VTX_READ_128
@@ -353,7 +353,7 @@ entry:
;===------------------------------------------------------------------------===;
; Load a sign-extended i8 value
-; FUNC-LABEL: @load_const_i8_sext
+; FUNC-LABEL: {{^}}load_const_i8_sext:
; R600-CHECK: VTX_READ_8 [[DST:T[0-9]\.[XYZW]]], [[DST]]
; R600-CHECK: LSHL {{[* ]*}}T{{[0-9]}}.[[LSHL_CHAN:[XYZW]]], [[DST]]
; R600-CHECK: 24
@@ -369,7 +369,7 @@ entry:
}
; Load an aligned i8 value
-; FUNC-LABEL: @load_const_i8_aligned
+; FUNC-LABEL: {{^}}load_const_i8_aligned:
; R600-CHECK: VTX_READ_8 T{{[0-9]+\.X, T[0-9]+\.X}}
; SI-CHECK: BUFFER_LOAD_UBYTE v{{[0-9]+}},
define void @load_const_i8_aligned(i32 addrspace(1)* %out, i8 addrspace(2)* %in) {
@@ -381,7 +381,7 @@ entry:
}
; Load an un-aligned i8 value
-; FUNC-LABEL: @load_const_i8_unaligned
+; FUNC-LABEL: {{^}}load_const_i8_unaligned:
; R600-CHECK: VTX_READ_8 T{{[0-9]+\.X, T[0-9]+\.X}}
; SI-CHECK: BUFFER_LOAD_UBYTE v{{[0-9]+}},
define void @load_const_i8_unaligned(i32 addrspace(1)* %out, i8 addrspace(2)* %in) {
@@ -394,7 +394,7 @@ entry:
}
; Load a sign-extended i16 value
-; FUNC-LABEL: @load_const_i16_sext
+; FUNC-LABEL: {{^}}load_const_i16_sext:
; R600-CHECK: VTX_READ_16 [[DST:T[0-9]\.[XYZW]]], [[DST]]
; R600-CHECK: LSHL {{[* ]*}}T{{[0-9]}}.[[LSHL_CHAN:[XYZW]]], [[DST]]
; R600-CHECK: 16
@@ -410,7 +410,7 @@ entry:
}
; Load an aligned i16 value
-; FUNC-LABEL: @load_const_i16_aligned
+; FUNC-LABEL: {{^}}load_const_i16_aligned:
; R600-CHECK: VTX_READ_16 T{{[0-9]+\.X, T[0-9]+\.X}}
; SI-CHECK: BUFFER_LOAD_USHORT
define void @load_const_i16_aligned(i32 addrspace(1)* %out, i16 addrspace(2)* %in) {
@@ -422,7 +422,7 @@ entry:
}
; Load an un-aligned i16 value
-; FUNC-LABEL: @load_const_i16_unaligned
+; FUNC-LABEL: {{^}}load_const_i16_unaligned:
; R600-CHECK: VTX_READ_16 T{{[0-9]+\.X, T[0-9]+\.X}}
; SI-CHECK: BUFFER_LOAD_USHORT
define void @load_const_i16_unaligned(i32 addrspace(1)* %out, i16 addrspace(2)* %in) {
@@ -435,7 +435,7 @@ entry:
}
; Load an i32 value from the constant address space.
-; FUNC-LABEL: @load_const_addrspace_i32
+; FUNC-LABEL: {{^}}load_const_addrspace_i32:
; R600-CHECK: VTX_READ_32 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0
; SI-CHECK: S_LOAD_DWORD s{{[0-9]+}}
@@ -447,7 +447,7 @@ entry:
}
; Load a f32 value from the constant address space.
-; FUNC-LABEL: @load_const_addrspace_f32
+; FUNC-LABEL: {{^}}load_const_addrspace_f32:
; R600-CHECK: VTX_READ_32 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0
; SI-CHECK: S_LOAD_DWORD s{{[0-9]+}}
@@ -462,7 +462,7 @@ define void @load_const_addrspace_f32(float addrspace(1)* %out, float addrspace(
;===------------------------------------------------------------------------===;
; Load an i8 value from the local address space.
-; FUNC-LABEL: @load_i8_local
+; FUNC-LABEL: {{^}}load_i8_local:
; R600-CHECK: LDS_UBYTE_READ_RET
; SI-CHECK-NOT: S_WQM_B64
; SI-CHECK: S_MOV_B32 m0
@@ -474,7 +474,7 @@ define void @load_i8_local(i32 addrspace(1)* %out, i8 addrspace(3)* %in) {
ret void
}
-; FUNC-LABEL: @load_i8_sext_local
+; FUNC-LABEL: {{^}}load_i8_sext_local:
; R600-CHECK: LDS_UBYTE_READ_RET
; R600-CHECK: ASHR
; SI-CHECK-NOT: S_WQM_B64
@@ -488,7 +488,7 @@ entry:
ret void
}
-; FUNC-LABEL: @load_v2i8_local
+; FUNC-LABEL: {{^}}load_v2i8_local:
; R600-CHECK: LDS_UBYTE_READ_RET
; R600-CHECK: LDS_UBYTE_READ_RET
; SI-CHECK-NOT: S_WQM_B64
@@ -503,7 +503,7 @@ entry:
ret void
}
-; FUNC-LABEL: @load_v2i8_sext_local
+; FUNC-LABEL: {{^}}load_v2i8_sext_local:
; R600-CHECK-DAG: LDS_UBYTE_READ_RET
; R600-CHECK-DAG: LDS_UBYTE_READ_RET
; R600-CHECK-DAG: ASHR
@@ -520,7 +520,7 @@ entry:
ret void
}
-; FUNC-LABEL: @load_v4i8_local
+; FUNC-LABEL: {{^}}load_v4i8_local:
; R600-CHECK: LDS_UBYTE_READ_RET
; R600-CHECK: LDS_UBYTE_READ_RET
; R600-CHECK: LDS_UBYTE_READ_RET
@@ -539,7 +539,7 @@ entry:
ret void
}
-; FUNC-LABEL: @load_v4i8_sext_local
+; FUNC-LABEL: {{^}}load_v4i8_sext_local:
; R600-CHECK-DAG: LDS_UBYTE_READ_RET
; R600-CHECK-DAG: LDS_UBYTE_READ_RET
; R600-CHECK-DAG: LDS_UBYTE_READ_RET
@@ -563,7 +563,7 @@ entry:
}
; Load an i16 value from the local address space.
-; FUNC-LABEL: @load_i16_local
+; FUNC-LABEL: {{^}}load_i16_local:
; R600-CHECK: LDS_USHORT_READ_RET
; SI-CHECK-NOT: S_WQM_B64
; SI-CHECK: S_MOV_B32 m0
@@ -576,7 +576,7 @@ entry:
ret void
}
-; FUNC-LABEL: @load_i16_sext_local
+; FUNC-LABEL: {{^}}load_i16_sext_local:
; R600-CHECK: LDS_USHORT_READ_RET
; R600-CHECK: ASHR
; SI-CHECK-NOT: S_WQM_B64
@@ -590,7 +590,7 @@ entry:
ret void
}
-; FUNC-LABEL: @load_v2i16_local
+; FUNC-LABEL: {{^}}load_v2i16_local:
; R600-CHECK: LDS_USHORT_READ_RET
; R600-CHECK: LDS_USHORT_READ_RET
; SI-CHECK-NOT: S_WQM_B64
@@ -605,7 +605,7 @@ entry:
ret void
}
-; FUNC-LABEL: @load_v2i16_sext_local
+; FUNC-LABEL: {{^}}load_v2i16_sext_local:
; R600-CHECK-DAG: LDS_USHORT_READ_RET
; R600-CHECK-DAG: LDS_USHORT_READ_RET
; R600-CHECK-DAG: ASHR
@@ -622,7 +622,7 @@ entry:
ret void
}
-; FUNC-LABEL: @load_v4i16_local
+; FUNC-LABEL: {{^}}load_v4i16_local:
; R600-CHECK: LDS_USHORT_READ_RET
; R600-CHECK: LDS_USHORT_READ_RET
; R600-CHECK: LDS_USHORT_READ_RET
@@ -641,7 +641,7 @@ entry:
ret void
}
-; FUNC-LABEL: @load_v4i16_sext_local
+; FUNC-LABEL: {{^}}load_v4i16_sext_local:
; R600-CHECK-DAG: LDS_USHORT_READ_RET
; R600-CHECK-DAG: LDS_USHORT_READ_RET
; R600-CHECK-DAG: LDS_USHORT_READ_RET
@@ -665,7 +665,7 @@ entry:
}
; load an i32 value from the local address space.
-; FUNC-LABEL: @load_i32_local
+; FUNC-LABEL: {{^}}load_i32_local:
; R600-CHECK: LDS_READ_RET
; SI-CHECK-NOT: S_WQM_B64
; SI-CHECK: S_MOV_B32 m0
@@ -678,7 +678,7 @@ entry:
}
; load a f32 value from the local address space.
-; FUNC-LABEL: @load_f32_local
+; FUNC-LABEL: {{^}}load_f32_local:
; R600-CHECK: LDS_READ_RET
; SI-CHECK: S_MOV_B32 m0
; SI-CHECK: DS_READ_B32
@@ -690,7 +690,7 @@ entry:
}
; load a v2f32 value from the local address space
-; FUNC-LABEL: @load_v2f32_local
+; FUNC-LABEL: {{^}}load_v2f32_local:
; R600-CHECK: LDS_READ_RET
; R600-CHECK: LDS_READ_RET
; SI-CHECK: S_MOV_B32 m0
diff --git a/test/CodeGen/R600/load.vec.ll b/test/CodeGen/R600/load.vec.ll
index 81a6310bbcc..5776d45f56c 100644
--- a/test/CodeGen/R600/load.vec.ll
+++ b/test/CodeGen/R600/load.vec.ll
@@ -2,9 +2,9 @@
; RUN: llc < %s -march=r600 -mcpu=SI -verify-machineinstrs | FileCheck --check-prefix=SI-CHECK %s
; load a v2i32 value from the global address space.
-; EG-CHECK: @load_v2i32
+; EG-CHECK: {{^}}load_v2i32:
; EG-CHECK: VTX_READ_64 T{{[0-9]+}}.XY, T{{[0-9]+}}.X, 0
-; SI-CHECK: @load_v2i32
+; SI-CHECK: {{^}}load_v2i32:
; SI-CHECK: BUFFER_LOAD_DWORDX2 v[{{[0-9]+:[0-9]+}}]
define void @load_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
%a = load <2 x i32> addrspace(1) * %in
@@ -13,9 +13,9 @@ define void @load_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %i
}
; load a v4i32 value from the global address space.
-; EG-CHECK: @load_v4i32
+; EG-CHECK: {{^}}load_v4i32:
; EG-CHECK: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 0
-; SI-CHECK: @load_v4i32
+; SI-CHECK: {{^}}load_v4i32:
; SI-CHECK: BUFFER_LOAD_DWORDX4 v[{{[0-9]+:[0-9]+}}]
define void @load_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
%a = load <4 x i32> addrspace(1) * %in
diff --git a/test/CodeGen/R600/load64.ll b/test/CodeGen/R600/load64.ll
index a117557e98e..1668db2128f 100644
--- a/test/CodeGen/R600/load64.ll
+++ b/test/CodeGen/R600/load64.ll
@@ -1,7 +1,7 @@
; RUN: llc < %s -march=r600 -mcpu=tahiti -verify-machineinstrs | FileCheck %s
; load a f64 value from the global address space.
-; CHECK-LABEL: @load_f64:
+; CHECK-LABEL: {{^}}load_f64:
; CHECK: BUFFER_LOAD_DWORDX2 v[{{[0-9]+:[0-9]+}}]
; CHECK: BUFFER_STORE_DWORDX2 v[{{[0-9]+:[0-9]+}}]
define void @load_f64(double addrspace(1)* %out, double addrspace(1)* %in) {
@@ -10,7 +10,7 @@ define void @load_f64(double addrspace(1)* %out, double addrspace(1)* %in) {
ret void
}
-; CHECK-LABEL: @load_i64:
+; CHECK-LABEL: {{^}}load_i64:
; CHECK: BUFFER_LOAD_DWORDX2 v[{{[0-9]+:[0-9]+}}]
; CHECK: BUFFER_STORE_DWORDX2 v[{{[0-9]+:[0-9]+}}]
define void @load_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
@@ -20,7 +20,7 @@ define void @load_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
}
; Load a f64 value from the constant address space.
-; CHECK-LABEL: @load_const_addrspace_f64:
+; CHECK-LABEL: {{^}}load_const_addrspace_f64:
; CHECK: S_LOAD_DWORDX2 s[{{[0-9]+:[0-9]+}}]
; CHECK: BUFFER_STORE_DWORDX2 v[{{[0-9]+:[0-9]+}}]
define void @load_const_addrspace_f64(double addrspace(1)* %out, double addrspace(2)* %in) {
diff --git a/test/CodeGen/R600/local-64.ll b/test/CodeGen/R600/local-64.ll
index ef48eefa5ee..0eb9bc0c059 100644
--- a/test/CodeGen/R600/local-64.ll
+++ b/test/CodeGen/R600/local-64.ll
@@ -1,7 +1,7 @@
; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs< %s | FileCheck --check-prefix=SI --check-prefix=BOTH %s
; RUN: llc -march=r600 -mcpu=bonaire -verify-machineinstrs< %s | FileCheck --check-prefix=CI --check-prefix=BOTH %s
-; BOTH-LABEL: @local_i32_load
+; BOTH-LABEL: {{^}}local_i32_load:
; BOTH: DS_READ_B32 [[REG:v[0-9]+]], v{{[0-9]+}}, 0x1c, [M0]
; BOTH: BUFFER_STORE_DWORD [[REG]],
define void @local_i32_load(i32 addrspace(1)* %out, i32 addrspace(3)* %in) nounwind {
@@ -11,7 +11,7 @@ define void @local_i32_load(i32 addrspace(1)* %out, i32 addrspace(3)* %in) nounw
ret void
}
-; BOTH-LABEL: @local_i32_load_0_offset
+; BOTH-LABEL: {{^}}local_i32_load_0_offset:
; BOTH: DS_READ_B32 [[REG:v[0-9]+]], v{{[0-9]+}}, 0x0, [M0]
; BOTH: BUFFER_STORE_DWORD [[REG]],
define void @local_i32_load_0_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %in) nounwind {
@@ -20,7 +20,7 @@ define void @local_i32_load_0_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %
ret void
}
-; BOTH-LABEL: @local_i8_load_i16_max_offset
+; BOTH-LABEL: {{^}}local_i8_load_i16_max_offset:
; BOTH-NOT: ADD
; BOTH: DS_READ_U8 [[REG:v[0-9]+]], {{v[0-9]+}}, 0xffff, [M0]
; BOTH: BUFFER_STORE_BYTE [[REG]],
@@ -31,7 +31,7 @@ define void @local_i8_load_i16_max_offset(i8 addrspace(1)* %out, i8 addrspace(3)
ret void
}
-; BOTH-LABEL: @local_i8_load_over_i16_max_offset
+; BOTH-LABEL: {{^}}local_i8_load_over_i16_max_offset:
; The LDS offset will be 65536 bytes, which is larger than the size of LDS on
; SI, which is why it is being OR'd with the base pointer.
; SI: S_OR_B32 [[ADDR:s[0-9]+]], s{{[0-9]+}}, 0x10000
@@ -46,7 +46,7 @@ define void @local_i8_load_over_i16_max_offset(i8 addrspace(1)* %out, i8 addrspa
ret void
}
-; BOTH-LABEL: @local_i64_load
+; BOTH-LABEL: {{^}}local_i64_load:
; BOTH-NOT: ADD
; BOTH: DS_READ_B64 [[REG:v[[0-9]+:[0-9]+]]], v{{[0-9]+}}, 0x38, [M0]
; BOTH: BUFFER_STORE_DWORDX2 [[REG]],
@@ -57,7 +57,7 @@ define void @local_i64_load(i64 addrspace(1)* %out, i64 addrspace(3)* %in) nounw
ret void
}
-; BOTH-LABEL: @local_i64_load_0_offset
+; BOTH-LABEL: {{^}}local_i64_load_0_offset:
; BOTH: DS_READ_B64 [[REG:v\[[0-9]+:[0-9]+\]]], v{{[0-9]+}}, 0x0, [M0]
; BOTH: BUFFER_STORE_DWORDX2 [[REG]],
define void @local_i64_load_0_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %in) nounwind {
@@ -66,7 +66,7 @@ define void @local_i64_load_0_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %
ret void
}
-; BOTH-LABEL: @local_f64_load
+; BOTH-LABEL: {{^}}local_f64_load:
; BOTH-NOT: ADD
; BOTH: DS_READ_B64 [[REG:v[[0-9]+:[0-9]+]]], v{{[0-9]+}}, 0x38, [M0]
; BOTH: BUFFER_STORE_DWORDX2 [[REG]],
@@ -77,7 +77,7 @@ define void @local_f64_load(double addrspace(1)* %out, double addrspace(3)* %in)
ret void
}
-; BOTH-LABEL: @local_f64_load_0_offset
+; BOTH-LABEL: {{^}}local_f64_load_0_offset:
; BOTH: DS_READ_B64 [[REG:v\[[0-9]+:[0-9]+\]]], v{{[0-9]+}}, 0x0, [M0]
; BOTH: BUFFER_STORE_DWORDX2 [[REG]],
define void @local_f64_load_0_offset(double addrspace(1)* %out, double addrspace(3)* %in) nounwind {
@@ -86,7 +86,7 @@ define void @local_f64_load_0_offset(double addrspace(1)* %out, double addrspace
ret void
}
-; BOTH-LABEL: @local_i64_store
+; BOTH-LABEL: {{^}}local_i64_store:
; BOTH-NOT: ADD
; BOTH: DS_WRITE_B64 v{{[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}}, 0x38 [M0]
define void @local_i64_store(i64 addrspace(3)* %out) nounwind {
@@ -95,7 +95,7 @@ define void @local_i64_store(i64 addrspace(3)* %out) nounwind {
ret void
}
-; BOTH-LABEL: @local_i64_store_0_offset
+; BOTH-LABEL: {{^}}local_i64_store_0_offset:
; BOTH-NOT: ADD
; BOTH: DS_WRITE_B64 v{{[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}}, 0x0 [M0]
define void @local_i64_store_0_offset(i64 addrspace(3)* %out) nounwind {
@@ -103,7 +103,7 @@ define void @local_i64_store_0_offset(i64 addrspace(3)* %out) nounwind {
ret void
}
-; BOTH-LABEL: @local_f64_store
+; BOTH-LABEL: {{^}}local_f64_store:
; BOTH-NOT: ADD
; BOTH: DS_WRITE_B64 v{{[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}}, 0x38 [M0]
define void @local_f64_store(double addrspace(3)* %out) nounwind {
@@ -112,14 +112,14 @@ define void @local_f64_store(double addrspace(3)* %out) nounwind {
ret void
}
-; BOTH-LABEL: @local_f64_store_0_offset
+; BOTH-LABEL: {{^}}local_f64_store_0_offset:
; BOTH: DS_WRITE_B64 v{{[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}}, 0x0 [M0]
define void @local_f64_store_0_offset(double addrspace(3)* %out) nounwind {
store double 20.0, double addrspace(3)* %out, align 8
ret void
}
-; BOTH-LABEL: @local_v2i64_store
+; BOTH-LABEL: {{^}}local_v2i64_store:
; BOTH-NOT: ADD
; BOTH-DAG: DS_WRITE_B64 v{{[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}}, 0x78 [M0]
; BOTH-DAG: DS_WRITE_B64 v{{[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}}, 0x70 [M0]
@@ -129,7 +129,7 @@ define void @local_v2i64_store(<2 x i64> addrspace(3)* %out) nounwind {
ret void
}
-; BOTH-LABEL: @local_v2i64_store_0_offset
+; BOTH-LABEL: {{^}}local_v2i64_store_0_offset:
; BOTH-NOT: ADD
; BOTH-DAG: DS_WRITE_B64 v{{[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}}, 0x8 [M0]
; BOTH-DAG: DS_WRITE_B64 v{{[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}}, 0x0 [M0]
@@ -138,7 +138,7 @@ define void @local_v2i64_store_0_offset(<2 x i64> addrspace(3)* %out) nounwind {
ret void
}
-; BOTH-LABEL: @local_v4i64_store
+; BOTH-LABEL: {{^}}local_v4i64_store:
; BOTH-NOT: ADD
; BOTH-DAG: DS_WRITE_B64 v{{[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}}, 0xf8 [M0]
; BOTH-DAG: DS_WRITE_B64 v{{[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}}, 0xf0 [M0]
@@ -150,7 +150,7 @@ define void @local_v4i64_store(<4 x i64> addrspace(3)* %out) nounwind {
ret void
}
-; BOTH-LABEL: @local_v4i64_store_0_offset
+; BOTH-LABEL: {{^}}local_v4i64_store_0_offset:
; BOTH-NOT: ADD
; BOTH-DAG: DS_WRITE_B64 v{{[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}}, 0x18 [M0]
; BOTH-DAG: DS_WRITE_B64 v{{[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}}, 0x10 [M0]
diff --git a/test/CodeGen/R600/local-atomics.ll b/test/CodeGen/R600/local-atomics.ll
index 65802e91a46..340290c4f94 100644
--- a/test/CodeGen/R600/local-atomics.ll
+++ b/test/CodeGen/R600/local-atomics.ll
@@ -2,7 +2,7 @@
; RUN: llc -march=r600 -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -check-prefix=CI -check-prefix=FUNC %s
; RUN: llc -march=r600 -mcpu=redwood -verify-machineinstrs < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
-; FUNC-LABEL: @lds_atomic_xchg_ret_i32:
+; FUNC-LABEL: {{^}}lds_atomic_xchg_ret_i32:
; EG: LDS_WRXCHG_RET *
; SI: S_LOAD_DWORD [[SPTR:s[0-9]+]],
; SI: V_MOV_B32_e32 [[DATA:v[0-9]+]], 4
@@ -16,7 +16,7 @@ define void @lds_atomic_xchg_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %
ret void
}
-; FUNC-LABEL: @lds_atomic_xchg_ret_i32_offset:
+; FUNC-LABEL: {{^}}lds_atomic_xchg_ret_i32_offset:
; EG: LDS_WRXCHG_RET *
; SI: DS_WRXCHG_RTN_B32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, 0x10
; SI: S_ENDPGM
@@ -28,7 +28,7 @@ define void @lds_atomic_xchg_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspac
}
; XXX - Is it really necessary to load 4 into VGPR?
-; FUNC-LABEL: @lds_atomic_add_ret_i32:
+; FUNC-LABEL: {{^}}lds_atomic_add_ret_i32:
; EG: LDS_ADD_RET *
; SI: S_LOAD_DWORD [[SPTR:s[0-9]+]],
; SI: V_MOV_B32_e32 [[DATA:v[0-9]+]], 4
@@ -42,7 +42,7 @@ define void @lds_atomic_add_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %p
ret void
}
-; FUNC-LABEL: @lds_atomic_add_ret_i32_offset:
+; FUNC-LABEL: {{^}}lds_atomic_add_ret_i32_offset:
; EG: LDS_ADD_RET *
; SI: DS_ADD_RTN_U32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, 0x10
; SI: S_ENDPGM
@@ -53,7 +53,7 @@ define void @lds_atomic_add_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace
ret void
}
-; FUNC-LABEL: @lds_atomic_add_ret_i32_bad_si_offset
+; FUNC-LABEL: {{^}}lds_atomic_add_ret_i32_bad_si_offset:
; EG: LDS_ADD_RET *
; SI: DS_ADD_RTN_U32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, 0x0
; CI: DS_ADD_RTN_U32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, 0x10
@@ -67,7 +67,7 @@ define void @lds_atomic_add_ret_i32_bad_si_offset(i32 addrspace(1)* %out, i32 ad
ret void
}
-; FUNC-LABEL: @lds_atomic_inc_ret_i32:
+; FUNC-LABEL: {{^}}lds_atomic_inc_ret_i32:
; EG: LDS_ADD_RET *
; SI: S_MOV_B32 [[SNEGONE:s[0-9]+]], -1
; SI: V_MOV_B32_e32 [[NEGONE:v[0-9]+]], [[SNEGONE]]
@@ -79,7 +79,7 @@ define void @lds_atomic_inc_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %p
ret void
}
-; FUNC-LABEL: @lds_atomic_inc_ret_i32_offset:
+; FUNC-LABEL: {{^}}lds_atomic_inc_ret_i32_offset:
; EG: LDS_ADD_RET *
; SI: S_MOV_B32 [[SNEGONE:s[0-9]+]], -1
; SI: V_MOV_B32_e32 [[NEGONE:v[0-9]+]], [[SNEGONE]]
@@ -92,7 +92,7 @@ define void @lds_atomic_inc_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace
ret void
}
-; FUNC-LABEL: @lds_atomic_inc_ret_i32_bad_si_offset:
+; FUNC-LABEL: {{^}}lds_atomic_inc_ret_i32_bad_si_offset:
; EG: LDS_ADD_RET *
; SI: DS_INC_RTN_U32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, 0x0
; CI: DS_INC_RTN_U32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, 0x10
@@ -106,7 +106,7 @@ define void @lds_atomic_inc_ret_i32_bad_si_offset(i32 addrspace(1)* %out, i32 ad
ret void
}
-; FUNC-LABEL: @lds_atomic_sub_ret_i32:
+; FUNC-LABEL: {{^}}lds_atomic_sub_ret_i32:
; EG: LDS_SUB_RET *
; SI: DS_SUB_RTN_U32
; SI: S_ENDPGM
@@ -116,7 +116,7 @@ define void @lds_atomic_sub_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %p
ret void
}
-; FUNC-LABEL: @lds_atomic_sub_ret_i32_offset:
+; FUNC-LABEL: {{^}}lds_atomic_sub_ret_i32_offset:
; EG: LDS_SUB_RET *
; SI: DS_SUB_RTN_U32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, 0x10
; SI: S_ENDPGM
@@ -127,7 +127,7 @@ define void @lds_atomic_sub_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace
ret void
}
-; FUNC-LABEL: @lds_atomic_dec_ret_i32:
+; FUNC-LABEL: {{^}}lds_atomic_dec_ret_i32:
; EG: LDS_SUB_RET *
; SI: S_MOV_B32 [[SNEGONE:s[0-9]+]], -1
; SI: V_MOV_B32_e32 [[NEGONE:v[0-9]+]], [[SNEGONE]]
@@ -139,7 +139,7 @@ define void @lds_atomic_dec_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %p
ret void
}
-; FUNC-LABEL: @lds_atomic_dec_ret_i32_offset:
+; FUNC-LABEL: {{^}}lds_atomic_dec_ret_i32_offset:
; EG: LDS_SUB_RET *
; SI: S_MOV_B32 [[SNEGONE:s[0-9]+]], -1
; SI: V_MOV_B32_e32 [[NEGONE:v[0-9]+]], [[SNEGONE]]
@@ -152,7 +152,7 @@ define void @lds_atomic_dec_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace
ret void
}
-; FUNC-LABEL: @lds_atomic_and_ret_i32:
+; FUNC-LABEL: {{^}}lds_atomic_and_ret_i32:
; EG: LDS_AND_RET *
; SI: DS_AND_RTN_B32
; SI: S_ENDPGM
@@ -162,7 +162,7 @@ define void @lds_atomic_and_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %p
ret void
}
-; FUNC-LABEL: @lds_atomic_and_ret_i32_offset:
+; FUNC-LABEL: {{^}}lds_atomic_and_ret_i32_offset:
; EG: LDS_AND_RET *
; SI: DS_AND_RTN_B32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, 0x10
; SI: S_ENDPGM
@@ -173,7 +173,7 @@ define void @lds_atomic_and_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace
ret void
}
-; FUNC-LABEL: @lds_atomic_or_ret_i32:
+; FUNC-LABEL: {{^}}lds_atomic_or_ret_i32:
; EG: LDS_OR_RET *
; SI: DS_OR_RTN_B32
; SI: S_ENDPGM
@@ -183,7 +183,7 @@ define void @lds_atomic_or_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %pt
ret void
}
-; FUNC-LABEL: @lds_atomic_or_ret_i32_offset:
+; FUNC-LABEL: {{^}}lds_atomic_or_ret_i32_offset:
; EG: LDS_OR_RET *
; SI: DS_OR_RTN_B32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, 0x10
; SI: S_ENDPGM
@@ -194,7 +194,7 @@ define void @lds_atomic_or_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace(
ret void
}
-; FUNC-LABEL: @lds_atomic_xor_ret_i32:
+; FUNC-LABEL: {{^}}lds_atomic_xor_ret_i32:
; EG: LDS_XOR_RET *
; SI: DS_XOR_RTN_B32
; SI: S_ENDPGM
@@ -204,7 +204,7 @@ define void @lds_atomic_xor_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %p
ret void
}
-; FUNC-LABEL: @lds_atomic_xor_ret_i32_offset:
+; FUNC-LABEL: {{^}}lds_atomic_xor_ret_i32_offset:
; EG: LDS_XOR_RET *
; SI: DS_XOR_RTN_B32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, 0x10
; SI: S_ENDPGM
@@ -216,14 +216,14 @@ define void @lds_atomic_xor_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace
}
; FIXME: There is no atomic nand instr
-; XFUNC-LABEL: @lds_atomic_nand_ret_i32:uction, so we somehow need to expand this.
+; XFUNC-LABEL: {{^}}lds_atomic_nand_ret_i32:uction, so we somehow need to expand this.
; define void @lds_atomic_nand_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
; %result = atomicrmw nand i32 addrspace(3)* %ptr, i32 4 seq_cst
; store i32 %result, i32 addrspace(1)* %out, align 4
; ret void
; }
-; FUNC-LABEL: @lds_atomic_min_ret_i32:
+; FUNC-LABEL: {{^}}lds_atomic_min_ret_i32:
; EG: LDS_MIN_INT_RET *
; SI: DS_MIN_RTN_I32
; SI: S_ENDPGM
@@ -233,7 +233,7 @@ define void @lds_atomic_min_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %p
ret void
}
-; FUNC-LABEL: @lds_atomic_min_ret_i32_offset:
+; FUNC-LABEL: {{^}}lds_atomic_min_ret_i32_offset:
; EG: LDS_MIN_INT_RET *
; SI: DS_MIN_RTN_I32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, 0x10
; SI: S_ENDPGM
@@ -244,7 +244,7 @@ define void @lds_atomic_min_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace
ret void
}
-; FUNC-LABEL: @lds_atomic_max_ret_i32:
+; FUNC-LABEL: {{^}}lds_atomic_max_ret_i32:
; EG: LDS_MAX_INT_RET *
; SI: DS_MAX_RTN_I32
; SI: S_ENDPGM
@@ -254,7 +254,7 @@ define void @lds_atomic_max_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %p
ret void
}
-; FUNC-LABEL: @lds_atomic_max_ret_i32_offset:
+; FUNC-LABEL: {{^}}lds_atomic_max_ret_i32_offset:
; EG: LDS_MAX_INT_RET *
; SI: DS_MAX_RTN_I32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, 0x10
; SI: S_ENDPGM
@@ -265,7 +265,7 @@ define void @lds_atomic_max_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace
ret void
}
-; FUNC-LABEL: @lds_atomic_umin_ret_i32:
+; FUNC-LABEL: {{^}}lds_atomic_umin_ret_i32:
; EG: LDS_MIN_UINT_RET *
; SI: DS_MIN_RTN_U32
; SI: S_ENDPGM
@@ -275,7 +275,7 @@ define void @lds_atomic_umin_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %
ret void
}
-; FUNC-LABEL: @lds_atomic_umin_ret_i32_offset:
+; FUNC-LABEL: {{^}}lds_atomic_umin_ret_i32_offset:
; EG: LDS_MIN_UINT_RET *
; SI: DS_MIN_RTN_U32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, 0x10
; SI: S_ENDPGM
@@ -286,7 +286,7 @@ define void @lds_atomic_umin_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspac
ret void
}
-; FUNC-LABEL: @lds_atomic_umax_ret_i32:
+; FUNC-LABEL: {{^}}lds_atomic_umax_ret_i32:
; EG: LDS_MAX_UINT_RET *
; SI: DS_MAX_RTN_U32
; SI: S_ENDPGM
@@ -296,7 +296,7 @@ define void @lds_atomic_umax_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %
ret void
}
-; FUNC-LABEL: @lds_atomic_umax_ret_i32_offset:
+; FUNC-LABEL: {{^}}lds_atomic_umax_ret_i32_offset:
; EG: LDS_MAX_UINT_RET *
; SI: DS_MAX_RTN_U32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, 0x10
; SI: S_ENDPGM
@@ -307,7 +307,7 @@ define void @lds_atomic_umax_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspac
ret void
}
-; FUNC-LABEL: @lds_atomic_xchg_noret_i32:
+; FUNC-LABEL: {{^}}lds_atomic_xchg_noret_i32:
; SI: S_LOAD_DWORD [[SPTR:s[0-9]+]],
; SI: V_MOV_B32_e32 [[DATA:v[0-9]+]], 4
; SI: V_MOV_B32_e32 [[VPTR:v[0-9]+]], [[SPTR]]
@@ -318,7 +318,7 @@ define void @lds_atomic_xchg_noret_i32(i32 addrspace(3)* %ptr) nounwind {
ret void
}
-; FUNC-LABEL: @lds_atomic_xchg_noret_i32_offset:
+; FUNC-LABEL: {{^}}lds_atomic_xchg_noret_i32_offset:
; SI: DS_WRXCHG_RTN_B32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, 0x10
; SI: S_ENDPGM
define void @lds_atomic_xchg_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
@@ -328,7 +328,7 @@ define void @lds_atomic_xchg_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
}
; XXX - Is it really necessary to load 4 into VGPR?
-; FUNC-LABEL: @lds_atomic_add_noret_i32:
+; FUNC-LABEL: {{^}}lds_atomic_add_noret_i32:
; SI: S_LOAD_DWORD [[SPTR:s[0-9]+]],
; SI: V_MOV_B32_e32 [[DATA:v[0-9]+]], 4
; SI: V_MOV_B32_e32 [[VPTR:v[0-9]+]], [[SPTR]]
@@ -339,7 +339,7 @@ define void @lds_atomic_add_noret_i32(i32 addrspace(3)* %ptr) nounwind {
ret void
}
-; FUNC-LABEL: @lds_atomic_add_noret_i32_offset:
+; FUNC-LABEL: {{^}}lds_atomic_add_noret_i32_offset:
; SI: DS_ADD_U32 v{{[0-9]+}}, v{{[0-9]+}}, 0x10
; SI: S_ENDPGM
define void @lds_atomic_add_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
@@ -348,7 +348,7 @@ define void @lds_atomic_add_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
ret void
}
-; FUNC-LABEL: @lds_atomic_add_noret_i32_bad_si_offset
+; FUNC-LABEL: {{^}}lds_atomic_add_noret_i32_bad_si_offset:
; SI: DS_ADD_U32 v{{[0-9]+}}, v{{[0-9]+}}, 0x0
; CI: DS_ADD_U32 v{{[0-9]+}}, v{{[0-9]+}}, 0x10
; SI: S_ENDPGM
@@ -360,7 +360,7 @@ define void @lds_atomic_add_noret_i32_bad_si_offset(i32 addrspace(3)* %ptr, i32
ret void
}
-; FUNC-LABEL: @lds_atomic_inc_noret_i32:
+; FUNC-LABEL: {{^}}lds_atomic_inc_noret_i32:
; SI: S_MOV_B32 [[SNEGONE:s[0-9]+]], -1
; SI: V_MOV_B32_e32 [[NEGONE:v[0-9]+]], [[SNEGONE]]
; SI: DS_INC_U32 v{{[0-9]+}}, [[NEGONE]], 0x0
@@ -370,7 +370,7 @@ define void @lds_atomic_inc_noret_i32(i32 addrspace(3)* %ptr) nounwind {
ret void
}
-; FUNC-LABEL: @lds_atomic_inc_noret_i32_offset:
+; FUNC-LABEL: {{^}}lds_atomic_inc_noret_i32_offset:
; SI: S_MOV_B32 [[SNEGONE:s[0-9]+]], -1
; SI: V_MOV_B32_e32 [[NEGONE:v[0-9]+]], [[SNEGONE]]
; SI: DS_INC_U32 v{{[0-9]+}}, [[NEGONE]], 0x10
@@ -381,7 +381,7 @@ define void @lds_atomic_inc_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
ret void
}
-; FUNC-LABEL: @lds_atomic_inc_noret_i32_bad_si_offset:
+; FUNC-LABEL: {{^}}lds_atomic_inc_noret_i32_bad_si_offset:
; SI: DS_INC_U32 v{{[0-9]+}}, v{{[0-9]+}}, 0x0
; CI: DS_INC_U32 v{{[0-9]+}}, v{{[0-9]+}}, 0x10
; SI: S_ENDPGM
@@ -393,7 +393,7 @@ define void @lds_atomic_inc_noret_i32_bad_si_offset(i32 addrspace(3)* %ptr, i32
ret void
}
-; FUNC-LABEL: @lds_atomic_sub_noret_i32:
+; FUNC-LABEL: {{^}}lds_atomic_sub_noret_i32:
; SI: DS_SUB_U32
; SI: S_ENDPGM
define void @lds_atomic_sub_noret_i32(i32 addrspace(3)* %ptr) nounwind {
@@ -401,7 +401,7 @@ define void @lds_atomic_sub_noret_i32(i32 addrspace(3)* %ptr) nounwind {
ret void
}
-; FUNC-LABEL: @lds_atomic_sub_noret_i32_offset:
+; FUNC-LABEL: {{^}}lds_atomic_sub_noret_i32_offset:
; SI: DS_SUB_U32 v{{[0-9]+}}, v{{[0-9]+}}, 0x10
; SI: S_ENDPGM
define void @lds_atomic_sub_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
@@ -410,7 +410,7 @@ define void @lds_atomic_sub_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
ret void
}
-; FUNC-LABEL: @lds_atomic_dec_noret_i32:
+; FUNC-LABEL: {{^}}lds_atomic_dec_noret_i32:
; SI: S_MOV_B32 [[SNEGONE:s[0-9]+]], -1
; SI: V_MOV_B32_e32 [[NEGONE:v[0-9]+]], [[SNEGONE]]
; SI: DS_DEC_U32 v{{[0-9]+}}, [[NEGONE]], 0x0
@@ -420,7 +420,7 @@ define void @lds_atomic_dec_noret_i32(i32 addrspace(3)* %ptr) nounwind {
ret void
}
-; FUNC-LABEL: @lds_atomic_dec_noret_i32_offset:
+; FUNC-LABEL: {{^}}lds_atomic_dec_noret_i32_offset:
; SI: S_MOV_B32 [[SNEGONE:s[0-9]+]], -1
; SI: V_MOV_B32_e32 [[NEGONE:v[0-9]+]], [[SNEGONE]]
; SI: DS_DEC_U32 v{{[0-9]+}}, [[NEGONE]], 0x10
@@ -431,7 +431,7 @@ define void @lds_atomic_dec_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
ret void
}
-; FUNC-LABEL: @lds_atomic_and_noret_i32:
+; FUNC-LABEL: {{^}}lds_atomic_and_noret_i32:
; SI: DS_AND_B32
; SI: S_ENDPGM
define void @lds_atomic_and_noret_i32(i32 addrspace(3)* %ptr) nounwind {
@@ -439,7 +439,7 @@ define void @lds_atomic_and_noret_i32(i32 addrspace(3)* %ptr) nounwind {
ret void
}
-; FUNC-LABEL: @lds_atomic_and_noret_i32_offset:
+; FUNC-LABEL: {{^}}lds_atomic_and_noret_i32_offset:
; SI: DS_AND_B32 v{{[0-9]+}}, v{{[0-9]+}}, 0x10
; SI: S_ENDPGM
define void @lds_atomic_and_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
@@ -448,7 +448,7 @@ define void @lds_atomic_and_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
ret void
}
-; FUNC-LABEL: @lds_atomic_or_noret_i32:
+; FUNC-LABEL: {{^}}lds_atomic_or_noret_i32:
; SI: DS_OR_B32
; SI: S_ENDPGM
define void @lds_atomic_or_noret_i32(i32 addrspace(3)* %ptr) nounwind {
@@ -456,7 +456,7 @@ define void @lds_atomic_or_noret_i32(i32 addrspace(3)* %ptr) nounwind {
ret void
}
-; FUNC-LABEL: @lds_atomic_or_noret_i32_offset:
+; FUNC-LABEL: {{^}}lds_atomic_or_noret_i32_offset:
; SI: DS_OR_B32 v{{[0-9]+}}, v{{[0-9]+}}, 0x10
; SI: S_ENDPGM
define void @lds_atomic_or_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
@@ -465,7 +465,7 @@ define void @lds_atomic_or_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
ret void
}
-; FUNC-LABEL: @lds_atomic_xor_noret_i32:
+; FUNC-LABEL: {{^}}lds_atomic_xor_noret_i32:
; SI: DS_XOR_B32
; SI: S_ENDPGM
define void @lds_atomic_xor_noret_i32(i32 addrspace(3)* %ptr) nounwind {
@@ -473,7 +473,7 @@ define void @lds_atomic_xor_noret_i32(i32 addrspace(3)* %ptr) nounwind {
ret void
}
-; FUNC-LABEL: @lds_atomic_xor_noret_i32_offset:
+; FUNC-LABEL: {{^}}lds_atomic_xor_noret_i32_offset:
; SI: DS_XOR_B32 v{{[0-9]+}}, v{{[0-9]+}}, 0x10
; SI: S_ENDPGM
define void @lds_atomic_xor_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
@@ -483,13 +483,13 @@ define void @lds_atomic_xor_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
}
; FIXME: There is no atomic nand instr
-; XFUNC-LABEL: @lds_atomic_nand_noret_i32:uction, so we somehow need to expand this.
+; XFUNC-LABEL: {{^}}lds_atomic_nand_noret_i32:uction, so we somehow need to expand this.
; define void @lds_atomic_nand_noret_i32(i32 addrspace(3)* %ptr) nounwind {
; %result = atomicrmw nand i32 addrspace(3)* %ptr, i32 4 seq_cst
; ret void
; }
-; FUNC-LABEL: @lds_atomic_min_noret_i32:
+; FUNC-LABEL: {{^}}lds_atomic_min_noret_i32:
; SI: DS_MIN_I32
; SI: S_ENDPGM
define void @lds_atomic_min_noret_i32(i32 addrspace(3)* %ptr) nounwind {
@@ -497,7 +497,7 @@ define void @lds_atomic_min_noret_i32(i32 addrspace(3)* %ptr) nounwind {
ret void
}
-; FUNC-LABEL: @lds_atomic_min_noret_i32_offset:
+; FUNC-LABEL: {{^}}lds_atomic_min_noret_i32_offset:
; SI: DS_MIN_I32 v{{[0-9]+}}, v{{[0-9]+}}, 0x10
; SI: S_ENDPGM
define void @lds_atomic_min_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
@@ -506,7 +506,7 @@ define void @lds_atomic_min_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
ret void
}
-; FUNC-LABEL: @lds_atomic_max_noret_i32:
+; FUNC-LABEL: {{^}}lds_atomic_max_noret_i32:
; SI: DS_MAX_I32
; SI: S_ENDPGM
define void @lds_atomic_max_noret_i32(i32 addrspace(3)* %ptr) nounwind {
@@ -514,7 +514,7 @@ define void @lds_atomic_max_noret_i32(i32 addrspace(3)* %ptr) nounwind {
ret void
}
-; FUNC-LABEL: @lds_atomic_max_noret_i32_offset:
+; FUNC-LABEL: {{^}}lds_atomic_max_noret_i32_offset:
; SI: DS_MAX_I32 v{{[0-9]+}}, v{{[0-9]+}}, 0x10
; SI: S_ENDPGM
define void @lds_atomic_max_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
@@ -523,7 +523,7 @@ define void @lds_atomic_max_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
ret void
}
-; FUNC-LABEL: @lds_atomic_umin_noret_i32:
+; FUNC-LABEL: {{^}}lds_atomic_umin_noret_i32:
; SI: DS_MIN_U32
; SI: S_ENDPGM
define void @lds_atomic_umin_noret_i32(i32 addrspace(3)* %ptr) nounwind {
@@ -531,7 +531,7 @@ define void @lds_atomic_umin_noret_i32(i32 addrspace(3)* %ptr) nounwind {
ret void
}
-; FUNC-LABEL: @lds_atomic_umin_noret_i32_offset:
+; FUNC-LABEL: {{^}}lds_atomic_umin_noret_i32_offset:
; SI: DS_MIN_U32 v{{[0-9]+}}, v{{[0-9]+}}, 0x10
; SI: S_ENDPGM
define void @lds_atomic_umin_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
@@ -540,7 +540,7 @@ define void @lds_atomic_umin_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
ret void
}
-; FUNC-LABEL: @lds_atomic_umax_noret_i32:
+; FUNC-LABEL: {{^}}lds_atomic_umax_noret_i32:
; SI: DS_MAX_U32
; SI: S_ENDPGM
define void @lds_atomic_umax_noret_i32(i32 addrspace(3)* %ptr) nounwind {
@@ -548,7 +548,7 @@ define void @lds_atomic_umax_noret_i32(i32 addrspace(3)* %ptr) nounwind {
ret void
}
-; FUNC-LABEL: @lds_atomic_umax_noret_i32_offset:
+; FUNC-LABEL: {{^}}lds_atomic_umax_noret_i32_offset:
; SI: DS_MAX_U32 v{{[0-9]+}}, v{{[0-9]+}}, 0x10
; SI: S_ENDPGM
define void @lds_atomic_umax_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
diff --git a/test/CodeGen/R600/local-atomics64.ll b/test/CodeGen/R600/local-atomics64.ll
index 975fed2bd6c..8a31e4a1535 100644
--- a/test/CodeGen/R600/local-atomics64.ll
+++ b/test/CodeGen/R600/local-atomics64.ll
@@ -1,6 +1,6 @@
; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
-; FUNC-LABEL: @lds_atomic_xchg_ret_i64:
+; FUNC-LABEL: {{^}}lds_atomic_xchg_ret_i64:
; SI: DS_WRXCHG_RTN_B64
; SI: S_ENDPGM
define void @lds_atomic_xchg_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
@@ -9,7 +9,7 @@ define void @lds_atomic_xchg_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %
ret void
}
-; FUNC-LABEL: @lds_atomic_xchg_ret_i64_offset:
+; FUNC-LABEL: {{^}}lds_atomic_xchg_ret_i64_offset:
; SI: DS_WRXCHG_RTN_B64 {{.*}} 0x20
; SI: S_ENDPGM
define void @lds_atomic_xchg_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
@@ -19,7 +19,7 @@ define void @lds_atomic_xchg_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspac
ret void
}
-; FUNC-LABEL: @lds_atomic_add_ret_i64:
+; FUNC-LABEL: {{^}}lds_atomic_add_ret_i64:
; SI: DS_ADD_RTN_U64
; SI: S_ENDPGM
define void @lds_atomic_add_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
@@ -28,7 +28,7 @@ define void @lds_atomic_add_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %p
ret void
}
-; FUNC-LABEL: @lds_atomic_add_ret_i64_offset:
+; FUNC-LABEL: {{^}}lds_atomic_add_ret_i64_offset:
; SI: S_LOAD_DWORD [[PTR:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
; SI: S_MOV_B64 s{{\[}}[[LOSDATA:[0-9]+]]:[[HISDATA:[0-9]+]]{{\]}}, 9
; SI-DAG: V_MOV_B32_e32 v[[LOVDATA:[0-9]+]], s[[LOSDATA]]
@@ -44,7 +44,7 @@ define void @lds_atomic_add_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace
ret void
}
-; FUNC-LABEL: @lds_atomic_inc_ret_i64:
+; FUNC-LABEL: {{^}}lds_atomic_inc_ret_i64:
; SI: S_MOV_B64 s{{\[}}[[LOSDATA:[0-9]+]]:[[HISDATA:[0-9]+]]{{\]}}, -1
; SI-DAG: V_MOV_B32_e32 v[[LOVDATA:[0-9]+]], s[[LOSDATA]]
; SI-DAG: V_MOV_B32_e32 v[[HIVDATA:[0-9]+]], s[[HISDATA]]
@@ -57,7 +57,7 @@ define void @lds_atomic_inc_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %p
ret void
}
-; FUNC-LABEL: @lds_atomic_inc_ret_i64_offset:
+; FUNC-LABEL: {{^}}lds_atomic_inc_ret_i64_offset:
; SI: DS_INC_RTN_U64 {{.*}} 0x20
; SI: S_ENDPGM
define void @lds_atomic_inc_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
@@ -67,7 +67,7 @@ define void @lds_atomic_inc_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace
ret void
}
-; FUNC-LABEL: @lds_atomic_sub_ret_i64:
+; FUNC-LABEL: {{^}}lds_atomic_sub_ret_i64:
; SI: DS_SUB_RTN_U64
; SI: S_ENDPGM
define void @lds_atomic_sub_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
@@ -76,7 +76,7 @@ define void @lds_atomic_sub_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %p
ret void
}
-; FUNC-LABEL: @lds_atomic_sub_ret_i64_offset:
+; FUNC-LABEL: {{^}}lds_atomic_sub_ret_i64_offset:
; SI: DS_SUB_RTN_U64 {{.*}} 0x20
; SI: S_ENDPGM
define void @lds_atomic_sub_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
@@ -86,7 +86,7 @@ define void @lds_atomic_sub_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace
ret void
}
-; FUNC-LABEL: @lds_atomic_dec_ret_i64:
+; FUNC-LABEL: {{^}}lds_atomic_dec_ret_i64:
; SI: S_MOV_B64 s{{\[}}[[LOSDATA:[0-9]+]]:[[HISDATA:[0-9]+]]{{\]}}, -1
; SI-DAG: V_MOV_B32_e32 v[[LOVDATA:[0-9]+]], s[[LOSDATA]]
; SI-DAG: V_MOV_B32_e32 v[[HIVDATA:[0-9]+]], s[[HISDATA]]
@@ -99,7 +99,7 @@ define void @lds_atomic_dec_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %p
ret void
}
-; FUNC-LABEL: @lds_atomic_dec_ret_i64_offset:
+; FUNC-LABEL: {{^}}lds_atomic_dec_ret_i64_offset:
; SI: DS_DEC_RTN_U64 {{.*}} 0x20
; SI: S_ENDPGM
define void @lds_atomic_dec_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
@@ -109,7 +109,7 @@ define void @lds_atomic_dec_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace
ret void
}
-; FUNC-LABEL: @lds_atomic_and_ret_i64:
+; FUNC-LABEL: {{^}}lds_atomic_and_ret_i64:
; SI: DS_AND_RTN_B64
; SI: S_ENDPGM
define void @lds_atomic_and_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
@@ -118,7 +118,7 @@ define void @lds_atomic_and_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %p
ret void
}
-; FUNC-LABEL: @lds_atomic_and_ret_i64_offset:
+; FUNC-LABEL: {{^}}lds_atomic_and_ret_i64_offset:
; SI: DS_AND_RTN_B64 {{.*}} 0x20
; SI: S_ENDPGM
define void @lds_atomic_and_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
@@ -128,7 +128,7 @@ define void @lds_atomic_and_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace
ret void
}
-; FUNC-LABEL: @lds_atomic_or_ret_i64:
+; FUNC-LABEL: {{^}}lds_atomic_or_ret_i64:
; SI: DS_OR_RTN_B64
; SI: S_ENDPGM
define void @lds_atomic_or_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
@@ -137,7 +137,7 @@ define void @lds_atomic_or_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %pt
ret void
}
-; FUNC-LABEL: @lds_atomic_or_ret_i64_offset:
+; FUNC-LABEL: {{^}}lds_atomic_or_ret_i64_offset:
; SI: DS_OR_RTN_B64 {{.*}} 0x20
; SI: S_ENDPGM
define void @lds_atomic_or_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
@@ -147,7 +147,7 @@ define void @lds_atomic_or_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(
ret void
}
-; FUNC-LABEL: @lds_atomic_xor_ret_i64:
+; FUNC-LABEL: {{^}}lds_atomic_xor_ret_i64:
; SI: DS_XOR_RTN_B64
; SI: S_ENDPGM
define void @lds_atomic_xor_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
@@ -156,7 +156,7 @@ define void @lds_atomic_xor_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %p
ret void
}
-; FUNC-LABEL: @lds_atomic_xor_ret_i64_offset:
+; FUNC-LABEL: {{^}}lds_atomic_xor_ret_i64_offset:
; SI: DS_XOR_RTN_B64 {{.*}} 0x20
; SI: S_ENDPGM
define void @lds_atomic_xor_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
@@ -167,14 +167,14 @@ define void @lds_atomic_xor_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace
}
; FIXME: There is no atomic nand instr
-; XFUNC-LABEL: @lds_atomic_nand_ret_i64:uction, so we somehow need to expand this.
+; XFUNC-LABEL: {{^}}lds_atomic_nand_ret_i64:uction, so we somehow need to expand this.
; define void @lds_atomic_nand_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
; %result = atomicrmw nand i64 addrspace(3)* %ptr, i32 4 seq_cst
; store i64 %result, i64 addrspace(1)* %out, align 8
; ret void
; }
-; FUNC-LABEL: @lds_atomic_min_ret_i64:
+; FUNC-LABEL: {{^}}lds_atomic_min_ret_i64:
; SI: DS_MIN_RTN_I64
; SI: S_ENDPGM
define void @lds_atomic_min_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
@@ -183,7 +183,7 @@ define void @lds_atomic_min_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %p
ret void
}
-; FUNC-LABEL: @lds_atomic_min_ret_i64_offset:
+; FUNC-LABEL: {{^}}lds_atomic_min_ret_i64_offset:
; SI: DS_MIN_RTN_I64 {{.*}} 0x20
; SI: S_ENDPGM
define void @lds_atomic_min_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
@@ -193,7 +193,7 @@ define void @lds_atomic_min_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace
ret void
}
-; FUNC-LABEL: @lds_atomic_max_ret_i64:
+; FUNC-LABEL: {{^}}lds_atomic_max_ret_i64:
; SI: DS_MAX_RTN_I64
; SI: S_ENDPGM
define void @lds_atomic_max_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
@@ -202,7 +202,7 @@ define void @lds_atomic_max_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %p
ret void
}
-; FUNC-LABEL: @lds_atomic_max_ret_i64_offset:
+; FUNC-LABEL: {{^}}lds_atomic_max_ret_i64_offset:
; SI: DS_MAX_RTN_I64 {{.*}} 0x20
; SI: S_ENDPGM
define void @lds_atomic_max_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
@@ -212,7 +212,7 @@ define void @lds_atomic_max_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace
ret void
}
-; FUNC-LABEL: @lds_atomic_umin_ret_i64:
+; FUNC-LABEL: {{^}}lds_atomic_umin_ret_i64:
; SI: DS_MIN_RTN_U64
; SI: S_ENDPGM
define void @lds_atomic_umin_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
@@ -221,7 +221,7 @@ define void @lds_atomic_umin_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %
ret void
}
-; FUNC-LABEL: @lds_atomic_umin_ret_i64_offset:
+; FUNC-LABEL: {{^}}lds_atomic_umin_ret_i64_offset:
; SI: DS_MIN_RTN_U64 {{.*}} 0x20
; SI: S_ENDPGM
define void @lds_atomic_umin_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
@@ -231,7 +231,7 @@ define void @lds_atomic_umin_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspac
ret void
}
-; FUNC-LABEL: @lds_atomic_umax_ret_i64:
+; FUNC-LABEL: {{^}}lds_atomic_umax_ret_i64:
; SI: DS_MAX_RTN_U64
; SI: S_ENDPGM
define void @lds_atomic_umax_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
@@ -240,7 +240,7 @@ define void @lds_atomic_umax_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %
ret void
}
-; FUNC-LABEL: @lds_atomic_umax_ret_i64_offset:
+; FUNC-LABEL: {{^}}lds_atomic_umax_ret_i64_offset:
; SI: DS_MAX_RTN_U64 {{.*}} 0x20
; SI: S_ENDPGM
define void @lds_atomic_umax_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
@@ -250,7 +250,7 @@ define void @lds_atomic_umax_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspac
ret void
}
-; FUNC-LABEL: @lds_atomic_xchg_noret_i64:
+; FUNC-LABEL: {{^}}lds_atomic_xchg_noret_i64:
; SI: DS_WRXCHG_RTN_B64
; SI: S_ENDPGM
define void @lds_atomic_xchg_noret_i64(i64 addrspace(3)* %ptr) nounwind {
@@ -258,7 +258,7 @@ define void @lds_atomic_xchg_noret_i64(i64 addrspace(3)* %ptr) nounwind {
ret void
}
-; FUNC-LABEL: @lds_atomic_xchg_noret_i64_offset:
+; FUNC-LABEL: {{^}}lds_atomic_xchg_noret_i64_offset:
; SI: DS_WRXCHG_RTN_B64 {{.*}} 0x20
; SI: S_ENDPGM
define void @lds_atomic_xchg_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
@@ -267,7 +267,7 @@ define void @lds_atomic_xchg_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
ret void
}
-; FUNC-LABEL: @lds_atomic_add_noret_i64:
+; FUNC-LABEL: {{^}}lds_atomic_add_noret_i64:
; SI: DS_ADD_U64
; SI: S_ENDPGM
define void @lds_atomic_add_noret_i64(i64 addrspace(3)* %ptr) nounwind {
@@ -275,7 +275,7 @@ define void @lds_atomic_add_noret_i64(i64 addrspace(3)* %ptr) nounwind {
ret void
}
-; FUNC-LABEL: @lds_atomic_add_noret_i64_offset:
+; FUNC-LABEL: {{^}}lds_atomic_add_noret_i64_offset:
; SI: S_LOAD_DWORD [[PTR:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0x9
; SI: S_MOV_B64 s{{\[}}[[LOSDATA:[0-9]+]]:[[HISDATA:[0-9]+]]{{\]}}, 9
; SI-DAG: V_MOV_B32_e32 v[[LOVDATA:[0-9]+]], s[[LOSDATA]]
@@ -289,7 +289,7 @@ define void @lds_atomic_add_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
ret void
}
-; FUNC-LABEL: @lds_atomic_inc_noret_i64:
+; FUNC-LABEL: {{^}}lds_atomic_inc_noret_i64:
; SI: S_MOV_B64 s{{\[}}[[LOSDATA:[0-9]+]]:[[HISDATA:[0-9]+]]{{\]}}, -1
; SI-DAG: V_MOV_B32_e32 v[[LOVDATA:[0-9]+]], s[[LOSDATA]]
; SI-DAG: V_MOV_B32_e32 v[[HIVDATA:[0-9]+]], s[[HISDATA]]
@@ -300,7 +300,7 @@ define void @lds_atomic_inc_noret_i64(i64 addrspace(3)* %ptr) nounwind {
ret void
}
-; FUNC-LABEL: @lds_atomic_inc_noret_i64_offset:
+; FUNC-LABEL: {{^}}lds_atomic_inc_noret_i64_offset:
; SI: DS_INC_U64 {{.*}} 0x20
; SI: S_ENDPGM
define void @lds_atomic_inc_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
@@ -309,7 +309,7 @@ define void @lds_atomic_inc_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
ret void
}
-; FUNC-LABEL: @lds_atomic_sub_noret_i64:
+; FUNC-LABEL: {{^}}lds_atomic_sub_noret_i64:
; SI: DS_SUB_U64
; SI: S_ENDPGM
define void @lds_atomic_sub_noret_i64(i64 addrspace(3)* %ptr) nounwind {
@@ -317,7 +317,7 @@ define void @lds_atomic_sub_noret_i64(i64 addrspace(3)* %ptr) nounwind {
ret void
}
-; FUNC-LABEL: @lds_atomic_sub_noret_i64_offset:
+; FUNC-LABEL: {{^}}lds_atomic_sub_noret_i64_offset:
; SI: DS_SUB_U64 {{.*}} 0x20
; SI: S_ENDPGM
define void @lds_atomic_sub_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
@@ -326,7 +326,7 @@ define void @lds_atomic_sub_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
ret void
}
-; FUNC-LABEL: @lds_atomic_dec_noret_i64:
+; FUNC-LABEL: {{^}}lds_atomic_dec_noret_i64:
; SI: S_MOV_B64 s{{\[}}[[LOSDATA:[0-9]+]]:[[HISDATA:[0-9]+]]{{\]}}, -1
; SI-DAG: V_MOV_B32_e32 v[[LOVDATA:[0-9]+]], s[[LOSDATA]]
; SI-DAG: V_MOV_B32_e32 v[[HIVDATA:[0-9]+]], s[[HISDATA]]
@@ -337,7 +337,7 @@ define void @lds_atomic_dec_noret_i64(i64 addrspace(3)* %ptr) nounwind {
ret void
}
-; FUNC-LABEL: @lds_atomic_dec_noret_i64_offset:
+; FUNC-LABEL: {{^}}lds_atomic_dec_noret_i64_offset:
; SI: DS_DEC_U64 {{.*}} 0x20
; SI: S_ENDPGM
define void @lds_atomic_dec_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
@@ -346,7 +346,7 @@ define void @lds_atomic_dec_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
ret void
}
-; FUNC-LABEL: @lds_atomic_and_noret_i64:
+; FUNC-LABEL: {{^}}lds_atomic_and_noret_i64:
; SI: DS_AND_B64
; SI: S_ENDPGM
define void @lds_atomic_and_noret_i64(i64 addrspace(3)* %ptr) nounwind {
@@ -354,7 +354,7 @@ define void @lds_atomic_and_noret_i64(i64 addrspace(3)* %ptr) nounwind {
ret void
}
-; FUNC-LABEL: @lds_atomic_and_noret_i64_offset:
+; FUNC-LABEL: {{^}}lds_atomic_and_noret_i64_offset:
; SI: DS_AND_B64 {{.*}} 0x20
; SI: S_ENDPGM
define void @lds_atomic_and_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
@@ -363,7 +363,7 @@ define void @lds_atomic_and_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
ret void
}
-; FUNC-LABEL: @lds_atomic_or_noret_i64:
+; FUNC-LABEL: {{^}}lds_atomic_or_noret_i64:
; SI: DS_OR_B64
; SI: S_ENDPGM
define void @lds_atomic_or_noret_i64(i64 addrspace(3)* %ptr) nounwind {
@@ -371,7 +371,7 @@ define void @lds_atomic_or_noret_i64(i64 addrspace(3)* %ptr) nounwind {
ret void
}
-; FUNC-LABEL: @lds_atomic_or_noret_i64_offset:
+; FUNC-LABEL: {{^}}lds_atomic_or_noret_i64_offset:
; SI: DS_OR_B64 {{.*}} 0x20
; SI: S_ENDPGM
define void @lds_atomic_or_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
@@ -380,7 +380,7 @@ define void @lds_atomic_or_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
ret void
}
-; FUNC-LABEL: @lds_atomic_xor_noret_i64:
+; FUNC-LABEL: {{^}}lds_atomic_xor_noret_i64:
; SI: DS_XOR_B64
; SI: S_ENDPGM
define void @lds_atomic_xor_noret_i64(i64 addrspace(3)* %ptr) nounwind {
@@ -388,7 +388,7 @@ define void @lds_atomic_xor_noret_i64(i64 addrspace(3)* %ptr) nounwind {
ret void
}
-; FUNC-LABEL: @lds_atomic_xor_noret_i64_offset:
+; FUNC-LABEL: {{^}}lds_atomic_xor_noret_i64_offset:
; SI: DS_XOR_B64 {{.*}} 0x20
; SI: S_ENDPGM
define void @lds_atomic_xor_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
@@ -398,13 +398,13 @@ define void @lds_atomic_xor_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
}
; FIXME: There is no atomic nand instr
-; XFUNC-LABEL: @lds_atomic_nand_noret_i64:uction, so we somehow need to expand this.
+; XFUNC-LABEL: {{^}}lds_atomic_nand_noret_i64:uction, so we somehow need to expand this.
; define void @lds_atomic_nand_noret_i64(i64 addrspace(3)* %ptr) nounwind {
; %result = atomicrmw nand i64 addrspace(3)* %ptr, i32 4 seq_cst
; ret void
; }
-; FUNC-LABEL: @lds_atomic_min_noret_i64:
+; FUNC-LABEL: {{^}}lds_atomic_min_noret_i64:
; SI: DS_MIN_I64
; SI: S_ENDPGM
define void @lds_atomic_min_noret_i64(i64 addrspace(3)* %ptr) nounwind {
@@ -412,7 +412,7 @@ define void @lds_atomic_min_noret_i64(i64 addrspace(3)* %ptr) nounwind {
ret void
}
-; FUNC-LABEL: @lds_atomic_min_noret_i64_offset:
+; FUNC-LABEL: {{^}}lds_atomic_min_noret_i64_offset:
; SI: DS_MIN_I64 {{.*}} 0x20
; SI: S_ENDPGM
define void @lds_atomic_min_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
@@ -421,7 +421,7 @@ define void @lds_atomic_min_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
ret void
}
-; FUNC-LABEL: @lds_atomic_max_noret_i64:
+; FUNC-LABEL: {{^}}lds_atomic_max_noret_i64:
; SI: DS_MAX_I64
; SI: S_ENDPGM
define void @lds_atomic_max_noret_i64(i64 addrspace(3)* %ptr) nounwind {
@@ -429,7 +429,7 @@ define void @lds_atomic_max_noret_i64(i64 addrspace(3)* %ptr) nounwind {
ret void
}
-; FUNC-LABEL: @lds_atomic_max_noret_i64_offset:
+; FUNC-LABEL: {{^}}lds_atomic_max_noret_i64_offset:
; SI: DS_MAX_I64 {{.*}} 0x20
; SI: S_ENDPGM
define void @lds_atomic_max_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
@@ -438,7 +438,7 @@ define void @lds_atomic_max_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
ret void
}
-; FUNC-LABEL: @lds_atomic_umin_noret_i64:
+; FUNC-LABEL: {{^}}lds_atomic_umin_noret_i64:
; SI: DS_MIN_U64
; SI: S_ENDPGM
define void @lds_atomic_umin_noret_i64(i64 addrspace(3)* %ptr) nounwind {
@@ -446,7 +446,7 @@ define void @lds_atomic_umin_noret_i64(i64 addrspace(3)* %ptr) nounwind {
ret void
}
-; FUNC-LABEL: @lds_atomic_umin_noret_i64_offset:
+; FUNC-LABEL: {{^}}lds_atomic_umin_noret_i64_offset:
; SI: DS_MIN_U64 {{.*}} 0x20
; SI: S_ENDPGM
define void @lds_atomic_umin_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
@@ -455,7 +455,7 @@ define void @lds_atomic_umin_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
ret void
}
-; FUNC-LABEL: @lds_atomic_umax_noret_i64:
+; FUNC-LABEL: {{^}}lds_atomic_umax_noret_i64:
; SI: DS_MAX_U64
; SI: S_ENDPGM
define void @lds_atomic_umax_noret_i64(i64 addrspace(3)* %ptr) nounwind {
@@ -463,7 +463,7 @@ define void @lds_atomic_umax_noret_i64(i64 addrspace(3)* %ptr) nounwind {
ret void
}
-; FUNC-LABEL: @lds_atomic_umax_noret_i64_offset:
+; FUNC-LABEL: {{^}}lds_atomic_umax_noret_i64_offset:
; SI: DS_MAX_U64 {{.*}} 0x20
; SI: S_ENDPGM
define void @lds_atomic_umax_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
diff --git a/test/CodeGen/R600/local-memory-two-objects.ll b/test/CodeGen/R600/local-memory-two-objects.ll
index f0f68243ebf..3bb79ab978d 100644
--- a/test/CodeGen/R600/local-memory-two-objects.ll
+++ b/test/CodeGen/R600/local-memory-two-objects.ll
@@ -5,7 +5,7 @@
@local_memory_two_objects.local_mem0 = internal unnamed_addr addrspace(3) global [4 x i32] zeroinitializer, align 4
@local_memory_two_objects.local_mem1 = internal unnamed_addr addrspace(3) global [4 x i32] zeroinitializer, align 4
-; EG-CHECK: @local_memory_two_objects
+; EG-CHECK: {{^}}local_memory_two_objects:
; Check that the LDS size emitted correctly
; EG-CHECK: .long 166120
diff --git a/test/CodeGen/R600/local-memory.ll b/test/CodeGen/R600/local-memory.ll
index ece89400119..279be38df19 100644
--- a/test/CodeGen/R600/local-memory.ll
+++ b/test/CodeGen/R600/local-memory.ll
@@ -4,7 +4,7 @@
@local_memory.local_mem = internal unnamed_addr addrspace(3) global [128 x i32] zeroinitializer, align 4
-; FUNC-LABEL: @local_memory
+; FUNC-LABEL: {{^}}local_memory:
; Check that the LDS size emitted correctly
; EG: .long 166120
diff --git a/test/CodeGen/R600/loop-idiom.ll b/test/CodeGen/R600/loop-idiom.ll
index 128f661077e..0478bdb2fb5 100644
--- a/test/CodeGen/R600/loop-idiom.ll
+++ b/test/CodeGen/R600/loop-idiom.ll
@@ -10,8 +10,8 @@ target triple = "r600--"
; implementations of these for R600.
; FUNC: @no_memcpy
-; R600-NOT: @llvm.memcpy
-; SI-NOT: @llvm.memcpy
+; R600-NOT: {{^}}llvm.memcpy
+; SI-NOT: {{^}}llvm.memcpy
define void @no_memcpy(i8 addrspace(3)* %in, i32 %size) {
entry:
%dest = alloca i8, i32 32
@@ -32,10 +32,10 @@ for.end:
}
; FUNC: @no_memset
-; R600-NOT: @llvm.memset
-; R600-NOT: @memset_pattern16
-; SI-NOT: @llvm.memset
-; SI-NOT: @memset_pattern16
+; R600-NOT: {{^}}llvm.memset
+; R600-NOT: {{^}}memset_pattern16:
+; SI-NOT: {{^}}llvm.memset
+; SI-NOT: {{^}}memset_pattern16:
define void @no_memset(i32 %size) {
entry:
%dest = alloca i8, i32 32
diff --git a/test/CodeGen/R600/mad-sub.ll b/test/CodeGen/R600/mad-sub.ll
index 444bb02b66c..95a402149e8 100644
--- a/test/CodeGen/R600/mad-sub.ll
+++ b/test/CodeGen/R600/mad-sub.ll
@@ -3,7 +3,7 @@
declare i32 @llvm.r600.read.tidig.x() #0
declare float @llvm.fabs.f32(float) #0
-; FUNC-LABEL: @mad_sub_f32
+; FUNC-LABEL: {{^}}mad_sub_f32:
; SI: BUFFER_LOAD_DWORD [[REGA:v[0-9]+]]
; SI: BUFFER_LOAD_DWORD [[REGB:v[0-9]+]]
; SI: BUFFER_LOAD_DWORD [[REGC:v[0-9]+]]
@@ -27,7 +27,7 @@ define void @mad_sub_f32(float addrspace(1)* noalias nocapture %out, float addrs
ret void
}
-; FUNC-LABEL: @mad_sub_inv_f32
+; FUNC-LABEL: {{^}}mad_sub_inv_f32:
; SI: BUFFER_LOAD_DWORD [[REGA:v[0-9]+]]
; SI: BUFFER_LOAD_DWORD [[REGB:v[0-9]+]]
; SI: BUFFER_LOAD_DWORD [[REGC:v[0-9]+]]
@@ -51,7 +51,7 @@ define void @mad_sub_inv_f32(float addrspace(1)* noalias nocapture %out, float a
ret void
}
-; FUNC-LABEL: @mad_sub_f64
+; FUNC-LABEL: {{^}}mad_sub_f64:
; SI: V_MUL_F64
; SI: V_ADD_F64
define void @mad_sub_f64(double addrspace(1)* noalias nocapture %out, double addrspace(1)* noalias nocapture readonly %ptr) #1 {
@@ -72,7 +72,7 @@ define void @mad_sub_f64(double addrspace(1)* noalias nocapture %out, double add
ret void
}
-; FUNC-LABEL: @mad_sub_fabs_f32
+; FUNC-LABEL: {{^}}mad_sub_fabs_f32:
; SI: BUFFER_LOAD_DWORD [[REGA:v[0-9]+]]
; SI: BUFFER_LOAD_DWORD [[REGB:v[0-9]+]]
; SI: BUFFER_LOAD_DWORD [[REGC:v[0-9]+]]
@@ -97,7 +97,7 @@ define void @mad_sub_fabs_f32(float addrspace(1)* noalias nocapture %out, float
ret void
}
-; FUNC-LABEL: @mad_sub_fabs_inv_f32
+; FUNC-LABEL: {{^}}mad_sub_fabs_inv_f32:
; SI: BUFFER_LOAD_DWORD [[REGA:v[0-9]+]]
; SI: BUFFER_LOAD_DWORD [[REGB:v[0-9]+]]
; SI: BUFFER_LOAD_DWORD [[REGC:v[0-9]+]]
@@ -122,7 +122,7 @@ define void @mad_sub_fabs_inv_f32(float addrspace(1)* noalias nocapture %out, fl
ret void
}
-; FUNC-LABEL: @neg_neg_mad_f32
+; FUNC-LABEL: {{^}}neg_neg_mad_f32:
; SI: V_MAD_F32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
define void @neg_neg_mad_f32(float addrspace(1)* noalias nocapture %out, float addrspace(1)* noalias nocapture readonly %ptr) #1 {
%tid = tail call i32 @llvm.r600.read.tidig.x() #0
@@ -144,7 +144,7 @@ define void @neg_neg_mad_f32(float addrspace(1)* noalias nocapture %out, float a
ret void
}
-; FUNC-LABEL: @mad_fabs_sub_f32
+; FUNC-LABEL: {{^}}mad_fabs_sub_f32:
; SI: BUFFER_LOAD_DWORD [[REGA:v[0-9]+]]
; SI: BUFFER_LOAD_DWORD [[REGB:v[0-9]+]]
; SI: BUFFER_LOAD_DWORD [[REGC:v[0-9]+]]
@@ -169,7 +169,7 @@ define void @mad_fabs_sub_f32(float addrspace(1)* noalias nocapture %out, float
ret void
}
-; FUNC-LABEL: @fsub_c_fadd_a_a
+; FUNC-LABEL: {{^}}fsub_c_fadd_a_a:
; SI-DAG: BUFFER_LOAD_DWORD [[R1:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
; SI-DAG: BUFFER_LOAD_DWORD [[R2:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:0x4
; SI: V_MAD_F32 [[RESULT:v[0-9]+]], -2.0, [[R1]], [[R2]]
@@ -190,7 +190,7 @@ define void @fsub_c_fadd_a_a(float addrspace(1)* %out, float addrspace(1)* %in)
ret void
}
-; FUNC-LABEL: @fsub_fadd_a_a_c
+; FUNC-LABEL: {{^}}fsub_fadd_a_a_c:
; SI-DAG: BUFFER_LOAD_DWORD [[R1:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
; SI-DAG: BUFFER_LOAD_DWORD [[R2:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:0x4
; SI: V_MAD_F32 [[RESULT:v[0-9]+]], 2.0, [[R1]], -[[R2]]
diff --git a/test/CodeGen/R600/mad_int24.ll b/test/CodeGen/R600/mad_int24.ll
index abb52907b9b..9b91a055dfe 100644
--- a/test/CodeGen/R600/mad_int24.ll
+++ b/test/CodeGen/R600/mad_int24.ll
@@ -2,7 +2,7 @@
; RUN: llc < %s -march=r600 -mcpu=cayman | FileCheck %s --check-prefix=CM --check-prefix=FUNC
; RUN: llc < %s -march=r600 -mcpu=SI -verify-machineinstrs | FileCheck %s --check-prefix=SI --check-prefix=FUNC
-; FUNC-LABEL: @i32_mad24
+; FUNC-LABEL: {{^}}i32_mad24:
; Signed 24-bit multiply is not supported on pre-Cayman GPUs.
; EG: MULLO_INT
; Make sure we aren't masking the inputs.
diff --git a/test/CodeGen/R600/mad_uint24.ll b/test/CodeGen/R600/mad_uint24.ll
index 0f0893bd53c..103b65cb4d0 100644
--- a/test/CodeGen/R600/mad_uint24.ll
+++ b/test/CodeGen/R600/mad_uint24.ll
@@ -2,7 +2,7 @@
; RUN: llc < %s -march=r600 -mcpu=cayman | FileCheck %s --check-prefix=EG --check-prefix=FUNC
; RUN: llc < %s -march=r600 -mcpu=SI -verify-machineinstrs | FileCheck %s --check-prefix=SI --check-prefix=FUNC
-; FUNC-LABEL: @u32_mad24
+; FUNC-LABEL: {{^}}u32_mad24:
; EG: MULADD_UINT24
; SI: V_MAD_U32_U24
@@ -18,7 +18,7 @@ entry:
ret void
}
-; FUNC-LABEL: @i16_mad24
+; FUNC-LABEL: {{^}}i16_mad24:
; The order of A and B does not matter.
; EG: MULADD_UINT24 {{[* ]*}}T{{[0-9]}}.[[MAD_CHAN:[XYZW]]]
; The result must be sign-extended
@@ -36,7 +36,7 @@ entry:
ret void
}
-; FUNC-LABEL: @i8_mad24
+; FUNC-LABEL: {{^}}i8_mad24:
; EG: MULADD_UINT24 {{[* ]*}}T{{[0-9]}}.[[MAD_CHAN:[XYZW]]]
; The result must be sign-extended
; EG: BFE_INT {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[MAD_CHAN]], 0.0, literal.x
@@ -60,7 +60,7 @@ entry:
; 24-bit mad pattern wasn't being matched.
; Check that the select instruction is not deleted.
-; FUNC-LABEL: @i24_i32_i32_mad
+; FUNC-LABEL: {{^}}i24_i32_i32_mad:
; EG: CNDE_INT
; SI: V_CNDMASK
define void @i24_i32_i32_mad(i32 addrspace(1)* %out, i32 %a, i32 %b, i32 %c, i32 %d) {
diff --git a/test/CodeGen/R600/max-literals.ll b/test/CodeGen/R600/max-literals.ll
index 65a6d2b5fc9..c357524b140 100644
--- a/test/CodeGen/R600/max-literals.ll
+++ b/test/CodeGen/R600/max-literals.ll
@@ -1,6 +1,6 @@
;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
-; CHECK: @main
+; CHECK-LABEL: {{^}}main:
; CHECK: ADD *
define void @main(<4 x float> inreg %reg0, <4 x float> inreg %reg1, <4 x float> inreg %reg2) #0 {
@@ -29,7 +29,7 @@ main_body:
ret void
}
-; CHECK: @main
+; CHECK-LABEL: {{^}}main2:
; CHECK-NOT: ADD *
define void @main2(<4 x float> inreg %reg0, <4 x float> inreg %reg1, <4 x float> inreg %reg2) #0 {
diff --git a/test/CodeGen/R600/missing-store.ll b/test/CodeGen/R600/missing-store.ll
index ab1f4d059b6..aa34380a157 100644
--- a/test/CodeGen/R600/missing-store.ll
+++ b/test/CodeGen/R600/missing-store.ll
@@ -5,7 +5,7 @@
; Make sure when the load from %ptr2 is folded the chain isn't lost,
; resulting in losing the store to gptr
-; FUNC-LABEL: @missing_store_reduced
+; FUNC-LABEL: {{^}}missing_store_reduced:
; SI: DS_READ_B64
; SI: BUFFER_STORE_DWORD
; SI: BUFFER_LOAD_DWORD
diff --git a/test/CodeGen/R600/mubuf.ll b/test/CodeGen/R600/mubuf.ll
index 7bffead5281..062006e6543 100644
--- a/test/CodeGen/R600/mubuf.ll
+++ b/test/CodeGen/R600/mubuf.ll
@@ -7,7 +7,7 @@ declare i32 @llvm.r600.read.tidig.x() readnone
;;;==========================================================================;;;
; MUBUF load with an immediate byte offset that fits into 12-bits
-; CHECK-LABEL: @mubuf_load0
+; CHECK-LABEL: {{^}}mubuf_load0:
; CHECK: BUFFER_LOAD_DWORD v{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0 offset:0x4 ; encoding: [0x04,0x00,0x30,0xe0
define void @mubuf_load0(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
entry:
@@ -18,7 +18,7 @@ entry:
}
; MUBUF load with the largest possible immediate offset
-; CHECK-LABEL: @mubuf_load1
+; CHECK-LABEL: {{^}}mubuf_load1:
; CHECK: BUFFER_LOAD_UBYTE v{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0 offset:0xfff ; encoding: [0xff,0x0f,0x20,0xe0
define void @mubuf_load1(i8 addrspace(1)* %out, i8 addrspace(1)* %in) {
entry:
@@ -29,7 +29,7 @@ entry:
}
; MUBUF load with an immediate byte offset that doesn't fit into 12-bits
-; CHECK-LABEL: @mubuf_load2
+; CHECK-LABEL: {{^}}mubuf_load2:
; CHECK: BUFFER_LOAD_DWORD v{{[0-9]}}, v[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}], 0 addr64 ; encoding: [0x00,0x80,0x30,0xe0
define void @mubuf_load2(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
entry:
@@ -40,7 +40,7 @@ entry:
}
; MUBUF load with a 12-bit immediate offset and a register offset
-; CHECK-LABEL: @mubuf_load3
+; CHECK-LABEL: {{^}}mubuf_load3:
; CHECK-NOT: ADD
; CHECK: BUFFER_LOAD_DWORD v{{[0-9]}}, v[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}], 0 addr64 offset:0x4 ; encoding: [0x04,0x80,0x30,0xe0
define void @mubuf_load3(i32 addrspace(1)* %out, i32 addrspace(1)* %in, i64 %offset) {
@@ -57,7 +57,7 @@ entry:
;;;==========================================================================;;;
; MUBUF store with an immediate byte offset that fits into 12-bits
-; CHECK-LABEL: @mubuf_store0
+; CHECK-LABEL: {{^}}mubuf_store0:
; CHECK: BUFFER_STORE_DWORD v{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0 offset:0x4 ; encoding: [0x04,0x00,0x70,0xe0
define void @mubuf_store0(i32 addrspace(1)* %out) {
entry:
@@ -67,7 +67,7 @@ entry:
}
; MUBUF store with the largest possible immediate offset
-; CHECK-LABEL: @mubuf_store1
+; CHECK-LABEL: {{^}}mubuf_store1:
; CHECK: BUFFER_STORE_BYTE v{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0 offset:0xfff ; encoding: [0xff,0x0f,0x60,0xe0
define void @mubuf_store1(i8 addrspace(1)* %out) {
@@ -78,7 +78,7 @@ entry:
}
; MUBUF store with an immediate byte offset that doesn't fit into 12-bits
-; CHECK-LABEL: @mubuf_store2
+; CHECK-LABEL: {{^}}mubuf_store2:
; CHECK: BUFFER_STORE_DWORD v{{[0-9]}}, v[{{[0-9]+:[0-9]+}}], s[{{[0-9]:[0-9]}}], 0 addr64 ; encoding: [0x00,0x80,0x70,0xe0
define void @mubuf_store2(i32 addrspace(1)* %out) {
entry:
@@ -88,7 +88,7 @@ entry:
}
; MUBUF store with a 12-bit immediate offset and a register offset
-; CHECK-LABEL: @mubuf_store3
+; CHECK-LABEL: {{^}}mubuf_store3:
; CHECK-NOT: ADD
; CHECK: BUFFER_STORE_DWORD v{{[0-9]}}, v[{{[0-9]:[0-9]}}], s[{{[0-9]:[0-9]}}], 0 addr64 offset:0x4 ; encoding: [0x04,0x80,0x70,0xe0
define void @mubuf_store3(i32 addrspace(1)* %out, i64 %offset) {
@@ -99,14 +99,14 @@ entry:
ret void
}
-; CHECK-LABEL: @store_sgpr_ptr
+; CHECK-LABEL: {{^}}store_sgpr_ptr:
; CHECK: BUFFER_STORE_DWORD v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, 0
define void @store_sgpr_ptr(i32 addrspace(1)* %out) #0 {
store i32 99, i32 addrspace(1)* %out, align 4
ret void
}
-; CHECK-LABEL: @store_sgpr_ptr_offset
+; CHECK-LABEL: {{^}}store_sgpr_ptr_offset:
; CHECK: BUFFER_STORE_DWORD v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, 0 offset:0x28
define void @store_sgpr_ptr_offset(i32 addrspace(1)* %out) #0 {
%out.gep = getelementptr i32 addrspace(1)* %out, i32 10
@@ -114,7 +114,7 @@ define void @store_sgpr_ptr_offset(i32 addrspace(1)* %out) #0 {
ret void
}
-; CHECK-LABEL: @store_sgpr_ptr_large_offset
+; CHECK-LABEL: {{^}}store_sgpr_ptr_large_offset:
; CHECK: BUFFER_STORE_DWORD v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64
define void @store_sgpr_ptr_large_offset(i32 addrspace(1)* %out) #0 {
%out.gep = getelementptr i32 addrspace(1)* %out, i32 32768
@@ -122,7 +122,7 @@ define void @store_sgpr_ptr_large_offset(i32 addrspace(1)* %out) #0 {
ret void
}
-; CHECK-LABEL: @store_vgpr_ptr
+; CHECK-LABEL: {{^}}store_vgpr_ptr:
; CHECK: BUFFER_STORE_DWORD v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64
define void @store_vgpr_ptr(i32 addrspace(1)* %out) #0 {
%tid = call i32 @llvm.r600.read.tidig.x() readnone
diff --git a/test/CodeGen/R600/mul.ll b/test/CodeGen/R600/mul.ll
index 11de3e333ba..bf1a1e1ffef 100644
--- a/test/CodeGen/R600/mul.ll
+++ b/test/CodeGen/R600/mul.ll
@@ -3,7 +3,7 @@
; mul24 and mad24 are affected
-; FUNC-LABEL: @test_mul_v2i32
+; FUNC-LABEL: {{^}}test_mul_v2i32:
; EG: MULLO_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; EG: MULLO_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
@@ -19,7 +19,7 @@ define void @test_mul_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)
ret void
}
-; FUNC-LABEL: @v_mul_v4i32
+; FUNC-LABEL: {{^}}v_mul_v4i32:
; EG: MULLO_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; EG: MULLO_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; EG: MULLO_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
@@ -39,7 +39,7 @@ define void @v_mul_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %
ret void
}
-; FUNC-LABEL: @s_trunc_i64_mul_to_i32
+; FUNC-LABEL: {{^}}s_trunc_i64_mul_to_i32:
; SI: S_LOAD_DWORD
; SI: S_LOAD_DWORD
; SI: S_MUL_I32
@@ -51,7 +51,7 @@ define void @s_trunc_i64_mul_to_i32(i32 addrspace(1)* %out, i64 %a, i64 %b) {
ret void
}
-; FUNC-LABEL: @v_trunc_i64_mul_to_i32
+; FUNC-LABEL: {{^}}v_trunc_i64_mul_to_i32:
; SI: S_LOAD_DWORD
; SI: S_LOAD_DWORD
; SI: V_MUL_LO_I32
@@ -67,7 +67,7 @@ define void @v_trunc_i64_mul_to_i32(i32 addrspace(1)* %out, i64 addrspace(1)* %a
; This 64-bit multiply should just use MUL_HI and MUL_LO, since the top
; 32-bits of both arguments are sign bits.
-; FUNC-LABEL: @mul64_sext_c
+; FUNC-LABEL: {{^}}mul64_sext_c:
; EG-DAG: MULLO_INT
; EG-DAG: MULHI_INT
; SI-DAG: S_MUL_I32
@@ -80,7 +80,7 @@ entry:
ret void
}
-; FUNC-LABEL: @v_mul64_sext_c:
+; FUNC-LABEL: {{^}}v_mul64_sext_c:
; EG-DAG: MULLO_INT
; EG-DAG: MULHI_INT
; SI-DAG: V_MUL_LO_I32
@@ -94,7 +94,7 @@ define void @v_mul64_sext_c(i64 addrspace(1)* %out, i32 addrspace(1)* %in) {
ret void
}
-; FUNC-LABEL: @v_mul64_sext_inline_imm:
+; FUNC-LABEL: {{^}}v_mul64_sext_inline_imm:
; SI-DAG: V_MUL_LO_I32 v{{[0-9]+}}, 9, v{{[0-9]+}}
; SI-DAG: V_MUL_HI_I32 v{{[0-9]+}}, 9, v{{[0-9]+}}
; SI: S_ENDPGM
@@ -106,7 +106,7 @@ define void @v_mul64_sext_inline_imm(i64 addrspace(1)* %out, i32 addrspace(1)* %
ret void
}
-; FUNC-LABEL: @s_mul_i32:
+; FUNC-LABEL: {{^}}s_mul_i32:
; SI: S_LOAD_DWORD [[SRC0:s[0-9]+]],
; SI: S_LOAD_DWORD [[SRC1:s[0-9]+]],
; SI: S_MUL_I32 [[SRESULT:s[0-9]+]], [[SRC0]], [[SRC1]]
@@ -119,7 +119,7 @@ define void @s_mul_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
ret void
}
-; FUNC-LABEL: @v_mul_i32
+; FUNC-LABEL: {{^}}v_mul_i32:
; SI: V_MUL_LO_I32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
define void @v_mul_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
%b_ptr = getelementptr i32 addrspace(1)* %in, i32 1
@@ -137,14 +137,14 @@ define void @v_mul_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
; so this test just uses FUNC-LABEL to make sure the compiler does not
; crash with a 'failed to select' error.
-; FUNC-LABEL: @s_mul_i64:
+; FUNC-LABEL: {{^}}s_mul_i64:
define void @s_mul_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
%mul = mul i64 %a, %b
store i64 %mul, i64 addrspace(1)* %out, align 8
ret void
}
-; FUNC-LABEL: @v_mul_i64
+; FUNC-LABEL: {{^}}v_mul_i64:
; SI: V_MUL_LO_I32
define void @v_mul_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr) {
%a = load i64 addrspace(1)* %aptr, align 8
@@ -154,7 +154,7 @@ define void @v_mul_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 addr
ret void
}
-; FUNC-LABEL: @mul32_in_branch
+; FUNC-LABEL: {{^}}mul32_in_branch:
; SI: S_MUL_I32
define void @mul32_in_branch(i32 addrspace(1)* %out, i32 addrspace(1)* %in, i32 %a, i32 %b, i32 %c) {
entry:
@@ -175,7 +175,7 @@ endif:
ret void
}
-; FUNC-LABEL: @mul64_in_branch
+; FUNC-LABEL: {{^}}mul64_in_branch:
; SI-DAG: S_MUL_I32
; SI-DAG: V_MUL_HI_U32
; SI: S_ENDPGM
diff --git a/test/CodeGen/R600/mul_int24.ll b/test/CodeGen/R600/mul_int24.ll
index 046911ba147..177505cb5bf 100644
--- a/test/CodeGen/R600/mul_int24.ll
+++ b/test/CodeGen/R600/mul_int24.ll
@@ -2,7 +2,7 @@
; RUN: llc < %s -march=r600 -mcpu=cayman | FileCheck %s --check-prefix=CM --check-prefix=FUNC
; RUN: llc < %s -march=r600 -mcpu=SI -verify-machineinstrs | FileCheck %s --check-prefix=SI --check-prefix=FUNC
-; FUNC-LABEL: @i32_mul24
+; FUNC-LABEL: {{^}}i32_mul24:
; Signed 24-bit multiply is not supported on pre-Cayman GPUs.
; EG: MULLO_INT
; Make sure we are not masking the inputs
diff --git a/test/CodeGen/R600/mul_uint24.ll b/test/CodeGen/R600/mul_uint24.ll
index 72bbe0f938a..3904fac6397 100644
--- a/test/CodeGen/R600/mul_uint24.ll
+++ b/test/CodeGen/R600/mul_uint24.ll
@@ -2,7 +2,7 @@
; RUN: llc < %s -march=r600 -mcpu=cayman | FileCheck %s --check-prefix=EG --check-prefix=FUNC
; RUN: llc < %s -march=r600 -mcpu=SI -verify-machineinstrs | FileCheck %s --check-prefix=SI --check-prefix=FUNC
-; FUNC-LABEL: @u32_mul24
+; FUNC-LABEL: {{^}}u32_mul24:
; EG: MUL_UINT24 {{[* ]*}}T{{[0-9]\.[XYZW]}}, KC0[2].Z, KC0[2].W
; SI: V_MUL_U32_U24
@@ -17,7 +17,7 @@ entry:
ret void
}
-; FUNC-LABEL: @i16_mul24
+; FUNC-LABEL: {{^}}i16_mul24:
; EG: MUL_UINT24 {{[* ]*}}T{{[0-9]}}.[[MUL_CHAN:[XYZW]]]
; The result must be sign-extended
; EG: BFE_INT {{[* ]*}}T{{[0-9]}}.{{[XYZW]}}, PV.[[MUL_CHAN]], 0.0, literal.x
@@ -32,7 +32,7 @@ entry:
ret void
}
-; FUNC-LABEL: @i8_mul24
+; FUNC-LABEL: {{^}}i8_mul24:
; EG: MUL_UINT24 {{[* ]*}}T{{[0-9]}}.[[MUL_CHAN:[XYZW]]]
; The result must be sign-extended
; EG: BFE_INT {{[* ]*}}T{{[0-9]}}.{{[XYZW]}}, PV.[[MUL_CHAN]], 0.0, literal.x
@@ -48,7 +48,7 @@ entry:
}
; Multiply with 24-bit inputs and 64-bit output
-; FUNC_LABEL: @mul24_i64
+; FUNC-LABEL: {{^}}mul24_i64:
; EG: MUL_UINT24
; EG: MULHI
; SI: V_MUL_U32_U24
diff --git a/test/CodeGen/R600/no-initializer-constant-addrspace.ll b/test/CodeGen/R600/no-initializer-constant-addrspace.ll
index ab82e7ee799..cd2dca3f9e4 100644
--- a/test/CodeGen/R600/no-initializer-constant-addrspace.ll
+++ b/test/CodeGen/R600/no-initializer-constant-addrspace.ll
@@ -3,7 +3,7 @@
@extern_const_addrspace = external unnamed_addr addrspace(2) constant [5 x i32], align 4
-; FUNC-LABEL: @load_extern_const_init
+; FUNC-LABEL: {{^}}load_extern_const_init:
define void @load_extern_const_init(i32 addrspace(1)* %out) nounwind {
%val = load i32 addrspace(2)* getelementptr ([5 x i32] addrspace(2)* @extern_const_addrspace, i64 0, i64 3), align 4
store i32 %val, i32 addrspace(1)* %out, align 4
@@ -12,7 +12,7 @@ define void @load_extern_const_init(i32 addrspace(1)* %out) nounwind {
@undef_const_addrspace = unnamed_addr addrspace(2) constant [5 x i32] undef, align 4
-; FUNC-LABEL: @load_undef_const_init
+; FUNC-LABEL: {{^}}load_undef_const_init:
define void @load_undef_const_init(i32 addrspace(1)* %out) nounwind {
%val = load i32 addrspace(2)* getelementptr ([5 x i32] addrspace(2)* @undef_const_addrspace, i64 0, i64 3), align 4
store i32 %val, i32 addrspace(1)* %out, align 4
diff --git a/test/CodeGen/R600/operand-spacing.ll b/test/CodeGen/R600/operand-spacing.ll
index d9e153d4fe4..bd80414ba6d 100644
--- a/test/CodeGen/R600/operand-spacing.ll
+++ b/test/CodeGen/R600/operand-spacing.ll
@@ -2,7 +2,7 @@
; Make sure there isn't an extra space between the instruction name and first operands.
-; SI-LABEL: @add_f32
+; SI-LABEL: {{^}}add_f32:
; SI-DAG: S_LOAD_DWORD [[SREGA:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
; SI-DAG: S_LOAD_DWORD [[SREGB:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xc
; SI: V_MOV_B32_e32 [[VREGB:v[0-9]+]], [[SREGB]]
diff --git a/test/CodeGen/R600/or.ll b/test/CodeGen/R600/or.ll
index 913fda128db..dbe3326fb24 100644
--- a/test/CodeGen/R600/or.ll
+++ b/test/CodeGen/R600/or.ll
@@ -1,11 +1,11 @@
;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck --check-prefix=EG %s
;RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck --check-prefix=SI %s
-; EG-LABEL: @or_v2i32
+; EG-LABEL: {{^}}or_v2i32:
; EG: OR_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; EG: OR_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-; SI-LABEL: @or_v2i32
+; SI-LABEL: {{^}}or_v2i32:
; SI: V_OR_B32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
; SI: V_OR_B32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
@@ -18,13 +18,13 @@ define void @or_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in)
ret void
}
-; EG-LABEL: @or_v4i32
+; EG-LABEL: {{^}}or_v4i32:
; EG: OR_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; EG: OR_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; EG: OR_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; EG: OR_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-; SI-LABEL: @or_v4i32
+; SI-LABEL: {{^}}or_v4i32:
; SI: V_OR_B32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
; SI: V_OR_B32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
; SI: V_OR_B32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
@@ -39,7 +39,7 @@ define void @or_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in)
ret void
}
-; SI-LABEL: @scalar_or_i32
+; SI-LABEL: {{^}}scalar_or_i32:
; SI: S_OR_B32
define void @scalar_or_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) {
%or = or i32 %a, %b
@@ -47,7 +47,7 @@ define void @scalar_or_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) {
ret void
}
-; SI-LABEL: @vector_or_i32
+; SI-LABEL: {{^}}vector_or_i32:
; SI: V_OR_B32_e32 v{{[0-9]}}
define void @vector_or_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %a, i32 %b) {
%loada = load i32 addrspace(1)* %a
@@ -56,7 +56,7 @@ define void @vector_or_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %a, i32 %b)
ret void
}
-; SI-LABEL: @scalar_or_literal_i32
+; SI-LABEL: {{^}}scalar_or_literal_i32:
; SI: S_OR_B32 s{{[0-9]+}}, s{{[0-9]+}}, 0x1869f
define void @scalar_or_literal_i32(i32 addrspace(1)* %out, i32 %a) {
%or = or i32 %a, 99999
@@ -64,7 +64,7 @@ define void @scalar_or_literal_i32(i32 addrspace(1)* %out, i32 %a) {
ret void
}
-; SI-LABEL: @vector_or_literal_i32
+; SI-LABEL: {{^}}vector_or_literal_i32:
; SI: V_OR_B32_e32 v{{[0-9]+}}, 0xffff, v{{[0-9]+}}
define void @vector_or_literal_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %a, i32 addrspace(1)* %b) {
%loada = load i32 addrspace(1)* %a, align 4
@@ -73,7 +73,7 @@ define void @vector_or_literal_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %a,
ret void
}
-; SI-LABEL: @vector_or_inline_immediate_i32
+; SI-LABEL: {{^}}vector_or_inline_immediate_i32:
; SI: V_OR_B32_e32 v{{[0-9]+}}, 4, v{{[0-9]+}}
define void @vector_or_inline_immediate_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %a, i32 addrspace(1)* %b) {
%loada = load i32 addrspace(1)* %a, align 4
@@ -82,10 +82,10 @@ define void @vector_or_inline_immediate_i32(i32 addrspace(1)* %out, i32 addrspac
ret void
}
-; EG-LABEL: @scalar_or_i64
+; EG-LABEL: {{^}}scalar_or_i64:
; EG-DAG: OR_INT * T{{[0-9]\.[XYZW]}}, KC0[2].W, KC0[3].Y
; EG-DAG: OR_INT * T{{[0-9]\.[XYZW]}}, KC0[3].X, KC0[3].Z
-; SI-LABEL: @scalar_or_i64
+; SI-LABEL: {{^}}scalar_or_i64:
; SI: S_OR_B64
define void @scalar_or_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) {
%or = or i64 %a, %b
@@ -93,7 +93,7 @@ define void @scalar_or_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) {
ret void
}
-; SI-LABEL: @vector_or_i64
+; SI-LABEL: {{^}}vector_or_i64:
; SI: V_OR_B32_e32 v{{[0-9]}}
; SI: V_OR_B32_e32 v{{[0-9]}}
define void @vector_or_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %a, i64 addrspace(1)* %b) {
@@ -104,7 +104,7 @@ define void @vector_or_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %a, i64 add
ret void
}
-; SI-LABEL: @scalar_vector_or_i64
+; SI-LABEL: {{^}}scalar_vector_or_i64:
; SI: V_OR_B32_e32 v{{[0-9]}}
; SI: V_OR_B32_e32 v{{[0-9]}}
define void @scalar_vector_or_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %a, i64 %b) {
@@ -114,7 +114,7 @@ define void @scalar_vector_or_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %a,
ret void
}
-; SI-LABEL: @vector_or_i64_loadimm
+; SI-LABEL: {{^}}vector_or_i64_loadimm:
; SI-DAG: S_MOV_B32 [[LO_S_IMM:s[0-9]+]], 0xdf77987f
; SI-DAG: S_MOV_B32 [[HI_S_IMM:s[0-9]+]], 0x146f
; SI-DAG: BUFFER_LOAD_DWORDX2 v{{\[}}[[LO_VREG:[0-9]+]]:[[HI_VREG:[0-9]+]]{{\]}},
@@ -129,7 +129,7 @@ define void @vector_or_i64_loadimm(i64 addrspace(1)* %out, i64 addrspace(1)* %a,
}
; FIXME: The or 0 should really be removed.
-; SI-LABEL: @vector_or_i64_imm
+; SI-LABEL: {{^}}vector_or_i64_imm:
; SI: BUFFER_LOAD_DWORDX2 v{{\[}}[[LO_VREG:[0-9]+]]:[[HI_VREG:[0-9]+]]{{\]}},
; SI: V_OR_B32_e32 {{v[0-9]+}}, 8, v[[LO_VREG]]
; SI: V_OR_B32_e32 {{v[0-9]+}}, 0, {{.*}}
@@ -141,7 +141,7 @@ define void @vector_or_i64_imm(i64 addrspace(1)* %out, i64 addrspace(1)* %a, i64
ret void
}
-; SI-LABEL: @trunc_i64_or_to_i32
+; SI-LABEL: {{^}}trunc_i64_or_to_i32:
; SI: S_LOAD_DWORD s[[SREG0:[0-9]+]]
; SI: S_LOAD_DWORD s[[SREG1:[0-9]+]]
; SI: S_OR_B32 s[[SRESULT:[0-9]+]], s[[SREG1]], s[[SREG0]]
@@ -154,10 +154,10 @@ define void @trunc_i64_or_to_i32(i32 addrspace(1)* %out, i64 %a, i64 %b) {
ret void
}
-; EG-CHECK: @or_i1
+; EG-CHECK: {{^}}or_i1:
; EG-CHECK: OR_INT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW], PS}}
-; SI-CHECK: @or_i1
+; SI-CHECK: {{^}}or_i1:
; SI-CHECK: S_OR_B64 s[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}]
define void @or_i1(float addrspace(1)* %out, float addrspace(1)* %in0, float addrspace(1)* %in1) {
%a = load float addrspace(1) * %in0
diff --git a/test/CodeGen/R600/packetizer.ll b/test/CodeGen/R600/packetizer.ll
index 0a405c57ea9..49a7c0df748 100644
--- a/test/CodeGen/R600/packetizer.ll
+++ b/test/CodeGen/R600/packetizer.ll
@@ -1,7 +1,7 @@
; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
; RUN: llc < %s -march=r600 -mcpu=cayman | FileCheck %s
-; CHECK: @test
+; CHECK: {{^}}test:
; CHECK: BIT_ALIGN_INT T{{[0-9]}}.X
; CHECK: BIT_ALIGN_INT T{{[0-9]}}.Y
; CHECK: BIT_ALIGN_INT T{{[0-9]}}.Z
diff --git a/test/CodeGen/R600/predicate-dp4.ll b/test/CodeGen/R600/predicate-dp4.ll
index e48d6a7aa9a..6bc18759435 100644
--- a/test/CodeGen/R600/predicate-dp4.ll
+++ b/test/CodeGen/R600/predicate-dp4.ll
@@ -1,6 +1,6 @@
;RUN: llc < %s -march=r600 -mcpu=cayman
-; CHECK-LABEL: @main
+; CHECK-LABEL: {{^}}main:
; CHECK: PRED_SETE_INT * Pred,
; CHECK: DOT4 T{{[0-9]+}}.X, T0.X, T0.X, Pred_sel_one
define void @main(<4 x float> inreg) #0 {
diff --git a/test/CodeGen/R600/predicates.ll b/test/CodeGen/R600/predicates.ll
index 902508ff9e0..0ce74d97ba8 100644
--- a/test/CodeGen/R600/predicates.ll
+++ b/test/CodeGen/R600/predicates.ll
@@ -3,7 +3,7 @@
; These tests make sure the compiler is optimizing branches using predicates
; when it is legal to do so.
-; CHECK: @simple_if
+; CHECK: {{^}}simple_if:
; CHECK: PRED_SET{{[EGN][ET]*}}_INT * Pred,
; CHECK: LSHL * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}, 1, Pred_sel
define void @simple_if(i32 addrspace(1)* %out, i32 %in) {
@@ -21,7 +21,7 @@ ENDIF:
ret void
}
-; CHECK: @simple_if_else
+; CHECK: {{^}}simple_if_else:
; CHECK: PRED_SET{{[EGN][ET]*}}_INT * Pred,
; CHECK: LSH{{[LR] \* T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}, 1, Pred_sel
; CHECK: LSH{{[LR] \* T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}, 1, Pred_sel
@@ -44,7 +44,7 @@ ENDIF:
ret void
}
-; CHECK: @nested_if
+; CHECK: {{^}}nested_if:
; CHECK: ALU_PUSH_BEFORE
; CHECK: JUMP
; CHECK: POP
@@ -71,7 +71,7 @@ ENDIF:
ret void
}
-; CHECK: @nested_if_else
+; CHECK: {{^}}nested_if_else:
; CHECK: ALU_PUSH_BEFORE
; CHECK: JUMP
; CHECK: POP
diff --git a/test/CodeGen/R600/private-memory.ll b/test/CodeGen/R600/private-memory.ll
index 505ef6c51df..820fe699022 100644
--- a/test/CodeGen/R600/private-memory.ll
+++ b/test/CodeGen/R600/private-memory.ll
@@ -4,7 +4,7 @@
declare i32 @llvm.r600.read.tidig.x() nounwind readnone
-; FUNC-LABEL: @mova_same_clause
+; FUNC-LABEL: {{^}}mova_same_clause:
; R600: LDS_WRITE
; R600: LDS_WRITE
@@ -45,7 +45,7 @@ entry:
; XXX: This generated code has unnecessary MOVs; we should be able to optimize
; this.
-; FUNC-LABEL: @multiple_structs
+; FUNC-LABEL: {{^}}multiple_structs:
; R600-NOT: MOVA_INT
; SI-NOT: V_MOVREL
; SI-NOT: V_MOVREL
@@ -76,7 +76,7 @@ entry:
; loads and stores should be lowered to copies, so there shouldn't be any
; MOVA instructions.
-; FUNC-LABEL: @direct_loop
+; FUNC-LABEL: {{^}}direct_loop:
; R600-NOT: MOVA_INT
; SI-NOT: V_MOVREL
@@ -112,7 +112,7 @@ for.end:
ret void
}
-; FUNC-LABEL: @short_array
+; FUNC-LABEL: {{^}}short_array:
; R600: MOVA_INT
@@ -133,7 +133,7 @@ entry:
ret void
}
-; FUNC-LABEL: @char_array
+; FUNC-LABEL: {{^}}char_array:
; R600: MOVA_INT
@@ -156,7 +156,7 @@ entry:
; Make sure we don't overwrite workitem information with private memory
-; FUNC-LABEL: @work_item_info
+; FUNC-LABEL: {{^}}work_item_info:
; R600-NOT: MOV T0.X
; Additional check in case the move ends up in the last slot
; R600-NOT: MOV * T0.X
@@ -179,7 +179,7 @@ entry:
; Test that two stack objects are not stored in the same register
; The second stack object should be in T3.X
-; FUNC-LABEL: @no_overlap
+; FUNC-LABEL: {{^}}no_overlap:
; R600_CHECK: MOV
; R600_CHECK: [[CHAN:[XYZW]]]+
; R600-NOT: [[CHAN]]+
diff --git a/test/CodeGen/R600/r600-encoding.ll b/test/CodeGen/R600/r600-encoding.ll
index b760c882f4e..112cdac0a1b 100644
--- a/test/CodeGen/R600/r600-encoding.ll
+++ b/test/CodeGen/R600/r600-encoding.ll
@@ -4,10 +4,10 @@
; The earliest R600 GPUs have a slightly different encoding than the rest of
; the VLIW4/5 GPUs.
-; EG-CHECK: @test
+; EG-CHECK: {{^}}test:
; EG-CHECK: MUL_IEEE {{[ *TXYZWPVxyzw.,0-9]+}} ; encoding: [{{0x[0-9a-f]+,0x[0-9a-f]+,0x[0-9a-f]+,0x[0-9a-f]+,0x10,0x01,0x[0-9a-f]+,0x[0-9a-f]+}}]
-; R600-CHECK: @test
+; R600-CHECK: {{^}}test:
; R600-CHECK: MUL_IEEE {{[ *TXYZWPVxyzw.,0-9]+}} ; encoding: [{{0x[0-9a-f]+,0x[0-9a-f]+,0x[0-9a-f]+,0x[0-9a-f]+,0x10,0x02,0x[0-9a-f]+,0x[0-9a-f]+}}]
define void @test(<4 x float> inreg %reg0) #0 {
diff --git a/test/CodeGen/R600/register-count-comments.ll b/test/CodeGen/R600/register-count-comments.ll
index 6179013f5fc..61d1b5e8d0b 100644
--- a/test/CodeGen/R600/register-count-comments.ll
+++ b/test/CodeGen/R600/register-count-comments.ll
@@ -2,7 +2,7 @@
declare i32 @llvm.SI.tid() nounwind readnone
-; SI-LABEL: @foo:
+; SI-LABEL: {{^}}foo:
; SI: .section .AMDGPU.csdata
; SI: ; Kernel info:
; SI: ; NumSgprs: {{[0-9]+}}
@@ -19,7 +19,7 @@ define void @foo(i32 addrspace(1)* noalias %out, i32 addrspace(1)* %abase, i32 a
ret void
}
-; SI-LABEL: @one_vgpr_used
+; SI-LABEL: {{^}}one_vgpr_used:
; SI: NumVgprs: 1
define void @one_vgpr_used(i32 addrspace(1)* %out, i32 %x) nounwind {
store i32 %x, i32 addrspace(1)* %out, align 4
diff --git a/test/CodeGen/R600/reorder-stores.ll b/test/CodeGen/R600/reorder-stores.ll
index be2fcc6849f..77848ddd67e 100644
--- a/test/CodeGen/R600/reorder-stores.ll
+++ b/test/CodeGen/R600/reorder-stores.ll
@@ -1,6 +1,6 @@
; RUN: llc -march=r600 -mcpu=SI < %s | FileCheck -check-prefix=SI %s
-; SI-LABEL: @no_reorder_v2f64_global_load_store
+; SI-LABEL: {{^}}no_reorder_v2f64_global_load_store:
; SI: BUFFER_LOAD_DWORDX2
; SI: BUFFER_LOAD_DWORDX2
; SI: BUFFER_LOAD_DWORDX2
@@ -18,7 +18,7 @@ define void @no_reorder_v2f64_global_load_store(<2 x double> addrspace(1)* nocap
ret void
}
-; SI-LABEL: @no_reorder_scalarized_v2f64_local_load_store
+; SI-LABEL: {{^}}no_reorder_scalarized_v2f64_local_load_store:
; SI: DS_READ_B64
; SI: DS_READ_B64
; SI: DS_WRITE_B64
@@ -32,7 +32,7 @@ define void @no_reorder_scalarized_v2f64_local_load_store(<2 x double> addrspace
ret void
}
-; SI-LABEL: @no_reorder_split_v8i32_global_load_store
+; SI-LABEL: {{^}}no_reorder_split_v8i32_global_load_store:
; SI: BUFFER_LOAD_DWORD
; SI: BUFFER_LOAD_DWORD
; SI: BUFFER_LOAD_DWORD
@@ -82,7 +82,7 @@ define void @no_reorder_split_v8i32_global_load_store(<8 x i32> addrspace(1)* no
ret void
}
-; SI-LABEL: @no_reorder_extload_64
+; SI-LABEL: {{^}}no_reorder_extload_64:
; SI: DS_READ_B64
; SI: DS_READ_B64
; SI: DS_WRITE_B64
diff --git a/test/CodeGen/R600/rotl.i64.ll b/test/CodeGen/R600/rotl.i64.ll
index a221ce3f8bf..a10620f6180 100644
--- a/test/CodeGen/R600/rotl.i64.ll
+++ b/test/CodeGen/R600/rotl.i64.ll
@@ -1,6 +1,6 @@
; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-; FUNC-LABEL: @s_rotl_i64:
+; FUNC-LABEL: {{^}}s_rotl_i64:
; SI-DAG: S_LSHL_B64
; SI-DAG: S_SUB_I32
; SI-DAG: S_LSHR_B64
@@ -16,7 +16,7 @@ entry:
ret void
}
-; FUNC-LABEL: @v_rotl_i64:
+; FUNC-LABEL: {{^}}v_rotl_i64:
; SI-DAG: V_LSHL_B64
; SI-DAG: V_SUB_I32
; SI: V_LSHR_B64
diff --git a/test/CodeGen/R600/rotl.ll b/test/CodeGen/R600/rotl.ll
index a9dee8ca78c..f54c79188d8 100644
--- a/test/CodeGen/R600/rotl.ll
+++ b/test/CodeGen/R600/rotl.ll
@@ -1,7 +1,7 @@
; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck --check-prefix=R600 -check-prefix=FUNC %s
; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-; FUNC-LABEL: @rotl_i32:
+; FUNC-LABEL: {{^}}rotl_i32:
; R600: SUB_INT {{\** T[0-9]+\.[XYZW]}}, literal.x
; R600-NEXT: 32
; R600: BIT_ALIGN_INT {{T[0-9]+\.[XYZW]}}, KC0[2].Z, KC0[2].Z, PV.{{[XYZW]}}
@@ -19,7 +19,7 @@ entry:
ret void
}
-; FUNC-LABEL: @rotl_v2i32
+; FUNC-LABEL: {{^}}rotl_v2i32:
; SI-DAG: S_SUB_I32
; SI-DAG: S_SUB_I32
; SI-DAG: V_ALIGNBIT_B32
@@ -35,7 +35,7 @@ entry:
ret void
}
-; FUNC-LABEL: @rotl_v4i32
+; FUNC-LABEL: {{^}}rotl_v4i32:
; SI-DAG: S_SUB_I32
; SI-DAG: V_ALIGNBIT_B32
; SI-DAG: S_SUB_I32
diff --git a/test/CodeGen/R600/rotr.i64.ll b/test/CodeGen/R600/rotr.i64.ll
index d15fbc348bb..56910e42b1d 100644
--- a/test/CodeGen/R600/rotr.i64.ll
+++ b/test/CodeGen/R600/rotr.i64.ll
@@ -1,6 +1,6 @@
; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-; FUNC-LABEL: @s_rotr_i64
+; FUNC-LABEL: {{^}}s_rotr_i64:
; SI-DAG: S_SUB_I32
; SI-DAG: S_LSHR_B64
; SI-DAG: S_LSHL_B64
@@ -15,7 +15,7 @@ entry:
ret void
}
-; FUNC-LABEL: @v_rotr_i64
+; FUNC-LABEL: {{^}}v_rotr_i64:
; SI-DAG: V_SUB_I32
; SI-DAG: V_LSHR_B64
; SI-DAG: V_LSHL_B64
@@ -33,7 +33,7 @@ entry:
ret void
}
-; FUNC-LABEL: @s_rotr_v2i64
+; FUNC-LABEL: {{^}}s_rotr_v2i64:
define void @s_rotr_v2i64(<2 x i64> addrspace(1)* %in, <2 x i64> %x, <2 x i64> %y) {
entry:
%tmp0 = sub <2 x i64> <i64 64, i64 64>, %y
@@ -44,7 +44,7 @@ entry:
ret void
}
-; FUNC-LABEL: @v_rotr_v2i64
+; FUNC-LABEL: {{^}}v_rotr_v2i64:
define void @v_rotr_v2i64(<2 x i64> addrspace(1)* %in, <2 x i64> addrspace(1)* %xptr, <2 x i64> addrspace(1)* %yptr) {
entry:
%x = load <2 x i64> addrspace(1)* %xptr, align 8
diff --git a/test/CodeGen/R600/rotr.ll b/test/CodeGen/R600/rotr.ll
index a5a4da48073..0dd0da08c82 100644
--- a/test/CodeGen/R600/rotr.ll
+++ b/test/CodeGen/R600/rotr.ll
@@ -1,7 +1,7 @@
; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck --check-prefix=R600 -check-prefix=FUNC %s
; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-; FUNC-LABEL: @rotr_i32:
+; FUNC-LABEL: {{^}}rotr_i32:
; R600: BIT_ALIGN_INT
; SI: V_ALIGNBIT_B32
@@ -15,7 +15,7 @@ entry:
ret void
}
-; FUNC-LABEL: @rotr_v2i32:
+; FUNC-LABEL: {{^}}rotr_v2i32:
; R600: BIT_ALIGN_INT
; R600: BIT_ALIGN_INT
@@ -31,7 +31,7 @@ entry:
ret void
}
-; FUNC-LABEL: @rotr_v4i32:
+; FUNC-LABEL: {{^}}rotr_v4i32:
; R600: BIT_ALIGN_INT
; R600: BIT_ALIGN_INT
; R600: BIT_ALIGN_INT
diff --git a/test/CodeGen/R600/rsq.ll b/test/CodeGen/R600/rsq.ll
index a9f3013d3e4..6b583094787 100644
--- a/test/CodeGen/R600/rsq.ll
+++ b/test/CodeGen/R600/rsq.ll
@@ -4,7 +4,7 @@
declare float @llvm.sqrt.f32(float) nounwind readnone
declare double @llvm.sqrt.f64(double) nounwind readnone
-; SI-LABEL: @rsq_f32
+; SI-LABEL: {{^}}rsq_f32:
; SI: V_RSQ_F32_e32
; SI: S_ENDPGM
define void @rsq_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) nounwind {
@@ -15,7 +15,7 @@ define void @rsq_f32(float addrspace(1)* noalias %out, float addrspace(1)* noali
ret void
}
-; SI-LABEL: @rsq_f64
+; SI-LABEL: {{^}}rsq_f64:
; SI-UNSAFE: V_RSQ_F64_e32
; SI-SAFE: V_SQRT_F64_e32
; SI: S_ENDPGM
@@ -27,7 +27,7 @@ define void @rsq_f64(double addrspace(1)* noalias %out, double addrspace(1)* noa
ret void
}
-; SI-LABEL: @rsq_f32_sgpr
+; SI-LABEL: {{^}}rsq_f32_sgpr:
; SI: V_RSQ_F32_e32 {{v[0-9]+}}, {{s[0-9]+}}
; SI: S_ENDPGM
define void @rsq_f32_sgpr(float addrspace(1)* noalias %out, float %val) nounwind {
diff --git a/test/CodeGen/R600/saddo.ll b/test/CodeGen/R600/saddo.ll
index c80480e8551..93f3c74d4e3 100644
--- a/test/CodeGen/R600/saddo.ll
+++ b/test/CodeGen/R600/saddo.ll
@@ -4,7 +4,7 @@
declare { i32, i1 } @llvm.sadd.with.overflow.i32(i32, i32) nounwind readnone
declare { i64, i1 } @llvm.sadd.with.overflow.i64(i64, i64) nounwind readnone
-; FUNC-LABEL: @saddo_i64_zext
+; FUNC-LABEL: {{^}}saddo_i64_zext:
define void @saddo_i64_zext(i64 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
%sadd = call { i64, i1 } @llvm.sadd.with.overflow.i64(i64 %a, i64 %b) nounwind
%val = extractvalue { i64, i1 } %sadd, 0
@@ -15,7 +15,7 @@ define void @saddo_i64_zext(i64 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
ret void
}
-; FUNC-LABEL: @s_saddo_i32
+; FUNC-LABEL: {{^}}s_saddo_i32:
define void @s_saddo_i32(i32 addrspace(1)* %out, i1 addrspace(1)* %carryout, i32 %a, i32 %b) nounwind {
%sadd = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 %b) nounwind
%val = extractvalue { i32, i1 } %sadd, 0
@@ -25,7 +25,7 @@ define void @s_saddo_i32(i32 addrspace(1)* %out, i1 addrspace(1)* %carryout, i32
ret void
}
-; FUNC-LABEL: @v_saddo_i32
+; FUNC-LABEL: {{^}}v_saddo_i32:
define void @v_saddo_i32(i32 addrspace(1)* %out, i1 addrspace(1)* %carryout, i32 addrspace(1)* %aptr, i32 addrspace(1)* %bptr) nounwind {
%a = load i32 addrspace(1)* %aptr, align 4
%b = load i32 addrspace(1)* %bptr, align 4
@@ -37,7 +37,7 @@ define void @v_saddo_i32(i32 addrspace(1)* %out, i1 addrspace(1)* %carryout, i32
ret void
}
-; FUNC-LABEL: @s_saddo_i64
+; FUNC-LABEL: {{^}}s_saddo_i64:
define void @s_saddo_i64(i64 addrspace(1)* %out, i1 addrspace(1)* %carryout, i64 %a, i64 %b) nounwind {
%sadd = call { i64, i1 } @llvm.sadd.with.overflow.i64(i64 %a, i64 %b) nounwind
%val = extractvalue { i64, i1 } %sadd, 0
@@ -47,7 +47,7 @@ define void @s_saddo_i64(i64 addrspace(1)* %out, i1 addrspace(1)* %carryout, i64
ret void
}
-; FUNC-LABEL: @v_saddo_i64
+; FUNC-LABEL: {{^}}v_saddo_i64:
; SI: V_ADD_I32
; SI: V_ADDC_U32
define void @v_saddo_i64(i64 addrspace(1)* %out, i1 addrspace(1)* %carryout, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr) nounwind {
diff --git a/test/CodeGen/R600/salu-to-valu.ll b/test/CodeGen/R600/salu-to-valu.ll
index 98ccff68599..ba983196968 100644
--- a/test/CodeGen/R600/salu-to-valu.ll
+++ b/test/CodeGen/R600/salu-to-valu.ll
@@ -7,7 +7,7 @@
; sgpr register pair and use that for the pointer operand
; (low 64-bits of srsrc).
-; CHECK-LABEL: @mubuf
+; CHECK-LABEL: {{^}}mubuf:
; Make sure we aren't using VGPRs for the source operand of S_MOV_B64
; CHECK-NOT: S_MOV_B64 s[{{[0-9]+:[0-9]+}}], v
@@ -49,7 +49,7 @@ attributes #1 = { nounwind readnone }
; Test moving an SMRD instruction to the VALU
-; CHECK-LABEL: @smrd_valu
+; CHECK-LABEL: {{^}}smrd_valu:
; CHECK: BUFFER_LOAD_DWORD [[OUT:v[0-9]+]]
; CHECK: BUFFER_STORE_DWORD [[OUT]]
@@ -77,7 +77,7 @@ endif:
; Test moving an SMRD with an immediate offset to the VALU
-; CHECK-LABEL: @smrd_valu2
+; CHECK-LABEL: {{^}}smrd_valu2:
; CHECK: BUFFER_LOAD_DWORD
define void @smrd_valu2(i32 addrspace(1)* %out, [8 x i32] addrspace(2)* %in) {
entry:
@@ -89,7 +89,7 @@ entry:
ret void
}
-; CHECK-LABEL: @s_load_imm_v8i32
+; CHECK-LABEL: {{^}}s_load_imm_v8i32:
; CHECK: BUFFER_LOAD_DWORDX4
; CHECK: BUFFER_LOAD_DWORDX4
define void @s_load_imm_v8i32(<8 x i32> addrspace(1)* %out, i32 addrspace(2)* nocapture readonly %in) {
@@ -102,7 +102,7 @@ entry:
ret void
}
-; CHECK-LABEL: @s_load_imm_v16i32
+; CHECK-LABEL: {{^}}s_load_imm_v16i32:
; CHECK: BUFFER_LOAD_DWORDX4
; CHECK: BUFFER_LOAD_DWORDX4
; CHECK: BUFFER_LOAD_DWORDX4
diff --git a/test/CodeGen/R600/scalar_to_vector.ll b/test/CodeGen/R600/scalar_to_vector.ll
index bcccb065818..d67cbaf1fed 100644
--- a/test/CodeGen/R600/scalar_to_vector.ll
+++ b/test/CodeGen/R600/scalar_to_vector.ll
@@ -1,7 +1,7 @@
; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-; FUNC-LABEL: @scalar_to_vector_v2i32
+; FUNC-LABEL: {{^}}scalar_to_vector_v2i32:
; SI: BUFFER_LOAD_DWORD [[VAL:v[0-9]+]],
; SI: V_LSHRREV_B32_e32 [[RESULT:v[0-9]+]], 16, [[VAL]]
; SI: BUFFER_STORE_SHORT [[RESULT]]
@@ -17,7 +17,7 @@ define void @scalar_to_vector_v2i32(<4 x i16> addrspace(1)* %out, i32 addrspace(
ret void
}
-; FUNC-LABEL: @scalar_to_vector_v2f32
+; FUNC-LABEL: {{^}}scalar_to_vector_v2f32:
; SI: BUFFER_LOAD_DWORD [[VAL:v[0-9]+]],
; SI: V_LSHRREV_B32_e32 [[RESULT:v[0-9]+]], 16, [[VAL]]
; SI: BUFFER_STORE_SHORT [[RESULT]]
diff --git a/test/CodeGen/R600/schedule-global-loads.ll b/test/CodeGen/R600/schedule-global-loads.ll
index fcff65f02ef..5ee1e821b78 100644
--- a/test/CodeGen/R600/schedule-global-loads.ll
+++ b/test/CodeGen/R600/schedule-global-loads.ll
@@ -8,7 +8,7 @@ declare i32 @llvm.r600.read.tidig.x() #1
; seems the only thing areLoadsFromSameBasePtr is accomplishing is
; ordering the loads so that the lower address loads come first.
-; FUNC-LABEL: @cluster_global_arg_loads
+; FUNC-LABEL: {{^}}cluster_global_arg_loads:
; SI-DAG: BUFFER_LOAD_DWORD [[REG0:v[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0{{$}}
; SI-DAG: BUFFER_LOAD_DWORD [[REG1:v[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0 offset:0x4
; SI: BUFFER_STORE_DWORD [[REG0]]
@@ -24,7 +24,7 @@ define void @cluster_global_arg_loads(i32 addrspace(1)* %out0, i32 addrspace(1)*
; Test for a crash in SIInstrInfo::areLoadsFromSameBasePtr() when checking
; an MUBUF load which does not have a vaddr operand.
-; FUNC-LABEL: @same_base_ptr_crash
+; FUNC-LABEL: {{^}}same_base_ptr_crash:
; SI: BUFFER_LOAD_DWORD
; SI: BUFFER_LOAD_DWORD
define void @same_base_ptr_crash(i32 addrspace(1)* %out, i32 addrspace(1)* %in, i32 %offset) {
diff --git a/test/CodeGen/R600/schedule-kernel-arg-loads.ll b/test/CodeGen/R600/schedule-kernel-arg-loads.ll
index 34b709810a9..01bef00419c 100644
--- a/test/CodeGen/R600/schedule-kernel-arg-loads.ll
+++ b/test/CodeGen/R600/schedule-kernel-arg-loads.ll
@@ -1,6 +1,6 @@
; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=FUNC -check-prefix=SI %s
-; FUNC-LABEL: @cluster_arg_loads
+; FUNC-LABEL: {{^}}cluster_arg_loads:
; SI: S_LOAD_DWORDX2 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0x9
; SI-NEXT: S_LOAD_DWORDX2 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0xb
; SI-NEXT: S_LOAD_DWORD s{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, 0xd
diff --git a/test/CodeGen/R600/schedule-vs-if-nested-loop-failure.ll b/test/CodeGen/R600/schedule-vs-if-nested-loop-failure.ll
index 3d2142d53ec..baac5b5a0a9 100644
--- a/test/CodeGen/R600/schedule-vs-if-nested-loop-failure.ll
+++ b/test/CodeGen/R600/schedule-vs-if-nested-loop-failure.ll
@@ -5,7 +5,7 @@
declare void @llvm.AMDGPU.barrier.local() nounwind noduplicate
-; SI-LABEL: @main(
+; SI-LABEL: {{^}}main(
define void @main(<4 x float> inreg %reg0, <4 x float> inreg %reg1) #0 {
main_body:
%0 = extractelement <4 x float> %reg1, i32 0
diff --git a/test/CodeGen/R600/sdiv.ll b/test/CodeGen/R600/sdiv.ll
index e922d5c1868..d964530d688 100644
--- a/test/CodeGen/R600/sdiv.ll
+++ b/test/CodeGen/R600/sdiv.ll
@@ -10,7 +10,7 @@
; This was fixed by adding an additional pattern in R600Instructions.td to
; match this pattern with a CNDGE_INT.
-; FUNC-LABEL: @sdiv_i32
+; FUNC-LABEL: {{^}}sdiv_i32:
; EG: CF_END
define void @sdiv_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
%den_ptr = getelementptr i32 addrspace(1)* %in, i32 1
@@ -21,7 +21,7 @@ define void @sdiv_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
ret void
}
-; FUNC-LABEL: @sdiv_i32_4
+; FUNC-LABEL: {{^}}sdiv_i32_4:
define void @sdiv_i32_4(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
%num = load i32 addrspace(1) * %in
%result = sdiv i32 %num, 4
@@ -32,7 +32,7 @@ define void @sdiv_i32_4(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
; Multiply by a weird constant to make sure setIntDivIsCheap is
; working.
-; FUNC-LABEL: @slow_sdiv_i32_3435
+; FUNC-LABEL: {{^}}slow_sdiv_i32_3435:
; SI: BUFFER_LOAD_DWORD [[VAL:v[0-9]+]],
; SI: V_MOV_B32_e32 [[MAGIC:v[0-9]+]], 0x98a1930b
; SI: V_MUL_HI_I32 [[TMP:v[0-9]+]], [[VAL]], [[MAGIC]]
diff --git a/test/CodeGen/R600/sdivrem24.ll b/test/CodeGen/R600/sdivrem24.ll
index 55f6d2c09ee..19c451c1d08 100644
--- a/test/CodeGen/R600/sdivrem24.ll
+++ b/test/CodeGen/R600/sdivrem24.ll
@@ -1,7 +1,7 @@
; RUN: llc -march=r600 -mcpu=SI < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
-; FUNC-LABEL: @sdiv24_i8
+; FUNC-LABEL: {{^}}sdiv24_i8:
; SI: V_CVT_F32_I32
; SI: V_CVT_F32_I32
; SI: V_RCP_F32
@@ -20,7 +20,7 @@ define void @sdiv24_i8(i8 addrspace(1)* %out, i8 addrspace(1)* %in) {
ret void
}
-; FUNC-LABEL: @sdiv24_i16
+; FUNC-LABEL: {{^}}sdiv24_i16:
; SI: V_CVT_F32_I32
; SI: V_CVT_F32_I32
; SI: V_RCP_F32
@@ -39,7 +39,7 @@ define void @sdiv24_i16(i16 addrspace(1)* %out, i16 addrspace(1)* %in) {
ret void
}
-; FUNC-LABEL: @sdiv24_i32
+; FUNC-LABEL: {{^}}sdiv24_i32:
; SI: V_CVT_F32_I32
; SI: V_CVT_F32_I32
; SI: V_RCP_F32
@@ -62,7 +62,7 @@ define void @sdiv24_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
ret void
}
-; FUNC-LABEL: @sdiv25_i32
+; FUNC-LABEL: {{^}}sdiv25_i32:
; SI-NOT: V_CVT_F32_I32
; SI-NOT: V_RCP_F32
@@ -81,7 +81,7 @@ define void @sdiv25_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
ret void
}
-; FUNC-LABEL: @test_no_sdiv24_i32_1
+; FUNC-LABEL: {{^}}test_no_sdiv24_i32_1:
; SI-NOT: V_CVT_F32_I32
; SI-NOT: V_RCP_F32
@@ -100,7 +100,7 @@ define void @test_no_sdiv24_i32_1(i32 addrspace(1)* %out, i32 addrspace(1)* %in)
ret void
}
-; FUNC-LABEL: @test_no_sdiv24_i32_2
+; FUNC-LABEL: {{^}}test_no_sdiv24_i32_2:
; SI-NOT: V_CVT_F32_I32
; SI-NOT: V_RCP_F32
@@ -119,7 +119,7 @@ define void @test_no_sdiv24_i32_2(i32 addrspace(1)* %out, i32 addrspace(1)* %in)
ret void
}
-; FUNC-LABEL: @srem24_i8
+; FUNC-LABEL: {{^}}srem24_i8:
; SI: V_CVT_F32_I32
; SI: V_CVT_F32_I32
; SI: V_RCP_F32
@@ -138,7 +138,7 @@ define void @srem24_i8(i8 addrspace(1)* %out, i8 addrspace(1)* %in) {
ret void
}
-; FUNC-LABEL: @srem24_i16
+; FUNC-LABEL: {{^}}srem24_i16:
; SI: V_CVT_F32_I32
; SI: V_CVT_F32_I32
; SI: V_RCP_F32
@@ -157,7 +157,7 @@ define void @srem24_i16(i16 addrspace(1)* %out, i16 addrspace(1)* %in) {
ret void
}
-; FUNC-LABEL: @srem24_i32
+; FUNC-LABEL: {{^}}srem24_i32:
; SI: V_CVT_F32_I32
; SI: V_CVT_F32_I32
; SI: V_RCP_F32
@@ -180,7 +180,7 @@ define void @srem24_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
ret void
}
-; FUNC-LABEL: @srem25_i32
+; FUNC-LABEL: {{^}}srem25_i32:
; SI-NOT: V_CVT_F32_I32
; SI-NOT: V_RCP_F32
@@ -199,7 +199,7 @@ define void @srem25_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
ret void
}
-; FUNC-LABEL: @test_no_srem24_i32_1
+; FUNC-LABEL: {{^}}test_no_srem24_i32_1:
; SI-NOT: V_CVT_F32_I32
; SI-NOT: V_RCP_F32
@@ -218,7 +218,7 @@ define void @test_no_srem24_i32_1(i32 addrspace(1)* %out, i32 addrspace(1)* %in)
ret void
}
-; FUNC-LABEL: @test_no_srem24_i32_2
+; FUNC-LABEL: {{^}}test_no_srem24_i32_2:
; SI-NOT: V_CVT_F32_I32
; SI-NOT: V_RCP_F32
diff --git a/test/CodeGen/R600/select-i1.ll b/test/CodeGen/R600/select-i1.ll
index 009dd7f68de..af9534f9765 100644
--- a/test/CodeGen/R600/select-i1.ll
+++ b/test/CodeGen/R600/select-i1.ll
@@ -2,7 +2,7 @@
; FIXME: This should go in existing select.ll test, except the current testcase there is broken on SI
-; FUNC-LABEL: @select_i1
+; FUNC-LABEL: {{^}}select_i1:
; SI: V_CNDMASK_B32
; SI-NOT: V_CNDMASK_B32
define void @select_i1(i1 addrspace(1)* %out, i32 %cond, i1 %a, i1 %b) nounwind {
diff --git a/test/CodeGen/R600/select-vectors.ll b/test/CodeGen/R600/select-vectors.ll
index 94605fe08ad..00758399a11 100644
--- a/test/CodeGen/R600/select-vectors.ll
+++ b/test/CodeGen/R600/select-vectors.ll
@@ -4,7 +4,7 @@
; Evergreen not enabled since it seems to be having problems with doubles.
-; FUNC-LABEL: @select_v4i8
+; FUNC-LABEL: {{^}}select_v4i8:
; SI: V_CNDMASK_B32_e64
; SI: V_CNDMASK_B32_e64
; SI: V_CNDMASK_B32_e64
@@ -16,7 +16,7 @@ define void @select_v4i8(<4 x i8> addrspace(1)* %out, <4 x i8> %a, <4 x i8> %b,
ret void
}
-; FUNC-LABEL: @select_v4i16
+; FUNC-LABEL: {{^}}select_v4i16:
; SI: V_CNDMASK_B32_e64
; SI: V_CNDMASK_B32_e64
; SI: V_CNDMASK_B32_e64
@@ -28,7 +28,7 @@ define void @select_v4i16(<4 x i16> addrspace(1)* %out, <4 x i16> %a, <4 x i16>
ret void
}
-; FUNC-LABEL: @select_v2i32
+; FUNC-LABEL: {{^}}select_v2i32:
; SI: V_CNDMASK_B32_e64
; SI: V_CNDMASK_B32_e64
; SI: BUFFER_STORE_DWORDX2
@@ -39,7 +39,7 @@ define void @select_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> %a, <2 x i32>
ret void
}
-; FUNC-LABEL: @select_v4i32
+; FUNC-LABEL: {{^}}select_v4i32:
; SI: V_CNDMASK_B32_e64
; SI: V_CNDMASK_B32_e64
; SI: V_CNDMASK_B32_e64
@@ -52,7 +52,7 @@ define void @select_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> %a, <4 x i32>
ret void
}
-; FUNC-LABEL: @select_v8i32
+; FUNC-LABEL: {{^}}select_v8i32:
; SI: V_CNDMASK_B32_e64
; SI: V_CNDMASK_B32_e64
; SI: V_CNDMASK_B32_e64
@@ -68,7 +68,7 @@ define void @select_v8i32(<8 x i32> addrspace(1)* %out, <8 x i32> %a, <8 x i32>
ret void
}
-; FUNC-LABEL: @select_v2f32
+; FUNC-LABEL: {{^}}select_v2f32:
; SI: BUFFER_STORE_DWORDX2
define void @select_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %a, <2 x float> %b, i32 %c) nounwind {
%cmp = icmp eq i32 %c, 0
@@ -77,7 +77,7 @@ define void @select_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %a, <2 x f
ret void
}
-; FUNC-LABEL: @select_v4f32
+; FUNC-LABEL: {{^}}select_v4f32:
; SI: BUFFER_STORE_DWORDX4
define void @select_v4f32(<4 x float> addrspace(1)* %out, <4 x float> %a, <4 x float> %b, i32 %c) nounwind {
%cmp = icmp eq i32 %c, 0
@@ -86,7 +86,7 @@ define void @select_v4f32(<4 x float> addrspace(1)* %out, <4 x float> %a, <4 x f
ret void
}
-; FUNC-LABEL: @select_v8f32
+; FUNC-LABEL: {{^}}select_v8f32:
; SI: V_CNDMASK_B32_e64
; SI: V_CNDMASK_B32_e64
; SI: V_CNDMASK_B32_e64
@@ -102,7 +102,7 @@ define void @select_v8f32(<8 x float> addrspace(1)* %out, <8 x float> %a, <8 x f
ret void
}
-; FUNC-LABEL: @select_v2f64
+; FUNC-LABEL: {{^}}select_v2f64:
; SI: V_CNDMASK_B32_e64
; SI: V_CNDMASK_B32_e64
; SI: V_CNDMASK_B32_e64
@@ -114,7 +114,7 @@ define void @select_v2f64(<2 x double> addrspace(1)* %out, <2 x double> %a, <2 x
ret void
}
-; FUNC-LABEL: @select_v4f64
+; FUNC-LABEL: {{^}}select_v4f64:
; SI: V_CNDMASK_B32_e64
; SI: V_CNDMASK_B32_e64
; SI: V_CNDMASK_B32_e64
@@ -130,7 +130,7 @@ define void @select_v4f64(<4 x double> addrspace(1)* %out, <4 x double> %a, <4 x
ret void
}
-; FUNC-LABEL: @select_v8f64
+; FUNC-LABEL: {{^}}select_v8f64:
; SI: V_CNDMASK_B32_e64
; SI: V_CNDMASK_B32_e64
; SI: V_CNDMASK_B32_e64
diff --git a/test/CodeGen/R600/select.ll b/test/CodeGen/R600/select.ll
index 7d5156834b9..45f3cd5a7ac 100644
--- a/test/CodeGen/R600/select.ll
+++ b/test/CodeGen/R600/select.ll
@@ -7,7 +7,7 @@
; In order to avoid the select_cc optimization, this test case calculates the
; condition for the select in a separate basic block.
-; FUNC-LABEL: @select
+; FUNC-LABEL: {{^}}select:
; EG-DAG: MEM_RAT_CACHELESS STORE_RAW T{{[0-9]+}}.X
; EG-DAG: MEM_RAT_CACHELESS STORE_RAW T{{[0-9]+}}.X
; EG-DAG: MEM_RAT_CACHELESS STORE_RAW T{{[0-9]+}}.XY
diff --git a/test/CodeGen/R600/select64.ll b/test/CodeGen/R600/select64.ll
index dba25e3bd21..1bc01daee1c 100644
--- a/test/CodeGen/R600/select64.ll
+++ b/test/CodeGen/R600/select64.ll
@@ -1,6 +1,6 @@
; RUN: llc < %s -march=r600 -mcpu=SI -verify-machineinstrs | FileCheck %s
-; CHECK-LABEL: @select0
+; CHECK-LABEL: {{^}}select0:
; i64 select should be split into two i32 selects, and we shouldn't need
; to use a shift to extract the hi dword of the input.
; CHECK-NOT: S_LSHR_B64
@@ -14,7 +14,7 @@ entry:
ret void
}
-; CHECK-LABEL: @select_trunc_i64
+; CHECK-LABEL: {{^}}select_trunc_i64:
; CHECK: V_CNDMASK_B32
; CHECK-NOT: V_CNDMASK_B32
define void @select_trunc_i64(i32 addrspace(1)* %out, i32 %cond, i64 %in) nounwind {
@@ -25,7 +25,7 @@ define void @select_trunc_i64(i32 addrspace(1)* %out, i32 %cond, i64 %in) nounwi
ret void
}
-; CHECK-LABEL: @select_trunc_i64_2
+; CHECK-LABEL: {{^}}select_trunc_i64_2:
; CHECK: V_CNDMASK_B32
; CHECK-NOT: V_CNDMASK_B32
define void @select_trunc_i64_2(i32 addrspace(1)* %out, i32 %cond, i64 %a, i64 %b) nounwind {
@@ -36,7 +36,7 @@ define void @select_trunc_i64_2(i32 addrspace(1)* %out, i32 %cond, i64 %a, i64 %
ret void
}
-; CHECK-LABEL: @v_select_trunc_i64_2
+; CHECK-LABEL: {{^}}v_select_trunc_i64_2:
; CHECK: V_CNDMASK_B32
; CHECK-NOT: V_CNDMASK_B32
define void @v_select_trunc_i64_2(i32 addrspace(1)* %out, i32 %cond, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr) nounwind {
diff --git a/test/CodeGen/R600/selectcc-opt.ll b/test/CodeGen/R600/selectcc-opt.ll
index bdb6867850b..5bcd11e9963 100644
--- a/test/CodeGen/R600/selectcc-opt.ll
+++ b/test/CodeGen/R600/selectcc-opt.ll
@@ -2,7 +2,7 @@
; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
-; FUNC-LABEL: @test_a
+; FUNC-LABEL: {{^}}test_a:
; EG-NOT: CND
; EG: SET{{[NEQGTL]+}}_DX10
@@ -30,7 +30,7 @@ ENDIF:
; Same as test_a, but the branch labels are swapped to produce the inverse cc
; for the icmp instruction
-; EG-LABEL: @test_b
+; EG-LABEL: {{^}}test_b:
; EG: SET{{[GTEQN]+}}_DX10
; EG-NEXT: PRED_
; EG-NEXT: ALU clause starting
@@ -56,7 +56,7 @@ ENDIF:
}
; Test a CND*_INT instruction with float true/false values
-; EG-LABEL: @test_c
+; EG-LABEL: {{^}}test_c:
; EG: CND{{[GTE]+}}_INT
define void @test_c(float addrspace(1)* %out, i32 %in) {
entry:
@@ -66,7 +66,7 @@ entry:
ret void
}
-; FUNC-LABEL: @selectcc_bool
+; FUNC-LABEL: {{^}}selectcc_bool:
; SI: V_CMP_NE_I32
; SI-NEXT: V_CNDMASK_B32_e64
; SI-NOT: CMP
diff --git a/test/CodeGen/R600/selectcc.ll b/test/CodeGen/R600/selectcc.ll
index a8f57cf1b57..6625691312e 100644
--- a/test/CodeGen/R600/selectcc.ll
+++ b/test/CodeGen/R600/selectcc.ll
@@ -1,7 +1,7 @@
; RUN: llc -verify-machineinstrs -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
; RUN: llc -verify-machineinstrs -march=r600 -mcpu=SI < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-; FUNC-LABEL: @selectcc_i64
+; FUNC-LABEL: {{^}}selectcc_i64:
; EG: XOR_INT
; EG: XOR_INT
; EG: OR_INT
diff --git a/test/CodeGen/R600/set-dx10.ll b/test/CodeGen/R600/set-dx10.ll
index 5c7d4998d07..53694dcffa6 100644
--- a/test/CodeGen/R600/set-dx10.ll
+++ b/test/CodeGen/R600/set-dx10.ll
@@ -4,7 +4,7 @@
; to store integer true (-1) and false (0) values are lowered to one of the
; SET*DX10 instructions.
-; CHECK: @fcmp_une_select_fptosi
+; CHECK: {{^}}fcmp_une_select_fptosi:
; CHECK: SETNE_DX10 {{\** *}}T{{[0-9]+\.[XYZW]}}, KC0[2].Z, literal.x,
; CHECK-NEXT: LSHR
; CHECK-NEXT: 1084227584(5.000000e+00)
@@ -18,7 +18,7 @@ entry:
ret void
}
-; CHECK: @fcmp_une_select_i32
+; CHECK: {{^}}fcmp_une_select_i32:
; CHECK: SETNE_DX10 {{\** *}}T{{[0-9]+\.[XYZW]}}, KC0[2].Z, literal.x,
; CHECK-NEXT: LSHR
; CHECK-NEXT: 1084227584(5.000000e+00)
@@ -30,7 +30,7 @@ entry:
ret void
}
-; CHECK: @fcmp_oeq_select_fptosi
+; CHECK: {{^}}fcmp_oeq_select_fptosi:
; CHECK: SETE_DX10 {{\** *}}T{{[0-9]+\.[XYZW]}}, KC0[2].Z, literal.x,
; CHECK-NEXT: LSHR
; CHECK-NEXT: 1084227584(5.000000e+00)
@@ -44,7 +44,7 @@ entry:
ret void
}
-; CHECK: @fcmp_oeq_select_i32
+; CHECK: {{^}}fcmp_oeq_select_i32:
; CHECK: SETE_DX10 {{\** *}}T{{[0-9]+\.[XYZW]}}, KC0[2].Z, literal.x,
; CHECK-NEXT: LSHR
; CHECK-NEXT: 1084227584(5.000000e+00)
@@ -56,7 +56,7 @@ entry:
ret void
}
-; CHECK: @fcmp_ogt_select_fptosi
+; CHECK: {{^}}fcmp_ogt_select_fptosi:
; CHECK: SETGT_DX10 {{\** *}}T{{[0-9]+\.[XYZW]}}, KC0[2].Z, literal.x,
; CHECK-NEXT: LSHR
; CHECK-NEXT: 1084227584(5.000000e+00)
@@ -70,7 +70,7 @@ entry:
ret void
}
-; CHECK: @fcmp_ogt_select_i32
+; CHECK: {{^}}fcmp_ogt_select_i32:
; CHECK: SETGT_DX10 {{\** *}}T{{[0-9]+\.[XYZW]}}, KC0[2].Z, literal.x,
; CHECK-NEXT: LSHR
; CHECK-NEXT: 1084227584(5.000000e+00)
@@ -82,7 +82,7 @@ entry:
ret void
}
-; CHECK: @fcmp_oge_select_fptosi
+; CHECK: {{^}}fcmp_oge_select_fptosi:
; CHECK: SETGE_DX10 {{\** *}}T{{[0-9]+\.[XYZW]}}, KC0[2].Z, literal.x,
; CHECK-NEXT: LSHR
; CHECK-NEXT: 1084227584(5.000000e+00)
@@ -96,7 +96,7 @@ entry:
ret void
}
-; CHECK: @fcmp_oge_select_i32
+; CHECK: {{^}}fcmp_oge_select_i32:
; CHECK: SETGE_DX10 {{\** *}}T{{[0-9]+\.[XYZW]}}, KC0[2].Z, literal.x,
; CHECK-NEXT: LSHR
; CHECK-NEXT: 1084227584(5.000000e+00)
@@ -108,7 +108,7 @@ entry:
ret void
}
-; CHECK: @fcmp_ole_select_fptosi
+; CHECK: {{^}}fcmp_ole_select_fptosi:
; CHECK: SETGE_DX10 {{\** *}}T{{[0-9]+\.[XYZW]}}, literal.x, KC0[2].Z,
; CHECK-NEXT: LSHR
; CHECK-NEXT: 1084227584(5.000000e+00)
@@ -122,7 +122,7 @@ entry:
ret void
}
-; CHECK: @fcmp_ole_select_i32
+; CHECK: {{^}}fcmp_ole_select_i32:
; CHECK: SETGE_DX10 {{\** *}}T{{[0-9]+\.[XYZW]}}, literal.x, KC0[2].Z,
; CHECK-NEXT: LSHR
; CHECK-NEXT: 1084227584(5.000000e+00)
@@ -134,7 +134,7 @@ entry:
ret void
}
-; CHECK: @fcmp_olt_select_fptosi
+; CHECK: {{^}}fcmp_olt_select_fptosi:
; CHECK: SETGT_DX10 {{\** *}}T{{[0-9]+\.[XYZW]}}, literal.x, KC0[2].Z,
; CHECK-NEXT: LSHR
; CHECK-NEXT: 1084227584(5.000000e+00)
@@ -148,7 +148,7 @@ entry:
ret void
}
-; CHECK: @fcmp_olt_select_i32
+; CHECK: {{^}}fcmp_olt_select_i32:
; CHECK: SETGT_DX10 {{\** *}}T{{[0-9]+\.[XYZW]}}, literal.x, KC0[2].Z,
; CHECK-NEXT: LSHR
; CHECK-NEXT: 1084227584(5.000000e+00)
diff --git a/test/CodeGen/R600/setcc-equivalent.ll b/test/CodeGen/R600/setcc-equivalent.ll
index e21cecf0534..11ea793650c 100644
--- a/test/CodeGen/R600/setcc-equivalent.ll
+++ b/test/CodeGen/R600/setcc-equivalent.ll
@@ -1,6 +1,6 @@
; RUN: llc -march=r600 -mcpu=cypress < %s | FileCheck -check-prefix=EG %s
-; EG-LABEL: @and_setcc_setcc_i32
+; EG-LABEL: {{^}}and_setcc_setcc_i32:
; EG: AND_INT
; EG-NEXT: SETE_INT
define void @and_setcc_setcc_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) {
@@ -12,7 +12,7 @@ define void @and_setcc_setcc_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) {
ret void
}
-; EG-LABEL: @and_setcc_setcc_v4i32
+; EG-LABEL: {{^}}and_setcc_setcc_v4i32:
; EG: AND_INT
; EG: AND_INT
; EG: SETE_INT
diff --git a/test/CodeGen/R600/setcc-opt.ll b/test/CodeGen/R600/setcc-opt.ll
index 8e831e40919..0ed60109516 100644
--- a/test/CodeGen/R600/setcc-opt.ll
+++ b/test/CodeGen/R600/setcc-opt.ll
@@ -1,6 +1,6 @@
; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-; SI-LABEL: @sext_bool_icmp_ne
+; SI-LABEL: {{^}}sext_bool_icmp_ne:
; SI: V_CMP_NE_I32
; SI-NEXT: V_CNDMASK_B32
; SI-NOT: V_CMP_NE_I32
diff --git a/test/CodeGen/R600/setcc.ll b/test/CodeGen/R600/setcc.ll
index 5bd95b79c0f..351adf9185c 100644
--- a/test/CodeGen/R600/setcc.ll
+++ b/test/CodeGen/R600/setcc.ll
@@ -1,7 +1,7 @@
;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck --check-prefix=R600 --check-prefix=FUNC %s
;RUN: llc < %s -march=r600 -mcpu=SI -verify-machineinstrs| FileCheck --check-prefix=SI --check-prefix=FUNC %s
-; FUNC-LABEL: @setcc_v2i32
+; FUNC-LABEL: {{^}}setcc_v2i32:
; R600-DAG: SETE_INT * T{{[0-9]+\.[XYZW]}}, KC0[3].X, KC0[3].Z
; R600-DAG: SETE_INT * T{{[0-9]+\.[XYZW]}}, KC0[2].W, KC0[3].Y
@@ -12,7 +12,7 @@ define void @setcc_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> %a, <2 x i32> %
ret void
}
-; FUNC-LABEL: @setcc_v4i32
+; FUNC-LABEL: {{^}}setcc_v4i32:
; R600-DAG: SETE_INT * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; R600-DAG: SETE_INT * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; R600-DAG: SETE_INT * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
@@ -32,7 +32,7 @@ define void @setcc_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %
;; Float comparisons
;;;==========================================================================;;;
-; FUNC-LABEL: @f32_oeq
+; FUNC-LABEL: {{^}}f32_oeq:
; R600: SETE_DX10
; SI: V_CMP_EQ_F32
define void @f32_oeq(i32 addrspace(1)* %out, float %a, float %b) {
@@ -43,7 +43,7 @@ entry:
ret void
}
-; FUNC-LABEL: @f32_ogt
+; FUNC-LABEL: {{^}}f32_ogt:
; R600: SETGT_DX10
; SI: V_CMP_GT_F32
define void @f32_ogt(i32 addrspace(1)* %out, float %a, float %b) {
@@ -54,7 +54,7 @@ entry:
ret void
}
-; FUNC-LABEL: @f32_oge
+; FUNC-LABEL: {{^}}f32_oge:
; R600: SETGE_DX10
; SI: V_CMP_GE_F32
define void @f32_oge(i32 addrspace(1)* %out, float %a, float %b) {
@@ -65,7 +65,7 @@ entry:
ret void
}
-; FUNC-LABEL: @f32_olt
+; FUNC-LABEL: {{^}}f32_olt:
; R600: SETGT_DX10
; SI: V_CMP_LT_F32
define void @f32_olt(i32 addrspace(1)* %out, float %a, float %b) {
@@ -76,7 +76,7 @@ entry:
ret void
}
-; FUNC-LABEL: @f32_ole
+; FUNC-LABEL: {{^}}f32_ole:
; R600: SETGE_DX10
; SI: V_CMP_LE_F32
define void @f32_ole(i32 addrspace(1)* %out, float %a, float %b) {
@@ -87,7 +87,7 @@ entry:
ret void
}
-; FUNC-LABEL: @f32_one
+; FUNC-LABEL: {{^}}f32_one:
; R600-DAG: SETE_DX10
; R600-DAG: SETE_DX10
; R600-DAG: AND_INT
@@ -107,7 +107,7 @@ entry:
ret void
}
-; FUNC-LABEL: @f32_ord
+; FUNC-LABEL: {{^}}f32_ord:
; R600-DAG: SETE_DX10
; R600-DAG: SETE_DX10
; R600-DAG: AND_INT
@@ -121,7 +121,7 @@ entry:
ret void
}
-; FUNC-LABEL: @f32_ueq
+; FUNC-LABEL: {{^}}f32_ueq:
; R600-DAG: SETNE_DX10
; R600-DAG: SETNE_DX10
; R600-DAG: OR_INT
@@ -141,7 +141,7 @@ entry:
ret void
}
-; FUNC-LABEL: @f32_ugt
+; FUNC-LABEL: {{^}}f32_ugt:
; R600: SETGE
; R600: SETE_DX10
; SI: V_CMP_U_F32
@@ -157,7 +157,7 @@ entry:
ret void
}
-; FUNC-LABEL: @f32_uge
+; FUNC-LABEL: {{^}}f32_uge:
; R600: SETGT
; R600: SETE_DX10
; SI: V_CMP_U_F32
@@ -173,7 +173,7 @@ entry:
ret void
}
-; FUNC-LABEL: @f32_ult
+; FUNC-LABEL: {{^}}f32_ult:
; R600: SETGE
; R600: SETE_DX10
; SI: V_CMP_U_F32
@@ -189,7 +189,7 @@ entry:
ret void
}
-; FUNC-LABEL: @f32_ule
+; FUNC-LABEL: {{^}}f32_ule:
; R600: SETGT
; R600: SETE_DX10
; SI: V_CMP_U_F32
@@ -205,7 +205,7 @@ entry:
ret void
}
-; FUNC-LABEL: @f32_une
+; FUNC-LABEL: {{^}}f32_une:
; R600: SETNE_DX10
; SI: V_CMP_NEQ_F32
define void @f32_une(i32 addrspace(1)* %out, float %a, float %b) {
@@ -216,7 +216,7 @@ entry:
ret void
}
-; FUNC-LABEL: @f32_uno
+; FUNC-LABEL: {{^}}f32_uno:
; R600: SETNE_DX10
; R600: SETNE_DX10
; R600: OR_INT
@@ -234,7 +234,7 @@ entry:
;; 32-bit integer comparisons
;;;==========================================================================;;;
-; FUNC-LABEL: @i32_eq
+; FUNC-LABEL: {{^}}i32_eq:
; R600: SETE_INT
; SI: V_CMP_EQ_I32
define void @i32_eq(i32 addrspace(1)* %out, i32 %a, i32 %b) {
@@ -245,7 +245,7 @@ entry:
ret void
}
-; FUNC-LABEL: @i32_ne
+; FUNC-LABEL: {{^}}i32_ne:
; R600: SETNE_INT
; SI: V_CMP_NE_I32
define void @i32_ne(i32 addrspace(1)* %out, i32 %a, i32 %b) {
@@ -256,7 +256,7 @@ entry:
ret void
}
-; FUNC-LABEL: @i32_ugt
+; FUNC-LABEL: {{^}}i32_ugt:
; R600: SETGT_UINT
; SI: V_CMP_GT_U32
define void @i32_ugt(i32 addrspace(1)* %out, i32 %a, i32 %b) {
@@ -267,7 +267,7 @@ entry:
ret void
}
-; FUNC-LABEL: @i32_uge
+; FUNC-LABEL: {{^}}i32_uge:
; R600: SETGE_UINT
; SI: V_CMP_GE_U32
define void @i32_uge(i32 addrspace(1)* %out, i32 %a, i32 %b) {
@@ -278,7 +278,7 @@ entry:
ret void
}
-; FUNC-LABEL: @i32_ult
+; FUNC-LABEL: {{^}}i32_ult:
; R600: SETGT_UINT
; SI: V_CMP_LT_U32
define void @i32_ult(i32 addrspace(1)* %out, i32 %a, i32 %b) {
@@ -289,7 +289,7 @@ entry:
ret void
}
-; FUNC-LABEL: @i32_ule
+; FUNC-LABEL: {{^}}i32_ule:
; R600: SETGE_UINT
; SI: V_CMP_LE_U32
define void @i32_ule(i32 addrspace(1)* %out, i32 %a, i32 %b) {
@@ -300,7 +300,7 @@ entry:
ret void
}
-; FUNC-LABEL: @i32_sgt
+; FUNC-LABEL: {{^}}i32_sgt:
; R600: SETGT_INT
; SI: V_CMP_GT_I32
define void @i32_sgt(i32 addrspace(1)* %out, i32 %a, i32 %b) {
@@ -311,7 +311,7 @@ entry:
ret void
}
-; FUNC-LABEL: @i32_sge
+; FUNC-LABEL: {{^}}i32_sge:
; R600: SETGE_INT
; SI: V_CMP_GE_I32
define void @i32_sge(i32 addrspace(1)* %out, i32 %a, i32 %b) {
@@ -322,7 +322,7 @@ entry:
ret void
}
-; FUNC-LABEL: @i32_slt
+; FUNC-LABEL: {{^}}i32_slt:
; R600: SETGT_INT
; SI: V_CMP_LT_I32
define void @i32_slt(i32 addrspace(1)* %out, i32 %a, i32 %b) {
@@ -333,7 +333,7 @@ entry:
ret void
}
-; FUNC-LABEL: @i32_sle
+; FUNC-LABEL: {{^}}i32_sle:
; R600: SETGE_INT
; SI: V_CMP_LE_I32
define void @i32_sle(i32 addrspace(1)* %out, i32 %a, i32 %b) {
diff --git a/test/CodeGen/R600/setcc64.ll b/test/CodeGen/R600/setcc64.ll
index 54a33b30940..fd75e175486 100644
--- a/test/CodeGen/R600/setcc64.ll
+++ b/test/CodeGen/R600/setcc64.ll
@@ -6,7 +6,7 @@
;; Double comparisons
;;;==========================================================================;;;
-; FUNC-LABEL: @f64_oeq
+; FUNC-LABEL: {{^}}f64_oeq:
; SI: V_CMP_EQ_F64
define void @f64_oeq(i32 addrspace(1)* %out, double %a, double %b) {
entry:
@@ -16,7 +16,7 @@ entry:
ret void
}
-; FUNC-LABEL: @f64_ogt
+; FUNC-LABEL: {{^}}f64_ogt:
; SI: V_CMP_GT_F64
define void @f64_ogt(i32 addrspace(1)* %out, double %a, double %b) {
entry:
@@ -26,7 +26,7 @@ entry:
ret void
}
-; FUNC-LABEL: @f64_oge
+; FUNC-LABEL: {{^}}f64_oge:
; SI: V_CMP_GE_F64
define void @f64_oge(i32 addrspace(1)* %out, double %a, double %b) {
entry:
@@ -36,7 +36,7 @@ entry:
ret void
}
-; FUNC-LABEL: @f64_olt
+; FUNC-LABEL: {{^}}f64_olt:
; SI: V_CMP_LT_F64
define void @f64_olt(i32 addrspace(1)* %out, double %a, double %b) {
entry:
@@ -46,7 +46,7 @@ entry:
ret void
}
-; FUNC-LABEL: @f64_ole
+; FUNC-LABEL: {{^}}f64_ole:
; SI: V_CMP_LE_F64
define void @f64_ole(i32 addrspace(1)* %out, double %a, double %b) {
entry:
@@ -56,7 +56,7 @@ entry:
ret void
}
-; FUNC-LABEL: @f64_one
+; FUNC-LABEL: {{^}}f64_one:
; SI: V_CMP_O_F64
; SI: V_CMP_NEQ_F64
; SI: V_CNDMASK_B32_e64
@@ -70,7 +70,7 @@ entry:
ret void
}
-; FUNC-LABEL: @f64_ord
+; FUNC-LABEL: {{^}}f64_ord:
; SI: V_CMP_O_F64
define void @f64_ord(i32 addrspace(1)* %out, double %a, double %b) {
entry:
@@ -80,7 +80,7 @@ entry:
ret void
}
-; FUNC-LABEL: @f64_ueq
+; FUNC-LABEL: {{^}}f64_ueq:
; SI: V_CMP_U_F64
; SI: V_CMP_EQ_F64
; SI: V_CNDMASK_B32_e64
@@ -94,7 +94,7 @@ entry:
ret void
}
-; FUNC-LABEL: @f64_ugt
+; FUNC-LABEL: {{^}}f64_ugt:
; SI: V_CMP_U_F64
; SI: V_CMP_GT_F64
; SI: V_CNDMASK_B32_e64
@@ -108,7 +108,7 @@ entry:
ret void
}
-; FUNC-LABEL: @f64_uge
+; FUNC-LABEL: {{^}}f64_uge:
; SI: V_CMP_U_F64
; SI: V_CMP_GE_F64
; SI: V_CNDMASK_B32_e64
@@ -122,7 +122,7 @@ entry:
ret void
}
-; FUNC-LABEL: @f64_ult
+; FUNC-LABEL: {{^}}f64_ult:
; SI: V_CMP_U_F64
; SI: V_CMP_LT_F64
; SI: V_CNDMASK_B32_e64
@@ -136,7 +136,7 @@ entry:
ret void
}
-; FUNC-LABEL: @f64_ule
+; FUNC-LABEL: {{^}}f64_ule:
; SI: V_CMP_U_F64
; SI: V_CMP_LE_F64
; SI: V_CNDMASK_B32_e64
@@ -150,7 +150,7 @@ entry:
ret void
}
-; FUNC-LABEL: @f64_une
+; FUNC-LABEL: {{^}}f64_une:
; SI: V_CMP_NEQ_F64
define void @f64_une(i32 addrspace(1)* %out, double %a, double %b) {
entry:
@@ -160,7 +160,7 @@ entry:
ret void
}
-; FUNC-LABEL: @f64_uno
+; FUNC-LABEL: {{^}}f64_uno:
; SI: V_CMP_U_F64
define void @f64_uno(i32 addrspace(1)* %out, double %a, double %b) {
entry:
@@ -174,7 +174,7 @@ entry:
;; 64-bit integer comparisons
;;;==========================================================================;;;
-; FUNC-LABEL: @i64_eq
+; FUNC-LABEL: {{^}}i64_eq:
; SI: V_CMP_EQ_I64
define void @i64_eq(i32 addrspace(1)* %out, i64 %a, i64 %b) {
entry:
@@ -184,7 +184,7 @@ entry:
ret void
}
-; FUNC-LABEL: @i64_ne
+; FUNC-LABEL: {{^}}i64_ne:
; SI: V_CMP_NE_I64
define void @i64_ne(i32 addrspace(1)* %out, i64 %a, i64 %b) {
entry:
@@ -194,7 +194,7 @@ entry:
ret void
}
-; FUNC-LABEL: @i64_ugt
+; FUNC-LABEL: {{^}}i64_ugt:
; SI: V_CMP_GT_U64
define void @i64_ugt(i32 addrspace(1)* %out, i64 %a, i64 %b) {
entry:
@@ -204,7 +204,7 @@ entry:
ret void
}
-; FUNC-LABEL: @i64_uge
+; FUNC-LABEL: {{^}}i64_uge:
; SI: V_CMP_GE_U64
define void @i64_uge(i32 addrspace(1)* %out, i64 %a, i64 %b) {
entry:
@@ -214,7 +214,7 @@ entry:
ret void
}
-; FUNC-LABEL: @i64_ult
+; FUNC-LABEL: {{^}}i64_ult:
; SI: V_CMP_LT_U64
define void @i64_ult(i32 addrspace(1)* %out, i64 %a, i64 %b) {
entry:
@@ -224,7 +224,7 @@ entry:
ret void
}
-; FUNC-LABEL: @i64_ule
+; FUNC-LABEL: {{^}}i64_ule:
; SI: V_CMP_LE_U64
define void @i64_ule(i32 addrspace(1)* %out, i64 %a, i64 %b) {
entry:
@@ -234,7 +234,7 @@ entry:
ret void
}
-; FUNC-LABEL: @i64_sgt
+; FUNC-LABEL: {{^}}i64_sgt:
; SI: V_CMP_GT_I64
define void @i64_sgt(i32 addrspace(1)* %out, i64 %a, i64 %b) {
entry:
@@ -244,7 +244,7 @@ entry:
ret void
}
-; FUNC-LABEL: @i64_sge
+; FUNC-LABEL: {{^}}i64_sge:
; SI: V_CMP_GE_I64
define void @i64_sge(i32 addrspace(1)* %out, i64 %a, i64 %b) {
entry:
@@ -254,7 +254,7 @@ entry:
ret void
}
-; FUNC-LABEL: @i64_slt
+; FUNC-LABEL: {{^}}i64_slt:
; SI: V_CMP_LT_I64
define void @i64_slt(i32 addrspace(1)* %out, i64 %a, i64 %b) {
entry:
@@ -264,7 +264,7 @@ entry:
ret void
}
-; FUNC-LABEL: @i64_sle
+; FUNC-LABEL: {{^}}i64_sle:
; SI: V_CMP_LE_I64
define void @i64_sle(i32 addrspace(1)* %out, i64 %a, i64 %b) {
entry:
diff --git a/test/CodeGen/R600/seto.ll b/test/CodeGen/R600/seto.ll
index eb1176f5855..384c9cf5bb8 100644
--- a/test/CodeGen/R600/seto.ll
+++ b/test/CodeGen/R600/seto.ll
@@ -1,6 +1,6 @@
; RUN: llc -march=r600 -mcpu=verde -verify-machineinstrs < %s | FileCheck %s
-; CHECK-LABEL: @main
+; CHECK-LABEL: {{^}}main:
; CHECK: V_CMP_O_F32_e64 [[CMP:s\[[0-9]+:[0-9]+\]]], [[SREG:s[0-9]+]], [[SREG]]
; CHECK-NEXT: V_CNDMASK_B32_e64 {{v[0-9]+}}, 0, 1.0, [[CMP]]
define void @main(float %p) {
diff --git a/test/CodeGen/R600/setuo.ll b/test/CodeGen/R600/setuo.ll
index a78e8e6b45b..7aec7acbb26 100644
--- a/test/CodeGen/R600/setuo.ll
+++ b/test/CodeGen/R600/setuo.ll
@@ -1,6 +1,6 @@
; RUN: llc -march=r600 -mcpu=verde -verify-machineinstrs < %s | FileCheck %s
-; CHECK-LABEL: @main
+; CHECK-LABEL: {{^}}main:
; CHECK: V_CMP_U_F32_e64 [[CMP:s\[[0-9]+:[0-9]+\]]], [[SREG:s[0-9]+]], [[SREG]]
; CHECK-NEXT: V_CNDMASK_B32_e64 {{v[0-9]+}}, 0, 1.0, [[CMP]]
define void @main(float %p) {
diff --git a/test/CodeGen/R600/sext-in-reg.ll b/test/CodeGen/R600/sext-in-reg.ll
index ba0dbcb2840..f43d006bae9 100644
--- a/test/CodeGen/R600/sext-in-reg.ll
+++ b/test/CodeGen/R600/sext-in-reg.ll
@@ -4,7 +4,7 @@
declare i32 @llvm.AMDGPU.imax(i32, i32) nounwind readnone
-; FUNC-LABEL: @sext_in_reg_i1_i32
+; FUNC-LABEL: {{^}}sext_in_reg_i1_i32:
; SI: S_LOAD_DWORD [[ARG:s[0-9]+]],
; SI: S_BFE_I32 [[SEXTRACT:s[0-9]+]], [[ARG]], 0x10000
; SI: V_MOV_B32_e32 [[EXTRACT:v[0-9]+]], [[SEXTRACT]]
@@ -20,7 +20,7 @@ define void @sext_in_reg_i1_i32(i32 addrspace(1)* %out, i32 %in) {
ret void
}
-; FUNC-LABEL: @sext_in_reg_i8_to_i32
+; FUNC-LABEL: {{^}}sext_in_reg_i8_to_i32:
; SI: S_ADD_I32 [[VAL:s[0-9]+]],
; SI: S_SEXT_I32_I8 [[EXTRACT:s[0-9]+]], [[VAL]]
; SI: V_MOV_B32_e32 [[VEXTRACT:v[0-9]+]], [[EXTRACT]]
@@ -38,7 +38,7 @@ define void @sext_in_reg_i8_to_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) nounw
ret void
}
-; FUNC-LABEL: @sext_in_reg_i16_to_i32
+; FUNC-LABEL: {{^}}sext_in_reg_i16_to_i32:
; SI: S_ADD_I32 [[VAL:s[0-9]+]],
; SI: S_SEXT_I32_I16 [[EXTRACT:s[0-9]+]], [[VAL]]
; SI: V_MOV_B32_e32 [[VEXTRACT:v[0-9]+]], [[EXTRACT]]
@@ -56,7 +56,7 @@ define void @sext_in_reg_i16_to_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) noun
ret void
}
-; FUNC-LABEL: @sext_in_reg_i8_to_v1i32
+; FUNC-LABEL: {{^}}sext_in_reg_i8_to_v1i32:
; SI: S_ADD_I32 [[VAL:s[0-9]+]],
; SI: S_SEXT_I32_I8 [[EXTRACT:s[0-9]+]], [[VAL]]
; SI: V_MOV_B32_e32 [[VEXTRACT:v[0-9]+]], [[EXTRACT]]
@@ -74,7 +74,7 @@ define void @sext_in_reg_i8_to_v1i32(<1 x i32> addrspace(1)* %out, <1 x i32> %a,
ret void
}
-; FUNC-LABEL: @sext_in_reg_i1_to_i64
+; FUNC-LABEL: {{^}}sext_in_reg_i1_to_i64:
; SI: S_MOV_B32 {{s[0-9]+}}, -1
; SI: S_ADD_I32 [[VAL:s[0-9]+]],
; SI: S_BFE_I32 s{{[0-9]+}}, s{{[0-9]+}}, 0x10000
@@ -87,7 +87,7 @@ define void @sext_in_reg_i1_to_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) nounw
ret void
}
-; FUNC-LABEL: @sext_in_reg_i8_to_i64
+; FUNC-LABEL: {{^}}sext_in_reg_i8_to_i64:
; SI: S_MOV_B32 {{s[0-9]+}}, -1
; SI: S_ADD_I32 [[VAL:s[0-9]+]],
; SI: S_SEXT_I32_I8 [[EXTRACT:s[0-9]+]], [[VAL]]
@@ -111,7 +111,7 @@ define void @sext_in_reg_i8_to_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) nounw
ret void
}
-; FUNC-LABEL: @sext_in_reg_i16_to_i64
+; FUNC-LABEL: {{^}}sext_in_reg_i16_to_i64:
; SI: S_MOV_B32 {{s[0-9]+}}, -1
; SI: S_ADD_I32 [[VAL:s[0-9]+]],
; SI: S_SEXT_I32_I16 [[EXTRACT:s[0-9]+]], [[VAL]]
@@ -135,7 +135,7 @@ define void @sext_in_reg_i16_to_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) noun
ret void
}
-; FUNC-LABEL: @sext_in_reg_i32_to_i64
+; FUNC-LABEL: {{^}}sext_in_reg_i32_to_i64:
; SI: S_LOAD_DWORD
; SI: S_LOAD_DWORD
; SI: S_ADD_I32 [[ADD:s[0-9]+]],
@@ -161,7 +161,7 @@ define void @sext_in_reg_i32_to_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) noun
}
; This is broken on Evergreen for some reason related to the <1 x i64> kernel arguments.
-; XFUNC-LABEL: @sext_in_reg_i8_to_v1i64
+; XFUNC-LABEL: {{^}}sext_in_reg_i8_to_v1i64:
; XSI: S_BFE_I32 [[EXTRACT:s[0-9]+]], {{s[0-9]+}}, 524288
; XSI: S_ASHR_I32 {{v[0-9]+}}, [[EXTRACT]], 31
; XSI: BUFFER_STORE_DWORD
@@ -175,7 +175,7 @@ define void @sext_in_reg_i32_to_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) noun
; ret void
; }
-; FUNC-LABEL: @sext_in_reg_i1_in_i32_other_amount
+; FUNC-LABEL: {{^}}sext_in_reg_i1_in_i32_other_amount:
; SI-NOT: BFE
; SI: S_LSHL_B32 [[REG:s[0-9]+]], {{s[0-9]+}}, 6
; SI: S_ASHR_I32 {{s[0-9]+}}, [[REG]], 7
@@ -194,7 +194,7 @@ define void @sext_in_reg_i1_in_i32_other_amount(i32 addrspace(1)* %out, i32 %a,
ret void
}
-; FUNC-LABEL: @sext_in_reg_v2i1_in_v2i32_other_amount
+; FUNC-LABEL: {{^}}sext_in_reg_v2i1_in_v2i32_other_amount:
; SI-DAG: S_LSHL_B32 [[REG0:s[0-9]+]], {{s[0-9]}}, 6
; SI-DAG: S_ASHR_I32 {{s[0-9]+}}, [[REG0]], 7
; SI-DAG: S_LSHL_B32 [[REG1:s[0-9]+]], {{s[0-9]}}, 6
@@ -218,7 +218,7 @@ define void @sext_in_reg_v2i1_in_v2i32_other_amount(<2 x i32> addrspace(1)* %out
}
-; FUNC-LABEL: @sext_in_reg_v2i1_to_v2i32
+; FUNC-LABEL: {{^}}sext_in_reg_v2i1_to_v2i32:
; SI: S_BFE_I32 {{s[0-9]+}}, {{s[0-9]+}}, 0x10000
; SI: S_BFE_I32 {{s[0-9]+}}, {{s[0-9]+}}, 0x10000
; SI: BUFFER_STORE_DWORDX2
@@ -235,7 +235,7 @@ define void @sext_in_reg_v2i1_to_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> %
ret void
}
-; FUNC-LABEL: @sext_in_reg_v4i1_to_v4i32
+; FUNC-LABEL: {{^}}sext_in_reg_v4i1_to_v4i32:
; SI: S_BFE_I32 {{s[0-9]+}}, {{s[0-9]+}}, 0x10000
; SI: S_BFE_I32 {{s[0-9]+}}, {{s[0-9]+}}, 0x10000
; SI: S_BFE_I32 {{s[0-9]+}}, {{s[0-9]+}}, 0x10000
@@ -256,7 +256,7 @@ define void @sext_in_reg_v4i1_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> %
ret void
}
-; FUNC-LABEL: @sext_in_reg_v2i8_to_v2i32
+; FUNC-LABEL: {{^}}sext_in_reg_v2i8_to_v2i32:
; SI: S_SEXT_I32_I8 {{s[0-9]+}}, {{s[0-9]+}}
; SI: S_SEXT_I32_I8 {{s[0-9]+}}, {{s[0-9]+}}
; SI: BUFFER_STORE_DWORDX2
@@ -273,7 +273,7 @@ define void @sext_in_reg_v2i8_to_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> %
ret void
}
-; FUNC-LABEL: @sext_in_reg_v4i8_to_v4i32
+; FUNC-LABEL: {{^}}sext_in_reg_v4i8_to_v4i32:
; SI: S_SEXT_I32_I8 {{s[0-9]+}}, {{s[0-9]+}}
; SI: S_SEXT_I32_I8 {{s[0-9]+}}, {{s[0-9]+}}
; SI: S_SEXT_I32_I8 {{s[0-9]+}}, {{s[0-9]+}}
@@ -294,7 +294,7 @@ define void @sext_in_reg_v4i8_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> %
ret void
}
-; FUNC-LABEL: @sext_in_reg_v2i16_to_v2i32
+; FUNC-LABEL: {{^}}sext_in_reg_v2i16_to_v2i32:
; SI: S_SEXT_I32_I16 {{s[0-9]+}}, {{s[0-9]+}}
; SI: S_SEXT_I32_I16 {{s[0-9]+}}, {{s[0-9]+}}
; SI: BUFFER_STORE_DWORDX2
@@ -311,7 +311,7 @@ define void @sext_in_reg_v2i16_to_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32>
ret void
}
-; FUNC-LABEL: @testcase
+; FUNC-LABEL: {{^}}testcase:
define void @testcase(i8 addrspace(1)* %out, i8 %a) nounwind {
%and_a_1 = and i8 %a, 1
%cmp_eq = icmp eq i8 %and_a_1, 0
@@ -323,7 +323,7 @@ define void @testcase(i8 addrspace(1)* %out, i8 %a) nounwind {
ret void
}
-; FUNC-LABEL: @testcase_3
+; FUNC-LABEL: {{^}}testcase_3:
define void @testcase_3(i8 addrspace(1)* %out, i8 %a) nounwind {
%and_a_1 = and i8 %a, 1
%cmp_eq = icmp eq i8 %and_a_1, 0
@@ -335,7 +335,7 @@ define void @testcase_3(i8 addrspace(1)* %out, i8 %a) nounwind {
ret void
}
-; FUNC-LABEL: @vgpr_sext_in_reg_v4i8_to_v4i32
+; FUNC-LABEL: {{^}}vgpr_sext_in_reg_v4i8_to_v4i32:
; SI: V_BFE_I32 [[EXTRACT:v[0-9]+]], {{v[0-9]+}}, 0, 8
; SI: V_BFE_I32 [[EXTRACT:v[0-9]+]], {{v[0-9]+}}, 0, 8
; SI: V_BFE_I32 [[EXTRACT:v[0-9]+]], {{v[0-9]+}}, 0, 8
@@ -350,7 +350,7 @@ define void @vgpr_sext_in_reg_v4i8_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x i
ret void
}
-; FUNC-LABEL: @vgpr_sext_in_reg_v4i16_to_v4i32
+; FUNC-LABEL: {{^}}vgpr_sext_in_reg_v4i16_to_v4i32:
; SI: V_BFE_I32 [[EXTRACT:v[0-9]+]], {{v[0-9]+}}, 0, 16
; SI: V_BFE_I32 [[EXTRACT:v[0-9]+]], {{v[0-9]+}}, 0, 16
define void @vgpr_sext_in_reg_v4i16_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %a, <4 x i32> addrspace(1)* %b) nounwind {
@@ -366,7 +366,7 @@ define void @vgpr_sext_in_reg_v4i16_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x
; FIXME: The BFE should really be eliminated. I think it should happen
; when computeKnownBitsForTargetNode is implemented for imax.
-; FUNC-LABEL: @sext_in_reg_to_illegal_type
+; FUNC-LABEL: {{^}}sext_in_reg_to_illegal_type:
; SI: BUFFER_LOAD_SBYTE
; SI: V_MAX_I32
; SI: V_BFE_I32
@@ -383,7 +383,7 @@ define void @sext_in_reg_to_illegal_type(i16 addrspace(1)* nocapture %out, i8 ad
declare i32 @llvm.AMDGPU.bfe.i32(i32, i32, i32) nounwind readnone
-; FUNC-LABEL: @bfe_0_width
+; FUNC-LABEL: {{^}}bfe_0_width:
; SI-NOT: BFE
; SI: S_ENDPGM
define void @bfe_0_width(i32 addrspace(1)* %out, i32 addrspace(1)* %ptr) nounwind {
@@ -393,7 +393,7 @@ define void @bfe_0_width(i32 addrspace(1)* %out, i32 addrspace(1)* %ptr) nounwin
ret void
}
-; FUNC-LABEL: @bfe_8_bfe_8
+; FUNC-LABEL: {{^}}bfe_8_bfe_8:
; SI: V_BFE_I32
; SI-NOT: BFE
; SI: S_ENDPGM
@@ -405,7 +405,7 @@ define void @bfe_8_bfe_8(i32 addrspace(1)* %out, i32 addrspace(1)* %ptr) nounwin
ret void
}
-; FUNC-LABEL: @bfe_8_bfe_16
+; FUNC-LABEL: {{^}}bfe_8_bfe_16:
; SI: V_BFE_I32 v{{[0-9]+}}, v{{[0-9]+}}, 0, 8
; SI: S_ENDPGM
define void @bfe_8_bfe_16(i32 addrspace(1)* %out, i32 addrspace(1)* %ptr) nounwind {
@@ -417,7 +417,7 @@ define void @bfe_8_bfe_16(i32 addrspace(1)* %out, i32 addrspace(1)* %ptr) nounwi
}
; This really should be folded into 1
-; FUNC-LABEL: @bfe_16_bfe_8
+; FUNC-LABEL: {{^}}bfe_16_bfe_8:
; SI: V_BFE_I32 v{{[0-9]+}}, v{{[0-9]+}}, 0, 8
; SI-NOT: BFE
; SI: S_ENDPGM
@@ -430,7 +430,7 @@ define void @bfe_16_bfe_8(i32 addrspace(1)* %out, i32 addrspace(1)* %ptr) nounwi
}
; Make sure there isn't a redundant BFE
-; FUNC-LABEL: @sext_in_reg_i8_to_i32_bfe
+; FUNC-LABEL: {{^}}sext_in_reg_i8_to_i32_bfe:
; SI: S_SEXT_I32_I8 s{{[0-9]+}}, s{{[0-9]+}}
; SI-NOT: BFE
; SI: S_ENDPGM
@@ -443,7 +443,7 @@ define void @sext_in_reg_i8_to_i32_bfe(i32 addrspace(1)* %out, i32 %a, i32 %b) n
ret void
}
-; FUNC-LABEL: @sext_in_reg_i8_to_i32_bfe_wrong
+; FUNC-LABEL: {{^}}sext_in_reg_i8_to_i32_bfe_wrong:
define void @sext_in_reg_i8_to_i32_bfe_wrong(i32 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
%c = add i32 %a, %b ; add to prevent folding into extload
%bfe = call i32 @llvm.AMDGPU.bfe.i32(i32 %c, i32 8, i32 0) nounwind readnone
@@ -453,7 +453,7 @@ define void @sext_in_reg_i8_to_i32_bfe_wrong(i32 addrspace(1)* %out, i32 %a, i32
ret void
}
-; FUNC-LABEL: @sextload_i8_to_i32_bfe
+; FUNC-LABEL: {{^}}sextload_i8_to_i32_bfe:
; SI: BUFFER_LOAD_SBYTE
; SI-NOT: BFE
; SI: S_ENDPGM
@@ -467,7 +467,7 @@ define void @sextload_i8_to_i32_bfe(i32 addrspace(1)* %out, i8 addrspace(1)* %pt
ret void
}
-; FUNC-LABEL: @sextload_i8_to_i32_bfe_0:
+; FUNC-LABEL: {{^}}sextload_i8_to_i32_bfe_0:
; SI-NOT: BFE
; SI: S_ENDPGM
define void @sextload_i8_to_i32_bfe_0(i32 addrspace(1)* %out, i8 addrspace(1)* %ptr) nounwind {
@@ -480,7 +480,7 @@ define void @sextload_i8_to_i32_bfe_0(i32 addrspace(1)* %out, i8 addrspace(1)* %
ret void
}
-; FUNC-LABEL: @sext_in_reg_i1_bfe_offset_0:
+; FUNC-LABEL: {{^}}sext_in_reg_i1_bfe_offset_0:
; SI-NOT: SHR
; SI-NOT: SHL
; SI: V_BFE_I32 v{{[0-9]+}}, v{{[0-9]+}}, 0, 1
@@ -494,7 +494,7 @@ define void @sext_in_reg_i1_bfe_offset_0(i32 addrspace(1)* %out, i32 addrspace(1
ret void
}
-; FUNC-LABEL: @sext_in_reg_i1_bfe_offset_1
+; FUNC-LABEL: {{^}}sext_in_reg_i1_bfe_offset_1:
; SI: BUFFER_LOAD_DWORD
; SI-NOT: SHL
; SI-NOT: SHR
@@ -509,7 +509,7 @@ define void @sext_in_reg_i1_bfe_offset_1(i32 addrspace(1)* %out, i32 addrspace(1
ret void
}
-; FUNC-LABEL: @sext_in_reg_i2_bfe_offset_1:
+; FUNC-LABEL: {{^}}sext_in_reg_i2_bfe_offset_1:
; SI: BUFFER_LOAD_DWORD
; SI: V_LSHLREV_B32_e32 v{{[0-9]+}}, 30, v{{[0-9]+}}
; SI: V_ASHRREV_I32_e32 v{{[0-9]+}}, 30, v{{[0-9]+}}
diff --git a/test/CodeGen/R600/sgpr-control-flow.ll b/test/CodeGen/R600/sgpr-control-flow.ll
index 326b37ac77a..4ce2a4bc9e4 100644
--- a/test/CodeGen/R600/sgpr-control-flow.ll
+++ b/test/CodeGen/R600/sgpr-control-flow.ll
@@ -7,7 +7,7 @@
; If the branch decision is made based on a value in an SGPR then all
; threads will execute the same code paths, so we don't need to worry
; about instructions in different blocks overwriting each other.
-; SI-LABEL: @sgpr_if_else_salu_br
+; SI-LABEL: {{^}}sgpr_if_else_salu_br:
; SI: S_ADD
; SI: S_ADD
@@ -34,7 +34,7 @@ endif:
; The two S_ADD instructions should write to different registers, since
; different threads will take different control flow paths.
-; SI-LABEL: @sgpr_if_else_valu_br
+; SI-LABEL: {{^}}sgpr_if_else_valu_br:
; SI: S_ADD_I32 [[SGPR:s[0-9]+]]
; SI-NOT: S_ADD_I32 [[SGPR]]
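The distinction drawn in these comments is between uniform and divergent branches. As a sketch only (this function is not part of the patch), a branch whose condition is computed entirely from scalar kernel arguments is uniform, so every thread takes the same path and both adds can safely stay on the scalar unit:

define void @uniform_branch_sketch(i32 addrspace(1)* %out, i32 %a, i32 %b) {
entry:
  %cmp = icmp eq i32 %a, 0
  br i1 %cmp, label %if, label %else

if:
  %v0 = add i32 %a, %b
  br label %endif

else:
  %v1 = add i32 %b, %b
  br label %endif

endif:
  %r = phi i32 [ %v0, %if ], [ %v1, %else ]
  store i32 %r, i32 addrspace(1)* %out
  ret void
}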
diff --git a/test/CodeGen/R600/sgpr-copy-duplicate-operand.ll b/test/CodeGen/R600/sgpr-copy-duplicate-operand.ll
index 9d8a623125f..fa6b0cdd29c 100644
--- a/test/CodeGen/R600/sgpr-copy-duplicate-operand.ll
+++ b/test/CodeGen/R600/sgpr-copy-duplicate-operand.ll
@@ -3,7 +3,7 @@
; Copy VGPR -> SGPR used twice as an instruction operand, which is then
; used in a REG_SEQUENCE that also needs to be handled.
-; SI-LABEL: @test_dup_operands:
+; SI-LABEL: {{^}}test_dup_operands:
; SI: V_ADD_I32_e32
define void @test_dup_operands(<2 x i32> addrspace(1)* noalias %out, <2 x i32> addrspace(1)* noalias %in) {
%a = load <2 x i32> addrspace(1)* %in
diff --git a/test/CodeGen/R600/sgpr-copy.ll b/test/CodeGen/R600/sgpr-copy.ll
index c7d5bf90644..a58427940d8 100644
--- a/test/CodeGen/R600/sgpr-copy.ll
+++ b/test/CodeGen/R600/sgpr-copy.ll
@@ -2,7 +2,7 @@
; This test checks that no VGPR to SGPR copies are created by the register
; allocator.
-; CHECK-LABEL: @phi1
+; CHECK-LABEL: {{^}}phi1:
; CHECK: S_BUFFER_LOAD_DWORD [[DST:s[0-9]]], {{s\[[0-9]+:[0-9]+\]}}, 0
; CHECK: V_MOV_B32_e32 v{{[0-9]}}, [[DST]]
@@ -29,7 +29,7 @@ ENDIF: ; preds = %main_body, %ELSE
}
; Make sure this program doesn't crash
-; CHECK-LABEL: @phi2
+; CHECK-LABEL: {{^}}phi2:
define void @phi2(<16 x i8> addrspace(2)* inreg, <16 x i8> addrspace(2)* inreg, <32 x i8> addrspace(2)* inreg, i32 inreg, <2 x i32>, <2 x i32>, <2 x i32>, <3 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, float, float, float, float, float, float, float, float, float) #0 {
main_body:
%20 = getelementptr <16 x i8> addrspace(2)* %0, i32 0
@@ -149,7 +149,7 @@ ENDIF24: ; preds = %ENDIF, %IF25
}
; We just want to make sure the program doesn't crash
-; CHECK-LABEL: @loop
+; CHECK-LABEL: {{^}}loop:
define void @loop(<16 x i8> addrspace(2)* inreg, <16 x i8> addrspace(2)* inreg, <32 x i8> addrspace(2)* inreg, i32 inreg, <2 x i32>, <2 x i32>, <2 x i32>, <3 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, float, float, float, float, float, float, float, float, float) #0 {
main_body:
@@ -227,7 +227,7 @@ declare i32 @llvm.SI.packf16(float, float) #1
; registers were being identified as an SGPR regclass which was causing
; an assertion failure.
-; CHECK-LABEL: @sample_v3
+; CHECK-LABEL: {{^}}sample_v3:
; CHECK: IMAGE_SAMPLE
; CHECK: IMAGE_SAMPLE
; CHECK: EXP
@@ -269,7 +269,7 @@ endif:
!2 = metadata !{metadata !"const", null, i32 1}
-; CHECK-LABEL: @copy1
+; CHECK-LABEL: {{^}}copy1:
; CHECK: BUFFER_LOAD_DWORD
; CHECK: V_ADD
; CHECK: S_ENDPGM
@@ -296,7 +296,7 @@ endif:
}
; This test is just checking that we don't crash / assertion fail.
-; CHECK-LABEL: @copy2
+; CHECK-LABEL: {{^}}copy2:
; CHECK: S_ENDPGM
define void @copy2([17 x <16 x i8>] addrspace(2)* byval, [32 x <16 x i8>] addrspace(2)* byval, [16 x <32 x i8>] addrspace(2)* byval, float inreg, i32 inreg, <2 x i32>, <2 x i32>, <2 x i32>, <3 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, float, float, float, float, float, float, float, float, float) #0 {
diff --git a/test/CodeGen/R600/shared-op-cycle.ll b/test/CodeGen/R600/shared-op-cycle.ll
index 0484fc9a856..f52a9baf4d1 100644
--- a/test/CodeGen/R600/shared-op-cycle.ll
+++ b/test/CodeGen/R600/shared-op-cycle.ll
@@ -1,6 +1,6 @@
; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
-; CHECK: @main
+; CHECK: {{^}}main:
; CHECK: MULADD_IEEE *
; CHECK-NOT: MULADD_IEEE *
diff --git a/test/CodeGen/R600/shl.ll b/test/CodeGen/R600/shl.ll
index 43fab2a39dc..c3433d9077b 100644
--- a/test/CodeGen/R600/shl.ll
+++ b/test/CodeGen/R600/shl.ll
@@ -1,11 +1,11 @@
;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck --check-prefix=EG-CHECK %s
;RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck --check-prefix=SI-CHECK %s
-;EG-CHECK: @shl_v2i32
+;EG-CHECK: {{^}}shl_v2i32:
;EG-CHECK: LSHL {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
;EG-CHECK: LSHL {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;SI-CHECK: @shl_v2i32
+;SI-CHECK: {{^}}shl_v2i32:
;SI-CHECK: V_LSHL_B32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
;SI-CHECK: V_LSHL_B32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
@@ -18,13 +18,13 @@ define void @shl_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in
ret void
}
-;EG-CHECK: @shl_v4i32
+;EG-CHECK: {{^}}shl_v4i32:
;EG-CHECK: LSHL {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
;EG-CHECK: LSHL {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
;EG-CHECK: LSHL {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
;EG-CHECK: LSHL {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;SI-CHECK: @shl_v4i32
+;SI-CHECK: {{^}}shl_v4i32:
;SI-CHECK: V_LSHL_B32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
;SI-CHECK: V_LSHL_B32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
;SI-CHECK: V_LSHL_B32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
@@ -39,7 +39,7 @@ define void @shl_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in
ret void
}
-;EG-CHECK: @shl_i64
+;EG-CHECK: {{^}}shl_i64:
;EG-CHECK: SUB_INT {{\*? *}}[[COMPSH:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHIFT:T[0-9]+\.[XYZW]]]
;EG-CHECK: LSHR {{\* *}}[[TEMP:T[0-9]+\.[XYZW]]], [[OPLO:T[0-9]+\.[XYZW]]], {{[[COMPSH]]|PV.[XYZW]}}
;EG-CHECK: LSHR {{\*? *}}[[OVERF:T[0-9]+\.[XYZW]]], {{[[TEMP]]|PV.[XYZW]}}, 1
@@ -51,7 +51,7 @@ define void @shl_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in
;EG-CHECK-DAG: CNDE_INT {{\*? *}}[[RESLO:T[0-9]+\.[XYZW]]], {{T[0-9]+\.[XYZW]}}
;EG-CHECK-DAG: CNDE_INT {{\*? *}}[[RESHI:T[0-9]+\.[XYZW]]], {{T[0-9]+\.[XYZW], .*}}, 0.0
-;SI-CHECK: @shl_i64
+;SI-CHECK: {{^}}shl_i64:
;SI-CHECK: V_LSHL_B64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
define void @shl_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
@@ -63,7 +63,7 @@ define void @shl_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
ret void
}
-;EG-CHECK: @shl_v2i64
+;EG-CHECK: {{^}}shl_v2i64:
;EG-CHECK-DAG: SUB_INT {{\*? *}}[[COMPSHA:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHA:T[0-9]+\.[XYZW]]]
;EG-CHECK-DAG: SUB_INT {{\*? *}}[[COMPSHB:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHB:T[0-9]+\.[XYZW]]]
;EG-CHECK-DAG: LSHR {{\*? *}}[[COMPSHA]]
@@ -85,7 +85,7 @@ define void @shl_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
;EG-CHECK-DAG: CNDE_INT
;EG-CHECK-DAG: CNDE_INT
-;SI-CHECK: @shl_v2i64
+;SI-CHECK: {{^}}shl_v2i64:
;SI-CHECK: V_LSHL_B64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
;SI-CHECK: V_LSHL_B64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
@@ -98,7 +98,7 @@ define void @shl_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(1)* %in
ret void
}
-;EG-CHECK: @shl_v4i64
+;EG-CHECK: {{^}}shl_v4i64:
;EG-CHECK-DAG: SUB_INT {{\*? *}}[[COMPSHA:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHA:T[0-9]+\.[XYZW]]]
;EG-CHECK-DAG: SUB_INT {{\*? *}}[[COMPSHB:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHB:T[0-9]+\.[XYZW]]]
;EG-CHECK-DAG: SUB_INT {{\*? *}}[[COMPSHC:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHC:T[0-9]+\.[XYZW]]]
@@ -140,7 +140,7 @@ define void @shl_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(1)* %in
;EG-CHECK-DAG: CNDE_INT
;EG-CHECK-DAG: CNDE_INT
-;SI-CHECK: @shl_v4i64
+;SI-CHECK: {{^}}shl_v4i64:
;SI-CHECK: V_LSHL_B64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
;SI-CHECK: V_LSHL_B64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
;SI-CHECK: V_LSHL_B64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
diff --git a/test/CodeGen/R600/shl_add_constant.ll b/test/CodeGen/R600/shl_add_constant.ll
index 60f35d78654..f8c64c8f973 100644
--- a/test/CodeGen/R600/shl_add_constant.ll
+++ b/test/CodeGen/R600/shl_add_constant.ll
@@ -4,7 +4,7 @@ declare i32 @llvm.r600.read.tidig.x() #1
; Test with inline immediate
-; FUNC-LABEL: @shl_2_add_9_i32
+; FUNC-LABEL: {{^}}shl_2_add_9_i32:
; SI: V_LSHLREV_B32_e32 [[REG:v[0-9]+]], 2, {{v[0-9]+}}
; SI: V_ADD_I32_e32 [[RESULT:v[0-9]+]], 36, [[REG]]
; SI: BUFFER_STORE_DWORD [[RESULT]]
@@ -19,7 +19,7 @@ define void @shl_2_add_9_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
ret void
}
-; FUNC-LABEL: @shl_2_add_9_i32_2_add_uses
+; FUNC-LABEL: {{^}}shl_2_add_9_i32_2_add_uses:
; SI-DAG: V_ADD_I32_e32 [[ADDREG:v[0-9]+]], 9, {{v[0-9]+}}
; SI-DAG: V_LSHLREV_B32_e32 [[SHLREG:v[0-9]+]], 2, {{v[0-9]+}}
; SI-DAG: BUFFER_STORE_DWORD [[ADDREG]]
@@ -38,7 +38,7 @@ define void @shl_2_add_9_i32_2_add_uses(i32 addrspace(1)* %out0, i32 addrspace(1
; Test with add literal constant
-; FUNC-LABEL: @shl_2_add_999_i32
+; FUNC-LABEL: {{^}}shl_2_add_999_i32:
; SI: V_LSHLREV_B32_e32 [[REG:v[0-9]+]], 2, {{v[0-9]+}}
; SI: V_ADD_I32_e32 [[RESULT:v[0-9]+]], 0xf9c, [[REG]]
; SI: BUFFER_STORE_DWORD [[RESULT]]
@@ -53,7 +53,7 @@ define void @shl_2_add_999_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0
ret void
}
-; FUNC-LABEL: @test_add_shl_add_constant
+; FUNC-LABEL: {{^}}test_add_shl_add_constant:
; SI-DAG: S_LOAD_DWORD [[X:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
; SI-DAG: S_LOAD_DWORD [[Y:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xc
; SI: S_LSHL_B32 [[SHL3:s[0-9]+]], [[X]], 3
@@ -69,7 +69,7 @@ define void @test_add_shl_add_constant(i32 addrspace(1)* %out, i32 %x, i32 %y) #
ret void
}
-; FUNC-LABEL: @test_add_shl_add_constant_inv
+; FUNC-LABEL: {{^}}test_add_shl_add_constant_inv:
; SI-DAG: S_LOAD_DWORD [[X:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
; SI-DAG: S_LOAD_DWORD [[Y:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xc
; SI: S_LSHL_B32 [[SHL3:s[0-9]+]], [[X]], 3
diff --git a/test/CodeGen/R600/shl_add_ptr.ll b/test/CodeGen/R600/shl_add_ptr.ll
index ecab1c8bd60..9d187059251 100644
--- a/test/CodeGen/R600/shl_add_ptr.ll
+++ b/test/CodeGen/R600/shl_add_ptr.ll
@@ -14,7 +14,7 @@ declare i32 @llvm.r600.read.tidig.x() #1
; Make sure the (add tid, 2) << 2 gets folded into the ds's offset as (tid << 2) + 8
-; SI-LABEL: @load_shl_base_lds_0
+; SI-LABEL: {{^}}load_shl_base_lds_0:
; SI: V_LSHLREV_B32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
; SI: DS_READ_B32 {{v[0-9]+}}, [[PTR]], 0x8, [M0]
; SI: S_ENDPGM
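A sketch of the IR shape being matched here (the names and array size are illustrative, not the exact test body): the shift distributes over the add, (tid + 2) << 2 == (tid << 2) + 8, so the constant 8 becomes the 0x8 immediate offset on the DS_READ_B32 and only the shifted tid remains as the address register.

declare i32 @llvm.r600.read.tidig.x() readnone

@lds_sketch = addrspace(3) global [512 x float] zeroinitializer, align 4

define void @load_shl_base_lds_sketch(float addrspace(1)* %out) {
  %tid = call i32 @llvm.r600.read.tidig.x()
  %idx = add i32 %tid, 2
  %ptr = getelementptr [512 x float] addrspace(3)* @lds_sketch, i32 0, i32 %idx
  %val = load float addrspace(3)* %ptr, align 4
  store float %val, float addrspace(1)* %out, align 4
  ret void
}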
@@ -31,7 +31,7 @@ define void @load_shl_base_lds_0(float addrspace(1)* %out, i32 addrspace(1)* %ad
; Make sure once the first use is folded into the addressing mode, the
; remaining add use goes through the normal shl + add constant fold.
-; SI-LABEL: @load_shl_base_lds_1
+; SI-LABEL: {{^}}load_shl_base_lds_1:
; SI: V_LSHLREV_B32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
; SI: DS_READ_B32 [[RESULT:v[0-9]+]], [[PTR]], 0x8, [M0]
; SI: V_ADD_I32_e32 [[ADDUSE:v[0-9]+]], 8, v{{[0-9]+}}
@@ -51,7 +51,7 @@ define void @load_shl_base_lds_1(float addrspace(1)* %out, i32 addrspace(1)* %ad
@maxlds = addrspace(3) global [65536 x i8] zeroinitializer, align 4
-; SI-LABEL: @load_shl_base_lds_max_offset
+; SI-LABEL: {{^}}load_shl_base_lds_max_offset:
; SI: DS_READ_U8 v{{[0-9]+}}, v{{[0-9]+}}, 0xffff
; SI: S_ENDPGM
define void @load_shl_base_lds_max_offset(i8 addrspace(1)* %out, i8 addrspace(3)* %lds, i32 addrspace(1)* %add_use) #0 {
@@ -67,7 +67,7 @@ define void @load_shl_base_lds_max_offset(i8 addrspace(1)* %out, i8 addrspace(3)
; The two globals are placed adjacent in memory, so the same base
; pointer can be used with an offset into the second one.
-; SI-LABEL: @load_shl_base_lds_2
+; SI-LABEL: {{^}}load_shl_base_lds_2:
; SI: V_LSHLREV_B32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
; SI-NEXT: DS_READ_B32 {{v[0-9]+}}, [[PTR]], 0x100, [M0]
; SI-NEXT: DS_READ_B32 {{v[0-9]+}}, [[PTR]], 0x900, [M0]
@@ -84,7 +84,7 @@ define void @load_shl_base_lds_2(float addrspace(1)* %out) #0 {
ret void
}
-; SI-LABEL: @store_shl_base_lds_0
+; SI-LABEL: {{^}}store_shl_base_lds_0:
; SI: V_LSHLREV_B32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
; SI: DS_WRITE_B32 [[PTR]], {{v[0-9]+}}, 0x8 [M0]
; SI: S_ENDPGM
@@ -114,7 +114,7 @@ define void @store_shl_base_lds_0(float addrspace(1)* %out, i32 addrspace(1)* %a
; }
-; SI-LABEL: @atomic_cmpxchg_shl_base_lds_0
+; SI-LABEL: {{^}}atomic_cmpxchg_shl_base_lds_0:
; SI: V_LSHLREV_B32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
; SI: DS_CMPST_RTN_B32 {{v[0-9]+}}, [[PTR]], {{v[0-9]+}}, {{v[0-9]+}}, 0x8
; SI: S_ENDPGM
@@ -129,7 +129,7 @@ define void @atomic_cmpxchg_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace
ret void
}
-; SI-LABEL: @atomic_swap_shl_base_lds_0
+; SI-LABEL: {{^}}atomic_swap_shl_base_lds_0:
; SI: V_LSHLREV_B32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
; SI: DS_WRXCHG_RTN_B32 {{v[0-9]+}}, [[PTR]], {{v[0-9]+}}, 0x8
; SI: S_ENDPGM
@@ -143,7 +143,7 @@ define void @atomic_swap_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)
ret void
}
-; SI-LABEL: @atomic_add_shl_base_lds_0
+; SI-LABEL: {{^}}atomic_add_shl_base_lds_0:
; SI: V_LSHLREV_B32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
; SI: DS_ADD_RTN_U32 {{v[0-9]+}}, [[PTR]], {{v[0-9]+}}, 0x8
; SI: S_ENDPGM
@@ -157,7 +157,7 @@ define void @atomic_add_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)*
ret void
}
-; SI-LABEL: @atomic_sub_shl_base_lds_0
+; SI-LABEL: {{^}}atomic_sub_shl_base_lds_0:
; SI: V_LSHLREV_B32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
; SI: DS_SUB_RTN_U32 {{v[0-9]+}}, [[PTR]], {{v[0-9]+}}, 0x8
; SI: S_ENDPGM
@@ -171,7 +171,7 @@ define void @atomic_sub_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)*
ret void
}
-; SI-LABEL: @atomic_and_shl_base_lds_0
+; SI-LABEL: {{^}}atomic_and_shl_base_lds_0:
; SI: V_LSHLREV_B32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
; SI: DS_AND_RTN_B32 {{v[0-9]+}}, [[PTR]], {{v[0-9]+}}, 0x8
; SI: S_ENDPGM
@@ -185,7 +185,7 @@ define void @atomic_and_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)*
ret void
}
-; SI-LABEL: @atomic_or_shl_base_lds_0
+; SI-LABEL: {{^}}atomic_or_shl_base_lds_0:
; SI: V_LSHLREV_B32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
; SI: DS_OR_RTN_B32 {{v[0-9]+}}, [[PTR]], {{v[0-9]+}}, 0x8
; SI: S_ENDPGM
@@ -199,7 +199,7 @@ define void @atomic_or_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)*
ret void
}
-; SI-LABEL: @atomic_xor_shl_base_lds_0
+; SI-LABEL: {{^}}atomic_xor_shl_base_lds_0:
; SI: V_LSHLREV_B32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
; SI: DS_XOR_RTN_B32 {{v[0-9]+}}, [[PTR]], {{v[0-9]+}}, 0x8
; SI: S_ENDPGM
@@ -223,7 +223,7 @@ define void @atomic_xor_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)*
; ret void
; }
-; SI-LABEL: @atomic_min_shl_base_lds_0
+; SI-LABEL: {{^}}atomic_min_shl_base_lds_0:
; SI: V_LSHLREV_B32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
; SI: DS_MIN_RTN_I32 {{v[0-9]+}}, [[PTR]], {{v[0-9]+}}, 0x8
; SI: S_ENDPGM
@@ -237,7 +237,7 @@ define void @atomic_min_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)*
ret void
}
-; SI-LABEL: @atomic_max_shl_base_lds_0
+; SI-LABEL: {{^}}atomic_max_shl_base_lds_0:
; SI: V_LSHLREV_B32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
; SI: DS_MAX_RTN_I32 {{v[0-9]+}}, [[PTR]], {{v[0-9]+}}, 0x8
; SI: S_ENDPGM
@@ -251,7 +251,7 @@ define void @atomic_max_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)*
ret void
}
-; SI-LABEL: @atomic_umin_shl_base_lds_0
+; SI-LABEL: {{^}}atomic_umin_shl_base_lds_0:
; SI: V_LSHLREV_B32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
; SI: DS_MIN_RTN_U32 {{v[0-9]+}}, [[PTR]], {{v[0-9]+}}, 0x8
; SI: S_ENDPGM
@@ -265,7 +265,7 @@ define void @atomic_umin_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)
ret void
}
-; SI-LABEL: @atomic_umax_shl_base_lds_0
+; SI-LABEL: {{^}}atomic_umax_shl_base_lds_0:
; SI: V_LSHLREV_B32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
; SI: DS_MAX_RTN_U32 {{v[0-9]+}}, [[PTR]], {{v[0-9]+}}, 0x8
; SI: S_ENDPGM
diff --git a/test/CodeGen/R600/si-annotate-cf-assertion.ll b/test/CodeGen/R600/si-annotate-cf-assertion.ll
index daa4667150b..6d60b0a33a4 100644
--- a/test/CodeGen/R600/si-annotate-cf-assertion.ll
+++ b/test/CodeGen/R600/si-annotate-cf-assertion.ll
@@ -4,7 +4,7 @@
define void @test(i32 addrspace(1)* %g, i8 addrspace(3)* %l, i32 %x) nounwind {
-; CHECK-LABEL: @test:
+; CHECK-LABEL: {{^}}test:
entry:
switch i32 %x, label %sw.default [
diff --git a/test/CodeGen/R600/si-lod-bias.ll b/test/CodeGen/R600/si-lod-bias.ll
index 8d7a79cdbda..5c8befa17c2 100644
--- a/test/CodeGen/R600/si-lod-bias.ll
+++ b/test/CodeGen/R600/si-lod-bias.ll
@@ -3,7 +3,7 @@
; This shader has the potential to generate illegal VGPR to SGPR copies if
; the wrong register class is used for the REG_SEQUENCE instructions.
-; CHECK: @main
+; CHECK: {{^}}main:
; CHECK: IMAGE_SAMPLE_B v{{\[[0-9]:[0-9]\]}}, 15, 0, 0, 0, 0, 0, 0, 0, v{{\[[0-9]:[0-9]\]}}
define void @main(<16 x i8> addrspace(2)* inreg, <16 x i8> addrspace(2)* inreg, <32 x i8> addrspace(2)* inreg, i32 inreg, <2 x i32>, <2 x i32>, <2 x i32>, <3 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, float, float, float, float, float, float, float, float, float) #0 {
diff --git a/test/CodeGen/R600/si-sgpr-spill.ll b/test/CodeGen/R600/si-sgpr-spill.ll
index 53a096513bb..78f915f9995 100644
--- a/test/CodeGen/R600/si-sgpr-spill.ll
+++ b/test/CodeGen/R600/si-sgpr-spill.ll
@@ -3,7 +3,7 @@
; These tests check that the compiler won't crash when it needs to spill
; SGPRs.
-; CHECK-LABEL: @main
+; CHECK-LABEL: {{^}}main:
; Writing to M0 from an SMRD instruction will hang the GPU.
; CHECK-NOT: S_BUFFER_LOAD_DWORD m0
; CHECK: S_ENDPGM
@@ -688,7 +688,7 @@ attributes #4 = { nounwind readonly }
!0 = metadata !{metadata !"const", null, i32 1}
-; CHECK-LABEL: @main1
+; CHECK-LABEL: {{^}}main1:
; CHECK: S_ENDPGM
define void @main1([17 x <16 x i8>] addrspace(2)* byval, [32 x <16 x i8>] addrspace(2)* byval, [16 x <32 x i8>] addrspace(2)* byval, float inreg, i32 inreg, <2 x i32>, <2 x i32>, <2 x i32>, <3 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, float, float, float, float, float, float, float, float, float) #0 {
main_body:
diff --git a/test/CodeGen/R600/si-vector-hang.ll b/test/CodeGen/R600/si-vector-hang.ll
index 093234f7195..a6fec02747c 100644
--- a/test/CodeGen/R600/si-vector-hang.ll
+++ b/test/CodeGen/R600/si-vector-hang.ll
@@ -1,6 +1,6 @@
; RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck %s
-; CHECK: @test_8_min_char
+; CHECK: {{^}}test_8_min_char:
; CHECK: BUFFER_STORE_BYTE
; CHECK: BUFFER_STORE_BYTE
; CHECK: BUFFER_STORE_BYTE
diff --git a/test/CodeGen/R600/sign_extend.ll b/test/CodeGen/R600/sign_extend.ll
index dbc2320f21c..f9d8fcc14ad 100644
--- a/test/CodeGen/R600/sign_extend.ll
+++ b/test/CodeGen/R600/sign_extend.ll
@@ -1,6 +1,6 @@
; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
-; SI-LABEL: @s_sext_i1_to_i32:
+; SI-LABEL: {{^}}s_sext_i1_to_i32:
; SI: V_CNDMASK_B32_e64
; SI: S_ENDPGM
define void @s_sext_i1_to_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
@@ -10,7 +10,7 @@ define void @s_sext_i1_to_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
ret void
}
-; SI-LABEL: @test_s_sext_i32_to_i64:
+; SI-LABEL: {{^}}test_s_sext_i32_to_i64:
; SI: S_ASHR_I32
; SI: S_ENDPGM
define void @test_s_sext_i32_to_i64(i64 addrspace(1)* %out, i32 %a, i32 %b, i32 %c) nounwind {
@@ -22,7 +22,7 @@ entry:
ret void
}
-; SI-LABEL: @s_sext_i1_to_i64:
+; SI-LABEL: {{^}}s_sext_i1_to_i64:
; SI: V_CNDMASK_B32_e64
; SI: V_CNDMASK_B32_e64
; SI: S_ENDPGM
@@ -33,7 +33,7 @@ define void @s_sext_i1_to_i64(i64 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
ret void
}
-; SI-LABEL: @s_sext_i32_to_i64:
+; SI-LABEL: {{^}}s_sext_i32_to_i64:
; SI: S_ASHR_I32
; SI: S_ENDPGM
define void @s_sext_i32_to_i64(i64 addrspace(1)* %out, i32 %a) nounwind {
@@ -42,7 +42,7 @@ define void @s_sext_i32_to_i64(i64 addrspace(1)* %out, i32 %a) nounwind {
ret void
}
-; SI-LABEL: @v_sext_i32_to_i64:
+; SI-LABEL: {{^}}v_sext_i32_to_i64:
; SI: V_ASHR
; SI: S_ENDPGM
define void @v_sext_i32_to_i64(i64 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
@@ -52,7 +52,7 @@ define void @v_sext_i32_to_i64(i64 addrspace(1)* %out, i32 addrspace(1)* %in) no
ret void
}
-; SI-LABEL: @s_sext_i16_to_i64:
+; SI-LABEL: {{^}}s_sext_i16_to_i64:
; SI: S_ENDPGM
define void @s_sext_i16_to_i64(i64 addrspace(1)* %out, i16 %a) nounwind {
%sext = sext i16 %a to i64
diff --git a/test/CodeGen/R600/simplify-demanded-bits-build-pair.ll b/test/CodeGen/R600/simplify-demanded-bits-build-pair.ll
index e6f8ce8ef0e..cb5297ca318 100644
--- a/test/CodeGen/R600/simplify-demanded-bits-build-pair.ll
+++ b/test/CodeGen/R600/simplify-demanded-bits-build-pair.ll
@@ -14,7 +14,7 @@ define void @trunc_select_i64(i32 addrspace(1)* %out, i64 %a, i64 %b, i32 %c) {
}
; FIXME: Fix truncating store for local memory
-; SI-LABEL: @trunc_load_alloca_i64:
+; SI-LABEL: {{^}}trunc_load_alloca_i64:
; SI: V_MOVRELS_B32
; SI-NOT: V_MOVRELS_B32
; SI: S_ENDPGM
diff --git a/test/CodeGen/R600/sint_to_fp.ll b/test/CodeGen/R600/sint_to_fp.ll
index 22340b9c426..b45982539c9 100644
--- a/test/CodeGen/R600/sint_to_fp.ll
+++ b/test/CodeGen/R600/sint_to_fp.ll
@@ -2,7 +2,7 @@
; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=R600 -check-prefix=FUNC %s
-; FUNC-LABEL: @s_sint_to_fp_i32_to_f32
+; FUNC-LABEL: {{^}}s_sint_to_fp_i32_to_f32:
; R600: INT_TO_FLT * T{{[0-9]+\.[XYZW]}}, KC0[2].Z
; SI: V_CVT_F32_I32_e32 {{v[0-9]+}}, {{s[0-9]+$}}
define void @s_sint_to_fp_i32_to_f32(float addrspace(1)* %out, i32 %in) {
@@ -11,7 +11,7 @@ define void @s_sint_to_fp_i32_to_f32(float addrspace(1)* %out, i32 %in) {
ret void
}
-; FUNC-LABEL: @sint_to_fp_v2i32
+; FUNC-LABEL: {{^}}sint_to_fp_v2i32:
; R600-DAG: INT_TO_FLT * T{{[0-9]+\.[XYZW]}}, KC0[2].W
; R600-DAG: INT_TO_FLT * T{{[0-9]+\.[XYZW]}}, KC0[3].X
@@ -23,7 +23,7 @@ define void @sint_to_fp_v2i32(<2 x float> addrspace(1)* %out, <2 x i32> %in) {
ret void
}
-; FUNC-LABEL: @sint_to_fp_v4i32
+; FUNC-LABEL: {{^}}sint_to_fp_v4i32:
; R600: INT_TO_FLT * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; R600: INT_TO_FLT * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; R600: INT_TO_FLT * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
@@ -40,7 +40,7 @@ define void @sint_to_fp_v4i32(<4 x float> addrspace(1)* %out, <4 x i32> addrspac
ret void
}
-; FUNC-LABEL: @sint_to_fp_i1_f32:
+; FUNC-LABEL: {{^}}sint_to_fp_i1_f32:
; SI: V_CMP_EQ_I32_e64 [[CMP:s\[[0-9]+:[0-9]\]]],
; SI-NEXT: V_CNDMASK_B32_e64 [[RESULT:v[0-9]+]], 0, 1.0, [[CMP]]
; SI: BUFFER_STORE_DWORD [[RESULT]],
@@ -52,7 +52,7 @@ define void @sint_to_fp_i1_f32(float addrspace(1)* %out, i32 %in) {
ret void
}
-; FUNC-LABEL: @sint_to_fp_i1_f32_load:
+; FUNC-LABEL: {{^}}sint_to_fp_i1_f32_load:
; SI: V_CNDMASK_B32_e64 [[RESULT:v[0-9]+]], 0, -1.0
; SI: BUFFER_STORE_DWORD [[RESULT]],
; SI: S_ENDPGM
diff --git a/test/CodeGen/R600/sint_to_fp64.ll b/test/CodeGen/R600/sint_to_fp64.ll
index 12b8cf57cf5..43889b616da 100644
--- a/test/CodeGen/R600/sint_to_fp64.ll
+++ b/test/CodeGen/R600/sint_to_fp64.ll
@@ -1,6 +1,6 @@
; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
-; SI: @sint_to_fp64
+; SI: {{^}}sint_to_fp64:
; SI: V_CVT_F64_I32_e32
define void @sint_to_fp64(double addrspace(1)* %out, i32 %in) {
%result = sitofp i32 %in to double
@@ -8,7 +8,7 @@ define void @sint_to_fp64(double addrspace(1)* %out, i32 %in) {
ret void
}
-; SI-LABEL: @sint_to_fp_i1_f64:
+; SI-LABEL: {{^}}sint_to_fp_i1_f64:
; SI: V_CMP_EQ_I32_e64 [[CMP:s\[[0-9]+:[0-9]\]]],
; FIXME: The VGPR sources for V_CNDMASK are currently copied from SGPRs;
; we should be able to fold the SGPRs into the V_CNDMASK instructions.
@@ -23,7 +23,7 @@ define void @sint_to_fp_i1_f64(double addrspace(1)* %out, i32 %in) {
ret void
}
-; SI-LABEL: @sint_to_fp_i1_f64_load:
+; SI-LABEL: {{^}}sint_to_fp_i1_f64_load:
; SI: V_CNDMASK_B32_e64 [[IRESULT:v[0-9]]], 0, -1
; SI-NEXT: V_CVT_F64_I32_e32 [[RESULT:v\[[0-9]+:[0-9]\]]], [[IRESULT]]
; SI: BUFFER_STORE_DWORDX2 [[RESULT]]
diff --git a/test/CodeGen/R600/smrd.ll b/test/CodeGen/R600/smrd.ll
index b63b01e659e..329739142e2 100644
--- a/test/CodeGen/R600/smrd.ll
+++ b/test/CodeGen/R600/smrd.ll
@@ -1,7 +1,7 @@
; RUN: llc < %s -march=r600 -mcpu=SI -show-mc-encoding -verify-machineinstrs | FileCheck %s
; SMRD load with an immediate offset.
-; CHECK-LABEL: @smrd0
+; CHECK-LABEL: {{^}}smrd0:
; CHECK: S_LOAD_DWORD s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0x1 ; encoding: [0x01
define void @smrd0(i32 addrspace(1)* %out, i32 addrspace(2)* %ptr) {
entry:
@@ -12,7 +12,7 @@ entry:
}
; SMRD load with the largest possible immediate offset.
-; CHECK-LABEL: @smrd1
+; CHECK-LABEL: {{^}}smrd1:
; CHECK: S_LOAD_DWORD s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0xff ; encoding: [0xff
define void @smrd1(i32 addrspace(1)* %out, i32 addrspace(2)* %ptr) {
entry:
@@ -23,7 +23,7 @@ entry:
}
; SMRD load with an offset greater than the largest possible immediate.
-; CHECK-LABEL: @smrd2
+; CHECK-LABEL: {{^}}smrd2:
; CHECK: S_MOV_B32 s[[OFFSET:[0-9]]], 0x400
; CHECK: S_LOAD_DWORD s{{[0-9]}}, s[{{[0-9]:[0-9]}}], s[[OFFSET]] ; encoding: [0x0[[OFFSET]]
; CHECK: S_ENDPGM
@@ -36,7 +36,7 @@ entry:
}
; SMRD load with a 64-bit offset
-; CHECK-LABEL: @smrd3
+; CHECK-LABEL: {{^}}smrd3:
; CHECK-DAG: S_MOV_B32 s[[SLO:[0-9]+]], 0
; CHECK-DAG: S_MOV_B32 s[[SHI:[0-9]+]], 4
; FIXME: We don't need to copy these values to VGPRs
@@ -54,7 +54,7 @@ entry:
}
; SMRD load using the load.const intrinsic with an immediate offset
-; CHECK-LABEL: @smrd_load_const0
+; CHECK-LABEL: {{^}}smrd_load_const0:
; CHECK: S_BUFFER_LOAD_DWORD s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0x4 ; encoding: [0x04
define void @smrd_load_const0(<16 x i8> addrspace(2)* inreg, <16 x i8> addrspace(2)* inreg, <32 x i8> addrspace(2)* inreg, i32 inreg, <2 x i32>, <2 x i32>, <2 x i32>, <3 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, float, float, float, float, float, float, float, float, float) #0 {
main_body:
@@ -67,7 +67,7 @@ main_body:
; SMRD load using the load.const intrinsic with the largest possible immediate
; offset.
-; CHECK-LABEL: @smrd_load_const1
+; CHECK-LABEL: {{^}}smrd_load_const1:
; CHECK: S_BUFFER_LOAD_DWORD s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0xff ; encoding: [0xff
define void @smrd_load_const1(<16 x i8> addrspace(2)* inreg, <16 x i8> addrspace(2)* inreg, <32 x i8> addrspace(2)* inreg, i32 inreg, <2 x i32>, <2 x i32>, <2 x i32>, <3 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, float, float, float, float, float, float, float, float, float) #0 {
main_body:
@@ -80,7 +80,7 @@ main_body:
; SMRD load using the load.const intrinsic with an offset greater than the
; largest possible immediate offset.
-; CHECK-LABEL: @smrd_load_const2
+; CHECK-LABEL: {{^}}smrd_load_const2:
; CHECK: S_MOV_B32 s[[OFFSET:[0-9]]], 0x400
; CHECK: S_BUFFER_LOAD_DWORD s{{[0-9]}}, s[{{[0-9]:[0-9]}}], s[[OFFSET]] ; encoding: [0x0[[OFFSET]]
define void @smrd_load_const2(<16 x i8> addrspace(2)* inreg, <16 x i8> addrspace(2)* inreg, <32 x i8> addrspace(2)* inreg, i32 inreg, <2 x i32>, <2 x i32>, <2 x i32>, <3 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, float, float, float, float, float, float, float, float, float) #0 {
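A worked reading of the three offsets checked above (assuming SI's SMRD encoding, where the 8-bit immediate offset counts dwords while a register offset counts bytes; the dword indexes are inferred from the comments, since the test bodies are elided in this diff):
;   index 1 dword    -> encoded immediate 0x1
;   index 255 dwords -> encoded immediate 0xff   (largest value the 8-bit field holds)
;   index 256 dwords -> no longer fits the field, so it is materialized as a byte
;                       offset in an SGPR: S_MOV_B32 s[[OFFSET]], 0x400 (256 * 4 bytes)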
diff --git a/test/CodeGen/R600/split-scalar-i64-add.ll b/test/CodeGen/R600/split-scalar-i64-add.ll
index cc50f89eaa7..e9968af1be9 100644
--- a/test/CodeGen/R600/split-scalar-i64-add.ll
+++ b/test/CodeGen/R600/split-scalar-i64-add.ll
@@ -8,7 +8,7 @@ declare i32 @llvm.r600.read.tidig.x() readnone
; set in vcc, which is undefined since the low scalar half add sets
; scc instead.
-; FUNC-LABEL: @imp_def_vcc_split_i64_add_0
+; FUNC-LABEL: {{^}}imp_def_vcc_split_i64_add_0:
define void @imp_def_vcc_split_i64_add_0(i64 addrspace(1)* %out, i32 %val) {
%vec.0 = insertelement <2 x i32> undef, i32 %val, i32 0
%vec.1 = insertelement <2 x i32> %vec.0, i32 999999, i32 1
@@ -18,7 +18,7 @@ define void @imp_def_vcc_split_i64_add_0(i64 addrspace(1)* %out, i32 %val) {
ret void
}
-; FUNC-LABEL: @imp_def_vcc_split_i64_add_1
+; FUNC-LABEL: {{^}}imp_def_vcc_split_i64_add_1:
define void @imp_def_vcc_split_i64_add_1(i64 addrspace(1)* %out, i32 %val0, i64 %val1) {
%vec.0 = insertelement <2 x i32> undef, i32 %val0, i32 0
%vec.1 = insertelement <2 x i32> %vec.0, i32 99999, i32 1
diff --git a/test/CodeGen/R600/sra.ll b/test/CodeGen/R600/sra.ll
index 9eb3dc54407..f14c52021e0 100644
--- a/test/CodeGen/R600/sra.ll
+++ b/test/CodeGen/R600/sra.ll
@@ -1,11 +1,11 @@
;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck --check-prefix=EG-CHECK %s
;RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck --check-prefix=SI-CHECK %s
-;EG-CHECK-LABEL: @ashr_v2i32
+;EG-CHECK-LABEL: {{^}}ashr_v2i32:
;EG-CHECK: ASHR {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
;EG-CHECK: ASHR {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;SI-CHECK-LABEL: @ashr_v2i32
+;SI-CHECK-LABEL: {{^}}ashr_v2i32:
;SI-CHECK: V_ASHR_I32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
;SI-CHECK: V_ASHR_I32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
@@ -18,13 +18,13 @@ define void @ashr_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %i
ret void
}
-;EG-CHECK-LABEL: @ashr_v4i32
+;EG-CHECK-LABEL: {{^}}ashr_v4i32:
;EG-CHECK: ASHR {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
;EG-CHECK: ASHR {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
;EG-CHECK: ASHR {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
;EG-CHECK: ASHR {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;SI-CHECK-LABEL: @ashr_v4i32
+;SI-CHECK-LABEL: {{^}}ashr_v4i32:
;SI-CHECK: V_ASHR_I32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
;SI-CHECK: V_ASHR_I32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
;SI-CHECK: V_ASHR_I32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
@@ -39,10 +39,10 @@ define void @ashr_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %i
ret void
}
-;EG-CHECK-LABEL: @ashr_i64
+;EG-CHECK-LABEL: {{^}}ashr_i64:
;EG-CHECK: ASHR
-;SI-CHECK-LABEL: @ashr_i64
+;SI-CHECK-LABEL: {{^}}ashr_i64:
;SI-CHECK: S_ASHR_I64 s[{{[0-9]}}:{{[0-9]}}], s[{{[0-9]}}:{{[0-9]}}], 8
define void @ashr_i64(i64 addrspace(1)* %out, i32 %in) {
entry:
@@ -52,7 +52,7 @@ entry:
ret void
}
-;EG-CHECK-LABEL: @ashr_i64_2
+;EG-CHECK-LABEL: {{^}}ashr_i64_2:
;EG-CHECK: SUB_INT {{\*? *}}[[COMPSH:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHIFT:T[0-9]+\.[XYZW]]]
;EG-CHECK: LSHL {{\* *}}[[TEMP:T[0-9]+\.[XYZW]]], [[OPHI:T[0-9]+\.[XYZW]]], {{[[COMPSH]]|PV.[XYZW]}}
;EG-CHECK: LSHL {{\*? *}}[[OVERF:T[0-9]+\.[XYZW]]], {{[[TEMP]]|PV.[XYZW]}}, 1
@@ -66,7 +66,7 @@ entry:
;EG-CHECK-DAG: CNDE_INT {{\*? *}}[[RESLO:T[0-9]+\.[XYZW]]], {{T[0-9]+\.[XYZW]}}
;EG-CHECK-DAG: CNDE_INT {{\*? *}}[[RESHI:T[0-9]+\.[XYZW]]], {{T[0-9]+\.[XYZW]}}
-;SI-CHECK-LABEL: @ashr_i64_2
+;SI-CHECK-LABEL: {{^}}ashr_i64_2:
;SI-CHECK: V_ASHR_I64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
define void @ashr_i64_2(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
entry:
@@ -78,7 +78,7 @@ entry:
ret void
}
-;EG-CHECK-LABEL: @ashr_v2i64
+;EG-CHECK-LABEL: {{^}}ashr_v2i64:
;EG-CHECK-DAG: SUB_INT {{\*? *}}[[COMPSHA:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHA:T[0-9]+\.[XYZW]]]
;EG-CHECK-DAG: SUB_INT {{\*? *}}[[COMPSHB:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHB:T[0-9]+\.[XYZW]]]
;EG-CHECK-DAG: LSHL {{\*? *}}[[COMPSHA]]
@@ -104,7 +104,7 @@ entry:
;EG-CHECK-DAG: CNDE_INT
;EG-CHECK-DAG: CNDE_INT
-;SI-CHECK-LABEL: @ashr_v2i64
+;SI-CHECK-LABEL: {{^}}ashr_v2i64:
;SI-CHECK: V_ASHR_I64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
;SI-CHECK: V_ASHR_I64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
@@ -117,7 +117,7 @@ define void @ashr_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(1)* %i
ret void
}
-;EG-CHECK-LABEL: @ashr_v4i64
+;EG-CHECK-LABEL: {{^}}ashr_v4i64:
;EG-CHECK-DAG: SUB_INT {{\*? *}}[[COMPSHA:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHA:T[0-9]+\.[XYZW]]]
;EG-CHECK-DAG: SUB_INT {{\*? *}}[[COMPSHB:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHB:T[0-9]+\.[XYZW]]]
;EG-CHECK-DAG: SUB_INT {{\*? *}}[[COMPSHC:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHC:T[0-9]+\.[XYZW]]]
@@ -167,7 +167,7 @@ define void @ashr_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(1)* %i
;EG-CHECK-DAG: CNDE_INT
;EG-CHECK-DAG: CNDE_INT
-;SI-CHECK-LABEL: @ashr_v4i64
+;SI-CHECK-LABEL: {{^}}ashr_v4i64:
;SI-CHECK: V_ASHR_I64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
;SI-CHECK: V_ASHR_I64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
;SI-CHECK: V_ASHR_I64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
diff --git a/test/CodeGen/R600/srl.ll b/test/CodeGen/R600/srl.ll
index 44ad73f073e..d8800a45bc5 100644
--- a/test/CodeGen/R600/srl.ll
+++ b/test/CodeGen/R600/srl.ll
@@ -1,11 +1,11 @@
;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck --check-prefix=EG-CHECK %s
;RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck --check-prefix=SI-CHECK %s
-;EG-CHECK: @lshr_v2i32
+;EG-CHECK: {{^}}lshr_v2i32:
;EG-CHECK: LSHR {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
;EG-CHECK: LSHR {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;SI-CHECK: @lshr_v2i32
+;SI-CHECK: {{^}}lshr_v2i32:
;SI-CHECK: V_LSHR_B32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
;SI-CHECK: V_LSHR_B32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
@@ -19,13 +19,13 @@ define void @lshr_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %i
}
-;EG-CHECK: @lshr_v4i32
+;EG-CHECK: {{^}}lshr_v4i32:
;EG-CHECK: LSHR {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
;EG-CHECK: LSHR {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
;EG-CHECK: LSHR {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
;EG-CHECK: LSHR {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;SI-CHECK: @lshr_v4i32
+;SI-CHECK: {{^}}lshr_v4i32:
;SI-CHECK: V_LSHR_B32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
;SI-CHECK: V_LSHR_B32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
;SI-CHECK: V_LSHR_B32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
@@ -40,7 +40,7 @@ define void @lshr_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %i
ret void
}
-;EG-CHECK: @lshr_i64
+;EG-CHECK: {{^}}lshr_i64:
;EG-CHECK: SUB_INT {{\*? *}}[[COMPSH:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHIFT:T[0-9]+\.[XYZW]]]
;EG-CHECK: LSHL {{\* *}}[[TEMP:T[0-9]+\.[XYZW]]], [[OPHI:T[0-9]+\.[XYZW]]], {{[[COMPSH]]|PV.[XYZW]}}
;EG-CHECK: LSHL {{\*? *}}[[OVERF:T[0-9]+\.[XYZW]]], {{[[TEMP]]|PV.[XYZW]}}, 1
@@ -53,7 +53,7 @@ define void @lshr_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %i
;EG-CHECK-DAG: CNDE_INT {{\*? *}}[[RESLO:T[0-9]+\.[XYZW]]], {{T[0-9]+\.[XYZW]}}
;EG-CHECK-DAG: CNDE_INT {{\*? *}}[[RESHI:T[0-9]+\.[XYZW]]], {{T[0-9]+\.[XYZW], .*}}, 0.0
-;SI-CHECK: @lshr_i64
+;SI-CHECK: {{^}}lshr_i64:
;SI-CHECK: V_LSHR_B64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
define void @lshr_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
@@ -65,7 +65,7 @@ define void @lshr_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
ret void
}
-;EG-CHECK: @lshr_v2i64
+;EG-CHECK: {{^}}lshr_v2i64:
;EG-CHECK-DAG: SUB_INT {{\*? *}}[[COMPSHA:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHA:T[0-9]+\.[XYZW]]]
;EG-CHECK-DAG: SUB_INT {{\*? *}}[[COMPSHB:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHB:T[0-9]+\.[XYZW]]]
;EG-CHECK-DAG: LSHL {{\*? *}}[[COMPSHA]]
@@ -89,7 +89,7 @@ define void @lshr_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
;EG-CHECK-DAG: CNDE_INT
;EG-CHECK-DAG: CNDE_INT
-;SI-CHECK: @lshr_v2i64
+;SI-CHECK: {{^}}lshr_v2i64:
;SI-CHECK: V_LSHR_B64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
;SI-CHECK: V_LSHR_B64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
@@ -103,7 +103,7 @@ define void @lshr_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(1)* %i
}
-;EG-CHECK: @lshr_v4i64
+;EG-CHECK: {{^}}lshr_v4i64:
;EG-CHECK-DAG: SUB_INT {{\*? *}}[[COMPSHA:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHA:T[0-9]+\.[XYZW]]]
;EG-CHECK-DAG: SUB_INT {{\*? *}}[[COMPSHB:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHB:T[0-9]+\.[XYZW]]]
;EG-CHECK-DAG: SUB_INT {{\*? *}}[[COMPSHC:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHC:T[0-9]+\.[XYZW]]]
@@ -151,7 +151,7 @@ define void @lshr_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(1)* %i
;EG-CHECK-DAG: CNDE_INT
;EG-CHECK-DAG: CNDE_INT
-;SI-CHECK: @lshr_v4i64
+;SI-CHECK: {{^}}lshr_v4i64:
;SI-CHECK: V_LSHR_B64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
;SI-CHECK: V_LSHR_B64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
;SI-CHECK: V_LSHR_B64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
diff --git a/test/CodeGen/R600/ssubo.ll b/test/CodeGen/R600/ssubo.ll
index 066cdf5cd93..18d3e02cefd 100644
--- a/test/CodeGen/R600/ssubo.ll
+++ b/test/CodeGen/R600/ssubo.ll
@@ -4,7 +4,7 @@
declare { i32, i1 } @llvm.ssub.with.overflow.i32(i32, i32) nounwind readnone
declare { i64, i1 } @llvm.ssub.with.overflow.i64(i64, i64) nounwind readnone
-; FUNC-LABEL: @ssubo_i64_zext
+; FUNC-LABEL: {{^}}ssubo_i64_zext:
define void @ssubo_i64_zext(i64 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
%ssub = call { i64, i1 } @llvm.ssub.with.overflow.i64(i64 %a, i64 %b) nounwind
%val = extractvalue { i64, i1 } %ssub, 0
@@ -15,7 +15,7 @@ define void @ssubo_i64_zext(i64 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
ret void
}
-; FUNC-LABEL: @s_ssubo_i32
+; FUNC-LABEL: {{^}}s_ssubo_i32:
define void @s_ssubo_i32(i32 addrspace(1)* %out, i1 addrspace(1)* %carryout, i32 %a, i32 %b) nounwind {
%ssub = call { i32, i1 } @llvm.ssub.with.overflow.i32(i32 %a, i32 %b) nounwind
%val = extractvalue { i32, i1 } %ssub, 0
@@ -25,7 +25,7 @@ define void @s_ssubo_i32(i32 addrspace(1)* %out, i1 addrspace(1)* %carryout, i32
ret void
}
-; FUNC-LABEL: @v_ssubo_i32
+; FUNC-LABEL: {{^}}v_ssubo_i32:
define void @v_ssubo_i32(i32 addrspace(1)* %out, i1 addrspace(1)* %carryout, i32 addrspace(1)* %aptr, i32 addrspace(1)* %bptr) nounwind {
%a = load i32 addrspace(1)* %aptr, align 4
%b = load i32 addrspace(1)* %bptr, align 4
@@ -37,7 +37,7 @@ define void @v_ssubo_i32(i32 addrspace(1)* %out, i1 addrspace(1)* %carryout, i32
ret void
}
-; FUNC-LABEL: @s_ssubo_i64
+; FUNC-LABEL: {{^}}s_ssubo_i64:
; SI: S_SUB_U32
; SI: S_SUBB_U32
define void @s_ssubo_i64(i64 addrspace(1)* %out, i1 addrspace(1)* %carryout, i64 %a, i64 %b) nounwind {
@@ -49,7 +49,7 @@ define void @s_ssubo_i64(i64 addrspace(1)* %out, i1 addrspace(1)* %carryout, i64
ret void
}
-; FUNC-LABEL: @v_ssubo_i64
+; FUNC-LABEL: {{^}}v_ssubo_i64:
; SI: V_SUB_I32_e32
; SI: V_SUBB_U32_e32
define void @v_ssubo_i64(i64 addrspace(1)* %out, i1 addrspace(1)* %carryout, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr) nounwind {
diff --git a/test/CodeGen/R600/store-v3i32.ll b/test/CodeGen/R600/store-v3i32.ll
index 33578035daa..c87bd3591f9 100644
--- a/test/CodeGen/R600/store-v3i32.ll
+++ b/test/CodeGen/R600/store-v3i32.ll
@@ -4,7 +4,7 @@
; 3 vectors have the same size and alignment as 4 vectors, so this
; should be done in a single store.
-; SI-LABEL: @store_v3i32:
+; SI-LABEL: {{^}}store_v3i32:
; SI: BUFFER_STORE_DWORDX4
define void @store_v3i32(<3 x i32> addrspace(1)* %out, <3 x i32> %a) nounwind {
store <3 x i32> %a, <3 x i32> addrspace(1)* %out, align 16
diff --git a/test/CodeGen/R600/store-v3i64.ll b/test/CodeGen/R600/store-v3i64.ll
index 58d28b567bd..310ff415d50 100644
--- a/test/CodeGen/R600/store-v3i64.ll
+++ b/test/CodeGen/R600/store-v3i64.ll
@@ -1,7 +1,7 @@
; XFAIL: *
; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
-; SI-LABEL: @global_store_v3i64:
+; SI-LABEL: {{^}}global_store_v3i64:
; SI: BUFFER_STORE_DWORDX4
; SI: BUFFER_STORE_DWORDX4
define void @global_store_v3i64(<3 x i64> addrspace(1)* %out, <3 x i64> %x) {
@@ -9,19 +9,19 @@ define void @global_store_v3i64(<3 x i64> addrspace(1)* %out, <3 x i64> %x) {
ret void
}
-; SI-LABEL: @global_store_v3i64_unaligned:
+; SI-LABEL: {{^}}global_store_v3i64_unaligned:
define void @global_store_v3i64_unaligned(<3 x i64> addrspace(1)* %out, <3 x i64> %x) {
store <3 x i64> %x, <3 x i64> addrspace(1)* %out, align 1
ret void
}
-; SI-LABEL: @local_store_v3i64:
+; SI-LABEL: {{^}}local_store_v3i64:
define void @local_store_v3i64(<3 x i64> addrspace(3)* %out, <3 x i64> %x) {
store <3 x i64> %x, <3 x i64> addrspace(3)* %out, align 32
ret void
}
-; SI-LABEL: @local_store_v3i64_unaligned:
+; SI-LABEL: {{^}}local_store_v3i64_unaligned:
define void @local_store_v3i64_unaligned(<3 x i64> addrspace(1)* %out, <3 x i64> %x) {
store <3 x i64> %x, <3 x i64> addrspace(1)* %out, align 1
ret void
diff --git a/test/CodeGen/R600/store-vector-ptrs.ll b/test/CodeGen/R600/store-vector-ptrs.ll
index b2ea8468d79..aee639bb7f9 100644
--- a/test/CodeGen/R600/store-vector-ptrs.ll
+++ b/test/CodeGen/R600/store-vector-ptrs.ll
@@ -3,7 +3,7 @@
; This tests for a bug that caused a crash in
; AMDGPUDAGToDAGISel::SelectMUBUFScratch() which is used for selecting
; scratch loads and stores.
-; CHECK-LABEL: @store_vector_ptrs
+; CHECK-LABEL: {{^}}store_vector_ptrs:
define void @store_vector_ptrs(<4 x i32*>* %out, <4 x [1024 x i32]*> %array) nounwind {
%p = getelementptr <4 x [1024 x i32]*> %array, <4 x i16> zeroinitializer, <4 x i16> <i16 16, i16 16, i16 16, i16 16>
store <4 x i32*> %p, <4 x i32*>* %out
diff --git a/test/CodeGen/R600/store.ll b/test/CodeGen/R600/store.ll
index dd275338d7c..447044c9f33 100644
--- a/test/CodeGen/R600/store.ll
+++ b/test/CodeGen/R600/store.ll
@@ -5,7 +5,7 @@
;===------------------------------------------------------------------------===;
; Global Address Space
;===------------------------------------------------------------------------===;
-; FUNC-LABEL: @store_i1
+; FUNC-LABEL: {{^}}store_i1:
; EG-CHECK: MEM_RAT MSKOR
; SI-CHECK: BUFFER_STORE_BYTE
define void @store_i1(i1 addrspace(1)* %out) {
@@ -15,7 +15,7 @@ entry:
}
; i8 store
-; EG-CHECK-LABEL: @store_i8
+; EG-CHECK-LABEL: {{^}}store_i8:
; EG-CHECK: MEM_RAT MSKOR T[[RW_GPR:[0-9]]].XW, T{{[0-9]}}.X
; EG-CHECK: VTX_READ_8 [[VAL:T[0-9]\.X]], [[VAL]]
; IG 0: Get the byte index and truncate the value
@@ -34,7 +34,7 @@ entry:
; EG-CHECK: MOV T[[RW_GPR]].Y, 0.0
; EG-CHECK: MOV * T[[RW_GPR]].Z, 0.0
-; SI-CHECK-LABEL: @store_i8
+; SI-CHECK-LABEL: {{^}}store_i8:
; SI-CHECK: BUFFER_STORE_BYTE
define void @store_i8(i8 addrspace(1)* %out, i8 %in) {
@@ -44,7 +44,7 @@ entry:
}
; i16 store
-; EG-CHECK-LABEL: @store_i16
+; EG-CHECK-LABEL: {{^}}store_i16:
; EG-CHECK: MEM_RAT MSKOR T[[RW_GPR:[0-9]]].XW, T{{[0-9]}}.X
; EG-CHECK: VTX_READ_16 [[VAL:T[0-9]\.X]], [[VAL]]
; IG 0: Get the byte index and truncate the value
@@ -63,7 +63,7 @@ entry:
; EG-CHECK: MOV T[[RW_GPR]].Y, 0.0
; EG-CHECK: MOV * T[[RW_GPR]].Z, 0.0
-; SI-CHECK-LABEL: @store_i16
+; SI-CHECK-LABEL: {{^}}store_i16:
; SI-CHECK: BUFFER_STORE_SHORT
define void @store_i16(i16 addrspace(1)* %out, i16 %in) {
entry:
@@ -71,10 +71,10 @@ entry:
ret void
}
-; EG-CHECK-LABEL: @store_v2i8
+; EG-CHECK-LABEL: {{^}}store_v2i8:
; EG-CHECK: MEM_RAT MSKOR
; EG-CHECK-NOT: MEM_RAT MSKOR
-; SI-CHECK-LABEL: @store_v2i8
+; SI-CHECK-LABEL: {{^}}store_v2i8:
; SI-CHECK: BUFFER_STORE_BYTE
; SI-CHECK: BUFFER_STORE_BYTE
define void @store_v2i8(<2 x i8> addrspace(1)* %out, <2 x i32> %in) {
@@ -85,11 +85,11 @@ entry:
}
-; EG-CHECK-LABEL: @store_v2i16
+; EG-CHECK-LABEL: {{^}}store_v2i16:
; EG-CHECK: MEM_RAT_CACHELESS STORE_RAW
-; CM-CHECK-LABEL: @store_v2i16
+; CM-CHECK-LABEL: {{^}}store_v2i16:
; CM-CHECK: MEM_RAT_CACHELESS STORE_DWORD
-; SI-CHECK-LABEL: @store_v2i16
+; SI-CHECK-LABEL: {{^}}store_v2i16:
; SI-CHECK: BUFFER_STORE_SHORT
; SI-CHECK: BUFFER_STORE_SHORT
define void @store_v2i16(<2 x i16> addrspace(1)* %out, <2 x i32> %in) {
@@ -99,11 +99,11 @@ entry:
ret void
}
-; EG-CHECK-LABEL: @store_v4i8
+; EG-CHECK-LABEL: {{^}}store_v4i8:
; EG-CHECK: MEM_RAT_CACHELESS STORE_RAW
-; CM-CHECK-LABEL: @store_v4i8
+; CM-CHECK-LABEL: {{^}}store_v4i8:
; CM-CHECK: MEM_RAT_CACHELESS STORE_DWORD
-; SI-CHECK-LABEL: @store_v4i8
+; SI-CHECK-LABEL: {{^}}store_v4i8:
; SI-CHECK: BUFFER_STORE_BYTE
; SI-CHECK: BUFFER_STORE_BYTE
; SI-CHECK: BUFFER_STORE_BYTE
@@ -116,11 +116,11 @@ entry:
}
; floating-point store
-; EG-CHECK-LABEL: @store_f32
+; EG-CHECK-LABEL: {{^}}store_f32:
; EG-CHECK: MEM_RAT_CACHELESS STORE_RAW T{{[0-9]+\.X, T[0-9]+\.X}}, 1
-; CM-CHECK-LABEL: @store_f32
+; CM-CHECK-LABEL: {{^}}store_f32:
; CM-CHECK: MEM_RAT_CACHELESS STORE_DWORD T{{[0-9]+\.X, T[0-9]+\.X}}
-; SI-CHECK-LABEL: @store_f32
+; SI-CHECK-LABEL: {{^}}store_f32:
; SI-CHECK: BUFFER_STORE_DWORD
define void @store_f32(float addrspace(1)* %out, float %in) {
@@ -128,13 +128,13 @@ define void @store_f32(float addrspace(1)* %out, float %in) {
ret void
}
-; EG-CHECK-LABEL: @store_v4i16
+; EG-CHECK-LABEL: {{^}}store_v4i16:
; EG-CHECK: MEM_RAT MSKOR
; EG-CHECK: MEM_RAT MSKOR
; EG-CHECK: MEM_RAT MSKOR
; EG-CHECK: MEM_RAT MSKOR
; EG-CHECK-NOT: MEM_RAT MSKOR
-; SI-CHECK-LABEL: @store_v4i16
+; SI-CHECK-LABEL: {{^}}store_v4i16:
; SI-CHECK: BUFFER_STORE_SHORT
; SI-CHECK: BUFFER_STORE_SHORT
; SI-CHECK: BUFFER_STORE_SHORT
@@ -148,11 +148,11 @@ entry:
}
; vec2 floating-point stores
-; EG-CHECK-LABEL: @store_v2f32
+; EG-CHECK-LABEL: {{^}}store_v2f32:
; EG-CHECK: MEM_RAT_CACHELESS STORE_RAW
-; CM-CHECK-LABEL: @store_v2f32
+; CM-CHECK-LABEL: {{^}}store_v2f32:
; CM-CHECK: MEM_RAT_CACHELESS STORE_DWORD
-; SI-CHECK-LABEL: @store_v2f32
+; SI-CHECK-LABEL: {{^}}store_v2f32:
; SI-CHECK: BUFFER_STORE_DWORDX2
define void @store_v2f32(<2 x float> addrspace(1)* %out, float %a, float %b) {
@@ -163,13 +163,13 @@ entry:
ret void
}
-; EG-CHECK-LABEL: @store_v4i32
+; EG-CHECK-LABEL: {{^}}store_v4i32:
; EG-CHECK: MEM_RAT_CACHELESS STORE_RAW
; EG-CHECK-NOT: MEM_RAT_CACHELESS STORE_RAW
-; CM-CHECK-LABEL: @store_v4i32
+; CM-CHECK-LABEL: {{^}}store_v4i32:
; CM-CHECK: MEM_RAT_CACHELESS STORE_DWORD
; CM-CHECK-NOT: MEM_RAT_CACHELESS STORE_DWORD
-; SI-CHECK-LABEL: @store_v4i32
+; SI-CHECK-LABEL: {{^}}store_v4i32:
; SI-CHECK: BUFFER_STORE_DWORDX4
define void @store_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> %in) {
entry:
@@ -177,7 +177,7 @@ entry:
ret void
}
-; FUNC-LABEL: @store_i64_i8
+; FUNC-LABEL: {{^}}store_i64_i8:
; EG-CHECK: MEM_RAT MSKOR
; SI-CHECK: BUFFER_STORE_BYTE
define void @store_i64_i8(i8 addrspace(1)* %out, i64 %in) {
@@ -187,7 +187,7 @@ entry:
ret void
}
-; FUNC-LABEL: @store_i64_i16
+; FUNC-LABEL: {{^}}store_i64_i16:
; EG-CHECK: MEM_RAT MSKOR
; SI-CHECK: BUFFER_STORE_SHORT
define void @store_i64_i16(i16 addrspace(1)* %out, i64 %in) {
@@ -201,7 +201,7 @@ entry:
; Local Address Space
;===------------------------------------------------------------------------===;
-; FUNC-LABEL: @store_local_i1
+; FUNC-LABEL: {{^}}store_local_i1:
; EG-CHECK: LDS_BYTE_WRITE
; SI-CHECK: DS_WRITE_B8
define void @store_local_i1(i1 addrspace(3)* %out) {
@@ -210,29 +210,29 @@ entry:
ret void
}
-; EG-CHECK-LABEL: @store_local_i8
+; EG-CHECK-LABEL: {{^}}store_local_i8:
; EG-CHECK: LDS_BYTE_WRITE
-; SI-CHECK-LABEL: @store_local_i8
+; SI-CHECK-LABEL: {{^}}store_local_i8:
; SI-CHECK: DS_WRITE_B8
define void @store_local_i8(i8 addrspace(3)* %out, i8 %in) {
store i8 %in, i8 addrspace(3)* %out
ret void
}
-; EG-CHECK-LABEL: @store_local_i16
+; EG-CHECK-LABEL: {{^}}store_local_i16:
; EG-CHECK: LDS_SHORT_WRITE
-; SI-CHECK-LABEL: @store_local_i16
+; SI-CHECK-LABEL: {{^}}store_local_i16:
; SI-CHECK: DS_WRITE_B16
define void @store_local_i16(i16 addrspace(3)* %out, i16 %in) {
store i16 %in, i16 addrspace(3)* %out
ret void
}
-; EG-CHECK-LABEL: @store_local_v2i16
+; EG-CHECK-LABEL: {{^}}store_local_v2i16:
; EG-CHECK: LDS_WRITE
-; CM-CHECK-LABEL: @store_local_v2i16
+; CM-CHECK-LABEL: {{^}}store_local_v2i16:
; CM-CHECK: LDS_WRITE
-; SI-CHECK-LABEL: @store_local_v2i16
+; SI-CHECK-LABEL: {{^}}store_local_v2i16:
; SI-CHECK: DS_WRITE_B16
; SI-CHECK: DS_WRITE_B16
define void @store_local_v2i16(<2 x i16> addrspace(3)* %out, <2 x i16> %in) {
@@ -241,11 +241,11 @@ entry:
ret void
}
-; EG-CHECK-LABEL: @store_local_v4i8
+; EG-CHECK-LABEL: {{^}}store_local_v4i8:
; EG-CHECK: LDS_WRITE
-; CM-CHECK-LABEL: @store_local_v4i8
+; CM-CHECK-LABEL: {{^}}store_local_v4i8:
; CM-CHECK: LDS_WRITE
-; SI-CHECK-LABEL: @store_local_v4i8
+; SI-CHECK-LABEL: {{^}}store_local_v4i8:
; SI-CHECK: DS_WRITE_B8
; SI-CHECK: DS_WRITE_B8
; SI-CHECK: DS_WRITE_B8
@@ -256,13 +256,13 @@ entry:
ret void
}
-; EG-CHECK-LABEL: @store_local_v2i32
+; EG-CHECK-LABEL: {{^}}store_local_v2i32:
; EG-CHECK: LDS_WRITE
; EG-CHECK: LDS_WRITE
-; CM-CHECK-LABEL: @store_local_v2i32
+; CM-CHECK-LABEL: {{^}}store_local_v2i32:
; CM-CHECK: LDS_WRITE
; CM-CHECK: LDS_WRITE
-; SI-CHECK-LABEL: @store_local_v2i32
+; SI-CHECK-LABEL: {{^}}store_local_v2i32:
; SI-CHECK: DS_WRITE_B64
define void @store_local_v2i32(<2 x i32> addrspace(3)* %out, <2 x i32> %in) {
entry:
@@ -270,17 +270,17 @@ entry:
ret void
}
-; EG-CHECK-LABEL: @store_local_v4i32
+; EG-CHECK-LABEL: {{^}}store_local_v4i32:
; EG-CHECK: LDS_WRITE
; EG-CHECK: LDS_WRITE
; EG-CHECK: LDS_WRITE
; EG-CHECK: LDS_WRITE
-; CM-CHECK-LABEL: @store_local_v4i32
+; CM-CHECK-LABEL: {{^}}store_local_v4i32:
; CM-CHECK: LDS_WRITE
; CM-CHECK: LDS_WRITE
; CM-CHECK: LDS_WRITE
; CM-CHECK: LDS_WRITE
-; SI-CHECK-LABEL: @store_local_v4i32
+; SI-CHECK-LABEL: {{^}}store_local_v4i32:
; SI-CHECK: DS_WRITE_B32
; SI-CHECK: DS_WRITE_B32
; SI-CHECK: DS_WRITE_B32
@@ -291,7 +291,7 @@ entry:
ret void
}
-; FUNC-LABEL: @store_local_i64_i8
+; FUNC-LABEL: {{^}}store_local_i64_i8:
; EG-CHECK: LDS_BYTE_WRITE
; SI-CHECK: DS_WRITE_B8
define void @store_local_i64_i8(i8 addrspace(3)* %out, i64 %in) {
@@ -301,7 +301,7 @@ entry:
ret void
}
-; FUNC-LABEL: @store_local_i64_i16
+; FUNC-LABEL: {{^}}store_local_i64_i16:
; EG-CHECK: LDS_SHORT_WRITE
; SI-CHECK: DS_WRITE_B16
define void @store_local_i64_i16(i16 addrspace(3)* %out, i64 %in) {
@@ -318,11 +318,11 @@ entry:
; Evergreen / Northern Islands don't support 64-bit stores yet, so there should
; be two 32-bit stores.
-; EG-CHECK-LABEL: @vecload2
+; EG-CHECK-LABEL: {{^}}vecload2:
; EG-CHECK: MEM_RAT_CACHELESS STORE_RAW
-; CM-CHECK-LABEL: @vecload2
+; CM-CHECK-LABEL: {{^}}vecload2:
; CM-CHECK: MEM_RAT_CACHELESS STORE_DWORD
-; SI-CHECK-LABEL: @vecload2
+; SI-CHECK-LABEL: {{^}}vecload2:
; SI-CHECK: BUFFER_STORE_DWORDX2
define void @vecload2(i32 addrspace(1)* nocapture %out, i32 addrspace(2)* nocapture %mem) #0 {
entry:
@@ -339,7 +339,7 @@ attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"=
; When i128 was a legal type, this program generated "cannot select" errors:
-; FUNC-LABEL: @i128-const-store
+; FUNC-LABEL: {{^}}"i128-const-store":
; FIXME: We should be able to do this with one store instruction
; EG-CHECK: STORE_RAW
; EG-CHECK: STORE_RAW
diff --git a/test/CodeGen/R600/store.r600.ll b/test/CodeGen/R600/store.r600.ll
index 00589a0c6c8..3df30d4c669 100644
--- a/test/CodeGen/R600/store.r600.ll
+++ b/test/CodeGen/R600/store.r600.ll
@@ -3,7 +3,7 @@
; XXX: Merge this test into store.ll once it is supported on SI
; v4i32 store
-; EG-CHECK: @store_v4i32
+; EG-CHECK: {{^}}store_v4i32:
; EG-CHECK: MEM_RAT_CACHELESS STORE_RAW T{{[0-9]+\.XYZW, T[0-9]+\.X}}, 1
define void @store_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
@@ -13,7 +13,7 @@ define void @store_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %
}
; v4f32 store
-; EG-CHECK: @store_v4f32
+; EG-CHECK: {{^}}store_v4f32:
; EG-CHECK: MEM_RAT_CACHELESS STORE_RAW T{{[0-9]+\.XYZW, T[0-9]+\.X}}, 1
define void @store_v4f32(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1)* %in) {
%1 = load <4 x float> addrspace(1) * %in
diff --git a/test/CodeGen/R600/structurize.ll b/test/CodeGen/R600/structurize.ll
index c2acd938ad0..02e592e9a55 100644
--- a/test/CodeGen/R600/structurize.ll
+++ b/test/CodeGen/R600/structurize.ll
@@ -13,7 +13,7 @@
;
;
-; CHECK-LABEL: @branch_into_diamond
+; CHECK-LABEL: {{^}}branch_into_diamond:
; === entry block:
; CHECK: ALU_PUSH_BEFORE
; === Branch instruction (IF):
diff --git a/test/CodeGen/R600/structurize1.ll b/test/CodeGen/R600/structurize1.ll
index 8c10301a168..77432c1f9d2 100644
--- a/test/CodeGen/R600/structurize1.ll
+++ b/test/CodeGen/R600/structurize1.ll
@@ -16,7 +16,7 @@
; }
; }
-; CHECK-LABEL: @if_inside_loop
+; CHECK-LABEL: {{^}}if_inside_loop:
; CHECK: LOOP_START_DX10
; CHECK: END_LOOP
define void @if_inside_loop(i32 addrspace(1)* %out, i32 %a, i32 %b, i32 %c, i32 %d) {
diff --git a/test/CodeGen/R600/sub.ll b/test/CodeGen/R600/sub.ll
index a8196a0ea4d..bcdb53e6874 100644
--- a/test/CodeGen/R600/sub.ll
+++ b/test/CodeGen/R600/sub.ll
@@ -3,7 +3,7 @@
declare i32 @llvm.r600.read.tidig.x() readnone
-;FUNC-LABEL: @test2
+;FUNC-LABEL: {{^}}test2:
;EG: SUB_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
;EG: SUB_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
@@ -19,7 +19,7 @@ define void @test2(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
ret void
}
-;FUNC-LABEL: @test4
+;FUNC-LABEL: {{^}}test4:
;EG: SUB_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
;EG: SUB_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
;EG: SUB_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
@@ -39,7 +39,7 @@ define void @test4(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
ret void
}
-; FUNC-LABEL: @s_sub_i64:
+; FUNC-LABEL: {{^}}s_sub_i64:
; SI: S_SUB_U32
; SI: S_SUBB_U32
@@ -54,7 +54,7 @@ define void @s_sub_i64(i64 addrspace(1)* noalias %out, i64 %a, i64 %b) nounwind
ret void
}
-; FUNC-LABEL: @v_sub_i64:
+; FUNC-LABEL: {{^}}v_sub_i64:
; SI: V_SUB_I32_e32
; SI: V_SUBB_U32_e32
diff --git a/test/CodeGen/R600/swizzle-export.ll b/test/CodeGen/R600/swizzle-export.ll
index 0a68f76c765..3e6f7a73263 100644
--- a/test/CodeGen/R600/swizzle-export.ll
+++ b/test/CodeGen/R600/swizzle-export.ll
@@ -1,6 +1,6 @@
; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck --check-prefix=EG-CHECK %s
-;EG-CHECK: @main
+;EG-CHECK: {{^}}main:
;EG-CHECK: EXPORT T{{[0-9]+}}.XYXX
;EG-CHECK: EXPORT T{{[0-9]+}}.ZXXX
;EG-CHECK: EXPORT T{{[0-9]+}}.XXWX
@@ -92,7 +92,7 @@ main_body:
ret void
}
-; EG-CHECK: @main2
+; EG-CHECK: {{^}}main2:
; EG-CHECK: T{{[0-9]+}}.XY__
; EG-CHECK: T{{[0-9]+}}.ZXY0
diff --git a/test/CodeGen/R600/trunc-store-i1.ll b/test/CodeGen/R600/trunc-store-i1.ll
index a3975c8b8e4..0e6f49daf6f 100644
--- a/test/CodeGen/R600/trunc-store-i1.ll
+++ b/test/CodeGen/R600/trunc-store-i1.ll
@@ -1,7 +1,7 @@
; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs< %s | FileCheck -check-prefix=SI %s
-; SI-LABEL: @global_truncstore_i32_to_i1
+; SI-LABEL: {{^}}global_truncstore_i32_to_i1:
; SI: S_LOAD_DWORD [[LOAD:s[0-9]+]],
; SI: S_AND_B32 [[SREG:s[0-9]+]], [[LOAD]], 1
; SI: V_MOV_B32_e32 [[VREG:v[0-9]+]], [[SREG]]
@@ -12,7 +12,7 @@ define void @global_truncstore_i32_to_i1(i1 addrspace(1)* %out, i32 %val) nounwi
ret void
}
-; SI-LABEL: @global_truncstore_i64_to_i1
+; SI-LABEL: {{^}}global_truncstore_i64_to_i1:
; SI: BUFFER_STORE_BYTE
define void @global_truncstore_i64_to_i1(i1 addrspace(1)* %out, i64 %val) nounwind {
%trunc = trunc i64 %val to i1
@@ -20,7 +20,7 @@ define void @global_truncstore_i64_to_i1(i1 addrspace(1)* %out, i64 %val) nounwi
ret void
}
-; SI-LABEL: @global_truncstore_i16_to_i1
+; SI-LABEL: {{^}}global_truncstore_i16_to_i1:
; SI: S_LOAD_DWORD [[LOAD:s[0-9]+]],
; SI: S_AND_B32 [[SREG:s[0-9]+]], [[LOAD]], 1
; SI: V_MOV_B32_e32 [[VREG:v[0-9]+]], [[SREG]]
diff --git a/test/CodeGen/R600/trunc-vector-store-assertion-failure.ll b/test/CodeGen/R600/trunc-vector-store-assertion-failure.ll
index ec959c21798..878ea3f4899 100644
--- a/test/CodeGen/R600/trunc-vector-store-assertion-failure.ll
+++ b/test/CodeGen/R600/trunc-vector-store-assertion-failure.ll
@@ -4,7 +4,7 @@
; vector stores at the end of a basic block were not being added to the
; LegalizedNodes list, which triggered an assertion failure.
-; CHECK-LABEL: @test
+; CHECK-LABEL: {{^}}test:
; CHECK: MEM_RAT_CACHELESS STORE_RAW
define void @test(<4 x i8> addrspace(1)* %out, i32 %cond, <4 x i8> %in) {
entry:
diff --git a/test/CodeGen/R600/trunc.ll b/test/CodeGen/R600/trunc.ll
index d3b191db282..16c57f7f386 100644
--- a/test/CodeGen/R600/trunc.ll
+++ b/test/CodeGen/R600/trunc.ll
@@ -2,12 +2,12 @@
; RUN: llc -march=r600 -mcpu=cypress < %s | FileCheck -check-prefix=EG %s
define void @trunc_i64_to_i32_store(i32 addrspace(1)* %out, i64 %in) {
-; SI-LABEL: @trunc_i64_to_i32_store
+; SI-LABEL: {{^}}trunc_i64_to_i32_store:
; SI: S_LOAD_DWORD [[SLOAD:s[0-9]+]], s[0:1], 0xb
; SI: V_MOV_B32_e32 [[VLOAD:v[0-9]+]], [[SLOAD]]
; SI: BUFFER_STORE_DWORD [[VLOAD]]
-; EG-LABEL: @trunc_i64_to_i32_store
+; EG-LABEL: {{^}}trunc_i64_to_i32_store:
; EG: MEM_RAT_CACHELESS STORE_RAW T0.X, T1.X, 1
; EG: LSHR
; EG-NEXT: 2(
@@ -16,7 +16,7 @@ define void @trunc_i64_to_i32_store(i32 addrspace(1)* %out, i64 %in) {
ret void
}
-; SI-LABEL: @trunc_load_shl_i64:
+; SI-LABEL: {{^}}trunc_load_shl_i64:
; SI-DAG: S_LOAD_DWORDX2
; SI-DAG: S_LOAD_DWORD [[SREG:s[0-9]+]],
; SI: S_LSHL_B32 [[SHL:s[0-9]+]], [[SREG]], 2
@@ -29,7 +29,7 @@ define void @trunc_load_shl_i64(i32 addrspace(1)* %out, i64 %a) {
ret void
}
-; SI-LABEL: @trunc_shl_i64:
+; SI-LABEL: {{^}}trunc_shl_i64:
; SI: S_LOAD_DWORDX2 s{{\[}}[[LO_SREG:[0-9]+]]:{{[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0xd
; SI: S_LSHL_B64 s{{\[}}[[LO_SHL:[0-9]+]]:{{[0-9]+\]}}, s{{\[}}[[LO_SREG]]:{{[0-9]+\]}}, 2
; SI: S_ADD_U32 s[[LO_SREG2:[0-9]+]], s[[LO_SHL]],
@@ -45,7 +45,7 @@ define void @trunc_shl_i64(i64 addrspace(1)* %out2, i32 addrspace(1)* %out, i64
ret void
}
-; SI-LABEL: @trunc_i32_to_i1:
+; SI-LABEL: {{^}}trunc_i32_to_i1:
; SI: V_AND_B32_e32 v{{[0-9]+}}, 1, v{{[0-9]+}}
; SI: V_CMP_EQ_I32
define void @trunc_i32_to_i1(i32 addrspace(1)* %out, i32 addrspace(1)* %ptr) {
@@ -56,7 +56,7 @@ define void @trunc_i32_to_i1(i32 addrspace(1)* %out, i32 addrspace(1)* %ptr) {
ret void
}
-; SI-LABEL: @sgpr_trunc_i32_to_i1:
+; SI-LABEL: {{^}}sgpr_trunc_i32_to_i1:
; SI: V_AND_B32_e64 v{{[0-9]+}}, 1, s{{[0-9]+}}
; SI: V_CMP_EQ_I32
define void @sgpr_trunc_i32_to_i1(i32 addrspace(1)* %out, i32 %a) {
diff --git a/test/CodeGen/R600/uaddo.ll b/test/CodeGen/R600/uaddo.ll
index 0b854b543a5..acdbb18fd3c 100644
--- a/test/CodeGen/R600/uaddo.ll
+++ b/test/CodeGen/R600/uaddo.ll
@@ -4,7 +4,7 @@
declare { i32, i1 } @llvm.uadd.with.overflow.i32(i32, i32) nounwind readnone
declare { i64, i1 } @llvm.uadd.with.overflow.i64(i64, i64) nounwind readnone
-; FUNC-LABEL: @uaddo_i64_zext
+; FUNC-LABEL: {{^}}uaddo_i64_zext:
; SI: ADD
; SI: ADDC
; SI: ADDC
@@ -18,7 +18,7 @@ define void @uaddo_i64_zext(i64 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
ret void
}
-; FUNC-LABEL: @s_uaddo_i32
+; FUNC-LABEL: {{^}}s_uaddo_i32:
; SI: S_ADD_I32
define void @s_uaddo_i32(i32 addrspace(1)* %out, i1 addrspace(1)* %carryout, i32 %a, i32 %b) nounwind {
%uadd = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %a, i32 %b) nounwind
@@ -29,7 +29,7 @@ define void @s_uaddo_i32(i32 addrspace(1)* %out, i1 addrspace(1)* %carryout, i32
ret void
}
-; FUNC-LABEL: @v_uaddo_i32
+; FUNC-LABEL: {{^}}v_uaddo_i32:
; SI: V_ADD_I32
define void @v_uaddo_i32(i32 addrspace(1)* %out, i1 addrspace(1)* %carryout, i32 addrspace(1)* %aptr, i32 addrspace(1)* %bptr) nounwind {
%a = load i32 addrspace(1)* %aptr, align 4
@@ -42,7 +42,7 @@ define void @v_uaddo_i32(i32 addrspace(1)* %out, i1 addrspace(1)* %carryout, i32
ret void
}
-; FUNC-LABEL: @s_uaddo_i64
+; FUNC-LABEL: {{^}}s_uaddo_i64:
; SI: S_ADD_U32
; SI: S_ADDC_U32
define void @s_uaddo_i64(i64 addrspace(1)* %out, i1 addrspace(1)* %carryout, i64 %a, i64 %b) nounwind {
@@ -54,7 +54,7 @@ define void @s_uaddo_i64(i64 addrspace(1)* %out, i1 addrspace(1)* %carryout, i64
ret void
}
-; FUNC-LABEL: @v_uaddo_i64
+; FUNC-LABEL: {{^}}v_uaddo_i64:
; SI: V_ADD_I32
; SI: V_ADDC_U32
define void @v_uaddo_i64(i64 addrspace(1)* %out, i1 addrspace(1)* %carryout, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr) nounwind {
diff --git a/test/CodeGen/R600/udiv.ll b/test/CodeGen/R600/udiv.ll
index 53713217a1c..6f05e8526f3 100644
--- a/test/CodeGen/R600/udiv.ll
+++ b/test/CodeGen/R600/udiv.ll
@@ -1,7 +1,7 @@
;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck --check-prefix=EG-CHECK %s
;RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck --check-prefix=SI-CHECK %s
-;EG-CHECK-LABEL: @test
+;EG-CHECK-LABEL: {{^}}test:
;EG-CHECK-NOT: SETGE_INT
;EG-CHECK: CF_END
@@ -18,9 +18,9 @@ define void @test(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
;The goal of this test is to make sure the ISel doesn't fail when it gets
;a v4i32 udiv
-;EG-CHECK-LABEL: @test2
+;EG-CHECK-LABEL: {{^}}test2:
;EG-CHECK: CF_END
-;SI-CHECK-LABEL: @test2
+;SI-CHECK-LABEL: {{^}}test2:
;SI-CHECK: S_ENDPGM
define void @test2(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
@@ -32,9 +32,9 @@ define void @test2(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
ret void
}
-;EG-CHECK-LABEL: @test4
+;EG-CHECK-LABEL: {{^}}test4:
;EG-CHECK: CF_END
-;SI-CHECK-LABEL: @test4
+;SI-CHECK-LABEL: {{^}}test4:
;SI-CHECK: S_ENDPGM
define void @test4(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
diff --git a/test/CodeGen/R600/udivrem.ll b/test/CodeGen/R600/udivrem.ll
index 5f5753adca3..57db20f188f 100644
--- a/test/CodeGen/R600/udivrem.ll
+++ b/test/CodeGen/R600/udivrem.ll
@@ -1,7 +1,7 @@
; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck --check-prefix=SI --check-prefix=FUNC %s
; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck --check-prefix=EG --check-prefix=FUNC %s
-; FUNC-LABEL: @test_udivrem
+; FUNC-LABEL: {{^}}test_udivrem:
; EG: RECIP_UINT
; EG-DAG: MULHI
; EG-DAG: MULLO_INT
@@ -58,7 +58,7 @@ define void @test_udivrem(i32 addrspace(1)* %out, i32 %x, i32 %y) {
ret void
}
-; FUNC-LABEL: @test_udivrem_v2
+; FUNC-LABEL: {{^}}test_udivrem_v2:
; EG-DAG: RECIP_UINT
; EG-DAG: MULHI
; EG-DAG: MULLO_INT
@@ -162,7 +162,7 @@ define void @test_udivrem_v2(<2 x i32> addrspace(1)* %out, <2 x i32> %x, <2 x i3
}
-; FUNC-LABEL: @test_udivrem_v4
+; FUNC-LABEL: {{^}}test_udivrem_v4:
; EG-DAG: RECIP_UINT
; EG-DAG: MULHI
; EG-DAG: MULLO_INT
diff --git a/test/CodeGen/R600/udivrem24.ll b/test/CodeGen/R600/udivrem24.ll
index 219c662b7ef..191ab7a4cf2 100644
--- a/test/CodeGen/R600/udivrem24.ll
+++ b/test/CodeGen/R600/udivrem24.ll
@@ -1,7 +1,7 @@
; RUN: llc -march=r600 -mcpu=SI < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
-; FUNC-LABEL: @udiv24_i8
+; FUNC-LABEL: {{^}}udiv24_i8:
; SI: V_CVT_F32_UBYTE
; SI: V_CVT_F32_UBYTE
; SI: V_RCP_F32
@@ -20,7 +20,7 @@ define void @udiv24_i8(i8 addrspace(1)* %out, i8 addrspace(1)* %in) {
ret void
}
-; FUNC-LABEL: @udiv24_i16
+; FUNC-LABEL: {{^}}udiv24_i16:
; SI: V_CVT_F32_U32
; SI: V_CVT_F32_U32
; SI: V_RCP_F32
@@ -39,7 +39,7 @@ define void @udiv24_i16(i16 addrspace(1)* %out, i16 addrspace(1)* %in) {
ret void
}
-; FUNC-LABEL: @udiv24_i32
+; FUNC-LABEL: {{^}}udiv24_i32:
; SI: V_CVT_F32_U32
; SI-DAG: V_CVT_F32_U32
; SI-DAG: V_RCP_F32
@@ -62,7 +62,7 @@ define void @udiv24_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
ret void
}
-; FUNC-LABEL: @udiv25_i32
+; FUNC-LABEL: {{^}}udiv25_i32:
; RCP_IFLAG is for URECIP in the full 32b alg
; SI: V_RCP_IFLAG
; SI-NOT: V_RCP_F32
@@ -82,7 +82,7 @@ define void @udiv25_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
ret void
}
-; FUNC-LABEL: @test_no_udiv24_i32_1
+; FUNC-LABEL: {{^}}test_no_udiv24_i32_1:
; RCP_IFLAG is for URECIP in the full 32b alg
; SI: V_RCP_IFLAG
; SI-NOT: V_RCP_F32
@@ -102,7 +102,7 @@ define void @test_no_udiv24_i32_1(i32 addrspace(1)* %out, i32 addrspace(1)* %in)
ret void
}
-; FUNC-LABEL: @test_no_udiv24_i32_2
+; FUNC-LABEL: {{^}}test_no_udiv24_i32_2:
; RCP_IFLAG is for URECIP in the full 32b alg
; SI: V_RCP_IFLAG
; SI-NOT: V_RCP_F32
@@ -122,7 +122,7 @@ define void @test_no_udiv24_i32_2(i32 addrspace(1)* %out, i32 addrspace(1)* %in)
ret void
}
-; FUNC-LABEL: @urem24_i8
+; FUNC-LABEL: {{^}}urem24_i8:
; SI: V_CVT_F32_UBYTE
; SI: V_CVT_F32_UBYTE
; SI: V_RCP_F32
@@ -141,7 +141,7 @@ define void @urem24_i8(i8 addrspace(1)* %out, i8 addrspace(1)* %in) {
ret void
}
-; FUNC-LABEL: @urem24_i16
+; FUNC-LABEL: {{^}}urem24_i16:
; SI: V_CVT_F32_U32
; SI: V_CVT_F32_U32
; SI: V_RCP_F32
@@ -160,7 +160,7 @@ define void @urem24_i16(i16 addrspace(1)* %out, i16 addrspace(1)* %in) {
ret void
}
-; FUNC-LABEL: @urem24_i32
+; FUNC-LABEL: {{^}}urem24_i32:
; SI: V_CVT_F32_U32
; SI: V_CVT_F32_U32
; SI: V_RCP_F32
@@ -183,7 +183,7 @@ define void @urem24_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
ret void
}
-; FUNC-LABEL: @urem25_i32
+; FUNC-LABEL: {{^}}urem25_i32:
; RCP_IFLAG is for URECIP in the full 32b alg
; SI: V_RCP_IFLAG
; SI-NOT: V_RCP_F32
@@ -203,7 +203,7 @@ define void @urem25_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
ret void
}
-; FUNC-LABEL: @test_no_urem24_i32_1
+; FUNC-LABEL: {{^}}test_no_urem24_i32_1:
; RCP_IFLAG is for URECIP in the full 32b alg
; SI: V_RCP_IFLAG
; SI-NOT: V_RCP_F32
@@ -223,7 +223,7 @@ define void @test_no_urem24_i32_1(i32 addrspace(1)* %out, i32 addrspace(1)* %in)
ret void
}
-; FUNC-LABEL: @test_no_urem24_i32_2
+; FUNC-LABEL: {{^}}test_no_urem24_i32_2:
; RCP_IFLAG is for URECIP in the full 32b alg
; SI: V_RCP_IFLAG
; SI-NOT: V_RCP_F32
diff --git a/test/CodeGen/R600/udivrem64.ll b/test/CodeGen/R600/udivrem64.ll
index a71315a12d8..caf8ebb28c6 100644
--- a/test/CodeGen/R600/udivrem64.ll
+++ b/test/CodeGen/R600/udivrem64.ll
@@ -1,7 +1,7 @@
;XUN: llc < %s -march=r600 -mcpu=SI -verify-machineinstrs| FileCheck --check-prefix=SI --check-prefix=FUNC %s
;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck --check-prefix=EG --check-prefix=FUNC %s
-;FUNC-LABEL: @test_udiv
+;FUNC-LABEL: {{^}}test_udiv:
;EG: RECIP_UINT
;EG: LSHL {{.*}}, 1,
;EG: BFE_UINT
@@ -41,7 +41,7 @@ define void @test_udiv(i64 addrspace(1)* %out, i64 %x, i64 %y) {
ret void
}
-;FUNC-LABEL: @test_urem
+;FUNC-LABEL: {{^}}test_urem:
;EG: RECIP_UINT
;EG: BFE_UINT
;EG: BFE_UINT
diff --git a/test/CodeGen/R600/uint_to_fp.f64.ll b/test/CodeGen/R600/uint_to_fp.f64.ll
index 9a41796a06b..19dbbee8354 100644
--- a/test/CodeGen/R600/uint_to_fp.f64.ll
+++ b/test/CodeGen/R600/uint_to_fp.f64.ll
@@ -1,6 +1,6 @@
; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
-; SI-LABEL: @uint_to_fp_f64_i32
+; SI-LABEL: {{^}}uint_to_fp_f64_i32:
; SI: V_CVT_F64_U32_e32
; SI: S_ENDPGM
define void @uint_to_fp_f64_i32(double addrspace(1)* %out, i32 %in) {
@@ -9,7 +9,7 @@ define void @uint_to_fp_f64_i32(double addrspace(1)* %out, i32 %in) {
ret void
}
-; SI-LABEL: @uint_to_fp_i1_f64:
+; SI-LABEL: {{^}}uint_to_fp_i1_f64:
; SI: V_CMP_EQ_I32_e64 [[CMP:s\[[0-9]+:[0-9]\]]],
; FIXME: The VGPR sources for V_CNDMASK are copied from SGPRs;
; we should be able to fold the SGPRs into the V_CNDMASK instructions.
@@ -24,7 +24,7 @@ define void @uint_to_fp_i1_f64(double addrspace(1)* %out, i32 %in) {
ret void
}
-; SI-LABEL: @uint_to_fp_i1_f64_load:
+; SI-LABEL: {{^}}uint_to_fp_i1_f64_load:
; SI: V_CNDMASK_B32_e64 [[IRESULT:v[0-9]]], 0, 1
; SI-NEXT: V_CVT_F64_U32_e32 [[RESULT:v\[[0-9]+:[0-9]\]]], [[IRESULT]]
; SI: BUFFER_STORE_DWORDX2 [[RESULT]]
diff --git a/test/CodeGen/R600/uint_to_fp.ll b/test/CodeGen/R600/uint_to_fp.ll
index 9d4dacff371..657d6f8e22d 100644
--- a/test/CodeGen/R600/uint_to_fp.ll
+++ b/test/CodeGen/R600/uint_to_fp.ll
@@ -1,7 +1,7 @@
; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=R600 -check-prefix=FUNC %s
-; FUNC-LABEL: @uint_to_fp_v2i32
+; FUNC-LABEL: {{^}}uint_to_fp_v2i32:
; R600-DAG: UINT_TO_FLT * T{{[0-9]+\.[XYZW]}}, KC0[2].W
; R600-DAG: UINT_TO_FLT * T{{[0-9]+\.[XYZW]}}, KC0[3].X
@@ -14,7 +14,7 @@ define void @uint_to_fp_v2i32(<2 x float> addrspace(1)* %out, <2 x i32> %in) {
ret void
}
-; FUNC-LABEL: @uint_to_fp_v4i32
+; FUNC-LABEL: {{^}}uint_to_fp_v4i32:
; R600: UINT_TO_FLT * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; R600: UINT_TO_FLT * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; R600: UINT_TO_FLT * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
@@ -32,7 +32,7 @@ define void @uint_to_fp_v4i32(<4 x float> addrspace(1)* %out, <4 x i32> addrspac
ret void
}
-; FUNC-LABEL: @uint_to_fp_i64_f32
+; FUNC-LABEL: {{^}}uint_to_fp_i64_f32:
; R600: UINT_TO_FLT
; R600: UINT_TO_FLT
; R600: MULADD_IEEE
@@ -47,7 +47,7 @@ entry:
ret void
}
-; FUNC-LABEL: @uint_to_fp_i1_f32:
+; FUNC-LABEL: {{^}}uint_to_fp_i1_f32:
; SI: V_CMP_EQ_I32_e64 [[CMP:s\[[0-9]+:[0-9]\]]],
; SI-NEXT: V_CNDMASK_B32_e64 [[RESULT:v[0-9]+]], 0, 1.0, [[CMP]]
; SI: BUFFER_STORE_DWORD [[RESULT]],
@@ -59,7 +59,7 @@ define void @uint_to_fp_i1_f32(float addrspace(1)* %out, i32 %in) {
ret void
}
-; FUNC-LABEL: @uint_to_fp_i1_f32_load:
+; FUNC-LABEL: {{^}}uint_to_fp_i1_f32_load:
; SI: V_CNDMASK_B32_e64 [[RESULT:v[0-9]+]], 0, 1.0
; SI: BUFFER_STORE_DWORD [[RESULT]],
; SI: S_ENDPGM
diff --git a/test/CodeGen/R600/unaligned-load-store.ll b/test/CodeGen/R600/unaligned-load-store.ll
index cca7df5f34e..f2efd1c5b6d 100644
--- a/test/CodeGen/R600/unaligned-load-store.ll
+++ b/test/CodeGen/R600/unaligned-load-store.ll
@@ -1,7 +1,7 @@
; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs< %s | FileCheck -check-prefix=SI %s
; FIXME: This is probably wrong. This probably needs to expand to 8-bit reads and writes.
-; SI-LABEL: @unaligned_load_store_i32:
+; SI-LABEL: {{^}}unaligned_load_store_i32:
; SI: DS_READ_U16
; SI: DS_READ_U16
; SI: DS_WRITE_B32
@@ -12,7 +12,7 @@ define void @unaligned_load_store_i32(i32 addrspace(3)* %p, i32 addrspace(3)* %r
ret void
}
-; SI-LABEL: @unaligned_load_store_v4i32:
+; SI-LABEL: {{^}}unaligned_load_store_v4i32:
; SI: DS_READ_U16
; SI: DS_READ_U16
; SI: DS_READ_U16
@@ -32,7 +32,7 @@ define void @unaligned_load_store_v4i32(<4 x i32> addrspace(3)* %p, <4 x i32> ad
ret void
}
-; SI-LABEL: @load_lds_i64_align_4
+; SI-LABEL: {{^}}load_lds_i64_align_4:
; SI: DS_READ2_B32
; SI: S_ENDPGM
define void @load_lds_i64_align_4(i64 addrspace(1)* nocapture %out, i64 addrspace(3)* %in) #0 {
@@ -41,7 +41,7 @@ define void @load_lds_i64_align_4(i64 addrspace(1)* nocapture %out, i64 addrspac
ret void
}
-; SI-LABEL: @load_lds_i64_align_4_with_offset
+; SI-LABEL: {{^}}load_lds_i64_align_4_with_offset:
; SI: DS_READ2_B32 v[{{[0-9]+}}:{{[0-9]+}}], v{{[0-9]}}, 0x8, 0x9
; SI: S_ENDPGM
define void @load_lds_i64_align_4_with_offset(i64 addrspace(1)* nocapture %out, i64 addrspace(3)* %in) #0 {
@@ -51,7 +51,7 @@ define void @load_lds_i64_align_4_with_offset(i64 addrspace(1)* nocapture %out,
ret void
}
-; SI-LABEL: @load_lds_i64_align_4_with_split_offset
+; SI-LABEL: {{^}}load_lds_i64_align_4_with_split_offset:
; This tests the case where the lo offset is 8 bits, but the hi offset is 9 bits
; SI: DS_READ2_B32 v[{{[0-9]+}}:{{[0-9]+}}], v{{[0-9]}}, 0x0, 0x1
; SI: S_ENDPGM
@@ -71,7 +71,7 @@ define void @load_lds_i64_align_4_with_split_offset(i64 addrspace(1)* nocapture
; ret void
; }
-; SI-LABEL: @store_lds_i64_align_4
+; SI-LABEL: {{^}}store_lds_i64_align_4:
; SI: DS_WRITE2_B32
; SI: S_ENDPGM
define void @store_lds_i64_align_4(i64 addrspace(3)* %out, i64 %val) #0 {
@@ -79,7 +79,7 @@ define void @store_lds_i64_align_4(i64 addrspace(3)* %out, i64 %val) #0 {
ret void
}
-; SI-LABEL: @store_lds_i64_align_4_with_offset
+; SI-LABEL: {{^}}store_lds_i64_align_4_with_offset:
; SI: DS_WRITE2_B32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, 0x8, 0x9
; SI: S_ENDPGM
define void @store_lds_i64_align_4_with_offset(i64 addrspace(3)* %out) #0 {
@@ -88,7 +88,7 @@ define void @store_lds_i64_align_4_with_offset(i64 addrspace(3)* %out) #0 {
ret void
}
-; SI-LABEL: @store_lds_i64_align_4_with_split_offset
+; SI-LABEL: {{^}}store_lds_i64_align_4_with_split_offset:
; This tests the case where the lo offset is 8 bits, but the hi offset is 9 bits
; SI: DS_WRITE2_B32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]}}, 0x0, 0x1
; SI: S_ENDPGM
diff --git a/test/CodeGen/R600/unhandled-loop-condition-assertion.ll b/test/CodeGen/R600/unhandled-loop-condition-assertion.ll
index e4129c51123..ee0a83084af 100644
--- a/test/CodeGen/R600/unhandled-loop-condition-assertion.ll
+++ b/test/CodeGen/R600/unhandled-loop-condition-assertion.ll
@@ -5,7 +5,7 @@
; SI hits an assertion at -O0; evergreen hits a "not implemented" unreachable.
-; COMMON-LABEL: @branch_true:
+; COMMON-LABEL: {{^}}branch_true:
define void @branch_true(i8 addrspace(1)* nocapture %main, i32 %main_stride) #0 {
entry:
br i1 true, label %for.end, label %for.body.lr.ph
@@ -39,7 +39,7 @@ for.end: ; preds = %for.body, %entry
ret void
}
-; COMMON-LABEL: @branch_false:
+; COMMON-LABEL: {{^}}branch_false:
; SI: .text
; SI-NEXT: S_ENDPGM
define void @branch_false(i8 addrspace(1)* nocapture %main, i32 %main_stride) #0 {
@@ -75,7 +75,7 @@ for.end: ; preds = %for.body, %entry
ret void
}
-; COMMON-LABEL: @branch_undef:
+; COMMON-LABEL: {{^}}branch_undef:
; SI: .text
; SI-NEXT: S_ENDPGM
define void @branch_undef(i8 addrspace(1)* nocapture %main, i32 %main_stride) #0 {
diff --git a/test/CodeGen/R600/unsupported-cc.ll b/test/CodeGen/R600/unsupported-cc.ll
index f986a0251dc..8ab4faf2f14 100644
--- a/test/CodeGen/R600/unsupported-cc.ll
+++ b/test/CodeGen/R600/unsupported-cc.ll
@@ -2,7 +2,7 @@
; These tests are for condition codes that are not supported by the hardware
-; CHECK-LABEL: @slt
+; CHECK-LABEL: {{^}}slt:
; CHECK: SETGT_INT {{\** *}}T{{[0-9]+\.[XYZW]}}, literal.x, KC0[2].Z
; CHECK-NEXT: LSHR
; CHECK-NEXT: 5(7.006492e-45)
@@ -14,7 +14,7 @@ entry:
ret void
}
-; CHECK-LABEL: @ult_i32
+; CHECK-LABEL: {{^}}ult_i32:
; CHECK: SETGT_UINT {{\** *}}T{{[0-9]+\.[XYZW]}}, literal.x, KC0[2].Z
; CHECK-NEXT: LSHR
; CHECK-NEXT: 5(7.006492e-45)
@@ -26,7 +26,7 @@ entry:
ret void
}
-; CHECK-LABEL: @ult_float
+; CHECK-LABEL: {{^}}ult_float:
; CHECK: SETGE * T{{[0-9]}}.[[CHAN:[XYZW]]], KC0[2].Z, literal.x
; CHECK-NEXT: 1084227584(5.000000e+00)
; CHECK-NEXT: SETE T{{[0-9]\.[XYZW]}}, PV.[[CHAN]], 0.0
@@ -39,7 +39,7 @@ entry:
ret void
}
-; CHECK-LABEL: @ult_float_native
+; CHECK-LABEL: {{^}}ult_float_native:
; CHECK: SETGE T{{[0-9]\.[XYZW]}}, KC0[2].Z, literal.x
; CHECK-NEXT: LSHR *
; CHECK-NEXT: 1084227584(5.000000e+00)
@@ -51,7 +51,7 @@ entry:
ret void
}
-; CHECK-LABEL: @olt
+; CHECK-LABEL: {{^}}olt:
; CHECK: SETGT T{{[0-9]+\.[XYZW]}}, literal.x, KC0[2].Z
; CHECK-NEXT: LSHR *
; CHECK-NEXT: 1084227584(5.000000e+00)
@@ -63,7 +63,7 @@ entry:
ret void
}
-; CHECK-LABEL: @sle
+; CHECK-LABEL: {{^}}sle:
; CHECK: SETGT_INT {{\** *}}T{{[0-9]+\.[XYZW]}}, literal.x, KC0[2].Z
; CHECK-NEXT: LSHR
; CHECK-NEXT: 6(8.407791e-45)
@@ -75,7 +75,7 @@ entry:
ret void
}
-; CHECK-LABEL: @ule_i32
+; CHECK-LABEL: {{^}}ule_i32:
; CHECK: SETGT_UINT {{\** *}}T{{[0-9]+\.[XYZW]}}, literal.x, KC0[2].Z
; CHECK-NEXT: LSHR
; CHECK-NEXT: 6(8.407791e-45)
@@ -87,7 +87,7 @@ entry:
ret void
}
-; CHECK-LABEL: @ule_float
+; CHECK-LABEL: {{^}}ule_float:
; CHECK: SETGT * T{{[0-9]}}.[[CHAN:[XYZW]]], KC0[2].Z, literal.x
; CHECK-NEXT: 1084227584(5.000000e+00)
; CHECK-NEXT: SETE T{{[0-9]\.[XYZW]}}, PV.[[CHAN]], 0.0
@@ -100,7 +100,7 @@ entry:
ret void
}
-; CHECK-LABEL: @ule_float_native
+; CHECK-LABEL: {{^}}ule_float_native:
; CHECK: SETGT T{{[0-9]\.[XYZW]}}, KC0[2].Z, literal.x
; CHECK-NEXT: LSHR *
; CHECK-NEXT: 1084227584(5.000000e+00)
@@ -112,7 +112,7 @@ entry:
ret void
}
-; CHECK-LABEL: @ole
+; CHECK-LABEL: {{^}}ole:
; CHECK: SETGE T{{[0-9]\.[XYZW]}}, literal.x, KC0[2].Z
; CHECK-NEXT: LSHR *
; CHECK-NEXT:1084227584(5.000000e+00)
diff --git a/test/CodeGen/R600/urem.ll b/test/CodeGen/R600/urem.ll
index 8045145bd10..449e19f3c72 100644
--- a/test/CodeGen/R600/urem.ll
+++ b/test/CodeGen/R600/urem.ll
@@ -5,9 +5,9 @@
;The goal of this test is to make sure the ISel doesn't fail when it gets
;a v2i32/v4i32 urem
-;EG-CHECK: @test2
+;EG-CHECK: {{^}}test2:
;EG-CHECK: CF_END
-;SI-CHECK: @test2
+;SI-CHECK: {{^}}test2:
;SI-CHECK: S_ENDPGM
define void @test2(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
@@ -19,9 +19,9 @@ define void @test2(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
ret void
}
-;EG-CHECK: @test4
+;EG-CHECK: {{^}}test4:
;EG-CHECK: CF_END
-;SI-CHECK: @test4
+;SI-CHECK: {{^}}test4:
;SI-CHECK: S_ENDPGM
define void @test4(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
diff --git a/test/CodeGen/R600/use-sgpr-multiple-times.ll b/test/CodeGen/R600/use-sgpr-multiple-times.ll
index 5c007183354..0744295006b 100644
--- a/test/CodeGen/R600/use-sgpr-multiple-times.ll
+++ b/test/CodeGen/R600/use-sgpr-multiple-times.ll
@@ -5,7 +5,7 @@ declare float @llvm.fmuladd.f32(float, float, float) #1
declare i32 @llvm.AMDGPU.imad24(i32, i32, i32) #1
-; SI-LABEL: @test_sgpr_use_twice_binop:
+; SI-LABEL: {{^}}test_sgpr_use_twice_binop:
; SI: S_LOAD_DWORD [[SGPR:s[0-9]+]],
; SI: V_ADD_F32_e64 [[RESULT:v[0-9]+]], [[SGPR]], [[SGPR]]
; SI: BUFFER_STORE_DWORD [[RESULT]]
@@ -15,7 +15,7 @@ define void @test_sgpr_use_twice_binop(float addrspace(1)* %out, float %a) #0 {
ret void
}
-; SI-LABEL: @test_sgpr_use_three_ternary_op:
+; SI-LABEL: {{^}}test_sgpr_use_three_ternary_op:
; SI: S_LOAD_DWORD [[SGPR:s[0-9]+]],
; SI: V_FMA_F32 [[RESULT:v[0-9]+]], [[SGPR]], [[SGPR]], [[SGPR]]
; SI: BUFFER_STORE_DWORD [[RESULT]]
@@ -25,7 +25,7 @@ define void @test_sgpr_use_three_ternary_op(float addrspace(1)* %out, float %a)
ret void
}
-; SI-LABEL: @test_sgpr_use_twice_ternary_op_a_a_b:
+; SI-LABEL: {{^}}test_sgpr_use_twice_ternary_op_a_a_b:
; SI: S_LOAD_DWORD [[SGPR0:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
; SI: S_LOAD_DWORD [[SGPR1:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xc
; SI: V_MOV_B32_e32 [[VGPR1:v[0-9]+]], [[SGPR1]]
@@ -37,7 +37,7 @@ define void @test_sgpr_use_twice_ternary_op_a_a_b(float addrspace(1)* %out, floa
ret void
}
-; SI-LABEL: @test_sgpr_use_twice_ternary_op_a_b_a:
+; SI-LABEL: {{^}}test_sgpr_use_twice_ternary_op_a_b_a:
; SI: S_LOAD_DWORD [[SGPR0:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
; SI: S_LOAD_DWORD [[SGPR1:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xc
; SI: V_MOV_B32_e32 [[VGPR1:v[0-9]+]], [[SGPR1]]
@@ -49,7 +49,7 @@ define void @test_sgpr_use_twice_ternary_op_a_b_a(float addrspace(1)* %out, floa
ret void
}
-; SI-LABEL: @test_sgpr_use_twice_ternary_op_b_a_a:
+; SI-LABEL: {{^}}test_sgpr_use_twice_ternary_op_b_a_a:
; SI: S_LOAD_DWORD [[SGPR0:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
; SI: S_LOAD_DWORD [[SGPR1:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xc
; SI: V_MOV_B32_e32 [[VGPR1:v[0-9]+]], [[SGPR1]]
@@ -61,7 +61,7 @@ define void @test_sgpr_use_twice_ternary_op_b_a_a(float addrspace(1)* %out, floa
ret void
}
-; SI-LABEL: @test_sgpr_use_twice_ternary_op_a_a_imm:
+; SI-LABEL: {{^}}test_sgpr_use_twice_ternary_op_a_a_imm:
; SI: S_LOAD_DWORD [[SGPR:s[0-9]+]]
; SI: V_FMA_F32 [[RESULT:v[0-9]+]], [[SGPR]], [[SGPR]], 2.0
; SI: BUFFER_STORE_DWORD [[RESULT]]
@@ -71,7 +71,7 @@ define void @test_sgpr_use_twice_ternary_op_a_a_imm(float addrspace(1)* %out, fl
ret void
}
-; SI-LABEL: @test_sgpr_use_twice_ternary_op_a_imm_a:
+; SI-LABEL: {{^}}test_sgpr_use_twice_ternary_op_a_imm_a:
; SI: S_LOAD_DWORD [[SGPR:s[0-9]+]]
; SI: V_FMA_F32 [[RESULT:v[0-9]+]], [[SGPR]], 2.0, [[SGPR]]
; SI: BUFFER_STORE_DWORD [[RESULT]]
@@ -82,7 +82,7 @@ define void @test_sgpr_use_twice_ternary_op_a_imm_a(float addrspace(1)* %out, fl
}
; Don't use fma since fma c, x, y is canonicalized to fma x, c, y
-; SI-LABEL: @test_sgpr_use_twice_ternary_op_imm_a_a:
+; SI-LABEL: {{^}}test_sgpr_use_twice_ternary_op_imm_a_a:
; SI: S_LOAD_DWORD [[SGPR:s[0-9]+]]
; SI: V_MAD_I32_I24 [[RESULT:v[0-9]+]], 2, [[SGPR]], [[SGPR]]
; SI: BUFFER_STORE_DWORD [[RESULT]]
diff --git a/test/CodeGen/R600/usubo.ll b/test/CodeGen/R600/usubo.ll
index c293ad78e06..10e79d7ab54 100644
--- a/test/CodeGen/R600/usubo.ll
+++ b/test/CodeGen/R600/usubo.ll
@@ -4,7 +4,7 @@
declare { i32, i1 } @llvm.usub.with.overflow.i32(i32, i32) nounwind readnone
declare { i64, i1 } @llvm.usub.with.overflow.i64(i64, i64) nounwind readnone
-; FUNC-LABEL: @usubo_i64_zext
+; FUNC-LABEL: {{^}}usubo_i64_zext:
define void @usubo_i64_zext(i64 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
%usub = call { i64, i1 } @llvm.usub.with.overflow.i64(i64 %a, i64 %b) nounwind
%val = extractvalue { i64, i1 } %usub, 0
@@ -15,7 +15,7 @@ define void @usubo_i64_zext(i64 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
ret void
}
-; FUNC-LABEL: @s_usubo_i32
+; FUNC-LABEL: {{^}}s_usubo_i32:
; SI: S_SUB_I32
define void @s_usubo_i32(i32 addrspace(1)* %out, i1 addrspace(1)* %carryout, i32 %a, i32 %b) nounwind {
%usub = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 %a, i32 %b) nounwind
@@ -26,7 +26,7 @@ define void @s_usubo_i32(i32 addrspace(1)* %out, i1 addrspace(1)* %carryout, i32
ret void
}
-; FUNC-LABEL: @v_usubo_i32
+; FUNC-LABEL: {{^}}v_usubo_i32:
; SI: V_SUBREV_I32_e32
define void @v_usubo_i32(i32 addrspace(1)* %out, i1 addrspace(1)* %carryout, i32 addrspace(1)* %aptr, i32 addrspace(1)* %bptr) nounwind {
%a = load i32 addrspace(1)* %aptr, align 4
@@ -39,7 +39,7 @@ define void @v_usubo_i32(i32 addrspace(1)* %out, i1 addrspace(1)* %carryout, i32
ret void
}
-; FUNC-LABEL: @s_usubo_i64
+; FUNC-LABEL: {{^}}s_usubo_i64:
; SI: S_SUB_U32
; SI: S_SUBB_U32
define void @s_usubo_i64(i64 addrspace(1)* %out, i1 addrspace(1)* %carryout, i64 %a, i64 %b) nounwind {
@@ -51,7 +51,7 @@ define void @s_usubo_i64(i64 addrspace(1)* %out, i1 addrspace(1)* %carryout, i64
ret void
}
-; FUNC-LABEL: @v_usubo_i64
+; FUNC-LABEL: {{^}}v_usubo_i64:
; SI: V_SUB_I32
; SI: V_SUBB_U32
define void @v_usubo_i64(i64 addrspace(1)* %out, i1 addrspace(1)* %carryout, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr) nounwind {
diff --git a/test/CodeGen/R600/v1i64-kernel-arg.ll b/test/CodeGen/R600/v1i64-kernel-arg.ll
index 2aa1221b366..31755125c03 100644
--- a/test/CodeGen/R600/v1i64-kernel-arg.ll
+++ b/test/CodeGen/R600/v1i64-kernel-arg.ll
@@ -2,14 +2,14 @@
; XFAIL: *
; RUN: llc -march=r600 -mcpu=cypress < %s | FileCheck %s
-; CHECK-LABEL: @kernel_arg_i64
+; CHECK-LABEL: {{^}}kernel_arg_i64:
define void @kernel_arg_i64(i64 addrspace(1)* %out, i64 %a) nounwind {
store i64 %a, i64 addrspace(1)* %out, align 8
ret void
}
; i64 arg works, v1i64 arg does not.
-; CHECK-LABEL: @kernel_arg_v1i64
+; CHECK-LABEL: {{^}}kernel_arg_v1i64:
define void @kernel_arg_v1i64(<1 x i64> addrspace(1)* %out, <1 x i64> %a) nounwind {
store <1 x i64> %a, <1 x i64> addrspace(1)* %out, align 8
ret void
diff --git a/test/CodeGen/R600/v_cndmask.ll b/test/CodeGen/R600/v_cndmask.ll
index 51e3d8b080f..b9c25105a83 100644
--- a/test/CodeGen/R600/v_cndmask.ll
+++ b/test/CodeGen/R600/v_cndmask.ll
@@ -2,7 +2,7 @@
declare i32 @llvm.r600.read.tidig.x() #1
-; SI-LABEL: @v_cnd_nan_nosgpr
+; SI-LABEL: {{^}}v_cnd_nan_nosgpr:
; SI: V_CNDMASK_B32_e64 v{{[0-9]}}, v{{[0-9]}}, -1, s{{\[[0-9]+:[0-9]+\]}}
; SI-DAG: v{{[0-9]}}
; All nan values are converted to 0xffffffff
@@ -22,7 +22,7 @@ define void @v_cnd_nan_nosgpr(float addrspace(1)* %out, i32 %c, float addrspace(
; single constant bus SGPR usage is the last operand, and it should
; never be moved.
-; SI-LABEL: @v_cnd_nan
+; SI-LABEL: {{^}}v_cnd_nan:
; SI: V_CNDMASK_B32_e64 v{{[0-9]}}, v{{[0-9]}}, -1, s{{\[[0-9]+:[0-9]+\]}}
; SI-DAG: v{{[0-9]}}
; All nan values are converted to 0xffffffff
diff --git a/test/CodeGen/R600/vector-alloca.ll b/test/CodeGen/R600/vector-alloca.ll
index ec1995f6808..0b457a858e8 100644
--- a/test/CodeGen/R600/vector-alloca.ll
+++ b/test/CodeGen/R600/vector-alloca.ll
@@ -2,7 +2,7 @@
; RUN: llc -march=r600 -mcpu=verde -mattr=-promote-alloca -verify-machineinstrs < %s | FileCheck -check-prefix=SI-ALLOCA -check-prefix=SI -check-prefix=FUNC %s
; RUN: llc -march=r600 -mcpu=verde -mattr=+promote-alloca -verify-machineinstrs < %s | FileCheck -check-prefix=SI-PROMOTE -check-prefix=SI -check-prefix=FUNC %s
-; FUNC-LABEL: @vector_read
+; FUNC-LABEL: {{^}}vector_read:
; EG: MOV
; EG: MOV
; EG: MOV
@@ -25,7 +25,7 @@ entry:
ret void
}
-; FUNC-LABEL: @vector_write
+; FUNC-LABEL: {{^}}vector_write:
; EG: MOV
; EG: MOV
; EG: MOV
@@ -53,7 +53,7 @@ entry:
; This test should be optimized to:
; store i32 0, i32 addrspace(1)* %out
-; FUNC-LABEL: @bitcast_gep
+; FUNC-LABEL: {{^}}bitcast_gep:
; EG: STORE_RAW
define void @bitcast_gep(i32 addrspace(1)* %out, i32 %w_index, i32 %r_index) {
entry:
diff --git a/test/CodeGen/R600/vertex-fetch-encoding.ll b/test/CodeGen/R600/vertex-fetch-encoding.ll
index 7ea7a5c079c..e24744e5ea4 100644
--- a/test/CodeGen/R600/vertex-fetch-encoding.ll
+++ b/test/CodeGen/R600/vertex-fetch-encoding.ll
@@ -1,9 +1,9 @@
; RUN: llc < %s -march=r600 -show-mc-encoding -mcpu=barts | FileCheck --check-prefix=NI-CHECK %s
; RUN: llc < %s -march=r600 -show-mc-encoding -mcpu=cayman | FileCheck --check-prefix=CM-CHECK %s
-; NI-CHECK: @vtx_fetch32
+; NI-CHECK: {{^}}vtx_fetch32:
; NI-CHECK: VTX_READ_32 T[[GPR:[0-9]]].X, T[[GPR]].X, 0 ; encoding: [0x40,0x01,0x0[[GPR]],0x10,0x0[[GPR]],0xf0,0x5f,0x13,0x00,0x00,0x08,0x00
-; CM-CHECK: @vtx_fetch32
+; CM-CHECK: {{^}}vtx_fetch32:
; CM-CHECK: VTX_READ_32 T[[GPR:[0-9]]].X, T[[GPR]].X, 0 ; encoding: [0x40,0x01,0x0[[GPR]],0x00,0x0[[GPR]],0xf0,0x5f,0x13,0x00,0x00,0x00,0x00
define void @vtx_fetch32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
@@ -13,7 +13,7 @@ entry:
ret void
}
-; NI-CHECK: @vtx_fetch128
+; NI-CHECK: {{^}}vtx_fetch128:
; NI-CHECK: VTX_READ_128 T[[DST:[0-9]]].XYZW, T[[SRC:[0-9]]].X, 0 ; encoding: [0x40,0x01,0x0[[SRC]],0x40,0x0[[DST]],0x10,0x8d,0x18,0x00,0x00,0x08,0x00
; XXX: Add a case for Cayman when v4i32 stores are supported.
diff --git a/test/CodeGen/R600/vop-shrink.ll b/test/CodeGen/R600/vop-shrink.ll
index bf1aae4565e..034d3f114ee 100644
--- a/test/CodeGen/R600/vop-shrink.ll
+++ b/test/CodeGen/R600/vop-shrink.ll
@@ -1,7 +1,7 @@
; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs< %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; Test that we correctly commute a sub instruction
-; FUNC-LABEL: @sub_rev
+; FUNC-LABEL: {{^}}sub_rev:
; SI-NOT: V_SUB_I32_e32 v{{[0-9]+}}, s
; SI: V_SUBREV_I32_e32 v{{[0-9]+}}, s
@@ -32,7 +32,7 @@ endif: ; preds = %else, %if
; Test that we fold an immediate that was illegal for a 64-bit op into the
; 32-bit op when we shrink it.
-; FUNC-LABEL: @add_fold
+; FUNC-LABEL: {{^}}add_fold:
; SI: V_ADD_F32_e32 v{{[0-9]+}}, 0x44800000
define void @add_fold(float addrspace(1)* %out) {
entry:
diff --git a/test/CodeGen/R600/vselect.ll b/test/CodeGen/R600/vselect.ll
index dca7b067b26..a1f4ae1261b 100644
--- a/test/CodeGen/R600/vselect.ll
+++ b/test/CodeGen/R600/vselect.ll
@@ -1,11 +1,11 @@
;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck --check-prefix=EG-CHECK %s
;RUN: llc < %s -march=r600 -mcpu=SI -verify-machineinstrs | FileCheck --check-prefix=SI-CHECK %s
-;EG-CHECK: @test_select_v2i32
+;EG-CHECK: {{^}}test_select_v2i32:
;EG-CHECK: CNDE_INT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
;EG-CHECK: CNDE_INT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;SI-CHECK: @test_select_v2i32
+;SI-CHECK: {{^}}test_select_v2i32:
;SI-CHECK: V_CNDMASK_B32_e64
;SI-CHECK: V_CNDMASK_B32_e64
@@ -19,11 +19,11 @@ entry:
ret void
}
-;EG-CHECK: @test_select_v2f32
+;EG-CHECK: {{^}}test_select_v2f32:
;EG-CHECK: CNDE_INT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
;EG-CHECK: CNDE_INT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;SI-CHECK: @test_select_v2f32
+;SI-CHECK: {{^}}test_select_v2f32:
;SI-CHECK: V_CNDMASK_B32_e64
;SI-CHECK: V_CNDMASK_B32_e64
@@ -37,13 +37,13 @@ entry:
ret void
}
-;EG-CHECK: @test_select_v4i32
+;EG-CHECK: {{^}}test_select_v4i32:
;EG-CHECK: CNDE_INT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
;EG-CHECK: CNDE_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
;EG-CHECK: CNDE_INT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
;EG-CHECK: CNDE_INT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;SI-CHECK: @test_select_v4i32
+;SI-CHECK: {{^}}test_select_v4i32:
;SI-CHECK: V_CNDMASK_B32_e64
;SI-CHECK: V_CNDMASK_B32_e64
;SI-CHECK: V_CNDMASK_B32_e64
@@ -59,7 +59,7 @@ entry:
ret void
}
-;EG-CHECK: @test_select_v4f32
+;EG-CHECK: {{^}}test_select_v4f32:
;EG-CHECK: CNDE_INT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
;EG-CHECK: CNDE_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
;EG-CHECK: CNDE_INT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
diff --git a/test/CodeGen/R600/vselect64.ll b/test/CodeGen/R600/vselect64.ll
index 604695b4fa6..ef85ebe7899 100644
--- a/test/CodeGen/R600/vselect64.ll
+++ b/test/CodeGen/R600/vselect64.ll
@@ -1,7 +1,7 @@
; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
; XXX: Merge this test into vselect.ll once SI supports 64-bit select.
-; CHECK-LABEL: @test_select_v4i64
+; CHECK-LABEL: {{^}}test_select_v4i64:
; Make sure the vectors aren't being stored on the stack. We know they are
; being stored on the stack if the shader uses at least 10 registers.
; CHECK-NOT: {{\**}} MOV T{{[0-9][0-9]}}.X
diff --git a/test/CodeGen/R600/vtx-fetch-branch.ll b/test/CodeGen/R600/vtx-fetch-branch.ll
index 0fc99dee0db..bcbe34ea543 100644
--- a/test/CodeGen/R600/vtx-fetch-branch.ll
+++ b/test/CodeGen/R600/vtx-fetch-branch.ll
@@ -6,7 +6,7 @@
; after the fetch clause.
-; CHECK-LABEL: @test
+; CHECK-LABEL: {{^}}test:
; CHECK-NOT: ALU_POP_AFTER
; CHECK: TEX
; CHECK-NEXT: POP
diff --git a/test/CodeGen/R600/vtx-schedule.ll b/test/CodeGen/R600/vtx-schedule.ll
index ce852c5efef..8254c992347 100644
--- a/test/CodeGen/R600/vtx-schedule.ll
+++ b/test/CodeGen/R600/vtx-schedule.ll
@@ -4,7 +4,7 @@
; the result of another VTX_READ instruction were being grouped in the
; same fetch clause.
-; CHECK: @test
+; CHECK: {{^}}test:
; CHECK: Fetch clause
; CHECK: VTX_READ_32 [[IN0:T[0-9]+\.X]], [[IN0]], 0
; CHECK: Fetch clause
diff --git a/test/CodeGen/R600/wait.ll b/test/CodeGen/R600/wait.ll
index b0b7e919586..7703b84ec28 100644
--- a/test/CodeGen/R600/wait.ll
+++ b/test/CodeGen/R600/wait.ll
@@ -1,6 +1,6 @@
; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -strict-whitespace %s
-; CHECK-LABEL: @main
+; CHECK-LABEL: {{^}}main:
; CHECK: S_LOAD_DWORDX4
; CHECK: S_LOAD_DWORDX4
; CHECK: S_WAITCNT lgkmcnt(0){{$}}
diff --git a/test/CodeGen/R600/work-item-intrinsics.ll b/test/CodeGen/R600/work-item-intrinsics.ll
index 1dbd9b81506..a1337ae1f4d 100644
--- a/test/CodeGen/R600/work-item-intrinsics.ll
+++ b/test/CodeGen/R600/work-item-intrinsics.ll
@@ -2,7 +2,7 @@
; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-; FUNC-LABEL: @ngroups_x
+; FUNC-LABEL: {{^}}ngroups_x:
; EG: MEM_RAT_CACHELESS STORE_RAW [[VAL:T[0-9]+\.X]]
; EG: MOV [[VAL]], KC0[0].X
@@ -16,7 +16,7 @@ entry:
ret void
}
-; FUNC-LABEL: @ngroups_y
+; FUNC-LABEL: {{^}}ngroups_y:
; EG: MEM_RAT_CACHELESS STORE_RAW [[VAL:T[0-9]+\.X]]
; EG: MOV [[VAL]], KC0[0].Y
@@ -30,7 +30,7 @@ entry:
ret void
}
-; FUNC-LABEL: @ngroups_z
+; FUNC-LABEL: {{^}}ngroups_z:
; EG: MEM_RAT_CACHELESS STORE_RAW [[VAL:T[0-9]+\.X]]
; EG: MOV [[VAL]], KC0[0].Z
@@ -44,7 +44,7 @@ entry:
ret void
}
-; FUNC-LABEL: @global_size_x
+; FUNC-LABEL: {{^}}global_size_x:
; EG: MEM_RAT_CACHELESS STORE_RAW [[VAL:T[0-9]+\.X]]
; EG: MOV [[VAL]], KC0[0].W
@@ -58,7 +58,7 @@ entry:
ret void
}
-; FUNC-LABEL: @global_size_y
+; FUNC-LABEL: {{^}}global_size_y:
; EG: MEM_RAT_CACHELESS STORE_RAW [[VAL:T[0-9]+\.X]]
; EG: MOV [[VAL]], KC0[1].X
@@ -72,7 +72,7 @@ entry:
ret void
}
-; FUNC-LABEL: @global_size_z
+; FUNC-LABEL: {{^}}global_size_z:
; EG: MEM_RAT_CACHELESS STORE_RAW [[VAL:T[0-9]+\.X]]
; EG: MOV [[VAL]], KC0[1].Y
@@ -86,7 +86,7 @@ entry:
ret void
}
-; FUNC-LABEL: @local_size_x
+; FUNC-LABEL: {{^}}local_size_x:
; EG: MEM_RAT_CACHELESS STORE_RAW [[VAL:T[0-9]+\.X]]
; EG: MOV [[VAL]], KC0[1].Z
@@ -100,7 +100,7 @@ entry:
ret void
}
-; FUNC-LABEL: @local_size_y
+; FUNC-LABEL: {{^}}local_size_y:
; EG: MEM_RAT_CACHELESS STORE_RAW [[VAL:T[0-9]+\.X]]
; EG: MOV [[VAL]], KC0[1].W
@@ -114,7 +114,7 @@ entry:
ret void
}
-; FUNC-LABEL: @local_size_z
+; FUNC-LABEL: {{^}}local_size_z:
; EG: MEM_RAT_CACHELESS STORE_RAW [[VAL:T[0-9]+\.X]]
; EG: MOV [[VAL]], KC0[2].X
@@ -132,7 +132,7 @@ entry:
; Currently we always use exactly 2 user sgprs for the pointer to the
; kernel arguments, but this may change in the future.
-; FUNC-LABEL: @tgid_x
+; FUNC-LABEL: {{^}}tgid_x:
; SI: V_MOV_B32_e32 [[VVAL:v[0-9]+]], s4
; SI: BUFFER_STORE_DWORD [[VVAL]]
define void @tgid_x (i32 addrspace(1)* %out) {
@@ -142,7 +142,7 @@ entry:
ret void
}
-; FUNC-LABEL: @tgid_y
+; FUNC-LABEL: {{^}}tgid_y:
; SI: V_MOV_B32_e32 [[VVAL:v[0-9]+]], s5
; SI: BUFFER_STORE_DWORD [[VVAL]]
define void @tgid_y (i32 addrspace(1)* %out) {
@@ -152,7 +152,7 @@ entry:
ret void
}
-; FUNC-LABEL: @tgid_z
+; FUNC-LABEL: {{^}}tgid_z:
; SI: V_MOV_B32_e32 [[VVAL:v[0-9]+]], s6
; SI: BUFFER_STORE_DWORD [[VVAL]]
define void @tgid_z (i32 addrspace(1)* %out) {
@@ -162,7 +162,7 @@ entry:
ret void
}
-; FUNC-LABEL: @tidig_x
+; FUNC-LABEL: {{^}}tidig_x:
; SI: BUFFER_STORE_DWORD v0
define void @tidig_x (i32 addrspace(1)* %out) {
entry:
@@ -171,7 +171,7 @@ entry:
ret void
}
-; FUNC-LABEL: @tidig_y
+; FUNC-LABEL: {{^}}tidig_y:
; SI: BUFFER_STORE_DWORD v1
define void @tidig_y (i32 addrspace(1)* %out) {
entry:
@@ -180,7 +180,7 @@ entry:
ret void
}
-; FUNC-LABEL: @tidig_z
+; FUNC-LABEL: {{^}}tidig_z:
; SI: BUFFER_STORE_DWORD v2
define void @tidig_z (i32 addrspace(1)* %out) {
entry:
diff --git a/test/CodeGen/R600/wrong-transalu-pos-fix.ll b/test/CodeGen/R600/wrong-transalu-pos-fix.ll
index b1cbe3ffbc4..d652d2d901e 100644
--- a/test/CodeGen/R600/wrong-transalu-pos-fix.ll
+++ b/test/CodeGen/R600/wrong-transalu-pos-fix.ll
@@ -1,7 +1,7 @@
; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
; We want all MULLO_INT inst to be last in their instruction group
-;CHECK: @fill3d
+;CHECK: {{^}}fill3d:
;CHECK-NOT: MULLO_INT T[0-9]+
; ModuleID = 'radeon'
diff --git a/test/CodeGen/R600/xor.ll b/test/CodeGen/R600/xor.ll
index 8c2c80ed195..00e57f68ec7 100644
--- a/test/CodeGen/R600/xor.ll
+++ b/test/CodeGen/R600/xor.ll
@@ -1,11 +1,11 @@
;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck --check-prefix=EG-CHECK %s
;RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck --check-prefix=SI-CHECK %s
-;EG-CHECK: @xor_v2i32
+;EG-CHECK: {{^}}xor_v2i32:
;EG-CHECK: XOR_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
;EG-CHECK: XOR_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;SI-CHECK: @xor_v2i32
+;SI-CHECK: {{^}}xor_v2i32:
;SI-CHECK: V_XOR_B32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
;SI-CHECK: V_XOR_B32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
@@ -18,13 +18,13 @@ define void @xor_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in
ret void
}
-;EG-CHECK: @xor_v4i32
+;EG-CHECK: {{^}}xor_v4i32:
;EG-CHECK: XOR_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
;EG-CHECK: XOR_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
;EG-CHECK: XOR_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
;EG-CHECK: XOR_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;SI-CHECK: @xor_v4i32
+;SI-CHECK: {{^}}xor_v4i32:
;SI-CHECK: V_XOR_B32_e32 {{v[0-9]+, v[0-9]+, v[0-9]+}}
;SI-CHECK: V_XOR_B32_e32 {{v[0-9]+, v[0-9]+, v[0-9]+}}
;SI-CHECK: V_XOR_B32_e32 {{v[0-9]+, v[0-9]+, v[0-9]+}}
@@ -38,10 +38,10 @@ define void @xor_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in
ret void
}
-;EG-CHECK: @xor_i1
+;EG-CHECK: {{^}}xor_i1:
;EG-CHECK: XOR_INT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW], PS}}
-;SI-CHECK: @xor_i1
+;SI-CHECK: {{^}}xor_i1:
;SI-CHECK: V_XOR_B32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
define void @xor_i1(float addrspace(1)* %out, float addrspace(1)* %in0, float addrspace(1)* %in1) {
@@ -55,7 +55,7 @@ define void @xor_i1(float addrspace(1)* %out, float addrspace(1)* %in0, float ad
ret void
}
-; SI-CHECK-LABEL: @vector_xor_i32
+; SI-CHECK-LABEL: {{^}}vector_xor_i32:
; SI-CHECK: V_XOR_B32_e32
define void @vector_xor_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in0, i32 addrspace(1)* %in1) {
%a = load i32 addrspace(1)* %in0
@@ -65,7 +65,7 @@ define void @vector_xor_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in0, i32
ret void
}
-; SI-CHECK-LABEL: @scalar_xor_i32
+; SI-CHECK-LABEL: {{^}}scalar_xor_i32:
; SI-CHECK: S_XOR_B32
define void @scalar_xor_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) {
%result = xor i32 %a, %b
@@ -73,7 +73,7 @@ define void @scalar_xor_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) {
ret void
}
-; SI-CHECK-LABEL: @scalar_not_i32
+; SI-CHECK-LABEL: {{^}}scalar_not_i32:
; SI-CHECK: S_NOT_B32
define void @scalar_not_i32(i32 addrspace(1)* %out, i32 %a) {
%result = xor i32 %a, -1
@@ -81,7 +81,7 @@ define void @scalar_not_i32(i32 addrspace(1)* %out, i32 %a) {
ret void
}
-; SI-CHECK-LABEL: @vector_not_i32
+; SI-CHECK-LABEL: {{^}}vector_not_i32:
; SI-CHECK: V_NOT_B32
define void @vector_not_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in0, i32 addrspace(1)* %in1) {
%a = load i32 addrspace(1)* %in0
@@ -91,7 +91,7 @@ define void @vector_not_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in0, i32
ret void
}
-; SI-CHECK-LABEL: @vector_xor_i64
+; SI-CHECK-LABEL: {{^}}vector_xor_i64:
; SI-CHECK: V_XOR_B32_e32
; SI-CHECK: V_XOR_B32_e32
; SI-CHECK: S_ENDPGM
@@ -103,7 +103,7 @@ define void @vector_xor_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in0, i64
ret void
}
-; SI-CHECK-LABEL: @scalar_xor_i64
+; SI-CHECK-LABEL: {{^}}scalar_xor_i64:
; SI-CHECK: S_XOR_B64
; SI-CHECK: S_ENDPGM
define void @scalar_xor_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) {
@@ -112,7 +112,7 @@ define void @scalar_xor_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) {
ret void
}
-; SI-CHECK-LABEL: @scalar_not_i64
+; SI-CHECK-LABEL: {{^}}scalar_not_i64:
; SI-CHECK: S_NOT_B64
define void @scalar_not_i64(i64 addrspace(1)* %out, i64 %a) {
%result = xor i64 %a, -1
@@ -120,7 +120,7 @@ define void @scalar_not_i64(i64 addrspace(1)* %out, i64 %a) {
ret void
}
-; SI-CHECK-LABEL: @vector_not_i64
+; SI-CHECK-LABEL: {{^}}vector_not_i64:
; SI-CHECK: V_NOT_B32
; SI-CHECK: V_NOT_B32
define void @vector_not_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in0, i64 addrspace(1)* %in1) {
@@ -135,7 +135,7 @@ define void @vector_not_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in0, i64
; Note that in the future the backend may be smart enough to
; use an SALU instruction for this.
-; SI-CHECK-LABEL: @xor_cf
+; SI-CHECK-LABEL: {{^}}xor_cf:
; SI-CHECK: S_XOR_B64
define void @xor_cf(i64 addrspace(1)* %out, i64 addrspace(1)* %in, i64 %a, i64 %b) {
entry:
diff --git a/test/CodeGen/R600/zero_extend.ll b/test/CodeGen/R600/zero_extend.ll
index 1a0fd731d7d..0e527d6d1a2 100644
--- a/test/CodeGen/R600/zero_extend.ll
+++ b/test/CodeGen/R600/zero_extend.ll
@@ -1,11 +1,11 @@
; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s --check-prefix=R600-CHECK
; RUN: llc < %s -march=r600 -mcpu=SI -verify-machineinstrs | FileCheck %s --check-prefix=SI-CHECK
-; R600-CHECK: @test
+; R600-CHECK: {{^}}test:
; R600-CHECK: MEM_RAT_CACHELESS STORE_RAW
; R600-CHECK: MEM_RAT_CACHELESS STORE_RAW
-; SI-CHECK: @test
+; SI-CHECK: {{^}}test:
; SI-CHECK: S_MOV_B32 [[ZERO:s[0-9]]], 0{{$}}
; SI-CHECK: V_MOV_B32_e32 v[[V_ZERO:[0-9]]], [[ZERO]]
; SI-CHECK: BUFFER_STORE_DWORDX2 v[0:[[V_ZERO]]{{\]}}
@@ -18,7 +18,7 @@ entry:
ret void
}
-; SI-CHECK-LABEL: @testi1toi32
+; SI-CHECK-LABEL: {{^}}testi1toi32:
; SI-CHECK: V_CNDMASK_B32
define void @testi1toi32(i32 addrspace(1)* %out, i32 %a, i32 %b) {
entry:
@@ -28,7 +28,7 @@ entry:
ret void
}
-; SI-CHECK-LABEL: @zext_i1_to_i64
+; SI-CHECK-LABEL: {{^}}zext_i1_to_i64:
; SI-CHECK: V_CMP_EQ_I32
; SI-CHECK: V_CNDMASK_B32
; SI-CHECK: S_MOV_B32 s{{[0-9]+}}, 0