summaryrefslogtreecommitdiff
path: root/test/CodeGen
diff options
context:
space:
mode:
authorMatthias Braun <matze@braunis.de>2016-06-25 00:23:00 +0000
committerMatthias Braun <matze@braunis.de>2016-06-25 00:23:00 +0000
commitf011e371810ab1e664c2051fbe073ad1c7904a2e (patch)
tree37d2603f61d542bdef0c41a2a40c3cbe95b079d7 /test/CodeGen
parentc1fd19fd0b89b4755207c62dfe8c2ef991cbb250 (diff)
MachineScheduler: Fully compare top/bottom candidates
In bidirectional scheduling this gives more stable results than just comparing the "reason" fields of the top/bottom node because the reason field may be higher depending on what other nodes are in the queue. Differential Revision: http://reviews.llvm.org/D19401 git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@273755 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'test/CodeGen')
-rw-r--r--test/CodeGen/AArch64/arm64-convert-v4f64.ll4
-rw-r--r--test/CodeGen/AArch64/bitreverse.ll2
-rw-r--r--test/CodeGen/AArch64/cxx-tlscc.ll6
-rw-r--r--test/CodeGen/AArch64/vcvt-oversize.ll5
-rw-r--r--test/CodeGen/AArch64/vector-fcopysign.ll18
-rw-r--r--test/CodeGen/AMDGPU/and.ll2
-rw-r--r--test/CodeGen/AMDGPU/atomic_cmp_swap_local.ll4
-rw-r--r--test/CodeGen/AMDGPU/ctpop64.ll6
-rw-r--r--test/CodeGen/AMDGPU/ds_read2_offset_order.ll1
-rw-r--r--test/CodeGen/AMDGPU/ds_read2st64.ll2
-rw-r--r--test/CodeGen/AMDGPU/fneg-fabs.f64.ll2
-rw-r--r--test/CodeGen/AMDGPU/indirect-addressing-si.ll4
-rw-r--r--test/CodeGen/AMDGPU/insert_vector_elt.ll4
-rw-r--r--test/CodeGen/AMDGPU/llvm.AMDGPU.rsq.clamped.f64.ll5
-rw-r--r--test/CodeGen/AMDGPU/llvm.amdgcn.rsq.clamp.ll3
-rw-r--r--test/CodeGen/AMDGPU/local-memory-two-objects.ll3
-rw-r--r--test/CodeGen/AMDGPU/move-addr64-rsrc-dead-subreg-writes.ll4
-rw-r--r--test/CodeGen/AMDGPU/sra.ll4
-rw-r--r--test/CodeGen/PowerPC/ppc-shrink-wrapping.ll5
-rw-r--r--test/CodeGen/PowerPC/ppc64-byval-align.ll3
20 files changed, 44 insertions, 43 deletions
diff --git a/test/CodeGen/AArch64/arm64-convert-v4f64.ll b/test/CodeGen/AArch64/arm64-convert-v4f64.ll
index b8da3991031..ed061122f31 100644
--- a/test/CodeGen/AArch64/arm64-convert-v4f64.ll
+++ b/test/CodeGen/AArch64/arm64-convert-v4f64.ll
@@ -23,8 +23,8 @@ define <8 x i8> @fptosi_v4f64_to_v4i8(<8 x double>* %ptr) {
; CHECK-DAG: xtn2 v[[NA2]].4s, v[[CONV3]].2d
; CHECK-DAG: xtn v[[NA0:[0-9]+]].2s, v[[CONV0]].2d
; CHECK-DAG: xtn2 v[[NA0]].4s, v[[CONV1]].2d
-; CHECK-DAG: xtn v[[TMP1:[0-9]+]].4h, v[[NA0]].4s
-; CHECK-DAG: xtn2 v[[TMP1]].8h, v[[NA2]].4s
+; CHECK-DAG: xtn v[[TMP1:[0-9]+]].4h, v[[NA2]].4s
+; CHECK-DAG: xtn2 v[[TMP1]].8h, v[[NA0]].4s
; CHECK: xtn v0.8b, v[[TMP1]].8h
%tmp1 = load <8 x double>, <8 x double>* %ptr
%tmp2 = fptosi <8 x double> %tmp1 to <8 x i8>
diff --git a/test/CodeGen/AArch64/bitreverse.ll b/test/CodeGen/AArch64/bitreverse.ll
index 2538ffdbd6c..2eee7cfd8b9 100644
--- a/test/CodeGen/AArch64/bitreverse.ll
+++ b/test/CodeGen/AArch64/bitreverse.ll
@@ -52,7 +52,7 @@ define <8 x i8> @g_vec(<8 x i8> %a) {
; CHECK-DAG: movi [[M2:v.*]], #64
; CHECK-DAG: movi [[M3:v.*]], #32
; CHECK-DAG: movi [[M4:v.*]], #16
-; CHECK-DAG: movi [[M5:v.*]], #8
+; CHECK-DAG: movi [[M5:v.*]], #8{{$}}
; CHECK-DAG: movi [[M6:v.*]], #4{{$}}
; CHECK-DAG: movi [[M7:v.*]], #2{{$}}
; CHECK-DAG: movi [[M8:v.*]], #1{{$}}
diff --git a/test/CodeGen/AArch64/cxx-tlscc.ll b/test/CodeGen/AArch64/cxx-tlscc.ll
index 948b1c18f27..a36aad51ca8 100644
--- a/test/CodeGen/AArch64/cxx-tlscc.ll
+++ b/test/CodeGen/AArch64/cxx-tlscc.ll
@@ -44,7 +44,9 @@ __tls_init.exit:
; CHECK-NOT: stp d3, d2
; CHECK-NOT: stp d1, d0
; CHECK-NOT: stp x20, x19
-; CHECK-NOT: stp x14, x13
+; FIXME: The splitting logic in the register allocator fails to split along
+; control flow here, we used to get this right by accident before...
+; CHECK-NOTXX: stp x14, x13
; CHECK-NOT: stp x12, x11
; CHECK-NOT: stp x10, x9
; CHECK-NOT: stp x8, x7
@@ -63,7 +65,7 @@ __tls_init.exit:
; CHECK-NOT: ldp x8, x7
; CHECK-NOT: ldp x10, x9
; CHECK-NOT: ldp x12, x11
-; CHECK-NOT: ldp x14, x13
+; CHECK-NOTXX: ldp x14, x13
; CHECK-NOT: ldp x20, x19
; CHECK-NOT: ldp d1, d0
; CHECK-NOT: ldp d3, d2
diff --git a/test/CodeGen/AArch64/vcvt-oversize.ll b/test/CodeGen/AArch64/vcvt-oversize.ll
index 066a4b66620..b6e25cfadaa 100644
--- a/test/CodeGen/AArch64/vcvt-oversize.ll
+++ b/test/CodeGen/AArch64/vcvt-oversize.ll
@@ -2,8 +2,9 @@
define <8 x i8> @float_to_i8(<8 x float>* %in) {
; CHECK-LABEL: float_to_i8:
-; CHECK-DAG: fadd v[[LSB:[0-9]+]].4s, v0.4s, v0.4s
-; CHECK-DAG: fadd v[[MSB:[0-9]+]].4s, v1.4s, v1.4s
+; CHECK: ldp q1, q0, [x0]
+; CHECK-DAG: fadd v[[LSB:[0-9]+]].4s, v1.4s, v1.4s
+; CHECK-DAG: fadd v[[MSB:[0-9]+]].4s, v0.4s, v0.4s
; CHECK-DAG: fcvtzu v[[LSB2:[0-9]+]].4s, v[[LSB]].4s
; CHECK-DAG: fcvtzu v[[MSB2:[0-9]+]].4s, v[[MSB]].4s
; CHECK-DAG: xtn v[[TMP:[0-9]+]].4h, v[[LSB]].4s
diff --git a/test/CodeGen/AArch64/vector-fcopysign.ll b/test/CodeGen/AArch64/vector-fcopysign.ll
index a9b2eb2101f..47d75d5ecc6 100644
--- a/test/CodeGen/AArch64/vector-fcopysign.ll
+++ b/test/CodeGen/AArch64/vector-fcopysign.ll
@@ -94,21 +94,21 @@ define <4 x float> @test_copysign_v4f32_v4f32(<4 x float> %a, <4 x float> %b) #0
define <4 x float> @test_copysign_v4f32_v4f64(<4 x float> %a, <4 x double> %b) #0 {
; CHECK-LABEL: test_copysign_v4f32_v4f64:
; CHECK-NEXT: mov s3, v0[1]
-; CHECK-NEXT: mov d4, v1[1]
-; CHECK-NEXT: movi.4s v5, #128, lsl #24
-; CHECK-NEXT: fcvt s1, d1
+; CHECK-NEXT: movi.4s v4, #128, lsl #24
+; CHECK-NEXT: fcvt s5, d1
; CHECK-NEXT: mov s6, v0[2]
; CHECK-NEXT: mov s7, v0[3]
-; CHECK-NEXT: fcvt s16, d2
-; CHECK-NEXT: bit.16b v0, v1, v5
-; CHECK-NEXT: bit.16b v6, v16, v5
-; CHECK-NEXT: fcvt s1, d4
-; CHECK-NEXT: bit.16b v3, v1, v5
+; CHECK-NEXT: bit.16b v0, v5, v4
+; CHECK-NEXT: fcvt s5, d2
+; CHECK-NEXT: bit.16b v6, v5, v4
+; CHECK-NEXT: mov d1, v1[1]
+; CHECK-NEXT: fcvt s1, d1
+; CHECK-NEXT: bit.16b v3, v1, v4
; CHECK-NEXT: mov d1, v2[1]
; CHECK-NEXT: fcvt s1, d1
; CHECK-NEXT: ins.s v0[1], v3[0]
; CHECK-NEXT: ins.s v0[2], v6[0]
-; CHECK-NEXT: bit.16b v7, v1, v5
+; CHECK-NEXT: bit.16b v7, v1, v4
; CHECK-NEXT: ins.s v0[3], v7[0]
; CHECK-NEXT: ret
%tmp0 = fptrunc <4 x double> %b to <4 x float>
diff --git a/test/CodeGen/AMDGPU/and.ll b/test/CodeGen/AMDGPU/and.ll
index c6d5bf1284a..0046bc93826 100644
--- a/test/CodeGen/AMDGPU/and.ll
+++ b/test/CodeGen/AMDGPU/and.ll
@@ -486,8 +486,8 @@ define void @s_and_inline_imm_neg_4.0_i64(i64 addrspace(1)* %out, i64 addrspace(
; low 32-bits, which is not a valid 64-bit inline immmediate.
; FUNC-LABEL: {{^}}s_and_inline_imm_f32_4.0_i64:
-; SI: s_load_dwordx2
; SI: s_load_dword s
+; SI: s_load_dwordx2
; SI-NOT: and
; SI: s_and_b32 s[[K_HI:[0-9]+]], s{{[0-9]+}}, 4.0
; SI-NOT: and
diff --git a/test/CodeGen/AMDGPU/atomic_cmp_swap_local.ll b/test/CodeGen/AMDGPU/atomic_cmp_swap_local.ll
index 77028d86fbb..6a2716cc903 100644
--- a/test/CodeGen/AMDGPU/atomic_cmp_swap_local.ll
+++ b/test/CodeGen/AMDGPU/atomic_cmp_swap_local.ll
@@ -21,8 +21,8 @@ define void @lds_atomic_cmpxchg_ret_i32_offset(i32 addrspace(1)* %out, i32 addrs
}
; FUNC-LABEL: {{^}}lds_atomic_cmpxchg_ret_i64_offset:
-; SICI: s_load_dword [[PTR:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
-; SICI: s_load_dwordx2 s{{\[}}[[LOSWAP:[0-9]+]]:[[HISWAP:[0-9]+]]{{\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0xd
+; SICI-DAG: s_load_dword [[PTR:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
+; SICI-DAG: s_load_dwordx2 s{{\[}}[[LOSWAP:[0-9]+]]:[[HISWAP:[0-9]+]]{{\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0xd
; VI-DAG: s_load_dword [[PTR:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0x2c
; VI-DAG: s_load_dwordx2 s{{\[}}[[LOSWAP:[0-9]+]]:[[HISWAP:[0-9]+]]{{\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0x34
; GCN-DAG: v_mov_b32_e32 v[[LOVCMP:[0-9]+]], 7
diff --git a/test/CodeGen/AMDGPU/ctpop64.ll b/test/CodeGen/AMDGPU/ctpop64.ll
index f2afbe1bd53..dca0cc1edb4 100644
--- a/test/CodeGen/AMDGPU/ctpop64.ll
+++ b/test/CodeGen/AMDGPU/ctpop64.ll
@@ -155,10 +155,10 @@ define void @s_ctpop_i128(i32 addrspace(1)* noalias %out, i128 %val) nounwind {
}
; FUNC-LABEL: {{^}}s_ctpop_i65:
-; GCN: s_bcnt1_i32_b64
; GCN: s_and_b32
-; GCN: s_bcnt1_i32_b64
-; GCN: s_add_i32
+; GCN: s_bcnt1_i32_b64 [[REG0:s[0-9]+]],
+; GCN: s_bcnt1_i32_b64 [[REG1:s[0-9]+]],
+; GCN: s_add_i32 {{s[0-9]+}}, [[REG0]], [[REG1]]
; GCN: s_endpgm
define void @s_ctpop_i65(i32 addrspace(1)* noalias %out, i65 %val) nounwind {
%ctpop = call i65 @llvm.ctpop.i65(i65 %val) nounwind readnone
diff --git a/test/CodeGen/AMDGPU/ds_read2_offset_order.ll b/test/CodeGen/AMDGPU/ds_read2_offset_order.ll
index 65758da28b0..57e190e0cca 100644
--- a/test/CodeGen/AMDGPU/ds_read2_offset_order.ll
+++ b/test/CodeGen/AMDGPU/ds_read2_offset_order.ll
@@ -8,7 +8,6 @@
; SI-LABEL: {{^}}offset_order:
-; SI: ds_read2st64_b32 v[{{[0-9]+}}:{{[0-9]+}}], v{{[0-9]+}} offset0:4{{$}}
; SI: ds_read2_b32 v[{{[0-9]+}}:{{[0-9]+}}], v{{[0-9]+}} offset0:2 offset1:3
; SI: ds_read2_b32 v[{{[0-9]+}}:{{[0-9]+}}], v{{[0-9]+}} offset0:14 offset1:12
; SI: ds_read_b32 v{{[0-9]+}}, v{{[0-9]+}} offset:44
diff --git a/test/CodeGen/AMDGPU/ds_read2st64.ll b/test/CodeGen/AMDGPU/ds_read2st64.ll
index 3f3e40ee278..725c3f9dd7c 100644
--- a/test/CodeGen/AMDGPU/ds_read2st64.ll
+++ b/test/CodeGen/AMDGPU/ds_read2st64.ll
@@ -197,8 +197,8 @@ define void @simple_read2st64_f64_max_offset(double addrspace(1)* %out, double a
; SI-LABEL: @simple_read2st64_f64_over_max_offset
; SI-NOT: ds_read2st64_b64
-; SI: v_add_i32_e32 [[BIGADD:v[0-9]+]], vcc, 0x10000, {{v[0-9]+}}
; SI: ds_read_b64 {{v\[[0-9]+:[0-9]+\]}}, {{v[0-9]+}} offset:512
+; SI: v_add_i32_e32 [[BIGADD:v[0-9]+]], vcc, 0x10000, {{v[0-9]+}}
; SI: ds_read_b64 {{v\[[0-9]+:[0-9]+\]}}, [[BIGADD]]
; SI: s_endpgm
define void @simple_read2st64_f64_over_max_offset(double addrspace(1)* %out, double addrspace(3)* %lds) #0 {
diff --git a/test/CodeGen/AMDGPU/fneg-fabs.f64.ll b/test/CodeGen/AMDGPU/fneg-fabs.f64.ll
index bac62da1b28..b03f318f457 100644
--- a/test/CodeGen/AMDGPU/fneg-fabs.f64.ll
+++ b/test/CodeGen/AMDGPU/fneg-fabs.f64.ll
@@ -55,7 +55,7 @@ define void @fneg_fabs_fn_free_f64(double addrspace(1)* %out, i64 %in) {
}
; GCN-LABEL: {{^}}fneg_fabs_f64:
-; GCN: s_load_dwordx2
+; GCN-DAG: s_load_dwordx2
; GCN-DAG: v_bfrev_b32_e32 [[IMMREG:v[0-9]+]], 1{{$}}
; SI-DAG: s_load_dwordx2 s{{\[}}[[LO_X:[0-9]+]]:[[HI_X:[0-9]+]]{{\]}}, s[{{[0-9]+:[0-9]+}}], 0xb
; VI-DAG: s_load_dwordx2 s{{\[}}[[LO_X:[0-9]+]]:[[HI_X:[0-9]+]]{{\]}}, s[{{[0-9]+:[0-9]+}}], 0x2c
diff --git a/test/CodeGen/AMDGPU/indirect-addressing-si.ll b/test/CodeGen/AMDGPU/indirect-addressing-si.ll
index d5120ba74e6..c4e54a737f1 100644
--- a/test/CodeGen/AMDGPU/indirect-addressing-si.ll
+++ b/test/CodeGen/AMDGPU/indirect-addressing-si.ll
@@ -179,7 +179,7 @@ entry:
; CHECK-LABEL: {{^}}extract_vgpr_offset_multiple_in_block:
-; CHECK: {{buffer|flat}}_load_dword [[IDX0:v[0-9]+]]
+; CHECK-DAG: {{buffer|flat}}_load_dword [[IDX0:v[0-9]+]]
; CHECK-DAG: s_mov_b32 [[S_ELT0:s[0-9]+]], 7
; CHECK-DAG: s_mov_b32 [[S_ELT1:s[0-9]+]], 9
; CHECK-DAG: v_mov_b32_e32 [[VEC_ELT0:v[0-9]+]], [[S_ELT0]]
@@ -199,7 +199,7 @@ entry:
; FIXME: Redundant copy
; CHECK: s_mov_b64 exec, [[MASK]]
-; CHECK: s_mov_b64 [[MASK]], exec
+; CHECK: s_mov_b64 [[MASK2:s\[[0-9]+:[0-9]+\]]], exec
; CHECK: [[LOOP1:BB[0-9]+_[0-9]+]]:
; CHECK: v_readfirstlane_b32 vcc_lo, [[IDX0]]
diff --git a/test/CodeGen/AMDGPU/insert_vector_elt.ll b/test/CodeGen/AMDGPU/insert_vector_elt.ll
index 1965c47520d..2d1b7f0efa2 100644
--- a/test/CodeGen/AMDGPU/insert_vector_elt.ll
+++ b/test/CodeGen/AMDGPU/insert_vector_elt.ll
@@ -10,13 +10,13 @@
; not just directly into the vector component?
; GCN-LABEL: {{^}}insertelement_v4f32_0:
-; GCN: s_load_dwordx4 s{{\[}}[[LOW_REG:[0-9]+]]:
+; GCN: s_load_dwordx4
; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}
; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}
; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}
; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}
; GCN-DAG: v_mov_b32_e32 [[CONSTREG:v[0-9]+]], 0x40a00000
-; GCN-DAG: v_mov_b32_e32 v[[LOW_REG]], [[CONSTREG]]
+; GCN-DAG: v_mov_b32_e32 v[[LOW_REG:[0-9]+]], [[CONSTREG]]
; GCN: buffer_store_dwordx4 v{{\[}}[[LOW_REG]]:
define void @insertelement_v4f32_0(<4 x float> addrspace(1)* %out, <4 x float> %a) nounwind {
%vecins = insertelement <4 x float> %a, float 5.000000e+00, i32 0
diff --git a/test/CodeGen/AMDGPU/llvm.AMDGPU.rsq.clamped.f64.ll b/test/CodeGen/AMDGPU/llvm.AMDGPU.rsq.clamped.f64.ll
index 70fbee44251..3098ed7d157 100644
--- a/test/CodeGen/AMDGPU/llvm.AMDGPU.rsq.clamped.f64.ll
+++ b/test/CodeGen/AMDGPU/llvm.AMDGPU.rsq.clamped.f64.ll
@@ -10,10 +10,9 @@ declare double @llvm.AMDGPU.rsq.clamped.f64(double) nounwind readnone
; TODO: this constant should be folded:
; VI-DAG: s_mov_b32 s[[LOW1:[0-9+]]], -1
; VI-DAG: s_mov_b32 s[[HIGH1:[0-9+]]], 0x7fefffff
-; VI-DAG: s_mov_b32 s[[HIGH2:[0-9+]]], 0xffefffff
-; VI-DAG: s_mov_b32 s[[LOW2:[0-9+]]], s[[LOW1]]
; VI-DAG: v_min_f64 v[0:1], [[RSQ]], s{{\[}}[[LOW1]]:[[HIGH1]]]
-; VI-DAG: v_max_f64 v[0:1], v[0:1], s{{\[}}[[LOW2]]:[[HIGH2]]]
+; VI-DAG: s_mov_b32 s[[HIGH2:[0-9+]]], 0xffefffff
+; VI-DAG: v_max_f64 v[0:1], v[0:1], s{{\[}}[[LOW1]]:[[HIGH2]]]
define void @rsq_clamped_f64(double addrspace(1)* %out, double %src) nounwind {
%rsq_clamped = call double @llvm.AMDGPU.rsq.clamped.f64(double %src) nounwind readnone
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.rsq.clamp.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.rsq.clamp.ll
index dff2f599061..73a5c54e175 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.rsq.clamp.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.rsq.clamp.ll
@@ -29,9 +29,8 @@ define void @rsq_clamp_f32(float addrspace(1)* %out, float %src) #0 {
; VI-DAG: s_mov_b32 s[[HIGH1:[0-9+]]], 0x7fefffff
; VI-DAG: s_mov_b32 s[[HIGH2:[0-9+]]], 0xffefffff
; VI-DAG: v_rsq_f64_e32 [[RSQ:v\[[0-9]+:[0-9]+\]]], s[{{[0-9]+:[0-9]+}}
-; VI-DAG: s_mov_b32 s[[LOW2:[0-9+]]], s[[LOW1]]
; VI-DAG: v_min_f64 v[0:1], [[RSQ]], s{{\[}}[[LOW1]]:[[HIGH1]]]
-; VI-DAG: v_max_f64 v[0:1], v[0:1], s{{\[}}[[LOW2]]:[[HIGH2]]]
+; VI-DAG: v_max_f64 v[0:1], v[0:1], s{{\[}}[[LOW1]]:[[HIGH2]]]
define void @rsq_clamp_f64(double addrspace(1)* %out, double %src) #0 {
%rsq_clamp = call double @llvm.amdgcn.rsq.clamp.f64(double %src)
store double %rsq_clamp, double addrspace(1)* %out
diff --git a/test/CodeGen/AMDGPU/local-memory-two-objects.ll b/test/CodeGen/AMDGPU/local-memory-two-objects.ll
index 31c7399122d..cec334f7df6 100644
--- a/test/CodeGen/AMDGPU/local-memory-two-objects.ll
+++ b/test/CodeGen/AMDGPU/local-memory-two-objects.ll
@@ -32,7 +32,8 @@
; GCN: v_lshlrev_b32_e32 [[ADDRW:v[0-9]+]], 2, v0
-; CI-DAG: ds_write2_b32 [[ADDRW]], {{v[0-9]*}}, {{v[0-9]+}} offset0:4
+; CI-DAG: ds_write_b32 [[ADDRW]], {{v[0-9]*}} offset:16
+; CI-DAG: ds_write_b32 [[ADDRW]], {{v[0-9]*$}}
; SI: v_add_i32_e32 [[ADDRW_OFF:v[0-9]+]], vcc, 16, [[ADDRW]]
diff --git a/test/CodeGen/AMDGPU/move-addr64-rsrc-dead-subreg-writes.ll b/test/CodeGen/AMDGPU/move-addr64-rsrc-dead-subreg-writes.ll
index 85dfbe6b8a3..36f12573c17 100644
--- a/test/CodeGen/AMDGPU/move-addr64-rsrc-dead-subreg-writes.ll
+++ b/test/CodeGen/AMDGPU/move-addr64-rsrc-dead-subreg-writes.ll
@@ -10,10 +10,10 @@
; GCN-DAG: buffer_load_dwordx2 v{{\[}}[[LDPTRLO:[0-9]+]]:[[LDPTRHI:[0-9]+]]{{\]}}
; GCN-NOT: v_mov_b32
-; GCN: v_mov_b32_e32 v[[VARG1LO:[0-9]+]], s[[ARG1LO]]
-; GCN-NOT: v_mov_b32
; GCN: v_mov_b32_e32 v[[VARG1HI:[0-9]+]], s[[ARG1HI]]
; GCN-NOT: v_mov_b32
+; GCN: v_mov_b32_e32 v[[VARG1LO:[0-9]+]], s[[ARG1LO]]
+; GCN-NOT: v_mov_b32
; GCN: v_add_i32_e32 v[[PTRLO:[0-9]+]], vcc, v[[LDPTRLO]], v[[VARG1LO]]
; GCN: v_addc_u32_e32 v[[PTRHI:[0-9]+]], vcc, v[[LDPTRHI]], v[[VARG1HI]]
diff --git a/test/CodeGen/AMDGPU/sra.ll b/test/CodeGen/AMDGPU/sra.ll
index 67406574281..dddfbfd3ed1 100644
--- a/test/CodeGen/AMDGPU/sra.ll
+++ b/test/CodeGen/AMDGPU/sra.ll
@@ -228,9 +228,9 @@ define void @v_ashr_32_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
}
; GCN-LABEL: {{^}}s_ashr_63_i64:
-; GCN-DAG: s_load_dword s[[HI:[0-9]+]], {{s\[[0-9]+:[0-9]+\]}}, {{0xc|0x30}}
+; GCN: s_load_dword s[[HI:[0-9]+]], {{s\[[0-9]+:[0-9]+\]}}, {{0xc|0x30}}
; GCN: s_ashr_i32 s[[SHIFT:[0-9]+]], s[[HI]], 31
-; GCN: s_add_u32 {{s[0-9]+}}, s[[HI]], {{s[0-9]+}}
+; GCN: s_add_u32 {{s[0-9]+}}, s[[SHIFT]], {{s[0-9]+}}
; GCN: s_addc_u32 {{s[0-9]+}}, s[[SHIFT]], {{s[0-9]+}}
define void @s_ashr_63_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) {
%result = ashr i64 %a, 63
diff --git a/test/CodeGen/PowerPC/ppc-shrink-wrapping.ll b/test/CodeGen/PowerPC/ppc-shrink-wrapping.ll
index 7493d2f1df1..3dcb0b2aee1 100644
--- a/test/CodeGen/PowerPC/ppc-shrink-wrapping.ll
+++ b/test/CodeGen/PowerPC/ppc-shrink-wrapping.ll
@@ -629,10 +629,11 @@ end:
; CHECK-LABEL: transpose
;
; Store of callee-save register saved by shrink wrapping
-; CHECK: std [[CSR:[0-9]+]], -[[STACK_OFFSET:[0-9]+]](1) # 8-byte Folded Spill
+; FIXME: Test disabled: Improved scheduling needs no spills/reloads any longer!
+; CHECKXX: std [[CSR:[0-9]+]], -[[STACK_OFFSET:[0-9]+]](1) # 8-byte Folded Spill
;
; Reload of callee-save register
-; CHECK: ld [[CSR]], -[[STACK_OFFSET]](1) # 8-byte Folded Reload
+; CHECKXX: ld [[CSR]], -[[STACK_OFFSET]](1) # 8-byte Folded Reload
;
; Ensure no subsequent uses of callee-save register before end of function
; CHECK-NOT: {{[a-z]+}} [[CSR]]
diff --git a/test/CodeGen/PowerPC/ppc64-byval-align.ll b/test/CodeGen/PowerPC/ppc64-byval-align.ll
index 7170f590658..89e7cc6c50e 100644
--- a/test/CodeGen/PowerPC/ppc64-byval-align.ll
+++ b/test/CodeGen/PowerPC/ppc64-byval-align.ll
@@ -35,8 +35,7 @@ entry:
ret i64 %0
}
; CHECK-LABEL: @callee2
-; CHECK: ld [[REG:[0-9]+]], 128(1)
-; CHECK: mr 3, [[REG]]
+; CHECK: ld 3, 128(1)
; CHECK: blr
declare i64 @test2(%struct.pad* byval, i32 signext, %struct.test* byval align 16)