// RUN: %clang_cc1 -triple x86_64 -emit-llvm -o - %s | FileCheck %s

// Check that we don't generate unnecessary reloads.
//
// CHECK-LABEL: define void @f0()
// CHECK: [[x_0:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[y_0:%.*]] = alloca i32, align 4
// CHECK-NEXT: store i32 1, i32* [[x_0]]
// CHECK-NEXT: store i32 1, i32* [[x_0]]
// CHECK-NEXT: store i32 1, i32* [[y_0]]
// CHECK: }
// Two stores to x (one redundant) followed by an assignment expression whose
// value initializes y.  Codegen should forward the assigned constant rather
// than reloading x, so the CHECK block above expects three stores and no load.
void f0() {
  int x, y;
  x = 1;
  y = (x = 1); // value of the assignment expression is reused, not reloaded
}

// This used to test that we generate reloads for volatile access,
// but that does not appear to be correct behavior for C.
//
// CHECK-LABEL: define void @f1()
// CHECK: [[x_1:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[y_1:%.*]] = alloca i32, align 4
// CHECK-NEXT: store volatile i32 1, i32* [[x_1]]
// CHECK-NEXT: store volatile i32 1, i32* [[x_1]]
// CHECK-NEXT: store volatile i32 1, i32* [[y_1]]
// CHECK: }
// Same shape as f0 but with volatile locals: per C semantics the value of the
// assignment expression may still be forwarded to y without reloading x, so
// the CHECK block above expects three volatile stores and no volatile load.
void f1() {
  volatile int x, y;
  x = 1;
  y = (x = 1); // no reload of x is required even though x is volatile
}