; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=sse2,sse-unaligned-mem | FileCheck %s --check-prefix=SSE2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx | FileCheck %s --check-prefix=AVX

; Although we have the ability to fold an unaligned load with AVX
; and under special conditions with some SSE implementations, we
; can not fold the load under any circumstances in these test
; cases because they are not 16-byte loads. The load must be
; executed as a scalar ('movs*') with a zero extension to
; 128-bits and then used in the packed logical ('andp*') op.
; PR22371 - http://llvm.org/bugs/show_bug.cgi?id=22371

define double @load_double_no_fold(double %x, double %y) {
; SSE2-LABEL: load_double_no_fold:
; SSE2:       BB#0:
; SSE2-NEXT:    cmplesd %xmm0, %xmm1
; SSE2-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; SSE2-NEXT:    andpd %xmm1, %xmm0
; SSE2-NEXT:    retq
;
; AVX-LABEL: load_double_no_fold:
; AVX:       BB#0:
; AVX-NEXT:    vcmplesd %xmm0, %xmm1, %xmm0
; AVX-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
; AVX-NEXT:    vandpd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq

  %cmp = fcmp oge double %x, %y
  %zext = zext i1 %cmp to i32
  %conv = sitofp i32 %zext to double
  ret double %conv
}

define float @load_float_no_fold(float %x, float %y) {
; SSE2-LABEL: load_float_no_fold:
; SSE2:       BB#0:
; SSE2-NEXT:    cmpless %xmm0, %xmm1
; SSE2-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT:    andps %xmm1, %xmm0
; SSE2-NEXT:    retq
;
; AVX-LABEL: load_float_no_fold:
; AVX:       BB#0:
; AVX-NEXT:    vcmpless %xmm0, %xmm1, %xmm0
; AVX-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; AVX-NEXT:    vandps %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq

  %cmp = fcmp oge float %x, %y
  %zext = zext i1 %cmp to i32
  %conv = sitofp i32 %zext to float
  ret float %conv
}