xref: /aosp_15_r20/external/cronet/base/cpu_unittest.cc (revision 6777b5387eb2ff775bb5750e3f5d96f37fb7352b)
1 // Copyright 2012 The Chromium Authors
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "base/cpu.h"
6 
7 #include "base/containers/contains.h"
8 #include "base/logging.h"
9 #include "base/memory/protected_memory_buildflags.h"
10 #include "base/strings/string_util.h"
11 #include "base/test/gtest_util.h"
12 #include "build/build_config.h"
13 #include "testing/gtest/include/gtest/gtest.h"
14 
15 // Tests whether we can run extended instructions represented by the CPU
16 // information. This test actually executes some extended instructions (such as
17 // MMX, SSE, etc.) supported by the CPU and sees we can run them without
18 // "undefined instruction" exceptions. That is, this test succeeds when this
19 // test finishes without a crash.
TEST(CPU, RunExtendedInstructions) {
  // Retrieve the CPU information.
  base::CPU cpu;
#if defined(ARCH_CPU_X86_FAMILY)

  // MMX/SSE/SSE2/SSE3 are asserted (not skipped) — this test treats their
  // absence as a CPU-detection failure rather than old hardware.
  ASSERT_TRUE(cpu.has_mmx());
  ASSERT_TRUE(cpu.has_sse());
  ASSERT_TRUE(cpu.has_sse2());
  ASSERT_TRUE(cpu.has_sse3());

// GCC and clang instruction test.
#if defined(COMPILER_GCC)
  // Execute an MMX instruction.
  __asm__ __volatile__("emms\n" : : : "mm0");

  // Execute an SSE instruction.
  __asm__ __volatile__("xorps %%xmm0, %%xmm0\n" : : : "xmm0");

  // Execute an SSE 2 instruction.
  __asm__ __volatile__("psrldq $0, %%xmm0\n" : : : "xmm0");

  // Execute an SSE 3 instruction.
  __asm__ __volatile__("addsubpd %%xmm0, %%xmm0\n" : : : "xmm0");

  if (cpu.has_ssse3()) {
    // Execute a Supplemental SSE 3 instruction.
    __asm__ __volatile__("psignb %%xmm0, %%xmm0\n" : : : "xmm0");
  }

  if (cpu.has_sse41()) {
    // Execute an SSE 4.1 instruction.
    __asm__ __volatile__("pmuldq %%xmm0, %%xmm0\n" : : : "xmm0");
  }

  if (cpu.has_sse42()) {
    // Execute an SSE 4.2 instruction.
    __asm__ __volatile__("crc32 %%eax, %%eax\n" : : : "eax");
  }

  if (cpu.has_popcnt()) {
    // Execute a POPCNT instruction.
    __asm__ __volatile__("popcnt %%eax, %%eax\n" : : : "eax");
  }

  if (cpu.has_avx()) {
    // Execute an AVX instruction.
    __asm__ __volatile__("vzeroupper\n" : : : "xmm0");
  }

  if (cpu.has_fma3()) {
    // Execute a FMA3 instruction.
    __asm__ __volatile__("vfmadd132ps %%xmm0, %%xmm0, %%xmm0\n" : : : "xmm0");
  }

  if (cpu.has_avx2()) {
    // Execute an AVX 2 instruction.
    __asm__ __volatile__("vpunpcklbw %%ymm0, %%ymm0, %%ymm0\n" : : : "xmm0");
  }

  if (cpu.has_pku()) {
    // Execute rdpkru (emitted as raw opcode bytes 0f 01 ee); "=a" receives
    // the PKRU register value and ecx must be 0 for the read form.
    // NOTE(review): rdpkru also writes edx, which is listed only as an
    // input here — presumably benign for this smoke test, but confirm.
    uint32_t pkru;
    __asm__ __volatile__(".byte 0x0f,0x01,0xee\n"
                         : "=a"(pkru)
                         : "c"(0), "d"(0));
  }
// Visual C 32 bit and ClangCL 32/64 bit test.
#elif defined(COMPILER_MSVC) && (defined(ARCH_CPU_32_BITS) || \
      (defined(ARCH_CPU_64_BITS) && defined(__clang__)))

  // Execute an MMX instruction.
  __asm emms;

  // Execute an SSE instruction.
  __asm xorps xmm0, xmm0;

  // Execute an SSE 2 instruction.
  __asm psrldq xmm0, 0;

  // Execute an SSE 3 instruction.
  __asm addsubpd xmm0, xmm0;

  if (cpu.has_ssse3()) {
    // Execute a Supplemental SSE 3 instruction.
    __asm psignb xmm0, xmm0;
  }

  if (cpu.has_sse41()) {
    // Execute an SSE 4.1 instruction.
    __asm pmuldq xmm0, xmm0;
  }

  if (cpu.has_sse42()) {
    // Execute an SSE 4.2 instruction.
    __asm crc32 eax, eax;
  }

  if (cpu.has_popcnt()) {
    // Execute a POPCNT instruction.
    __asm popcnt eax, eax;
  }

  if (cpu.has_avx()) {
    // Execute an AVX instruction.
    __asm vzeroupper;
  }

  if (cpu.has_fma3()) {
    // Execute a FMA3 instruction.
    __asm vfmadd132ps xmm0, xmm0, xmm0;
  }

  if (cpu.has_avx2()) {
    // Execute an AVX 2 instruction.
    __asm vpunpcklbw ymm0, ymm0, ymm0
  }
#endif  // defined(COMPILER_GCC)
#endif  // defined(ARCH_CPU_X86_FAAMILY) -- see trailing guard comment below
// (the two #endif comments above close COMPILER_GCC and ARCH_CPU_X86_FAMILY)

#if defined(ARCH_CPU_ARM64)
  // Check that the CPU is correctly reporting support for the Armv8.5-A memory
  // tagging extension. The new MTE instructions aren't encoded in NOP space
  // like BTI/Pointer Authentication and will crash older cores with a SIGILL if
  // used incorrectly. This test demonstrates how it should be done and that
  // this approach works.
  if (cpu.has_mte()) {
#if !defined(__ARM_FEATURE_MEMORY_TAGGING)
    // In this section, we're running on an MTE-compatible core, but we're
    // building this file without MTE support. Fail this test to indicate that
    // there's a problem with the base/ build configuration.
    GTEST_FAIL()
        << "MTE support detected (but base/ built without MTE support)";
#else
    char ptr[32];
    uint64_t val;
    // Execute a trivial MTE instruction. Normally, MTE should be used via the
    // intrinsics documented at
    // https://developer.arm.com/documentation/101028/0012/10--Memory-tagging-intrinsics,
    // this test uses the irg (Insert Random Tag) instruction directly to make
    // sure that it's not optimized out by the compiler.
    __asm__ __volatile__("irg %0, %1" : "=r"(val) : "r"(ptr));
#endif  // __ARM_FEATURE_MEMORY_TAGGING
  }
#endif  // ARCH_CPU_ARM64
}
165 
166 // For https://crbug.com/249713
TEST(CPU, BrandAndVendorContainsNoNUL) {
  base::CPU cpu;
  // Both strings are derived from raw CPUID register dumps; verify that the
  // padding/terminating NUL bytes were stripped during parsing.
  const std::string& brand = cpu.cpu_brand();
  const std::string& vendor = cpu.vendor_name();
  EXPECT_EQ(brand.find('\0'), std::string::npos);
  EXPECT_EQ(vendor.find('\0'), std::string::npos);
}
172 
173 #if defined(ARCH_CPU_X86_FAMILY)
174 // Tests that we compute the correct CPU family and model based on the vendor
175 // and CPUID signature.
TEST(CPU, X86FamilyAndModel) {
  // Each case pairs a vendor string and raw CPUID signature with the
  // family/model fields ComputeX86FamilyAndModel must decode from it.
  struct SignatureCase {
    const char* vendor;
    int signature;
    int family;
    int model;
    int ext_family;
    int ext_model;
  };
  const SignatureCase kCases[] = {
      // Intel Skylake.
      {"GenuineIntel", 0x000406e3, 6, 78, 0, 4},
      // Intel Airmont.
      {"GenuineIntel", 0x000406c2, 6, 76, 0, 4},
      // Intel Prescott.
      {"GenuineIntel", 0x00000f31, 15, 3, 0, 0},
      // AMD Excavator.
      {"AuthenticAMD", 0x00670f00, 21, 112, 6, 7},
  };

  for (const SignatureCase& c : kCases) {
    const base::internal::X86ModelInfo info =
        base::internal::ComputeX86FamilyAndModel(c.vendor, c.signature);
    EXPECT_EQ(info.family, c.family) << c.vendor << "/" << c.signature;
    EXPECT_EQ(info.model, c.model) << c.vendor << "/" << c.signature;
    EXPECT_EQ(info.ext_family, c.ext_family) << c.vendor << "/" << c.signature;
    EXPECT_EQ(info.ext_model, c.ext_model) << c.vendor << "/" << c.signature;
  }
}
207 #endif  // defined(ARCH_CPU_X86_FAMILY)
208 
209 #if defined(ARCH_CPU_ARM_FAMILY) && \
210     (BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_CHROMEOS))
TEST(CPU, ARMImplementerAndPartNumber) {
  base::CPU cpu;

  const std::string& brand = cpu.cpu_brand();

  // Some devices, including on the CQ, do not report a cpu_brand
  // (https://crbug.com/1166533, https://crbug.com/1167123), so only verify
  // that whatever is reported carries no surrounding whitespace.
  EXPECT_EQ(brand, base::TrimWhitespaceASCII(brand, base::TRIM_ALL));
  EXPECT_GT(cpu.implementer(), 0u);
  EXPECT_GT(cpu.part_number(), 0u);
}
222 #endif  // defined(ARCH_CPU_ARM_FAMILY) && (BUILDFLAG(IS_LINUX) ||
223         // BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_CHROMEOS))
224 
225 #if BUILDFLAG(PROTECTED_MEMORY_ENABLED)
TEST(CPUDeathTest, VerifyModifyingCPUInstanceNoAllocationCrashes) {
  // The protected-memory-backed CPU instance is expected to be read-only at
  // runtime: any write to its bytes should crash immediately.
  const base::CPU& cpu = base::CPU::GetInstanceNoAllocation();
  uint8_t* const raw =
      const_cast<uint8_t*>(reinterpret_cast<const uint8_t*>(&cpu));

  // Flip a single bit in every 15th byte only; probing every byte makes the
  // death tests slow enough to hit the test time-out.
  for (size_t offset = 0; offset < sizeof(cpu); offset += 15) {
    const size_t bit = offset % 8;
    EXPECT_CHECK_DEATH_WITH(raw[offset] ^= (0x01 << bit), "");
  }
}
239 #endif
240