1 /* SPDX-License-Identifier: GPL-2.0-only */
2
3 #ifndef CPU_X86_MSR_ACCESS_H
4 #define CPU_X86_MSR_ACCESS_H
5
6 #ifndef __ASSEMBLER__
7 #include <types.h>
8
/* 64-bit MSR value. Accessible either as the raw 64-bit quantity or as
 * the two 32-bit halves that the RDMSR/WRMSR instructions operate on. */
typedef union msr_union {
	struct {
		unsigned int lo;	/* low 32 bits (EAX) */
		unsigned int hi;	/* high 32 bits (EDX) */
	};
	uint64_t raw;			/* full 64-bit value */
} msr_t;
/* Guard against padding: the union must be exactly 8 bytes. */
_Static_assert(sizeof(msr_t) == sizeof(uint64_t), "Incorrect size for msr_t");
17
18 #if CONFIG(SOC_SETS_MSRS)
19 msr_t soc_msr_read(unsigned int index);
20 void soc_msr_write(unsigned int index, msr_t msr);
21
22 /* Handle MSR references in the other source code */
rdmsr(unsigned int index)23 static __always_inline msr_t rdmsr(unsigned int index)
24 {
25 return soc_msr_read(index);
26 }
27
wrmsr(unsigned int index,msr_t msr)28 static __always_inline void wrmsr(unsigned int index, msr_t msr)
29 {
30 soc_msr_write(index, msr);
31 }
32 #else /* CONFIG_SOC_SETS_MSRS */
33
/* The following functions require __always_inline because of the AMD
 * STOP_CAR_AND_CPU flow: once cache-as-RAM is disabled, the
 * cache-as-RAM stack can no longer be used, so any called functions
 * must be inlined to avoid stack usage. The compiler must also keep
 * local variables in registers rather than allocating them on the
 * stack. With gcc 4.5.0, some functions declared as inline were not
 * being inlined; the __always_inline qualifier forces them to be.
 */
rdmsr(unsigned int index)44 static __always_inline msr_t rdmsr(unsigned int index)
45 {
46 msr_t result;
47 __asm__ __volatile__ (
48 "rdmsr"
49 : "=a" (result.lo), "=d" (result.hi)
50 : "c" (index)
51 );
52 return result;
53 }
54
wrmsr(unsigned int index,msr_t msr)55 static __always_inline void wrmsr(unsigned int index, msr_t msr)
56 {
57 __asm__ __volatile__ (
58 "wrmsr"
59 : /* No outputs */
60 : "c" (index), "a" (msr.lo), "d" (msr.hi)
61 );
62 }
63
64 #endif /* CONFIG_SOC_SETS_MSRS */
65 #endif /* __ASSEMBLER__ */
66 #endif /* CPU_X86_MSR_ACCESS_H */
67