/*
 * Copyright (c) 2009 Corey Tabaka
 * Copyright (c) 2015-2018 Intel Corporation
 * Copyright (c) 2016 Travis Geiselbrecht
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files
 * (the "Software"), to deal in the Software without restriction,
 * including without limitation the rights to use, copy, modify, merge,
 * publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#pragma once

#include <compiler.h>
#include <cpuid.h>
#include <sys/types.h>
#include <stdlib.h>
#include <stdbool.h>

__BEGIN_CDECLS

#define PFEX_P 0x01   /* page fault error code: page was present */
#define PFEX_W 0x02   /* page fault error code: access was a write */
#define PFEX_U 0x04   /* page fault error code: fault in user mode */
#define PFEX_RSV 0x08 /* page fault error code: reserved bit set */
#define PFEX_I 0x10   /* page fault error code: instruction fetch */
#define X86_8BYTE_MASK 0xFFFFFFFF /* mask for the low 32 bits */
#define X86_CPUID_VERSION_INFO 0x1
#define X86_CPUID_EXTEND_FEATURE 0x7
#define X86_CPUID_ADDR_WIDTH 0x80000008

/* bit positions in CPUID.7.0:EBX */
#define X86_SMEP_BIT 7
#define X86_SMAP_BIT 20

#define X86_CPUID_CLFLUSH_BIT 19    /* CPUID.1:EDX */
#define X86_CPUID_CLFLUSHOPT_BIT 23 /* CPUID.7.0:EBX */
#define X86_CPUID_CLWS_BIT 24       /* CLWB, CPUID.7.0:EBX */

struct x86_32_iframe {
    uint32_t di, si, bp, sp, bx, dx, cx, ax;        // pushed by common handler using pusha
    uint32_t ds, es, fs, gs;                        // pushed by common handler
    uint32_t vector;                                // pushed by stub
    uint32_t err_code;                              // pushed by interrupt or stub
    uint32_t ip, cs, flags;                         // pushed by interrupt
    uint32_t user_sp, user_ss;                      // pushed by interrupt if priv change occurs
};

struct x86_64_iframe {
    uint64_t di, si, bp, bx, dx, cx, ax;            // pushed by common handler
    uint64_t r8, r9, r10, r11, r12, r13, r14, r15;  // pushed by common handler
    uint64_t vector;                                // pushed by stub
    uint64_t err_code;                              // pushed by interrupt or stub
    uint64_t ip, cs, flags;                         // pushed by interrupt
    uint64_t user_sp, user_ss;                      // pushed by interrupt if priv change occurs
};

#if ARCH_X86_32
typedef struct x86_32_iframe x86_iframe_t;
#elif ARCH_X86_64
typedef struct x86_64_iframe x86_iframe_t;
#endif

struct x86_32_context_switch_frame {
    uint32_t edi, esi, ebp, esp, ebx, edx, ecx, eax;
    uint32_t eflags;
    uint32_t eip;
};

struct x86_64_context_switch_frame {
    uint64_t r15, r14, r13, r12;
    uint64_t rbp;
    uint64_t rbx;
    uint64_t rflags;
    uint64_t rip;
};

void x86_64_context_switch(vaddr_t *oldsp, vaddr_t newsp);

/*
 * x86-32 TSS structure
 */
typedef struct {
    uint16_t backlink, __blh;
    uint32_t esp0;
    uint16_t ss0, __ss0h;
    uint32_t esp1;
    uint16_t ss1, __ss1h;
    uint32_t esp2;
    uint16_t ss2, __ss2h;
    uint32_t cr3;
    uint32_t eip;
    uint32_t eflags;
    uint32_t eax, ecx, edx, ebx;
    uint32_t esp, ebp, esi, edi;
    uint16_t es, __esh;
    uint16_t cs, __csh;
    uint16_t ss, __ssh;
    uint16_t ds, __dsh;
    uint16_t fs, __fsh;
    uint16_t gs, __gsh;
    uint16_t ldt, __ldth;
    uint16_t trace, bitmap;

    uint8_t tss_bitmap[8192];
} __PACKED tss_32_t;

/*
 * x86-64 TSS structure
 */
typedef struct {
    uint32_t rsvd0;
    uint64_t rsp0;
    uint64_t rsp1;
    uint64_t rsp2;
    uint32_t rsvd1;
    uint32_t rsvd2;
    uint64_t ist1;
    uint64_t ist2;
    uint64_t ist3;
    uint64_t ist4;
    uint64_t ist5;
    uint64_t ist6;
    uint64_t ist7;
    uint32_t rsvd3;
    uint32_t rsvd4;
    uint16_t rsvd5;
    uint16_t iomap_base;
} __PACKED tss_64_t;

#if ARCH_X86_32
typedef tss_32_t tss_t;
#elif ARCH_X86_64
typedef tss_64_t tss_t;
#endif

#define X86_CR0_PE 0x00000001 /* protected mode enable */
#define X86_CR0_MP 0x00000002 /* monitor coprocessor */
#define X86_CR0_EM 0x00000004 /* emulation */
#define X86_CR0_TS 0x00000008 /* task switched */
#define X86_CR0_NE 0x00000020 /* enable x87 exceptions */
#define X86_CR0_WP 0x00010000 /* supervisor write protect */
#define X86_CR0_NW 0x20000000 /* not write-through */
#define X86_CR0_CD 0x40000000 /* cache disable */
#define X86_CR0_PG 0x80000000 /* enable paging */
#define X86_CR4_PAE 0x00000020 /* PAE paging */
#define X86_CR4_OSFXSR 0x00000200 /* OS supports fxsave */
#define X86_CR4_OSXMMEXPT 0x00000400 /* OS supports XMM exceptions */
#define X86_CR4_FSGSBASE 0x00010000 /* FSGSBASE enable bit */
#define X86_CR4_OSXSAVE 0x00040000 /* OS supports xsave */
#define X86_CR4_SMEP 0x00100000 /* enable SMEP protection */
#define X86_CR4_SMAP 0x00200000 /* enable SMAP protection */
#define X86_CR4_PSE 0xffffffef /* mask to clear the PSE bit in CR4 */
#define X86_EFER_NXE 0x00000800 /* enable the execute-disable bit */
#define X86_MSR_EFER 0xc0000080 /* EFER model-specific register id */
#define X86_MSR_FS_BASE 0xc0000100 /* base address of FS */
#define X86_MSR_GS_BASE 0xc0000101 /* base address of GS */
#define X86_MSR_KRNL_GS_BASE 0xc0000102 /* swap target of GS base (swapgs) */
static inline void cpuid(uint32_t leaf,
                         uint32_t *eax,
                         uint32_t *ebx,
                         uint32_t *ecx,
                         uint32_t *edx)
{
    __cpuid(leaf, *eax, *ebx, *ecx, *edx);
}

static inline void cpuid_count(uint32_t leaf,
                               uint32_t sub_leaf,
                               uint32_t *eax,
                               uint32_t *ebx,
                               uint32_t *ecx,
                               uint32_t *edx)
{
    __cpuid_count(leaf, sub_leaf, *eax, *ebx, *ecx, *edx);
}
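
/*
 * Example (sketch): read the CPUID version-information leaf. The
 * field layout of the output registers follows the SDM; only the
 * wrapper call itself is shown here.
 *
 *   uint32_t eax, ebx, ecx, edx;
 *   cpuid(X86_CPUID_VERSION_INFO, &eax, &ebx, &ecx, &edx);
 */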

static inline bool check_smep_avail(void)
{
    uint32_t reg_b;
    uint32_t unused;

    cpuid_count(X86_CPUID_EXTEND_FEATURE,
                0x0,
                &unused,
                &reg_b,
                &unused,
                &unused);

    return !!((reg_b >> X86_SMEP_BIT) & 0x1);
}

static inline bool check_smap_avail(void)
{
    uint32_t reg_b;
    uint32_t unused;

    cpuid_count(X86_CPUID_EXTEND_FEATURE,
                0x0,
                &unused,
                &reg_b,
                &unused,
                &unused);

    return !!((reg_b >> X86_SMAP_BIT) & 0x1);
}
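
/*
 * Example (sketch): turn on SMEP/SMAP in CR4 when the CPU reports
 * support. x86_get_cr4()/x86_set_cr4() are defined per-architecture
 * below.
 *
 *   if (check_smep_avail())
 *       x86_set_cr4(x86_get_cr4() | X86_CR4_SMEP);
 *   if (check_smap_avail())
 *       x86_set_cr4(x86_get_cr4() | X86_CR4_SMAP);
 */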

#if ARCH_X86_32
static inline void set_in_cr0(uint32_t mask)
{
    __asm__ __volatile__ (
        "movl %%cr0, %%eax \n\t"
        "orl %0, %%eax     \n\t"
        "movl %%eax, %%cr0 \n\t"
        : : "irg" (mask)
        : "ax");
}

static inline void clear_in_cr0(uint32_t mask)
{
    __asm__ __volatile__ (
        "movl %%cr0, %%eax \n\t"
        "andl %0, %%eax    \n\t"
        "movl %%eax, %%cr0 \n\t"
        : : "irg" (~mask)
        : "ax");
}

static inline void x86_clts(void) { __asm__ __volatile__ ("clts"); }
static inline void x86_hlt(void) { __asm__ __volatile__ ("hlt"); }
static inline void x86_sti(void) { __asm__ __volatile__ ("sti"); }
static inline void x86_cli(void) { __asm__ __volatile__ ("cli"); }
static inline void x86_ltr(uint16_t sel)
{
    __asm__ __volatile__ ("ltr %%ax" :: "a" (sel));
}

static inline uint32_t x86_get_cr2(void)
{
    uint32_t rv;

    __asm__ __volatile__ (
        "movl %%cr2, %0"
        : "=r" (rv)
    );

    return rv;
}

typedef uint32_t x86_flags_t;

static inline uint32_t x86_save_flags(void)
{
    unsigned int state;

    __asm__ volatile(
        "pushfl;"
        "popl %0"
        : "=rm" (state)
        :: "memory");

    return state;
}

static inline void x86_restore_flags(uint32_t flags)
{
    __asm__ volatile(
        "pushl %0;"
        "popfl"
        :: "g" (flags)
        : "memory", "cc");
}
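
/*
 * Example (sketch): guard a short critical section by saving EFLAGS,
 * masking interrupts, and restoring the saved state afterwards.
 *
 *   x86_flags_t state = x86_save_flags();
 *   x86_cli();
 *   ... critical section ...
 *   x86_restore_flags(state);
 */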

#define rdtsc(low,high) \
    __asm__ __volatile__("rdtsc" : "=a" (low), "=d" (high))

#define rdtscl(low) \
    __asm__ __volatile__("rdtsc" : "=a" (low) : : "edx")

#define rdtscll(val) \
    __asm__ __volatile__("rdtsc" : "=A" (val))

static inline uint8_t inp(uint16_t _port)
{
    uint8_t rv;
    __asm__ __volatile__ ("inb %1, %0"
                          : "=a" (rv)
                          : "d" (_port));
    return rv;
}

static inline uint16_t inpw(uint16_t _port)
{
    uint16_t rv;
    __asm__ __volatile__ ("inw %1, %0"
                          : "=a" (rv)
                          : "d" (_port));
    return rv;
}

static inline uint32_t inpd(uint16_t _port)
{
    uint32_t rv;
    __asm__ __volatile__ ("inl %1, %0"
                          : "=a" (rv)
                          : "d" (_port));
    return rv;
}

static inline void outp(uint16_t _port, uint8_t _data)
{
    __asm__ __volatile__ ("outb %1, %0"
                          :
                          : "d" (_port),
                            "a" (_data));
}

static inline void outpw(uint16_t _port, uint16_t _data)
{
    __asm__ __volatile__ ("outw %1, %0"
                          :
                          : "d" (_port),
                            "a" (_data));
}

static inline void outpd(uint16_t _port, uint32_t _data)
{
    __asm__ __volatile__ ("outl %1, %0"
                          :
                          : "d" (_port),
                            "a" (_data));
}
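
/*
 * Example (sketch): write and read back the scratch register of a
 * 16550-style UART at the conventional COM1 base 0x3f8 (an
 * illustrative port number, not something this header defines).
 *
 *   outp(0x3f8 + 7, 0xaa);
 *   uint8_t v = inp(0x3f8 + 7);
 */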

static inline void inprep(uint16_t _port, uint8_t *_buffer, uint32_t _reads)
{
    __asm__ __volatile__ ("pushal \n\t"
                          "pushfl \n\t"
                          "cli \n\t"
                          "cld \n\t"
                          "rep insb \n\t"
                          "popfl \n\t"
                          "popal"
                          :
                          : "d" (_port),
                            "D" (_buffer),
                            "c" (_reads));
}

static inline void outprep(uint16_t _port, uint8_t *_buffer, uint32_t _writes)
{
    __asm__ __volatile__ ("pushal \n\t"
                          "pushfl \n\t"
                          "cli \n\t"
                          "cld \n\t"
                          "rep outsb \n\t"
                          "popfl \n\t"
                          "popal"
                          :
                          : "d" (_port),
                            "S" (_buffer),
                            "c" (_writes));
}

static inline void inpwrep(uint16_t _port, uint16_t *_buffer, uint32_t _reads)
{
    __asm__ __volatile__ ("pushal \n\t"
                          "pushfl \n\t"
                          "cli \n\t"
                          "cld \n\t"
                          "rep insw \n\t"
                          "popfl \n\t"
                          "popal"
                          :
                          : "d" (_port),
                            "D" (_buffer),
                            "c" (_reads));
}

static inline void outpwrep(uint16_t _port, uint16_t *_buffer,
                            uint32_t _writes)
{
    __asm__ __volatile__ ("pushal \n\t"
                          "pushfl \n\t"
                          "cli \n\t"
                          "cld \n\t"
                          "rep outsw \n\t"
                          "popfl \n\t"
                          "popal"
                          :
                          : "d" (_port),
                            "S" (_buffer),
                            "c" (_writes));
}

static inline void inpdrep(uint16_t _port, uint32_t *_buffer,
                           uint32_t _reads)
{
    __asm__ __volatile__ ("pushal \n\t"
                          "pushfl \n\t"
                          "cli \n\t"
                          "cld \n\t"
                          "rep insl \n\t"
                          "popfl \n\t"
                          "popal"
                          :
                          : "d" (_port),
                            "D" (_buffer),
                            "c" (_reads));
}

static inline void outpdrep(uint16_t _port, uint32_t *_buffer,
                            uint32_t _writes)
{
    __asm__ __volatile__ ("pushal \n\t"
                          "pushfl \n\t"
                          "cli \n\t"
                          "cld \n\t"
                          "rep outsl \n\t"
                          "popfl \n\t"
                          "popal"
                          :
                          : "d" (_port),
                            "S" (_buffer),
                            "c" (_writes));
}

static inline uint64_t read_msr(uint32_t msr_id)
{
    uint64_t msr_read_val = 0;
    uint32_t low_val = 0;
    uint32_t high_val = 0;

    __asm__ __volatile__ (
        "rdmsr \n\t"
        : "=a" (low_val), "=d" (high_val)
        : "c" (msr_id));

    msr_read_val = high_val;
    msr_read_val = (msr_read_val << 32) | low_val;

    return msr_read_val;
}

static inline void write_msr(uint32_t msr_id, uint64_t msr_write_val)
{
    uint32_t low_val = (uint32_t)msr_write_val;
    uint32_t high_val = (uint32_t)(msr_write_val >> 32);

    __asm__ __volatile__ (
        "wrmsr \n\t"
        : : "c" (msr_id), "a" (low_val), "d" (high_val));
}
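
/*
 * Example (sketch): set the execute-disable enable bit in EFER via a
 * read-modify-write of the MSR.
 *
 *   write_msr(X86_MSR_EFER, read_msr(X86_MSR_EFER) | X86_EFER_NXE);
 */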

static inline uint32_t x86_get_cr3(void)
{
    uint32_t rv;

    __asm__ __volatile__ (
        "mov %%cr3, %0"
        : "=r" (rv));
    return rv;
}

static inline void x86_set_cr3(uint32_t in_val)
{
    __asm__ __volatile__ (
        "mov %0, %%cr3 \n\t"
        :
        : "r" (in_val));
}

static inline uint32_t x86_get_cr0(void)
{
    uint32_t rv;

    __asm__ __volatile__ (
        "mov %%cr0, %0 \n\t"
        : "=r" (rv));
    return rv;
}

static inline uint32_t x86_get_cr4(void)
{
    uint32_t rv;

    __asm__ __volatile__ (
        "mov %%cr4, %0 \n\t"
        : "=r" (rv));
    return rv;
}

static inline void x86_set_cr0(uint32_t in_val)
{
    __asm__ __volatile__ (
        "mov %0, %%cr0 \n\t"
        :
        : "r" (in_val));
}

static inline void x86_set_cr4(uint32_t in_val)
{
    __asm__ __volatile__ (
        "mov %0, %%cr4 \n\t"
        :
        : "r" (in_val));
}

static inline uint32_t x86_get_address_width(void)
{
    uint32_t reg_a;
    uint32_t unused;

    cpuid(X86_CPUID_ADDR_WIDTH, &reg_a, &unused, &unused, &unused);

    /* Extract bits 15:8 (linear address width) from eax */
    return ((reg_a >> 8) & 0xff);
}

static inline bool x86_is_paging_enabled(void)
{
    return (x86_get_cr0() & X86_CR0_PG) != 0;
}

static inline bool x86_is_PAE_enabled(void)
{
    if (!x86_is_paging_enabled())
        return false;

    if (!(x86_get_cr4() & X86_CR4_PAE))
        return false;

    return true;
}

#endif // ARCH_X86_32

#if ARCH_X86_64

static inline void set_in_cr0(uint32_t mask)
{
    /* Moves to/from control registers must use 64-bit operands in
     * long mode, so widen the mask and operate on %rax. */
    uint64_t value = mask;
    __asm__ __volatile__ (
        "movq %%cr0, %%rax \n\t"
        "orq %0, %%rax     \n\t"
        "movq %%rax, %%cr0 \n\t"
        : : "r" (value)
        : "rax");
}

static inline void clear_in_cr0(uint32_t mask)
{
    /* Widen before inverting so the high 32 bits of CR0 are kept. */
    uint64_t value = ~(uint64_t)mask;
    __asm__ __volatile__ (
        "movq %%cr0, %%rax \n\t"
        "andq %0, %%rax    \n\t"
        "movq %%rax, %%cr0 \n\t"
        : : "r" (value)
        : "rax");
}

static inline void x86_clts(void) { __asm__ __volatile__ ("clts"); }
static inline void x86_hlt(void) { __asm__ __volatile__ ("hlt"); }
static inline void x86_sti(void) { __asm__ __volatile__ ("sti"); }
static inline void x86_cli(void) { __asm__ __volatile__ ("cli"); }
static inline void x86_ltr(uint16_t sel)
{
    __asm__ __volatile__ ("ltr %%ax" :: "a" (sel));
}

static inline uint64_t x86_get_cr2(void)
{
    uint64_t rv;

    __asm__ __volatile__ (
        "movq %%cr2, %0"
        : "=r" (rv)
    );

    return rv;
}

typedef uint64_t x86_flags_t;

static inline uint64_t x86_save_flags(void)
{
    uint64_t state;

    __asm__ volatile(
        "pushfq;"
        "popq %0"
        : "=rm" (state)
        :: "memory");

    return state;
}

static inline void x86_restore_flags(uint64_t flags)
{
    __asm__ volatile(
        "pushq %0;"
        "popfq"
        :: "g" (flags)
        : "memory", "cc");
}

#define rdtsc(low,high) \
    __asm__ __volatile__("rdtsc" : "=a" (low), "=d" (high))

#define rdtscl(low) \
    __asm__ __volatile__("rdtsc" : "=a" (low) : : "edx")

/* On x86-64 the "=A" constraint does not name the edx:eax pair, so
 * the 64-bit value is assembled from the two halves explicitly. */
#define rdtscll(val) do { \
        uint32_t _tsc_lo, _tsc_hi; \
        __asm__ __volatile__("rdtsc" : "=a" (_tsc_lo), "=d" (_tsc_hi)); \
        (val) = ((uint64_t)_tsc_hi << 32) | _tsc_lo; \
    } while (0)

static inline uint8_t inp(uint16_t _port)
{
    uint8_t rv;
    __asm__ __volatile__ ("inb %1, %0"
                          : "=a" (rv)
                          : "d" (_port));
    return rv;
}

static inline uint16_t inpw(uint16_t _port)
{
    uint16_t rv;
    __asm__ __volatile__ ("inw %1, %0"
                          : "=a" (rv)
                          : "d" (_port));
    return rv;
}

static inline uint32_t inpd(uint16_t _port)
{
    uint32_t rv;
    __asm__ __volatile__ ("inl %1, %0"
                          : "=a" (rv)
                          : "d" (_port));
    return rv;
}

static inline void outp(uint16_t _port, uint8_t _data)
{
    __asm__ __volatile__ ("outb %1, %0"
                          :
                          : "d" (_port),
                            "a" (_data));
}

static inline void outpw(uint16_t _port, uint16_t _data)
{
    __asm__ __volatile__ ("outw %1, %0"
                          :
                          : "d" (_port),
                            "a" (_data));
}

static inline void outpd(uint16_t _port, uint32_t _data)
{
    __asm__ __volatile__ ("outl %1, %0"
                          :
                          : "d" (_port),
                            "a" (_data));
}

static inline void inprep(uint16_t _port, uint8_t *_buffer, uint32_t _reads)
{
    __asm__ __volatile__ ("pushfq \n\t"
                          "cli \n\t"
                          "cld \n\t"
                          "rep insb \n\t"
                          "popfq \n\t"
                          :
                          : "d" (_port),
                            "D" (_buffer),
                            "c" (_reads));
}

static inline void outprep(uint16_t _port, uint8_t *_buffer, uint32_t _writes)
{
    __asm__ __volatile__ ("pushfq \n\t"
                          "cli \n\t"
                          "cld \n\t"
                          "rep outsb \n\t"
                          "popfq \n\t"
                          :
                          : "d" (_port),
                            "S" (_buffer),
                            "c" (_writes));
}

static inline void inpwrep(uint16_t _port, uint16_t *_buffer, uint32_t _reads)
{
    __asm__ __volatile__ ("pushfq \n\t"
                          "cli \n\t"
                          "cld \n\t"
                          "rep insw \n\t"
                          "popfq \n\t"
                          :
                          : "d" (_port),
                            "D" (_buffer),
                            "c" (_reads));
}

static inline void outpwrep(uint16_t _port, uint16_t *_buffer,
                            uint32_t _writes)
{
    __asm__ __volatile__ ("pushfq \n\t"
                          "cli \n\t"
                          "cld \n\t"
                          "rep outsw \n\t"
                          "popfq \n\t"
                          :
                          : "d" (_port),
                            "S" (_buffer),
                            "c" (_writes));
}

static inline void inpdrep(uint16_t _port, uint32_t *_buffer,
                           uint32_t _reads)
{
    __asm__ __volatile__ ("pushfq \n\t"
                          "cli \n\t"
                          "cld \n\t"
                          "rep insl \n\t"
                          "popfq \n\t"
                          :
                          : "d" (_port),
                            "D" (_buffer),
                            "c" (_reads));
}

static inline void outpdrep(uint16_t _port, uint32_t *_buffer,
                            uint32_t _writes)
{
    __asm__ __volatile__ ("pushfq \n\t"
                          "cli \n\t"
                          "cld \n\t"
                          "rep outsl \n\t"
                          "popfq \n\t"
                          :
                          : "d" (_port),
                            "S" (_buffer),
                            "c" (_writes));
}

static inline uint64_t read_msr(uint32_t msr_id)
{
    uint64_t msr_read_val = 0;
    uint32_t low_val = 0;
    uint32_t high_val = 0;

    __asm__ __volatile__ (
        "rdmsr \n\t"
        : "=a" (low_val), "=d" (high_val)
        : "c" (msr_id));

    msr_read_val = high_val;
    msr_read_val = (msr_read_val << 32) | low_val;

    return msr_read_val;
}

static inline void write_msr(uint32_t msr_id, uint64_t msr_write_val)
{
    uint32_t low_val = (uint32_t)msr_write_val;
    uint32_t high_val = (uint32_t)(msr_write_val >> 32);

    __asm__ __volatile__ (
        "wrmsr \n\t"
        : : "c" (msr_id), "a" (low_val), "d" (high_val));
}

static inline uint64_t x86_get_cr3(void)
{
    uint64_t rv;

    __asm__ __volatile__ (
        "movq %%cr3, %0"
        : "=r" (rv));
    return rv;
}

static inline void x86_set_cr3(uint64_t in_val)
{
    __asm__ __volatile__ (
        "movq %0, %%cr3 \n\t"
        :
        : "r" (in_val));
}

static inline uint64_t x86_get_cr4(void)
{
    uint64_t rv;

    __asm__ __volatile__ (
        "movq %%cr4, %0 \n\t"
        : "=r" (rv));
    return rv;
}

static inline void x86_set_cr4(uint64_t in_val)
{
    __asm__ __volatile__ (
        "movq %0, %%cr4 \n\t"
        :
        : "r" (in_val));
}

static inline uint64_t x86_get_cr0(void)
{
    uint64_t rv;

    __asm__ __volatile__ (
        "movq %%cr0, %0 \n\t"
        : "=r" (rv));
    return rv;
}

static inline void x86_set_cr0(uint64_t in_val)
{
    __asm__ __volatile__ (
        "movq %0, %%cr0 \n\t"
        :
        : "r" (in_val));
}

static inline uint32_t x86_get_address_width(void)
{
    uint32_t reg_a;
    uint32_t unused;

    cpuid(X86_CPUID_ADDR_WIDTH, &reg_a, &unused, &unused, &unused);

    /*
     * Extract bits 15:0 from the eax register:
     *   bits 07-00: physical address bits
     *   bits 15-08: linear address bits
     */
    return (reg_a & 0x0000ffff);
}

static inline bool check_fsgsbase_avail(void)
{
    uint32_t reg_b;
    uint32_t unused;

    cpuid_count(X86_CPUID_EXTEND_FEATURE,
                0x0,
                &unused,
                &reg_b,
                &unused,
                &unused);

    return (reg_b & 0x1);
}
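
/*
 * Example (sketch): enable the RDFSBASE/WRFSBASE instruction family
 * once the CPU reports FSGSBASE support.
 *
 *   if (check_fsgsbase_avail())
 *       x86_set_cr4(x86_get_cr4() | X86_CR4_FSGSBASE);
 */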

static inline uint64_t x86_read_gs_with_offset(uintptr_t offset)
{
    uint64_t ret;
    __asm__ __volatile__ (
        "movq %%gs:%1, %0"
        : "=r" (ret)
        : "m" (*(uint64_t *)offset));
    return ret;
}

static inline void x86_write_gs_with_offset(uint64_t offset, uint64_t val)
{
    __asm__ __volatile__ (
        "movq %0, %%gs:%1"
        :
        : "ir" (val), "m" (*(uint64_t *)offset)
        : "memory");
}

static inline void x86_allow_explicit_smap(void)
{
    if (x86_get_cr4() & X86_CR4_SMAP) {
        __asm__ __volatile__ ("stac" ::: "memory");
    }
}

static inline void x86_disallow_explicit_smap(void)
{
    if (x86_get_cr4() & X86_CR4_SMAP) {
        __asm__ __volatile__ ("clac" ::: "memory");
    }
}
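
/*
 * Example (sketch): bracket an explicit user-memory access with
 * stac/clac so it is permitted while SMAP is enabled.
 *
 *   x86_allow_explicit_smap();
 *   ... copy to or from user memory ...
 *   x86_disallow_explicit_smap();
 */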

void x86_syscall(void);
void x86_check_and_fix_gs(void);

#endif // ARCH_X86_64

__END_CDECLS