/*
 * Copyright (c) 2015 Google Inc. All rights reserved
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files
 * (the "Software"), to deal in the Software without restriction,
 * including without limitation the rights to use, copy, modify, merge,
 * publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <arch/arm64.h>
#include <kernel/thread.h>
#include <trace.h>

#define LOCAL_TRACE 0

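/* The fpstate most recently loaded into each CPU's FPU registers. */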
static struct fpstate *current_fpstate[SMP_MAX_CPUS];

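/*
 * Load the current thread's fpstate into the FPU. Called lazily from the
 * FPU access trap, so threads that never touch the FPU pay no reload cost.
 */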
static void arm64_fpu_load_state(struct thread *t)
{
    uint cpu = arch_curr_cpu_num();
    struct fpstate *fpstate = &t->arch.fpstate;
    /* Capture the previous owner before arm64_fpu_load_fpstate updates it. */
    uint last_cpu = fpstate->current_cpu;
    struct fpstate *last_fpstate = current_fpstate[cpu];

    if (!arm64_fpu_load_fpstate(fpstate, false)) {
        LTRACEF("cpu %d, thread %s, fpstate already valid\n", cpu, t->name);
        return;
    }
    LTRACEF("cpu %d, thread %s, load fpstate %p, last cpu %d, last fpstate %p\n",
            cpu, t->name, fpstate, last_cpu, last_fpstate);
}

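/*
 * Load fpstate into the FPU registers. Returns false without touching the
 * hardware if fpstate is already the state loaded on this CPU and force is
 * not set; otherwise loads the registers and returns true.
 */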
bool arm64_fpu_load_fpstate(struct fpstate *fpstate, bool force)
{
    uint cpu = arch_curr_cpu_num();

    if (!force && fpstate == current_fpstate[cpu] &&
        fpstate->current_cpu == cpu) {
        return false;
    }
    fpstate->current_cpu = cpu;
    current_fpstate[cpu] = fpstate;

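    /* Restore all 32 SIMD registers, then the control and status registers. */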
    STATIC_ASSERT(sizeof(fpstate->regs) == 16 * 32);
    __asm__ volatile("ldp     q0, q1, [%0, #(0 * 32)]\n"
                     "ldp     q2, q3, [%0, #(1 * 32)]\n"
                     "ldp     q4, q5, [%0, #(2 * 32)]\n"
                     "ldp     q6, q7, [%0, #(3 * 32)]\n"
                     "ldp     q8, q9, [%0, #(4 * 32)]\n"
                     "ldp     q10, q11, [%0, #(5 * 32)]\n"
                     "ldp     q12, q13, [%0, #(6 * 32)]\n"
                     "ldp     q14, q15, [%0, #(7 * 32)]\n"
                     "ldp     q16, q17, [%0, #(8 * 32)]\n"
                     "ldp     q18, q19, [%0, #(9 * 32)]\n"
                     "ldp     q20, q21, [%0, #(10 * 32)]\n"
                     "ldp     q22, q23, [%0, #(11 * 32)]\n"
                     "ldp     q24, q25, [%0, #(12 * 32)]\n"
                     "ldp     q26, q27, [%0, #(13 * 32)]\n"
                     "ldp     q28, q29, [%0, #(14 * 32)]\n"
                     "ldp     q30, q31, [%0, #(15 * 32)]\n"
                     "msr     fpcr, %1\n"
                     "msr     fpsr, %2\n"
                     :: "r"(fpstate),
                     "r"((uint64_t)fpstate->fpcr),
                     "r"((uint64_t)fpstate->fpsr));

    return true;
}

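/* Save the FPU registers into thread t's fpstate. */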
void arm64_fpu_save_state(struct thread *t)
{
    struct fpstate *fpstate = &t->arch.fpstate;
    arm64_fpu_save_fpstate(fpstate);

    LTRACEF("thread %s, fpcr %x, fpsr %x\n", t->name, fpstate->fpcr, fpstate->fpsr);
}

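/* Save the 32 SIMD registers and fpcr/fpsr into fpstate. */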
void arm64_fpu_save_fpstate(struct fpstate *fpstate)
{
    uint64_t fpcr, fpsr;

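    /* Store all 32 SIMD registers, then read back fpcr and fpsr. */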
    __asm__ volatile("stp     q0, q1, [%2, #(0 * 32)]\n"
                     "stp     q2, q3, [%2, #(1 * 32)]\n"
                     "stp     q4, q5, [%2, #(2 * 32)]\n"
                     "stp     q6, q7, [%2, #(3 * 32)]\n"
                     "stp     q8, q9, [%2, #(4 * 32)]\n"
                     "stp     q10, q11, [%2, #(5 * 32)]\n"
                     "stp     q12, q13, [%2, #(6 * 32)]\n"
                     "stp     q14, q15, [%2, #(7 * 32)]\n"
                     "stp     q16, q17, [%2, #(8 * 32)]\n"
                     "stp     q18, q19, [%2, #(9 * 32)]\n"
                     "stp     q20, q21, [%2, #(10 * 32)]\n"
                     "stp     q22, q23, [%2, #(11 * 32)]\n"
                     "stp     q24, q25, [%2, #(12 * 32)]\n"
                     "stp     q26, q27, [%2, #(13 * 32)]\n"
                     "stp     q28, q29, [%2, #(14 * 32)]\n"
                     "stp     q30, q31, [%2, #(15 * 32)]\n"
                     "mrs %0, fpcr\n"
                     "mrs %1, fpsr\n"
                     : "=r"(fpcr), "=r"(fpsr)
                     : "r"(fpstate));

    fpstate->fpcr = fpcr;
    fpstate->fpsr = fpsr;
}

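/*
 * FPU access trap handler, invoked when a thread touches the FPU while
 * access is disabled in CPACR_EL1.
 */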
void arm64_fpu_exception(struct arm64_iframe_long *iframe)
{
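    /*
     * If the trap fired because CPACR_EL1.FPEN (bits [21:20]) is not 0b11,
     * enable FPU access and load the current thread's fpstate.
     */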
    uint64_t cpacr = ARM64_READ_SYSREG(cpacr_el1);
    if (((cpacr >> 20) & 3) != 3) {
        cpacr |= 3 << 20;
        ARM64_WRITE_SYSREG(cpacr_el1, cpacr);
        thread_t *t = get_current_thread();
        if (likely(t))
            arm64_fpu_load_state(t);
        return;
    }
}