/*
 * Copyright (c) 2016 Google Inc. All rights reserved
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files
 * (the "Software"), to deal in the Software without restriction,
 * including without limitation the rights to use, copy, modify, merge,
 * publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <arch/arch_ops.h>
#include <arch/aspace.h>
#include <assert.h>
#include <bits.h>
#include <trace.h>
#include <kernel/thread.h>
#include <inttypes.h>

#define LOCAL_TRACE 0

#if ARCH_ASPACE_HAS_ASID

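/*
 * Asid allocation state shared by all CPUs. Protected by the thread lock
 * (asserted in vmm_asid_activate).
 *
 * last_asid holds the most recently assigned asid; its low hardware bits are
 * the asid programmed into the mmu and its upper bits act as a generation
 * count. old_asid_active is set while some CPU may still be running with an
 * asid from an older generation. active_aspace and active_asid_version track,
 * per CPU, the currently active aspace and the asid generation last activated
 * there.
 */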
static uint64_t last_asid;
static bool old_asid_active;
static struct arch_aspace *active_aspace[SMP_MAX_CPUS];
static uint64_t active_asid_version[SMP_MAX_CPUS];

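/*
 * vmm_asid_current - Check if an aspace has a usable asid for generation @ref
 * @aspace:     Arch aspace struct to check, or %NULL.
 * @ref:        Asid whose upper (generation) bits are compared against.
 * @asid_mask:  Mask covering the hardware asid bits.
 *
 * Return: %true if @aspace is %NULL, or if its asid is allocated and belongs
 * to the same generation as @ref, %false otherwise.
 */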
static bool vmm_asid_current(struct arch_aspace *aspace, uint64_t ref,
                             uint64_t asid_mask)
{
    if (!aspace) {
        return true;
    }
    if (!aspace->asid) {
        LTRACEF("unallocated asid for aspace %p\n", aspace);
        return false;
    }
    if (((aspace->asid ^ ref) & ~asid_mask)) {
        LTRACEF("old asid for aspace %p, 0x%" PRIxASID ", ref 0x%" PRIx64 ", mask 0x%" PRIx64 "\n",
                aspace, aspace->asid, ref, asid_mask);
        return false;
    }
    return true;
}

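/*
 * vmm_asid_allocate - Record aspace as active and assign it an asid if needed
 * @aspace:     Arch aspace struct where asid is stored, or %NULL if no aspace
 *              should be active.
 * @cpu:        CPU that @aspace is being activated on.
 * @asid_mask:  Mask covering the hardware asid bits.
 *
 * Records @aspace as the active aspace on @cpu. Unless @aspace already has a
 * current asid, or is still active on another CPU while old asids may be in
 * use, a new asid is assigned that does not share hardware asid bits with an
 * old asid still active on another CPU.
 */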
static void vmm_asid_allocate(struct arch_aspace *aspace, uint cpu,
                              uint64_t asid_mask)
{
    uint i;

    active_aspace[cpu] = aspace;

    if (vmm_asid_current(aspace, last_asid, asid_mask)) {
        return;
    }

    if (old_asid_active) {
        for (i = 0; i < SMP_MAX_CPUS; i++) {
            if (i == cpu) {
                continue;
            }
            if (active_aspace[i] == aspace) {
                /*
                 * Don't allocate a new asid if aspace is active on another
                 * CPU. That CPU could perform asid-specific tlb invalidate
                 * broadcasts that would be missed if the asid does not match.
                 */
                return;
            }
        }
    }

    aspace->asid = ++last_asid;
    LTRACEF("cpu %d: aspace %p, new asid 0x%" PRIxASID "\n", cpu, aspace, aspace->asid);
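    /*
     * If the hardware asid bits of the new asid wrapped around to zero, a new
     * generation starts and asids from older generations may still be in use
     * on other CPUs.
     */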
    if (!(last_asid & asid_mask)) {
        old_asid_active = true;
    }

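    /*
     * Rescan all CPUs: clear old_asid_active if no CPU is still running an
     * old-generation asid, and make sure the asid just assigned does not
     * share hardware asid bits with an old asid still active elsewhere.
     */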
    if (old_asid_active) {
        i = 0;
        old_asid_active = false;
        while (i < SMP_MAX_CPUS) {
            if (!vmm_asid_current(active_aspace[i], last_asid, asid_mask)) {
                old_asid_active = true;
                if (!((active_aspace[i]->asid ^ last_asid) & asid_mask)) {
                    /* Skip asid in use by other CPUs */
                    aspace->asid = ++last_asid;
                    LTRACEF("cpu %d: conflict asid 0x%" PRIxASID " at cpu %d, new asid 0x%" PRIxASID "\n",
                            cpu, active_aspace[i]->asid, i, aspace->asid);
                    i = 0;
                    continue;
                }
            }
            i++;
        }
    }
}

/**
 * vmm_asid_activate - Activate asid for aspace
 * @aspace:     Arch aspace struct where asid is stored, or %NULL if no aspace
 *              should be active.
 * @asid_bits:  Number of bits in asid used by hardware.
 *
 * Called by arch_mmu_context_switch to allocate and activate an asid for
 * @aspace.
 *
 * Return: %true if TLBs need to be flushed on this cpu, %false otherwise.
 */
bool vmm_asid_activate(struct arch_aspace *aspace, uint asid_bits)
{
    uint cpu = arch_curr_cpu_num();
    uint64_t asid_mask = BIT_MASK(asid_bits);

    DEBUG_ASSERT(thread_lock_held());

    vmm_asid_allocate(aspace, cpu, asid_mask);

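    /*
     * No flush is needed if the asid generation already active on this cpu
     * matches the asid of the aspace being activated (or if aspace is NULL).
     */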
    if (vmm_asid_current(aspace, active_asid_version[cpu], asid_mask)) {
        return false;
    }
    DEBUG_ASSERT(aspace); /* NULL aspace is always current */

    active_asid_version[cpu] = aspace->asid & ~asid_mask;
    LTRACEF("cpu %d: aspace %p, asid 0x%" PRIxASID "\n", cpu, aspace, aspace->asid);

    return true;
}

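/*
 * Illustrative caller sketch: the architecture's context switch path is
 * expected to call vmm_asid_activate with the thread lock held and flush its
 * local TLB when the call returns true. The helper names and the asid-bits
 * constant below are hypothetical; the real arch_mmu_context_switch is
 * architecture specific and not part of this file:
 *
 *     void arch_mmu_context_switch(struct arch_aspace *aspace)
 *     {
 *         if (vmm_asid_activate(aspace, MMU_ARCH_ASID_BITS))
 *             local_tlb_flush_all();     // hypothetical local TLB flush
 *         set_translation_table(aspace); // hypothetical page-table switch
 *     }
 */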
#endif