// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains core hardware tag-based KASAN code.
 *
 * Copyright (c) 2020 Google, Inc.
 * Author: Andrey Konovalov <[email protected]>
 */

#define pr_fmt(fmt) "kasan: " fmt

#include <kunit/visibility.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/static_key.h>
#include <linux/string.h>
#include <linux/string_choices.h>
#include <linux/types.h>
#include <linux/vmalloc.h>

#include "kasan.h"

enum kasan_arg {
	KASAN_ARG_DEFAULT,
	KASAN_ARG_OFF,
	KASAN_ARG_ON,
};

enum kasan_arg_mode {
	KASAN_ARG_MODE_DEFAULT,
	KASAN_ARG_MODE_SYNC,
	KASAN_ARG_MODE_ASYNC,
	KASAN_ARG_MODE_ASYMM,
};

enum kasan_arg_vmalloc {
	KASAN_ARG_VMALLOC_DEFAULT,
	KASAN_ARG_VMALLOC_OFF,
	KASAN_ARG_VMALLOC_ON,
};

static enum kasan_arg kasan_arg __ro_after_init;
static enum kasan_arg_mode kasan_arg_mode __ro_after_init;
static enum kasan_arg_vmalloc kasan_arg_vmalloc __initdata;

/*
 * Whether KASAN is enabled at all.
 * The value remains false until KASAN is initialized by kasan_init_hw_tags().
 */
DEFINE_STATIC_KEY_FALSE(kasan_flag_enabled);
EXPORT_SYMBOL(kasan_flag_enabled);

/*
 * Whether the selected mode is synchronous, asynchronous, or asymmetric.
 * Defaults to KASAN_MODE_SYNC.
 */
enum kasan_mode kasan_mode __ro_after_init;
EXPORT_SYMBOL_GPL(kasan_mode);

/* Whether to enable vmalloc tagging. */
#ifdef CONFIG_KASAN_VMALLOC
DEFINE_STATIC_KEY_TRUE(kasan_flag_vmalloc);
#else
DEFINE_STATIC_KEY_FALSE(kasan_flag_vmalloc);
#endif
EXPORT_SYMBOL_GPL(kasan_flag_vmalloc);

#define PAGE_ALLOC_SAMPLE_DEFAULT	1
#define PAGE_ALLOC_SAMPLE_ORDER_DEFAULT	3

/*
 * Sampling interval of page_alloc allocation (un)poisoning.
 * Defaults to no sampling.
 */
unsigned long kasan_page_alloc_sample = PAGE_ALLOC_SAMPLE_DEFAULT;

/*
 * Minimum order of page_alloc allocations to be affected by sampling.
 * The default value is chosen to match both
 * PAGE_ALLOC_COSTLY_ORDER and SKB_FRAG_PAGE_ORDER.
 */
unsigned int kasan_page_alloc_sample_order = PAGE_ALLOC_SAMPLE_ORDER_DEFAULT;

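/*
 * Per-CPU countdown consumed by kasan_sample_page_alloc() (see kasan.h):
 * once it drops below zero, the current allocation is (un)poisoned and the
 * counter is reset to kasan_page_alloc_sample - 1, so roughly one in every
 * kasan_page_alloc_sample eligible allocations gets checked.
 */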
DEFINE_PER_CPU(long, kasan_page_alloc_skip);

/* kasan=off/on */
static int __init early_kasan_flag(char *arg)
{
	if (!arg)
		return -EINVAL;

	if (!strcmp(arg, "off"))
		kasan_arg = KASAN_ARG_OFF;
	else if (!strcmp(arg, "on"))
		kasan_arg = KASAN_ARG_ON;
	else
		return -EINVAL;

	return 0;
}
early_param("kasan", early_kasan_flag);

/* kasan.mode=sync/async/asymm */
static int __init early_kasan_mode(char *arg)
{
	if (!arg)
		return -EINVAL;

	if (!strcmp(arg, "sync"))
		kasan_arg_mode = KASAN_ARG_MODE_SYNC;
	else if (!strcmp(arg, "async"))
		kasan_arg_mode = KASAN_ARG_MODE_ASYNC;
	else if (!strcmp(arg, "asymm"))
		kasan_arg_mode = KASAN_ARG_MODE_ASYMM;
	else
		return -EINVAL;

	return 0;
}
early_param("kasan.mode", early_kasan_mode);

/* kasan.vmalloc=off/on */
static int __init early_kasan_flag_vmalloc(char *arg)
{
	if (!arg)
		return -EINVAL;

	if (!IS_ENABLED(CONFIG_KASAN_VMALLOC))
		return 0;

	if (!strcmp(arg, "off"))
		kasan_arg_vmalloc = KASAN_ARG_VMALLOC_OFF;
	else if (!strcmp(arg, "on"))
		kasan_arg_vmalloc = KASAN_ARG_VMALLOC_ON;
	else
		return -EINVAL;

	return 0;
}
early_param("kasan.vmalloc", early_kasan_flag_vmalloc);

static inline const char *kasan_mode_info(void)
{
	if (kasan_mode == KASAN_MODE_ASYNC)
		return "async";
	else if (kasan_mode == KASAN_MODE_ASYMM)
		return "asymm";
	else
		return "sync";
}

/* kasan.page_alloc.sample=<sampling interval> */
static int __init early_kasan_flag_page_alloc_sample(char *arg)
{
	int rv;

	if (!arg)
		return -EINVAL;

	rv = kstrtoul(arg, 0, &kasan_page_alloc_sample);
	if (rv)
		return rv;

	if (!kasan_page_alloc_sample || kasan_page_alloc_sample > LONG_MAX) {
		kasan_page_alloc_sample = PAGE_ALLOC_SAMPLE_DEFAULT;
		return -EINVAL;
	}

	return 0;
}
early_param("kasan.page_alloc.sample", early_kasan_flag_page_alloc_sample);

/* kasan.page_alloc.sample.order=<minimum page order> */
static int __init early_kasan_flag_page_alloc_sample_order(char *arg)
{
	int rv;

	if (!arg)
		return -EINVAL;

	rv = kstrtouint(arg, 0, &kasan_page_alloc_sample_order);
	if (rv)
		return rv;

	if (kasan_page_alloc_sample_order > INT_MAX) {
		kasan_page_alloc_sample_order = PAGE_ALLOC_SAMPLE_ORDER_DEFAULT;
		return -EINVAL;
	}

	return 0;
}
early_param("kasan.page_alloc.sample.order", early_kasan_flag_page_alloc_sample_order);
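
/*
 * Example (illustrative): booting with
 *   kasan=on kasan.mode=async kasan.page_alloc.sample=16
 * enables hardware tag-based KASAN in asynchronous mode and limits
 * page_alloc (un)poisoning to roughly one in every 16 allocations of order
 * kasan.page_alloc.sample.order (3 by default) or higher. See
 * Documentation/dev-tools/kasan.rst for the full parameter reference.
 */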

/*
 * kasan_init_hw_tags_cpu() is called for each CPU.
 * Not marked as __init as a CPU can be hot-plugged after boot.
 */
void kasan_init_hw_tags_cpu(void)
{
	/*
	 * There's no need to check that the hardware is MTE-capable here,
	 * as this function is only called for MTE-capable hardware.
	 */

	/*
	 * If KASAN is disabled via command line, don't initialize it.
	 * When this function is called, kasan_flag_enabled is not yet
	 * set by kasan_init_hw_tags(). Thus, check kasan_arg instead.
	 */
	if (kasan_arg == KASAN_ARG_OFF)
		return;

	/*
	 * Enable async or asymm modes only when explicitly requested
	 * through the command line.
	 */
	kasan_enable_hw_tags();
}

/* kasan_init_hw_tags() is called once on boot CPU. */
void __init kasan_init_hw_tags(void)
{
	/* If hardware doesn't support MTE, don't initialize KASAN. */
	if (!system_supports_mte())
		return;

	/* If KASAN is disabled via command line, don't initialize it. */
	if (kasan_arg == KASAN_ARG_OFF)
		return;

	switch (kasan_arg_mode) {
	case KASAN_ARG_MODE_DEFAULT:
		/* Default is specified by kasan_mode definition. */
		break;
	case KASAN_ARG_MODE_SYNC:
		kasan_mode = KASAN_MODE_SYNC;
		break;
	case KASAN_ARG_MODE_ASYNC:
		kasan_mode = KASAN_MODE_ASYNC;
		break;
	case KASAN_ARG_MODE_ASYMM:
		kasan_mode = KASAN_MODE_ASYMM;
		break;
	}

	switch (kasan_arg_vmalloc) {
	case KASAN_ARG_VMALLOC_DEFAULT:
		/* Default is specified by kasan_flag_vmalloc definition. */
		break;
	case KASAN_ARG_VMALLOC_OFF:
		static_branch_disable(&kasan_flag_vmalloc);
		break;
	case KASAN_ARG_VMALLOC_ON:
		static_branch_enable(&kasan_flag_vmalloc);
		break;
	}

	kasan_init_tags();

	/* KASAN is now initialized, enable it. */
	static_branch_enable(&kasan_flag_enabled);

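	/*
	 * With the defaults this prints something like:
	 *   kasan: KernelAddressSanitizer initialized (hw-tags, mode=sync, vmalloc=on, stacktrace=on)
	 * (the "kasan: " prefix comes from pr_fmt above).
	 */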
	pr_info("KernelAddressSanitizer initialized (hw-tags, mode=%s, vmalloc=%s, stacktrace=%s)\n",
		kasan_mode_info(),
		str_on_off(kasan_vmalloc_enabled()),
		str_on_off(kasan_stack_collection_enabled()));
}

#ifdef CONFIG_KASAN_VMALLOC

static void unpoison_vmalloc_pages(const void *addr, u8 tag)
{
	struct vm_struct *area;
	int i;

	/*
	 * As hardware tag-based KASAN only tags VM_ALLOC vmalloc allocations
	 * (see the comment in __kasan_unpoison_vmalloc), all of the pages
	 * should belong to a single area.
	 */
	area = find_vm_area((void *)addr);
	if (WARN_ON(!area))
		return;

	for (i = 0; i < area->nr_pages; i++) {
		struct page *page = area->pages[i];

		page_kasan_tag_set(page, tag);
	}
}

static void init_vmalloc_pages(const void *start, unsigned long size)
{
	const void *addr;

	for (addr = start; addr < start + size; addr += PAGE_SIZE) {
		struct page *page = vmalloc_to_page(addr);

		clear_highpage_kasan_tagged(page);
	}
}

void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
				kasan_vmalloc_flags_t flags)
{
	u8 tag;
	unsigned long redzone_start, redzone_size;

	if (!kasan_vmalloc_enabled()) {
		if (flags & KASAN_VMALLOC_INIT)
			init_vmalloc_pages(start, size);
		return (void *)start;
	}

	/*
	 * Don't tag non-VM_ALLOC mappings, as:
	 *
	 * 1. Unlike the software KASAN modes, hardware tag-based KASAN only
	 *    supports tagging physical memory. Therefore, it can only tag a
	 *    single mapping of normal physical pages.
	 * 2. Hardware tag-based KASAN can only tag memory mapped with special
	 *    mapping protection bits, see arch_vmap_pgprot_tagged().
	 *    As non-VM_ALLOC mappings can be mapped outside of vmalloc code,
	 *    providing these bits would require tracking all non-VM_ALLOC
	 *    mappers.
	 *
	 * Thus, for VM_ALLOC mappings, hardware tag-based KASAN only tags
	 * the first virtual mapping, which is created by vmalloc().
	 * Tagging the page_alloc memory backing that vmalloc() allocation is
	 * skipped, see ___GFP_SKIP_KASAN.
	 *
	 * For non-VM_ALLOC allocations, page_alloc memory is tagged as usual.
	 */
	if (!(flags & KASAN_VMALLOC_VM_ALLOC)) {
		WARN_ON(flags & KASAN_VMALLOC_INIT);
		return (void *)start;
	}

	/*
	 * Don't tag executable memory.
	 * The kernel doesn't tolerate having the PC register tagged.
	 */
	if (!(flags & KASAN_VMALLOC_PROT_NORMAL)) {
		WARN_ON(flags & KASAN_VMALLOC_INIT);
		return (void *)start;
	}

	tag = kasan_random_tag();
	start = set_tag(start, tag);

	/* Unpoison and initialize memory up to size. */
	kasan_unpoison(start, size, flags & KASAN_VMALLOC_INIT);

	/*
	 * Explicitly poison and initialize the in-page vmalloc() redzone.
	 * Unlike software KASAN modes, hardware tag-based KASAN doesn't
	 * unpoison memory when populating shadow for vmalloc() space.
	 */
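	/*
	 * For example, with 16-byte granules and 4K pages, a request of
	 * size 3000 starting at a page boundary is unpoisoned up to offset
	 * 3008 (the next granule boundary), and the remaining 1088 bytes up
	 * to the end of the page are poisoned as the redzone.
	 */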
	redzone_start = round_up((unsigned long)start + size,
				 KASAN_GRANULE_SIZE);
	redzone_size = round_up(redzone_start, PAGE_SIZE) - redzone_start;
	kasan_poison((void *)redzone_start, redzone_size, KASAN_TAG_INVALID,
		     flags & KASAN_VMALLOC_INIT);

	/*
	 * Set per-page tag flags to allow accessing physical memory for the
	 * vmalloc() mapping through page_address(vmalloc_to_page()).
	 */
	unpoison_vmalloc_pages(start, tag);

	return (void *)start;
}

void __kasan_poison_vmalloc(const void *start, unsigned long size)
{
	/*
	 * No tagging here.
	 * The physical pages backing the vmalloc() allocation are poisoned
	 * through the usual page_alloc paths.
	 */
}

#endif

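/*
 * The hw_enable_tag_checks_*() helpers below are wrappers provided by
 * kasan.h around the architecture's tag-checking hooks; on arm64, currently
 * the only architecture implementing hardware tag-based KASAN, they select
 * the corresponding MTE tag check fault mode for the calling CPU.
 */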
void kasan_enable_hw_tags(void)
{
	if (kasan_arg_mode == KASAN_ARG_MODE_ASYNC)
		hw_enable_tag_checks_async();
	else if (kasan_arg_mode == KASAN_ARG_MODE_ASYMM)
		hw_enable_tag_checks_asymm();
	else
		hw_enable_tag_checks_sync();
}

#if IS_ENABLED(CONFIG_KASAN_KUNIT_TEST)

EXPORT_SYMBOL_IF_KUNIT(kasan_enable_hw_tags);

VISIBLE_IF_KUNIT void kasan_force_async_fault(void)
{
	hw_force_async_tag_fault();
}
EXPORT_SYMBOL_IF_KUNIT(kasan_force_async_fault);

#endif