// SPDX-License-Identifier: GPL-2.0
/*
 * Basic resctrl file system operations
 *
 * Copyright (C) 2018 Intel Corporation
 *
 * Authors:
 *    Sai Praneeth Prakhya <[email protected]>,
 *    Fenghua Yu <[email protected]>
 */
#include <fcntl.h>
#include <limits.h>

#include "resctrl.h"

int snc_unreliable;

static int find_resctrl_mount(char *buffer)
{
	FILE *mounts;
	char line[256], *fs, *mntpoint;

	mounts = fopen("/proc/mounts", "r");
	if (!mounts) {
		ksft_perror("/proc/mounts");
		return -ENXIO;
	}
	while (!feof(mounts)) {
		if (!fgets(line, 256, mounts))
			break;
		fs = strtok(line, " \t");
		if (!fs)
			continue;
		mntpoint = strtok(NULL, " \t");
		if (!mntpoint)
			continue;
		fs = strtok(NULL, " \t");
		if (!fs)
			continue;
		if (strcmp(fs, "resctrl"))
			continue;

		fclose(mounts);
		if (buffer)
			strncpy(buffer, mntpoint, 256);

		return 0;
	}

	fclose(mounts);

	return -ENOENT;
}

/*
 * mount_resctrlfs - Mount resctrl FS at /sys/fs/resctrl
 *
 * Mounts resctrl FS. Fails if resctrl FS is already mounted to avoid
 * pre-existing settings interfering with the test results.
 *
 * Return: 0 on success, < 0 on error.
 */
int mount_resctrlfs(void)
{
	int ret;

	ret = find_resctrl_mount(NULL);
	if (ret != -ENOENT)
		return -1;

	ksft_print_msg("Mounting resctrl to \"%s\"\n", RESCTRL_PATH);
	ret = mount("resctrl", RESCTRL_PATH, "resctrl", 0, NULL);
	if (ret)
		ksft_perror("mount");

	return ret;
}

int umount_resctrlfs(void)
{
	char mountpoint[256];
	int ret;

	ret = find_resctrl_mount(mountpoint);
	if (ret == -ENOENT)
		return 0;
	if (ret)
		return ret;

	if (umount(mountpoint)) {
		ksft_perror("Unable to umount resctrl");

		return -1;
	}

	return 0;
}
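
/*
 * Illustrative sketch (not part of the selftest flow): the expected
 * pairing of the two helpers above. A test mounts resctrl, does its
 * work, then unmounts; the "test work" step is a placeholder.
 */
static inline int example_mount_umount_usage(void)
{
	int ret;

	ret = mount_resctrlfs();
	if (ret)
		return ret;

	/* ... do test work against RESCTRL_PATH here ... */

	return umount_resctrlfs();
}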

/*
 * get_cache_level - Convert cache level from string to integer
 * @cache_type:	Cache level as string
 *
 * Return: cache level as integer or -1 if @cache_type is invalid.
 */
static int get_cache_level(const char *cache_type)
{
	if (!strcmp(cache_type, "L3"))
		return 3;
	if (!strcmp(cache_type, "L2"))
		return 2;

	ksft_print_msg("Invalid cache level\n");
	return -1;
}

static int get_resource_cache_level(const char *resource)
{
	/* "MB" uses L3 (LLC) as its resource */
	if (!strcmp(resource, "MB"))
		return 3;
	return get_cache_level(resource);
}

/*
 * get_domain_id - Get resctrl domain ID for a specified CPU
 * @resource:	resource name
 * @cpu_no:	CPU number
 * @domain_id:	domain ID (cache ID; for MB, L3 cache ID)
 *
 * Return: >= 0 on success, < 0 on failure.
 */
int get_domain_id(const char *resource, int cpu_no, int *domain_id)
{
	char phys_pkg_path[1024];
	int cache_num;
	FILE *fp;

	cache_num = get_resource_cache_level(resource);
	if (cache_num < 0)
		return cache_num;

	sprintf(phys_pkg_path, "%s%d/cache/index%d/id", PHYS_ID_PATH, cpu_no, cache_num);

	fp = fopen(phys_pkg_path, "r");
	if (!fp) {
		ksft_perror("Failed to open cache id file");

		return -1;
	}
	if (fscanf(fp, "%d", domain_id) <= 0) {
		ksft_perror("Could not get domain ID");
		fclose(fp);

		return -1;
	}
	fclose(fp);

	return 0;
}
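
/*
 * Illustrative sketch (not part of the selftest flow): resolving the L3
 * domain ID that CPU 0 belongs to and printing it. The CPU number and
 * resource name are arbitrary example values.
 */
static inline void example_get_domain_id_usage(void)
{
	int domain_id;

	if (get_domain_id("L3", 0, &domain_id) < 0)
		return;

	/* domain_id now holds the L3 cache ID of CPU 0 */
	ksft_print_msg("CPU 0 belongs to L3 domain %d\n", domain_id);
}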

/*
 * Count number of CPUs in a /sys bitmap
 */
static unsigned int count_sys_bitmap_bits(char *name)
{
	FILE *fp = fopen(name, "r");
	int count = 0, c;

	if (!fp)
		return 0;

	while ((c = fgetc(fp)) != EOF) {
		if (!isxdigit(c))
			continue;
		switch (c) {
		case 'f':
			count++;
			fallthrough;
		case '7': case 'b': case 'd': case 'e':
			count++;
			fallthrough;
		case '3': case '5': case '6': case '9': case 'a': case 'c':
			count++;
			fallthrough;
		case '1': case '2': case '4': case '8':
			count++;
			break;
		}
	}
	fclose(fp);

	return count;
}
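
/*
 * Illustrative sketch (assumes GCC/Clang builtins; not used by the
 * selftests): the fall-through switch above is a per-nibble population
 * count. For a valid hex digit the same value can be computed directly.
 */
static inline unsigned int example_count_nibble_bits(int c)
{
	int nibble = isdigit(c) ? c - '0' : tolower(c) - 'a' + 10;

	/* Number of set bits in one hex digit of the bitmap */
	return __builtin_popcount(nibble);
}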

static bool cpus_offline_empty(void)
{
	char offline_cpus_str[64];
	FILE *fp;

	fp = fopen("/sys/devices/system/cpu/offline", "r");
	if (!fp) {
		ksft_perror("Could not open /sys/devices/system/cpu/offline");
		return false;
	}

	if (fscanf(fp, "%63s", offline_cpus_str) < 0) {
		if (!errno) {
			fclose(fp);
			return true;
		}
		ksft_perror("Could not read /sys/devices/system/cpu/offline");
	}

	fclose(fp);

	return false;
}

/*
 * Detect SNC by comparing #CPUs in node0 with #CPUs sharing LLC with CPU0.
 * If any CPUs are offline, declare the detection unreliable.
 */
int snc_nodes_per_l3_cache(void)
{
	int node_cpus, cache_cpus;
	static int snc_mode;

	if (!snc_mode) {
		snc_mode = 1;
		if (!cpus_offline_empty()) {
			ksft_print_msg("Runtime SNC detection unreliable due to offline CPUs.\n");
			ksft_print_msg("Setting SNC mode to disabled.\n");
			snc_unreliable = 1;
			return snc_mode;
		}
		node_cpus = count_sys_bitmap_bits("/sys/devices/system/node/node0/cpumap");
		cache_cpus = count_sys_bitmap_bits("/sys/devices/system/cpu/cpu0/cache/index3/shared_cpu_map");

		if (!node_cpus || !cache_cpus) {
			ksft_print_msg("Could not determine Sub-NUMA Cluster mode.\n");
			snc_unreliable = 1;
			return snc_mode;
		}
		snc_mode = cache_cpus / node_cpus;

		if (snc_mode > 1)
			ksft_print_msg("SNC-%d mode discovered.\n", snc_mode);
	}

	return snc_mode;
}
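
/*
 * Illustrative sketch (not part of the selftest flow, hypothetical
 * numbers): on an SNC-2 system with 64 CPUs sharing CPU 0's L3 cache
 * and 32 CPUs in node 0, the helper above reports 64 / 32 = 2 nodes
 * per L3 cache; on non-SNC systems it reports 1.
 */
static inline void example_snc_report(void)
{
	int nodes = snc_nodes_per_l3_cache();

	if (snc_unreliable)
		ksft_print_msg("SNC detection was unreliable, assuming SNC disabled\n");
	else
		ksft_print_msg("%d SNC node(s) per L3 cache\n", nodes);
}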

/*
 * get_cache_size - Get cache size for a specified CPU
 * @cpu_no:	CPU number
 * @cache_type:	Cache level L2/L3
 * @cache_size:	pointer to cache_size
 *
 * Return: = 0 on success, < 0 on failure.
 */
int get_cache_size(int cpu_no, const char *cache_type, unsigned long *cache_size)
{
	char cache_path[1024], cache_str[64];
	int length, i, cache_num;
	FILE *fp;

	cache_num = get_cache_level(cache_type);
	if (cache_num < 0)
		return cache_num;

	sprintf(cache_path, "/sys/bus/cpu/devices/cpu%d/cache/index%d/size",
		cpu_no, cache_num);
	fp = fopen(cache_path, "r");
	if (!fp) {
		ksft_perror("Failed to open cache size");

		return -1;
	}
	if (fscanf(fp, "%63s", cache_str) <= 0) {
		ksft_perror("Could not get cache_size");
		fclose(fp);

		return -1;
	}
	fclose(fp);

	length = (int)strlen(cache_str);

	*cache_size = 0;

	for (i = 0; i < length; i++) {
		if ((cache_str[i] >= '0') && (cache_str[i] <= '9'))
			*cache_size = *cache_size * 10 + (cache_str[i] - '0');
		else if (cache_str[i] == 'K')
			*cache_size = *cache_size * 1024;
		else if (cache_str[i] == 'M')
			*cache_size = *cache_size * 1024 * 1024;
		else
			break;
	}

	/*
	 * The amount of cache represented by each bit in the masks
	 * in the schemata file is reduced by a factor equal to SNC
	 * nodes per L3 cache.
	 * E.g. on an SNC-2 system with a 100 MB L3 cache a test that
	 * allocates memory from its local SNC node (default behavior
	 * without using libnuma) will only see 50 MB llc_occupancy
	 * with a fully populated L3 mask in the schemata file.
	 */
	if (cache_num == 3)
		*cache_size /= snc_nodes_per_l3_cache();
	return 0;
}
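
/*
 * Illustrative sketch (not part of the selftest flow): combining
 * get_cache_size() with the full CBM to estimate how many bytes of
 * cache a single schemata bit represents. CPU 0 and "L3" are example
 * values.
 */
static inline int example_bytes_per_cbm_bit(unsigned long *bytes_per_bit)
{
	unsigned long cache_size, full_mask;

	if (get_cache_size(0, "L3", &cache_size) < 0)
		return -1;
	if (get_full_cbm("L3", &full_mask) < 0)
		return -1;

	/* Each CBM bit covers an equal share of the cache */
	*bytes_per_bit = cache_size / count_bits(full_mask);
	return 0;
}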

#define CORE_SIBLINGS_PATH	"/sys/bus/cpu/devices/cpu"

/*
 * get_bit_mask - Get bit mask from given file
 * @filename:	File containing the mask
 * @mask:	The bit mask returned as unsigned long
 *
 * Return: = 0 on success, < 0 on failure.
 */
static int get_bit_mask(const char *filename, unsigned long *mask)
{
	FILE *fp;

	if (!filename || !mask)
		return -1;

	fp = fopen(filename, "r");
	if (!fp) {
		ksft_print_msg("Failed to open bit mask file '%s': %s\n",
			       filename, strerror(errno));
		return -1;
	}

	if (fscanf(fp, "%lx", mask) <= 0) {
		ksft_print_msg("Could not read bit mask file '%s': %s\n",
			       filename, strerror(errno));
		fclose(fp);

		return -1;
	}
	fclose(fp);

	return 0;
}

/*
 * resource_info_unsigned_get - Read an unsigned value from
 * /sys/fs/resctrl/info/@resource/@filename
 * @resource:	Resource name that matches directory name in
 *		/sys/fs/resctrl/info
 * @filename:	File in /sys/fs/resctrl/info/@resource
 * @val:	Contains read value on success.
 *
 * Return: = 0 on success, < 0 on failure. On success the read
 * value is saved into @val.
 */
int resource_info_unsigned_get(const char *resource, const char *filename,
			       unsigned int *val)
{
	char file_path[PATH_MAX];
	FILE *fp;

	snprintf(file_path, sizeof(file_path), "%s/%s/%s", INFO_PATH, resource,
		 filename);

	fp = fopen(file_path, "r");
	if (!fp) {
		ksft_print_msg("Error opening %s: %m\n", file_path);
		return -1;
	}

	if (fscanf(fp, "%u", val) <= 0) {
		ksft_print_msg("Could not get contents of %s: %m\n", file_path);
		fclose(fp);
		return -1;
	}

	fclose(fp);
	return 0;
}

/*
 * create_bit_mask - Create bit mask from start, len pair
 * @start:	LSB of the mask
 * @len:	Number of bits in the mask
 */
unsigned long create_bit_mask(unsigned int start, unsigned int len)
{
	return ((1UL << len) - 1UL) << start;
}

/*
 * count_contiguous_bits - Returns the length of the longest contiguous train
 *			   of set bits in a bit mask
 * @val:	A bit mask
 * @start:	The location of the least-significant bit of the longest train
 *
 * Return: The length of the contiguous bits in the longest train of bits
 */
unsigned int count_contiguous_bits(unsigned long val, unsigned int *start)
{
	unsigned long last_val;
	unsigned int count = 0;

	while (val) {
		last_val = val;
		val &= (val >> 1);
		count++;
	}

	if (start) {
		if (count)
			*start = ffsl(last_val) - 1;
		else
			*start = 0;
	}

	return count;
}
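
/*
 * Illustrative sketch (not part of the selftest flow): round-tripping the
 * two helpers above. For val = 0x39f (0b11_1001_1111) the longest train
 * of set bits is the low five bits, so count_contiguous_bits() reports
 * len = 5 at start = 0, and create_bit_mask(0, 5) rebuilds 0x1f.
 */
static inline void example_bit_train(void)
{
	unsigned int start, len;
	unsigned long mask;

	len = count_contiguous_bits(0x39f, &start);	/* len = 5, start = 0 */
	mask = create_bit_mask(start, len);		/* mask = 0x1f */

	ksft_print_msg("longest train: len=%u start=%u mask=0x%lx\n",
		       len, start, mask);
}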

/*
 * get_full_cbm - Get full Cache Bit Mask (CBM)
 * @cache_type:	Cache type as "L2" or "L3"
 * @mask:	Full cache bit mask representing the maximal portion of cache
 *		available for allocation, returned as unsigned long.
 *
 * Return: = 0 on success, < 0 on failure.
 */
int get_full_cbm(const char *cache_type, unsigned long *mask)
{
	char cbm_path[PATH_MAX];
	int ret;

	if (!cache_type)
		return -1;

	snprintf(cbm_path, sizeof(cbm_path), "%s/%s/cbm_mask",
		 INFO_PATH, cache_type);

	ret = get_bit_mask(cbm_path, mask);
	if (ret || !*mask)
		return -1;

	return 0;
}

/*
 * get_shareable_mask - Get shareable mask from shareable_bits
 * @cache_type:		Cache type as "L2" or "L3"
 * @shareable_mask:	Shareable mask returned as unsigned long
 *
 * Return: = 0 on success, < 0 on failure.
 */
static int get_shareable_mask(const char *cache_type, unsigned long *shareable_mask)
{
	char mask_path[PATH_MAX];

	if (!cache_type)
		return -1;

	snprintf(mask_path, sizeof(mask_path), "%s/%s/shareable_bits",
		 INFO_PATH, cache_type);

	return get_bit_mask(mask_path, shareable_mask);
}

/*
 * get_mask_no_shareable - Get Cache Bit Mask (CBM) without shareable bits
 * @cache_type:		Cache type as "L2" or "L3"
 * @mask:		The largest exclusive portion of the cache out of the
 *			full CBM, returned as unsigned long
 *
 * Parts of a cache may be shared with other devices such as GPU. This function
 * calculates the largest exclusive portion of the cache where no other devices
 * besides CPU have access to the cache portion.
 *
 * Return: = 0 on success, < 0 on failure.
 */
int get_mask_no_shareable(const char *cache_type, unsigned long *mask)
{
	unsigned long full_mask, shareable_mask;
	unsigned int start, len;

	if (get_full_cbm(cache_type, &full_mask) < 0)
		return -1;
	if (get_shareable_mask(cache_type, &shareable_mask) < 0)
		return -1;

	len = count_contiguous_bits(full_mask & ~shareable_mask, &start);
	if (!len)
		return -1;

	*mask = create_bit_mask(start, len);

	return 0;
}
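
/*
 * Illustrative worked example (hypothetical mask values, not part of the
 * selftest flow): with a full CBM of 0xfff and shareable_bits of 0xc00,
 * the candidate bits are 0xfff & ~0xc00 = 0x3ff, whose longest contiguous
 * train is ten bits starting at bit 0, so get_mask_no_shareable()
 * returns 0x3ff.
 */
static inline void example_no_shareable_mask(const char *cache_type)
{
	unsigned long mask;

	if (get_mask_no_shareable(cache_type, &mask) < 0)
		return;

	ksft_print_msg("Exclusive %s mask: 0x%lx\n", cache_type, mask);
}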

/*
 * taskset_benchmark - Taskset PID (i.e. benchmark) to a specified CPU
 * @bm_pid:		PID that should be bound
 * @cpu_no:		CPU number to which the PID should be bound
 * @old_affinity:	When not NULL, set to old CPU affinity
 *
 * Return: 0 on success, < 0 on error.
 */
int taskset_benchmark(pid_t bm_pid, int cpu_no, cpu_set_t *old_affinity)
{
	cpu_set_t my_set;

	if (old_affinity) {
		CPU_ZERO(old_affinity);
		if (sched_getaffinity(bm_pid, sizeof(*old_affinity),
				      old_affinity)) {
			ksft_perror("Unable to read CPU affinity");
			return -1;
		}
	}

	CPU_ZERO(&my_set);
	CPU_SET(cpu_no, &my_set);

	if (sched_setaffinity(bm_pid, sizeof(cpu_set_t), &my_set)) {
		ksft_perror("Unable to taskset benchmark");

		return -1;
	}

	return 0;
}

/*
 * taskset_restore - Taskset PID to the earlier CPU affinity
 * @bm_pid:		PID that should be reset
 * @old_affinity:	The old CPU affinity to restore
 *
 * Return: 0 on success, < 0 on error.
 */
int taskset_restore(pid_t bm_pid, cpu_set_t *old_affinity)
{
	if (sched_setaffinity(bm_pid, sizeof(*old_affinity), old_affinity)) {
		ksft_perror("Unable to restore CPU affinity");
		return -1;
	}

	return 0;
}
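
/*
 * Illustrative sketch (not part of the selftest flow): pinning the
 * calling process to an example CPU for a measurement and restoring
 * its previous affinity afterwards. CPU 1 is an arbitrary choice.
 */
static inline int example_pin_and_restore(void)
{
	cpu_set_t old_affinity;

	if (taskset_benchmark(getpid(), 1, &old_affinity) < 0)
		return -1;

	/* ... run the measurement on CPU 1 here ... */

	return taskset_restore(getpid(), &old_affinity);
}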

/*
 * create_grp - Create a group only if one doesn't exist
 * @grp_name:	Name of the group
 * @grp:	Full path and name of the group
 * @parent_grp:	Full path and name of the parent group
 *
 * Creates a group @grp_name if it does not exist yet. If @grp_name is NULL,
 * it is interpreted as the root group which always results in success.
 *
 * Return: 0 on success, < 0 on error.
 */
static int create_grp(const char *grp_name, char *grp, const char *parent_grp)
{
	int found_grp = 0;
	struct dirent *ep;
	DIR *dp;

	if (!grp_name)
		return 0;

	/* Check if requested grp exists or not */
	dp = opendir(parent_grp);
	if (dp) {
		while ((ep = readdir(dp)) != NULL) {
			if (strcmp(ep->d_name, grp_name) == 0)
				found_grp = 1;
		}
		closedir(dp);
	} else {
		ksft_perror("Unable to open resctrl for group");

		return -1;
	}

	/* Requested grp doesn't exist, hence create it */
	if (found_grp == 0) {
		if (mkdir(grp, 0) == -1) {
			ksft_perror("Unable to create group");

			return -1;
		}
	}

	return 0;
}

static int write_pid_to_tasks(char *tasks, pid_t pid)
{
	FILE *fp;

	fp = fopen(tasks, "w");
	if (!fp) {
		ksft_perror("Failed to open tasks file");

		return -1;
	}
	if (fprintf(fp, "%d\n", (int)pid) < 0) {
		ksft_print_msg("Failed to write pid to tasks file\n");
		fclose(fp);

		return -1;
	}
	fclose(fp);

	return 0;
}

/*
 * write_bm_pid_to_resctrl - Write a PID (i.e. benchmark) to resctrl FS
 * @bm_pid:	PID that should be written
 * @ctrlgrp:	Name of the control monitor group (con_mon grp)
 * @mongrp:	Name of the monitor group (mon grp)
 *
 * If a con_mon grp is requested, create it and write the pid to it, otherwise
 * write the pid to the root con_mon grp.
 * If a mon grp is requested, create it and write the pid to it, otherwise the
 * pid is not written again, which means the pid stays in the con_mon grp and
 * results should be read from the con_mon grp's mon_data directory.
 *
 * Return: 0 on success, < 0 on error.
 */
int write_bm_pid_to_resctrl(pid_t bm_pid, const char *ctrlgrp, const char *mongrp)
{
	char controlgroup[128], monitorgroup[512], monitorgroup_p[256];
	char tasks[1024];
	int ret = 0;

	if (ctrlgrp)
		sprintf(controlgroup, "%s/%s", RESCTRL_PATH, ctrlgrp);
	else
		sprintf(controlgroup, "%s", RESCTRL_PATH);

	/* Create control and monitoring group and write pid into it */
	ret = create_grp(ctrlgrp, controlgroup, RESCTRL_PATH);
	if (ret)
		goto out;
	sprintf(tasks, "%s/tasks", controlgroup);
	ret = write_pid_to_tasks(tasks, bm_pid);
	if (ret)
		goto out;

	/* Create monitor group and write pid into it if it is used */
	if (mongrp) {
		sprintf(monitorgroup_p, "%s/mon_groups", controlgroup);
		sprintf(monitorgroup, "%s/%s", monitorgroup_p, mongrp);
		ret = create_grp(mongrp, monitorgroup, monitorgroup_p);
		if (ret)
			goto out;

		sprintf(tasks, "%s/mon_groups/%s/tasks",
			controlgroup, mongrp);
		ret = write_pid_to_tasks(tasks, bm_pid);
		if (ret)
			goto out;
	}

out:
	ksft_print_msg("Writing benchmark parameters to resctrl FS\n");
	if (ret)
		ksft_print_msg("Failed writing to resctrlfs\n");

	return ret;
}
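
/*
 * Illustrative sketch (not part of the selftest flow): placing the
 * calling process into a control group "c1" and a monitor group "m1"
 * under it. Both group names are hypothetical examples.
 */
static inline int example_join_groups(void)
{
	/*
	 * After this call, monitoring results for the process are read
	 * from /sys/fs/resctrl/c1/mon_groups/m1/mon_data.
	 */
	return write_bm_pid_to_resctrl(getpid(), "c1", "m1");
}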

/*
 * write_schemata - Update schemata of a con_mon grp
 * @ctrlgrp:	Name of the con_mon grp
 * @schemata:	Schemata that should be updated to
 * @cpu_no:	CPU number that the benchmark PID is bound to
 * @resource:	Resctrl resource (e.g., MB, L3, L2, etc.)
 *
 * Update schemata of a con_mon grp *only* if the requested resctrl resource
 * is an allocation type.
 *
 * Return: 0 on success, < 0 on error.
 */
int write_schemata(const char *ctrlgrp, char *schemata, int cpu_no,
		   const char *resource)
{
	char controlgroup[1024], reason[128], schema[1024] = {};
	int domain_id, fd, schema_len, ret = 0;

	if (!schemata) {
		ksft_print_msg("Skipping empty schemata update\n");

		return -1;
	}

	if (get_domain_id(resource, cpu_no, &domain_id) < 0) {
		sprintf(reason, "Failed to get domain ID");
		ret = -1;

		goto out;
	}

	if (ctrlgrp)
		sprintf(controlgroup, "%s/%s/schemata", RESCTRL_PATH, ctrlgrp);
	else
		sprintf(controlgroup, "%s/schemata", RESCTRL_PATH);

	schema_len = snprintf(schema, sizeof(schema), "%s:%d=%s\n",
			      resource, domain_id, schemata);
	if (schema_len < 0 || schema_len >= sizeof(schema)) {
		snprintf(reason, sizeof(reason),
			 "snprintf() failed with return value : %d", schema_len);
		ret = -1;
		goto out;
	}

	fd = open(controlgroup, O_WRONLY);
	if (fd < 0) {
		snprintf(reason, sizeof(reason),
			 "open() failed : %s", strerror(errno));
		ret = -1;

		goto err_schema_not_empty;
	}
	if (write(fd, schema, schema_len) < 0) {
		snprintf(reason, sizeof(reason),
			 "write() failed : %s", strerror(errno));
		close(fd);
		ret = -1;

		goto err_schema_not_empty;
	}
	close(fd);

err_schema_not_empty:
	schema[schema_len - 1] = 0;
out:
	ksft_print_msg("Write schema \"%s\" to resctrl FS%s%s\n",
		       schema, ret ? " # " : "",
		       ret ? reason : "");

	return ret;
}
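
/*
 * Illustrative sketch (not part of the selftest flow): requesting 50%
 * memory bandwidth for the domain that CPU 2 belongs to, in a control
 * group named "c1". The group name, CPU number and value are
 * hypothetical; the line written to the schemata file would look like
 * "MB:<domain_id>=50".
 */
static inline int example_write_mb_schemata(void)
{
	char schemata[] = "50";

	return write_schemata("c1", schemata, 2, "MB");
}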

bool check_resctrlfs_support(void)
{
	FILE *inf = fopen("/proc/filesystems", "r");
	DIR *dp;
	char *res;
	bool ret = false;

	if (!inf)
		return false;

	res = fgrep(inf, "nodev\tresctrl\n");

	if (res) {
		ret = true;
		free(res);
	}

	fclose(inf);

	ksft_print_msg("%s Check kernel supports resctrl filesystem\n",
		       ret ? "Pass:" : "Fail:");

	if (!ret)
		return ret;

	dp = opendir(RESCTRL_PATH);
	ksft_print_msg("%s Check resctrl mountpoint \"%s\" exists\n",
		       dp ? "Pass:" : "Fail:", RESCTRL_PATH);
	if (dp)
		closedir(dp);

	ksft_print_msg("resctrl filesystem %s mounted\n",
		       find_resctrl_mount(NULL) ? "not" : "is");

	return ret;
}

char *fgrep(FILE *inf, const char *str)
{
	char line[256];
	int slen = strlen(str);

	while (!feof(inf)) {
		if (!fgets(line, 256, inf))
			break;
		if (strncmp(line, str, slen))
			continue;

		return strdup(line);
	}

	return NULL;
}

/*
 * resctrl_resource_exists - Check if a resource is supported.
 * @resource:	Resctrl resource (e.g., MB, L3, L2, L3_MON, etc.)
 *
 * Return: True if the resource is supported, else false. False is
 * also returned if resctrl FS is not mounted.
 */
bool resctrl_resource_exists(const char *resource)
{
	char res_path[PATH_MAX];
	struct stat statbuf;
	int ret;

	if (!resource)
		return false;

	ret = find_resctrl_mount(NULL);
	if (ret)
		return false;

	snprintf(res_path, sizeof(res_path), "%s/%s", INFO_PATH, resource);

	if (stat(res_path, &statbuf))
		return false;

	return true;
}

/*
 * resctrl_mon_feature_exists - Check if requested monitoring feature is valid.
 * @resource:	Resource that uses the mon_features file. Currently only L3_MON
 *		is valid.
 * @feature:	Required monitor feature (in mon_features file).
 *
 * Return: True if the feature is supported, else false.
 */
bool resctrl_mon_feature_exists(const char *resource, const char *feature)
{
	char res_path[PATH_MAX];
	bool feature_found;
	char *res;
	FILE *inf;

	if (!feature || !resource)
		return false;

	snprintf(res_path, sizeof(res_path), "%s/%s/mon_features", INFO_PATH, resource);
	inf = fopen(res_path, "r");
	if (!inf)
		return false;

	res = fgrep(inf, feature);
	feature_found = !!res;
	free(res);
	fclose(inf);

	return feature_found;
}
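
/*
 * Illustrative sketch (not part of the selftest flow): checking whether
 * the L3 monitoring resource exists and reports the "llc_occupancy"
 * event before running an occupancy-based test.
 */
static inline bool example_llc_occupancy_supported(void)
{
	return resctrl_resource_exists("L3_MON") &&
	       resctrl_mon_feature_exists("L3_MON", "llc_occupancy");
}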

/*
 * resource_info_file_exists - Check if a file is present inside
 * /sys/fs/resctrl/info/@resource.
 * @resource:	Required resource (e.g., MB, L3, L2, etc.)
 * @file:	Required file.
 *
 * Return: True if the /sys/fs/resctrl/info/@resource/@file exists, else false.
 */
bool resource_info_file_exists(const char *resource, const char *file)
{
	char res_path[PATH_MAX];
	struct stat statbuf;

	if (!file || !resource)
		return false;

	snprintf(res_path, sizeof(res_path), "%s/%s/%s", INFO_PATH, resource,
		 file);

	if (stat(res_path, &statbuf))
		return false;

	return true;
}

bool test_resource_feature_check(const struct resctrl_test *test)
{
	return resctrl_resource_exists(test->resource);
}

int filter_dmesg(void)
{
	char line[1024];
	FILE *fp;
	int pipefds[2];
	pid_t pid;
	int ret;

	ret = pipe(pipefds);
	if (ret) {
		ksft_perror("pipe");
		return ret;
	}
	fflush(stdout);
	pid = fork();
	if (pid == 0) {
		close(pipefds[0]);
		dup2(pipefds[1], STDOUT_FILENO);
		execlp("dmesg", "dmesg", NULL);
		ksft_perror("Executing dmesg");
		exit(1);
	}
	close(pipefds[1]);
	fp = fdopen(pipefds[0], "r");
	if (!fp) {
		ksft_perror("fdopen(pipe)");
		kill(pid, SIGTERM);

		return -1;
	}

	while (fgets(line, 1024, fp)) {
		if (strstr(line, "intel_rdt:"))
			ksft_print_msg("dmesg: %s", line);
		if (strstr(line, "resctrl:"))
			ksft_print_msg("dmesg: %s", line);
	}
	fclose(fp);
	waitpid(pid, NULL, 0);

	return 0;
}

int perf_event_open(struct perf_event_attr *hw_event, pid_t pid, int cpu,
		    int group_fd, unsigned long flags)
{
	int ret;

	ret = syscall(__NR_perf_event_open, hw_event, pid, cpu,
		      group_fd, flags);
	return ret;
}

unsigned int count_bits(unsigned long n)
{
	unsigned int count = 0;

	while (n) {
		count += n & 1;
		n >>= 1;
	}

	return count;
}

/**
 * snc_kernel_support - Check for existence of mon_sub_L3_00 file that indicates
 * SNC resctrl support on the kernel side.
 *
 * Return: 0 if SNC is enabled but the kernel lacks support, 1 if SNC is
 * disabled, SNC discovery is unreliable, or SNC is both enabled and supported.
 */
int snc_kernel_support(void)
{
	char node_path[PATH_MAX];
	struct stat statbuf;
	int ret;

	ret = snc_nodes_per_l3_cache();
	/*
	 * If SNC is disabled then its kernel support isn't important. If SNC
	 * got disabled because the discovery process was unreliable the
	 * snc_unreliable variable was set. It can be used to verify the SNC
	 * discovery reliability elsewhere in the selftest.
	 */
	if (ret == 1)
		return ret;

	snprintf(node_path, sizeof(node_path), "%s/%s", RESCTRL_PATH,
		 "mon_data/mon_L3_00/mon_sub_L3_00");

	if (!stat(node_path, &statbuf))
		return 1;

	return 0;
}