/*
 * Copyright (c) 2018 Google, Inc.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 *
 * Task starts out as a CPU hog and then becomes small.
 * Task placement and downmigration latency are verified.
 */
9
#define _GNU_SOURCE
#include <errno.h>
#include <limits.h>
#include <pthread.h>
#include <sched.h>
#include <stdio.h>
#include <string.h>
#include <time.h>

#include "tst_test.h"
#include "tst_safe_file_ops.h"
#include "tst_safe_pthread.h"

#include "trace_parse.h"
#include "util.h"
22
/* Ftrace events required by this test (written to set_event). */
#define TRACE_EVENTS "sched_switch"

/* TID of the workload thread; published by task_fn() so the trace
 * parser can match sched_switch records against the test task. */
static int task_tid;

/* Longest acceptable delay between the task becoming small and its
 * first execution on a little CPU, in microseconds. */
#define MAX_DOWNMIGRATE_LATENCY_US 100000
/* Max percentage of CPU time the task may spend on the wrong cluster. */
#define MAX_INCORRECT_CLUSTER_PCT 10
/* Duration of each workload phase (big, then small), in seconds. */
#define BURN_SEC 3
/*
 * Workload thread: burn CPU as a big (busy) task for BURN_SEC seconds,
 * then drop a trace marker and run as a small (periodically sleeping)
 * task for another BURN_SEC seconds. Publishes its TID via task_tid so
 * the trace parser can identify its sched_switch records.
 */
static void *task_fn(void *arg LTP_ATTRIBUTE_UNUSED)
{
	int phase_usec = BURN_SEC * USEC_PER_SEC;

	task_tid = gettid();

	/* Phase 1: pure CPU hog — should be placed on a big CPU. */
	printf("Big task executing for %ds...\n", BURN_SEC);
	burn(phase_usec, 0);

	/* Phase 2: mark the transition in the trace, then run small —
	 * the scheduler should downmigrate us to a little CPU. */
	printf("Changing to small task...\n");
	tracefs_write("trace_marker", "SMALL TASK");
	burn(phase_usec, 1);

	return NULL;
}
43
parse_results(void)44 static int parse_results(void)
45 {
46 int i, pct, rv = 0;
47 unsigned long long exec_start_us = 0;
48 unsigned long long too_big_cpu_us = 0;
49 unsigned long long too_small_cpu_us = 0;
50 unsigned long long big_task_us = 0;
51 unsigned long long small_task_us = 0;
52 unsigned long long smalltask_ts_usec = 0;
53 unsigned long long smalltask_tm_usec = 0;
54 unsigned long long downmigrate_ts_usec = 0;
55 unsigned long long downmigrate_latency_usec = 0;
56 cpu_set_t cpuset;
57
58 if (find_cpus_with_capacity(0, &cpuset)) {
59 printf("Failed to find the CPUs in the little cluster.\n");
60 return -1;
61 }
62
63 for (i = 0; i < num_trace_records; i++) {
64 unsigned long long segment_us;
65 struct trace_sched_switch *t = trace[i].event_data;
66 unsigned long long trace_ts_usec = TS_TO_USEC(trace[i].ts);
67
68 if (trace[i].event_type == TRACE_RECORD_TRACING_MARK_WRITE &&
69 !strcmp(trace[i].event_data, "SMALL TASK")) {
70 smalltask_tm_usec = trace_ts_usec;
71 continue;
72 }
73
74 if (trace[i].event_type != TRACE_RECORD_SCHED_SWITCH)
75 continue;
76
77 if (t->next_pid == task_tid) {
78 if (!smalltask_ts_usec && smalltask_tm_usec &&
79 trace_ts_usec > smalltask_tm_usec)
80 smalltask_ts_usec = trace_ts_usec;
81 /* Start of task execution segment. */
82 if (exec_start_us) {
83 printf("Trace parse fail: double exec start\n");
84 return -1;
85 }
86 exec_start_us = trace_ts_usec;
87 if (smalltask_ts_usec && !downmigrate_ts_usec &&
88 CPU_ISSET(trace[i].cpu, &cpuset))
89 downmigrate_ts_usec = exec_start_us;
90 continue;
91 }
92 if (t->prev_pid != task_tid)
93 continue;
94 /* End of task execution segment. */
95 segment_us = trace_ts_usec;
96 segment_us -= exec_start_us;
97 exec_start_us = 0;
98 if (CPU_ISSET(trace[i].cpu, &cpuset)) {
99 /* Task is running on little CPUs. */
100 if (!smalltask_ts_usec)
101 too_small_cpu_us += segment_us;
102 } else {
103 /* Task is running on big CPUs. */
104 if (smalltask_ts_usec) {
105 /*
106 * Downmigration is accounted separately, so
107 * only record mis-scheduled time here if it
108 * happened after downmigration.
109 */
110 if (downmigrate_ts_usec)
111 too_big_cpu_us += segment_us;
112 }
113 }
114 if (smalltask_ts_usec)
115 small_task_us += segment_us;
116 else
117 big_task_us += segment_us;
118 }
119
120 pct = (too_small_cpu_us * 100) / big_task_us;
121 rv |= (pct > MAX_INCORRECT_CLUSTER_PCT);
122 printf("Time incorrectly scheduled on small when task was big: "
123 "%lld usec (%d%% of big task CPU time)\n", too_small_cpu_us,
124 pct);
125 pct = (too_big_cpu_us * 100) / small_task_us;
126 rv |= (pct > MAX_INCORRECT_CLUSTER_PCT);
127 printf("Time incorrectly scheduled on big when task was small, after "
128 "downmigration: %lld usec (%d%% of small task CPU time)\n",
129 too_big_cpu_us, pct);
130
131 if (downmigrate_ts_usec) {
132 downmigrate_latency_usec = downmigrate_ts_usec -
133 smalltask_ts_usec;
134 printf("Downmigration latency: %lld usec\n",
135 downmigrate_latency_usec);
136 } else {
137 printf("Task never downmigrated!\n");
138 downmigrate_latency_usec = UINT_MAX;
139 }
140
141 return (rv || downmigrate_latency_usec > MAX_DOWNMIGRATE_LATENCY_US);
142 }
143
run(void)144 static void run(void)
145 {
146 pthread_t task_thread;
147
148 tst_res(TINFO, "Maximum incorrect cluster time percentage: %d%%",
149 MAX_INCORRECT_CLUSTER_PCT);
150 tst_res(TINFO, "Maximum downmigration latency: %d usec",
151 MAX_DOWNMIGRATE_LATENCY_US);
152
153 /* configure and enable tracing */
154 tracefs_write("tracing_on", "0");
155 tracefs_write("buffer_size_kb", "16384");
156 tracefs_write("set_event", TRACE_EVENTS);
157 tracefs_write("trace", "\n");
158 tracefs_write("tracing_on", "1");
159
160 SAFE_PTHREAD_CREATE(&task_thread, NULL, task_fn, NULL);
161 SAFE_PTHREAD_JOIN(task_thread, NULL);
162
163 /* disable tracing */
164 tracefs_write("tracing_on", "0");
165 LOAD_TRACE();
166
167 if (parse_results())
168 tst_res(TFAIL, "Task placement/migration latency goals "
169 "not met.\n");
170 else
171 tst_res(TPASS, "Task placement/migration latency goals "
172 "met.\n");
173 }
174
/* LTP test registration: trace_setup/trace_cleanup (from trace_parse)
 * mount and restore the tracing filesystem around run(). */
static struct tst_test test = {
	.test_all = run,
	.setup = trace_setup,
	.cleanup = trace_cleanup,
};
180