#!/usr/bin/env python
# @lint-avoid-python-3-compatibility-imports
#
# runqlat    Run queue (scheduler) latency as a histogram.
#            For Linux, uses BCC, eBPF.
#
# USAGE: runqlat [-h] [-T] [-m] [-P] [-L] [-p PID] [interval] [count]
#
# This measures the time a task spends waiting on a run queue for a turn
# on-CPU, and shows this time as a histogram. This time should be small, but a
# task may need to wait its turn due to CPU load.
#
# This measures two types of run queue latency:
# 1. The time from a task being enqueued on a run queue to its context switch
#    and execution. This traces ttwu_do_wakeup(), wake_up_new_task() ->
#    finish_task_switch() with either raw tracepoints (if supported) or kprobes
#    and instruments the run queue latency after a voluntary context switch.
# 2. The time from when a task was involuntarily context switched and is still
#    in the runnable state, to when it next executed. This is instrumented
#    from finish_task_switch() alone.
#
# Copyright 2016 Netflix, Inc.
# Licensed under the Apache License, Version 2.0 (the "License")
#
# 07-Feb-2016   Brendan Gregg   Created this.
from __future__ import print_function
from bcc import BPF
from time import sleep, strftime
import argparse

# arguments
examples = """examples:
    ./runqlat            # summarize run queue latency as a histogram
    ./runqlat 1 10       # print 1 second summaries, 10 times
    ./runqlat -mT 1      # 1s summaries, milliseconds, and timestamps
    ./runqlat -P         # show each PID separately
    ./runqlat -p 185     # trace PID 185 only
"""
parser = argparse.ArgumentParser(
    description="Summarize run queue (scheduler) latency as a histogram",
    formatter_class=argparse.RawDescriptionHelpFormatter,
    epilog=examples)
parser.add_argument("-T", "--timestamp", action="store_true",
    help="include timestamp on output")
parser.add_argument("-m", "--milliseconds", action="store_true",
    help="millisecond histogram")
parser.add_argument("-P", "--pids", action="store_true",
    help="print a histogram per process ID")
# PID options are --pid and --pids, so namespaces should be --pidns (not done
# yet) and --pidnss:
parser.add_argument("--pidnss", action="store_true",
    help="print a histogram per PID namespace")
parser.add_argument("-L", "--tids", action="store_true",
    help="print a histogram per thread ID")
parser.add_argument("-p", "--pid",
    help="trace this PID only")
parser.add_argument("interval", nargs="?", default=99999999,
    help="output interval, in seconds")
parser.add_argument("count", nargs="?", default=99999999,
    help="number of outputs")
parser.add_argument("--ebpf", action="store_true",
    help=argparse.SUPPRESS)
args = parser.parse_args()
# number of interval summaries still to print before exiting
countdown = int(args.count)
debug = 0

# define BPF program
# The C source below is a template: STORAGE, STORE, FILTER, FACTOR and
# STATE_FIELD are placeholders replaced by str.replace() later, depending on
# the command-line options and the running kernel.
bpf_text = """
#include <uapi/linux/ptrace.h>
#include <linux/sched.h>
#include <linux/nsproxy.h>
#include <linux/pid_namespace.h>
#include <linux/init_task.h>

typedef struct pid_key {
    u32 id;
    u64 slot;
} pid_key_t;

typedef struct pidns_key {
    u32 id;
    u64 slot;
} pidns_key_t;

BPF_HASH(start, u32);
STORAGE

// record enqueue timestamp
static int trace_enqueue(u32 tgid, u32 pid)
{
    if (FILTER || pid == 0)
        return 0;
    u64 ts = bpf_ktime_get_ns();
    start.update(&pid, &ts);
    return 0;
}

static __always_inline unsigned int pid_namespace(struct task_struct *task)
{

/* pids[] was removed from task_struct since commit 2c4704756cab7cfa031ada4dab361562f0e357c0
 * Using the macro INIT_PID_LINK as a conditional judgment.
 */
#ifdef INIT_PID_LINK
    struct pid_link pids;
    unsigned int level;
    struct upid upid;
    struct ns_common ns;

    /* get the pid namespace by following task_active_pid_ns(),
     * pid->numbers[pid->level].ns
     */
    bpf_probe_read_kernel(&pids, sizeof(pids), &task->pids[PIDTYPE_PID]);
    bpf_probe_read_kernel(&level, sizeof(level), &pids.pid->level);
    bpf_probe_read_kernel(&upid, sizeof(upid), &pids.pid->numbers[level]);
    bpf_probe_read_kernel(&ns, sizeof(ns), &upid.ns->ns);

    return ns.inum;
#else
    struct pid *pid;
    unsigned int level;
    struct upid upid;
    struct ns_common ns;

    /* get the pid namespace by following task_active_pid_ns(),
     * pid->numbers[pid->level].ns
     */
    bpf_probe_read_kernel(&pid, sizeof(pid), &task->thread_pid);
    bpf_probe_read_kernel(&level, sizeof(level), &pid->level);
    bpf_probe_read_kernel(&upid, sizeof(upid), &pid->numbers[level]);
    bpf_probe_read_kernel(&ns, sizeof(ns), &upid.ns->ns);

    return ns.inum;
#endif
}
"""

# kprobe variant: used when the kernel does not support raw tracepoints.
# Instruments wake_up_new_task()/ttwu_do_wakeup() for enqueue timestamps and
# finish_task_switch() for the context-switch side.
bpf_text_kprobe = """
int trace_wake_up_new_task(struct pt_regs *ctx, struct task_struct *p)
{
    return trace_enqueue(p->tgid, p->pid);
}

int trace_ttwu_do_wakeup(struct pt_regs *ctx, struct rq *rq, struct task_struct *p,
    int wake_flags)
{
    return trace_enqueue(p->tgid, p->pid);
}

// calculate latency
int trace_run(struct pt_regs *ctx, struct task_struct *prev)
{
    u32 pid, tgid;

    // ivcsw: treat like an enqueue event and store timestamp
    if (prev->STATE_FIELD == TASK_RUNNING) {
        tgid = prev->tgid;
        pid = prev->pid;
        if (!(FILTER || pid == 0)) {
            u64 ts = bpf_ktime_get_ns();
            start.update(&pid, &ts);
        }
    }

    tgid = bpf_get_current_pid_tgid() >> 32;
    pid = bpf_get_current_pid_tgid();
    if (FILTER || pid == 0)
        return 0;
    u64 *tsp, delta;

    // fetch timestamp and calculate delta
    tsp = start.lookup(&pid);
    if (tsp == 0) {
        return 0;        // missed enqueue
    }
    delta = bpf_ktime_get_ns() - *tsp;
    FACTOR

    // store as histogram
    STORE

    start.delete(&pid);
    return 0;
}
"""

# raw tracepoint variant: preferred on kernels that support it; same logic
# attached to the stable sched_wakeup/sched_wakeup_new/sched_switch
# tracepoints instead of kprobes.
bpf_text_raw_tp = """
RAW_TRACEPOINT_PROBE(sched_wakeup)
{
    // TP_PROTO(struct task_struct *p)
    struct task_struct *p = (struct task_struct *)ctx->args[0];
    return trace_enqueue(p->tgid, p->pid);
}

RAW_TRACEPOINT_PROBE(sched_wakeup_new)
{
    // TP_PROTO(struct task_struct *p)
    struct task_struct *p = (struct task_struct *)ctx->args[0];
    return trace_enqueue(p->tgid, p->pid);
}

RAW_TRACEPOINT_PROBE(sched_switch)
{
    // TP_PROTO(bool preempt, struct task_struct *prev, struct task_struct *next)
    struct task_struct *prev = (struct task_struct *)ctx->args[1];
    struct task_struct *next = (struct task_struct *)ctx->args[2];
    u32 pid, tgid;

    // ivcsw: treat like an enqueue event and store timestamp
    if (prev->STATE_FIELD == TASK_RUNNING) {
        tgid = prev->tgid;
        pid = prev->pid;
        if (!(FILTER || pid == 0)) {
            u64 ts = bpf_ktime_get_ns();
            start.update(&pid, &ts);
        }
    }

    tgid = next->tgid;
    pid = next->pid;
    if (FILTER || pid == 0)
        return 0;
    u64 *tsp, delta;

    // fetch timestamp and calculate delta
    tsp = start.lookup(&pid);
    if (tsp == 0) {
        return 0;        // missed enqueue
    }
    delta = bpf_ktime_get_ns() - *tsp;
    FACTOR

    // store as histogram
    STORE

    start.delete(&pid);
    return 0;
}
"""
# Prefer raw tracepoints when the kernel supports them (stable attach points,
# lower overhead); otherwise fall back to the kprobe variant.
is_support_raw_tp = BPF.support_raw_tracepoint()
if is_support_raw_tp:
    bpf_text += bpf_text_raw_tp
else:
    bpf_text += bpf_text_kprobe

# code substitutions
# task_struct.state was renamed to __state in Linux 5.14; probe which field
# the running kernel has.
if BPF.kernel_struct_has_field(b'task_struct', b'__state') == 1:
    bpf_text = bpf_text.replace('STATE_FIELD', '__state')
else:
    bpf_text = bpf_text.replace('STATE_FIELD', 'state')
if args.pid:
    # pid from userspace point of view is thread group from kernel pov
    bpf_text = bpf_text.replace('FILTER', 'tgid != %s' % args.pid)
else:
    bpf_text = bpf_text.replace('FILTER', '0')
# FACTOR scales nanoseconds to the requested histogram unit.
if args.milliseconds:
    bpf_text = bpf_text.replace('FACTOR', 'delta /= 1000000;')
    label = "msecs"
else:
    bpf_text = bpf_text.replace('FACTOR', 'delta /= 1000;')
    label = "usecs"
# STORAGE/STORE select the histogram key: per-PID/TID, per PID namespace,
# or a single unkeyed histogram.
if args.pids or args.tids:
    section = "pid"
    pid = "tgid"
    if args.tids:
        pid = "pid"
        section = "tid"
    bpf_text = bpf_text.replace('STORAGE',
        'BPF_HISTOGRAM(dist, pid_key_t);')
    bpf_text = bpf_text.replace('STORE',
        'pid_key_t key = {}; key.id = ' + pid + '; key.slot = bpf_log2l(delta); ' +
        'dist.increment(key);')
elif args.pidnss:
    section = "pidns"
    bpf_text = bpf_text.replace('STORAGE',
        'BPF_HISTOGRAM(dist, pidns_key_t);')
    bpf_text = bpf_text.replace('STORE', 'pidns_key_t key = ' +
        '{.id = pid_namespace(prev), ' +
        '.slot = bpf_log2l(delta)}; dist.atomic_increment(key);')
else:
    section = ""
    bpf_text = bpf_text.replace('STORAGE', 'BPF_HISTOGRAM(dist);')
    bpf_text = bpf_text.replace('STORE',
        'dist.atomic_increment(bpf_log2l(delta));')
if debug or args.ebpf:
    print(bpf_text)
    if args.ebpf:
        exit()

# load BPF program
b = BPF(text=bpf_text)
if not is_support_raw_tp:
    b.attach_kprobe(event="ttwu_do_wakeup", fn_name="trace_ttwu_do_wakeup")
    b.attach_kprobe(event="wake_up_new_task", fn_name="trace_wake_up_new_task")
    # raw string: "\." and "\d" are invalid escape sequences in a plain
    # string literal (SyntaxWarning since Python 3.12); the regex also
    # matches GCC-renamed symbols like finish_task_switch.isra.0
    b.attach_kprobe(event_re=r"^finish_task_switch$|^finish_task_switch\.isra\.\d$",
        fn_name="trace_run")

print("Tracing run queue latency... Hit Ctrl-C to end.")

# output
# With no interval argument, the default (99999999) means "run until Ctrl-C";
# an interval of 0 prints once and exits immediately.
exiting = 0 if args.interval else 1
dist = b.get_table("dist")
while True:
    try:
        sleep(int(args.interval))
    except KeyboardInterrupt:
        exiting = 1

    print()
    if args.timestamp:
        print("%-8s\n" % strftime("%H:%M:%S"), end="")

    dist.print_log2_hist(label, section, section_print_fn=int)
    dist.clear()

    countdown -= 1
    if exiting or countdown == 0:
        exit()