// SPDX-License-Identifier: GPL-2.0
#ifndef PERF_LOCK_CONTENTION_H
#define PERF_LOCK_CONTENTION_H

#include <linux/list.h>
#include <linux/rbtree.h>

struct lock_filter {
	int nr_types;
	int nr_addrs;
	int nr_syms;
	int nr_cgrps;
	int nr_slabs;
	unsigned int *types;
	unsigned long *addrs;
	char **syms;
	u64 *cgrps;
	char **slabs;
};
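
/*
 * Illustrative sketch (not part of this header): the nr_* counters and the
 * arrays above are expected to be parallel, and a lock is kept when it
 * matches any entry of an active filter.  The helper name and exact
 * matching rules below are assumptions, not the real implementation.
 *
 *	static bool match_addr_filter(struct lock_filter *f, u64 addr)
 *	{
 *		for (int i = 0; i < f->nr_addrs; i++) {
 *			if (f->addrs[i] == addr)
 *				return true;
 *		}
 *		return false;
 *	}
 */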

struct lock_stat {
	struct hlist_node hash_entry;
	struct rb_node rb;		/* used for sorting */

	u64 addr;		/* address of lockdep_map, used as ID */
	char *name;		/* for strcpy(), we cannot use const */
	u64 *callstack;

	unsigned int nr_acquire;
	unsigned int nr_acquired;
	unsigned int nr_contended;
	unsigned int nr_release;

	union {
		unsigned int nr_readlock;
		unsigned int flags;
	};
	unsigned int nr_trylock;

	/* these times are in nano sec. */
	u64 avg_wait_time;
	u64 wait_time_total;
	u64 wait_time_min;
	u64 wait_time_max;

	int broken; /* flag of blacklist */
	int combined;
};
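
/*
 * Illustrative note (assumed relationship, not defined by this header):
 * the wait times are accumulated per contended acquisition, so the
 * average is expected to be derived roughly as
 *
 *	ls->avg_wait_time = ls->nr_contended ? ls->wait_time_total / ls->nr_contended : 0;
 */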

/*
 * States of lock_seq_stat
 *
 * UNINITIALIZED is required for detecting the first acquire event.
 * By the nature of lock events, there is no guarantee that the first
 * event seen for a lock is acquire; it can be acquired, contended or
 * release.
 */
#define SEQ_STATE_UNINITIALIZED	0 /* initial state */
#define SEQ_STATE_RELEASED		1
#define SEQ_STATE_ACQUIRING		2
#define SEQ_STATE_ACQUIRED		3
#define SEQ_STATE_READ_ACQUIRED		4
#define SEQ_STATE_CONTENDED		5

/*
 * MAX_LOCK_DEPTH
 * Imported from include/linux/sched.h.
 * Should this be synchronized?
 */
#define MAX_LOCK_DEPTH 48

/* based on kernel/lockdep.c */
#define LOCKHASH_BITS 12
#define LOCKHASH_SIZE (1UL << LOCKHASH_BITS)

extern struct hlist_head *lockhash_table;
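
/*
 * Illustrative sketch (assumed, mirroring how the perf lock code is expected
 * to use this table): a lock address is hashed down to LOCKHASH_BITS bits to
 * pick a bucket in lockhash_table.  The helper names below are assumptions,
 * not declarations made by this header.
 *
 *	#include <linux/hash.h>
 *
 *	#define __lockhashfn(key)	hash_long((unsigned long)(key), LOCKHASH_BITS)
 *	#define lockhashentry(key)	(lockhash_table + __lockhashfn(key))
 *
 *	struct hlist_head *bucket = lockhashentry(addr);
 */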

/*
 * struct lock_seq_stat:
 * Place to keep the state of one lock sequence
 * 1) acquire -> acquired -> release
 * 2) acquire -> contended -> acquired -> release
 * 3) acquire (with read or try) -> release
 * 4) Are there other patterns?
 */
struct lock_seq_stat {
	struct list_head list;
	int state;
	u64 prev_event_time;
	u64 addr;

	int read_count;
};
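
/*
 * Illustrative walk-through (assumed, not a statement of the exact
 * implementation): for sequence 2) above, a thread's lock_seq_stat for a
 * given addr would move through
 *
 *	SEQ_STATE_UNINITIALIZED -> SEQ_STATE_ACQUIRING   (acquire)
 *	                        -> SEQ_STATE_CONTENDED   (contended, wait starts)
 *	                        -> SEQ_STATE_ACQUIRED    (acquired, wait accounted)
 *	                        -> removed from seq_list (release)
 *
 * prev_event_time holds the timestamp of the previous event so the
 * contended-to-acquired interval can be charged as wait time, and
 * read_count tracks nested read-lock acquisitions.
 */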

struct thread_stat {
	struct rb_node rb;

	u32 tid;
	struct list_head seq_list;
};

/*
 * CONTENTION_STACK_DEPTH
 * Number of stack trace entries to find callers
 */
#define CONTENTION_STACK_DEPTH 8

/*
 * CONTENTION_STACK_SKIP
 * Number of stack trace entries to skip when finding callers.
 * The first few entries belong to the locking implementation itself.
 */
#define CONTENTION_STACK_SKIP 4
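
/*
 * Illustrative sketch (assumed): how a caller can be picked out of a sampled
 * callstack using the two constants above.  The first CONTENTION_STACK_SKIP
 * entries are locking internals; any remaining entry that still resolves to
 * a lock function is skipped as well.  machine__is_lock_function() is the
 * existing helper for that check; find_caller() itself is a hypothetical name.
 *
 *	static u64 find_caller(struct machine *machine, u64 *callstack)
 *	{
 *		for (int i = CONTENTION_STACK_SKIP; i < CONTENTION_STACK_DEPTH; i++) {
 *			if (!callstack[i])
 *				break;
 *			if (!machine__is_lock_function(machine, callstack[i]))
 *				return callstack[i];
 *		}
 *		return 0;
 *	}
 */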

/*
 * flags for lock:contention_begin
 * Imported from include/trace/events/lock.h.
 */
#define LCB_F_SPIN	(1U << 0)
#define LCB_F_READ	(1U << 1)
#define LCB_F_WRITE	(1U << 2)
#define LCB_F_RT	(1U << 3)
#define LCB_F_PERCPU	(1U << 4)
#define LCB_F_MUTEX	(1U << 5)
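
/*
 * Illustrative sketch (assumed flag combinations, hypothetical helper): the
 * tracepoint describes the contended lock by OR-ing these bits, e.g. a rwsem
 * reader as LCB_F_READ, a rwlock reader as LCB_F_SPIN | LCB_F_READ, a mutex
 * as LCB_F_MUTEX.  A decoder could look roughly like this:
 *
 *	static const char *lock_type_name(unsigned int flags)
 *	{
 *		if (flags & LCB_F_RT)
 *			return "rt-mutex";
 *		if (flags & LCB_F_MUTEX)
 *			return "mutex";
 *		if (flags & LCB_F_PERCPU)
 *			return (flags & LCB_F_READ) ? "pcpu-sem:R" : "pcpu-sem:W";
 *		if (flags & LCB_F_SPIN) {
 *			if (flags & LCB_F_READ)
 *				return "rwlock:R";
 *			return (flags & LCB_F_WRITE) ? "rwlock:W" : "spinlock";
 *		}
 *		if (flags & LCB_F_READ)
 *			return "rwsem:R";
 *		return (flags & LCB_F_WRITE) ? "rwsem:W" : "unknown";
 *	}
 */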

struct evlist;
struct machine;
struct target;

struct lock_contention_fails {
	int task;
	int stack;
	int time;
	int data;
};

struct lock_contention {
	struct evlist *evlist;
	struct target *target;
	struct machine *machine;
	struct hlist_head *result;
	struct lock_filter *filters;
	struct lock_contention_fails fails;
	struct rb_root cgroups;
	unsigned long map_nr_entries;
	int max_stack;
	int stack_skip;
	int aggr_mode;
	int owner;
	int nr_filtered;
	bool save_callstack;
};
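
/*
 * Illustrative setup sketch (assumed field values, not taken from the real
 * caller): the struct is filled in by the perf lock contention command
 * before the collection API below is used.
 *
 *	struct lock_contention con = {
 *		.target		= &target,
 *		.result		= lockhash_table,
 *		.map_nr_entries	= 16384,
 *		.max_stack	= CONTENTION_STACK_DEPTH,
 *		.stack_skip	= CONTENTION_STACK_SKIP,
 *		.save_callstack	= needs_callstack(),
 *	};
 */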

struct option;
int parse_call_stack(const struct option *opt, const char *str, int unset);
bool needs_callstack(void);

struct lock_stat *lock_stat_find(u64 addr);
struct lock_stat *lock_stat_findnew(u64 addr, const char *name, int flags);

bool match_callstack_filter(struct machine *machine, u64 *callstack, int max_stack_depth);

#ifdef HAVE_BPF_SKEL
int lock_contention_prepare(struct lock_contention *con);
int lock_contention_start(void);
int lock_contention_stop(void);
int lock_contention_read(struct lock_contention *con);
int lock_contention_finish(struct lock_contention *con);

#else /* !HAVE_BPF_SKEL */

static inline int lock_contention_prepare(struct lock_contention *con __maybe_unused)
{
	return 0;
}

static inline int lock_contention_start(void) { return 0; }
static inline int lock_contention_stop(void) { return 0; }
static inline int lock_contention_finish(struct lock_contention *con __maybe_unused)
{
	return 0;
}

static inline int lock_contention_read(struct lock_contention *con __maybe_unused)
{
	return 0;
}

#endif /* HAVE_BPF_SKEL */
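
/*
 * Illustrative usage sketch (assumed, simplified from how the perf lock
 * contention command is expected to drive this API when BPF skeletons are
 * available): error handling and the event loop are omitted.
 *
 *	lock_contention_prepare(&con);	// load and configure the BPF program
 *	lock_contention_start();	// enable collection
 *	// ... run or attach to the workload ...
 *	lock_contention_stop();		// disable collection
 *	lock_contention_read(&con);	// move BPF map contents into con.result
 *	lock_contention_finish(&con);	// detach and free BPF resources
 */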

#endif /* PERF_LOCK_CONTENTION_H */