/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM writeback

#if !defined(_TRACE_WRITEBACK_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_WRITEBACK_H

#include <linux/tracepoint.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>

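/*
 * show_inode_state() decodes the inode->i_state bits recorded in a trace
 * entry into a "|"-separated list of flag names via __print_flags().
 */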
#define show_inode_state(state)					\
	__print_flags(state, "|",				\
		{I_DIRTY_SYNC,		"I_DIRTY_SYNC"},	\
		{I_DIRTY_DATASYNC,	"I_DIRTY_DATASYNC"},	\
		{I_DIRTY_PAGES,		"I_DIRTY_PAGES"},	\
		{I_NEW,			"I_NEW"},		\
		{I_WILL_FREE,		"I_WILL_FREE"},		\
		{I_FREEING,		"I_FREEING"},		\
		{I_CLEAR,		"I_CLEAR"},		\
		{I_SYNC,		"I_SYNC"},		\
		{I_DIRTY_TIME,		"I_DIRTY_TIME"},	\
		{I_REFERENCED,		"I_REFERENCED"},	\
		{I_LINKABLE,		"I_LINKABLE"},		\
		{I_WB_SWITCH,		"I_WB_SWITCH"},		\
		{I_OVL_INUSE,		"I_OVL_INUSE"},		\
		{I_CREATING,		"I_CREATING"},		\
		{I_DONTCACHE,		"I_DONTCACHE"},		\
		{I_SYNC_QUEUED,		"I_SYNC_QUEUED"},	\
		{I_PINNING_NETFS_WB,	"I_PINNING_NETFS_WB"},	\
		{I_LRU_ISOLATING,	"I_LRU_ISOLATING"}	\
	)

/* enums need to be exported to user space */
#undef EM
#undef EMe
#define EM(a,b)		TRACE_DEFINE_ENUM(a);
#define EMe(a,b)	TRACE_DEFINE_ENUM(a);

#define WB_WORK_REASON							\
	EM( WB_REASON_BACKGROUND,		"background")		\
	EM( WB_REASON_VMSCAN,			"vmscan")		\
	EM( WB_REASON_SYNC,			"sync")			\
	EM( WB_REASON_PERIODIC,			"periodic")		\
	EM( WB_REASON_LAPTOP_TIMER,		"laptop_timer")		\
	EM( WB_REASON_FS_FREE_SPACE,		"fs_free_space")	\
	EM( WB_REASON_FORKER_THREAD,		"forker_thread")	\
	EMe(WB_REASON_FOREIGN_FLUSH,		"foreign_flush")

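/*
 * WB_WORK_REASON is an "X macro" list that is expanded twice: first (just
 * below) with the EM()/EMe() definitions above, emitting TRACE_DEFINE_ENUM()
 * so user space can resolve the wb_reason values, and then again as the
 * symbol table passed to __print_symbolic() once EM()/EMe() are redefined.
 */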
WB_WORK_REASON

/*
 * Now redefine the EM() and EMe() macros to map the enums to the strings
 * that will be printed in the output.
 */
#undef EM
#undef EMe
#define EM(a,b)		{ a, b },
#define EMe(a,b)	{ a, b }

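/*
 * struct wb_writeback_work is private to fs/fs-writeback.c; the tracepoint
 * prototypes below only need this forward declaration.
 */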
struct wb_writeback_work;

DECLARE_EVENT_CLASS(writeback_folio_template,

	TP_PROTO(struct folio *folio, struct address_space *mapping),

	TP_ARGS(folio, mapping),

	TP_STRUCT__entry (
		__array(char, name, 32)
		__field(ino_t, ino)
		__field(pgoff_t, index)
	),

	TP_fast_assign(
		strscpy_pad(__entry->name,
			    bdi_dev_name(mapping ? inode_to_bdi(mapping->host) :
					 NULL), 32);
		__entry->ino = (mapping && mapping->host) ? mapping->host->i_ino : 0;
		__entry->index = folio->index;
	),

	TP_printk("bdi %s: ino=%lu index=%lu",
		__entry->name,
		(unsigned long)__entry->ino,
		__entry->index
	)
);

DEFINE_EVENT(writeback_folio_template, writeback_dirty_folio,

	TP_PROTO(struct folio *folio, struct address_space *mapping),

	TP_ARGS(folio, mapping)
);

DEFINE_EVENT(writeback_folio_template, folio_wait_writeback,

	TP_PROTO(struct folio *folio, struct address_space *mapping),

	TP_ARGS(folio, mapping)
);

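/*
 * Usage sketch (assuming tracefs is mounted at /sys/kernel/tracing):
 *
 *	echo 1 > /sys/kernel/tracing/events/writeback/writeback_dirty_folio/enable
 *	cat /sys/kernel/tracing/trace_pipe
 *
 * enables the writeback_dirty_folio event defined above and streams it.
 */
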
DECLARE_EVENT_CLASS(writeback_dirty_inode_template,

	TP_PROTO(struct inode *inode, int flags),

	TP_ARGS(inode, flags),

	TP_STRUCT__entry (
		__array(char, name, 32)
		__field(ino_t, ino)
		__field(unsigned long, state)
		__field(unsigned long, flags)
	),

	TP_fast_assign(
		struct backing_dev_info *bdi = inode_to_bdi(inode);

		/* may be called for files on pseudo FSes w/ unregistered bdi */
		strscpy_pad(__entry->name, bdi_dev_name(bdi), 32);
		__entry->ino		= inode->i_ino;
		__entry->state		= inode->i_state;
		__entry->flags		= flags;
	),

	TP_printk("bdi %s: ino=%lu state=%s flags=%s",
		__entry->name,
		(unsigned long)__entry->ino,
		show_inode_state(__entry->state),
		show_inode_state(__entry->flags)
	)
);

DEFINE_EVENT(writeback_dirty_inode_template, writeback_mark_inode_dirty,

	TP_PROTO(struct inode *inode, int flags),

	TP_ARGS(inode, flags)
);

DEFINE_EVENT(writeback_dirty_inode_template, writeback_dirty_inode_start,

	TP_PROTO(struct inode *inode, int flags),

	TP_ARGS(inode, flags)
);

DEFINE_EVENT(writeback_dirty_inode_template, writeback_dirty_inode,

	TP_PROTO(struct inode *inode, int flags),

	TP_ARGS(inode, flags)
);

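/*
 * Helpers used in TP_fast_assign() below; they are only compiled in the
 * translation unit that defines CREATE_TRACE_POINTS.  They resolve the
 * inode number of the memory cgroup a bdi_writeback (or the wb attached
 * to a writeback_control) belongs to; when cgroup writeback is not in
 * use, 1 (the root cgroup) is reported.
 */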
#ifdef CREATE_TRACE_POINTS
#ifdef CONFIG_CGROUP_WRITEBACK

static inline ino_t __trace_wb_assign_cgroup(struct bdi_writeback *wb)
{
	return cgroup_ino(wb->memcg_css->cgroup);
}

static inline ino_t __trace_wbc_assign_cgroup(struct writeback_control *wbc)
{
	if (wbc->wb)
		return __trace_wb_assign_cgroup(wbc->wb);
	else
		return 1;
}
#else	/* CONFIG_CGROUP_WRITEBACK */

static inline ino_t __trace_wb_assign_cgroup(struct bdi_writeback *wb)
{
	return 1;
}

static inline ino_t __trace_wbc_assign_cgroup(struct writeback_control *wbc)
{
	return 1;
}

#endif	/* CONFIG_CGROUP_WRITEBACK */
#endif	/* CREATE_TRACE_POINTS */

#ifdef CONFIG_CGROUP_WRITEBACK
TRACE_EVENT(inode_foreign_history,

	TP_PROTO(struct inode *inode, struct writeback_control *wbc,
		 unsigned int history),

	TP_ARGS(inode, wbc, history),

	TP_STRUCT__entry(
		__array(char,		name, 32)
		__field(ino_t,		ino)
		__field(ino_t,		cgroup_ino)
		__field(unsigned int,	history)
	),

	TP_fast_assign(
		strscpy_pad(__entry->name, bdi_dev_name(inode_to_bdi(inode)), 32);
		__entry->ino		= inode->i_ino;
		__entry->cgroup_ino	= __trace_wbc_assign_cgroup(wbc);
		__entry->history	= history;
	),

	TP_printk("bdi %s: ino=%lu cgroup_ino=%lu history=0x%x",
		__entry->name,
		(unsigned long)__entry->ino,
		(unsigned long)__entry->cgroup_ino,
		__entry->history
	)
);

TRACE_EVENT(inode_switch_wbs,

	TP_PROTO(struct inode *inode, struct bdi_writeback *old_wb,
		 struct bdi_writeback *new_wb),

	TP_ARGS(inode, old_wb, new_wb),

	TP_STRUCT__entry(
		__array(char,		name, 32)
		__field(ino_t,		ino)
		__field(ino_t,		old_cgroup_ino)
		__field(ino_t,		new_cgroup_ino)
	),

	TP_fast_assign(
		strscpy_pad(__entry->name, bdi_dev_name(old_wb->bdi), 32);
		__entry->ino		= inode->i_ino;
		__entry->old_cgroup_ino	= __trace_wb_assign_cgroup(old_wb);
		__entry->new_cgroup_ino	= __trace_wb_assign_cgroup(new_wb);
	),

	TP_printk("bdi %s: ino=%lu old_cgroup_ino=%lu new_cgroup_ino=%lu",
		__entry->name,
		(unsigned long)__entry->ino,
		(unsigned long)__entry->old_cgroup_ino,
		(unsigned long)__entry->new_cgroup_ino
	)
);

TRACE_EVENT(track_foreign_dirty,

	TP_PROTO(struct folio *folio, struct bdi_writeback *wb),

	TP_ARGS(folio, wb),

	TP_STRUCT__entry(
		__array(char,		name, 32)
		__field(u64,		bdi_id)
		__field(ino_t,		ino)
		__field(unsigned int,	memcg_id)
		__field(ino_t,		cgroup_ino)
		__field(ino_t,		page_cgroup_ino)
	),

	TP_fast_assign(
		struct address_space *mapping = folio_mapping(folio);
		struct inode *inode = mapping ? mapping->host : NULL;

		strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
		__entry->bdi_id		= wb->bdi->id;
		__entry->ino		= inode ? inode->i_ino : 0;
		__entry->memcg_id	= wb->memcg_css->id;
		__entry->cgroup_ino	= __trace_wb_assign_cgroup(wb);
		__entry->page_cgroup_ino = cgroup_ino(folio_memcg(folio)->css.cgroup);
	),

	TP_printk("bdi %s[%llu]: ino=%lu memcg_id=%u cgroup_ino=%lu page_cgroup_ino=%lu",
		__entry->name,
		__entry->bdi_id,
		(unsigned long)__entry->ino,
		__entry->memcg_id,
		(unsigned long)__entry->cgroup_ino,
		(unsigned long)__entry->page_cgroup_ino
	)
);

TRACE_EVENT(flush_foreign,

	TP_PROTO(struct bdi_writeback *wb, unsigned int frn_bdi_id,
		 unsigned int frn_memcg_id),

	TP_ARGS(wb, frn_bdi_id, frn_memcg_id),

	TP_STRUCT__entry(
		__array(char,		name, 32)
		__field(ino_t,		cgroup_ino)
		__field(unsigned int,	frn_bdi_id)
		__field(unsigned int,	frn_memcg_id)
	),

	TP_fast_assign(
		strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
		__entry->cgroup_ino	= __trace_wb_assign_cgroup(wb);
		__entry->frn_bdi_id	= frn_bdi_id;
		__entry->frn_memcg_id	= frn_memcg_id;
	),

	TP_printk("bdi %s: cgroup_ino=%lu frn_bdi_id=%u frn_memcg_id=%u",
		__entry->name,
		(unsigned long)__entry->cgroup_ino,
		__entry->frn_bdi_id,
		__entry->frn_memcg_id
	)
);
#endif	/* CONFIG_CGROUP_WRITEBACK */

DECLARE_EVENT_CLASS(writeback_write_inode_template,

	TP_PROTO(struct inode *inode, struct writeback_control *wbc),

	TP_ARGS(inode, wbc),

	TP_STRUCT__entry (
		__array(char, name, 32)
		__field(ino_t, ino)
		__field(int, sync_mode)
		__field(ino_t, cgroup_ino)
	),

	TP_fast_assign(
		strscpy_pad(__entry->name,
			    bdi_dev_name(inode_to_bdi(inode)), 32);
		__entry->ino		= inode->i_ino;
		__entry->sync_mode	= wbc->sync_mode;
		__entry->cgroup_ino	= __trace_wbc_assign_cgroup(wbc);
	),

	TP_printk("bdi %s: ino=%lu sync_mode=%d cgroup_ino=%lu",
		__entry->name,
		(unsigned long)__entry->ino,
		__entry->sync_mode,
		(unsigned long)__entry->cgroup_ino
	)
);

DEFINE_EVENT(writeback_write_inode_template, writeback_write_inode_start,

	TP_PROTO(struct inode *inode, struct writeback_control *wbc),

	TP_ARGS(inode, wbc)
);

DEFINE_EVENT(writeback_write_inode_template, writeback_write_inode,

	TP_PROTO(struct inode *inode, struct writeback_control *wbc),

	TP_ARGS(inode, wbc)
);

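/*
 * writeback_work_class events follow a wb_writeback_work item through the
 * flusher: queueing, execution, start, completion and waiting.  The reason
 * field is printed symbolically via the WB_WORK_REASON table above.
 */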
DECLARE_EVENT_CLASS(writeback_work_class,
	TP_PROTO(struct bdi_writeback *wb, struct wb_writeback_work *work),
	TP_ARGS(wb, work),
	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(long, nr_pages)
		__field(dev_t, sb_dev)
		__field(int, sync_mode)
		__field(int, for_kupdate)
		__field(int, range_cyclic)
		__field(int, for_background)
		__field(int, reason)
		__field(ino_t, cgroup_ino)
	),
	TP_fast_assign(
		strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
		__entry->nr_pages = work->nr_pages;
		__entry->sb_dev = work->sb ? work->sb->s_dev : 0;
		__entry->sync_mode = work->sync_mode;
		__entry->for_kupdate = work->for_kupdate;
		__entry->range_cyclic = work->range_cyclic;
		__entry->for_background	= work->for_background;
		__entry->reason = work->reason;
		__entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
	),
	TP_printk("bdi %s: sb_dev %d:%d nr_pages=%ld sync_mode=%d "
		  "kupdate=%d range_cyclic=%d background=%d reason=%s cgroup_ino=%lu",
		  __entry->name,
		  MAJOR(__entry->sb_dev), MINOR(__entry->sb_dev),
		  __entry->nr_pages,
		  __entry->sync_mode,
		  __entry->for_kupdate,
		  __entry->range_cyclic,
		  __entry->for_background,
		  __print_symbolic(__entry->reason, WB_WORK_REASON),
		  (unsigned long)__entry->cgroup_ino
	)
);
#define DEFINE_WRITEBACK_WORK_EVENT(name) \
DEFINE_EVENT(writeback_work_class, name, \
	TP_PROTO(struct bdi_writeback *wb, struct wb_writeback_work *work), \
	TP_ARGS(wb, work))
DEFINE_WRITEBACK_WORK_EVENT(writeback_queue);
DEFINE_WRITEBACK_WORK_EVENT(writeback_exec);
DEFINE_WRITEBACK_WORK_EVENT(writeback_start);
DEFINE_WRITEBACK_WORK_EVENT(writeback_written);
DEFINE_WRITEBACK_WORK_EVENT(writeback_wait);

TRACE_EVENT(writeback_pages_written,
	TP_PROTO(long pages_written),
	TP_ARGS(pages_written),
	TP_STRUCT__entry(
		__field(long,		pages)
	),
	TP_fast_assign(
		__entry->pages		= pages_written;
	),
	TP_printk("%ld", __entry->pages)
);

DECLARE_EVENT_CLASS(writeback_class,
	TP_PROTO(struct bdi_writeback *wb),
	TP_ARGS(wb),
	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(ino_t, cgroup_ino)
	),
	TP_fast_assign(
		strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
		__entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
	),
	TP_printk("bdi %s: cgroup_ino=%lu",
		  __entry->name,
		  (unsigned long)__entry->cgroup_ino
	)
);
#define DEFINE_WRITEBACK_EVENT(name) \
DEFINE_EVENT(writeback_class, name, \
	TP_PROTO(struct bdi_writeback *wb), \
	TP_ARGS(wb))

DEFINE_WRITEBACK_EVENT(writeback_wake_background);

TRACE_EVENT(writeback_bdi_register,
	TP_PROTO(struct backing_dev_info *bdi),
	TP_ARGS(bdi),
	TP_STRUCT__entry(
		__array(char, name, 32)
	),
	TP_fast_assign(
		strscpy_pad(__entry->name, bdi_dev_name(bdi), 32);
	),
	TP_printk("bdi %s",
		__entry->name
	)
);

DECLARE_EVENT_CLASS(wbc_class,
	TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi),
	TP_ARGS(wbc, bdi),
	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(long, nr_to_write)
		__field(long, pages_skipped)
		__field(int, sync_mode)
		__field(int, for_kupdate)
		__field(int, for_background)
		__field(int, for_reclaim)
		__field(int, range_cyclic)
		__field(long, range_start)
		__field(long, range_end)
		__field(ino_t, cgroup_ino)
	),

	TP_fast_assign(
		strscpy_pad(__entry->name, bdi_dev_name(bdi), 32);
		__entry->nr_to_write	= wbc->nr_to_write;
		__entry->pages_skipped	= wbc->pages_skipped;
		__entry->sync_mode	= wbc->sync_mode;
		__entry->for_kupdate	= wbc->for_kupdate;
		__entry->for_background	= wbc->for_background;
		__entry->for_reclaim	= wbc->for_reclaim;
		__entry->range_cyclic	= wbc->range_cyclic;
		__entry->range_start	= (long)wbc->range_start;
		__entry->range_end	= (long)wbc->range_end;
		__entry->cgroup_ino	= __trace_wbc_assign_cgroup(wbc);
	),

	TP_printk("bdi %s: towrt=%ld skip=%ld mode=%d kupd=%d "
		"bgrd=%d reclm=%d cyclic=%d "
		"start=0x%lx end=0x%lx cgroup_ino=%lu",
		__entry->name,
		__entry->nr_to_write,
		__entry->pages_skipped,
		__entry->sync_mode,
		__entry->for_kupdate,
		__entry->for_background,
		__entry->for_reclaim,
		__entry->range_cyclic,
		__entry->range_start,
		__entry->range_end,
		(unsigned long)__entry->cgroup_ino
	)
)

#define DEFINE_WBC_EVENT(name) \
DEFINE_EVENT(wbc_class, name, \
	TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi), \
	TP_ARGS(wbc, bdi))
DEFINE_WBC_EVENT(wbc_writepage);

TRACE_EVENT(writeback_queue_io,
	TP_PROTO(struct bdi_writeback *wb,
		 struct wb_writeback_work *work,
		 unsigned long dirtied_before,
		 int moved),
	TP_ARGS(wb, work, dirtied_before, moved),
	TP_STRUCT__entry(
		__array(char,		name, 32)
		__field(unsigned long,	older)
		__field(long,		age)
		__field(int,		moved)
		__field(int,		reason)
		__field(ino_t,		cgroup_ino)
	),
	TP_fast_assign(
		strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
		__entry->older	= dirtied_before;
		__entry->age	= (jiffies - dirtied_before) * 1000 / HZ;
		__entry->moved	= moved;
		__entry->reason	= work->reason;
		__entry->cgroup_ino	= __trace_wb_assign_cgroup(wb);
	),
	TP_printk("bdi %s: older=%lu age=%ld enqueue=%d reason=%s cgroup_ino=%lu",
		__entry->name,
		__entry->older,	/* dirtied_before in jiffies */
		__entry->age,	/* dirtied_before in relative milliseconds */
		__entry->moved,
		__print_symbolic(__entry->reason, WB_WORK_REASON),
		(unsigned long)__entry->cgroup_ino
	)
);

TRACE_EVENT(global_dirty_state,

	TP_PROTO(unsigned long background_thresh,
		 unsigned long dirty_thresh
	),

	TP_ARGS(background_thresh,
		dirty_thresh
	),

	TP_STRUCT__entry(
		__field(unsigned long,	nr_dirty)
		__field(unsigned long,	nr_writeback)
		__field(unsigned long,	background_thresh)
		__field(unsigned long,	dirty_thresh)
		__field(unsigned long,	dirty_limit)
		__field(unsigned long,	nr_dirtied)
		__field(unsigned long,	nr_written)
	),

	TP_fast_assign(
		__entry->nr_dirty	= global_node_page_state(NR_FILE_DIRTY);
		__entry->nr_writeback	= global_node_page_state(NR_WRITEBACK);
		__entry->nr_dirtied	= global_node_page_state(NR_DIRTIED);
		__entry->nr_written	= global_node_page_state(NR_WRITTEN);
		__entry->background_thresh = background_thresh;
		__entry->dirty_thresh	= dirty_thresh;
		__entry->dirty_limit	= global_wb_domain.dirty_limit;
	),

	TP_printk("dirty=%lu writeback=%lu "
		  "bg_thresh=%lu thresh=%lu limit=%lu "
		  "dirtied=%lu written=%lu",
		  __entry->nr_dirty,
		  __entry->nr_writeback,
		  __entry->background_thresh,
		  __entry->dirty_thresh,
		  __entry->dirty_limit,
		  __entry->nr_dirtied,
		  __entry->nr_written
	)
);

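/*
 * Convert a page count (or pages/s rate) to KiB: shifting by PAGE_SHIFT
 * would give bytes, so shifting by PAGE_SHIFT - 10 gives KiB directly.
 */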
#define KBps(x)			((x) << (PAGE_SHIFT - 10))

TRACE_EVENT(bdi_dirty_ratelimit,

	TP_PROTO(struct bdi_writeback *wb,
		 unsigned long dirty_rate,
		 unsigned long task_ratelimit),

	TP_ARGS(wb, dirty_rate, task_ratelimit),

	TP_STRUCT__entry(
		__array(char,		bdi, 32)
		__field(unsigned long,	write_bw)
		__field(unsigned long,	avg_write_bw)
		__field(unsigned long,	dirty_rate)
		__field(unsigned long,	dirty_ratelimit)
		__field(unsigned long,	task_ratelimit)
		__field(unsigned long,	balanced_dirty_ratelimit)
		__field(ino_t,		cgroup_ino)
	),

	TP_fast_assign(
		strscpy_pad(__entry->bdi, bdi_dev_name(wb->bdi), 32);
		__entry->write_bw	= KBps(wb->write_bandwidth);
		__entry->avg_write_bw	= KBps(wb->avg_write_bandwidth);
		__entry->dirty_rate	= KBps(dirty_rate);
		__entry->dirty_ratelimit = KBps(wb->dirty_ratelimit);
		__entry->task_ratelimit	= KBps(task_ratelimit);
		__entry->balanced_dirty_ratelimit =
					KBps(wb->balanced_dirty_ratelimit);
		__entry->cgroup_ino	= __trace_wb_assign_cgroup(wb);
	),

	TP_printk("bdi %s: "
		  "write_bw=%lu awrite_bw=%lu dirty_rate=%lu "
		  "dirty_ratelimit=%lu task_ratelimit=%lu "
		  "balanced_dirty_ratelimit=%lu cgroup_ino=%lu",
		  __entry->bdi,
		  __entry->write_bw,		/* write bandwidth */
		  __entry->avg_write_bw,	/* avg write bandwidth */
		  __entry->dirty_rate,		/* bdi dirty rate */
		  __entry->dirty_ratelimit,	/* base ratelimit */
		  __entry->task_ratelimit, /* ratelimit with position control */
		  __entry->balanced_dirty_ratelimit, /* the balanced ratelimit */
		  (unsigned long)__entry->cgroup_ino
	)
);

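/*
 * Snapshot of the dirty throttling state seen by a task in
 * balance_dirty_pages().  The setpoint computed in TP_fast_assign() mirrors
 * mm/page-writeback.c: freerun is the midpoint of the background and dirty
 * thresholds, and setpoint is the midpoint of freerun and the hard limit.
 */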
TRACE_EVENT(balance_dirty_pages,

	TP_PROTO(struct bdi_writeback *wb,
		 struct dirty_throttle_control *dtc,
		 unsigned long dirty_ratelimit,
		 unsigned long task_ratelimit,
		 unsigned long dirtied,
		 unsigned long period,
		 long pause,
		 unsigned long start_time),

	TP_ARGS(wb, dtc,
		dirty_ratelimit, task_ratelimit,
		dirtied, period, pause, start_time),

	TP_STRUCT__entry(
		__array(	 char,	bdi, 32)
		__field(unsigned long,	limit)
		__field(unsigned long,	setpoint)
		__field(unsigned long,	dirty)
		__field(unsigned long,	bdi_setpoint)
		__field(unsigned long,	bdi_dirty)
		__field(unsigned long,	dirty_ratelimit)
		__field(unsigned long,	task_ratelimit)
		__field(unsigned int,	dirtied)
		__field(unsigned int,	dirtied_pause)
		__field(unsigned long,	paused)
		__field(	 long,	pause)
		__field(unsigned long,	period)
		__field(	 long,	think)
		__field(ino_t,		cgroup_ino)
	),

	TP_fast_assign(
		unsigned long freerun = (dtc->thresh + dtc->bg_thresh) / 2;
		strscpy_pad(__entry->bdi, bdi_dev_name(wb->bdi), 32);

		__entry->limit		= dtc->limit;
		__entry->setpoint	= (dtc->limit + freerun) / 2;
		__entry->dirty		= dtc->dirty;
		__entry->bdi_setpoint	= __entry->setpoint *
						dtc->wb_thresh / (dtc->thresh + 1);
		__entry->bdi_dirty	= dtc->wb_dirty;
		__entry->dirty_ratelimit = KBps(dirty_ratelimit);
		__entry->task_ratelimit	= KBps(task_ratelimit);
		__entry->dirtied	= dirtied;
		__entry->dirtied_pause	= current->nr_dirtied_pause;
		__entry->think		= current->dirty_paused_when == 0 ? 0 :
			 (long)(jiffies - current->dirty_paused_when) * 1000/HZ;
		__entry->period		= period * 1000 / HZ;
		__entry->pause		= pause * 1000 / HZ;
		__entry->paused		= (jiffies - start_time) * 1000 / HZ;
		__entry->cgroup_ino	= __trace_wb_assign_cgroup(wb);
	),


	TP_printk("bdi %s: "
		  "limit=%lu setpoint=%lu dirty=%lu "
		  "bdi_setpoint=%lu bdi_dirty=%lu "
		  "dirty_ratelimit=%lu task_ratelimit=%lu "
		  "dirtied=%u dirtied_pause=%u "
		  "paused=%lu pause=%ld period=%lu think=%ld cgroup_ino=%lu",
		  __entry->bdi,
		  __entry->limit,
		  __entry->setpoint,
		  __entry->dirty,
		  __entry->bdi_setpoint,
		  __entry->bdi_dirty,
		  __entry->dirty_ratelimit,
		  __entry->task_ratelimit,
		  __entry->dirtied,
		  __entry->dirtied_pause,
		  __entry->paused,	/* ms */
		  __entry->pause,	/* ms */
		  __entry->period,	/* ms */
		  __entry->think,	/* ms */
		  (unsigned long)__entry->cgroup_ino
	  )
);

TRACE_EVENT(writeback_sb_inodes_requeue,

	TP_PROTO(struct inode *inode),
	TP_ARGS(inode),

	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(ino_t, ino)
		__field(unsigned long, state)
		__field(unsigned long, dirtied_when)
		__field(ino_t, cgroup_ino)
	),

	TP_fast_assign(
		strscpy_pad(__entry->name,
			    bdi_dev_name(inode_to_bdi(inode)), 32);
		__entry->ino		= inode->i_ino;
		__entry->state		= inode->i_state;
		__entry->dirtied_when	= inode->dirtied_when;
		__entry->cgroup_ino	= __trace_wb_assign_cgroup(inode_to_wb(inode));
	),

	TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu cgroup_ino=%lu",
		  __entry->name,
		  (unsigned long)__entry->ino,
		  show_inode_state(__entry->state),
		  __entry->dirtied_when,
		  (jiffies - __entry->dirtied_when) / HZ,
		  (unsigned long)__entry->cgroup_ino
	)
);

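/*
 * For the events below, "wrote" is the number of pages written so far:
 * the nr_to_write budget passed in minus what remains in wbc->nr_to_write.
 */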
DECLARE_EVENT_CLASS(writeback_single_inode_template,

	TP_PROTO(struct inode *inode,
		 struct writeback_control *wbc,
		 unsigned long nr_to_write
	),

	TP_ARGS(inode, wbc, nr_to_write),

	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(ino_t, ino)
		__field(unsigned long, state)
		__field(unsigned long, dirtied_when)
		__field(unsigned long, writeback_index)
		__field(long, nr_to_write)
		__field(unsigned long, wrote)
		__field(ino_t, cgroup_ino)
	),

	TP_fast_assign(
		strscpy_pad(__entry->name,
			    bdi_dev_name(inode_to_bdi(inode)), 32);
		__entry->ino		= inode->i_ino;
		__entry->state		= inode->i_state;
		__entry->dirtied_when	= inode->dirtied_when;
		__entry->writeback_index = inode->i_mapping->writeback_index;
		__entry->nr_to_write	= nr_to_write;
		__entry->wrote		= nr_to_write - wbc->nr_to_write;
		__entry->cgroup_ino	= __trace_wbc_assign_cgroup(wbc);
	),

	TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu "
		  "index=%lu to_write=%ld wrote=%lu cgroup_ino=%lu",
		  __entry->name,
		  (unsigned long)__entry->ino,
		  show_inode_state(__entry->state),
		  __entry->dirtied_when,
		  (jiffies - __entry->dirtied_when) / HZ,
		  __entry->writeback_index,
		  __entry->nr_to_write,
		  __entry->wrote,
		  (unsigned long)__entry->cgroup_ino
	)
);

DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode_start,
	TP_PROTO(struct inode *inode,
		 struct writeback_control *wbc,
		 unsigned long nr_to_write),
	TP_ARGS(inode, wbc, nr_to_write)
);

DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode,
	TP_PROTO(struct inode *inode,
		 struct writeback_control *wbc,
		 unsigned long nr_to_write),
	TP_ARGS(inode, wbc, nr_to_write)
);

DECLARE_EVENT_CLASS(writeback_inode_template,
	TP_PROTO(struct inode *inode),

	TP_ARGS(inode),

	TP_STRUCT__entry(
		__field(	dev_t,	dev			)
		__field(	ino_t,	ino			)
		__field(unsigned long,	state			)
		__field(	__u16, mode			)
		__field(unsigned long, dirtied_when		)
	),

	TP_fast_assign(
		__entry->dev	= inode->i_sb->s_dev;
		__entry->ino	= inode->i_ino;
		__entry->state	= inode->i_state;
		__entry->mode	= inode->i_mode;
		__entry->dirtied_when = inode->dirtied_when;
	),

	TP_printk("dev %d,%d ino %lu dirtied %lu state %s mode 0%o",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  (unsigned long)__entry->ino, __entry->dirtied_when,
		  show_inode_state(__entry->state), __entry->mode)
);

DEFINE_EVENT(writeback_inode_template, writeback_lazytime,
	TP_PROTO(struct inode *inode),

	TP_ARGS(inode)
);

DEFINE_EVENT(writeback_inode_template, writeback_lazytime_iput,
	TP_PROTO(struct inode *inode),

	TP_ARGS(inode)
);

DEFINE_EVENT(writeback_inode_template, writeback_dirty_inode_enqueue,

	TP_PROTO(struct inode *inode),

	TP_ARGS(inode)
);

/*
 * Inode writeback list tracking.
 */

DEFINE_EVENT(writeback_inode_template, sb_mark_inode_writeback,
	TP_PROTO(struct inode *inode),
	TP_ARGS(inode)
);

DEFINE_EVENT(writeback_inode_template, sb_clear_inode_writeback,
	TP_PROTO(struct inode *inode),
	TP_ARGS(inode)
);

#endif /* _TRACE_WRITEBACK_H */

/* This part must be outside protection */
#include <trace/define_trace.h>