
 * Macros for manipulating and testing page->flags
 * Various page->flags bits:
 * PG_reserved is set for special pages. The "struct page" of such a page
 * - Pages reserved or allocated early during boot (before the page allocator
 *   initial vmemmap, initial page tables, crashkernel, elfcorehdr, and much
 *   be given to the page allocator.
 * - The zero page(s)
 * Consequently, PG_reserved for a page mapped into user space can indicate
 * the zero page, the vDSO, MMIO pages or device memory.
 * specific data (which is normally at page->private). It can be used by
 * PG_locked also pins a page in pagecache, and blocks truncation of the file
 * page_waitqueue(page) is a wait queue of all tasks waiting for the page
 * PG_swapbacked is set when a page uses swap as a backing storage. These are
 * PG_referenced, PG_reclaim are used for page reclaim for anonymous and
 * PG_arch_1 is an architecture specific page state bit. The generic code
 * guarantees that this bit is cleared for a page when it first is entered into
 * the page cache.
 * PG_hwpoison indicates that a page got corrupted in hardware and contains
 * The page flags field is split into two parts, the main flags area
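The other part of that split holds placement information (zone, node and, depending on the config, section). As a rough illustration only (not part of this header), helpers such as page_zonenum() in include/linux/mm.h decode it along these lines:

/*
 * Sketch: the zone a page belongs to lives in the upper bits of page->flags
 * and is recovered by shifting past the flag bits and masking. ZONES_PGSHIFT
 * and ZONES_MASK are built from the layout macros in linux/page-flags-layout.h.
 */
static inline enum zone_type example_page_zonenum(const struct page *page)
{
	return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
}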
PG_locked, /* Page is locked. Don't touch. */
PG_writeback, /* Page is under writeback */
PG_waiters, /* Page has waiters, check its waitqueue. Must be bit #7 and in the same byte as "PG_locked" */
PG_swapbacked, /* Page is backed by RAM/swap */
PG_unevictable, /* Page is "unevictable" */
PG_mlocked, /* Page is vma mlocked */
PG_hwpoison, /* hardware poisoned page. Don't touch */
PG_swapcache = PG_owner_priv_1, /* Swap page: swp_entry_t in private */
 * Depending on the way an anonymous folio can be mapped into a page
 * table (e.g., single PMD/PUD/CONT of the head page vs. PTE-mapped
 * THP), PG_anon_exclusive may be set only for the head page or for
/* Two page bits are conscripted by FS-Cache to maintain local caching
PG_fscache = PG_private_2, /* page backed by cache */
/* Pinned in Xen as a read-only pagetable page. */
/* Has a grant mapping of another (foreign) domain's page. */
/* non-lru isolated movable page */
 * Flags only valid for compound pages. Stored in first tail page's
/* At least one page in this folio has the hwpoison flag set */
 * Return the real head page struct iff the @page is a fake head page, otherwise
 * return the @page itself. See Documentation/mm/vmemmap_dedup.rst.
static __always_inline const struct page *page_fixed_fake_head(const struct page *page)
	return page;
 * Only addresses aligned with PAGE_SIZE of struct page may be fake head
 * struct page. The alignment check aims to avoid accessing the fields
 * (e.g. compound_head) of @page[1]. It avoids touching a (possibly)
	if (IS_ALIGNED((unsigned long)page, PAGE_SIZE) &&
	    test_bit(PG_head, &page->flags)) {
 * We can safely access the fields of @page[1] with PG_head
 * because the @page is a compound page composed of at least
	unsigned long head = READ_ONCE(page[1].compound_head);
	return (const struct page *)(head - 1);
	return page;
static inline const struct page *page_fixed_fake_head(const struct page *page)
	return page;
static __always_inline int page_is_fake_head(const struct page *page)
	return page_fixed_fake_head(page) != page;
static __always_inline unsigned long _compound_head(const struct page *page)
	unsigned long head = READ_ONCE(page->compound_head);
	return (unsigned long)page_fixed_fake_head(page);
#define compound_head(page) ((typeof(page))_compound_head(page))
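A short usage sketch (illustrative, not part of the header): compound_head() accepts any page of a compound page, preserves constness via typeof, and is commonly used to normalize pointers before comparing or testing them:

/* Sketch only: do two struct page pointers belong to the same compound page? */
static inline bool example_same_compound_page(struct page *a, struct page *b)
{
	return compound_head(a) == compound_head(b);
}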
 * page_folio - Converts from page to folio.
 * @p: The page.
 * Every page is part of a folio. This function cannot be called on a
 * Context: No reference, nor lock is required on @page. If the caller
 * it should re-check the folio still contains this page after gaining
 * Return: The folio which contains this page.
	const struct page *: (const struct folio *)_compound_head(p), \
	struct page *: (struct folio *)_compound_head(p)))
 * folio_page - Return a page from a folio.
 * @n: The page number to return.
 * check that the page number lies within @folio; the caller is presumed
 * to have a reference to the page.
#define folio_page(folio, n) nth_page(&(folio)->page, n)
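Tying the two conversions together, a small sketch (illustrative only; the assertion restates an invariant of the definitions above):

static inline void example_folio_roundtrip(struct page *page)
{
	struct folio *folio = page_folio(page);	/* the folio containing @page */

	/* folio_page(folio, 0) is the folio's first page, i.e. compound_head(page) */
	VM_BUG_ON_PAGE(folio_page(folio, 0) != compound_head(page), page);
}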
static __always_inline int PageTail(const struct page *page)
	return READ_ONCE(page->compound_head) & 1 || page_is_fake_head(page);
static __always_inline int PageCompound(const struct page *page)
	return test_bit(PG_head, &page->flags) ||
		READ_ONCE(page->compound_head) & 1;
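The three tests fit together as in this sketch (illustrative only, not from the header):

/*
 * PageCompound(): the page is part of a compound page (head or tail).
 * PageHead():     the page is the first page of a compound page.
 * PageTail():     the page is any other page of a compound page.
 */
static inline void example_compound_invariants(struct page *page)
{
	if (PageCompound(page))
		VM_BUG_ON_PAGE(!PageHead(page) && !PageTail(page), page);
}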
static inline int PagePoisoned(const struct page *page)
	return READ_ONCE(page->flags) == PAGE_POISON_PATTERN;
void page_init_poison(struct page *page, size_t size);
static inline void page_init_poison(struct page *page, size_t size)
	const struct page *page = &folio->page;
	VM_BUG_ON_PGFLAGS(page->compound_head & 1, page);
	VM_BUG_ON_PGFLAGS(n > 0 && !test_bit(PG_head, &page->flags), page);
	return &page[n].flags;
	struct page *page = &folio->page;
	VM_BUG_ON_PGFLAGS(page->compound_head & 1, page);
	VM_BUG_ON_PGFLAGS(n > 0 && !test_bit(PG_head, &page->flags), page);
	return &page[n].flags;
 * Page flags policies wrt compound pages
 *
 * PF_POISONED_CHECK
 *     check if this struct page is poisoned/uninitialized
 *
 * PF_ANY:
 *     the page flag is relevant for small, head and tail pages.
 *
 * PF_HEAD:
 *     for compound pages, all operations related to the page flag are
 *     applied to the head page.
 *
 * PF_NO_TAIL:
 *     modifications of the page flag must be done on small or head pages,
 *     checks can be done on tail pages too.
 *
 * PF_NO_COMPOUND:
 *     the page flag is not relevant for compound pages.
 *
 * PF_SECOND:
 *     the page flag is stored in the first tail page.
#define PF_POISONED_CHECK(page) ({ \
	VM_BUG_ON_PGFLAGS(PagePoisoned(page), page); \
	page; })
#define PF_ANY(page, enforce) PF_POISONED_CHECK(page)
#define PF_HEAD(page, enforce) PF_POISONED_CHECK(compound_head(page))
#define PF_NO_TAIL(page, enforce) ({ \
	VM_BUG_ON_PGFLAGS(enforce && PageTail(page), page); \
	PF_POISONED_CHECK(compound_head(page)); })
#define PF_NO_COMPOUND(page, enforce) ({ \
	VM_BUG_ON_PGFLAGS(enforce && PageCompound(page), page); \
	PF_POISONED_CHECK(page); })
#define PF_SECOND(page, enforce) ({ \
	VM_BUG_ON_PGFLAGS(!PageHead(page), page); \
	PF_POISONED_CHECK(&page[1]); })
/* Which page is the flag stored in */
 * Macros to create function definitions for page flags
#define FOLIO_TEST_FLAG(name, page) \
{ return test_bit(PG_##name, const_folio_flags(folio, page)); }
#define FOLIO_SET_FLAG(name, page) \
{ set_bit(PG_##name, folio_flags(folio, page)); }
#define FOLIO_CLEAR_FLAG(name, page) \
{ clear_bit(PG_##name, folio_flags(folio, page)); }
#define __FOLIO_SET_FLAG(name, page) \
{ __set_bit(PG_##name, folio_flags(folio, page)); }
#define __FOLIO_CLEAR_FLAG(name, page) \
{ __clear_bit(PG_##name, folio_flags(folio, page)); }
#define FOLIO_TEST_SET_FLAG(name, page) \
{ return test_and_set_bit(PG_##name, folio_flags(folio, page)); }
#define FOLIO_TEST_CLEAR_FLAG(name, page) \
{ return test_and_clear_bit(PG_##name, folio_flags(folio, page)); }
#define FOLIO_FLAG(name, page) \
FOLIO_TEST_FLAG(name, page) \
FOLIO_SET_FLAG(name, page) \
FOLIO_CLEAR_FLAG(name, page)
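The function-signature lines of these generators are not part of this excerpt (they name folio rather than page). For illustration only, a hypothetical FOLIO_FLAG(example, FOLIO_HEAD_PAGE) declaration would expand roughly to:

static __always_inline bool folio_test_example(const struct folio *folio)
{ return test_bit(PG_example, const_folio_flags(folio, FOLIO_HEAD_PAGE)); }
static __always_inline void folio_set_example(struct folio *folio)
{ set_bit(PG_example, folio_flags(folio, FOLIO_HEAD_PAGE)); }
static __always_inline void folio_clear_example(struct folio *folio)
{ clear_bit(PG_example, folio_flags(folio, FOLIO_HEAD_PAGE)); }

(The flag name and PG_example are hypothetical; the second argument selects which page of the folio stores the bit.)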
static __always_inline int Page##uname(const struct page *page) \
{ return test_bit(PG_##lname, &policy(page, 0)->flags); }
static __always_inline void SetPage##uname(struct page *page) \
{ set_bit(PG_##lname, &policy(page, 1)->flags); }
static __always_inline void ClearPage##uname(struct page *page) \
{ clear_bit(PG_##lname, &policy(page, 1)->flags); }
static __always_inline void __SetPage##uname(struct page *page) \
{ __set_bit(PG_##lname, &policy(page, 1)->flags); }
static __always_inline void __ClearPage##uname(struct page *page) \
{ __clear_bit(PG_##lname, &policy(page, 1)->flags); }
static __always_inline int TestSetPage##uname(struct page *page) \
{ return test_and_set_bit(PG_##lname, &policy(page, 1)->flags); }
static __always_inline int TestClearPage##uname(struct page *page) \
{ return test_and_clear_bit(PG_##lname, &policy(page, 1)->flags); }
static inline int Page##uname(const struct page *page) { return 0; }
static inline void SetPage##uname(struct page *page) { }
static inline void ClearPage##uname(struct page *page) { }
static inline void __ClearPage##uname(struct page *page) { }
static inline int TestSetPage##uname(struct page *page) { return 0; }
static inline int TestClearPage##uname(struct page *page) { return 0; }
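These bodies belong to the TESTPAGEFLAG/SETPAGEFLAG/... generator macros, followed by their no-op variants for configs where a flag does not exist. For illustration, combining a generator with a policy from above, a declaration such as PAGEFLAG(Dirty, dirty, PF_HEAD) would yield accessors that redirect to the head page, roughly:

static __always_inline int PageDirty(const struct page *page)
{ return test_bit(PG_dirty, &PF_HEAD(page, 0)->flags); }
static __always_inline void SetPageDirty(struct page *page)
{ set_bit(PG_dirty, &PF_HEAD(page, 1)->flags); }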
 * Private page markings that may be used by the filesystem that owns the page
 * risky: they bypass page accounting.
 * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
 * structure which KSM associates with that merged page. See ksm.h.
 * page and then folio->mapping points to a struct movable_operations.
 * For slab pages, since slab reuses the bits in struct page to store its
 * indicates that this page->mapping now falls under the reflink (shared) case.
static __always_inline bool PageMappingFlags(const struct page *page)
	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) != 0;
static __always_inline bool PageAnonNotKsm(const struct page *page)
	unsigned long flags = (unsigned long)page_folio(page)->mapping;
static __always_inline bool PageAnon(const struct page *page)
	return folio_test_anon(page_folio(page));
static __always_inline bool __PageMovable(const struct page *page)
	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
 * A KSM page is one of those write-protected "shared pages" or "merged pages"
 * which KSM maps into multiple mms, wherever identical anonymous page content
 * is found in VM_MERGEABLE vmas. It's a PageAnon page, pointing not to any
 * anon_vma, but to that page's node of the stable tree.
u64 stable_page_flags(const struct page *page);
static inline bool PageUptodate(const struct page *page)
	return folio_test_uptodate(page_folio(page));
static __always_inline void __SetPageUptodate(struct page *page)
	__folio_mark_uptodate((struct folio *)page);
static __always_inline void SetPageUptodate(struct page *page)
	folio_mark_uptodate((struct folio *)page);
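A minimal usage sketch (not from this header) of the pattern these wrappers support: a filesystem fills a locked folio with data and only then marks it uptodate, so the flag is never observed before the contents are:

/* Illustrative read-completion handler; the function name is hypothetical. */
static void example_finish_read(struct folio *folio, bool success)
{
	if (success)
		folio_mark_uptodate(folio);	/* orders the data writes before setting PG_uptodate */
	folio_unlock(folio);
}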
void set_page_writeback(struct page *page);
static __always_inline int PageHead(const struct page *page)
	PF_POISONED_CHECK(page);
	return test_bit(PG_head, &page->flags) && !page_is_fake_head(page);
 * folio_test_large() - Does this folio contain more than one page?
 * Return: True if the folio is larger than one page.
static __always_inline void set_compound_head(struct page *page, struct page *head)
	WRITE_ONCE(page->compound_head, (unsigned long)head + 1);
static __always_inline void clear_compound_head(struct page *page)
	WRITE_ONCE(page->compound_head, 0);
static inline void ClearPageCompound(struct page *page)
	BUG_ON(!PageHead(page));
	ClearPageHead(page);
static inline int PageTransHuge(const struct page *page)
	VM_BUG_ON_PAGE(PageTail(page), page);
	return PageHead(page);
static inline int PageTransCompound(const struct page *page)
	return PageCompound(page);
 * compound page.
 * This flag is set by the hwpoison handler. It is cleared by THP split or page free.
 * pagetype will be overwritten when you clear the page_type from the page.
/* This takes a mapcount which is one more than page->_mapcount */
static inline bool page_has_type(const struct page *page)
	return page_mapcount_is_type(data_race(page->page_type));
	return data_race(folio->page.page_type >> 24) == PGTY_##lname; \
	VM_BUG_ON_FOLIO(data_race(folio->page.page_type) != UINT_MAX, \
	folio->page.page_type = (unsigned int)PGTY_##lname << 24; \
	if (folio->page.page_type == UINT_MAX) \
	folio->page.page_type = UINT_MAX; \
static __always_inline int Page##uname(const struct page *page) \
	return data_race(page->page_type >> 24) == PGTY_##lname; \
static __always_inline void __SetPage##uname(struct page *page) \
	if (Page##uname(page)) \
	VM_BUG_ON_PAGE(data_race(page->page_type) != UINT_MAX, page); \
	page->page_type = (unsigned int)PGTY_##lname << 24; \
static __always_inline void __ClearPage##uname(struct page *page) \
	if (page->page_type == UINT_MAX) \
	VM_BUG_ON_PAGE(!Page##uname(page), page); \
	page->page_type = UINT_MAX; \
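As these generator bodies show, a page's type lives in the top byte of page->page_type, and UINT_MAX means "no type set". For illustration, a declaration such as PAGE_TYPE_OPS(Buddy, buddy, buddy) would produce, roughly:

static __always_inline int PageBuddy(const struct page *page)
{
	/* the top byte identifies the type; data_race() marks this lockless read as intentional for KCSAN */
	return data_race(page->page_type >> 24) == PGTY_buddy;
}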
 * PageBuddy() indicates that the page is free and in the buddy system
 * PageOffline() indicates that the page is logically offline although the
 * Memory offlining code will not adjust the managed page count for any
 * There are drivers that mark a page PageOffline() and expect there won't be
 * any further access to page content. PFN walkers that read content of random
 * Marks pages in use as page tables.
 * PageSlab - Determine if the page belongs to the slab allocator
 * @page: The page to test.
 * Return: True for slab pages, false for any other kind of page.
static inline bool PageSlab(const struct page *page)
	return folio_test_slab(page_folio(page));
 * PageHuge - Determine if the page belongs to hugetlbfs
 * @page: The page to test.
static inline bool PageHuge(const struct page *page)
	return folio_test_hugetlb(page_folio(page));
 * Check if a page is currently marked HWPoisoned. Note that this check is
static inline bool is_page_hwpoison(const struct page *page)
	if (PageHWPoison(page))
	folio = page_folio(page);
	return folio_test_hugetlb(folio) && PageHWPoison(&folio->page);
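A hedged usage sketch (helper name illustrative): PFN walkers that must not touch page contents typically combine this check with the PageOffline() guidance above:

static bool example_pfn_readable(const struct page *page)
{
	/* skip pages whose contents must not be accessed */
	return !PageOffline(page) && !is_page_hwpoison(page);
}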
bool is_free_buddy_page(const struct page *page);
static __always_inline int PageAnonExclusive(const struct page *page)
	VM_BUG_ON_PGFLAGS(!PageAnon(page), page);
 * HugeTLB stores this information on the head page; THP keeps it per
 * page
	if (PageHuge(page))
		page = compound_head(page);
	return test_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
static __always_inline void SetPageAnonExclusive(struct page *page)
	VM_BUG_ON_PGFLAGS(!PageAnonNotKsm(page), page);
	VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
	set_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
static __always_inline void ClearPageAnonExclusive(struct page *page)
	VM_BUG_ON_PGFLAGS(!PageAnonNotKsm(page), page);
	VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
	clear_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
static __always_inline void __ClearPageAnonExclusive(struct page *page)
	VM_BUG_ON_PGFLAGS(!PageAnon(page), page);
	VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
	__clear_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
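A simplified sketch of the kind of decision PG_anon_exclusive feeds (loosely mirroring the write-fault path; the name and shape below are illustrative only):

static bool example_can_reuse_on_write_fault(struct page *page)
{
	/*
	 * An anonymous page already marked exclusive belongs to this process
	 * alone, so a write fault may reuse it in place instead of copying it.
	 */
	return PageAnon(page) && PageAnonExclusive(page);
}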
 * Flags checked when a page is freed. Pages being freed should not have
 * Flags checked when a page is prepped for return by the page allocator.
 * there has been a kernel bug or struct page corruption.
 * __PG_HWPOISON is exceptional because it needs to be kept beyond the page's
 * alloc-free cycle to prevent the page from being reused.
 * Flags stored in the second page of a compound page. They may overlap