Lines Matching full:order (mm/compaction.c)

46  * order == -1 is expected when compacting proactively via
51 static inline bool is_via_compact_memory(int order) in is_via_compact_memory() argument
53 return order == -1; in is_via_compact_memory()
59 static inline bool is_via_compact_memory(int order) { return false; } in is_via_compact_memory() argument
67 #define block_start_pfn(pfn, order) round_down(pfn, 1UL << (order)) argument
68 #define block_end_pfn(pfn, order) ALIGN((pfn) + 1, 1UL << (order)) argument
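
The two macros above round a pfn down/up to the boundary of its enclosing 1UL << order block (round_down() to the start, ALIGN((pfn) + 1, ...) past the end). A minimal stand-alone sketch of the same arithmetic, with the kernel helpers re-expressed as plain power-of-two masks and an illustrative order of 9 (pageblock_order on common x86-64 configs):

    #include <stdio.h>

    /* Same arithmetic as the kernel's round_down()/ALIGN() for power-of-two sizes. */
    #define BLOCK_START_PFN(pfn, order) ((pfn) & ~((1UL << (order)) - 1))
    #define BLOCK_END_PFN(pfn, order)   (((pfn) + (1UL << (order))) & ~((1UL << (order)) - 1))

    int main(void)
    {
        unsigned long pfn = 0x12345;
        unsigned int order = 9;     /* illustrative pageblock_order */

        /* For pfn 0x12345 and order 9: start 0x12200, end 0x12400. */
        printf("start %#lx end %#lx\n",
               BLOCK_START_PFN(pfn, order), BLOCK_END_PFN(pfn, order));
        return 0;
    }
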
71 * Page order with-respect-to which proactive compaction
83 static struct page *mark_allocated_noprof(struct page *page, unsigned int order, gfp_t gfp_flags) in mark_allocated_noprof() argument
85 post_alloc_hook(page, order, __GFP_MOVABLE); in mark_allocated_noprof()
93 int order; in release_free_list() local
96 for (order = 0; order < NR_PAGE_ORDERS; order++) { in release_free_list()
99 list_for_each_entry_safe(page, next, &freepages[order], lru) { in release_free_list()
107 mark_allocated(page, order, __GFP_MOVABLE); in release_free_list()
108 __free_pages(page, order); in release_free_list()
159 static void defer_compaction(struct zone *zone, int order) in defer_compaction() argument
164 if (order < zone->compact_order_failed) in defer_compaction()
165 zone->compact_order_failed = order; in defer_compaction()
170 trace_mm_compaction_defer_compaction(zone, order); in defer_compaction()
174 static bool compaction_deferred(struct zone *zone, int order) in compaction_deferred() argument
178 if (order < zone->compact_order_failed) in compaction_deferred()
187 trace_mm_compaction_deferred(zone, order); in compaction_deferred()
193 * Update defer tracking counters after successful compaction of given order,
197 void compaction_defer_reset(struct zone *zone, int order, in compaction_defer_reset() argument
204 if (order >= zone->compact_order_failed) in compaction_defer_reset()
205 zone->compact_order_failed = order + 1; in compaction_defer_reset()
207 trace_mm_compaction_defer_reset(zone, order); in compaction_defer_reset()
211 static bool compaction_restarting(struct zone *zone, int order) in compaction_restarting() argument
213 if (order < zone->compact_order_failed) in compaction_restarting()
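
Taken together, the defer_compaction()/compaction_deferred()/compaction_defer_reset()/compaction_restarting() matches revolve around one field, zone->compact_order_failed: a failure lowers it to the failing order, a success raises it past the compacted order, and deferral applies only to orders at or above it. A self-contained sketch of that bookkeeping, using a toy struct in place of struct zone and omitting the compact_considered/compact_defer_shift rate limiting that the real helpers also apply:

    #include <stdbool.h>
    #include <stdio.h>

    /* Toy stand-in for the one field these matches revolve around. */
    struct toy_zone { int compact_order_failed; };

    static void defer_compaction(struct toy_zone *z, int order)
    {
        if (order < z->compact_order_failed)
            z->compact_order_failed = order;      /* remember the lowest failing order */
    }

    static bool compaction_deferred(struct toy_zone *z, int order)
    {
        return order >= z->compact_order_failed;  /* smaller orders are still tried */
    }

    static void compaction_defer_reset(struct toy_zone *z, int order)
    {
        if (order >= z->compact_order_failed)
            z->compact_order_failed = order + 1;  /* success: stop deferring this order */
    }

    int main(void)
    {
        struct toy_zone z = { .compact_order_failed = 11 };   /* arbitrary high start */

        defer_compaction(&z, 4);
        printf("order 3 deferred? %d, order 4 deferred? %d\n",
               compaction_deferred(&z, 3), compaction_deferred(&z, 4));  /* 0, 1 */
        compaction_defer_reset(&z, 4);
        printf("order 4 deferred now? %d\n", compaction_deferred(&z, 4)); /* 0 */
        return 0;
    }
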
292 * released. It is always pointless to compact pages of such order (if they are
601 unsigned int order; in isolate_freepages_block() local
632 const unsigned int order = compound_order(page); in isolate_freepages_block() local
634 if ((order <= MAX_PAGE_ORDER) && in isolate_freepages_block()
635 (blockpfn + (1UL << order) <= end_pfn)) { in isolate_freepages_block()
636 blockpfn += (1UL << order) - 1; in isolate_freepages_block()
637 page += (1UL << order) - 1; in isolate_freepages_block()
638 nr_scanned += (1UL << order) - 1; in isolate_freepages_block()
657 /* Found a free page, will break it into order-0 pages */ in isolate_freepages_block()
658 order = buddy_order(page); in isolate_freepages_block()
659 isolated = __isolate_free_page(page, order); in isolate_freepages_block()
662 set_page_private(page, order); in isolate_freepages_block()
667 list_add_tail(&page->lru, &freelist[order]); in isolate_freepages_block()
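
The isolate_freepages_block() matches show the per-order skip pattern: when a free compound/buddy range of a given order is found (and it fits before end_pfn; the MAX_PAGE_ORDER bound is left out here), the scan advances blockpfn/page/nr_scanned by (1UL << order) - 1 and lets the loop's own increment supply the final step. A tiny stand-alone illustration of just that arithmetic, with made-up pfn values:

    #include <stdio.h>

    int main(void)
    {
        unsigned long blockpfn = 1000, end_pfn = 1512;
        unsigned int order = 5;                 /* a free order-5 buddy: 32 pages */

        if (blockpfn + (1UL << order) <= end_pfn) {
            blockpfn += (1UL << order) - 1;     /* the loop's blockpfn++ adds the last step */
            printf("next pfn examined: %lu\n", blockpfn + 1);   /* 1032 */
        }
        return 0;
    }
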
732 int order; in isolate_freepages_range() local
734 for (order = 0; order < NR_PAGE_ORDERS; order++) in isolate_freepages_range()
735 INIT_LIST_HEAD(&cc->freepages[order]); in isolate_freepages_range()
751 * is more than pageblock order. In this case, we adjust in isolate_freepages_range()
778 * pageblock_nr_pages for some non-negative n. (Max order in isolate_freepages_range()
828 * folio order and compaction target order
829 * @order: to-be-isolated folio order
830 * @target_order: compaction target order
834 static bool skip_isolation_on_order(int order, int target_order) in skip_isolation_on_order() argument
839 * target order: we wouldn't be here if we'd have a free folio with in skip_isolation_on_order()
843 if (!is_via_compact_memory(target_order) && order >= target_order) in skip_isolation_on_order()
849 return order >= pageblock_order; in skip_isolation_on_order()
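
The skip_isolation_on_order() matches contain the whole predicate, so it can be restated as a compilable sketch: under targeted compaction a folio whose order already meets the target is not worth migrating, and in any mode folios at or above pageblock_order are skipped. The toy_ names and the pageblock order of 9 below are illustrative stand-ins:

    #include <stdbool.h>
    #include <stdio.h>

    #define TOY_PAGEBLOCK_ORDER 9       /* typical x86-64 value; illustrative */

    /* target_order == -1 marks a whole-zone request, as in the matches above. */
    static bool toy_is_via_compact_memory(int target_order)
    {
        return target_order == -1;
    }

    /* Mirrors the predicate visible in the skip_isolation_on_order() matches. */
    static bool toy_skip_isolation_on_order(int order, int target_order)
    {
        /* Targeted compaction: a folio already >= the target order cannot help. */
        if (!toy_is_via_compact_memory(target_order) && order >= target_order)
            return true;
        /* Either way, pageblock-or-larger folios are not worth migrating here. */
        return order >= TOY_PAGEBLOCK_ORDER;
    }

    int main(void)
    {
        printf("order 4 vs target 2:  skip=%d\n", toy_skip_isolation_on_order(4, 2));  /* 1 */
        printf("order 4 vs target -1: skip=%d\n", toy_skip_isolation_on_order(4, -1)); /* 0 */
        printf("order 9 vs target -1: skip=%d\n", toy_skip_isolation_on_order(9, -1)); /* 1 */
        return 0;
    }
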
913 next_skip_pfn = block_end_pfn(low_pfn, cc->order); in isolate_migratepages_block()
923 * previous order-aligned block, and did not skip it due in isolate_migratepages_block()
931 * We failed to isolate in the previous order-aligned in isolate_migratepages_block()
934 * next_skip_pfn by 1 << order, as low_pfn might have in isolate_migratepages_block()
936 * a compound or a high-order buddy page in the in isolate_migratepages_block()
939 next_skip_pfn = block_end_pfn(low_pfn, cc->order); in isolate_migratepages_block()
984 const unsigned int order = compound_order(page); in isolate_migratepages_block() local
987 * bigger than its order. THPs and other compound pages in isolate_migratepages_block()
992 if (order <= MAX_PAGE_ORDER) { in isolate_migratepages_block()
993 low_pfn += (1UL << order) - 1; in isolate_migratepages_block()
994 nr_scanned += (1UL << order) - 1; in isolate_migratepages_block()
1014 low_pfn += (1UL << order) - 1; in isolate_migratepages_block()
1015 nr_scanned += (1UL << order) - 1; in isolate_migratepages_block()
1038 * Skip if free. We read page order here without zone lock in isolate_migratepages_block()
1048 * a valid page order. Consider only values in the in isolate_migratepages_block()
1049 * valid order range to prevent low_pfn overflow. in isolate_migratepages_block()
1067 const unsigned int order = compound_order(page); in isolate_migratepages_block() local
1069 /* Skip based on page order and compaction target order. */ in isolate_migratepages_block()
1070 if (skip_isolation_on_order(order, cc->order)) { in isolate_migratepages_block()
1071 if (order <= MAX_PAGE_ORDER) { in isolate_migratepages_block()
1072 low_pfn += (1UL << order) - 1; in isolate_migratepages_block()
1073 nr_scanned += (1UL << order) - 1; in isolate_migratepages_block()
1219 * Check LRU folio order under the lock in isolate_migratepages_block()
1222 cc->order) && in isolate_migratepages_block()
1276 * instead of migrating, as we cannot form the cc->order buddy in isolate_migratepages_block()
1295 next_skip_pfn += 1UL << cc->order; in isolate_migratepages_block()
1420 int order = cc->order > 0 ? cc->order : pageblock_order; in suitable_migration_target() local
1425 * pageblock, so it's not worth checking order for a valid range. in suitable_migration_target()
1427 if (buddy_order_unsafe(page) >= order) in suitable_migration_target()
1523 static int next_search_order(struct compact_control *cc, int order) in next_search_order() argument
1525 order--; in next_search_order()
1526 if (order < 0) in next_search_order()
1527 order = cc->order - 1; in next_search_order()
1530 if (order == cc->search_order) { in next_search_order()
1533 cc->search_order = cc->order - 1; in next_search_order()
1537 return order; in next_search_order()
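
The next_search_order() matches show the free-area scan order: step downward from cc->search_order, wrap around to cc->order - 1, and stop once the starting order comes back around (the real helper also resets cc->search_order at that point, which this sketch drops). A stand-alone model of that iteration for cc->order = 5 and search_order = 2:

    #include <stdio.h>

    /* Step down one order, wrap to cc_order - 1, stop (return -1) when the
     * starting search_order comes around again, as in the matches above. */
    static int next_search_order(int cc_order, int search_order, int order)
    {
        order--;
        if (order < 0)
            order = cc_order - 1;
        if (order == search_order)
            return -1;              /* full circle: stop */
        return order;
    }

    int main(void)
    {
        int cc_order = 5, search_order = 2;

        for (int order = search_order; order >= 0;
             order = next_search_order(cc_order, search_order, order))
            printf("scan free_area[%d]\n", order);   /* 2, 1, 0, 4, 3 */
        return 0;
    }
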
1549 int order; in fast_isolate_freepages() local
1551 /* Full compaction passes in a negative order */ in fast_isolate_freepages()
1552 if (cc->order <= 0) in fast_isolate_freepages()
1576 * Search starts from the last successful isolation order or the next in fast_isolate_freepages()
1577 * order to search after a previous failure in fast_isolate_freepages()
1579 cc->search_order = min_t(unsigned int, cc->order - 1, cc->search_order); in fast_isolate_freepages()
1581 for (order = cc->search_order; in fast_isolate_freepages()
1582 !page && order >= 0; in fast_isolate_freepages()
1583 order = next_search_order(cc, order)) { in fast_isolate_freepages()
1584 struct free_area *area = &cc->zone->free_area[order]; in fast_isolate_freepages()
1609 cc->search_order = order; in fast_isolate_freepages()
1638 if (__isolate_free_page(page, order)) { in fast_isolate_freepages()
1639 set_page_private(page, order); in fast_isolate_freepages()
1640 nr_isolated = 1 << order; in fast_isolate_freepages()
1644 list_add_tail(&page->lru, &cc->freepages[order]); in fast_isolate_freepages()
1648 order = cc->search_order + 1; in fast_isolate_freepages()
1660 * Smaller scan on next order so the total scan is related in fast_isolate_freepages()
1837 int order = folio_order(src); in compaction_alloc_noprof() local
1844 for (start_order = order; start_order < NR_PAGE_ORDERS; start_order++) in compaction_alloc_noprof()
1863 while (start_order > order) { in compaction_alloc_noprof()
1872 post_alloc_hook(&dst->page, order, __GFP_MOVABLE); in compaction_alloc_noprof()
1874 if (order) in compaction_alloc_noprof()
1875 prep_compound_page(&dst->page, order); in compaction_alloc_noprof()
1876 cc->nr_freepages -= 1 << order; in compaction_alloc_noprof()
1877 cc->nr_migratepages -= 1 << order; in compaction_alloc_noprof()
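
The compaction_alloc_noprof() matches show a free page of at least folio_order(src) being taken from the per-order cc->freepages lists and split down to the needed order, with the trimmed pieces returned one order at a time. A toy model of just that splitting arithmetic (order-4 page, order-1 target; list handling and the post_alloc_hook()/prep_compound_page() steps omitted):

    #include <stdio.h>

    #define TOY_NR_ORDERS 11

    int main(void)
    {
        unsigned long freepages[TOY_NR_ORDERS] = { 0 };
        int order = 1, start_order = 4;

        /* Split the order-4 page down to order 1, returning one piece per order. */
        while (start_order > order) {
            start_order--;
            freepages[start_order]++;   /* put one half back on that order's list */
        }
        for (int i = 0; i < TOY_NR_ORDERS; i++)
            if (freepages[i])
                printf("order %d: %lu page(s) returned\n", i, freepages[i]);
        /* prints: order 1: 1, order 2: 1, order 3: 1 */
        return 0;
    }
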
1894 int order = folio_order(dst); in compaction_free() local
1898 free_pages_prepare(page, order); in compaction_free()
1899 list_add(&dst->lru, &cc->freepages[order]); in compaction_free()
1900 cc->nr_freepages += 1 << order; in compaction_free()
1902 cc->nr_migratepages += 1 << order; in compaction_free()
1966 int order; in fast_find_migrateblock() local
1993 if (cc->order <= PAGE_ALLOC_COSTLY_ORDER) in fast_find_migrateblock()
2016 for (order = cc->order - 1; in fast_find_migrateblock()
2017 order >= PAGE_ALLOC_COSTLY_ORDER && !found_block && nr_scanned < limit; in fast_find_migrateblock()
2018 order--) { in fast_find_migrateblock()
2019 struct free_area *area = &cc->zone->free_area[order]; in fast_find_migrateblock()
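
The fast_find_migrateblock() matches show a descending scan of free_area[] from cc->order - 1 down to PAGE_ALLOC_COSTLY_ORDER (the real loop also stops early on found_block or when nr_scanned reaches its limit, which this sketch leaves out). The orders visited for a cc->order = 9 request:

    #include <stdio.h>

    #define PAGE_ALLOC_COSTLY_ORDER 3

    int main(void)
    {
        int cc_order = 9;

        /* Descending scan with the costly-order floor, as in the matches above. */
        for (int order = cc_order - 1; order >= PAGE_ALLOC_COSTLY_ORDER; order--)
            printf("scan free_area[%d]\n", order);   /* 8 down to 3 */
        return 0;
    }
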
2276 unsigned int order; in __compact_finished() local
2319 if (is_via_compact_memory(cc->order)) in __compact_finished()
2333 for (order = cc->order; order < NR_PAGE_ORDERS; order++) { in __compact_finished()
2334 struct free_area *area = &cc->zone->free_area[order]; in __compact_finished()
2351 if (find_suitable_fallback(area, order, migratetype, in __compact_finished()
2376 trace_mm_compaction_finished(cc->zone, cc->order, ret); in compact_finished()
2383 static bool __compaction_suitable(struct zone *zone, int order, in __compaction_suitable() argument
2389 * Watermarks for order-0 must be met for compaction to be able to in __compaction_suitable()
2402 watermark = (order > PAGE_ALLOC_COSTLY_ORDER) ? in __compaction_suitable()
2404 watermark += compact_gap(order); in __compaction_suitable()
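
The __compaction_suitable() matches show which watermark the order-0 check uses: the low watermark for costly orders, the min watermark otherwise, plus compact_gap(order) of headroom (2UL << order pages in current kernels). A small worked example with illustrative watermark values for an order-9 (THP-sized) request:

    #include <stdio.h>

    #define PAGE_ALLOC_COSTLY_ORDER 3
    /* compact_gap(): headroom of twice the allocation size (2UL << order). */
    #define COMPACT_GAP(order) (2UL << (order))

    int main(void)
    {
        unsigned long min_wmark = 1024, low_wmark = 1280;   /* illustrative pages */
        int order = 9;

        unsigned long watermark =
            (order > PAGE_ALLOC_COSTLY_ORDER ? low_wmark : min_wmark)
            + COMPACT_GAP(order);

        /* 1280 + 1024 = 2304 order-0 pages for the suitability check. */
        printf("required order-0 watermark: %lu pages\n", watermark);
        return 0;
    }
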
2412 bool compaction_suitable(struct zone *zone, int order, int highest_zoneidx) in compaction_suitable() argument
2417 suitable = __compaction_suitable(zone, order, highest_zoneidx, in compaction_suitable()
2424 * watermarks, but we already failed the high-order watermark check in compaction_suitable()
2437 if (order > PAGE_ALLOC_COSTLY_ORDER) { in compaction_suitable()
2438 int fragindex = fragmentation_index(zone, order); in compaction_suitable()
2450 trace_mm_compaction_suitable(zone, order, compact_result); in compaction_suitable()
2455 bool compaction_zonelist_suitable(struct alloc_context *ac, int order, in compaction_zonelist_suitable() argument
2471 * want to trash just for a single high order allocation which in compaction_zonelist_suitable()
2475 available = zone_reclaimable_pages(zone) / order; in compaction_zonelist_suitable()
2477 if (__compaction_suitable(zone, order, ac->highest_zoneidx, in compaction_zonelist_suitable()
2486 * Should we do compaction for target allocation order.
2487 * Return COMPACT_SUCCESS if allocation for target order can be already
2489 * Return COMPACT_SKIPPED if compaction for target order is likely to fail
2490 Return COMPACT_CONTINUE if compaction for target order should be run
2493 compaction_suit_allocation_order(struct zone *zone, unsigned int order, in compaction_suit_allocation_order() argument
2500 if (zone_watermark_ok(zone, order, watermark, highest_zoneidx, in compaction_suit_allocation_order()
2507 * the high-order page in CMA pageblocks, which would not help the in compaction_suit_allocation_order()
2508 * allocation to succeed. However, limit the check to costly order async in compaction_suit_allocation_order()
2513 if (order > PAGE_ALLOC_COSTLY_ORDER && async && in compaction_suit_allocation_order()
2515 watermark = low_wmark_pages(zone) + compact_gap(order); in compaction_suit_allocation_order()
2521 if (!compaction_suitable(zone, order, highest_zoneidx)) in compaction_suit_allocation_order()
2537 int order; in compact_zone() local
2547 for (order = 0; order < NR_PAGE_ORDERS; order++) in compact_zone()
2548 INIT_LIST_HEAD(&cc->freepages[order]); in compact_zone()
2553 if (!is_via_compact_memory(cc->order)) { in compact_zone()
2554 ret = compaction_suit_allocation_order(cc->zone, cc->order, in compact_zone()
2566 if (compaction_restarting(cc->zone, cc->order)) in compact_zone()
2648 * previous cc->order aligned block. in compact_zone()
2702 if (cc->order == COMPACTION_HPAGE_ORDER) in compact_zone()
2718 * cc->order aligned block where we migrated from? If yes, in compact_zone()
2723 if (cc->order > 0 && last_migrated_pfn) { in compact_zone()
2725 block_start_pfn(cc->migrate_pfn, cc->order); in compact_zone()
2765 static enum compact_result compact_zone_order(struct zone *zone, int order, in compact_zone_order() argument
2772 .order = order, in compact_zone_order()
2773 .search_order = order, in compact_zone_order()
2820 * try_to_compact_pages - Direct compact to satisfy a high-order allocation
2822 * @order: The order of the current allocation
2830 enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order, in try_to_compact_pages() argument
2841 trace_mm_compaction_try_to_compact_pages(order, gfp_mask, prio); in try_to_compact_pages()
2854 && compaction_deferred(zone, order)) { in try_to_compact_pages()
2859 status = compact_zone_order(zone, order, gfp_mask, prio, in try_to_compact_pages()
2871 compaction_defer_reset(zone, order, false); in try_to_compact_pages()
2883 defer_compaction(zone, order); in try_to_compact_pages()
2914 .order = -1, in compact_node()
3073 * order is allocatable. in kcompactd_do_work()
3078 .order = pgdat->kcompactd_max_order, in kcompactd_do_work()
3087 trace_mm_compaction_kcompactd_wake(pgdat->node_id, cc.order, in kcompactd_do_work()
3098 if (compaction_deferred(zone, cc.order)) in kcompactd_do_work()
3102 cc.order, zoneid, ALLOC_WMARK_MIN, in kcompactd_do_work()
3114 compaction_defer_reset(zone, cc.order, false); in kcompactd_do_work()
3119 * order >= cc.order. This is ratelimited by the in kcompactd_do_work()
3128 defer_compaction(zone, cc.order); in kcompactd_do_work()
3139 * the requested order/highest_zoneidx in case it was higher/tighter in kcompactd_do_work()
3142 if (pgdat->kcompactd_max_order <= cc.order) in kcompactd_do_work()
3148 void wakeup_kcompactd(pg_data_t *pgdat, int order, int highest_zoneidx) in wakeup_kcompactd() argument
3150 if (!order) in wakeup_kcompactd()
3153 if (pgdat->kcompactd_max_order < order) in wakeup_kcompactd()
3154 pgdat->kcompactd_max_order = order; in wakeup_kcompactd()
3169 trace_mm_compaction_wakeup_kcompactd(pgdat->node_id, order, in wakeup_kcompactd()
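
The wakeup_kcompactd() matches show the wakeup contract: order-0 requests are ignored and pgdat->kcompactd_max_order only ratchets upward, until kcompactd_do_work() resets it after servicing the request. A minimal sketch with a toy pgdat:

    #include <stdio.h>

    struct toy_pgdat { int kcompactd_max_order; };

    /* Mirrors the wakeup_kcompactd() matches: order-0 wakeups are ignored and
     * kcompactd_max_order only ratchets upward until kcompactd consumes it. */
    static void toy_wakeup_kcompactd(struct toy_pgdat *pgdat, int order)
    {
        if (!order)
            return;                 /* nothing to compact for an order-0 request */
        if (pgdat->kcompactd_max_order < order)
            pgdat->kcompactd_max_order = order;
        /* ...the real function then wakes the per-node kcompactd thread... */
    }

    int main(void)
    {
        struct toy_pgdat pgdat = { .kcompactd_max_order = 0 };

        toy_wakeup_kcompactd(&pgdat, 0);    /* ignored */
        toy_wakeup_kcompactd(&pgdat, 4);    /* raises max_order to 4 */
        toy_wakeup_kcompactd(&pgdat, 2);    /* lower request, no change */
        printf("kcompactd_max_order = %d\n", pgdat.kcompactd_max_order);  /* 4 */
        return 0;
    }
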