Lines Matching +full:align +full:- +full:end
1 // SPDX-License-Identifier: GPL-2.0-or-later
46 * * ``memory`` - describes the physical memory available to the
50 * * ``reserved`` - describes the regions that were allocated
51 * * ``physmem`` - describes the actual physical memory available during
81 * * memblock_phys_alloc*() - these functions return the **physical**
83 * * memblock_alloc*() - these functions return the **virtual** address
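A minimal usage sketch, not taken from memblock.c, contrasting the two allocator families; SZ_4K and the variable names are illustrative, only the two functions are the real entry points:

	phys_addr_t pa;	/* physical address; map it yourself if a pointer is needed */
	void *va;	/* pointer into the direct map, returned already zeroed */

	pa = memblock_phys_alloc(SZ_4K, SMP_CACHE_BYTES);
	va = memblock_alloc(SZ_4K, SMP_CACHE_BYTES);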
145 for (i = 0, rgn = &memblock_type->regions[0]; \
146 i < memblock_type->cnt; \
147 i++, rgn = &memblock_type->regions[i])
174 return *size = min(*size, PHYS_ADDR_MAX - base); in memblock_cap_size()
194 for (i = 0; i < type->cnt; i++) in memblock_overlaps_region()
195 if (memblock_addrs_overlap(base, size, type->regions[i].base, in memblock_overlaps_region()
196 type->regions[i].size)) in memblock_overlaps_region()
202 * __memblock_find_range_bottom_up - find free area utility in bottom-up
204 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
207 * @align: alignment of free area to find
211 * Utility called from memblock_find_in_range_node(), find free area bottom-up.
217 __memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end, in __memblock_find_range_bottom_up() argument
218 phys_addr_t size, phys_addr_t align, int nid, in __memblock_find_range_bottom_up() argument
225 this_start = clamp(this_start, start, end); in __memblock_find_range_bottom_up()
226 this_end = clamp(this_end, start, end); in __memblock_find_range_bottom_up()
228 cand = round_up(this_start, align); in __memblock_find_range_bottom_up()
229 if (cand < this_end && this_end - cand >= size) in __memblock_find_range_bottom_up()
237 * __memblock_find_range_top_down - find free area utility, in top-down
239 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
242 * @align: alignment of free area to find
246 * Utility called from memblock_find_in_range_node(), find free area top-down.
252 __memblock_find_range_top_down(phys_addr_t start, phys_addr_t end, in __memblock_find_range_top_down() argument
253 phys_addr_t size, phys_addr_t align, int nid, in __memblock_find_range_top_down() argument
261 this_start = clamp(this_start, start, end); in __memblock_find_range_top_down()
262 this_end = clamp(this_end, start, end); in __memblock_find_range_top_down()
267 cand = round_down(this_end - size, align); in __memblock_find_range_top_down()
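A worked example of the candidate arithmetic in the two helpers above, with illustrative numbers: for a free window [0x2100, 0x9000) and a request of size 0x1000 aligned to 0x1000, the bottom-up helper picks cand = round_up(0x2100, 0x1000) = 0x3000 (and 0x9000 - 0x3000 >= 0x1000 holds), while the top-down helper picks cand = round_down(0x9000 - 0x1000, 0x1000) = 0x8000 (and 0x8000 >= 0x2100 holds).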
276 * memblock_find_in_range_node - find free area in given range and node
278 * @align: alignment of free area to find
280 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
285 * Find @size free area aligned to @align in the specified range and node.
291 phys_addr_t align, phys_addr_t start, in memblock_find_in_range_node() argument
292 phys_addr_t end, int nid, in memblock_find_in_range_node() argument
295 /* pump up @end */ in memblock_find_in_range_node()
296 if (end == MEMBLOCK_ALLOC_ACCESSIBLE || in memblock_find_in_range_node()
297 end == MEMBLOCK_ALLOC_NOLEAKTRACE) in memblock_find_in_range_node()
298 end = memblock.current_limit; in memblock_find_in_range_node()
302 end = max(start, end); in memblock_find_in_range_node()
305 return __memblock_find_range_bottom_up(start, end, size, align, in memblock_find_in_range_node()
308 return __memblock_find_range_top_down(start, end, size, align, in memblock_find_in_range_node()
313 * memblock_find_in_range - find free area in given range
315 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
318 * @align: alignment of free area to find
320 * Find @size free area aligned to @align in the specified range.
326 phys_addr_t end, phys_addr_t size, in memblock_find_in_range() argument
327 phys_addr_t align) in memblock_find_in_range() argument
333 ret = memblock_find_in_range_node(size, align, start, end, in memblock_find_in_range()
348 type->total_size -= type->regions[r].size; in memblock_remove_region()
349 memmove(&type->regions[r], &type->regions[r + 1], in memblock_remove_region()
350 (type->cnt - (r + 1)) * sizeof(type->regions[r])); in memblock_remove_region()
351 type->cnt--; in memblock_remove_region()
354 if (type->cnt == 0) { in memblock_remove_region()
355 WARN_ON(type->total_size != 0); in memblock_remove_region()
356 type->regions[0].base = 0; in memblock_remove_region()
357 type->regions[0].size = 0; in memblock_remove_region()
358 type->regions[0].flags = 0; in memblock_remove_region()
359 memblock_set_region_node(&type->regions[0], MAX_NUMNODES); in memblock_remove_region()
365 * memblock_discard - discard memory and reserved arrays if they were allocated
396 * memblock_double_array - double the size of the memblock regions array
408 * 0 on success, -1 on failure.
424 panic("memblock: cannot resize %s array\n", type->name); in memblock_double_array()
427 old_size = type->max * sizeof(struct memblock_region); in memblock_double_array()
430 * We need to allocate a new one aligned to PAGE_SIZE, in memblock_double_array()
463 type->name, type->max, type->max * 2); in memblock_double_array()
464 return -1; in memblock_double_array()
467 new_end = addr + new_size - 1; in memblock_double_array()
468 memblock_dbg("memblock: %s is doubled to %ld at [%pa-%pa]", in memblock_double_array()
469 type->name, type->max * 2, &addr, &new_end); in memblock_double_array()
476 memcpy(new_array, type->regions, old_size); in memblock_double_array()
477 memset(new_array + type->max, 0, old_size); in memblock_double_array()
478 old_array = type->regions; in memblock_double_array()
479 type->regions = new_array; in memblock_double_array()
480 type->max <<= 1; in memblock_double_array()
503 * memblock_merge_regions - merge neighboring compatible regions
505 * @start_rgn: start scanning from (@start_rgn - 1)
506 * @end_rgn: end scanning at (@end_rgn - 1)
507 * Scan @type and merge neighboring compatible regions in [@start_rgn - 1, @end_rgn)
515 i = start_rgn - 1; in memblock_merge_regions()
516 end_rgn = min(end_rgn, type->cnt - 1); in memblock_merge_regions()
518 struct memblock_region *this = &type->regions[i]; in memblock_merge_regions()
519 struct memblock_region *next = &type->regions[i + 1]; in memblock_merge_regions()
521 if (this->base + this->size != next->base || in memblock_merge_regions()
524 this->flags != next->flags) { in memblock_merge_regions()
525 BUG_ON(this->base + this->size > next->base); in memblock_merge_regions()
530 this->size += next->size; in memblock_merge_regions()
532 memmove(next, next + 1, (type->cnt - (i + 2)) * sizeof(*next)); in memblock_merge_regions()
533 type->cnt--; in memblock_merge_regions()
534 end_rgn--; in memblock_merge_regions()
539 * memblock_insert_region - insert new memblock region
556 struct memblock_region *rgn = &type->regions[idx]; in memblock_insert_region()
558 BUG_ON(type->cnt >= type->max); in memblock_insert_region()
559 memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn)); in memblock_insert_region()
560 rgn->base = base; in memblock_insert_region()
561 rgn->size = size; in memblock_insert_region()
562 rgn->flags = flags; in memblock_insert_region()
564 type->cnt++; in memblock_insert_region()
565 type->total_size += size; in memblock_insert_region()
569 * memblock_add_range - add new memblock region
577 * is allowed to overlap with existing ones - overlaps don't affect already
582 * 0 on success, -errno on failure.
590 phys_addr_t end = base + memblock_cap_size(base, &size); in memblock_add_range() local
591 int idx, nr_new, start_rgn = -1, end_rgn; in memblock_add_range()
598 if (type->regions[0].size == 0) { in memblock_add_range()
599 WARN_ON(type->cnt != 0 || type->total_size); in memblock_add_range()
600 type->regions[0].base = base; in memblock_add_range()
601 type->regions[0].size = size; in memblock_add_range()
602 type->regions[0].flags = flags; in memblock_add_range()
603 memblock_set_region_node(&type->regions[0], nid); in memblock_add_range()
604 type->total_size = size; in memblock_add_range()
605 type->cnt = 1; in memblock_add_range()
611 * then we'll need type->cnt + 1 empty regions in @type. So if in memblock_add_range()
612 * type->cnt * 2 + 1 is less than or equal to type->max, we know in memblock_add_range()
616 if (type->cnt * 2 + 1 <= type->max) in memblock_add_range()
629 phys_addr_t rbase = rgn->base; in memblock_add_range()
630 phys_addr_t rend = rbase + rgn->size; in memblock_add_range()
632 if (rbase >= end) in memblock_add_range()
644 WARN_ON(flags != rgn->flags); in memblock_add_range()
647 if (start_rgn == -1) in memblock_add_range()
651 rbase - base, nid, in memblock_add_range()
656 base = min(rend, end); in memblock_add_range()
660 if (base < end) { in memblock_add_range()
663 if (start_rgn == -1) in memblock_add_range()
666 memblock_insert_region(type, idx, base, end - base, in memblock_add_range()
679 while (type->cnt + nr_new > type->max) in memblock_add_range()
681 return -ENOMEM; in memblock_add_range()
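A worked example of the headroom check and the fallback above, with illustrative counts: inserting one range can split at most every existing region, so the worst case needs type->cnt + 1 free slots on top of the existing type->cnt entries, i.e. 2 * cnt + 1 in total. With the default of 128 region slots, 60 existing regions pass the check (121 <= 128) and the ranges are inserted in a single pass; 64 regions fail it (129 > 128), so the first pass only counts nr_new, memblock_double_array() is called until type->cnt + nr_new fits, and the walk is repeated with insertion enabled.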
691 * memblock_add_node - add new memblock region within a NUMA node
701 * 0 on success, -errno on failure.
706 phys_addr_t end = base + size - 1; in memblock_add_node() local
708 memblock_dbg("%s: [%pa-%pa] nid=%d flags=%x %pS\n", __func__, in memblock_add_node()
709 &base, &end, nid, flags, (void *)_RET_IP_); in memblock_add_node()
715 * memblock_add - add new memblock region
723 * 0 on success, -errno on failure.
727 phys_addr_t end = base + size - 1; in memblock_add() local
729 memblock_dbg("%s: [%pa-%pa] %pS\n", __func__, in memblock_add()
730 &base, &end, (void *)_RET_IP_); in memblock_add()
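A hedged sketch of how early platform code typically feeds these entry points; the wrapper name, addresses and sizes are illustrative, only the memblock calls themselves are real:

	void __init register_ram_example(void)
	{
		/* make a RAM bank known to memblock */
		memblock_add(0x80000000ULL, SZ_512M);
		/* a second bank that belongs to NUMA node 1 */
		memblock_add_node(0xc0000000ULL, SZ_256M, 1, MEMBLOCK_NONE);
		/* keep a firmware-owned chunk away from boot-time allocations */
		memblock_reserve(0x80000000ULL, SZ_1M);
	}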
736 * memblock_validate_numa_coverage - check if amount of memory with
755 nr_pages += end_pfn - start_pfn; in memblock_validate_numa_coverage()
770 * memblock_isolate_range - isolate given range into disjoint memblocks
775 * @end_rgn: out parameter for the end of isolated region
784 * 0 on success, -errno on failure.
790 phys_addr_t end = base + memblock_cap_size(base, &size); in memblock_isolate_range() local
800 while (type->cnt + 2 > type->max) in memblock_isolate_range()
802 return -ENOMEM; in memblock_isolate_range()
805 phys_addr_t rbase = rgn->base; in memblock_isolate_range()
806 phys_addr_t rend = rbase + rgn->size; in memblock_isolate_range()
808 if (rbase >= end) in memblock_isolate_range()
816 * to process the next region - the new top half. in memblock_isolate_range()
818 rgn->base = base; in memblock_isolate_range()
819 rgn->size -= base - rbase; in memblock_isolate_range()
820 type->total_size -= base - rbase; in memblock_isolate_range()
821 memblock_insert_region(type, idx, rbase, base - rbase, in memblock_isolate_range()
823 rgn->flags); in memblock_isolate_range()
824 } else if (rend > end) { in memblock_isolate_range()
827 * current region - the new bottom half. in memblock_isolate_range()
829 rgn->base = end; in memblock_isolate_range()
830 rgn->size -= end - rbase; in memblock_isolate_range()
831 type->total_size -= end - rbase; in memblock_isolate_range()
832 memblock_insert_region(type, idx--, rbase, end - rbase, in memblock_isolate_range()
834 rgn->flags); in memblock_isolate_range()
856 for (i = end_rgn - 1; i >= start_rgn; i--) in memblock_remove_range()
863 phys_addr_t end = base + size - 1; in memblock_remove() local
865 memblock_dbg("%s: [%pa-%pa] %pS\n", __func__, in memblock_remove()
866 &base, &end, (void *)_RET_IP_); in memblock_remove()
872 * memblock_free - free boot memory allocation
886 * memblock_phys_free - free boot memory block
895 phys_addr_t end = base + size - 1; in memblock_phys_free() local
897 memblock_dbg("%s: [%pa-%pa] %pS\n", __func__, in memblock_phys_free()
898 &base, &end, (void *)_RET_IP_); in memblock_phys_free()
906 phys_addr_t end = base + size - 1; in memblock_reserve() local
908 memblock_dbg("%s: [%pa-%pa] %pS\n", __func__, in memblock_reserve()
909 &base, &end, (void *)_RET_IP_); in memblock_reserve()
917 phys_addr_t end = base + size - 1; in memblock_physmem_add() local
919 memblock_dbg("%s: [%pa-%pa] %pS\n", __func__, in memblock_physmem_add()
920 &base, &end, (void *)_RET_IP_); in memblock_physmem_add()
927 * memblock_setclr_flag - set or clear flag for a memory region
936 * Return: 0 on success, -errno on failure.
948 struct memblock_region *r = &type->regions[i]; in memblock_setclr_flag()
951 r->flags |= flag; in memblock_setclr_flag()
953 r->flags &= ~flag; in memblock_setclr_flag()
961 * memblock_mark_hotplug - Mark hotpluggable memory with flag MEMBLOCK_HOTPLUG.
965 * Return: 0 on success, -errno on failure.
973 * memblock_clear_hotplug - Clear flag MEMBLOCK_HOTPLUG for a specified region.
977 * Return: 0 on success, -errno on failure.
985 * memblock_mark_mirror - Mark mirrored memory with flag MEMBLOCK_MIRROR.
989 * Return: 0 on success, -errno on failure.
1002 * memblock_mark_nomap - Mark a memory region with flag MEMBLOCK_NOMAP.
1014 * Return: 0 on success, -errno on failure.
1022 * memblock_clear_nomap - Clear flag MEMBLOCK_NOMAP for a specified region.
1026 * Return: 0 on success, -errno on failure.
1034 * memblock_reserved_mark_noinit - Mark a reserved memory region with flag
1043 * Return: 0 on success, -errno on failure.
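A brief sketch of the flag setters documented above; fw_base and fw_size are placeholder variables, not kernel symbols:

	/* record a firmware-owned range but keep it out of the linear map */
	memblock_add(fw_base, fw_size);
	memblock_mark_nomap(fw_base, fw_size);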
1070 /* if we want mirror memory skip non-mirror memory regions */ in should_skip_region()
1078 /* skip driver-managed memory unless we were asked for it explicitly */ in should_skip_region()
1086 * __next_mem_range - next function for for_each_free_mem_range() etc.
1093 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
1102 * 0:[0-16), 1:[32-48), 2:[128-130)
1106 * 0:[0-0), 1:[16-32), 2:[48-128), 3:[130-MAX)
1119 for (; idx_a < type_a->cnt; idx_a++) { in __next_mem_range()
1120 struct memblock_region *m = &type_a->regions[idx_a]; in __next_mem_range()
1122 phys_addr_t m_start = m->base; in __next_mem_range()
1123 phys_addr_t m_end = m->base + m->size; in __next_mem_range()
1142 for (; idx_b < type_b->cnt + 1; idx_b++) { in __next_mem_range()
1147 r = &type_b->regions[idx_b]; in __next_mem_range()
1148 r_start = idx_b ? r[-1].base + r[-1].size : 0; in __next_mem_range()
1149 r_end = idx_b < type_b->cnt ? in __next_mem_range()
1150 r->base : PHYS_ADDR_MAX; in __next_mem_range()
1181 /* signal end of iteration */ in __next_mem_range()
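This iterator is normally consumed through the for_each_free_mem_range() wrapper; a minimal sketch, with an illustrative loop body:

	phys_addr_t start, end;
	u64 i;

	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end, NULL)
		pr_info("free range: [%pa-%pa]\n", &start, &end);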
1186 * __next_mem_range_rev - generic next function for for_each_*_range_rev()
1194 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
1213 idx_a = type_a->cnt - 1; in __next_mem_range_rev()
1215 idx_b = type_b->cnt; in __next_mem_range_rev()
1220 for (; idx_a >= 0; idx_a--) { in __next_mem_range_rev()
1221 struct memblock_region *m = &type_a->regions[idx_a]; in __next_mem_range_rev()
1223 phys_addr_t m_start = m->base; in __next_mem_range_rev()
1224 phys_addr_t m_end = m->base + m->size; in __next_mem_range_rev()
1237 idx_a--; in __next_mem_range_rev()
1243 for (; idx_b >= 0; idx_b--) { in __next_mem_range_rev()
1248 r = &type_b->regions[idx_b]; in __next_mem_range_rev()
1249 r_start = idx_b ? r[-1].base + r[-1].size : 0; in __next_mem_range_rev()
1250 r_end = idx_b < type_b->cnt ? in __next_mem_range_rev()
1251 r->base : PHYS_ADDR_MAX; in __next_mem_range_rev()
1268 idx_a--; in __next_mem_range_rev()
1270 idx_b--; in __next_mem_range_rev()
1276 /* signal end of iteration */ in __next_mem_range_rev()
1291 while (++*idx < type->cnt) { in __next_mem_pfn_range()
1292 r = &type->regions[*idx]; in __next_mem_pfn_range()
1295 if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size)) in __next_mem_pfn_range()
1300 if (*idx >= type->cnt) { in __next_mem_pfn_range()
1301 *idx = -1; in __next_mem_pfn_range()
1306 *out_start_pfn = PFN_UP(r->base); in __next_mem_pfn_range()
1308 *out_end_pfn = PFN_DOWN(r->base + r->size); in __next_mem_pfn_range()
1314 * memblock_set_node - set node ID on memblock regions
1324 * 0 on success, -errno on failure.
1338 memblock_set_region_node(&type->regions[i], nid); in memblock_set_node()
1347 * __next_mem_pfn_range_in_zone - iterator for for_each_*_range_in_zone()
1352 * @out_epfn: ptr to ulong for end pfn of the range, can be %NULL
1377 * Verify the end is at least past the start of the zone and in __next_mem_pfn_range_in_zone()
1380 if (zone->zone_start_pfn < epfn && spfn < epfn) { in __next_mem_pfn_range_in_zone()
1388 *out_spfn = max(zone->zone_start_pfn, spfn); in __next_mem_pfn_range_in_zone()
1400 /* signal end of iteration */ in __next_mem_pfn_range_in_zone()
1410 * memblock_alloc_range_nid - allocate boot memory block
1412 * @align: alignment of the region and block's size
1414 * @end: the upper bound of the memory region to allocate (phys address)
1419 * memblock.current_limit if @end == %MEMBLOCK_ALLOC_ACCESSIBLE.
1435 phys_addr_t align, phys_addr_t start, in memblock_alloc_range_nid() argument
1436 phys_addr_t end, int nid, in memblock_alloc_range_nid() argument
1453 if (!align) { in memblock_alloc_range_nid()
1456 align = SMP_CACHE_BYTES; in memblock_alloc_range_nid()
1460 found = memblock_find_in_range_node(size, align, start, end, nid, in memblock_alloc_range_nid()
1466 found = memblock_find_in_range_node(size, align, start, in memblock_alloc_range_nid()
1467 end, NUMA_NO_NODE, in memblock_alloc_range_nid()
1487 if (end != MEMBLOCK_ALLOC_NOLEAKTRACE) in memblock_alloc_range_nid()
1497 * Some Virtual Machine platforms, such as Intel TDX or AMD SEV-SNP, in memblock_alloc_range_nid()
1509 * memblock_phys_alloc_range - allocate a memory block inside specified range
1511 * @align: alignment of the region and block's size
1513 * @end: the upper bound of the memory region to allocate (physical address)
1515 * Allocate @size bytes in the range between @start and @end. in memblock_phys_alloc_range()
1521 phys_addr_t align, in memblock_phys_alloc_range() argument
1523 phys_addr_t end) in memblock_phys_alloc_range() argument
1525 memblock_dbg("%s: %llu bytes align=0x%llx from=%pa max_addr=%pa %pS\n", in memblock_phys_alloc_range()
1526 __func__, (u64)size, (u64)align, &start, &end, in memblock_phys_alloc_range()
1528 return memblock_alloc_range_nid(size, align, start, end, NUMA_NO_NODE, in memblock_phys_alloc_range()
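A hedged sketch of a range-limited physical allocation; the size, alignment and below-4G cap are illustrative (for instance a buffer that must be reachable by 32-bit DMA):

	phys_addr_t low_buf = memblock_phys_alloc_range(SZ_16M, SZ_2M, 0, SZ_4G);

	if (!low_buf)
		pr_warn("no memory below 4G for the example buffer\n");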
1533 * memblock_phys_alloc_try_nid - allocate a memory block from specified NUMA node
1535 * @align: alignment of the region and block's size
1545 phys_addr_t __init memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid) in memblock_phys_alloc_try_nid() argument
1547 return memblock_alloc_range_nid(size, align, 0, in memblock_phys_alloc_try_nid()
1552 * memblock_alloc_internal - allocate boot memory block
1554 * @align: alignment of the region and block's size
1572 phys_addr_t size, phys_addr_t align, in memblock_alloc_internal() argument
1582 alloc = memblock_alloc_range_nid(size, align, min_addr, max_addr, nid, in memblock_alloc_internal()
1587 alloc = memblock_alloc_range_nid(size, align, 0, max_addr, nid, in memblock_alloc_internal()
1597 * memblock_alloc_exact_nid_raw - allocate boot memory block on the exact node
1600 * @align: alignment of the region and block's size
1615 phys_addr_t size, phys_addr_t align, in memblock_alloc_exact_nid_raw() argument
1619 memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n", in memblock_alloc_exact_nid_raw()
1620 __func__, (u64)size, (u64)align, nid, &min_addr, in memblock_alloc_exact_nid_raw()
1623 return memblock_alloc_internal(size, align, min_addr, max_addr, nid, in memblock_alloc_exact_nid_raw()
1628 * memblock_alloc_try_nid_raw - allocate boot memory block without zeroing
1631 * @align: alignment of the region and block's size
1647 phys_addr_t size, phys_addr_t align, in memblock_alloc_try_nid_raw() argument
1651 memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n", in memblock_alloc_try_nid_raw()
1652 __func__, (u64)size, (u64)align, nid, &min_addr, in memblock_alloc_try_nid_raw()
1655 return memblock_alloc_internal(size, align, min_addr, max_addr, nid, in memblock_alloc_try_nid_raw()
1660 * memblock_alloc_try_nid - allocate boot memory block
1662 * @align: alignment of the region and block's size
1677 phys_addr_t size, phys_addr_t align, in memblock_alloc_try_nid() argument
1683 memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n", in memblock_alloc_try_nid()
1684 __func__, (u64)size, (u64)align, nid, &min_addr, in memblock_alloc_try_nid()
1686 ptr = memblock_alloc_internal(size, align, in memblock_alloc_try_nid()
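A small sketch of the node-preferring allocation; nid and the buffer size are illustrative, and the node hint is only a preference, since the fallback paths above retry without it before returning NULL:

	void *buf = memblock_alloc_try_nid(SZ_64K, SMP_CACHE_BYTES, 0,
					   MEMBLOCK_ALLOC_ACCESSIBLE, nid);

	if (!buf)
		panic("cannot allocate example buffer for node %d\n", nid);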
1695 * __memblock_alloc_or_panic - Try to allocate memory and panic on failure
1697 * @align: alignment of the region and block's size
1704 void *__init __memblock_alloc_or_panic(phys_addr_t size, phys_addr_t align, in __memblock_alloc_or_panic() argument
1707 void *addr = memblock_alloc(size, align); in __memblock_alloc_or_panic()
1715 * memblock_free_late - free pages directly to buddy allocator
1725 phys_addr_t cursor, end; in memblock_free_late() local
1727 end = base + size - 1; in memblock_free_late()
1728 memblock_dbg("%s: [%pa-%pa] %pS\n", in memblock_free_late()
1729 __func__, &base, &end, (void *)_RET_IP_); in memblock_free_late()
1732 end = PFN_DOWN(base + size); in memblock_free_late()
1734 for (; cursor < end; cursor++) { in memblock_free_late()
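A short sketch of handing an early reservation back to the buddy allocator once it is initialized; leftover_buf and leftover_size are placeholders for any boot-time allocation that is no longer needed:

	memblock_free_late(__pa(leftover_buf), leftover_size);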
1755 * memblock_estimated_nr_free_pages - return estimated number of free pages
1768 return PHYS_PFN(memblock_phys_mem_size() - memblock_reserved_size()); in memblock_estimated_nr_free_pages()
1779 int idx = memblock.memory.cnt - 1; in memblock_end_of_DRAM()
1795 if (limit <= r->size) { in __find_max_addr()
1796 max_addr = r->base + limit; in __find_max_addr()
1799 limit -= r->size; in __find_max_addr()
1833 if (!memblock_memory->total_size) { in memblock_cap_memory_range()
1844 for (i = memblock.memory.cnt - 1; i >= end_rgn; i--) in memblock_cap_memory_range()
1848 for (i = start_rgn - 1; i >= 0; i--) in memblock_cap_memory_range()
1876 unsigned int left = 0, right = type->cnt; in memblock_search()
1881 if (addr < type->regions[mid].base) in memblock_search()
1883 else if (addr >= (type->regions[mid].base + in memblock_search()
1884 type->regions[mid].size)) in memblock_search()
1889 return -1; in memblock_search()
1894 return memblock_search(&memblock.reserved, addr) != -1; in memblock_is_reserved()
1899 return memblock_search(&memblock.memory, addr) != -1; in memblock_is_memory()
1906 if (i == -1) in memblock_is_map_memory()
1917 if (mid == -1) in memblock_search_pfn_nid()
1920 *start_pfn = PFN_DOWN(type->regions[mid].base); in memblock_search_pfn_nid()
1921 *end_pfn = PFN_DOWN(type->regions[mid].base + type->regions[mid].size); in memblock_search_pfn_nid()
1923 return memblock_get_region_node(&type->regions[mid]); in memblock_search_pfn_nid()
1927 * memblock_is_region_memory - check if a region is a subset of memory
1934 * 0 if false, non-zero if true
1939 phys_addr_t end = base + memblock_cap_size(base, &size); in memblock_is_region_memory() local
1941 if (idx == -1) in memblock_is_region_memory()
1944 memblock.memory.regions[idx].size) >= end; in memblock_is_region_memory()
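A quick sketch of the subset check; table_pa and table_len are placeholder variables for a firmware-provided range that should be backed by RAM:

	if (!memblock_is_region_memory(table_pa, table_len))
		pr_warn("range at %pa is not fully covered by memory\n", &table_pa);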
1948 * memblock_is_region_reserved - check if a region intersects reserved memory
1963 void __init_memblock memblock_trim_memory(phys_addr_t align) in memblock_trim_memory() argument
1965 phys_addr_t start, end, orig_start, orig_end; in memblock_trim_memory() local
1969 orig_start = r->base; in memblock_trim_memory()
1970 orig_end = r->base + r->size; in memblock_trim_memory()
1971 start = round_up(orig_start, align); in memblock_trim_memory()
1972 end = round_down(orig_end, align); in memblock_trim_memory()
1974 if (start == orig_start && end == orig_end) in memblock_trim_memory()
1977 if (start < end) { in memblock_trim_memory()
1978 r->base = start; in memblock_trim_memory()
1979 r->size = end - start; in memblock_trim_memory()
1982 r - memblock.memory.regions); in memblock_trim_memory()
1983 r--; in memblock_trim_memory()
2000 phys_addr_t base, end, size; in memblock_dump() local
2005 pr_info(" %s.cnt = 0x%lx\n", type->name, type->cnt); in memblock_dump()
2010 base = rgn->base; in memblock_dump()
2011 size = rgn->size; in memblock_dump()
2012 end = base + size - 1; in memblock_dump()
2013 flags = rgn->flags; in memblock_dump()
2019 pr_info(" %s[%#x]\t[%pa-%pa], %pa bytes%s flags: %#x\n", in memblock_dump()
2020 type->name, idx, &base, &end, &size, nid_buf, flags); in memblock_dump()
2065 start_pg = pfn_to_page(start_pfn - 1) + 1; in free_memmap()
2066 end_pg = pfn_to_page(end_pfn - 1) + 1; in free_memmap()
2069 * Convert to physical addresses, and round start upwards and end in free_memmap()
2080 memblock_phys_free(pg, pgend - pg); in free_memmap()
2088 unsigned long start, end, prev_end = 0; in free_unused_memmap() local
2099 for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) { in free_unused_memmap()
2105 start = min(start, ALIGN(prev_end, PAGES_PER_SECTION)); in free_unused_memmap()
2108 * Align down here since many operations in VM subsystem in free_unused_memmap()
2122 * Align up here since many operations in VM subsystem in free_unused_memmap()
2126 prev_end = pageblock_align(end); in free_unused_memmap()
2131 prev_end = pageblock_align(end); in free_unused_memmap()
2132 free_memmap(prev_end, ALIGN(prev_end, PAGES_PER_SECTION)); in free_unused_memmap()
2137 static void __init __free_pages_memory(unsigned long start, unsigned long end) in __free_pages_memory() argument
2141 while (start < end) { in __free_pages_memory()
2146 * MAX_PAGE_ORDER-aligned, set order to MAX_PAGE_ORDER for in __free_pages_memory()
2154 while (start + (1UL << order) > end) in __free_pages_memory()
2155 order--; in __free_pages_memory()
2164 phys_addr_t end) in __free_memory_core() argument
2168 PFN_DOWN(end), max_low_pfn); in __free_memory_core()
2175 return end_pfn - start_pfn; in __free_memory_core()
2181 phys_addr_t start, end; in memmap_init_reserved_pages() local
2190 start = region->base; in memmap_init_reserved_pages()
2191 end = start + region->size; in memmap_init_reserved_pages()
2194 reserve_bootmem_region(start, end, nid); in memmap_init_reserved_pages()
2196 memblock_set_node(start, end, &memblock.reserved, nid); in memmap_init_reserved_pages()
2206 start = region->base; in memmap_init_reserved_pages()
2207 end = start + region->size; in memmap_init_reserved_pages()
2212 reserve_bootmem_region(start, end, nid); in memmap_init_reserved_pages()
2220 phys_addr_t start, end; in free_low_memory_core_early() local
2223 memblock_clear_hotplug(0, -1); in free_low_memory_core_early()
2228 * We need to use NUMA_NO_NODE instead of NODE_DATA(0)->node_id in free_low_memory_core_early()
2232 for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end, in free_low_memory_core_early()
2234 count += __free_memory_core(start, end); in free_low_memory_core_early()
2245 for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++) in reset_node_managed_pages()
2246 atomic_long_set(&z->managed_pages, 0); in reset_node_managed_pages()
2263 * memblock_free_all - release free pages to the buddy allocator
2294 map->start = start; in reserved_mem_add()
2295 map->size = size; in reserved_mem_add()
2296 strscpy(map->name, name); in reserved_mem_add()
2300 * reserve_mem_find_by_name - Find reserved memory region with a given name
2316 if (!map->size) in reserve_mem_find_by_name()
2318 if (strcmp(name, map->name) == 0) { in reserve_mem_find_by_name()
2319 *start = map->start; in reserve_mem_find_by_name()
2320 *size = map->size; in reserve_mem_find_by_name()
2329 * Parse reserve_mem=nn:align:name
2333 phys_addr_t start, size, align, tmp; in reserve_mem() local
2339 return -EINVAL; in reserve_mem()
2343 return -EBUSY; in reserve_mem()
2348 return -EINVAL; in reserve_mem()
2351 return -EINVAL; in reserve_mem()
2353 align = memparse(p+1, &p); in reserve_mem()
2355 return -EINVAL; in reserve_mem()
2358 * memblock_phys_alloc() doesn't like a zero size align, in reserve_mem()
2361 if (align < SMP_CACHE_BYTES) in reserve_mem()
2362 align = SMP_CACHE_BYTES; in reserve_mem()
2369 return -EINVAL; in reserve_mem()
2377 return -EINVAL; in reserve_mem()
2381 return -EBUSY; in reserve_mem()
2383 start = memblock_phys_alloc(size, align); in reserve_mem()
2385 return -ENOMEM; in reserve_mem()
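A usage sketch tying the command line option and the lookup together; the name "trace" and the 2M:4096 geometry are illustrative. Booting with reserve_mem=2M:4096:trace creates a named reservation that can be retrieved later:

	phys_addr_t start, size;

	if (reserve_mem_find_by_name("trace", &start, &size))
		pr_info("reserved region: %pa, %pa bytes\n", &start, &size);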
2404 struct memblock_type *type = m->private; in memblock_debug_show()
2408 phys_addr_t end; in memblock_debug_show() local
2410 for (i = 0; i < type->cnt; i++) { in memblock_debug_show()
2411 reg = &type->regions[i]; in memblock_debug_show()
2412 end = reg->base + reg->size - 1; in memblock_debug_show()
2416 seq_printf(m, "%pa..%pa ", &reg->base, &end); in memblock_debug_show()
2421 if (reg->flags) { in memblock_debug_show()
2423 if (reg->flags & (1U << j)) { in memblock_debug_show()