Lines Matching full:regions

39 * Memblock is a method of managing memory regions during the early
44 * regions. There are several types of these collections:
50 * * ``reserved`` - describes the regions that were allocated
58 * which contains an array of memory regions along with
66 * arrays during addition of new regions. This feature should be used
116 .memory.regions = memblock_memory_init_regions,
120 .reserved.regions = memblock_reserved_init_regions,
130 .regions = memblock_physmem_init_regions,
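
The matched initializer lines read more easily next to the structure they populate. Below is a minimal user-space sketch of that layout; mb_region, mb_type, and INIT_REGIONS are simplified stand-ins for the kernel's struct memblock_region, struct memblock_type, and INIT_MEMBLOCK_REGIONS, not the real definitions:

    #include <stdint.h>

    #define INIT_REGIONS 128            /* stands in for INIT_MEMBLOCK_REGIONS */

    struct mb_region {                  /* simplified struct memblock_region */
        uint64_t base, size;
        unsigned int flags;
        int nid;
    };

    struct mb_type {                    /* simplified struct memblock_type */
        unsigned long cnt;              /* number of populated regions */
        unsigned long max;              /* capacity of the regions array */
        uint64_t total_size;
        struct mb_region *regions;
    };

    struct mb {                         /* simplified struct memblock */
        struct mb_type memory;
        struct mb_type reserved;
    };

    /* static bootstrap arrays, usable before any allocator exists */
    static struct mb_region memory_init_regions[INIT_REGIONS];
    static struct mb_region reserved_init_regions[INIT_REGIONS];

    static struct mb mb = {
        .memory.regions   = memory_init_regions,
        .memory.max       = INIT_REGIONS,
        .reserved.regions = reserved_init_regions,
        .reserved.max     = INIT_REGIONS,
    };
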
145 for (i = 0, rgn = &memblock_type->regions[0]; \
147 i++, rgn = &memblock_type->regions[i])
195 if (memblock_addrs_overlap(base, size, type->regions[i].base, in memblock_overlaps_region()
196 type->regions[i].size)) in memblock_overlaps_region()
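
The iteration macro (lines 145-147) and the overlap scan (lines 195-196) pair naturally: index and pointer advance together, and each region is tested with a half-open interval overlap check. A compilable sketch using the same simplified types as above:

    #include <stdbool.h>
    #include <stdint.h>

    struct mb_region { uint64_t base, size; };
    struct mb_type { unsigned long cnt; struct mb_region *regions; };

    /* iteration in the style of the matched for-loop */
    #define for_each_region(i, rgn, type)              \
        for ((i) = 0, (rgn) = &(type)->regions[0];     \
             (i) < (type)->cnt;                        \
             (i)++, (rgn) = &(type)->regions[(i)])

    /* two half-open intervals [b1, b1+s1) and [b2, b2+s2) overlap iff
     * each starts before the other ends -- the memblock_addrs_overlap()
     * test */
    static bool addrs_overlap(uint64_t b1, uint64_t s1,
                              uint64_t b2, uint64_t s2)
    {
        return b1 < (b2 + s2) && b2 < (b1 + s1);
    }

    static bool overlaps_any(struct mb_type *type,
                             uint64_t base, uint64_t size)
    {
        unsigned long i;
        struct mb_region *rgn;

        for_each_region(i, rgn, type)
            if (addrs_overlap(base, size, rgn->base, rgn->size))
                return true;
        return false;
    }
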
348 type->total_size -= type->regions[r].size; in memblock_remove_region()
349 memmove(&type->regions[r], &type->regions[r + 1], in memblock_remove_region()
350 (type->cnt - (r + 1)) * sizeof(type->regions[r])); in memblock_remove_region()
356 type->regions[0].base = 0; in memblock_remove_region()
357 type->regions[0].size = 0; in memblock_remove_region()
358 type->regions[0].flags = 0; in memblock_remove_region()
359 memblock_set_region_node(&type->regions[0], MAX_NUMNODES); in memblock_remove_region()
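
The removal lines show two cases: compact the array with memmove(), and, when the last region disappears, leave a single zeroed sentinel entry rather than an empty array. A sketch under the same simplified types:

    #include <stdint.h>
    #include <string.h>

    struct mb_region { uint64_t base, size; unsigned int flags; };
    struct mb_type {
        unsigned long cnt;
        uint64_t total_size;
        struct mb_region *regions;
    };

    static void remove_region(struct mb_type *type, unsigned long r)
    {
        type->total_size -= type->regions[r].size;
        /* close the gap by sliding the tail of the array down one slot */
        memmove(&type->regions[r], &type->regions[r + 1],
                (type->cnt - (r + 1)) * sizeof(type->regions[r]));
        type->cnt--;

        /* never leave the array truly empty: keep one zeroed entry so
         * callers can always dereference regions[0] */
        if (type->cnt == 0) {
            type->cnt = 1;
            type->regions[0].base = 0;
            type->regions[0].size = 0;
            type->regions[0].flags = 0;
        }
    }
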
371 if (memblock.reserved.regions != memblock_reserved_init_regions) { in memblock_discard()
372 addr = __pa(memblock.reserved.regions); in memblock_discard()
376 kfree(memblock.reserved.regions); in memblock_discard()
381 if (memblock.memory.regions != memblock_memory_init_regions) { in memblock_discard()
382 addr = __pa(memblock.memory.regions); in memblock_discard()
386 kfree(memblock.memory.regions); in memblock_discard()
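
memblock_discard() frees an array only when its pointer no longer refers to the static bootstrap storage, which is how it tells "still the initial array" from "reallocated later". A hedged user-space analogue, with free() standing in for the kernel's memblock_free_late()/kfree() paths:

    #include <stdlib.h>

    struct mb_region { unsigned long base, size; };
    struct mb_type { struct mb_region *regions; };

    static struct mb_region bootstrap[128];   /* the static initial array */

    static void discard_array(struct mb_type *type)
    {
        /* pointer comparison is the whole test: if it still points at
         * the static array, nothing was ever allocated, nothing to free */
        if (type->regions != bootstrap)
            free(type->regions);
    }
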
396 * memblock_double_array - double the size of the memblock regions array
397 * @type: memblock type of the regions array being doubled
401 * Double the size of the @type regions array. If memblock is being used to
402 * allocate memory for a new reserved regions array and there is a previously
420 /* We don't allow resizing until we know about the reserved regions in memblock_double_array()
447 /* only exclude range when trying to double reserved.regions */ in memblock_double_array()
476 memcpy(new_array, type->regions, old_size); in memblock_double_array()
478 old_array = type->regions; in memblock_double_array()
479 type->regions = new_array; in memblock_double_array()
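
The doubling path is allocate, copy, swap. A sketch, with calloc() standing in for the kernel's choice between memblock_find_in_range() and the slab allocator, and with the static-array check from the discard path reused so the bootstrap storage is never freed:

    #include <stdlib.h>
    #include <string.h>

    struct mb_region { unsigned long base, size; };
    struct mb_type {
        unsigned long cnt, max;
        struct mb_region *regions;
    };

    static struct mb_region bootstrap[128];

    static int double_array(struct mb_type *type)
    {
        size_t old_size = type->max * sizeof(*type->regions);
        struct mb_region *new_array;

        new_array = calloc(type->max * 2, sizeof(*type->regions));
        if (!new_array)
            return -1;

        memcpy(new_array, type->regions, old_size);

        /* swap in the bigger array; only free the old one if it was
         * itself dynamically allocated */
        if (type->regions != bootstrap)
            free(type->regions);
        type->regions = new_array;
        type->max *= 2;
        return 0;
    }
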
503 * memblock_merge_regions - merge neighboring compatible regions
507 * Scan @type and merge neighboring compatible regions in [@start_rgn - 1, @end_rgn)
518 struct memblock_region *this = &type->regions[i]; in memblock_merge_regions()
519 struct memblock_region *next = &type->regions[i + 1]; in memblock_merge_regions()
556 struct memblock_region *rgn = &type->regions[idx]; in memblock_insert_region()
578 * existing regions. @type is guaranteed to be minimal (all neighbouring
579 * compatible regions are merged) after the addition.
598 if (type->regions[0].size == 0) { in memblock_add_range()
600 type->regions[0].base = base; in memblock_add_range()
601 type->regions[0].size = size; in memblock_add_range()
602 type->regions[0].flags = flags; in memblock_add_range()
603 memblock_set_region_node(&type->regions[0], nid); in memblock_add_range()
610 * The worst case is when the new range overlaps all existing regions, in memblock_add_range()
611 * then we'll need type->cnt + 1 empty regions in @type. So if in memblock_add_range()
613 * that there are enough empty regions in @type, and we can insert in memblock_add_range()
614 * regions directly. in memblock_add_range()
622 * then with %true. The first counts the number of regions needed in memblock_add_range()
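
The matched comments describe a two-pass trick: run the insertion logic once with insert == false purely to count how many slots the new range would need (at most cnt + 1, when it overlaps every existing region), grow the array until it fits, then run the same logic again with insert == true. A control-flow skeleton, with the per-region bookkeeping stubbed out as a hypothetical count_or_insert():

    #include <stdbool.h>

    /* hypothetical stub: in the real function this is the loop that
     * walks existing regions and either inserts the uncovered pieces
     * of the new range or merely counts them */
    static int count_or_insert(bool insert)
    {
        return insert ? 0 : 2;   /* pretend the range needs two new slots */
    }

    static int grow(unsigned long *max)  /* stands in for double_array() */
    {
        *max *= 2;
        return 0;
    }

    static int add_range_skeleton(unsigned long cnt, unsigned long max)
    {
        bool insert = false;
        int nr_new;

    repeat:
        nr_new = count_or_insert(insert);
        if (!insert) {
            /* make room before committing anything */
            while (cnt + nr_new > max)
                if (grow(&max))
                    return -1;
            insert = true;
            goto repeat;
        }
        return 0;
    }
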
777 * Walk @type and ensure that regions don't cross the boundaries defined by
778 * [@base, @base + @size). Crossing regions are split at the boundaries,
779 * which may create at most two more regions. The index of the first
799 /* we'll create at most two more regions */ in memblock_isolate_range()
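
Isolation splits any region straddling @base or @base + @size, so afterwards the range boundaries always coincide with region boundaries; since only the two endpoints can straddle, at most two extra regions appear. A sketch of one split, with insert_region() opening a slot via memmove():

    #include <stdint.h>
    #include <string.h>

    struct mb_region { uint64_t base, size; };
    struct mb_type { unsigned long cnt, max; struct mb_region *regions; };

    /* open a slot at idx and fill it; assumes cnt < max */
    static void insert_region(struct mb_type *type, unsigned long idx,
                              uint64_t base, uint64_t size)
    {
        struct mb_region *rgn = &type->regions[idx];

        memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn));
        rgn->base = base;
        rgn->size = size;
        type->cnt++;
    }

    /* split regions[idx] at addr, which must lie strictly inside it;
     * the head stays in place, the tail becomes a new region at idx+1 */
    static void split_at(struct mb_type *type, unsigned long idx,
                         uint64_t addr)
    {
        struct mb_region *rgn = &type->regions[idx];
        uint64_t tail = rgn->base + rgn->size - addr;

        rgn->size = addr - rgn->base;
        insert_region(type, idx + 1, addr, tail);
    }
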
948 struct memblock_region *r = &type->regions[i]; in memblock_setclr_flag()
1006 * The memory regions marked with %MEMBLOCK_NOMAP will not be added to the
1007 * direct mapping of the physical memory. These regions will still be
1040 * struct pages will not be initialized for reserved memory regions marked with
1057 /* we never skip regions when iterating memblock.reserved or physmem */ in should_skip_region()
1061 /* only memory regions are associated with nodes, check it */ in should_skip_region()
1065 /* skip hotpluggable memory regions if needed */ in should_skip_region()
1070 /* if we want mirror memory skip non-mirror memory regions */ in should_skip_region()
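
The skip logic is a chain of per-region filters applied during iteration. A simplified sketch with assumed stand-in flag bits; the kernel's actual conditions also depend on state such as movable_node_is_enabled(), omitted here:

    #include <stdbool.h>

    #define NID_ANY   (-1)       /* stands in for NUMA_NO_NODE */
    #define F_HOTPLUG (1u << 0)  /* assumed stand-ins for MEMBLOCK_* flags */
    #define F_MIRROR  (1u << 1)
    #define F_NOMAP   (1u << 2)

    struct mb_region { unsigned long base, size; unsigned int flags; int nid; };

    static bool should_skip(const struct mb_region *m, int nid,
                            unsigned int want)
    {
        if (nid != NID_ANY && nid != m->nid)
            return true;                     /* wrong NUMA node */
        if (!(want & F_HOTPLUG) && (m->flags & F_HOTPLUG))
            return true;                     /* hotpluggable not requested */
        if ((want & F_MIRROR) && !(m->flags & F_MIRROR))
            return true;                     /* mirror wanted, region isn't */
        if (!(want & F_NOMAP) && (m->flags & F_NOMAP))
            return true;                     /* nomap not requested */
        return false;
    }
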
1099 * areas before each region in type_b. For example, if type_b regions
1104 * The upper 32 bits index the following regions.
1120 struct memblock_region *m = &type_a->regions[idx_a]; in __next_mem_range()
1147 r = &type_b->regions[idx_b]; in __next_mem_range()
1158 /* if the two regions intersect, we're done */ in __next_mem_range()
1221 struct memblock_region *m = &type_a->regions[idx_a]; in __next_mem_range_rev()
1248 r = &type_b->regions[idx_b]; in __next_mem_range_rev()
1259 /* if the two regions intersect, we're done */ in __next_mem_range_rev()
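
Both range iterators walk two region arrays at once and keep their position in one u64 cookie: the low 32 bits index type_a's regions, the high 32 bits index type_b's. A sketch of that encoding:

    #include <stdint.h>

    /* one 64-bit iterator cookie carrying two 32-bit array indexes */
    static inline uint32_t idx_a(uint64_t idx) { return (uint32_t)idx; }
    static inline uint32_t idx_b(uint64_t idx) { return (uint32_t)(idx >> 32); }

    static inline uint64_t pack_idx(uint32_t a, uint32_t b)
    {
        return ((uint64_t)b << 32) | a;
    }
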
1292 r = &type->regions[*idx]; in __next_mem_pfn_range()
1314 * memblock_set_node - set node ID on memblock regions
1320 * Set the nid of memblock @type regions in [@base, @base + @size) to @nid.
1321 * Regions which cross the area boundaries are split as necessary.
1338 memblock_set_region_node(&type->regions[i], nid); in memblock_set_node()
1425 * from the regions with mirroring enabled and then retried from any
1774 return memblock.memory.regions[0].base; in memblock_start_of_DRAM()
1781 return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size); in memblock_end_of_DRAM()
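
Because memblock.memory is kept sorted by base address, the DRAM bounds fall straight out of the first and last entries. A sketch:

    #include <stdint.h>

    struct mb_region { uint64_t base, size; };
    struct mb_type { unsigned long cnt; struct mb_region *regions; };

    static uint64_t start_of_dram(const struct mb_type *mem)
    {
        return mem->regions[0].base;     /* lowest base: array is sorted */
    }

    static uint64_t end_of_dram(const struct mb_type *mem)
    {
        const struct mb_region *r = &mem->regions[mem->cnt - 1];

        return r->base + r->size;        /* end of the highest region */
    }
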
1791 * the memory memblock regions, if the @limit exceeds the total size in __find_max_addr()
1792 * of those regions, max_addr will keep its original value PHYS_ADDR_MAX in __find_max_addr()
1818 /* truncate both memory and reserved regions */ in memblock_enforce_memory_limit()
1843 /* remove all the MAP regions */ in memblock_cap_memory_range()
1845 if (!memblock_is_nomap(&memblock.memory.regions[i])) in memblock_cap_memory_range()
1849 if (!memblock_is_nomap(&memblock.memory.regions[i])) in memblock_cap_memory_range()
1852 /* truncate the reserved regions */ in memblock_cap_memory_range()
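
__find_max_addr() turns a size budget into an address by walking the sorted memory regions, subtracting each region's size from @limit until the budget runs out inside a region; if it never does, the PHYS_ADDR_MAX sentinel is returned unchanged. A sketch:

    #include <stdint.h>

    struct mb_region { uint64_t base, size; };
    struct mb_type { unsigned long cnt; struct mb_region *regions; };

    static uint64_t find_max_addr(const struct mb_type *mem, uint64_t limit)
    {
        uint64_t max_addr = UINT64_MAX;      /* PHYS_ADDR_MAX stand-in */

        for (unsigned long i = 0; i < mem->cnt; i++) {
            const struct mb_region *r = &mem->regions[i];

            if (limit <= r->size) {          /* budget ends inside r */
                max_addr = r->base + limit;
                break;
            }
            limit -= r->size;
        }
        return max_addr;
    }
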
1881 if (addr < type->regions[mid].base) in memblock_search()
1883 else if (addr >= (type->regions[mid].base + in memblock_search()
1884 type->regions[mid].size)) in memblock_search()
1908 return !memblock_is_nomap(&memblock.memory.regions[i]); in memblock_is_map_memory()
1920 *start_pfn = PFN_DOWN(type->regions[mid].base); in memblock_search_pfn_nid()
1921 *end_pfn = PFN_DOWN(type->regions[mid].base + type->regions[mid].size); in memblock_search_pfn_nid()
1923 return memblock_get_region_node(&type->regions[mid]); in memblock_search_pfn_nid()
1943 return (memblock.memory.regions[idx].base + in memblock_is_region_memory()
1944 memblock.memory.regions[idx].size) >= end; in memblock_is_region_memory()
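
memblock_search() can binary-search because the regions array is sorted and non-overlapping: each probe either lands inside regions[mid] or discards half the range. A sketch returning the matching index or -1:

    #include <stdint.h>

    struct mb_region { uint64_t base, size; };
    struct mb_type { unsigned long cnt; struct mb_region *regions; };

    static long search(const struct mb_type *type, uint64_t addr)
    {
        unsigned long lo = 0, hi = type->cnt;

        while (lo < hi) {
            unsigned long mid = lo + (hi - lo) / 2;

            if (addr < type->regions[mid].base)
                hi = mid;                    /* addr is below this region */
            else if (addr >= type->regions[mid].base +
                             type->regions[mid].size)
                lo = mid + 1;                /* addr is above this region */
            else
                return (long)mid;            /* addr falls inside it */
        }
        return -1;
    }
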
1982 r - memblock.memory.regions); in memblock_trim_memory()
2186 * pages for the NOMAP regions as PageReserved in memmap_init_reserved_pages()
2200 * initialize struct pages for reserved regions that don't have in memmap_init_reserved_pages()
2411 reg = &type->regions[i]; in memblock_debug_show()
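
The debug output is a plain per-region dump. A user-space sketch in the spirit of memblock_debug_show():

    #include <stdio.h>
    #include <stdint.h>

    struct mb_region { uint64_t base, size; unsigned int flags; };
    struct mb_type { unsigned long cnt; struct mb_region *regions; };

    static void debug_show(const char *name, const struct mb_type *type)
    {
        for (unsigned long i = 0; i < type->cnt; i++) {
            const struct mb_region *reg = &type->regions[i];

            printf("%s[%lu]\t[0x%016llx-0x%016llx] flags 0x%x\n",
                   name, i,
                   (unsigned long long)reg->base,
                   (unsigned long long)(reg->base + reg->size - 1),
                   reg->flags);
        }
    }
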