#ifndef _LINUX_MEMBLOCK_H
#define _LINUX_MEMBLOCK_H
#ifdef __KERNEL__

#ifdef CONFIG_HAVE_MEMBLOCK
/*
 * Logical memory blocks.
 *
 * Copyright (C) 2001 Peter Bergner, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/init.h>
#include <linux/mm.h>

#define INIT_MEMBLOCK_REGIONS	128
#define INIT_PHYSMEM_REGIONS	4

/* Definition of memblock flags. */
enum {
	MEMBLOCK_NONE		= 0x0,	/* No special request */
	MEMBLOCK_HOTPLUG	= 0x1,	/* hotpluggable region */
	MEMBLOCK_MIRROR		= 0x2,	/* mirrored region */
	MEMBLOCK_NOMAP		= 0x4,	/* don't add to kernel direct mapping */
};

struct memblock_region {
	phys_addr_t base;
	phys_addr_t size;
	unsigned long flags;
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
	int nid;
#endif
};

struct memblock_type {
	unsigned long cnt;	/* number of regions */
	unsigned long max;	/* size of the allocated array */
	phys_addr_t total_size;	/* size of all regions */
	struct memblock_region *regions;
};

struct memblock {
	bool bottom_up;			/* allocate memory bottom-up if true */
	phys_addr_t current_limit;
	struct memblock_type memory;
	struct memblock_type reserved;
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	struct memblock_type physmem;
#endif
};

extern struct memblock memblock;
extern int memblock_debug;
#ifdef CONFIG_MOVABLE_NODE
/* If movable_node boot option specified */
extern bool movable_node_enabled;
#endif /* CONFIG_MOVABLE_NODE */

#define memblock_dbg(fmt, ...) \
	if (memblock_debug) printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)

phys_addr_t memblock_find_in_range_node(phys_addr_t size, phys_addr_t align,
					phys_addr_t start, phys_addr_t end,
					int nid, ulong flags);
phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end,
				   phys_addr_t size, phys_addr_t align);
phys_addr_t get_allocated_memblock_reserved_regions_info(phys_addr_t *addr);
phys_addr_t get_allocated_memblock_memory_regions_info(phys_addr_t *addr);
void memblock_allow_resize(void);
int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid);
int memblock_add(phys_addr_t base, phys_addr_t size);
int memblock_remove(phys_addr_t base, phys_addr_t size);
int memblock_free(phys_addr_t base, phys_addr_t size);
int memblock_reserve(phys_addr_t base, phys_addr_t size);
void memblock_trim_memory(phys_addr_t align);
bool memblock_overlaps_region(struct memblock_type *type,
			      phys_addr_t base, phys_addr_t size);
int memblock_mark_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_mark_mirror(phys_addr_t base, phys_addr_t size);
int memblock_mark_nomap(phys_addr_t base, phys_addr_t size);
int memblock_clear_nomap(phys_addr_t base, phys_addr_t size);
ulong choose_memblock_flags(void);
unsigned long memblock_region_resize_late_begin(void);
void memblock_region_resize_late_end(unsigned long);
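
/*
 * Illustrative sketch (not part of the original header): a typical
 * early-boot sequence registers discovered RAM, reserves ranges that
 * are already in use, and only then allows the region arrays to be
 * resized, since resizing needs a working allocator. The base/size
 * and kernel_phys/kernel_size variables are hypothetical.
 *
 *	memblock_add(base, size);			// register a bank of RAM
 *	memblock_reserve(kernel_phys, kernel_size);	// keep the kernel image
 *	memblock_allow_resize();			// arrays may now grow
 */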
/* Low level functions */
int memblock_add_range(struct memblock_type *type,
		       phys_addr_t base, phys_addr_t size,
		       int nid, unsigned long flags);

void __next_mem_range(u64 *idx, int nid, ulong flags,
		      struct memblock_type *type_a,
		      struct memblock_type *type_b, phys_addr_t *out_start,
		      phys_addr_t *out_end, int *out_nid);

void __next_mem_range_rev(u64 *idx, int nid, ulong flags,
			  struct memblock_type *type_a,
			  struct memblock_type *type_b, phys_addr_t *out_start,
			  phys_addr_t *out_end, int *out_nid);

void __next_reserved_mem_region(u64 *idx, phys_addr_t *out_start,
				phys_addr_t *out_end);

/**
 * for_each_mem_range - iterate through memblock areas from type_a and not
 * included in type_b. Or just type_a if type_b is NULL.
 * @i: u64 used as loop variable
 * @type_a: ptr to memblock_type to iterate
 * @type_b: ptr to memblock_type which excludes from the iteration
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 */
#define for_each_mem_range(i, type_a, type_b, nid, flags,		\
			   p_start, p_end, p_nid)			\
	for (i = 0, __next_mem_range(&i, nid, flags, type_a, type_b,	\
				     p_start, p_end, p_nid);		\
	     i != (u64)ULLONG_MAX;					\
	     __next_mem_range(&i, nid, flags, type_a, type_b,		\
			      p_start, p_end, p_nid))
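
/*
 * Illustrative sketch: the raw iterator yields the ranges of @type_a
 * that are not covered by @type_b. Passing the global memory and
 * reserved types gives the "free memory" walk that the wrappers
 * further down provide directly:
 *
 *	phys_addr_t start, end;
 *	u64 i;
 *
 *	for_each_mem_range(i, &memblock.memory, &memblock.reserved,
 *			   NUMA_NO_NODE, MEMBLOCK_NONE,
 *			   &start, &end, NULL)
 *		pr_debug("free: [%pa-%pa]\n", &start, &end);
 */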
/**
 * for_each_mem_range_rev - reverse iterate through memblock areas from
 * type_a and not included in type_b. Or just type_a if type_b is NULL.
 * @i: u64 used as loop variable
 * @type_a: ptr to memblock_type to iterate
 * @type_b: ptr to memblock_type which excludes from the iteration
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 */
#define for_each_mem_range_rev(i, type_a, type_b, nid, flags,		\
			       p_start, p_end, p_nid)			\
	for (i = (u64)ULLONG_MAX,					\
		__next_mem_range_rev(&i, nid, flags, type_a, type_b,	\
				     p_start, p_end, p_nid);		\
	     i != (u64)ULLONG_MAX;					\
	     __next_mem_range_rev(&i, nid, flags, type_a, type_b,	\
				  p_start, p_end, p_nid))

/**
 * for_each_reserved_mem_region - iterate over all reserved memblock areas
 * @i: u64 used as loop variable
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 *
 * Walks over reserved areas of memblock. Available as soon as memblock
 * is initialized.
 */
#define for_each_reserved_mem_region(i, p_start, p_end)			\
	for (i = 0UL,							\
	     __next_reserved_mem_region(&i, p_start, p_end);		\
	     i != (u64)ULLONG_MAX;					\
	     __next_reserved_mem_region(&i, p_start, p_end))
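
/*
 * Illustrative sketch: totalling everything memblock has reserved,
 * e.g. for a boot-time report. This iterator reports only start/end;
 * reserved regions carry no node id through it.
 *
 *	phys_addr_t start, end, total = 0;
 *	u64 i;
 *
 *	for_each_reserved_mem_region(i, &start, &end)
 *		total += end - start;
 */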
#ifdef CONFIG_MOVABLE_NODE
static inline bool memblock_is_hotpluggable(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_HOTPLUG;
}

static inline bool movable_node_is_enabled(void)
{
	return movable_node_enabled;
}
#else
static inline bool memblock_is_hotpluggable(struct memblock_region *m)
{
	return false;
}

static inline bool movable_node_is_enabled(void)
{
	return false;
}
#endif

static inline bool memblock_is_mirror(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_MIRROR;
}

static inline bool memblock_is_nomap(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_NOMAP;
}

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
int memblock_search_pfn_nid(unsigned long pfn, unsigned long *start_pfn,
			    unsigned long *end_pfn);
void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
			  unsigned long *out_end_pfn, int *out_nid);

/**
 * for_each_mem_pfn_range - early memory pfn range iterator
 * @i: an integer used as loop variable
 * @nid: node selector, %MAX_NUMNODES for all nodes
 * @p_start: ptr to ulong for start pfn of the range, can be %NULL
 * @p_end: ptr to ulong for end pfn of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over configured memory ranges.
 */
#define for_each_mem_pfn_range(i, nid, p_start, p_end, p_nid)		\
	for (i = -1, __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid); \
	     i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid))
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
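
/*
 * Illustrative sketch (requires CONFIG_HAVE_MEMBLOCK_NODE_MAP):
 * counting the pages registered for one node. Note that this iterator
 * takes %MAX_NUMNODES, not %NUMA_NO_NODE, to mean "all nodes".
 *
 *	unsigned long start_pfn, end_pfn, pages = 0;
 *	int i;
 *
 *	for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL)
 *		pages += end_pfn - start_pfn;
 */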
/**
 * for_each_free_mem_range - iterate through free memblock areas
 * @i: u64 used as loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock. Available as
 * soon as memblock is initialized.
 */
#define for_each_free_mem_range(i, nid, flags, p_start, p_end, p_nid)	\
	for_each_mem_range(i, &memblock.memory, &memblock.reserved,	\
			   nid, flags, p_start, p_end, p_nid)
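
/*
 * Illustrative sketch: walking the memory that is still free for boot
 * allocations on any node, with no flag-based filtering:
 *
 *	phys_addr_t start, end;
 *	u64 i;
 *
 *	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
 *				&start, &end, NULL)
 *		pr_info("free range: [%pa-%pa]\n", &start, &end);
 */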
/**
 * for_each_free_mem_range_reverse - rev-iterate through free memblock areas
 * @i: u64 used as loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock in reverse
 * order. Available as soon as memblock is initialized.
 */
#define for_each_free_mem_range_reverse(i, nid, flags, p_start, p_end,	\
					p_nid)				\
	for_each_mem_range_rev(i, &memblock.memory, &memblock.reserved,	\
			       nid, flags, p_start, p_end, p_nid)

static inline void memblock_set_region_flags(struct memblock_region *r,
					     unsigned long flags)
{
	r->flags |= flags;
}

static inline void memblock_clear_region_flags(struct memblock_region *r,
					       unsigned long flags)
{
	r->flags &= ~flags;
}

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
int memblock_set_node(phys_addr_t base, phys_addr_t size,
		      struct memblock_type *type, int nid);

static inline void memblock_set_region_node(struct memblock_region *r, int nid)
{
	r->nid = nid;
}

static inline int memblock_get_region_node(const struct memblock_region *r)
{
	return r->nid;
}
#else
static inline void memblock_set_region_node(struct memblock_region *r, int nid)
{
}

static inline int memblock_get_region_node(const struct memblock_region *r)
{
	return 0;
}
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

phys_addr_t memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid);
phys_addr_t memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid);

phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align);
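
/*
 * Illustrative sketch: a boot-time allocation with a NUMA preference.
 * memblock_alloc_try_nid() tries @nid first and falls back to any
 * accessible memory; the returned address is physical and the range
 * comes back already reserved. @nid is a hypothetical variable here.
 *
 *	phys_addr_t pa = memblock_alloc_try_nid(PAGE_SIZE, PAGE_SIZE, nid);
 *	void *buf = __va(pa);
 */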
#ifdef CONFIG_MOVABLE_NODE
/*
 * Set the allocation direction to bottom-up or top-down.
 */
static inline void __init memblock_set_bottom_up(bool enable)
{
	memblock.bottom_up = enable;
}

/*
 * Check if the allocation direction is bottom-up or not.
 * If this returns true, memblock will allocate memory in the
 * bottom-up direction.
 */
static inline bool memblock_bottom_up(void)
{
	return memblock.bottom_up;
}
#else
static inline void __init memblock_set_bottom_up(bool enable) {}
static inline bool memblock_bottom_up(void) { return false; }
#endif
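
/*
 * Illustrative sketch: an arch can flip the allocation direction while
 * only low memory is usable, then restore the default top-down policy:
 *
 *	memblock_set_bottom_up(true);
 *	// ... early allocations land in low memory ...
 *	memblock_set_bottom_up(false);
 */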
/* Flags for memblock_alloc_base() and __memblock_alloc_base() */
#define MEMBLOCK_ALLOC_ANYWHERE	(~(phys_addr_t)0)
#define MEMBLOCK_ALLOC_ACCESSIBLE	0

phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align,
					phys_addr_t start, phys_addr_t end,
					ulong flags);
phys_addr_t memblock_alloc_base(phys_addr_t size, phys_addr_t align,
				phys_addr_t max_addr);
phys_addr_t __memblock_alloc_base(phys_addr_t size, phys_addr_t align,
				  phys_addr_t max_addr);
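
/*
 * Illustrative sketch of the difference between the two _base variants:
 * memblock_alloc_base() panics when the request cannot be satisfied,
 * while __memblock_alloc_base() returns 0 and leaves error handling to
 * the caller (fallback_alloc() below is hypothetical):
 *
 *	phys_addr_t pa = __memblock_alloc_base(size, align,
 *					       MEMBLOCK_ALLOC_ACCESSIBLE);
 *	if (!pa)
 *		pa = fallback_alloc(size);
 */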
phys_addr_t memblock_phys_mem_size(void);
phys_addr_t memblock_mem_size(unsigned long limit_pfn);
phys_addr_t memblock_start_of_DRAM(void);
phys_addr_t memblock_end_of_DRAM(void);
void memblock_enforce_memory_limit(phys_addr_t memory_limit);
int memblock_is_memory(phys_addr_t addr);
int memblock_is_map_memory(phys_addr_t addr);
int memblock_is_region_memory(phys_addr_t base, phys_addr_t size);
bool memblock_overlaps_memory(phys_addr_t base, phys_addr_t size);
int memblock_is_reserved(phys_addr_t addr);
bool memblock_is_region_reserved(phys_addr_t base, phys_addr_t size);

extern void __memblock_dump_all(void);

static inline void memblock_dump_all(void)
{
	if (memblock_debug)
		__memblock_dump_all();
}

/**
 * memblock_set_current_limit - Set the current allocation limit to allow
 *                              limiting allocations to what is currently
 *                              accessible during boot
 * @limit: New limit value (physical address)
 */
void memblock_set_current_limit(phys_addr_t limit);

phys_addr_t memblock_get_current_limit(void);
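
/*
 * Illustrative sketch: while an arch is still building its linear map
 * it can clamp allocations to memory that is already mapped, then lift
 * the limit once mapping is complete (mapped_end is hypothetical):
 *
 *	memblock_set_current_limit(mapped_end);
 *	// ... allocations stay below mapped_end ...
 *	memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
 */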
/*
 * pfn conversion functions
 *
 * While the memory MEMBLOCKs should always be page aligned, the reserved
 * MEMBLOCKs may not be. These accessors attempt to provide a very clear
 * idea of what they return for such non-aligned MEMBLOCKs.
 */

/**
 * memblock_region_memory_base_pfn - Return the lowest pfn intersecting with the memory region
 * @reg: memblock_region structure
 */
static inline unsigned long memblock_region_memory_base_pfn(const struct memblock_region *reg)
{
	return PFN_UP(reg->base);
}

/**
 * memblock_region_memory_end_pfn - Return the end_pfn of this region
 * @reg: memblock_region structure
 */
static inline unsigned long memblock_region_memory_end_pfn(const struct memblock_region *reg)
{
	return PFN_DOWN(reg->base + reg->size);
}

/**
 * memblock_region_reserved_base_pfn - Return the lowest pfn intersecting with the reserved region
 * @reg: memblock_region structure
 */
static inline unsigned long memblock_region_reserved_base_pfn(const struct memblock_region *reg)
{
	return PFN_DOWN(reg->base);
}

/**
 * memblock_region_reserved_end_pfn - Return the end_pfn of this region
 * @reg: memblock_region structure
 */
static inline unsigned long memblock_region_reserved_end_pfn(const struct memblock_region *reg)
{
	return PFN_UP(reg->base + reg->size);
}

#define for_each_memblock(memblock_type, region)					\
	for (region = memblock.memblock_type.regions;					\
	     region < (memblock.memblock_type.regions + memblock.memblock_type.cnt);	\
	     region++)

#define for_each_memblock_rev(memblock_type, region)			\
	for (region = memblock.memblock_type.regions +			\
		      memblock.memblock_type.cnt - 1;			\
	     region >= memblock.memblock_type.regions;			\
	     region--)
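
/*
 * Illustrative sketch: dumping every registered memory region together
 * with its pfn span, using the accessors defined above.
 *
 *	struct memblock_region *region;
 *
 *	for_each_memblock(memory, region)
 *		pr_info("memory: %#lx..%#lx\n",
 *			memblock_region_memory_base_pfn(region),
 *			memblock_region_memory_end_pfn(region));
 */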
#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
#define __init_memblock __meminit
#define __initdata_memblock __meminitdata
#else
#define __init_memblock
#define __initdata_memblock
#endif

#ifdef CONFIG_MEMTEST
extern void early_memtest(phys_addr_t start, phys_addr_t end);
#else
static inline void early_memtest(phys_addr_t start, phys_addr_t end)
{
}
#endif

extern unsigned long memblock_reserved_memory_within(phys_addr_t start_addr,
		phys_addr_t end_addr);
#else
static inline phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align)
{
	return 0;
}

static inline unsigned long memblock_reserved_memory_within(phys_addr_t start_addr,
		phys_addr_t end_addr)
{
	return 0;
}
#endif /* CONFIG_HAVE_MEMBLOCK */

#endif /* __KERNEL__ */

#endif /* _LINUX_MEMBLOCK_H */