path: root/include/linux/memblock.h

#ifndef _LINUX_MEMBLOCK_H
#define _LINUX_MEMBLOCK_H
#ifdef __KERNEL__

/*
 * Logical memory blocks.
 *
 * Copyright (C) 2001 Peter Bergner, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/init.h>
#include <linux/mm.h>

#include <asm/memblock.h>

#define MAX_MEMBLOCK_REGIONS 128
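
/*
 * A memblock_region describes a single physically contiguous
 * [base, base + size) range.  The global 'memblock' instance declared
 * below carries two such collections: 'memory' for all RAM known to the
 * early allocator and 'reserved' for the ranges within it that are
 * already in use and must not be handed out again.
 */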

struct memblock_region {
	u64 base;
	u64 size;
};

struct memblock_type {
	unsigned long cnt;
	u64 size;
	struct memblock_region regions[MAX_MEMBLOCK_REGIONS+1];
};

struct memblock {
	unsigned long debug;
	u64 rmo_size;
	struct memblock_type memory;
	struct memblock_type reserved;
};

extern struct memblock memblock;

extern void __init memblock_init(void);
extern void __init memblock_analyze(void);
extern long memblock_add(u64 base, u64 size);
extern long memblock_remove(u64 base, u64 size);
extern long __init memblock_free(u64 base, u64 size);
extern long __init memblock_reserve(u64 base, u64 size);
extern u64 __init memblock_alloc_nid(u64 size, u64 align, int nid,
				u64 (*nid_range)(u64, u64, int *));
extern u64 __init memblock_alloc(u64 size, u64 align);
extern u64 __init memblock_alloc_base(u64 size,
		u64 align, u64 max_addr);
extern u64 __init __memblock_alloc_base(u64 size,
		u64 align, u64 max_addr);
extern u64 __init memblock_phys_mem_size(void);
extern u64 memblock_end_of_DRAM(void);
extern void __init memblock_enforce_memory_limit(u64 memory_limit);
extern int memblock_is_memory(u64 addr);
extern int memblock_is_region_memory(u64 base, u64 size);
extern int __init memblock_is_reserved(u64 addr);
extern int memblock_is_region_reserved(u64 base, u64 size);
extern int memblock_find(struct memblock_region *res);

extern void memblock_dump_all(void);
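
/*
 * Illustrative sketch only (not part of this header): one plausible
 * early-boot sequence using the interfaces above.  The addresses, sizes
 * and the 'kernel_start'/'kernel_size' variables are made-up placeholders.
 *
 *	memblock_init();
 *	memblock_add(0x00000000, 0x40000000);		// describe 1GB of RAM
 *	memblock_reserve(kernel_start, kernel_size);	// protect the kernel image
 *	memblock_analyze();				// once all regions are registered
 *	u64 phys = memblock_alloc(PAGE_SIZE, PAGE_SIZE); // early physical allocation
 *	memblock_dump_all();				// dump the resulting regions
 */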

/* Obsolete accessors */
static inline u64
memblock_size_bytes(struct memblock_type *type, unsigned long region_nr)
{
	return type->regions[region_nr].size;
}
static inline u64
memblock_size_pages(struct memblock_type *type, unsigned long region_nr)
{
	return memblock_size_bytes(type, region_nr) >> PAGE_SHIFT;
}
static inline u64
memblock_start_pfn(struct memblock_type *type, unsigned long region_nr)
{
	return type->regions[region_nr].base >> PAGE_SHIFT;
}
static inline u64
memblock_end_pfn(struct memblock_type *type, unsigned long region_nr)
{
	return memblock_start_pfn(type, region_nr) +
	       memblock_size_pages(type, region_nr);
}

/*
 * pfn conversion functions
 *
 * While the memory MEMBLOCKs should always be page aligned, the reserved
 * MEMBLOCKs may not be. These accessors attempt to provide a very clear
 * idea of what they return for such non-aligned MEMBLOCKs.
 */

/**
 * memblock_region_base_pfn - Return the lowest pfn intersecting with the region
 * @reg: memblock_region structure
 */
static inline unsigned long memblock_region_base_pfn(const struct memblock_region *reg)
{
	return reg->base >> PAGE_SHIFT;
}

/**
 * memblock_region_last_pfn - Return the highest pfn intersecting with the region
 * @reg: memblock_region structure
 */
static inline unsigned long memblock_region_last_pfn(const struct memblock_region *reg)
{
	return (reg->base + reg->size - 1) >> PAGE_SHIFT;
}

/**
 * memblock_region_end_pfn - Return the pfn of the first page following the region
 *                      but not intersecting it
 * @reg: memblock_region structure
 */
static inline unsigned long memblock_region_end_pfn(const struct memblock_region *reg)
{
	return memblock_region_last_pfn(reg) + 1;
}

/**
 * memblock_region_pages - Return the number of pages covering a region
 * @reg: memblock_region structure
 */
static inline unsigned long memblock_region_pages(const struct memblock_region *reg)
{
	return memblock_region_end_pfn(reg) - memblock_region_base_pfn(reg);
}
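
/*
 * Worked example, assuming 4K pages (PAGE_SHIFT == 12): a reserved region
 * with base = 0x1800 and size = 0x2000 covers bytes 0x1800..0x37ff and so
 * touches pfns 1, 2 and 3:
 *
 *	memblock_region_base_pfn()	-> 1	(0x1800 >> 12)
 *	memblock_region_last_pfn()	-> 3	(0x37ff >> 12)
 *	memblock_region_end_pfn()	-> 4	(first pfn past the region)
 *	memblock_region_pages()		-> 3	(end_pfn - base_pfn)
 */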

#define for_each_memblock(memblock_type, region)					\
	for (region = memblock.memblock_type.regions;				\
	     region < (memblock.memblock_type.regions + memblock.memblock_type.cnt);	\
	     region++)
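
/*
 * Illustrative sketch only: walking every known memory region and printing
 * its pfn range.  'reg' is a caller-provided cursor, not something defined
 * by this header.
 *
 *	struct memblock_region *reg;
 *
 *	for_each_memblock(memory, reg)
 *		pr_info("memory: pfn %#lx-%#lx\n",
 *			memblock_region_base_pfn(reg),
 *			memblock_region_last_pfn(reg));
 */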


#endif /* __KERNEL__ */

#endif /* _LINUX_MEMBLOCK_H */