author		Yinghai Lu <yinghai@kernel.org>	2012-11-16 19:39:14 -0800
committer	H. Peter Anvin <hpa@linux.intel.com>	2012-11-17 11:59:41 -0800
commit		1829ae9ad7380bf17333ab9ad1610631d9cb8664 (patch)
tree		ae54f697df1ac557090f626af4349613b619f462 /arch/x86/mm/init.c
parent		x86, mm: use PFN_DOWN in split_mem_range() (diff)
x86, mm: use pfn instead of pos in split_mem_range
Using a pfn cursor instead of a physical-address position could save
some bit shifting operations.

Signed-off-by: Yinghai Lu <yinghai@kernel.org>
Link: http://lkml.kernel.org/r/1353123563-3103-38-git-send-email-yinghai@kernel.org
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
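For reference, the identity this conversion relies on can be checked in
user space. The sketch below is an illustration, not kernel code:
PFN_DOWN(), PFN_PHYS(), and round_up() mirror the kernel's definitions
(types simplified), while the PAGE_SHIFT/PMD_SIZE values and the main()
harness are assumptions made up for the demo.

#include <assert.h>
#include <stdio.h>

/* Illustrative x86_64 values; in the kernel these come from
 * asm/page_types.h and asm/pgtable_types.h. */
#define PAGE_SHIFT      12
#define PMD_SIZE        (1UL << 21)             /* 2 MiB */

/* Mirrors include/linux/pfn.h (types simplified) */
#define PFN_DOWN(x)     ((x) >> PAGE_SHIFT)
#define PFN_PHYS(x)     ((unsigned long)(x) << PAGE_SHIFT)

/* Power-of-two round_up, as in the kernel */
#define round_up(x, y)  ((((x) - 1) | ((y) - 1)) + 1)

int main(void)
{
        for (unsigned long pfn = 0; pfn < (1UL << 20); pfn++) {
                /* old style: shift the pfn up to a physical address,
                 * round, then shift back down to a pfn */
                unsigned long old_way =
                        PFN_DOWN(round_up(PFN_PHYS(pfn), PMD_SIZE));
                /* new style: round in pfn units; PFN_DOWN(PMD_SIZE)
                 * folds to a compile-time constant, so the two
                 * runtime shifts disappear */
                unsigned long new_way =
                        round_up(pfn, PFN_DOWN(PMD_SIZE));

                assert(old_way == new_way);
        }
        printf("pfn-unit round_up matches the phys round trip\n");
        return 0;
}

Because PMD_SIZE and PUD_SIZE are power-of-two multiples of PAGE_SIZE on
x86, rounding in pfn units is exact, which is what makes the two forms
interchangeable.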
Diffstat (limited to 'arch/x86/mm/init.c')
-rw-r--r--	arch/x86/mm/init.c	29
1 file changed, 14 insertions(+), 15 deletions(-)
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 1cca052b2cbd..4bf1c5374928 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -204,12 +204,11 @@ static int __meminit split_mem_range(struct map_range *mr, int nr_range,
                                     unsigned long end)
{
        unsigned long start_pfn, end_pfn;
-       unsigned long pos;
+       unsigned long pfn;
        int i;

        /* head if not big page alignment ? */
-       start_pfn = PFN_DOWN(start);
-       pos = PFN_PHYS(start_pfn);
+       pfn = start_pfn = PFN_DOWN(start);
#ifdef CONFIG_X86_32
        /*
         * Don't use a large page for the first 2/4MB of memory
@@ -217,26 +216,26 @@ static int __meminit split_mem_range(struct map_range *mr, int nr_range,
         * and overlapping MTRRs into large pages can cause
         * slowdowns.
         */
-       if (pos == 0)
+       if (pfn == 0)
                end_pfn = PFN_DOWN(PMD_SIZE);
        else
-               end_pfn = PFN_DOWN(round_up(pos, PMD_SIZE));
+               end_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
#else /* CONFIG_X86_64 */
-       end_pfn = PFN_DOWN(round_up(pos, PMD_SIZE));
+       end_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
#endif
        if (end_pfn > PFN_DOWN(end))
                end_pfn = PFN_DOWN(end);
        if (start_pfn < end_pfn) {
                nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
-               pos = PFN_PHYS(end_pfn);
+               pfn = end_pfn;
        }

        /* big page (2M) range */
-       start_pfn = PFN_DOWN(round_up(pos, PMD_SIZE));
+       start_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
#ifdef CONFIG_X86_32
        end_pfn = PFN_DOWN(round_down(end, PMD_SIZE));
#else /* CONFIG_X86_64 */
-       end_pfn = PFN_DOWN(round_up(pos, PUD_SIZE));
+       end_pfn = round_up(pfn, PFN_DOWN(PUD_SIZE));
        if (end_pfn > PFN_DOWN(round_down(end, PMD_SIZE)))
                end_pfn = PFN_DOWN(round_down(end, PMD_SIZE));
#endif
@@ -244,32 +243,32 @@ static int __meminit split_mem_range(struct map_range *mr, int nr_range,
        if (start_pfn < end_pfn) {
                nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
                                page_size_mask & (1<<PG_LEVEL_2M));
-               pos = PFN_PHYS(end_pfn);
+               pfn = end_pfn;
        }

#ifdef CONFIG_X86_64
        /* big page (1G) range */
-       start_pfn = PFN_DOWN(round_up(pos, PUD_SIZE));
+       start_pfn = round_up(pfn, PFN_DOWN(PUD_SIZE));
        end_pfn = PFN_DOWN(round_down(end, PUD_SIZE));
        if (start_pfn < end_pfn) {
                nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
                                page_size_mask &
                                 ((1<<PG_LEVEL_2M)|(1<<PG_LEVEL_1G)));
-               pos = PFN_PHYS(end_pfn);
+               pfn = end_pfn;
        }

        /* tail is not big page (1G) alignment */
-       start_pfn = PFN_DOWN(round_up(pos, PMD_SIZE));
+       start_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
        end_pfn = PFN_DOWN(round_down(end, PMD_SIZE));
        if (start_pfn < end_pfn) {
                nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
                                page_size_mask & (1<<PG_LEVEL_2M));
-               pos = PFN_PHYS(end_pfn);
+               pfn = end_pfn;
        }
#endif

        /* tail is not big page (2M) alignment */
-       start_pfn = PFN_DOWN(pos);
+       start_pfn = pfn;
        end_pfn = PFN_DOWN(end);
        nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
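
For orientation, the overall splitting that split_mem_range() performs
after this patch (x86_64 path) can be condensed into a user-space
sketch. Here emit() is a hypothetical stand-in for save_mr(), the real
function's final merge pass over the ranges is omitted, and the
constants and sample range are assumptions for illustration.

#include <stdio.h>

#define PAGE_SHIFT       12
#define PMD_SIZE         (1UL << 21)    /* 2 MiB */
#define PUD_SIZE         (1UL << 30)    /* 1 GiB */
#define PFN_DOWN(x)      ((x) >> PAGE_SHIFT)
#define round_up(x, y)   ((((x) - 1) | ((y) - 1)) + 1)
#define round_down(x, y) ((x) & ~((y) - 1))

/* hypothetical stand-in for save_mr(): print one non-empty range */
static void emit(unsigned long s, unsigned long e, const char *size)
{
        if (s < e)
                printf("pfn [%#lx, %#lx) -> %s pages\n", s, e, size);
}

int main(void)
{
        unsigned long start = 0x1234000UL;      /* sample, not 2M aligned */
        unsigned long end   = 0x80000000UL;     /* sample, 1G aligned */
        unsigned long pfn, start_pfn, end_pfn;

        /* 4k head up to the first 2M boundary */
        pfn = start_pfn = PFN_DOWN(start);
        end_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
        if (end_pfn > PFN_DOWN(end))
                end_pfn = PFN_DOWN(end);
        if (start_pfn < end_pfn) {
                emit(start_pfn, end_pfn, "4k");
                pfn = end_pfn;
        }

        /* 2M range up to the first 1G boundary */
        start_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
        end_pfn = round_up(pfn, PFN_DOWN(PUD_SIZE));
        if (end_pfn > PFN_DOWN(round_down(end, PMD_SIZE)))
                end_pfn = PFN_DOWN(round_down(end, PMD_SIZE));
        if (start_pfn < end_pfn) {
                emit(start_pfn, end_pfn, "2M");
                pfn = end_pfn;
        }

        /* 1G range */
        start_pfn = round_up(pfn, PFN_DOWN(PUD_SIZE));
        end_pfn = PFN_DOWN(round_down(end, PUD_SIZE));
        if (start_pfn < end_pfn) {
                emit(start_pfn, end_pfn, "1G");
                pfn = end_pfn;
        }

        /* 2M tail below the last 1G boundary */
        start_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
        end_pfn = PFN_DOWN(round_down(end, PMD_SIZE));
        if (start_pfn < end_pfn) {
                emit(start_pfn, end_pfn, "2M");
                pfn = end_pfn;
        }

        /* 4k tail below the last 2M boundary */
        emit(pfn, PFN_DOWN(end), "4k");
        return 0;
}

For the sample range this prints a 4k head [0x1234, 0x1400), a 2M body
[0x1400, 0x40000), and a 1G body [0x40000, 0x80000); both tails are
empty because the sample end is 1G aligned. Every boundary computation
stays in pfn units, which is the point of the patch.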