Diffstat (limited to 'arch/um/kernel')
-rw-r--r--   arch/um/kernel/mem.c        11
-rw-r--r--   arch/um/kernel/physmem.c     2
-rw-r--r--   arch/um/kernel/skas/mem.c   11
-rw-r--r--   arch/um/kernel/tt/mem.c     10
-rw-r--r--   arch/um/kernel/um_arch.c     9
-rw-r--r--   arch/um/kernel/uml.lds.S    13
6 files changed, 26 insertions, 30 deletions
diff --git a/arch/um/kernel/mem.c b/arch/um/kernel/mem.c
index 44e41a35f000..61280167c560 100644
--- a/arch/um/kernel/mem.c
+++ b/arch/um/kernel/mem.c
@@ -24,8 +24,6 @@
#include "init.h"
#include "kern_constants.h"
-extern char __binary_start;
-
/* Changed during early boot */
unsigned long *empty_zero_page = NULL;
unsigned long *empty_bad_page = NULL;
@@ -65,8 +63,6 @@ static void setup_highmem(unsigned long highmem_start,
void mem_init(void)
{
-        unsigned long start;
-
        max_low_pfn = (high_physmem - uml_physmem) >> PAGE_SHIFT;
        /* clear the zero-page */
@@ -81,13 +77,6 @@ void mem_init(void)
        free_bootmem(__pa(brk_end), uml_reserved - brk_end);
        uml_reserved = brk_end;
-        /* Fill in any hole at the start of the binary */
-        start = (unsigned long) &__binary_start & PAGE_MASK;
-        if(uml_physmem != start){
-                map_memory(uml_physmem, __pa(uml_physmem), start - uml_physmem,
-                           1, 1, 0);
-        }
-
        /* this will put all low memory onto the freelists */
        totalram_pages = free_all_bootmem();
        totalhigh_pages = highmem >> PAGE_SHIFT;
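
Note that max_low_pfn and totalhigh_pages above are plain byte-to-page conversions: shifting a byte count right by PAGE_SHIFT yields a page count. A minimal standalone sketch of that arithmetic, assuming 4 KiB pages and hypothetical addresses purely for illustration:

#include <stdio.h>

/* Assumed 4 KiB page size for illustration only; the kernel's
 * PAGE_SHIFT is configuration dependent. */
#define PAGE_SHIFT 12

int main(void)
{
        unsigned long uml_physmem  = 0x8048000UL;  /* hypothetical start of physical memory */
        unsigned long high_physmem = 0xa048000UL;  /* hypothetical end of low memory */

        /* Shifting the byte difference right by PAGE_SHIFT gives the
         * number of page frames, as in the max_low_pfn line above. */
        unsigned long max_low_pfn = (high_physmem - uml_physmem) >> PAGE_SHIFT;

        printf("pages of low memory: %lu\n", max_low_pfn);  /* 8192 for these values */
        return 0;
}
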
diff --git a/arch/um/kernel/physmem.c b/arch/um/kernel/physmem.c
index 166cb09cae4c..abafa64b8727 100644
--- a/arch/um/kernel/physmem.c
+++ b/arch/um/kernel/physmem.c
@@ -317,7 +317,7 @@ void map_memory(unsigned long virt, unsigned long phys, unsigned long len,
        }
}
-extern int __syscall_stub_start, __binary_start;
+extern int __syscall_stub_start;
void setup_physmem(unsigned long start, unsigned long reserve_end,
                   unsigned long len, unsigned long long highmem)
diff --git a/arch/um/kernel/skas/mem.c b/arch/um/kernel/skas/mem.c
index 7e5b8f165cf2..27bbf54b1e52 100644
--- a/arch/um/kernel/skas/mem.c
+++ b/arch/um/kernel/skas/mem.c
@@ -9,20 +9,19 @@
#include "mem_user.h"
#include "skas.h"
-unsigned long set_task_sizes_skas(unsigned long *host_size_out,
-                                  unsigned long *task_size_out)
+unsigned long set_task_sizes_skas(unsigned long *task_size_out)
{
        /* Round up to the nearest 4M */
-        unsigned long top = ROUND_4M((unsigned long) &host_size_out);
+        unsigned long host_task_size = ROUND_4M((unsigned long)
+                                                &host_task_size);
#ifdef CONFIG_HOST_TASK_SIZE
        *host_size_out = ROUND_4M(CONFIG_HOST_TASK_SIZE);
        *task_size_out = CONFIG_HOST_TASK_SIZE;
#else
-        *host_size_out = top;
        if (!skas_needs_stub)
-                *task_size_out = top;
+                *task_size_out = host_task_size;
        else *task_size_out = CONFIG_STUB_START & PGDIR_MASK;
#endif
-        return ((unsigned long) set_task_sizes_skas) & ~0xffffff;
+        return host_task_size;
}
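
The new ROUND_4M((unsigned long) &host_task_size) expression estimates the top of the host address space by taking the address of an on-stack variable, which sits near the top of the process address space, and rounding it up to a 4 MB boundary. A minimal sketch of that trick, with an assumed ROUND_4M definition rather than the one from the UML headers:

#include <stdio.h>

/* Assumed definition for illustration: round up to the next 4 MB boundary. */
#define ROUND_4M(n) ((((unsigned long) (n)) + (1UL << 22)) & ~((1UL << 22) - 1))

int main(void)
{
        /* A local variable lives on the stack, close to the top of the
         * process address space, so rounding its own address up to 4 MB
         * gives a rough upper bound on the usable task size. */
        unsigned long host_task_size = ROUND_4M(&host_task_size);

        printf("estimated host task size: %#lx\n", host_task_size);
        return 0;
}
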
diff --git a/arch/um/kernel/tt/mem.c b/arch/um/kernel/tt/mem.c
index 4ae8c5c1e3b3..84a23b14f770 100644
--- a/arch/um/kernel/tt/mem.c
+++ b/arch/um/kernel/tt/mem.c
@@ -24,11 +24,13 @@ void before_mem_tt(unsigned long brk_start)
#define SIZE ((CONFIG_NEST_LEVEL + CONFIG_KERNEL_HALF_GIGS) * 0x20000000)
#define START (CONFIG_TOP_ADDR - SIZE)
-unsigned long set_task_sizes_tt(unsigned long *host_size_out,
-                                unsigned long *task_size_out)
+unsigned long set_task_sizes_tt(unsigned long *task_size_out)
{
+        unsigned long host_task_size;
+
        /* Round up to the nearest 4M */
-        *host_size_out = ROUND_4M((unsigned long) &host_size_out);
+        host_task_size = ROUND_4M((unsigned long) &host_task_size);
        *task_size_out = START;
-        return START;
+
+        return host_task_size;
}
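
In the TT-mode #defines above, each unit of SIZE is half a gigabyte (0x20000000 bytes) and START is simply CONFIG_TOP_ADDR minus that. A worked example using assumed values for the CONFIG_* options, not taken from any real configuration:

#include <stdio.h>

/* Example values, assumed purely for illustration. */
#define CONFIG_NEST_LEVEL       0
#define CONFIG_KERNEL_HALF_GIGS 1
#define CONFIG_TOP_ADDR         0xc0000000UL

#define SIZE  ((CONFIG_NEST_LEVEL + CONFIG_KERNEL_HALF_GIGS) * 0x20000000UL)
#define START (CONFIG_TOP_ADDR - SIZE)

int main(void)
{
        /* One half-gigabyte unit below a 3 GB top address. */
        printf("SIZE  = %#lx\n", (unsigned long) SIZE);   /* 0x20000000 */
        printf("START = %#lx\n", (unsigned long) START);  /* 0xa0000000 */
        return 0;
}
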
diff --git a/arch/um/kernel/um_arch.c b/arch/um/kernel/um_arch.c
index a50a0aac8faa..7896cf98232d 100644
--- a/arch/um/kernel/um_arch.c
+++ b/arch/um/kernel/um_arch.c
@@ -330,6 +330,8 @@ EXPORT_SYMBOL(end_iomem);
#define MIN_VMALLOC (32 * 1024 * 1024)
+extern char __binary_start;
+
int linux_main(int argc, char **argv)
{
        unsigned long avail, diff;
@@ -374,8 +376,9 @@ int linux_main(int argc, char **argv)
printf("UML running in %s mode\n", mode);
- uml_start = CHOOSE_MODE_PROC(set_task_sizes_tt, set_task_sizes_skas,
- &host_task_size, &task_size);
+ uml_start = (unsigned long) &__binary_start;
+ host_task_size = CHOOSE_MODE_PROC(set_task_sizes_tt,
+ set_task_sizes_skas, &task_size);
/*
* Setting up handlers to 'sig_info' struct
@@ -395,7 +398,7 @@ int linux_main(int argc, char **argv)
                physmem_size += UML_ROUND_UP(brk_start) - UML_ROUND_UP(&_end);
        }
-        uml_physmem = uml_start;
+        uml_physmem = uml_start & PAGE_MASK;
        /* Reserve up to 4M after the current brk */
        uml_reserved = ROUND_4M(brk_start) + (1 << 22);
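
The new uml_physmem assignment masks off the low bits of uml_start so that physical memory begins on a page boundary even when __binary_start itself is not page aligned. A minimal sketch of that masking, assuming 4 KiB pages and a hypothetical unaligned address:

#include <stdio.h>

/* Assumed 4 KiB page size for illustration. */
#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

int main(void)
{
        unsigned long uml_start   = 0x8048134UL;           /* hypothetical, not page aligned */
        unsigned long uml_physmem = uml_start & PAGE_MASK; /* rounded down to a page boundary */

        printf("%#lx -> %#lx\n", uml_start, uml_physmem);  /* 0x8048134 -> 0x8048000 */
        return 0;
}
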
diff --git a/arch/um/kernel/uml.lds.S b/arch/um/kernel/uml.lds.S
index af11915ce0a8..8eca47a6ff08 100644
--- a/arch/um/kernel/uml.lds.S
+++ b/arch/um/kernel/uml.lds.S
@@ -7,13 +7,16 @@ jiffies = jiffies_64;
SECTIONS
{
-  /*This must contain the right address - not quite the default ELF one.*/
+  /* This must contain the right address - not quite the default ELF one.*/
  PROVIDE (__executable_start = START);
-  . = START + SIZEOF_HEADERS;
+  /* Static binaries stick stuff here, like the sigreturn trampoline,
+   * invisibly to objdump. So, just make __binary_start equal to the very
+   * beginning of the executable, and if there are unmapped pages after this,
+   * they are forever unusable.
+   */
+  __binary_start = START;
-  /* Used in arch/um/kernel/mem.c. Any memory between START and __binary_start
-   * is remapped.*/
-  __binary_start = .;
+  . = START + SIZEOF_HEADERS;
#ifdef MODE_TT
.remap_data : { UNMAP_PATH (.data .bss) }