From c56004901fa5dcf55f92318f192ab3c0e87db2d1 Mon Sep 17 00:00:00 2001
From: Jeff Dike
Date: Sat, 3 Sep 2005 15:57:36 -0700
Subject: [PATCH] uml: TLB operation batching

This adds VM op batching to skas0.  Rather than having a context switch to
and from the userspace stub for each address space change, we write a number
of operations to the stub data page and invoke a different stub which loops
over them and executes them all in one go.

The operations are stored as [ system call number, arg1, arg2, ... ] tuples.
The set is terminated by a system call number of 0.  Single operations, i.e.
page faults, are handled in the old way, since that is slightly more
efficient.

For a kernel build, a minority (~1/4) of the operations are part of a set.
These sets averaged ~100 in length, so for this quarter, the context
switching overhead is greatly reduced.

Signed-off-by: Jeff Dike
Cc: Paolo Giarrusso
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 arch/um/sys-x86_64/stub.S | 21 +++++++++++++++++++++
 1 file changed, 21 insertions(+)

(limited to 'arch/um/sys-x86_64')

diff --git a/arch/um/sys-x86_64/stub.S b/arch/um/sys-x86_64/stub.S
index 31c14925716b..957f2eff32ca 100644
--- a/arch/um/sys-x86_64/stub.S
+++ b/arch/um/sys-x86_64/stub.S
@@ -13,3 +13,24 @@ syscall_stub:
 	or %rcx, %rbx
 	movq %rax, (%rbx)
 	int3
+
+	.globl batch_syscall_stub
+batch_syscall_stub:
+	movq $(UML_CONFIG_STUB_DATA >> 32), %rbx
+	salq $32, %rbx
+	movq $(UML_CONFIG_STUB_DATA & 0xffffffff), %rcx
+	or %rcx, %rbx
+	movq %rbx, %rsp
+again:	pop %rax
+	cmpq $0, %rax
+	jz done
+	pop %rdi
+	pop %rsi
+	pop %rdx
+	pop %r10
+	pop %r8
+	pop %r9
+	syscall
+	mov %rax, (%rbx)
+	jmp again
+done:	int3
-- 
cgit v1.2.3-59-g8ed1b
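
Editor's note: the following C sketch is not part of the commit above; it is a
minimal illustration of the data layout that batch_syscall_stub consumes, i.e.
a run of [ system call number, arg1 .. arg6 ] tuples on the stub data page,
terminated by a syscall number of 0.  The names stub_data, add_stub_syscall
and finish_stub_batch are invented for this sketch; the real UML helpers that
fill the page are organized differently.

/*
 * Illustrative sketch only -- not part of the patch.  Models the tuple
 * format popped by batch_syscall_stub: 7 words per operation (syscall
 * number plus six arguments), ended by a single 0 word.
 */
#include <stdio.h>
#include <sys/mman.h>
#include <sys/syscall.h>

#define STUB_DATA_WORDS	(4096 / sizeof(long))	/* one data page of longs */

static long stub_data[STUB_DATA_WORDS];
static unsigned int stub_used;

/* Queue one operation: the syscall number followed by its six arguments. */
static int add_stub_syscall(long nr, long a1, long a2, long a3,
			    long a4, long a5, long a6)
{
	/* need 7 words for this tuple plus 1 for the terminating 0 */
	if (stub_used + 8 > STUB_DATA_WORDS)
		return -1;
	stub_data[stub_used++] = nr;
	stub_data[stub_used++] = a1;
	stub_data[stub_used++] = a2;
	stub_data[stub_used++] = a3;
	stub_data[stub_used++] = a4;
	stub_data[stub_used++] = a5;
	stub_data[stub_used++] = a6;
	return 0;
}

/*
 * Terminate the set.  The real code would now point the child's %rsp at
 * the stub data page and continue it at batch_syscall_stub; the stub pops
 * and executes each tuple, stores every return value in the first word of
 * the page (so only the last one is visible afterwards), and hits the
 * int3 once it pops the terminating 0.
 */
static void finish_stub_batch(void)
{
	stub_data[stub_used] = 0;
	stub_used = 0;
}

int main(void)
{
	/* e.g. batch two address space changes instead of two stub entries */
	add_stub_syscall(SYS_munmap, 0x400000, 0x1000, 0, 0, 0, 0);
	add_stub_syscall(SYS_mprotect, 0x500000, 0x1000, PROT_READ, 0, 0, 0);
	finish_stub_batch();

	printf("first queued syscall number: %ld\n", stub_data[0]);
	return 0;
}

Note that the stub always pops all six argument slots regardless of how many
the system call actually uses, so unused slots are simply padding in the
tuple; that matches the fixed pop %rdi .. pop %r9 sequence in the assembly.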