author     Ben Gardon <bgardon@google.com>  2020-01-23 10:04:34 -0800
committer  Paolo Bonzini <pbonzini@redhat.com>  2020-03-16 17:57:05 +0100
commit     f09205b99832f353088b7c82778b3f8175627620
tree       492cdbd43002b9b997fef0231ee6ca171d9ed108  /tools/testing/selftests/kvm/demand_paging_test.c
parent     KVM: selftests: Support multiple vCPUs in demand paging test
KVM: selftests: Time guest demand paging
In order to quantify demand paging performance, time guest execution
during demand paging.

Signed-off-by: Ben Gardon <bgardon@google.com>
[Move timespec-diff to test_util.h]
Signed-off-by: Andrew Jones <drjones@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
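For context, the timing helpers this patch leans on, timespec_diff() and
timespec_to_ns(), live in the selftests' test_util.h (per the bracketed note
above). Below is a minimal sketch of what such helpers look like; the names
match the calls in the diff, but the bodies here are illustrative assumptions
rather than the in-tree implementations:

/* Sketch only: illustrative equivalents of the test_util.h timing helpers. */
#include <stdint.h>
#include <time.h>

#define NSEC_PER_SEC 1000000000L

/* end - start, normalized so tv_nsec stays within [0, NSEC_PER_SEC). */
static inline struct timespec timespec_diff(struct timespec start,
                                            struct timespec end)
{
        struct timespec d;

        d.tv_sec = end.tv_sec - start.tv_sec;
        d.tv_nsec = end.tv_nsec - start.tv_nsec;
        if (d.tv_nsec < 0) {
                d.tv_sec--;
                d.tv_nsec += NSEC_PER_SEC;
        }
        return d;
}

/* Collapse a timespec into a single nanosecond count. */
static inline int64_t timespec_to_ns(struct timespec ts)
{
        return (int64_t)ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec;
}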
Diffstat (limited to 'tools/testing/selftests/kvm/demand_paging_test.c')
-rw-r--r--  tools/testing/selftests/kvm/demand_paging_test.c  |  50
1 file changed, 49 insertions(+), 1 deletion(-)
diff --git a/tools/testing/selftests/kvm/demand_paging_test.c b/tools/testing/selftests/kvm/demand_paging_test.c
index c516cece2368..8cdb8871e4d8 100644
--- a/tools/testing/selftests/kvm/demand_paging_test.c
+++ b/tools/testing/selftests/kvm/demand_paging_test.c
@@ -35,6 +35,12 @@
#define DEFAULT_GUEST_TEST_MEM_SIZE (1 << 30) /* 1G */
+#ifdef PRINT_PER_PAGE_UPDATES
+#define PER_PAGE_DEBUG(...) DEBUG(__VA_ARGS__)
+#else
+#define PER_PAGE_DEBUG(...)
+#endif
+
#ifdef PRINT_PER_VCPU_UPDATES
#define PER_VCPU_DEBUG(...) DEBUG(__VA_ARGS__)
#else
@@ -111,10 +117,14 @@ static void *vcpu_worker(void *data)
struct kvm_vm *vm = args->vm;
int vcpu_id = args->vcpu_id;
struct kvm_run *run;
+ struct timespec start;
+ struct timespec end;
vcpu_args_set(vm, vcpu_id, 1, vcpu_id);
run = vcpu_state(vm, vcpu_id);
+ clock_gettime(CLOCK_MONOTONIC, &start);
+
/* Let the guest access its memory */
ret = _vcpu_run(vm, vcpu_id);
TEST_ASSERT(ret == 0, "vcpu_run failed: %d\n", ret);
@@ -124,6 +134,11 @@ static void *vcpu_worker(void *data)
exit_reason_str(run->exit_reason));
}
+ clock_gettime(CLOCK_MONOTONIC, &end);
+ PER_VCPU_DEBUG("vCPU %d execution time: %lld.%.9lds\n", vcpu_id,
+ (long long)(timespec_diff(start, end).tv_sec),
+ timespec_diff(start, end).tv_nsec);
+
return NULL;
}
@@ -161,6 +176,8 @@ static struct kvm_vm *create_vm(enum vm_guest_mode mode, int vcpus,
static int handle_uffd_page_request(int uffd, uint64_t addr)
{
pid_t tid;
+ struct timespec start;
+ struct timespec end;
struct uffdio_copy copy;
int r;
@@ -171,6 +188,8 @@ static int handle_uffd_page_request(int uffd, uint64_t addr)
copy.len = host_page_size;
copy.mode = 0;
+ clock_gettime(CLOCK_MONOTONIC, &start);
+
r = ioctl(uffd, UFFDIO_COPY, &copy);
if (r == -1) {
DEBUG("Failed Paged in 0x%lx from thread %d with errno: %d\n",
@@ -178,6 +197,13 @@ static int handle_uffd_page_request(int uffd, uint64_t addr)
return r;
}
+ clock_gettime(CLOCK_MONOTONIC, &end);
+
+ PER_PAGE_DEBUG("UFFDIO_COPY %d \t%lld ns\n", tid,
+ (long long)timespec_to_ns(timespec_diff(start, end)));
+ PER_PAGE_DEBUG("Paged in %ld bytes at 0x%lx from thread %d\n",
+ host_page_size, addr, tid);
+
return 0;
}
@@ -196,7 +222,10 @@ static void *uffd_handler_thread_fn(void *arg)
int pipefd = uffd_args->pipefd;
useconds_t delay = uffd_args->delay;
int64_t pages = 0;
+ struct timespec start;
+ struct timespec end;
+ clock_gettime(CLOCK_MONOTONIC, &start);
while (!quit_uffd_thread) {
struct uffd_msg msg;
struct pollfd pollfd[2];
@@ -264,6 +293,13 @@ static void *uffd_handler_thread_fn(void *arg)
pages++;
}
+ clock_gettime(CLOCK_MONOTONIC, &end);
+ PER_VCPU_DEBUG("userfaulted %ld pages over %lld.%.9lds. (%f/sec)\n",
+ pages, (long long)(timespec_diff(start, end).tv_sec),
+ timespec_diff(start, end).tv_nsec, pages /
+ ((double)timespec_diff(start, end).tv_sec +
+ (double)timespec_diff(start, end).tv_nsec / 1000000000.0));
+
return NULL;
}
@@ -328,6 +364,8 @@ static void run_test(enum vm_guest_mode mode, bool use_uffd,
uint64_t guest_num_pages;
int vcpu_id;
int r;
+ struct timespec start;
+ struct timespec end;
vm = create_vm(mode, vcpus, vcpu_memory_bytes);
@@ -369,7 +407,6 @@ static void run_test(enum vm_guest_mode mode, bool use_uffd,
DEBUG("guest physical test memory offset: 0x%lx\n",
guest_test_phys_mem);
-
/* Add an extra memory slot for testing demand paging */
vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
guest_test_phys_mem,
@@ -451,6 +488,8 @@ static void run_test(enum vm_guest_mode mode, bool use_uffd,
DEBUG("Finished creating vCPUs and starting uffd threads\n");
+ clock_gettime(CLOCK_MONOTONIC, &start);
+
for (vcpu_id = 0; vcpu_id < vcpus; vcpu_id++) {
pthread_create(&vcpu_threads[vcpu_id], NULL, vcpu_worker,
&vcpu_args[vcpu_id]);
@@ -466,6 +505,8 @@ static void run_test(enum vm_guest_mode mode, bool use_uffd,
DEBUG("All vCPU threads joined\n");
+ clock_gettime(CLOCK_MONOTONIC, &end);
+
if (use_uffd) {
char c;
@@ -478,6 +519,13 @@ static void run_test(enum vm_guest_mode mode, bool use_uffd,
}
}
+ DEBUG("Total guest execution time: %lld.%.9lds\n",
+ (long long)(timespec_diff(start, end).tv_sec),
+ timespec_diff(start, end).tv_nsec);
+ DEBUG("Overall demand paging rate: %f pgs/sec\n",
+ guest_num_pages / ((double)timespec_diff(start, end).tv_sec +
+ (double)timespec_diff(start, end).tv_nsec / 1000000000.0));
+
ucall_uninit(vm);
kvm_vm_free(vm);
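As a worked example of the rate computation added at the end of run_test(),
the following standalone program shows the same arithmetic with made-up
numbers; none of these values come from the patch:

/* Standalone illustration of the final rate arithmetic; all values are
 * invented for the example.
 */
#include <stdio.h>
#include <time.h>

int main(void)
{
        /* Pretend the guest touched 1G of 4K pages over 3.5 seconds. */
        long guest_num_pages = 262144;
        struct timespec start = { .tv_sec = 100, .tv_nsec = 250000000 };
        struct timespec end   = { .tv_sec = 103, .tv_nsec = 750000000 };
        struct timespec diff;

        diff.tv_sec = end.tv_sec - start.tv_sec;
        diff.tv_nsec = end.tv_nsec - start.tv_nsec;
        if (diff.tv_nsec < 0) {
                diff.tv_sec--;
                diff.tv_nsec += 1000000000L;
        }

        /* 262144 / 3.5 ~= 74898 pages per second. */
        printf("Overall demand paging rate: %f pgs/sec\n",
               guest_num_pages /
               ((double)diff.tv_sec + (double)diff.tv_nsec / 1000000000.0));
        return 0;
}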