From ee31fff091fa9c185d10844f9caf38784afe4745 Mon Sep 17 00:00:00 2001
From: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Date: Tue, 22 Nov 2022 15:39:21 -0500
Subject: selftests/rseq: Implement parametrized mm_cid test

Adapt to the rseq.h API changes introduced by commits
"selftests/rseq: Template memory ordering and percpu access mode".

Build new param_test_mm_cid, param_test_mm_cid_benchmark, and
param_test_mm_cid_compare_twice executables to test the new "mm_cid"
rseq field.

Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20221122203932.231377-20-mathieu.desnoyers@efficios.com
---
 tools/testing/selftests/rseq/param_test.c | 148 ++++++++++++++++++++----------
 1 file changed, 100 insertions(+), 48 deletions(-)
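Note, not part of the commit: the per-CPU index used by the test is now
chosen at build time. Below is a minimal sketch of the resulting pattern,
assuming the rseq.h helpers referenced in the diff that follows
(rseq_current_mm_cid(), rseq_cpu_start(), the templated rseq_addv());
struct counter and percpu_inc() are stand-ins mirroring the
test_percpu_inc_thread() fast path:

#define _GNU_SOURCE
#include <sched.h>		/* CPU_SETSIZE */
#include <stdint.h>
#include "rseq.h"

#ifdef BUILDOPT_RSEQ_PERCPU_MM_CID
# define RSEQ_PERCPU	RSEQ_PERCPU_MM_CID	/* slots indexed by concurrency ID */
static int get_current_cpu_id(void)
{
	return rseq_current_mm_cid();
}
#else
# define RSEQ_PERCPU	RSEQ_PERCPU_CPU_ID	/* slots indexed by CPU number */
static int get_current_cpu_id(void)
{
	return rseq_cpu_start();
}
#endif

struct counter {
	intptr_t count;
} __attribute__((aligned(128)));	/* avoid false sharing between slots */

static struct counter c[CPU_SETSIZE];

static void percpu_inc(void)
{
	int cpu, ret;

	do {
		cpu = get_current_cpu_id();
		/* Retry when the rseq critical section aborts or cpu changed. */
		ret = rseq_addv(RSEQ_MO_RELAXED, RSEQ_PERCPU,
				&c[cpu].count, 1, cpu);
	} while (rseq_unlikely(ret));
}

Compiling one source twice, with and without BUILDOPT_RSEQ_PERCPU_MM_CID
defined (presumably -DBUILDOPT_RSEQ_PERCPU_MM_CID in the selftests
Makefile, which is not part of this diff), is what yields the param_test
and param_test_mm_cid flavors named above.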
diff --git a/tools/testing/selftests/rseq/param_test.c b/tools/testing/selftests/rseq/param_test.c
index 9869369a8607..cadb9d884811 100644
--- a/tools/testing/selftests/rseq/param_test.c
+++ b/tools/testing/selftests/rseq/param_test.c
@@ -16,6 +16,7 @@
 #include <signal.h>
 #include <errno.h>
 #include <stddef.h>
+#include <stdbool.h>
 
 static inline pid_t rseq_gettid(void)
 {
@@ -36,7 +37,7 @@ static int opt_modulo, verbose;
 
 static int opt_yield, opt_signal, opt_sleep,
 		opt_disable_rseq, opt_threads = 200,
-		opt_disable_mod = 0, opt_test = 's', opt_mb = 0;
+		opt_disable_mod = 0, opt_test = 's';
 
 static long long opt_reps = 5000;
 
@@ -264,6 +265,63 @@ unsigned int yield_mod_cnt, nr_abort;
 
 #include "rseq.h"
 
+static enum rseq_mo opt_mo = RSEQ_MO_RELAXED;
+
+#ifdef RSEQ_ARCH_HAS_OFFSET_DEREF_ADDV
+#define TEST_MEMBARRIER
+
+static int sys_membarrier(int cmd, int flags, int cpu_id)
+{
+	return syscall(__NR_membarrier, cmd, flags, cpu_id);
+}
+#endif
+
+#ifdef BUILDOPT_RSEQ_PERCPU_MM_CID
+# define RSEQ_PERCPU	RSEQ_PERCPU_MM_CID
+static
+int get_current_cpu_id(void)
+{
+	return rseq_current_mm_cid();
+}
+static
+bool rseq_validate_cpu_id(void)
+{
+	return rseq_mm_cid_available();
+}
+# ifdef TEST_MEMBARRIER
+/*
+ * Membarrier does not currently support targeting a mm_cid, so
+ * issue the barrier on all cpus.
+ */
+static
+int rseq_membarrier_expedited(int cpu)
+{
+	return sys_membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ,
+			      0, 0);
+}
+# endif /* TEST_MEMBARRIER */
+#else
+# define RSEQ_PERCPU	RSEQ_PERCPU_CPU_ID
+static
+int get_current_cpu_id(void)
+{
+	return rseq_cpu_start();
+}
+static
+bool rseq_validate_cpu_id(void)
+{
+	return rseq_current_cpu_raw() >= 0;
+}
+# ifdef TEST_MEMBARRIER
+static
+int rseq_membarrier_expedited(int cpu)
+{
+	return sys_membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ,
+			      MEMBARRIER_CMD_FLAG_CPU, cpu);
+}
+# endif /* TEST_MEMBARRIER */
+#endif
+
 struct percpu_lock_entry {
 	intptr_t v;
 } __attribute__((aligned(128)));
@@ -351,8 +409,9 @@ static int rseq_this_cpu_lock(struct percpu_lock *lock)
 	for (;;) {
 		int ret;
 
-		cpu = rseq_cpu_start();
-		ret = rseq_cmpeqv_storev(&lock->c[cpu].v,
+		cpu = get_current_cpu_id();
+		ret = rseq_cmpeqv_storev(RSEQ_MO_RELAXED, RSEQ_PERCPU,
+					 &lock->c[cpu].v,
 					 0, 1, cpu);
 		if (rseq_likely(!ret))
 			break;
@@ -469,8 +528,9 @@ void *test_percpu_inc_thread(void *arg)
 		do {
 			int cpu;
 
-			cpu = rseq_cpu_start();
-			ret = rseq_addv(&data->c[cpu].count, 1, cpu);
+			cpu = get_current_cpu_id();
+			ret = rseq_addv(RSEQ_MO_RELAXED, RSEQ_PERCPU,
+					&data->c[cpu].count, 1, cpu);
 		} while (rseq_unlikely(ret));
 #ifndef BENCHMARK
 		if (i != 0 && !(i % (reps / 10)))
@@ -539,13 +599,14 @@ void this_cpu_list_push(struct percpu_list *list,
 		intptr_t *targetptr, newval, expect;
 		int ret;
 
-		cpu = rseq_cpu_start();
+		cpu = get_current_cpu_id();
 		/* Load list->c[cpu].head with single-copy atomicity. */
 		expect = (intptr_t)RSEQ_READ_ONCE(list->c[cpu].head);
 		newval = (intptr_t)node;
 		targetptr = (intptr_t *)&list->c[cpu].head;
 		node->next = (struct percpu_list_node *)expect;
-		ret = rseq_cmpeqv_storev(targetptr, expect, newval, cpu);
+		ret = rseq_cmpeqv_storev(RSEQ_MO_RELAXED, RSEQ_PERCPU,
+					 targetptr, expect, newval, cpu);
 		if (rseq_likely(!ret))
 			break;
 		/* Retry if comparison fails or rseq aborts. */
@@ -571,13 +632,14 @@ struct percpu_list_node *this_cpu_list_pop(struct percpu_list *list,
 		long offset;
 		int ret;
 
-		cpu = rseq_cpu_start();
+		cpu = get_current_cpu_id();
 		targetptr = (intptr_t *)&list->c[cpu].head;
 		expectnot = (intptr_t)NULL;
 		offset = offsetof(struct percpu_list_node, next);
 		load = (intptr_t *)&head;
-		ret = rseq_cmpnev_storeoffp_load(targetptr, expectnot,
-						 offset, load, cpu);
+		ret = rseq_cmpnev_storeoffp_load(RSEQ_MO_RELAXED, RSEQ_PERCPU,
+						 targetptr, expectnot,
+						 offset, load, cpu);
 		if (rseq_likely(!ret)) {
 			node = head;
 			break;
@@ -715,7 +777,7 @@ bool this_cpu_buffer_push(struct percpu_buffer *buffer,
 		intptr_t offset;
 		int ret;
 
-		cpu = rseq_cpu_start();
+		cpu = get_current_cpu_id();
 		offset = RSEQ_READ_ONCE(buffer->c[cpu].offset);
 		if (offset == buffer->c[cpu].buflen)
 			break;
@@ -723,14 +785,9 @@ bool this_cpu_buffer_push(struct percpu_buffer *buffer,
 		targetptr_spec = (intptr_t *)&buffer->c[cpu].array[offset];
 		newval_final = offset + 1;
 		targetptr_final = &buffer->c[cpu].offset;
-		if (opt_mb)
-			ret = rseq_cmpeqv_trystorev_storev_release(
-				targetptr_final, offset, targetptr_spec,
-				newval_spec, newval_final, cpu);
-		else
-			ret = rseq_cmpeqv_trystorev_storev(targetptr_final,
-				offset, targetptr_spec, newval_spec,
-				newval_final, cpu);
+		ret = rseq_cmpeqv_trystorev_storev(opt_mo, RSEQ_PERCPU,
+			targetptr_final, offset, targetptr_spec,
+			newval_spec, newval_final, cpu);
 		if (rseq_likely(!ret)) {
 			result = true;
 			break;
@@ -753,7 +810,7 @@ struct percpu_buffer_node *this_cpu_buffer_pop(struct percpu_buffer *buffer,
 		intptr_t offset;
 		int ret;
 
-		cpu = rseq_cpu_start();
+		cpu = get_current_cpu_id();
 		/* Load offset with single-copy atomicity. */
 		offset = RSEQ_READ_ONCE(buffer->c[cpu].offset);
 		if (offset == 0) {
@@ -763,7 +820,8 @@ struct percpu_buffer_node *this_cpu_buffer_pop(struct percpu_buffer *buffer,
 		head = RSEQ_READ_ONCE(buffer->c[cpu].array[offset - 1]);
 		newval = offset - 1;
 		targetptr = (intptr_t *)&buffer->c[cpu].offset;
-		ret = rseq_cmpeqv_cmpeqv_storev(targetptr, offset,
+		ret = rseq_cmpeqv_cmpeqv_storev(RSEQ_MO_RELAXED, RSEQ_PERCPU,
+			targetptr, offset,
 			(intptr_t *)&buffer->c[cpu].array[offset - 1],
 			(intptr_t)head, newval, cpu);
 		if (rseq_likely(!ret))
@@ -920,7 +978,7 @@ bool this_cpu_memcpy_buffer_push(struct percpu_memcpy_buffer *buffer,
 		size_t copylen;
 		int ret;
 
-		cpu = rseq_cpu_start();
+		cpu = get_current_cpu_id();
 		/* Load offset with single-copy atomicity. */
 		offset = RSEQ_READ_ONCE(buffer->c[cpu].offset);
 		if (offset == buffer->c[cpu].buflen)
@@ -931,15 +989,11 @@ bool this_cpu_memcpy_buffer_push(struct percpu_memcpy_buffer *buffer,
 		copylen = sizeof(item);
 		newval_final = offset + 1;
 		targetptr_final = &buffer->c[cpu].offset;
-		if (opt_mb)
-			ret = rseq_cmpeqv_trymemcpy_storev_release(
-				targetptr_final, offset,
-				destptr, srcptr, copylen,
-				newval_final, cpu);
-		else
-			ret = rseq_cmpeqv_trymemcpy_storev(targetptr_final,
-				offset, destptr, srcptr, copylen,
-				newval_final, cpu);
+		ret = rseq_cmpeqv_trymemcpy_storev(
+			opt_mo, RSEQ_PERCPU,
+			targetptr_final, offset,
+			destptr, srcptr, copylen,
+			newval_final, cpu);
 		if (rseq_likely(!ret)) {
 			result = true;
 			break;
@@ -964,7 +1018,7 @@ bool this_cpu_memcpy_buffer_pop(struct percpu_memcpy_buffer *buffer,
 		size_t copylen;
 		int ret;
 
-		cpu = rseq_cpu_start();
+		cpu = get_current_cpu_id();
 		/* Load offset with single-copy atomicity. */
 		offset = RSEQ_READ_ONCE(buffer->c[cpu].offset);
 		if (offset == 0)
@@ -975,8 +1029,8 @@ bool this_cpu_memcpy_buffer_pop(struct percpu_memcpy_buffer *buffer,
 		copylen = sizeof(*item);
 		newval_final = offset - 1;
 		targetptr_final = &buffer->c[cpu].offset;
-		ret = rseq_cmpeqv_trymemcpy_storev(targetptr_final,
-			offset, destptr, srcptr, copylen,
+		ret = rseq_cmpeqv_trymemcpy_storev(RSEQ_MO_RELAXED, RSEQ_PERCPU,
+			targetptr_final, offset, destptr, srcptr, copylen,
 			newval_final, cpu);
 		if (rseq_likely(!ret)) {
 			result = true;
@@ -1151,7 +1205,7 @@ static int set_signal_handler(void)
 }
 
 /* Test MEMBARRIER_CMD_PRIVATE_RESTART_RSEQ_ON_CPU membarrier command. */
-#ifdef RSEQ_ARCH_HAS_OFFSET_DEREF_ADDV
+#ifdef TEST_MEMBARRIER
 struct test_membarrier_thread_args {
 	int stop;
 	intptr_t percpu_list_ptr;
@@ -1178,9 +1232,10 @@ void *test_membarrier_worker_thread(void *arg)
 		int ret;
 
 		do {
-			int cpu = rseq_cpu_start();
+			int cpu = get_current_cpu_id();
 
-			ret = rseq_offset_deref_addv(&args->percpu_list_ptr,
+			ret = rseq_offset_deref_addv(RSEQ_MO_RELAXED, RSEQ_PERCPU,
+				&args->percpu_list_ptr,
 				sizeof(struct percpu_list_entry) * cpu, 1, cpu);
 		} while (rseq_unlikely(ret));
 	}
@@ -1217,11 +1272,6 @@ void test_membarrier_free_percpu_list(struct percpu_list *list)
 		free(list->c[i].head);
 }
 
-static int sys_membarrier(int cmd, int flags, int cpu_id)
-{
-	return syscall(__NR_membarrier, cmd, flags, cpu_id);
-}
-
 /*
  * The manager thread swaps per-cpu lists that worker threads see,
  * and validates that there are no unexpected modifications.
@@ -1260,8 +1310,7 @@ void *test_membarrier_manager_thread(void *arg)
 
 		/* Make list_b "active". */
 		atomic_store(&args->percpu_list_ptr, (intptr_t)&list_b);
-		if (sys_membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ,
-					MEMBARRIER_CMD_FLAG_CPU, cpu_a) &&
+		if (rseq_membarrier_expedited(cpu_a) &&
 				errno != ENXIO /* missing CPU */) {
 			perror("sys_membarrier");
 			abort();
@@ -1284,8 +1333,7 @@ void *test_membarrier_manager_thread(void *arg)
 
 		/* Make list_a "active". */
 		atomic_store(&args->percpu_list_ptr, (intptr_t)&list_a);
-		if (sys_membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ,
-					MEMBARRIER_CMD_FLAG_CPU, cpu_b) &&
+		if (rseq_membarrier_expedited(cpu_b) &&
 				errno != ENXIO /* missing CPU*/) {
 			perror("sys_membarrier");
 			abort();
@@ -1356,7 +1404,7 @@ void test_membarrier(void)
 		abort();
 	}
 }
-#else /* RSEQ_ARCH_HAS_OFFSET_DEREF_ADDV */
+#else /* TEST_MEMBARRIER */
 void test_membarrier(void)
 {
 	fprintf(stderr, "rseq_offset_deref_addv is not implemented on this architecture. "
@@ -1513,7 +1561,7 @@ int main(int argc, char **argv)
 			verbose = 1;
 			break;
 		case 'M':
-			opt_mb = 1;
+			opt_mo = RSEQ_MO_RELEASE;
 			break;
 		default:
 			show_usage(argc, argv);
@@ -1533,6 +1581,10 @@ int main(int argc, char **argv)
 
 	if (!opt_disable_rseq && rseq_register_current_thread())
 		goto error;
+	if (!opt_disable_rseq && !rseq_validate_cpu_id()) {
+		fprintf(stderr, "Error: cpu id getter unavailable\n");
+		goto error;
+	}
 	switch (opt_test) {
 	case 's':
 		printf_verbose("spinlock\n");
--
cgit v1.2.3-59-g8ed1b
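
Postscript, not part of the commit: the "-M" option now selects release
ordering through the opt_mo parameter instead of branching to separate
*_release call sites. A minimal sketch, assuming the templated
rseq_cmpeqv_trystorev_storev() from this series; buffer_push_offset() and
its arguments are stand-ins, and RSEQ_PERCPU is the selector from the note
above (defaulted here so the snippet stands alone):

#include <stdbool.h>
#include <stdint.h>
#include "rseq.h"

#ifndef RSEQ_PERCPU
# define RSEQ_PERCPU	RSEQ_PERCPU_CPU_ID	/* assumption: cpu_id indexing */
#endif

static enum rseq_mo opt_mo = RSEQ_MO_RELAXED;	/* "-M" switches this to RSEQ_MO_RELEASE */

/*
 * Store into a speculative slot, then publish the new offset; the final
 * store is a release store only when opt_mo is RSEQ_MO_RELEASE. Before
 * this change, the same effect needed an if (opt_mb) branch calling
 * rseq_cmpeqv_trystorev_storev_release() explicitly.
 */
static bool buffer_push_offset(intptr_t *offset_p, intptr_t offset,
			       intptr_t *slot, intptr_t newval_spec, int cpu)
{
	/* The op returns zero on success, non-zero on compare failure or abort. */
	return !rseq_cmpeqv_trystorev_storev(opt_mo, RSEQ_PERCPU,
					     offset_p, offset, slot,
					     newval_spec, offset + 1, cpu);
}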