/* tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/locks.h */
#ifndef LOCKS_H
#define LOCKS_H

#include <errno.h>	/* for EBUSY */
#include <limits.h>
#include <pthread.h>
#include <stdbool.h>

#include "assume.h"
#include "bug_on.h"
#include "preempt.h"

int nondet_int(void);

#define __acquire(x)
#define __acquires(x)
#define __release(x)
#define __releases(x)

/*
 * Only one lock mechanism is used at a time.  Define PTHREAD_LOCK to use
 * real pthread mutexes; otherwise the CBMC-friendly spinlock model below
 * is used.
 */
#ifdef PTHREAD_LOCK
struct lock_impl {
	pthread_mutex_t mutex;
};

static inline void lock_impl_lock(struct lock_impl *lock)
{
	BUG_ON(pthread_mutex_lock(&lock->mutex));
}

static inline void lock_impl_unlock(struct lock_impl *lock)
{
	BUG_ON(pthread_mutex_unlock(&lock->mutex));
}

static inline bool lock_impl_trylock(struct lock_impl *lock)
{
	int err = pthread_mutex_trylock(&lock->mutex);

	if (!err)
		return true;
	else if (err == EBUSY)
		return false;
	BUG();
}

static inline void lock_impl_init(struct lock_impl *lock)
{
	pthread_mutex_init(&lock->mutex, NULL);
}

#define LOCK_IMPL_INITIALIZER {.mutex = PTHREAD_MUTEX_INITIALIZER}

#else /* !defined(PTHREAD_LOCK) */
/* Spinlock that assumes that it always gets the lock immediately. */

struct lock_impl {
	bool locked;
};

static inline bool lock_impl_trylock(struct lock_impl *lock)
{
#ifdef RUN
	/* TODO: Should this be a test and set? */
	return __sync_bool_compare_and_swap(&lock->locked, false, true);
#else
	__CPROVER_atomic_begin();
	bool old_locked = lock->locked;
	lock->locked = true;
	__CPROVER_atomic_end();

	/* Minimal barrier to prevent accesses leaking out of lock. */
	__CPROVER_fence("RRfence", "RWfence");

	return !old_locked;
#endif
}

static inline void lock_impl_lock(struct lock_impl *lock)
{
	/*
	 * CBMC doesn't support busy waiting, so just assume that the
	 * lock is available.
	 */
	assume(lock_impl_trylock(lock));

	/*
	 * If the lock was already held by this thread then the assumption
	 * is unsatisfiable (deadlock).
	 */
}
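
/*
 * Illustrative sketch (not part of the original header): because
 * lock_impl_lock() is modeled with assume(), a recursive acquire does not
 * hang under CBMC; it simply leaves no satisfying execution, so the path
 * is pruned rather than reported as a deadlock:
 *
 *	struct lock_impl l = LOCK_IMPL_INITIALIZER;
 *
 *	lock_impl_lock(&l);
 *	lock_impl_lock(&l);	(trylock fails, assume(false): path pruned)
 */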

static inline void lock_impl_unlock(struct lock_impl *lock)
{
#ifdef RUN
	BUG_ON(!__sync_bool_compare_and_swap(&lock->locked, true, false));
#else
	/* Minimal barrier to prevent accesses leaking out of lock. */
	__CPROVER_fence("RWfence", "WWfence");

	__CPROVER_atomic_begin();
	bool old_locked = lock->locked;
	lock->locked = false;
	__CPROVER_atomic_end();

	BUG_ON(!old_locked);
#endif
}

static inline void lock_impl_init(struct lock_impl *lock)
{
	lock->locked = false;
}

#define LOCK_IMPL_INITIALIZER {.locked = false}

#endif /* !defined(PTHREAD_LOCK) */
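
/*
 * Illustrative note (an assumption about the build, not taken from this
 * header): the mechanism above is selected purely by preprocessor defines.
 * Building a native test harness with -DPTHREAD_LOCK uses real pthread
 * mutexes; building with -DRUN but without PTHREAD_LOCK uses the __sync
 * builtins on the spinlock model; and running CBMC with neither define uses
 * the __CPROVER primitives, whose trylock never blocks.
 */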

/*
 * Implement spinlocks using the lock mechanism. Wrap the lock to prevent mixing
 * locks of different types.
 */
typedef struct {
	struct lock_impl internal_lock;
} spinlock_t;

#define SPIN_LOCK_UNLOCKED {.internal_lock = LOCK_IMPL_INITIALIZER}
#define __SPIN_LOCK_UNLOCKED(x) SPIN_LOCK_UNLOCKED
#define DEFINE_SPINLOCK(x) spinlock_t x = SPIN_LOCK_UNLOCKED

static inline void spin_lock_init(spinlock_t *lock)
{
	lock_impl_init(&lock->internal_lock);
}

static inline void spin_lock(spinlock_t *lock)
{
	/*
	 * When NO_SYNC_SMP_MB is defined, the spin lock operations are also
	 * compiled out in order to eliminate all memory barriers.  They are
	 * only used by the write side anyway.
	 */
#ifndef NO_SYNC_SMP_MB
	preempt_disable();
	lock_impl_lock(&lock->internal_lock);
#endif
}

static inline void spin_unlock(spinlock_t *lock)
{
#ifndef NO_SYNC_SMP_MB
	lock_impl_unlock(&lock->internal_lock);
	preempt_enable();
#endif
}

/* Don't bother with interrupts */
#define spin_lock_irq(lock) spin_lock(lock)
#define spin_unlock_irq(lock) spin_unlock(lock)
#define spin_lock_irqsave(lock, flags) spin_lock(lock)
#define spin_unlock_irqrestore(lock, flags) spin_unlock(lock)
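
/*
 * Usage sketch (illustrative only; my_lock, flags, and shared_count are
 * hypothetical and not part of this header):
 *
 *	static DEFINE_SPINLOCK(my_lock);
 *	static int shared_count;
 *
 *	static void bump(void)
 *	{
 *		unsigned long flags;
 *
 *		spin_lock_irqsave(&my_lock, flags);
 *		shared_count++;
 *		spin_unlock_irqrestore(&my_lock, flags);
 *	}
 *
 * Under CBMC the lock is assumed to be acquired immediately, so only mutual
 * exclusion is being checked, not forward progress.
 */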

/*
 * spin_trylock() is supposed to return an int, but a bool works just as
 * well here.
 */
static inline bool spin_trylock(spinlock_t *lock)
{
#ifndef NO_SYNC_SMP_MB
	preempt_disable();
	return lock_impl_trylock(&lock->internal_lock);
#else
	return true;
#endif
}

struct completion {
	/* Hopefully this won't overflow. */
	unsigned int count;
};

#define COMPLETION_INITIALIZER(x) {.count = 0}
#define DECLARE_COMPLETION(x) struct completion x = COMPLETION_INITIALIZER(x)
#define DECLARE_COMPLETION_ONSTACK(x) DECLARE_COMPLETION(x)

static inline void init_completion(struct completion *c)
{
	c->count = 0;
}

static inline void wait_for_completion(struct completion *c)
{
	unsigned int prev_count = __sync_fetch_and_sub(&c->count, 1);

	assume(prev_count);
}

static inline void complete(struct completion *c)
{
	unsigned int prev_count = __sync_fetch_and_add(&c->count, 1);

	BUG_ON(prev_count == UINT_MAX);
}
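
/*
 * Usage sketch (illustrative only; the two sides shown are hypothetical
 * threads, not code from this header):
 *
 *	static DECLARE_COMPLETION(done);
 *
 *	Signalling side:
 *		complete(&done);
 *
 *	Waiting side:
 *		wait_for_completion(&done);
 *
 * Each wait_for_completion() consumes one complete().  Under CBMC the
 * assume() in wait_for_completion() discards executions in which the wait
 * would have to block, rather than actually blocking.
 */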

/* This function probably isn't very useful for CBMC. */
static inline bool try_wait_for_completion(struct completion *c)
{
	BUG();
}

static inline bool completion_done(struct completion *c)
{
	return c->count;
}

/* TODO: Implement complete_all */
static inline void complete_all(struct completion *c)
{
	BUG();
}

#endif /* LOCKS_H */