arch/tile/include/asm/spinlock_32.h
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 *
 * 32-bit SMP spinlocks.
 */

#ifndef _ASM_TILE_SPINLOCK_32_H
#define _ASM_TILE_SPINLOCK_32_H

#include <asm/atomic.h>
#include <asm/page.h>
#include <asm/system.h>
#include <linux/compiler.h>

/*
 * We only use even ticket numbers so the '1' inserted by a tns is
 * an unambiguous "ticket is busy" flag: tickets advance 0, 2, 4, ...,
 * so the transient 1 a tns leaves behind can never be a valid ticket.
 */
#define TICKET_QUANTUM 2


/*
 * SMP ticket spinlocks, allowing only a single CPU anywhere
 *
 * (the type definitions are in asm/spinlock_types.h)
 */
static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	/*
	 * Note that even if a new ticket is in the process of being
	 * acquired (so lock->next_ticket reads as 1), it's still reasonable
	 * to claim the lock is held, since it will be momentarily
	 * if not already.  There's no need to wait for a "valid"
	 * lock->next_ticket to become available.
	 */
	return lock->next_ticket != lock->current_ticket;
}

void arch_spin_lock(arch_spinlock_t *lock);
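
/*
 * For illustration only (not part of this header): a minimal sketch of
 * how the out-of-line arch_spin_lock() can be built from the pieces
 * above.  tns atomically reads the lock word and writes a 1 into it,
 * so an odd value means another cpu is mid-update and we must retry;
 * an even value is our ticket.  Storing the bumped ticket back both
 * publishes the next ticket and releases the tns hold on the word.
 * cpu_relax() stands in for whatever backoff the real implementation
 * uses while spinning.
 *
 *	void arch_spin_lock(arch_spinlock_t *lock)
 *	{
 *		int my_ticket;
 *
 *		while ((my_ticket = __insn_tns((int *)&lock->next_ticket)) & 1)
 *			cpu_relax();
 *
 *		lock->next_ticket = my_ticket + TICKET_QUANTUM;
 *
 *		while (lock->current_ticket != my_ticket)
 *			cpu_relax();
 *	}
 */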

/* We cannot take an interrupt after getting a ticket, so don't enable them. */
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)

int arch_spin_trylock(arch_spinlock_t *lock);
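
/*
 * Likewise a sketch (not the actual implementation) of
 * arch_spin_trylock(): grab the tns hold on next_ticket; if our ticket
 * is already current, take the lock by publishing the next ticket.
 * Otherwise restore the original word (unless an in-progress tns beat
 * us to it, in which case its owner will restore it) and fail.
 *
 *	int arch_spin_trylock(arch_spinlock_t *lock)
 *	{
 *		int my_ticket = __insn_tns((int *)&lock->next_ticket);
 *
 *		if (my_ticket == lock->current_ticket) {
 *			lock->next_ticket = my_ticket + TICKET_QUANTUM;
 *			return 1;
 *		}
 *		if (!(my_ticket & 1))
 *			lock->next_ticket = my_ticket;
 *		return 0;
 *	}
 */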

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	/* For efficiency, overlap fetching the old ticket with the wmb(). */
	int old_ticket = lock->current_ticket;
	wmb();  /* guarantee anything modified under the lock is visible */
	lock->current_ticket = old_ticket + TICKET_QUANTUM;
}

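/* Busy-wait until the lock is no longer held; does not acquire it. */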
void arch_spin_unlock_wait(arch_spinlock_t *lock);

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * We use a "tns/store-back" technique on a single word to manage
 * the lock state, looping around to retry if the tns returns 1.
 */

/* Internal layout of the word; do not use. */
#define _WR_NEXT_SHIFT	8
#define _WR_CURR_SHIFT  16
#define _WR_WIDTH       8
#define _RD_COUNT_SHIFT 24
#define _RD_COUNT_WIDTH 8
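
/*
 * Resulting layout of the 32-bit lock word (widths from the defines
 * above).  A tns atomically reads the word and writes 1 into it, so
 * while an update is in progress the whole word reads as 1; otherwise
 * the low 8 bits are zero.
 *
 *	 31          24 23          16 15           8 7            0
 *	+--------------+--------------+--------------+--------------+
 *	| reader count | writer curr  | writer next  |  0, or 1 if  |
 *	|              |   ticket     |   ticket     |   tns-busy   |
 *	+--------------+--------------+--------------+--------------+
 */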

/* Internal functions; do not use. */
void arch_read_lock_slow(arch_rwlock_t *, u32);
int arch_read_trylock_slow(arch_rwlock_t *);
void arch_read_unlock_slow(arch_rwlock_t *);
void arch_write_lock_slow(arch_rwlock_t *, u32);
void arch_write_unlock_slow(arch_rwlock_t *, u32);

/**
 * arch_read_can_lock() - would read_trylock() succeed?
 */
static inline int arch_read_can_lock(arch_rwlock_t *rwlock)
{
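	/*
	 * Shifting left by _RD_COUNT_WIDTH drops the reader count:
	 * existing readers never block a new reader, but any writer
	 * ticket (or a tns in progress) leaves nonzero bits behind.
	 */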
	return (rwlock->lock << _RD_COUNT_WIDTH) == 0;
}

/**
 * arch_write_can_lock() - would write_trylock() succeed?
 */
static inline int arch_write_can_lock(arch_rwlock_t *rwlock)
{
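	/* Writers need the whole word idle: no readers, writers, or tns. */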
	return rwlock->lock == 0;
}

/**
 * arch_read_lock() - acquire a read lock.
 */
static inline void arch_read_lock(arch_rwlock_t *rwlock)
{
	u32 val = __insn_tns((int *)&rwlock->lock);
	if (unlikely(val << _RD_COUNT_WIDTH)) {
		arch_read_lock_slow(rwlock, val);
		return;
	}
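	/* Bump the reader count; the store back also releases the tns hold. */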
	rwlock->lock = val + (1 << _RD_COUNT_SHIFT);
}

/**
 * arch_write_lock() - acquire a write lock.
 */
static inline void arch_write_lock(arch_rwlock_t *rwlock)
{
	u32 val = __insn_tns((int *)&rwlock->lock);
	if (unlikely(val != 0)) {
		arch_write_lock_slow(rwlock, val);
		return;
	}
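	/* Claim the lock by publishing the first writer "next" ticket. */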
	rwlock->lock = 1 << _WR_NEXT_SHIFT;
}

/**
 * arch_read_trylock() - try to acquire a read lock.
 */
static inline int arch_read_trylock(arch_rwlock_t *rwlock)
{
	int locked;
	u32 val = __insn_tns((int *)&rwlock->lock);
	if (unlikely(val & 1)) {
		return arch_read_trylock_slow(rwlock);
	}
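	/* As in arch_read_can_lock(), only writer activity blocks readers. */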
	locked = (val << _RD_COUNT_WIDTH) == 0;
	rwlock->lock = val + (locked << _RD_COUNT_SHIFT);
	return locked;
}

/**
 * arch_write_trylock() - try to acquire a write lock.
 */
static inline int arch_write_trylock(arch_rwlock_t *rwlock)
{
	u32 val = __insn_tns((int *)&rwlock->lock);

	/*
	 * If a tns is in progress, or there's a waiting or active locker,
	 * or active readers, we can't take the lock, so give up.
	 */
	if (unlikely(val != 0)) {
		if (!(val & 1))
			rwlock->lock = val;
		return 0;
	}

	/* Set the "next" field to mark it locked. */
	rwlock->lock = 1 << _WR_NEXT_SHIFT;
	return 1;
}

/**
 * arch_read_unlock() - release a read lock.
 */
static inline void arch_read_unlock(arch_rwlock_t *rwlock)
{
	u32 val;
	mb();  /* guarantee anything modified under the lock is visible */
	val = __insn_tns((int *)&rwlock->lock);
	if (unlikely(val & 1)) {
		arch_read_unlock_slow(rwlock);
		return;
	}
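	/* Drop the reader count; the store back also releases the tns hold. */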
	rwlock->lock = val - (1 << _RD_COUNT_SHIFT);
}

/**
 * arch_write_unlock() - release a write lock.
 */
static inline void arch_write_unlock(arch_rwlock_t *rwlock)
{
	u32 val;
	mb();  /* guarantee anything modified under the lock is visible */
	val = __insn_tns((int *)&rwlock->lock);
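	/*
	 * Anything but exactly our own write ticket means readers or
	 * writers are queued (or a tns raced with us); let the slow
	 * path sort out the handoff.
	 */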
	if (unlikely(val != (1 << _WR_NEXT_SHIFT))) {
		arch_write_unlock_slow(rwlock, val);
		return;
	}
	rwlock->lock = 0;
}

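/* As above, the flags variants do not re-enable interrupts to wait. */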
#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#endif /* _ASM_TILE_SPINLOCK_32_H */