author     Jason A. Donenfeld <Jason@zx2c4.com>  2018-09-28 03:05:22 +0200
committer  Jason A. Donenfeld <Jason@zx2c4.com>  2018-10-02 03:41:49 +0200
commit     45a53bbaafbca0af90f0fb097b1ff6c151b5d8f0 (patch)
tree       9409e8289e08211e35e8cdeef0e509e1e254acc0  /src/hashtables.c
parent     poly1305-mips64: use compiler-defined macros in assembly (diff)
global: prefix all functions with wg_
I understand why this must be done, though I'm not so happy about having to do it. In some places, it puts us over 80 chars and we have to break lines up in further ugly ways. And in general, I think this makes things harder to read. Yet another thing we must do to please upstream. Maybe this can be replaced in the future by some kind of automatic module namespacing logic in the linker, or even combined with LTO and aggressive symbol stripping.

Suggested-by: Andrew Lunn <andrew@lunn.ch>
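For context, here is a minimal userspace sketch of why the prefix matters; the function names below are illustrative only, not the ones this patch actually touches. Every non-static symbol in a kernel build shares one flat namespace, so a generic name risks colliding with an identically named function in another subsystem, while the wg_ prefix namespaces the symbol by convention.

	#include <stdio.h>

	/* Generic name: harmless in this standalone program, but in a kernel
	 * tree a second non-static definition of "hashtable_init" elsewhere
	 * would be a link-time symbol collision. */
	void hashtable_init(void)
	{
		puts("unprefixed: collision-prone in a global namespace");
	}

	/* Prefixed name: wg_ acts as a poor man's namespace, which is what
	 * this commit applies by hand to every WireGuard function. */
	void wg_hashtable_init(void)
	{
		puts("prefixed: unambiguous across the tree");
	}

	int main(void)
	{
		hashtable_init();
		wg_hashtable_init();
		return 0;
	}

The linker-level namespacing or LTO-plus-symbol-stripping ideas mentioned in the commit message would make such manual prefixing unnecessary, but at the time of this commit the explicit prefix is the convention upstream expects.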
Diffstat (limited to 'src/hashtables.c')
-rw-r--r--  src/hashtables.c  40
1 file changed, 20 insertions(+), 20 deletions(-)
diff --git a/src/hashtables.c b/src/hashtables.c
index afe17e9..6e5518b 100644
--- a/src/hashtables.c
+++ b/src/hashtables.c
@@ -19,15 +19,15 @@ static struct hlist_head *pubkey_bucket(struct pubkey_hashtable *table,
(HASH_SIZE(table->hashtable) - 1)];
}
-void pubkey_hashtable_init(struct pubkey_hashtable *table)
+void wg_pubkey_hashtable_init(struct pubkey_hashtable *table)
{
get_random_bytes(&table->key, sizeof(table->key));
hash_init(table->hashtable);
mutex_init(&table->lock);
}
-void pubkey_hashtable_add(struct pubkey_hashtable *table,
- struct wireguard_peer *peer)
+void wg_pubkey_hashtable_add(struct pubkey_hashtable *table,
+ struct wireguard_peer *peer)
{
mutex_lock(&table->lock);
hlist_add_head_rcu(&peer->pubkey_hash,
@@ -35,8 +35,8 @@ void pubkey_hashtable_add(struct pubkey_hashtable *table,
mutex_unlock(&table->lock);
}
-void pubkey_hashtable_remove(struct pubkey_hashtable *table,
- struct wireguard_peer *peer)
+void wg_pubkey_hashtable_remove(struct pubkey_hashtable *table,
+ struct wireguard_peer *peer)
{
mutex_lock(&table->lock);
hlist_del_init_rcu(&peer->pubkey_hash);
@@ -45,8 +45,8 @@ void pubkey_hashtable_remove(struct pubkey_hashtable *table,
/* Returns a strong reference to a peer */
struct wireguard_peer *
-pubkey_hashtable_lookup(struct pubkey_hashtable *table,
- const u8 pubkey[NOISE_PUBLIC_KEY_LEN])
+wg_pubkey_hashtable_lookup(struct pubkey_hashtable *table,
+ const u8 pubkey[NOISE_PUBLIC_KEY_LEN])
{
struct wireguard_peer *iter_peer, *peer = NULL;
@@ -59,7 +59,7 @@ pubkey_hashtable_lookup(struct pubkey_hashtable *table,
break;
}
}
- peer = peer_get_maybe_zero(peer);
+ peer = wg_peer_get_maybe_zero(peer);
rcu_read_unlock_bh();
return peer;
}
@@ -74,7 +74,7 @@ static struct hlist_head *index_bucket(struct index_hashtable *table,
(HASH_SIZE(table->hashtable) - 1)];
}
-void index_hashtable_init(struct index_hashtable *table)
+void wg_index_hashtable_init(struct index_hashtable *table)
{
hash_init(table->hashtable);
spin_lock_init(&table->lock);
@@ -104,8 +104,8 @@ void index_hashtable_init(struct index_hashtable *table)
* is another thing to consider moving forward.
*/
-__le32 index_hashtable_insert(struct index_hashtable *table,
- struct index_hashtable_entry *entry)
+__le32 wg_index_hashtable_insert(struct index_hashtable *table,
+ struct index_hashtable_entry *entry)
{
struct index_hashtable_entry *existing_entry;
@@ -151,9 +151,9 @@ search_unused_slot:
return entry->index;
}
-bool index_hashtable_replace(struct index_hashtable *table,
- struct index_hashtable_entry *old,
- struct index_hashtable_entry *new)
+bool wg_index_hashtable_replace(struct index_hashtable *table,
+ struct index_hashtable_entry *old,
+ struct index_hashtable_entry *new)
{
if (unlikely(hlist_unhashed(&old->index_hash)))
return false;
@@ -172,8 +172,8 @@ bool index_hashtable_replace(struct index_hashtable *table,
return true;
}
-void index_hashtable_remove(struct index_hashtable *table,
- struct index_hashtable_entry *entry)
+void wg_index_hashtable_remove(struct index_hashtable *table,
+ struct index_hashtable_entry *entry)
{
spin_lock_bh(&table->lock);
hlist_del_init_rcu(&entry->index_hash);
@@ -182,9 +182,9 @@ void index_hashtable_remove(struct index_hashtable *table,
/* Returns a strong reference to a entry->peer */
struct index_hashtable_entry *
-index_hashtable_lookup(struct index_hashtable *table,
- const enum index_hashtable_type type_mask,
- const __le32 index, struct wireguard_peer **peer)
+wg_index_hashtable_lookup(struct index_hashtable *table,
+ const enum index_hashtable_type type_mask,
+ const __le32 index, struct wireguard_peer **peer)
{
struct index_hashtable_entry *iter_entry, *entry = NULL;
@@ -198,7 +198,7 @@ index_hashtable_lookup(struct index_hashtable *table,
}
}
if (likely(entry)) {
- entry->peer = peer_get_maybe_zero(entry->peer);
+ entry->peer = wg_peer_get_maybe_zero(entry->peer);
if (likely(entry->peer))
*peer = entry->peer;
else