path: root/net/xdp/xsk_queue.c
// SPDX-License-Identifier: GPL-2.0
/* XDP user-space ring structure
 * Copyright(c) 2018 Intel Corporation.
 */

#include <linux/log2.h>
#include <linux/slab.h>
#include <linux/overflow.h>

#include "xsk_queue.h"

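/* Attach the UMEM geometry (size and chunk mask) to the queue; these are
 * used when validating addresses and descriptors on the ring.
 */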
void xskq_set_umem(struct xsk_queue *q, u64 size, u64 chunk_mask)
{
	if (!q)
		return;

	q->size = size;
	q->chunk_mask = chunk_mask;
}

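/* Fill and completion rings carry one 64-bit UMEM address per entry. */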
static u32 xskq_umem_get_ring_size(struct xsk_queue *q)
{
	return sizeof(struct xdp_umem_ring) + q->nentries * sizeof(u64);
}

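/* Rx and Tx rings carry one struct xdp_desc per entry. */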
static u32 xskq_rxtx_get_ring_size(struct xsk_queue *q)
{
	return sizeof(struct xdp_ring) + q->nentries * sizeof(struct xdp_desc);
}

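/* Allocate a queue and its backing ring. nentries must be a power of two
 * so that ring_mask can be used to wrap the ring indices.
 */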
struct xsk_queue *xskq_create(u32 nentries, bool umem_queue)
{
	struct xsk_queue *q;
	gfp_t gfp_flags;
	size_t size;

	q = kzalloc(sizeof(*q), GFP_KERNEL);
	if (!q)
		return NULL;

	q->nentries = nentries;
	q->ring_mask = nentries - 1;

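	/* Ring allocations can be large; __GFP_NOWARN and __GFP_NORETRY
	 * let them fail quietly instead of warning or forcing heavy reclaim.
	 */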
	gfp_flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN |
		    __GFP_COMP  | __GFP_NORETRY;
	size = umem_queue ? xskq_umem_get_ring_size(q) :
	       xskq_rxtx_get_ring_size(q);

	q->ring = (struct xdp_ring *)__get_free_pages(gfp_flags,
						      get_order(size));
	if (!q->ring) {
		kfree(q);
		return NULL;
	}

	return q;
}

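/* Release the pages backing the ring, then the queue itself. */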
void xskq_destroy(struct xsk_queue *q)
{
	if (!q)
		return;

	page_frag_free(q->ring);
	kfree(q);
}

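/* Allocate a fill queue reuse cache able to hold nentries handles, rounded
 * up to the next power of two. Only the header is zeroed here; the handles
 * array is populated by users of the cache.
 */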
struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries)
{
	struct xdp_umem_fq_reuse *newq;

	/* Rounding up to a power of two can overflow u32; bail out if the
	 * result wrapped below nentries.
	 */
	if (nentries > (u32)roundup_pow_of_two(nentries))
		return NULL;
	nentries = roundup_pow_of_two(nentries);

	newq = kvmalloc(struct_size(newq, handles, nentries), GFP_KERNEL);
	if (!newq)
		return NULL;
	memset(newq, 0, offsetof(typeof(*newq), handles));

	newq->nentries = nentries;
	return newq;
}
EXPORT_SYMBOL_GPL(xsk_reuseq_prepare);

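/* Install newq as the UMEM's reuse queue, carrying over any handles still
 * queued on the old one. Returns the queue the caller should free: NULL if
 * there was nothing to replace, newq if it is too small to hold the old
 * entries, otherwise the queue that was replaced.
 */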
struct xdp_umem_fq_reuse *xsk_reuseq_swap(struct xdp_umem *umem,
					  struct xdp_umem_fq_reuse *newq)
{
	struct xdp_umem_fq_reuse *oldq = umem->fq_reuse;

	if (!oldq) {
		umem->fq_reuse = newq;
		return NULL;
	}

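	/* newq cannot hold the entries already queued; hand it back so the
	 * caller can free it and keep using oldq.
	 */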
	if (newq->nentries < oldq->length)
		return newq;

	memcpy(newq->handles, oldq->handles,
	       array_size(oldq->length, sizeof(u64)));
	newq->length = oldq->length;

	umem->fq_reuse = newq;
	return oldq;
}
EXPORT_SYMBOL_GPL(xsk_reuseq_swap);

void xsk_reuseq_free(struct xdp_umem_fq_reuse *rq)
{
	kvfree(rq);
}
EXPORT_SYMBOL_GPL(xsk_reuseq_free);

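/* Free the UMEM's reuse queue, if any, and clear the reference. */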
void xsk_reuseq_destroy(struct xdp_umem *umem)
{
	xsk_reuseq_free(umem->fq_reuse);
	umem->fq_reuse = NULL;
}