/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */

#ifndef _FUN_QUEUE_H
#define _FUN_QUEUE_H

#include <linux/interrupt.h>
#include <linux/io.h>

struct device;
struct fun_dev;
struct fun_queue;
struct fun_cqe_info;
struct fun_rsp_common;

/* Callback invoked during CQ processing for each completion entry. */
typedef void (*cq_callback_t)(struct fun_queue *funq, void *data, void *msg,
			      const struct fun_cqe_info *info);

/* State of one RQ buffer: its backing page and DMA address. */
struct fun_rq_info {
	dma_addr_t dma;
	struct page *page;
};

/* A queue group consisting of an SQ, a CQ, and an optional RQ. */
struct fun_queue {
	struct fun_dev *fdev;
	spinlock_t sq_lock;

	dma_addr_t cq_dma_addr;
	dma_addr_t sq_dma_addr;
	dma_addr_t rq_dma_addr;

	u32 __iomem *cq_db;
	u32 __iomem *sq_db;
	u32 __iomem *rq_db;

	void *cqes;
	void *sq_cmds;
	struct fun_eprq_rqbuf *rqes;
	struct fun_rq_info *rq_info;

	u32 cqid;
	u32 sqid;
	u32 rqid;

	u32 cq_depth;
	u32 sq_depth;
	u32 rq_depth;

	u16 cq_head;
	u16 sq_tail;
	u16 rq_tail;

	u8 cqe_size_log2;
	u8 sqe_size_log2;

	u16 cqe_info_offset;

	u16 rq_buf_idx;
	int rq_buf_offset;
	u16 num_rqe_to_fill;

	u8 cq_intcoal_usec;
	u8 cq_intcoal_nentries;
	u8 sq_intcoal_usec;
	u8 sq_intcoal_nentries;

	u16 cq_flags;
	u16 sq_flags;
	u16 rq_flags;

	/* SQ head writeback */
	u16 sq_comp;

	volatile __be64 *sq_head;

	cq_callback_t cq_cb;
	void *cb_data;

	irq_handler_t irq_handler;
	void *irq_data;
	s16 cq_vector;
	u8 cq_phase;

	/* I/O q index */
	u16 qid;

	char irqname[24];
};

/* Return the address of the SQ descriptor at index @pos. */
static inline void *fun_sqe_at(const struct fun_queue *funq, unsigned int pos)
{
	return funq->sq_cmds + (pos << funq->sqe_size_log2);
}

/* Advance the SQ tail past @tail, wrapping at sq_depth, and ring the SQ
 * doorbell.
 */
static inline void funq_sq_post_tail(struct fun_queue *funq, u16 tail)
{
	if (++tail == funq->sq_depth)
		tail = 0;
	funq->sq_tail = tail;
	writel(tail, funq->sq_db);
}
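
/* Purely illustrative sketch, not part of the upstream API: one way a
 * producer could reserve the next SQ slot, build a command in place, and
 * ring the doorbell.  The helper name and the build_cmd() callback are
 * hypothetical; funq->sq_lock is assumed to be held by the caller and the
 * full-queue check against the SQ head write-back is omitted.
 */
static inline void funq_sq_post_one_example(struct fun_queue *funq,
					    void (*build_cmd)(void *sqe,
							      void *arg),
					    void *arg)
{
	void *sqe = fun_sqe_at(funq, funq->sq_tail);	/* next free SQE slot */

	build_cmd(sqe, arg);			/* caller fills in the descriptor */
	funq_sq_post_tail(funq, funq->sq_tail);	/* bump tail, write doorbell */
}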

/* Return the fun_cqe_info embedded in the CQ entry @cqe. */
static inline struct fun_cqe_info *funq_cqe_info(const struct fun_queue *funq,
						 void *cqe)
{
	return cqe + funq->cqe_info_offset;
}

/* Ring the RQ doorbell with the current RQ tail. */
static inline void funq_rq_post(struct fun_queue *funq)
{
	writel(funq->rq_tail, funq->rq_db);
}

/* Parameters describing the queues to allocate with fun_alloc_queue(). */
struct fun_queue_alloc_req {
	u8  cqe_size_log2;
	u8  sqe_size_log2;

	u16 cq_flags;
	u16 sq_flags;
	u16 rq_flags;

	u32 cq_depth;
	u32 sq_depth;
	u32 rq_depth;

	u8 cq_intcoal_usec;
	u8 cq_intcoal_nentries;
	u8 sq_intcoal_usec;
	u8 sq_intcoal_nentries;
};
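
/* Illustrative sketch only: builds an example allocation request.  The
 * function name and all numeric values below are arbitrary placeholders
 * chosen for this sketch, not recommendations from the driver.
 */
static inline struct fun_queue_alloc_req fun_example_queue_req(void)
{
	struct fun_queue_alloc_req req = {
		.cqe_size_log2 = 6,	/* 64-byte CQEs (example value) */
		.sqe_size_log2 = 6,	/* 64-byte SQEs (example value) */
		.cq_depth = 256,
		.sq_depth = 256,
		.rq_depth = 0,		/* no receive queue in this example */
	};

	return req;
}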

int fun_sq_create(struct fun_dev *fdev, u16 flags, u32 sqid, u32 cqid,
		  u8 sqe_size_log2, u32 sq_depth, dma_addr_t dma_addr,
		  u8 coal_nentries, u8 coal_usec, u32 irq_num,
		  u32 scan_start_id, u32 scan_end_id,
		  u32 rq_buf_size_log2, u32 *sqidp, u32 __iomem **dbp);
int fun_cq_create(struct fun_dev *fdev, u16 flags, u32 cqid, u32 rqid,
		  u8 cqe_size_log2, u32 cq_depth, dma_addr_t dma_addr,
		  u16 headroom, u16 tailroom, u8 coal_nentries, u8 coal_usec,
		  u32 irq_num, u32 scan_start_id, u32 scan_end_id,
		  u32 *cqidp, u32 __iomem **dbp);
/* Allocate / free DMA-coherent descriptor ring memory, plus optional
 * per-entry software state and an optional write-back area.
 */
void *fun_alloc_ring_mem(struct device *dma_dev, size_t depth,
			 size_t hw_desc_sz, size_t sw_desc_size, bool wb,
			 int numa_node, dma_addr_t *dma_addr, void **sw_va,
			 volatile __be64 **wb_va);
void fun_free_ring_mem(struct device *dma_dev, size_t depth, size_t hw_desc_sz,
		       bool wb, void *hw_va, dma_addr_t dma_addr, void *sw_va);

#define fun_destroy_sq(fdev, sqid) \
	fun_res_destroy((fdev), FUN_ADMIN_OP_EPSQ, 0, (sqid))
#define fun_destroy_cq(fdev, cqid) \
	fun_res_destroy((fdev), FUN_ADMIN_OP_EPCQ, 0, (cqid))

struct fun_queue *fun_alloc_queue(struct fun_dev *fdev, int qid,
				  const struct fun_queue_alloc_req *req);
void fun_free_queue(struct fun_queue *funq);

static inline void fun_set_cq_callback(struct fun_queue *funq, cq_callback_t cb,
				       void *cb_data)
{
	funq->cq_cb = cb;
	funq->cb_data = cb_data;
}

int fun_create_rq(struct fun_queue *funq);
int fun_create_queue(struct fun_queue *funq);

void fun_free_irq(struct fun_queue *funq);
int fun_request_irq(struct fun_queue *funq, const char *devname,
		    irq_handler_t handler, void *data);

/* Process up to @max pending completions, invoking the registered CQ
 * callback for each.
 */
unsigned int __fun_process_cq(struct fun_queue *funq, unsigned int max);
unsigned int fun_process_cq(struct fun_queue *funq, unsigned int max);
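
/* Purely illustrative bring-up sketch, not part of the upstream API: one way
 * a driver might stand up a single I/O queue group with the helpers declared
 * above.  The function name, the use of fun_example_queue_req(), and the
 * simplified error handling (device-side teardown via fun_destroy_sq() /
 * fun_destroy_cq() is omitted) are all assumptions made for this sketch.
 * The supplied IRQ handler would typically call fun_process_cq() to drain
 * completions through the registered callback.
 */
static inline struct fun_queue *
fun_bringup_queue_example(struct fun_dev *fdev, int qid, const char *devname,
			  irq_handler_t handler, cq_callback_t cb,
			  void *cb_data)
{
	struct fun_queue_alloc_req req = fun_example_queue_req();
	struct fun_queue *funq;

	funq = fun_alloc_queue(fdev, qid, &req);	/* host-side memory only */
	if (!funq)
		return NULL;

	fun_set_cq_callback(funq, cb, cb_data);

	if (fun_create_queue(funq))		/* create the queues on the device */
		goto err_free;

	if (fun_request_irq(funq, devname, handler, funq))
		goto err_free;

	return funq;

err_free:
	/* Simplified: a real caller would also destroy any device-side queues. */
	fun_free_queue(funq);
	return NULL;
}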

#endif /* _FUN_QUEUE_H */