path: root/tools/perf/scripts/python/netdev-times.py
# Display the processing stages of packets and the time spent in each.
# It helps us investigate networking and network-device latency.
#
# options
# tx: show only the tx chart
# rx: show only the rx chart
# dev=: show only events related to the specified device
# debug: run in debug mode; shows internal buffer status
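#
# A typical session looks like the following (illustrative; the record
# wrapper is assumed to ship with perf alongside this script):
#
#   perf script record netdev-times              # record net/skb/irq/napi events
#   perf script -s netdev-times.py tx dev=eth0   # tx chart for eth0 only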

import os
import sys

sys.path.append(os.environ['PERF_EXEC_PATH'] + \
	'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')

from perf_trace_context import *
from Core import *
from Util import *

all_event_list = [] # all tracepoint events handled by this script, in order
irq_dic = {} # key is cpu and value is a list which stacks the hard irqs
             # that raised a NET_RX softirq
net_rx_dic = {} # key is cpu and value holds the entry time of the NET_RX
                # softirq and a list of the receive events it processed
receive_hunk_list = [] # list of completed receive hunks, one per NET_RX softirq
rx_skb_list = [] # received packets waiting to be matched with
                 # skb_copy_datagram_iovec

buffer_budget = 65536 # upper bound on the length of rx_skb_list,
                      # tx_queue_list and tx_xmit_list
of_count_rx_skb_list = 0 # overflow count
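
# Each of rx_skb_list, tx_queue_list and tx_xmit_list is kept bounded:
# new entries are inserted at the head (list.insert(0, ...)) and, once
# buffer_budget is exceeded, the oldest entry is dropped from the tail
# (list.pop()) and the matching of_count_* counter is bumped (reported
# by the "debug" option).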

tx_queue_list = [] # list of packets which pass through dev_queue_xmit
of_count_tx_queue_list = 0 # overflow count

tx_xmit_list = []  # list of packets which pass through dev_hard_start_xmit
of_count_tx_xmit_list = 0 # overflow count

tx_free_list = []  # list of packets which have been freed

# options
show_tx = 0
show_rx = 0
dev = 0 # name of the device specified by the "dev=" option
debug = 0

# indices of event_info tuple
EINFO_IDX_NAME=   0
EINFO_IDX_CONTEXT=1
EINFO_IDX_CPU=    2
EINFO_IDX_TIME=   3
EINFO_IDX_PID=    4
EINFO_IDX_COMM=   5
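
# A collected event_info tuple begins with these six common fields; e.g. a
# net__netif_rx event is stored as (values illustrative):
#   ('net__netif_rx', context, 0, 1234567890, 4242, 'ping',
#    skbaddr, skblen, 'eth0')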

# Calculate the time interval (msec) from src (nsec) to dst (nsec)
def diff_msec(src, dst):
	return (dst - src) / 1000000.0
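# For example, diff_msec(1000000000, 1002500000) returns 2.5, i.e. the
# two timestamps are 2.5 msec apart.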

# Display the processing stages of a transmitted packet
def print_transmit(hunk):
	if dev != 0 and hunk['dev'].find(dev) < 0:
		return
	print "%7s %5d %6d.%06dsec %12.3fmsec      %12.3fmsec" % \
		(hunk['dev'], hunk['len'],
		nsecs_secs(hunk['queue_t']),
		nsecs_nsecs(hunk['queue_t'])/1000,
		diff_msec(hunk['queue_t'], hunk['xmit_t']),
		diff_msec(hunk['xmit_t'], hunk['free_t']))
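# A rendered line looks like this (values illustrative): 0.014 msec spent
# in the qdisc, 0.002 msec from hard xmit until the skb was freed:
#    eth0    98    123.456789sec        0.014msec             0.002msec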

# Format for displaying rx packet processing
PF_IRQ_ENTRY= "  irq_entry(+%.3fmsec irq=%d:%s)"
PF_SOFT_ENTRY="  softirq_entry(+%.3fmsec)"
PF_NAPI_POLL= "  napi_poll_exit(+%.3fmsec %s)"
PF_JOINT=     "         |"
PF_WJOINT=    "         |            |"
PF_NET_RECV=  "         |---netif_receive_skb(+%.3fmsec skb=%x len=%d)"
PF_NET_RX=    "         |---netif_rx(+%.3fmsec skb=%x)"
PF_CPY_DGRAM= "         |      skb_copy_datagram_iovec(+%.3fmsec %d:%s)"
PF_KFREE_SKB= "         |      kfree_skb(+%.3fmsec location=%x)"
PF_CONS_SKB=  "         |      consume_skb(+%.3fmsec)"
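#
# print_receive() below stitches these formats into a small tree per NET_RX
# softirq, e.g. (values illustrative):
#
#   123.456789sec cpu=0
#     irq_entry(+0.000msec irq=42:eth0)
#            |
#     softirq_entry(+0.012msec)
#            |
#            |---netif_receive_skb(+0.050msec skb=f6ef1e00 len=98)
#            |            |
#            |      skb_copy_datagram_iovec(+0.148msec 4242:ping)
#            |
#     napi_poll_exit(+0.199msec eth0)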

# Display the processing of received packets and the interrupts associated
# with a NET_RX softirq
def print_receive(hunk):
	show_hunk = 0
	irq_list = hunk['irq_list']
	cpu = irq_list[0]['cpu']
	base_t = irq_list[0]['irq_ent_t']
	# check if this hunk should be shown
	if dev != 0:
		for i in range(len(irq_list)):
			if irq_list[i]['name'].find(dev) >= 0:
				show_hunk = 1
				break
	else:
		show_hunk = 1
	if show_hunk == 0:
		return

	print "%d.%06dsec cpu=%d" % \
		(nsecs_secs(base_t), nsecs_nsecs(base_t)/1000, cpu)
	for i in range(len(irq_list)):
		print PF_IRQ_ENTRY % \
			(diff_msec(base_t, irq_list[i]['irq_ent_t']),
			irq_list[i]['irq'], irq_list[i]['name'])
		print PF_JOINT
		irq_event_list = irq_list[i]['event_list']
		for j in range(len(irq_event_list)):
			irq_event = irq_event_list[j]
			if irq_event['event'] == 'netif_rx':
				print PF_NET_RX % \
					(diff_msec(base_t, irq_event['time']),
					irq_event['skbaddr'])
				print PF_JOINT
	print PF_SOFT_ENTRY % \
		diff_msec(base_t, hunk['sirq_ent_t'])
	print PF_JOINT
	event_list = hunk['event_list']
	for i in range(len(event_list)):
		event = event_list[i]
		if event['event_name'] == 'napi_poll':
			print PF_NAPI_POLL % \
			    (diff_msec(base_t, event['event_t']), event['dev'])
			if i == len(event_list) - 1:
				print ""
			else:
				print PF_JOINT
		else:
			print PF_NET_RECV % \
			    (diff_msec(base_t, event['event_t']), event['skbaddr'],
				event['len'])
			if 'comm' in event.keys():
				print PF_WJOINT
				print PF_CPY_DGRAM % \
					(diff_msec(base_t, event['comm_t']),
					event['pid'], event['comm'])
			elif 'handle' in event.keys():
				print PF_WJOINT
				if event['handle'] == "kfree_skb":
					print PF_KFREE_SKB % \
						(diff_msec(base_t,
						event['comm_t']),
						event['location'])
				elif event['handle'] == "consume_skb":
					print PF_CONS_SKB % \
						diff_msec(base_t,
							event['comm_t'])
			print PF_JOINT

def trace_begin():
	global show_tx
	global show_rx
	global dev
	global debug

	for i in range(len(sys.argv)):
		if i == 0:
			continue
		arg = sys.argv[i]
		if arg == 'tx':
			show_tx = 1
		elif arg == 'rx':
			show_rx = 1
		elif arg.startswith('dev='):
			dev = arg[4:]
		elif arg == 'debug':
			debug = 1
	if show_tx == 0 and show_rx == 0:
		show_tx = 1
		show_rx = 1
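
# Note that the "dev=" value is matched as a substring (print_transmit() and
# print_receive() use str.find()), so e.g. "dev=eth" selects eth0, eth1, etc.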

def trace_end():
	# order all events in time
	all_event_list.sort(lambda a,b :cmp(a[EINFO_IDX_TIME],
					    b[EINFO_IDX_TIME]))
	# process all events
	for i in range(len(all_event_list)):
		event_info = all_event_list[i]
		name = event_info[EINFO_IDX_NAME]
		if name == 'irq__softirq_exit':
			handle_irq_softirq_exit(event_info)
		elif name == 'irq__softirq_entry':
			handle_irq_softirq_entry(event_info)
		elif name == 'irq__softirq_raise':
			handle_irq_softirq_raise(event_info)
		elif name == 'irq__irq_handler_entry':
			handle_irq_handler_entry(event_info)
		elif name == 'irq__irq_handler_exit':
			handle_irq_handler_exit(event_info)
		elif name == 'napi__napi_poll':
			handle_napi_poll(event_info)
		elif name == 'net__netif_receive_skb':
			handle_netif_receive_skb(event_info)
		elif name == 'net__netif_rx':
			handle_netif_rx(event_info)
		elif name == 'skb__skb_copy_datagram_iovec':
			handle_skb_copy_datagram_iovec(event_info)
		elif name == 'net__net_dev_queue':
			handle_net_dev_queue(event_info)
		elif name == 'net__net_dev_xmit':
			handle_net_dev_xmit(event_info)
		elif name == 'skb__kfree_skb':
			handle_kfree_skb(event_info)
		elif name == 'skb__consume_skb':
			handle_consume_skb(event_info)
	# display receive hunks
	if show_rx:
		for i in range(len(receive_hunk_list)):
			print_receive(receive_hunk_list[i])
	# display transmit hunks
	if show_tx:
		print "   dev    len      Qdisc        " \
			"       netdevice             free"
		for i in range(len(tx_free_list)):
			print_transmit(tx_free_list[i])
	if debug:
		print "debug buffer status"
		print "----------------------------"
		print "xmit Qdisc:remain:%d overflow:%d" % \
			(len(tx_queue_list), of_count_tx_queue_list)
		print "xmit netdevice:remain:%d overflow:%d" % \
			(len(tx_xmit_list), of_count_tx_xmit_list)
		print "receive:remain:%d overflow:%d" % \
			(len(rx_skb_list), of_count_rx_skb_list)
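
# The handlers below follow perf's "<subsystem>__<event>" naming convention;
# trace_end() above uses the same names to dispatch each buffered event to
# its handle_*() routine.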

# called from perf when it finds a corresponding event
def irq__softirq_entry(name, context, cpu, sec, nsec, pid, comm, callchain, vec):
	if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
		return
	event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
	all_event_list.append(event_info)

def irq__softirq_exit(name, context, cpu, sec, nsec, pid, comm, callchain, vec):
	if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
		return
	event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
	all_event_list.append(event_info)

def irq__softirq_raise(name, context, cpu, sec, nsec, pid, comm, callchain, vec):
	if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
		return
	event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
	all_event_list.append(event_info)

def irq__irq_handler_entry(name, context, cpu, sec, nsec, pid, comm,
			callchain, irq, irq_name):
	event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
			irq, irq_name)
	all_event_list.append(event_info)

def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, callchain, irq, ret):
	event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, irq, ret)
	all_event_list.append(event_info)

def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, callchain, napi,
                    dev_name, work=None, budget=None):
	event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
			napi, dev_name, work, budget)
	all_event_list.append(event_info)

def net__netif_receive_skb(name, context, cpu, sec, nsec, pid, comm, callchain, skbaddr,
			skblen, dev_name):
	event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
			skbaddr, skblen, dev_name)
	all_event_list.append(event_info)

def net__netif_rx(name, context, cpu, sec, nsec, pid, comm, callchain, skbaddr,
			skblen, dev_name):
	event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
			skbaddr, skblen, dev_name)
	all_event_list.append(event_info)

def net__net_dev_queue(name, context, cpu, sec, nsec, pid, comm, callchain,
			skbaddr, skblen, dev_name):
	event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
			skbaddr, skblen, dev_name)
	all_event_list.append(event_info)

def net__net_dev_xmit(name, context, cpu, sec, nsec, pid, comm, callchain,
			skbaddr, skblen, rc, dev_name):
	event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
			skbaddr, skblen, rc ,dev_name)
	all_event_list.append(event_info)

def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm, callchain,
			skbaddr, protocol, location):
	event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
			skbaddr, protocol, location)
	all_event_list.append(event_info)

def skb__consume_skb(name, context, cpu, sec, nsec, pid, comm, callchain, skbaddr):
	event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
			skbaddr)
	all_event_list.append(event_info)

def skb__skb_copy_datagram_iovec(name, context, cpu, sec, nsec, pid, comm, callchain,
	skbaddr, skblen):
	event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
			skbaddr, skblen)
	all_event_list.append(event_info)

def handle_irq_handler_entry(event_info):
	(name, context, cpu, time, pid, comm, irq, irq_name) = event_info
	if cpu not in irq_dic.keys():
		irq_dic[cpu] = []
	irq_record = {'irq':irq, 'name':irq_name, 'cpu':cpu, 'irq_ent_t':time}
	irq_dic[cpu].append(irq_record)

def handle_irq_handler_exit(event_info):
	(name, context, cpu, time, pid, comm, irq, ret) = event_info
	if cpu not in irq_dic.keys():
		return
	irq_record = irq_dic[cpu].pop()
	if irq != irq_record['irq']:
		return
	irq_record.update({'irq_ext_t':time})
	# drop the irq unless it raised a NET_RX softirq (i.e. has an event_list)
	if 'event_list' in irq_record.keys():
		irq_dic[cpu].append(irq_record)

def handle_irq_softirq_raise(event_info):
	(name, context, cpu, time, pid, comm, vec) = event_info
	if cpu not in irq_dic.keys() \
	or len(irq_dic[cpu]) == 0:
		return
	irq_record = irq_dic[cpu].pop()
	if 'event_list' in irq_record.keys():
		irq_event_list = irq_record['event_list']
	else:
		irq_event_list = []
	irq_event_list.append({'time':time, 'event':'sirq_raise'})
	irq_record.update({'event_list':irq_event_list})
	irq_dic[cpu].append(irq_record)

def handle_irq_softirq_entry(event_info):
	(name, context, cpu, time, pid, comm, vec) = event_info
	net_rx_dic[cpu] = {'sirq_ent_t':time, 'event_list':[]}

def handle_irq_softirq_exit(event_info):
	(name, context, cpu, time, pid, comm, vec) = event_info
	irq_list = []
	event_list = 0
	if cpu in irq_dic.keys():
		irq_list = irq_dic[cpu]
		del irq_dic[cpu]
	if cpu in net_rx_dic.keys():
		sirq_ent_t = net_rx_dic[cpu]['sirq_ent_t']
		event_list = net_rx_dic[cpu]['event_list']
		del net_rx_dic[cpu]
	if irq_list == [] or event_list == 0:
		return
	rec_data = {'sirq_ent_t':sirq_ent_t, 'sirq_ext_t':time,
		    'irq_list':irq_list, 'event_list':event_list}
	# merge information related to a NET_RX softirq
	receive_hunk_list.append(rec_data)
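	# The appended hunk is shaped like (illustrative):
	#   {'sirq_ent_t': t0, 'sirq_ext_t': t1,
	#    'irq_list': [{'irq': 42, 'name': 'eth0', 'cpu': 0, 'irq_ent_t': t,
	#                  'irq_ext_t': t, 'event_list': [...]}],
	#    'event_list': [{'event_name': 'netif_receive_skb', ...}, ...]}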

def handle_napi_poll(event_info):
	(name, context, cpu, time, pid, comm, napi, dev_name,
		work, budget) = event_info
	if cpu in net_rx_dic.keys():
		event_list = net_rx_dic[cpu]['event_list']
		rec_data = {'event_name':'napi_poll',
				'dev':dev_name, 'event_t':time,
				'work':work, 'budget':budget}
		event_list.append(rec_data)

def handle_netif_rx(event_info):
	(name, context, cpu, time, pid, comm,
		skbaddr, skblen, dev_name) = event_info
	if cpu not in irq_dic.keys() \
	or len(irq_dic[cpu]) == 0:
		return
	irq_record = irq_dic[cpu].pop()
	if 'event_list' in irq_record.keys():
		irq_event_list = irq_record['event_list']
	else:
		irq_event_list = []
	irq_event_list.append({'time':time, 'event':'netif_rx',
		'skbaddr':skbaddr, 'skblen':skblen, 'dev_name':dev_name})
	irq_record.update({'event_list':irq_event_list})
	irq_dic[cpu].append(irq_record)

def handle_netif_receive_skb(event_info):
	global of_count_rx_skb_list

	(name, context, cpu, time, pid, comm,
		skbaddr, skblen, dev_name) = event_info
	if cpu in net_rx_dic.keys():
		rec_data = {'event_name':'netif_receive_skb',
			    'event_t':time, 'skbaddr':skbaddr, 'len':skblen}
		event_list = net_rx_dic[cpu]['event_list']
		event_list.append(rec_data)
		rx_skb_list.insert(0, rec_data)
		if len(rx_skb_list) > buffer_budget:
			rx_skb_list.pop()
			of_count_rx_skb_list += 1

def handle_net_dev_queue(event_info):
	global of_count_tx_queue_list

	(name, context, cpu, time, pid, comm,
		skbaddr, skblen, dev_name) = event_info
	skb = {'dev':dev_name, 'skbaddr':skbaddr, 'len':skblen, 'queue_t':time}
	tx_queue_list.insert(0, skb)
	if len(tx_queue_list) > buffer_budget:
		tx_queue_list.pop()
		of_count_tx_queue_list += 1

def handle_net_dev_xmit(event_info):
	global of_count_tx_xmit_list

	(name, context, cpu, time, pid, comm,
		skbaddr, skblen, rc, dev_name) = event_info
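	# rc is the driver's return value; only NETDEV_TX_OK (0) means the skb
	# was actually handed to the hardware, so only then is it promoted from
	# the qdisc list to the xmit list (matched by skb address).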
	if rc == 0: # NETDEV_TX_OK
		for i in range(len(tx_queue_list)):
			skb = tx_queue_list[i]
			if skb['skbaddr'] == skbaddr:
				skb['xmit_t'] = time
				tx_xmit_list.insert(0, skb)
				del tx_queue_list[i]
				if len(tx_xmit_list) > buffer_budget:
					tx_xmit_list.pop()
					of_count_tx_xmit_list += 1
				return

def handle_kfree_skb(event_info):
	(name, context, cpu, time, pid, comm,
		skbaddr, protocol, location) = event_info
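	# A freed skb is searched for in three places, in order: still queued
	# in the qdisc (it was dropped before xmit, so just forget it), already
	# hard-xmitted (this free completes the tx lifetime and the record moves
	# to tx_free_list), or a received packet freed by its consumer.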
	for i in range(len(tx_queue_list)):
		skb = tx_queue_list[i]
		if skb['skbaddr'] == skbaddr:
			del tx_queue_list[i]
			return
	for i in range(len(tx_xmit_list)):
		skb = tx_xmit_list[i]
		if skb['skbaddr'] == skbaddr:
			skb['free_t'] = time
			tx_free_list.append(skb)
			del tx_xmit_list[i]
			return
	for i in range(len(rx_skb_list)):
		rec_data = rx_skb_list[i]
		if rec_data['skbaddr'] == skbaddr:
			rec_data.update({'handle':"kfree_skb",
					'comm':comm, 'pid':pid, 'comm_t':time})
			del rx_skb_list[i]
			return

def handle_consume_skb(event_info):
	(name, context, cpu, time, pid, comm, skbaddr) = event_info
	for i in range(len(tx_xmit_list)):
		skb = tx_xmit_list[i]
		if skb['skbaddr'] == skbaddr:
			skb['free_t'] = time
			tx_free_list.append(skb)
			del tx_xmit_list[i]
			return

def handle_skb_copy_datagram_iovec(event_info):
	(name, context, cpu, time, pid, comm, skbaddr, skblen) = event_info
	for i in range(len(rx_skb_list)):
		rec_data = rx_skb_list[i]
		if skbaddr == rec_data['skbaddr']:
			rec_data.update({'handle':"skb_copy_datagram_iovec",
					'comm':comm, 'pid':pid, 'comm_t':time})
			del rx_skb_list[i]
			return
lass="p">(queue); u32 len; len = le32_to_cpu(hdr->plen) - hdr->hlen - (hdr->flags & NVME_TCP_F_HDGST ? digest_len : 0); if (unlikely(len && !(hdr->flags & NVME_TCP_F_DDGST))) { pr_err("queue %d: data digest flag is cleared\n", queue->idx); return -EPROTO; } return 0; } static void nvmet_tcp_unmap_pdu_iovec(struct nvmet_tcp_cmd *cmd) { struct scatterlist *sg; int i; sg = &cmd->req.sg[cmd->sg_idx]; for (i = 0; i < cmd->nr_mapped; i++) kunmap(sg_page(&sg[i])); } static void nvmet_tcp_map_pdu_iovec(struct nvmet_tcp_cmd *cmd) { struct kvec *iov = cmd->iov; struct scatterlist *sg; u32 length, offset, sg_offset; length = cmd->pdu_len; cmd->nr_mapped = DIV_ROUND_UP(length, PAGE_SIZE); offset = cmd->rbytes_done; cmd->sg_idx = DIV_ROUND_UP(offset, PAGE_SIZE); sg_offset = offset % PAGE_SIZE; sg = &cmd->req.sg[cmd->sg_idx]; while (length) { u32 iov_len = min_t(u32, length, sg->length - sg_offset); iov->iov_base = kmap(sg_page(sg)) + sg->offset + sg_offset; iov->iov_len = iov_len; length -= iov_len; sg = sg_next(sg); iov++; } iov_iter_kvec(&cmd->recv_msg.msg_iter, READ, cmd->iov, cmd->nr_mapped, cmd->pdu_len); } static void nvmet_tcp_fatal_error(struct nvmet_tcp_queue *queue) { queue->rcv_state = NVMET_TCP_RECV_ERR; if (queue->nvme_sq.ctrl) nvmet_ctrl_fatal_error(queue->nvme_sq.ctrl); else kernel_sock_shutdown(queue->sock, SHUT_RDWR); } static int nvmet_tcp_map_data(struct nvmet_tcp_cmd *cmd) { struct nvme_sgl_desc *sgl = &cmd->req.cmd->common.dptr.sgl; u32 len = le32_to_cpu(sgl->length); if (!cmd->req.data_len) return 0; if (sgl->type == ((NVME_SGL_FMT_DATA_DESC << 4) | NVME_SGL_FMT_OFFSET)) { if (!nvme_is_write(cmd->req.cmd)) return NVME_SC_INVALID_FIELD | NVME_SC_DNR; if (len > cmd->req.port->inline_data_size) return NVME_SC_SGL_INVALID_OFFSET | NVME_SC_DNR; cmd->pdu_len = len; } cmd->req.transfer_len += len; cmd->req.sg = sgl_alloc(len, GFP_KERNEL, &cmd->req.sg_cnt); if (!cmd->req.sg) return NVME_SC_INTERNAL; cmd->cur_sg = cmd->req.sg; if (nvmet_tcp_has_data_in(cmd)) { cmd->iov = kmalloc_array(cmd->req.sg_cnt, sizeof(*cmd->iov), GFP_KERNEL); if (!cmd->iov) goto err; } return 0; err: sgl_free(cmd->req.sg); return NVME_SC_INTERNAL; } static void nvmet_tcp_ddgst(struct ahash_request *hash, struct nvmet_tcp_cmd *cmd) { ahash_request_set_crypt(hash, cmd->req.sg, (void *)&cmd->exp_ddgst, cmd->req.transfer_len); crypto_ahash_digest(hash); } static void nvmet_setup_c2h_data_pdu(struct nvmet_tcp_cmd *cmd) { struct nvme_tcp_data_pdu *pdu = cmd->data_pdu; struct nvmet_tcp_queue *queue = cmd->queue; u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue); u8 ddgst = nvmet_tcp_ddgst_len(cmd->queue); cmd->offset = 0; cmd->state = NVMET_TCP_SEND_DATA_PDU; pdu->hdr.type = nvme_tcp_c2h_data; pdu->hdr.flags = NVME_TCP_F_DATA_LAST | (queue->nvme_sq.sqhd_disabled ? 
NVME_TCP_F_DATA_SUCCESS : 0); pdu->hdr.hlen = sizeof(*pdu); pdu->hdr.pdo = pdu->hdr.hlen + hdgst; pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst + cmd->req.transfer_len + ddgst); pdu->command_id = cmd->req.cqe->command_id; pdu->data_length = cpu_to_le32(cmd->req.transfer_len); pdu->data_offset = cpu_to_le32(cmd->wbytes_done); if (queue->data_digest) { pdu->hdr.flags |= NVME_TCP_F_DDGST; nvmet_tcp_ddgst(queue->snd_hash, cmd); } if (cmd->queue->hdr_digest) { pdu->hdr.flags |= NVME_TCP_F_HDGST; nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu)); } } static void nvmet_setup_r2t_pdu(struct nvmet_tcp_cmd *cmd) { struct nvme_tcp_r2t_pdu *pdu = cmd->r2t_pdu; struct nvmet_tcp_queue *queue = cmd->queue; u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue); cmd->offset = 0; cmd->state = NVMET_TCP_SEND_R2T; pdu->hdr.type = nvme_tcp_r2t; pdu->hdr.flags = 0; pdu->hdr.hlen = sizeof(*pdu); pdu->hdr.pdo = 0; pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst); pdu->command_id = cmd->req.cmd->common.command_id; pdu->ttag = nvmet_tcp_cmd_tag(cmd->queue, cmd); pdu->r2t_length = cpu_to_le32(cmd->req.transfer_len - cmd->rbytes_done); pdu->r2t_offset = cpu_to_le32(cmd->rbytes_done); if (cmd->queue->hdr_digest) { pdu->hdr.flags |= NVME_TCP_F_HDGST; nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu)); } } static void nvmet_setup_response_pdu(struct nvmet_tcp_cmd *cmd) { struct nvme_tcp_rsp_pdu *pdu = cmd->rsp_pdu; struct nvmet_tcp_queue *queue = cmd->queue; u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue); cmd->offset = 0; cmd->state = NVMET_TCP_SEND_RESPONSE; pdu->hdr.type = nvme_tcp_rsp; pdu->hdr.flags = 0; pdu->hdr.hlen = sizeof(*pdu); pdu->hdr.pdo = 0; pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst); if (cmd->queue->hdr_digest) { pdu->hdr.flags |= NVME_TCP_F_HDGST; nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu)); } } static void nvmet_tcp_process_resp_list(struct nvmet_tcp_queue *queue) { struct llist_node *node; node = llist_del_all(&queue->resp_list); if (!node) return; while (node) { struct nvmet_tcp_cmd *cmd = llist_entry(node, struct nvmet_tcp_cmd, lentry); list_add(&cmd->entry, &queue->resp_send_list); node = node->next; queue->send_list_len++; } } static struct nvmet_tcp_cmd *nvmet_tcp_fetch_cmd(struct nvmet_tcp_queue *queue) { queue->snd_cmd = list_first_entry_or_null(&queue->resp_send_list, struct nvmet_tcp_cmd, entry); if (!queue->snd_cmd) { nvmet_tcp_process_resp_list(queue); queue->snd_cmd = list_first_entry_or_null(&queue->resp_send_list, struct nvmet_tcp_cmd, entry); if (unlikely(!queue->snd_cmd)) return NULL; } list_del_init(&queue->snd_cmd->entry); queue->send_list_len--; if (nvmet_tcp_need_data_out(queue->snd_cmd)) nvmet_setup_c2h_data_pdu(queue->snd_cmd); else if (nvmet_tcp_need_data_in(queue->snd_cmd)) nvmet_setup_r2t_pdu(queue->snd_cmd); else nvmet_setup_response_pdu(queue->snd_cmd); return queue->snd_cmd; } static void nvmet_tcp_queue_response(struct nvmet_req *req) { struct nvmet_tcp_cmd *cmd = container_of(req, struct nvmet_tcp_cmd, req); struct nvmet_tcp_queue *queue = cmd->queue; llist_add(&cmd->lentry, &queue->resp_list); queue_work_on(cmd->queue->cpu, nvmet_tcp_wq, &cmd->queue->io_work); } static int nvmet_try_send_data_pdu(struct nvmet_tcp_cmd *cmd) { u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue); int left = sizeof(*cmd->data_pdu) - cmd->offset + hdgst; int ret; ret = kernel_sendpage(cmd->queue->sock, virt_to_page(cmd->data_pdu), offset_in_page(cmd->data_pdu) + cmd->offset, left, MSG_DONTWAIT | MSG_MORE); if (ret <= 0) return ret; cmd->offset += ret; left -= ret; if (left) return -EAGAIN; 
cmd->state = NVMET_TCP_SEND_DATA; cmd->offset = 0; return 1; } static int nvmet_try_send_data(struct nvmet_tcp_cmd *cmd) { struct nvmet_tcp_queue *queue = cmd->queue; int ret; while (cmd->cur_sg) { struct page *page = sg_page(cmd->cur_sg); u32 left = cmd->cur_sg->length - cmd->offset; ret = kernel_sendpage(cmd->queue->sock, page, cmd->offset, left, MSG_DONTWAIT | MSG_MORE); if (ret <= 0) return ret; cmd->offset += ret; cmd->wbytes_done += ret; /* Done with sg?*/ if (cmd->offset == cmd->cur_sg->length) { cmd->cur_sg = sg_next(cmd->cur_sg); cmd->offset = 0; } } if (queue->data_digest) { cmd->state = NVMET_TCP_SEND_DDGST; cmd->offset = 0; } else { if (queue->nvme_sq.sqhd_disabled) { cmd->queue->snd_cmd = NULL; nvmet_tcp_put_cmd(cmd); } else { nvmet_setup_response_pdu(cmd); } } if (queue->nvme_sq.sqhd_disabled) { kfree(cmd->iov); sgl_free(cmd->req.sg); } return 1; } static int nvmet_try_send_response(struct nvmet_tcp_cmd *cmd, bool last_in_batch) { u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue); int left = sizeof(*cmd->rsp_pdu) - cmd->offset + hdgst; int flags = MSG_DONTWAIT; int ret; if (!last_in_batch && cmd->queue->send_list_len) flags |= MSG_MORE; else flags |= MSG_EOR; ret = kernel_sendpage(cmd->queue->sock, virt_to_page(cmd->rsp_pdu), offset_in_page(cmd->rsp_pdu) + cmd->offset, left, flags); if (ret <= 0) return ret; cmd->offset += ret; left -= ret; if (left) return -EAGAIN; kfree(cmd->iov); sgl_free(cmd->req.sg); cmd->queue->snd_cmd = NULL; nvmet_tcp_put_cmd(cmd); return 1; } static int nvmet_try_send_r2t(struct nvmet_tcp_cmd *cmd, bool last_in_batch) { u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue); int left = sizeof(*cmd->r2t_pdu) - cmd->offset + hdgst; int flags = MSG_DONTWAIT; int ret; if (!last_in_batch && cmd->queue->send_list_len) flags |= MSG_MORE; else flags |= MSG_EOR; ret = kernel_sendpage(cmd->queue->sock, virt_to_page(cmd->r2t_pdu), offset_in_page(cmd->r2t_pdu) + cmd->offset, left, flags); if (ret <= 0) return ret; cmd->offset += ret; left -= ret; if (left) return -EAGAIN; cmd->queue->snd_cmd = NULL; return 1; } static int nvmet_try_send_ddgst(struct nvmet_tcp_cmd *cmd) { struct nvmet_tcp_queue *queue = cmd->queue; struct msghdr msg = { .msg_flags = MSG_DONTWAIT }; struct kvec iov = { .iov_base = &cmd->exp_ddgst + cmd->offset, .iov_len = NVME_TCP_DIGEST_LENGTH - cmd->offset }; int ret; ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len); if (unlikely(ret <= 0)) return ret; cmd->offset += ret; if (queue->nvme_sq.sqhd_disabled) { cmd->queue->snd_cmd = NULL; nvmet_tcp_put_cmd(cmd); } else { nvmet_setup_response_pdu(cmd); } return 1; } static int nvmet_tcp_try_send_one(struct nvmet_tcp_queue *queue, bool last_in_batch) { struct nvmet_tcp_cmd *cmd = queue->snd_cmd; int ret = 0; if (!cmd || queue->state == NVMET_TCP_Q_DISCONNECTING) { cmd = nvmet_tcp_fetch_cmd(queue); if (unlikely(!cmd)) return 0; } if (cmd->state == NVMET_TCP_SEND_DATA_PDU) { ret = nvmet_try_send_data_pdu(cmd); if (ret <= 0) goto done_send; } if (cmd->state == NVMET_TCP_SEND_DATA) { ret = nvmet_try_send_data(cmd); if (ret <= 0) goto done_send; } if (cmd->state == NVMET_TCP_SEND_DDGST) { ret = nvmet_try_send_ddgst(cmd); if (ret <= 0) goto done_send; } if (cmd->state == NVMET_TCP_SEND_R2T) { ret = nvmet_try_send_r2t(cmd, last_in_batch); if (ret <= 0) goto done_send; } if (cmd->state == NVMET_TCP_SEND_RESPONSE) ret = nvmet_try_send_response(cmd, last_in_batch); done_send: if (ret < 0) { if (ret == -EAGAIN) return 0; return ret; } return 1; } static int nvmet_tcp_try_send(struct nvmet_tcp_queue *queue, int budget, 
int *sends) { int i, ret = 0; for (i = 0; i < budget; i++) { ret = nvmet_tcp_try_send_one(queue, i == budget - 1); if (ret <= 0) break; (*sends)++; } return ret; } static void nvmet_prepare_receive_pdu(struct nvmet_tcp_queue *queue) { queue->offset = 0; queue->left = sizeof(struct nvme_tcp_hdr); queue->cmd = NULL; queue->rcv_state = NVMET_TCP_RECV_PDU; } static void nvmet_tcp_free_crypto(struct nvmet_tcp_queue *queue) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash); ahash_request_free(queue->rcv_hash); ahash_request_free(queue->snd_hash); crypto_free_ahash(tfm); } static int nvmet_tcp_alloc_crypto(struct nvmet_tcp_queue *queue) { struct crypto_ahash *tfm; tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC); if (IS_ERR(tfm)) return PTR_ERR(tfm); queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL); if (!queue->snd_hash) goto free_tfm; ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL); queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL); if (!queue->rcv_hash) goto free_snd_hash; ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL); return 0; free_snd_hash: ahash_request_free(queue->snd_hash); free_tfm: crypto_free_ahash(tfm); return -ENOMEM; } static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue) { struct nvme_tcp_icreq_pdu *icreq = &queue->pdu.icreq; struct nvme_tcp_icresp_pdu *icresp = &queue->pdu.icresp; struct msghdr msg = {}; struct kvec iov; int ret; if (le32_to_cpu(icreq->hdr.plen) != sizeof(struct nvme_tcp_icreq_pdu)) { pr_err("bad nvme-tcp pdu length (%d)\n", le32_to_cpu(icreq->hdr.plen)); nvmet_tcp_fatal_error(queue); } if (icreq->pfv != NVME_TCP_PFV_1_0) { pr_err("queue %d: bad pfv %d\n", queue->idx, icreq->pfv); return -EPROTO; } if (icreq->hpda != 0) { pr_err("queue %d: unsupported hpda %d\n", queue->idx, icreq->hpda); return -EPROTO; } queue->hdr_digest = !!(icreq->digest & NVME_TCP_HDR_DIGEST_ENABLE); queue->data_digest = !!(icreq->digest & NVME_TCP_DATA_DIGEST_ENABLE); if (queue->hdr_digest || queue->data_digest) { ret = nvmet_tcp_alloc_crypto(queue); if (ret) return ret; } memset(icresp, 0, sizeof(*icresp)); icresp->hdr.type = nvme_tcp_icresp; icresp->hdr.hlen = sizeof(*icresp); icresp->hdr.pdo = 0; icresp->hdr.plen = cpu_to_le32(icresp->hdr.hlen); icresp->pfv = cpu_to_le16(NVME_TCP_PFV_1_0); icresp->maxdata = cpu_to_le32(0xffff); /* FIXME: support r2t */ icresp->cpda = 0; if (queue->hdr_digest) icresp->digest |= NVME_TCP_HDR_DIGEST_ENABLE; if (queue->data_digest) icresp->digest |= NVME_TCP_DATA_DIGEST_ENABLE; iov.iov_base = icresp; iov.iov_len = sizeof(*icresp); ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len); if (ret < 0) goto free_crypto; queue->state = NVMET_TCP_Q_LIVE; nvmet_prepare_receive_pdu(queue); return 0; free_crypto: if (queue->hdr_digest || queue->data_digest) nvmet_tcp_free_crypto(queue); return ret; } static void nvmet_tcp_handle_req_failure(struct nvmet_tcp_queue *queue, struct nvmet_tcp_cmd *cmd, struct nvmet_req *req) { int ret; /* recover the expected data transfer length */ req->data_len = le32_to_cpu(req->cmd->common.dptr.sgl.length); if (!nvme_is_write(cmd->req.cmd) || req->data_len > cmd->req.port->inline_data_size) { nvmet_prepare_receive_pdu(queue); return; } ret = nvmet_tcp_map_data(cmd); if (unlikely(ret)) { pr_err("queue %d: failed to map data\n", queue->idx); nvmet_tcp_fatal_error(queue); return; } queue->rcv_state = NVMET_TCP_RECV_DATA; nvmet_tcp_map_pdu_iovec(cmd); cmd->flags |= NVMET_TCP_F_INIT_FAILED; } static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue) { 
struct nvme_tcp_data_pdu *data = &queue->pdu.data; struct nvmet_tcp_cmd *cmd; cmd = &queue->cmds[data->ttag]; if (le32_to_cpu(data->data_offset) != cmd->rbytes_done) { pr_err("ttag %u unexpected data offset %u (expected %u)\n", data->ttag, le32_to_cpu(data->data_offset), cmd->rbytes_done); /* FIXME: use path and transport errors */ nvmet_req_complete(&cmd->req, NVME_SC_INVALID_FIELD | NVME_SC_DNR); return -EPROTO; } cmd->pdu_len = le32_to_cpu(data->data_length); cmd->pdu_recv = 0; nvmet_tcp_map_pdu_iovec(cmd); queue->cmd = cmd; queue->rcv_state = NVMET_TCP_RECV_DATA; return 0; } static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue) { struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr; struct nvme_command *nvme_cmd = &queue->pdu.cmd.cmd; struct nvmet_req *req; int ret; if (unlikely(queue->state == NVMET_TCP_Q_CONNECTING)) { if (hdr->type != nvme_tcp_icreq) { pr_err("unexpected pdu type (%d) before icreq\n", hdr->type); nvmet_tcp_fatal_error(queue); return -EPROTO; } return nvmet_tcp_handle_icreq(queue); } if (hdr->type == nvme_tcp_h2c_data) { ret = nvmet_tcp_handle_h2c_data_pdu(queue); if (unlikely(ret)) return ret; return 0; } queue->cmd = nvmet_tcp_get_cmd(queue); if (unlikely(!queue->cmd)) { /* This should never happen */ pr_err("queue %d: out of commands (%d) send_list_len: %d, opcode: %d", queue->idx, queue->nr_cmds, queue->send_list_len, nvme_cmd->common.opcode); nvmet_tcp_fatal_error(queue); return -ENOMEM; } req = &queue->cmd->req; memcpy(req->cmd, nvme_cmd, sizeof(*nvme_cmd)); if (unlikely(!nvmet_req_init(req, &queue->nvme_cq, &queue->nvme_sq, &nvmet_tcp_ops))) { pr_err("failed cmd %p id %d opcode %d, data_len: %d\n", req->cmd, req->cmd->common.command_id, req->cmd->common.opcode, le32_to_cpu(req->cmd->common.dptr.sgl.length)); nvmet_tcp_handle_req_failure(queue, queue->cmd, req); return -EAGAIN; } ret = nvmet_tcp_map_data(queue->cmd); if (unlikely(ret)) { pr_err("queue %d: failed to map data\n", queue->idx); if (nvmet_tcp_has_inline_data(queue->cmd)) nvmet_tcp_fatal_error(queue); else nvmet_req_complete(req, ret); ret = -EAGAIN; goto out; } if (nvmet_tcp_need_data_in(queue->cmd)) { if (nvmet_tcp_has_inline_data(queue->cmd)) { queue->rcv_state = NVMET_TCP_RECV_DATA; nvmet_tcp_map_pdu_iovec(queue->cmd); return 0; } /* send back R2T */ nvmet_tcp_queue_response(&queue->cmd->req); goto out; } nvmet_req_execute(&queue->cmd->req); out: nvmet_prepare_receive_pdu(queue); return ret; } static const u8 nvme_tcp_pdu_sizes[] = { [nvme_tcp_icreq] = sizeof(struct nvme_tcp_icreq_pdu), [nvme_tcp_cmd] = sizeof(struct nvme_tcp_cmd_pdu), [nvme_tcp_h2c_data] = sizeof(struct nvme_tcp_data_pdu), }; static inline u8 nvmet_tcp_pdu_size(u8 type) { size_t idx = type; return (idx < ARRAY_SIZE(nvme_tcp_pdu_sizes) && nvme_tcp_pdu_sizes[idx]) ? 
nvme_tcp_pdu_sizes[idx] : 0; } static inline bool nvmet_tcp_pdu_valid(u8 type) { switch (type) { case nvme_tcp_icreq: case nvme_tcp_cmd: case nvme_tcp_h2c_data: /* fallthru */ return true; } return false; } static int nvmet_tcp_try_recv_pdu(struct nvmet_tcp_queue *queue) { struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr; int len; struct kvec iov; struct msghdr msg = { .msg_flags = MSG_DONTWAIT }; recv: iov.iov_base = (void *)&queue->pdu + queue->offset; iov.iov_len = queue->left; len = kernel_recvmsg(queue->sock, &msg, &iov, 1, iov.iov_len, msg.msg_flags); if (unlikely(len < 0)) return len; queue->offset += len; queue->left -= len; if (queue->left) return -EAGAIN; if (queue->offset == sizeof(struct nvme_tcp_hdr)) { u8 hdgst = nvmet_tcp_hdgst_len(queue); if (unlikely(!nvmet_tcp_pdu_valid(hdr->type))) { pr_err("unexpected pdu type %d\n", hdr->type); nvmet_tcp_fatal_error(queue); return -EIO; } if (unlikely(hdr->hlen != nvmet_tcp_pdu_size(hdr->type))) { pr_err("pdu %d bad hlen %d\n", hdr->type, hdr->hlen); return -EIO; } queue->left = hdr->hlen - queue->offset + hdgst; goto recv; } if (queue->hdr_digest && nvmet_tcp_verify_hdgst(queue, &queue->pdu, queue->offset)) { nvmet_tcp_fatal_error(queue); /* fatal */ return -EPROTO; } if (queue->data_digest && nvmet_tcp_check_ddgst(queue, &queue->pdu)) { nvmet_tcp_fatal_error(queue); /* fatal */ return -EPROTO; } return nvmet_tcp_done_recv_pdu(queue); } static void nvmet_tcp_prep_recv_ddgst(struct nvmet_tcp_cmd *cmd) { struct nvmet_tcp_queue *queue = cmd->queue; nvmet_tcp_ddgst(queue->rcv_hash, cmd); queue->offset = 0; queue->left = NVME_TCP_DIGEST_LENGTH; queue->rcv_state = NVMET_TCP_RECV_DDGST; } static int nvmet_tcp_try_recv_data(struct nvmet_tcp_queue *queue) { struct nvmet_tcp_cmd *cmd = queue->cmd; int ret; while (msg_data_left(&cmd->recv_msg)) { ret = sock_recvmsg(cmd->queue->sock, &cmd->recv_msg, cmd->recv_msg.msg_flags); if (ret <= 0) return ret; cmd->pdu_recv += ret; cmd->rbytes_done += ret; } nvmet_tcp_unmap_pdu_iovec(cmd); if (!(cmd->flags & NVMET_TCP_F_INIT_FAILED) && cmd->rbytes_done == cmd->req.transfer_len) { if (queue->data_digest) { nvmet_tcp_prep_recv_ddgst(cmd); return 0; } nvmet_req_execute(&cmd->req); } nvmet_prepare_receive_pdu(queue); return 0; } static int nvmet_tcp_try_recv_ddgst(struct nvmet_tcp_queue *queue) { struct nvmet_tcp_cmd *cmd = queue->cmd; int ret; struct msghdr msg = { .msg_flags = MSG_DONTWAIT }; struct kvec iov = { .iov_base = (void *)&cmd->recv_ddgst + queue->offset, .iov_len = queue->left }; ret = kernel_recvmsg(queue->sock, &msg, &iov, 1, iov.iov_len, msg.msg_flags); if (unlikely(ret < 0)) return ret; queue->offset += ret; queue->left -= ret; if (queue->left) return -EAGAIN; if (queue->data_digest && cmd->exp_ddgst != cmd->recv_ddgst) { pr_err("queue %d: cmd %d pdu (%d) data digest error: recv %#x expected %#x\n", queue->idx, cmd->req.cmd->common.command_id, queue->pdu.cmd.hdr.type, le32_to_cpu(cmd->recv_ddgst), le32_to_cpu(cmd->exp_ddgst)); nvmet_tcp_finish_cmd(cmd); nvmet_tcp_fatal_error(queue); ret = -EPROTO; goto out; } if (!(cmd->flags & NVMET_TCP_F_INIT_FAILED) && cmd->rbytes_done == cmd->req.transfer_len) nvmet_req_execute(&cmd->req); ret = 0; out: nvmet_prepare_receive_pdu(queue); return ret; } static int nvmet_tcp_try_recv_one(struct nvmet_tcp_queue *queue) { int result = 0; if (unlikely(queue->rcv_state == NVMET_TCP_RECV_ERR)) return 0; if (queue->rcv_state == NVMET_TCP_RECV_PDU) { result = nvmet_tcp_try_recv_pdu(queue); if (result != 0) goto done_recv; } if (queue->rcv_state == NVMET_TCP_RECV_DATA) 
{ result = nvmet_tcp_try_recv_data(queue); if (result != 0) goto done_recv; } if (queue->rcv_state == NVMET_TCP_RECV_DDGST) { result = nvmet_tcp_try_recv_ddgst(queue); if (result != 0) goto done_recv; } done_recv: if (result < 0) { if (result == -EAGAIN) return 0; return result; } return 1; } static int nvmet_tcp_try_recv(struct nvmet_tcp_queue *queue, int budget, int *recvs) { int i, ret = 0; for (i = 0; i < budget; i++) { ret = nvmet_tcp_try_recv_one(queue); if (ret <= 0) break; (*recvs)++; } return ret; } static void nvmet_tcp_schedule_release_queue(struct nvmet_tcp_queue *queue) { spin_lock(&queue->state_lock); if (queue->state != NVMET_TCP_Q_DISCONNECTING) { queue->state = NVMET_TCP_Q_DISCONNECTING; schedule_work(&queue->release_work); } spin_unlock(&queue->state_lock); } static void nvmet_tcp_io_work(struct work_struct *w) { struct nvmet_tcp_queue *queue = container_of(w, struct nvmet_tcp_queue, io_work); bool pending; int ret, ops = 0; do { pending = false; ret = nvmet_tcp_try_recv(queue, NVMET_TCP_RECV_BUDGET, &ops); if (ret > 0) { pending = true; } else if (ret < 0) { if (ret == -EPIPE || ret == -ECONNRESET) kernel_sock_shutdown(queue->sock, SHUT_RDWR); else nvmet_tcp_fatal_error(queue); return; } ret = nvmet_tcp_try_send(queue, NVMET_TCP_SEND_BUDGET, &ops); if (ret > 0) { /* transmitted message/data */ pending = true; } else if (ret < 0) { if (ret == -EPIPE || ret == -ECONNRESET) kernel_sock_shutdown(queue->sock, SHUT_RDWR); else nvmet_tcp_fatal_error(queue); return; } } while (pending && ops < NVMET_TCP_IO_WORK_BUDGET); /* * We exahusted our budget, requeue our selves */ if (pending) queue_work_on(queue->cpu, nvmet_tcp_wq, &queue->io_work); } static int nvmet_tcp_alloc_cmd(struct nvmet_tcp_queue *queue, struct nvmet_tcp_cmd *c) { u8 hdgst = nvmet_tcp_hdgst_len(queue); c->queue = queue; c->req.port = queue->port->nport; c->cmd_pdu = page_frag_alloc(&queue->pf_cache, sizeof(*c->cmd_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO); if (!c->cmd_pdu) return -ENOMEM; c->req.cmd = &c->cmd_pdu->cmd; c->rsp_pdu = page_frag_alloc(&queue->pf_cache, sizeof(*c->rsp_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO); if (!c->rsp_pdu) goto out_free_cmd; c->req.cqe = &c->rsp_pdu->cqe; c->data_pdu = page_frag_alloc(&queue->pf_cache, sizeof(*c->data_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO); if (!c->data_pdu) goto out_free_rsp; c->r2t_pdu = page_frag_alloc(&queue->pf_cache, sizeof(*c->r2t_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO); if (!c->r2t_pdu) goto out_free_data; c->recv_msg.msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL; list_add_tail(&c->entry, &queue->free_list); return 0; out_free_data: page_frag_free(c->data_pdu); out_free_rsp: page_frag_free(c->rsp_pdu); out_free_cmd: page_frag_free(c->cmd_pdu); return -ENOMEM; } static void nvmet_tcp_free_cmd(struct nvmet_tcp_cmd *c) { page_frag_free(c->r2t_pdu); page_frag_free(c->data_pdu); page_frag_free(c->rsp_pdu); page_frag_free(c->cmd_pdu); } static int nvmet_tcp_alloc_cmds(struct nvmet_tcp_queue *queue) { struct nvmet_tcp_cmd *cmds; int i, ret = -EINVAL, nr_cmds = queue->nr_cmds; cmds = kcalloc(nr_cmds, sizeof(struct nvmet_tcp_cmd), GFP_KERNEL); if (!cmds) goto out; for (i = 0; i < nr_cmds; i++) { ret = nvmet_tcp_alloc_cmd(queue, cmds + i); if (ret) goto out_free; } queue->cmds = cmds; return 0; out_free: while (--i >= 0) nvmet_tcp_free_cmd(cmds + i); kfree(cmds); out: return ret; } static void nvmet_tcp_free_cmds(struct nvmet_tcp_queue *queue) { struct nvmet_tcp_cmd *cmds = queue->cmds; int i; for (i = 0; i < queue->nr_cmds; i++) nvmet_tcp_free_cmd(cmds + i); 
nvmet_tcp_free_cmd(&queue->connect); kfree(cmds); } static void nvmet_tcp_restore_socket_callbacks(struct nvmet_tcp_queue *queue) { struct socket *sock = queue->sock; write_lock_bh(&sock->sk->sk_callback_lock); sock->sk->sk_data_ready = queue->data_ready; sock->sk->sk_state_change = queue->state_change; sock->sk->sk_write_space = queue->write_space; sock->sk->sk_user_data = NULL; write_unlock_bh(&sock->sk->sk_callback_lock); } static void nvmet_tcp_finish_cmd(struct nvmet_tcp_cmd *cmd) { nvmet_req_uninit(&cmd->req); nvmet_tcp_unmap_pdu_iovec(cmd); kfree(cmd->iov); sgl_free(cmd->req.sg); } static void nvmet_tcp_uninit_data_in_cmds(struct nvmet_tcp_queue *queue) { struct nvmet_tcp_cmd *cmd = queue->cmds; int i; for (i = 0; i < queue->nr_cmds; i++, cmd++) { if (nvmet_tcp_need_data_in(cmd)) nvmet_tcp_finish_cmd(cmd); } if (!queue->nr_cmds && nvmet_tcp_need_data_in(&queue->connect)) { /* failed in connect */ nvmet_tcp_finish_cmd(&queue->connect); } } static void nvmet_tcp_release_queue_work(struct work_struct *w) { struct nvmet_tcp_queue *queue = container_of(w, struct nvmet_tcp_queue, release_work); mutex_lock(&nvmet_tcp_queue_mutex); list_del_init(&queue->queue_list); mutex_unlock(&nvmet_tcp_queue_mutex); nvmet_tcp_restore_socket_callbacks(queue); flush_work(&queue->io_work); nvmet_tcp_uninit_data_in_cmds(queue); nvmet_sq_destroy(&queue->nvme_sq); cancel_work_sync(&queue->io_work); sock_release(queue->sock); nvmet_tcp_free_cmds(queue); if (queue->hdr_digest || queue->data_digest) nvmet_tcp_free_crypto(queue); ida_simple_remove(&nvmet_tcp_queue_ida, queue->idx); kfree(queue); } static void nvmet_tcp_data_ready(struct sock *sk) { struct nvmet_tcp_queue *queue; read_lock_bh(&sk->sk_callback_lock); queue = sk->sk_user_data; if (likely(queue)) queue_work_on(queue->cpu, nvmet_tcp_wq, &queue->io_work); read_unlock_bh(&sk->sk_callback_lock); } static void nvmet_tcp_write_space(struct sock *sk) { struct nvmet_tcp_queue *queue; read_lock_bh(&sk->sk_callback_lock); queue = sk->sk_user_data; if (unlikely(!queue)) goto out; if (unlikely(queue->state == NVMET_TCP_Q_CONNECTING)) { queue->write_space(sk); goto out; } if (sk_stream_is_writeable(sk)) { clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags); queue_work_on(queue->cpu, nvmet_tcp_wq, &queue->io_work); } out: read_unlock_bh(&sk->sk_callback_lock); } static void nvmet_tcp_state_change(struct sock *sk) { struct nvmet_tcp_queue *queue; write_lock_bh(&sk->sk_callback_lock); queue = sk->sk_user_data; if (!queue) goto done; switch (sk->sk_state) { case TCP_FIN_WAIT1: case TCP_CLOSE_WAIT: case TCP_CLOSE: /* FALLTHRU */ sk->sk_user_data = NULL; nvmet_tcp_schedule_release_queue(queue); break; default: pr_warn("queue %d unhandled state %d\n", queue->idx, sk->sk_state); } done: write_unlock_bh(&sk->sk_callback_lock); } static int nvmet_tcp_set_queue_sock(struct nvmet_tcp_queue *queue) { struct socket *sock = queue->sock; struct inet_sock *inet = inet_sk(sock->sk); struct linger sol = { .l_onoff = 1, .l_linger = 0 }; int ret; ret = kernel_getsockname(sock, (struct sockaddr *)&queue->sockaddr); if (ret < 0) return ret; ret = kernel_getpeername(sock, (struct sockaddr *)&queue->sockaddr_peer); if (ret < 0) return ret; /* * Cleanup whatever is sitting in the TCP transmit queue on socket * close. This is done to prevent stale data from being sent should * the network connection be restored before TCP times out. 
*/ ret = kernel_setsockopt(sock, SOL_SOCKET, SO_LINGER, (char *)&sol, sizeof(sol)); if (ret) return ret; /* Set socket type of service */ if (inet->rcv_tos > 0) { int tos = inet->rcv_tos; ret = kernel_setsockopt(sock, SOL_IP, IP_TOS, (char *)&tos, sizeof(tos)); if (ret) return ret; } write_lock_bh(&sock->sk->sk_callback_lock); sock->sk->sk_user_data = queue; queue->data_ready = sock->sk->sk_data_ready; sock->sk->sk_data_ready = nvmet_tcp_data_ready; queue->state_change = sock->sk->sk_state_change; sock->sk->sk_state_change = nvmet_tcp_state_change; queue->write_space = sock->sk->sk_write_space; sock->sk->sk_write_space = nvmet_tcp_write_space; write_unlock_bh(&sock->sk->sk_callback_lock); return 0; } static int nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port, struct socket *newsock) { struct nvmet_tcp_queue *queue; int ret; queue = kzalloc(sizeof(*queue), GFP_KERNEL); if (!queue) return -ENOMEM; INIT_WORK(&queue->release_work, nvmet_tcp_release_queue_work); INIT_WORK(&queue->io_work, nvmet_tcp_io_work); queue->sock = newsock; queue->port = port; queue->nr_cmds = 0; spin_lock_init(&queue->state_lock); queue->state = NVMET_TCP_Q_CONNECTING; INIT_LIST_HEAD(&queue->free_list); init_llist_head(&queue->resp_list); INIT_LIST_HEAD(&queue->resp_send_list); queue->idx = ida_simple_get(&nvmet_tcp_queue_ida, 0, 0, GFP_KERNEL); if (queue->idx < 0) { ret = queue->idx; goto out_free_queue; } ret = nvmet_tcp_alloc_cmd(queue, &queue->connect); if (ret) goto out_ida_remove; ret = nvmet_sq_init(&queue->nvme_sq); if (ret) goto out_free_connect; port->last_cpu = cpumask_next_wrap(port->last_cpu, cpu_online_mask, -1, false); queue->cpu = port->last_cpu; nvmet_prepare_receive_pdu(queue); mutex_lock(&nvmet_tcp_queue_mutex); list_add_tail(&queue->queue_list, &nvmet_tcp_queue_list); mutex_unlock(&nvmet_tcp_queue_mutex); ret = nvmet_tcp_set_queue_sock(queue); if (ret) goto out_destroy_sq; queue_work_on(queue->cpu, nvmet_tcp_wq, &queue->io_work); return 0; out_destroy_sq: mutex_lock(&nvmet_tcp_queue_mutex); list_del_init(&queue->queue_list); mutex_unlock(&nvmet_tcp_queue_mutex); nvmet_sq_destroy(&queue->nvme_sq); out_free_connect: nvmet_tcp_free_cmd(&queue->connect); out_ida_remove: ida_simple_remove(&nvmet_tcp_queue_ida, queue->idx); out_free_queue: kfree(queue); return ret; } static void nvmet_tcp_accept_work(struct work_struct *w) { struct nvmet_tcp_port *port = container_of(w, struct nvmet_tcp_port, accept_work); struct socket *newsock; int ret; while (true) { ret = kernel_accept(port->sock, &newsock, O_NONBLOCK); if (ret < 0) { if (ret != -EAGAIN) pr_warn("failed to accept err=%d\n", ret); return; } ret = nvmet_tcp_alloc_queue(port, newsock); if (ret) { pr_err("failed to allocate queue\n"); sock_release(newsock); } } } static void nvmet_tcp_listen_data_ready(struct sock *sk) { struct nvmet_tcp_port *port; read_lock_bh(&sk->sk_callback_lock); port = sk->sk_user_data; if (!port) goto out; if (sk->sk_state == TCP_LISTEN) schedule_work(&port->accept_work); out: read_unlock_bh(&sk->sk_callback_lock); } static int nvmet_tcp_add_port(struct nvmet_port *nport) { struct nvmet_tcp_port *port; __kernel_sa_family_t af; int opt, ret; port = kzalloc(sizeof(*port), GFP_KERNEL); if (!port) return -ENOMEM; switch (nport->disc_addr.adrfam) { case NVMF_ADDR_FAMILY_IP4: af = AF_INET; break; case NVMF_ADDR_FAMILY_IP6: af = AF_INET6; break; default: pr_err("address family %d not supported\n", nport->disc_addr.adrfam); ret = -EINVAL; goto err_port; } ret = inet_pton_with_scope(&init_net, af, nport->disc_addr.traddr, 
nport->disc_addr.trsvcid, &port->addr); if (ret) { pr_err("malformed ip/port passed: %s:%s\n", nport->disc_addr.traddr, nport->disc_addr.trsvcid); goto err_port; } port->nport = nport; port->last_cpu = -1; INIT_WORK(&port->accept_work, nvmet_tcp_accept_work); if (port->nport->inline_data_size < 0) port->nport->inline_data_size = NVMET_TCP_DEF_INLINE_DATA_SIZE; ret = sock_create(port->addr.ss_family, SOCK_STREAM, IPPROTO_TCP, &port->sock); if (ret) { pr_err("failed to create a socket\n"); goto err_port; } port->sock->sk->sk_user_data = port; port->data_ready = port->sock->sk->sk_data_ready; port->sock->sk->sk_data_ready = nvmet_tcp_listen_data_ready; opt = 1; ret = kernel_setsockopt(port->sock, IPPROTO_TCP, TCP_NODELAY, (char *)&opt, sizeof(opt)); if (ret) { pr_err("failed to set TCP_NODELAY sock opt %d\n", ret); goto err_sock; } ret = kernel_setsockopt(port->sock, SOL_SOCKET, SO_REUSEADDR, (char *)&opt, sizeof(opt)); if (ret) { pr_err("failed to set SO_REUSEADDR sock opt %d\n", ret); goto err_sock; } ret = kernel_bind(port->sock, (struct sockaddr *)&port->addr, sizeof(port->addr)); if (ret) { pr_err("failed to bind port socket %d\n", ret); goto err_sock; } ret = kernel_listen(port->sock, 128); if (ret) { pr_err("failed to listen %d on port sock\n", ret); goto err_sock; } nport->priv = port; pr_info("enabling port %d (%pISpc)\n", le16_to_cpu(nport->disc_addr.portid), &port->addr); return 0; err_sock: sock_release(port->sock); err_port: kfree(port); return ret; } static void nvmet_tcp_remove_port(struct nvmet_port *nport) { struct nvmet_tcp_port *port = nport->priv; write_lock_bh(&port->sock->sk->sk_callback_lock); port->sock->sk->sk_data_ready = port->data_ready; port->sock->sk->sk_user_data = NULL; write_unlock_bh(&port->sock->sk->sk_callback_lock); cancel_work_sync(&port->accept_work); sock_release(port->sock); kfree(port); } static void nvmet_tcp_delete_ctrl(struct nvmet_ctrl *ctrl) { struct nvmet_tcp_queue *queue; mutex_lock(&nvmet_tcp_queue_mutex); list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list) if (queue->nvme_sq.ctrl == ctrl) kernel_sock_shutdown(queue->sock, SHUT_RDWR); mutex_unlock(&nvmet_tcp_queue_mutex); } static u16 nvmet_tcp_install_queue(struct nvmet_sq *sq) { struct nvmet_tcp_queue *queue = container_of(sq, struct nvmet_tcp_queue, nvme_sq); if (sq->qid == 0) { /* Let inflight controller teardown complete */ flush_scheduled_work(); } queue->nr_cmds = sq->size * 2; if (nvmet_tcp_alloc_cmds(queue)) return NVME_SC_INTERNAL; return 0; } static void nvmet_tcp_disc_port_addr(struct nvmet_req *req, struct nvmet_port *nport, char *traddr) { struct nvmet_tcp_port *port = nport->priv; if (inet_addr_is_any((struct sockaddr *)&port->addr)) { struct nvmet_tcp_cmd *cmd = container_of(req, struct nvmet_tcp_cmd, req); struct nvmet_tcp_queue *queue = cmd->queue; sprintf(traddr, "%pISc", (struct sockaddr *)&queue->sockaddr); } else { memcpy(traddr, nport->disc_addr.traddr, NVMF_TRADDR_SIZE); } } static struct nvmet_fabrics_ops nvmet_tcp_ops = { .owner = THIS_MODULE, .type = NVMF_TRTYPE_TCP, .msdbd = 1, .has_keyed_sgls = 0, .add_port = nvmet_tcp_add_port, .remove_port = nvmet_tcp_remove_port, .queue_response = nvmet_tcp_queue_response, .delete_ctrl = nvmet_tcp_delete_ctrl, .install_queue = nvmet_tcp_install_queue, .disc_traddr = nvmet_tcp_disc_port_addr, }; static int __init nvmet_tcp_init(void) { int ret; nvmet_tcp_wq = alloc_workqueue("nvmet_tcp_wq", WQ_HIGHPRI, 0); if (!nvmet_tcp_wq) return -ENOMEM; ret = nvmet_register_transport(&nvmet_tcp_ops); if (ret) goto err; return 0; 
err: destroy_workqueue(nvmet_tcp_wq); return ret; } static void __exit nvmet_tcp_exit(void) { struct nvmet_tcp_queue *queue; nvmet_unregister_transport(&nvmet_tcp_ops); flush_scheduled_work(); mutex_lock(&nvmet_tcp_queue_mutex); list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list) kernel_sock_shutdown(queue->sock, SHUT_RDWR); mutex_unlock(&nvmet_tcp_queue_mutex); flush_scheduled_work(); destroy_workqueue(nvmet_tcp_wq); } module_init(nvmet_tcp_init); module_exit(nvmet_tcp_exit); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("nvmet-transport-3"); /* 3 == NVMF_TRTYPE_TCP */