/* SCTP kernel reference Implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 *
 * This file is part of the SCTP kernel reference Implementation
 *
 * These functions handle output processing.
 *
 * The SCTP reference implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * The SCTP reference implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, write to
 * the Free Software Foundation, 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <lksctp-developers@lists.sourceforge.net>
 *
 * Or submit a bug report through the following website:
 *    http://www.sf.net/projects/lksctp
 *
 * Written or modified by:
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Karl Knutson          <karl@athena.chicago.il.us>
 *    Jon Grimm             <jgrimm@austin.ibm.com>
 *    Sridhar Samudrala     <sri@us.ibm.com>
 *
 * Any bugs reported given to us we will try to fix... any fixes shared will
 * be incorporated into the next SCTP release.
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/time.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/init.h>
#include <net/inet_ecn.h>
#include <net/icmp.h>

#ifndef TEST_FRAME
#include <net/tcp.h>
#endif /* TEST_FRAME (not defined) */

#include <linux/socket.h> /* for sa_family_t */
#include <net/sock.h>

#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>

/* Forward declarations for private helpers. */
static sctp_xmit_t sctp_packet_append_data(struct sctp_packet *packet,
					   struct sctp_chunk *chunk);

/* Configure a packet.
 * This is a follow-up set of initializations, applied each time an
 * already initialized packet is reused for a new send.
 */
struct sctp_packet *sctp_packet_config(struct sctp_packet *packet,
				       __u32 vtag, int ecn_capable)
{
	struct sctp_chunk *chunk = NULL;

	SCTP_DEBUG_PRINTK("%s: packet:%p vtag:0x%x\n", __FUNCTION__,
			  packet, vtag);

	packet->vtag = vtag;
	packet->has_cookie_echo = 0;
	packet->has_sack = 0;
	packet->ipfragok = 0;

	if (ecn_capable && sctp_packet_empty(packet)) {
		chunk = sctp_get_ecne_prepend(packet->transport->asoc);

		/* If there is a prepend chunk, stick it on the list before
		 * any other chunks get appended.
		 */
		if (chunk)
			sctp_packet_append_chunk(packet, chunk);
	}

	return packet;
}

/* Initialize the packet structure. */
struct sctp_packet *sctp_packet_init(struct sctp_packet *packet,
				     struct sctp_transport *transport,
				     __u16 sport, __u16 dport)
{
	struct sctp_association *asoc = transport->asoc;
	size_t overhead;

	SCTP_DEBUG_PRINTK("%s: packet:%p transport:%p\n", __FUNCTION__,
			  packet, transport);

	packet->transport = transport;
	packet->source_port = sport;
	packet->destination_port = dport;
	skb_queue_head_init(&packet->chunks);
	if (asoc) {
		struct sctp_sock *sp = sctp_sk(asoc->base.sk);
		overhead = sp->pf->af->net_header_len;
	} else {
		overhead = sizeof(struct ipv6hdr);
	}
	overhead += sizeof(struct sctphdr);
	packet->overhead = overhead;
	packet->size = overhead;
	packet->vtag = 0;
	packet->has_cookie_echo = 0;
	packet->has_sack = 0;
	packet->ipfragok = 0;
	packet->malloced = 0;
	return packet;
}
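
/* Usage sketch (illustrative only, not a call site in this file): a packet
 * is typically initialized once for a transport and then reconfigured
 * before each outbound burst, roughly:
 *
 *	sctp_packet_init(&pkt, transport, sport, dport);
 *	sctp_packet_config(&pkt, vtag, ecn_capable);
 *	sctp_packet_transmit_chunk(&pkt, chunk);
 *	sctp_packet_transmit(&pkt);
 *
 * where pkt, vtag and ecn_capable stand in for the caller's own state;
 * the real call sites live in the outqueue code.
 */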

/* Free a packet.  */
void sctp_packet_free(struct sctp_packet *packet)
{
	struct sctp_chunk *chunk;

	SCTP_DEBUG_PRINTK("%s: packet:%p\n", __FUNCTION__, packet);

	while ((chunk = (struct sctp_chunk *)__skb_dequeue(&packet->chunks)) != NULL)
		sctp_chunk_free(chunk);

	if (packet->malloced)
		kfree(packet);
}

/* This routine tries to append the chunk to the offered packet. If adding
 * the chunk causes the packet to exceed the path MTU and a COOKIE_ECHO chunk
 * is not present in the packet, it transmits the input packet.
 * Data can be bundled with a packet containing a COOKIE_ECHO chunk as long
 * as it can fit in the packet, but any more data that does not fit in this
 * packet can be sent only after receiving the COOKIE_ACK.
 */
sctp_xmit_t sctp_packet_transmit_chunk(struct sctp_packet *packet,
				       struct sctp_chunk *chunk)
{
	sctp_xmit_t retval;
	int error = 0;

	SCTP_DEBUG_PRINTK("%s: packet:%p chunk:%p\n", __FUNCTION__,
			  packet, chunk);

	switch ((retval = sctp_packet_append_chunk(packet, chunk))) {
	case SCTP_XMIT_PMTU_FULL:
		if (!packet->has_cookie_echo) {
			error = sctp_packet_transmit(packet);
			if (error < 0)
				chunk->skb->sk->sk_err = -error;

			/* If we have an empty packet, then we can NOT ever
			 * return PMTU_FULL.
			 */
			retval = sctp_packet_append_chunk(packet, chunk);
		}
		break;

	case SCTP_XMIT_RWND_FULL:
	case SCTP_XMIT_OK:
	case SCTP_XMIT_NAGLE_DELAY:
		break;
	}

	return retval;
}

/* Try to bundle a SACK with the packet. */
static sctp_xmit_t sctp_packet_bundle_sack(struct sctp_packet *pkt,
					   struct sctp_chunk *chunk)
{
	sctp_xmit_t retval = SCTP_XMIT_OK;

	/* If sending DATA and haven't already bundled a SACK, try to
	 * bundle one in to the packet.
	 */
	if (sctp_chunk_is_data(chunk) && !pkt->has_sack &&
	    !pkt->has_cookie_echo) {
		struct sctp_association *asoc;
		asoc = pkt->transport->asoc;

		if (asoc->a_rwnd > asoc->rwnd) {
			struct sctp_chunk *sack;
			asoc->a_rwnd = asoc->rwnd;
			sack = sctp_make_sack(asoc);
			if (sack) {
				struct timer_list *timer;
				retval = sctp_packet_append_chunk(pkt, sack);
				asoc->peer.sack_needed = 0;
				timer = &asoc->timers[SCTP_EVENT_TIMEOUT_SACK];
				if (timer_pending(timer) && del_timer(timer))
					sctp_association_put(asoc);
			}
		}
	}
	return retval;
}

/* Append a chunk to the offered packet reporting back any inability to do
 * so.
 */
sctp_xmit_t sctp_packet_append_chunk(struct sctp_packet *packet,
				     struct sctp_chunk *chunk)
{
	sctp_xmit_t retval = SCTP_XMIT_OK;
	__u16 chunk_len = WORD_ROUND(ntohs(chunk->chunk_hdr->length));
	size_t psize;
	size_t pmtu;
	int too_big;

	SCTP_DEBUG_PRINTK("%s: packet:%p chunk:%p\n", __FUNCTION__, packet,
			  chunk);

	retval = sctp_packet_bundle_sack(packet, chunk);
	psize = packet->size;

	if (retval != SCTP_XMIT_OK)
		goto finish;

	pmtu  = ((packet->transport->asoc) ?
		 (packet->transport->asoc->pmtu) :
		 (packet->transport->pmtu));

	too_big = (psize + chunk_len > pmtu);

	/* Decide if we need to fragment or resubmit later. */
	if (too_big) {
		/* Both control chunks and data chunks with TSNs are
		 * non-fragmentable.
		 */
		if (sctp_packet_empty(packet) || !sctp_chunk_is_data(chunk)) {
			/* We no longer do re-fragmentation.
			 * Just fragment at the IP layer, if we
			 * actually hit this condition.
			 */
			packet->ipfragok = 1;
			goto append;

		} else {
			retval = SCTP_XMIT_PMTU_FULL;
			goto finish;
		}
	}

append:
	/* We believe that this chunk is OK to add to the packet (as
	 * long as we have the cwnd for it).
	 */

	/* DATA is a special case since we must examine both rwnd and cwnd
	 * before we send DATA.
	 */
	if (sctp_chunk_is_data(chunk)) {
		retval = sctp_packet_append_data(packet, chunk);
		/* Disallow SACK bundling after DATA. */
		packet->has_sack = 1;
		if (SCTP_XMIT_OK != retval)
			goto finish;
	} else if (SCTP_CID_COOKIE_ECHO == chunk->chunk_hdr->type)
		packet->has_cookie_echo = 1;
	else if (SCTP_CID_SACK == chunk->chunk_hdr->type)
		packet->has_sack = 1;

	/* It is OK to send this chunk.  */
	__skb_queue_tail(&packet->chunks, (struct sk_buff *)chunk);
	packet->size += chunk_len;
	chunk->transport = packet->transport;
finish:
	return retval;
}

/* All packets are sent to the network through this function from
 * sctp_outq_tail().
 *
 * The return value is a normal kernel error return value.
 */
int sctp_packet_transmit(struct sctp_packet *packet)
{
	struct sctp_transport *tp = packet->transport;
	struct sctp_association *asoc = tp->asoc;
	struct sctphdr *sh;
	__u32 crc32;
	struct sk_buff *nskb;
	struct sctp_chunk *chunk;
	struct sock *sk;
	int err = 0;
	int padding;		/* How much padding do we need?  */
	__u8 has_data = 0;
	struct dst_entry *dst;

	SCTP_DEBUG_PRINTK("%s: packet:%p\n", __FUNCTION__, packet);

	/* Do NOT generate a chunkless packet. */
	chunk = (struct sctp_chunk *)skb_peek(&packet->chunks);
	if (unlikely(!chunk))
		return err;

	/* Set up convenience variables... */
	sk = chunk->skb->sk;

	/* Allocate the new skb.  */
	nskb = dev_alloc_skb(packet->size);
	if (!nskb)
		goto nomem;

	/* Make sure the outbound skb has enough header room reserved. */
	skb_reserve(nskb, packet->overhead);

	/* Set the owning socket so that we know where to get the
	 * destination IP address.
	 */
	skb_set_owner_w(nskb, sk);

	/* Build the SCTP header.  */
	sh = (struct sctphdr *)skb_push(nskb, sizeof(struct sctphdr));
	sh->source = htons(packet->source_port);
	sh->dest   = htons(packet->destination_port);

	/* From 6.8 Adler-32 Checksum Calculation:
	 * After the packet is constructed (containing the SCTP common
	 * header and one or more control or DATA chunks), the
	 * transmitter shall:
	 *
	 * 1) Fill in the proper Verification Tag in the SCTP common
	 *    header and initialize the checksum field to 0's.
	 */
	sh->vtag     = htonl(packet->vtag);
	sh->checksum = 0;

	/* 2) Calculate the Adler-32 checksum of the whole packet,
	 *    including the SCTP common header and all the
	 *    chunks.
	 *
	 * Note: Adler-32 is no longer applicable, as has been replaced
	 * by CRC32-C as described in <draft-ietf-tsvwg-sctpcsum-02.txt>.
	 */
	crc32 = sctp_start_cksum((__u8 *)sh, sizeof(struct sctphdr));
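	/* The checksum is built incrementally: sctp_start_cksum() above
	 * seeds it over the common header, sctp_update_copy_cksum() below
	 * folds in each chunk while copying it into the outbound skb, and
	 * sctp_end_cksum() applies the final transformation.
	 */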

	/**
	 * 6.10 Bundling
	 *
	 *    An endpoint bundles chunks by simply including multiple
	 *    chunks in one outbound SCTP packet.  ...
	 */

	/**
	 * 3.2  Chunk Field Descriptions
	 *
	 * The total length of a chunk (including Type, Length and
	 * Value fields) MUST be a multiple of 4 bytes.  If the length
	 * of the chunk is not a multiple of 4 bytes, the sender MUST
	 * pad the chunk with all zero bytes and this padding is not
	 * included in the chunk length field.  The sender should
	 * never pad with more than 3 bytes.
	 *
	 * [This whole comment explains WORD_ROUND() below.]
	 */
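	/* Worked example: a chunk whose skb holds 37 bytes gets 3 zero
	 * bytes of padding below, so 40 bytes are copied into the outbound
	 * skb, while the chunk's length field still reads 37.
	 */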
	SCTP_DEBUG_PRINTK("***sctp_transmit_packet***\n");
	while ((chunk = (struct sctp_chunk *)__skb_dequeue(&packet->chunks)) != NULL) {
		if (sctp_chunk_is_data(chunk)) {

			if (!chunk->has_tsn) {
				sctp_chunk_assign_ssn(chunk);
				sctp_chunk_assign_tsn(chunk);

			/* 6.3.1 C4) When data is in flight and when allowed
			 * by rule C5, a new RTT measurement MUST be made each
			 * round trip.  Furthermore, new RTT measurements
			 * SHOULD be made no more than once per round-trip
			 * for a given destination transport address.
			 */

				if (!tp->rto_pending) {
					chunk->rtt_in_progress = 1;
					tp->rto_pending = 1;
				}
			} else
				chunk->resent = 1;

			chunk->sent_at = jiffies;
			has_data = 1;
		}

		padding = WORD_ROUND(chunk->skb->len) - chunk->skb->len;
		if (padding)
			memset(skb_put(chunk->skb, padding), 0, padding);

		crc32 = sctp_update_copy_cksum(skb_put(nskb, chunk->skb->len),
					       chunk->skb->data,
					       chunk->skb->len, crc32);

		SCTP_DEBUG_PRINTK("%s %p[%s] %s 0x%x, %s %d, %s %d, %s %d\n",
				  "*** Chunk", chunk,
				  sctp_cname(SCTP_ST_CHUNK(
					  chunk->chunk_hdr->type)),
				  chunk->has_tsn ? "TSN" : "No TSN",
				  chunk->has_tsn ?
				  ntohl(chunk->subh.data_hdr->tsn) : 0,
				  "length", ntohs(chunk->chunk_hdr->length),
				  "chunk->skb->len", chunk->skb->len,
				  "rtt_in_progress", chunk->rtt_in_progress);

		/*
		 * If this is a control chunk, this is our last
		 * reference. Free data chunks after they've been
		 * acknowledged or have failed.
		 */
		if (!sctp_chunk_is_data(chunk))
			sctp_chunk_free(chunk);
	}

	/* Perform final transformation on checksum. */
	crc32 = sctp_end_cksum(crc32);

	/* 3) Put the resultant value into the checksum field in the
	 *    common header, and leave the rest of the bits unchanged.
	 */
	sh->checksum = htonl(crc32);

	/* IP layer ECN support
	 * From RFC 2481
	 *  "The ECN-Capable Transport (ECT) bit would be set by the
	 *   data sender to indicate that the end-points of the
	 *   transport protocol are ECN-capable."
	 *
	 * Now setting the ECT bit all the time, as it should not cause
	 * any problems protocol-wise even if our peer ignores it.
	 *
	 * Note: The IPv6 layer checks this bit too, later in
	 * transmission.  See IP6_ECN_flow_xmit().
	 */
	INET_ECN_xmit(nskb->sk);

	/* Set up the IP options.  */
	/* BUG: not implemented
	 * For v4 this all lives somewhere in sk->sk_opt...
	 */

	/* Dump that on IP!  */
	if (asoc && asoc->peer.last_sent_to != tp) {
		/* Considering the multiple CPU scenario, this is a
		 * more correct place for last_sent_to.  --xguo
		 */
		asoc->peer.last_sent_to = tp;
	}

	if (has_data) {
		struct timer_list *timer;
		unsigned long timeout;

		tp->last_time_used = jiffies;

		/* Restart the AUTOCLOSE timer when sending data. */
		if (sctp_state(asoc, ESTABLISHED) && asoc->autoclose) {
			timer = &asoc->timers[SCTP_EVENT_TIMEOUT_AUTOCLOSE];
			timeout = asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE];

			if (!mod_timer(timer, jiffies + timeout))
				sctp_association_hold(asoc);
		}
	}

	dst = tp->dst;
	/* The 'obsolete' field of dst is set to 2 when a dst is freed. */
	if (!dst || (dst->obsolete > 1)) {
		dst_release(dst);
		sctp_transport_route(tp, NULL, sctp_sk(sk));
		sctp_assoc_sync_pmtu(asoc);
	}

	nskb->dst = dst_clone(tp->dst);
	if (!nskb->dst)
		goto no_route;

	SCTP_DEBUG_PRINTK("***sctp_transmit_packet*** skb len %d\n",
			  nskb->len);

	(*tp->af_specific->sctp_xmit)(nskb, tp, packet->ipfragok);

out:
	packet->size = packet->overhead;
	return err;
no_route:
	kfree_skb(nskb);
	IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES);

	/* FIXME: Returning the 'err' will affect all the associations
	 * associated with a socket, although only one of the paths of the
	 * association is unreachable.
	 * The real failure of a transport or association can be passed on
	 * to the user via notifications. So setting this error may not be
	 * required.
	 */
	 /* err = -EHOSTUNREACH; */
err:
	/* Control chunks are unreliable so just drop them.  DATA chunks
	 * will get resent or dropped later.
	 */

	while ((chunk = (struct sctp_chunk *)__skb_dequeue(&packet->chunks)) != NULL) {
		if (!sctp_chunk_is_data(chunk))
			sctp_chunk_free(chunk);
	}
	goto out;
nomem:
	err = -ENOMEM;
	goto err;
}

/********************************************************************
 * 2nd Level Abstractions
 ********************************************************************/

/* This private function handles the specifics of appending DATA chunks.  */
static sctp_xmit_t sctp_packet_append_data(struct sctp_packet *packet,
					   struct sctp_chunk *chunk)
{
	sctp_xmit_t retval = SCTP_XMIT_OK;
	size_t datasize, rwnd, inflight;
	struct sctp_transport *transport = packet->transport;
	__u32 max_burst_bytes;
	struct sctp_association *asoc = transport->asoc;
	struct sctp_sock *sp = sctp_sk(asoc->base.sk);
	struct sctp_outq *q = &asoc->outqueue;

	/* RFC 2960 6.1  Transmission of DATA Chunks
	 *
	 * A) At any given time, the data sender MUST NOT transmit new data to
	 * any destination transport address if its peer's rwnd indicates
	 * that the peer has no buffer space (i.e. rwnd is 0, see Section
	 * 6.2.1).  However, regardless of the value of rwnd (including if it
	 * is 0), the data sender can always have one DATA chunk in flight to
	 * the receiver if allowed by cwnd (see rule B below).  This rule
	 * allows the sender to probe for a change in rwnd that the sender
	 * missed due to the SACK having been lost in transit from the data
	 * receiver to the data sender.
	 */

	rwnd = asoc->peer.rwnd;
	inflight = asoc->outqueue.outstanding_bytes;

	datasize = sctp_data_size(chunk);

	if (datasize > rwnd) {
		if (inflight > 0) {
			/* We have (at least) one data chunk in flight,
			 * so we can't fall back to rule 6.1 B).
			 */
			retval = SCTP_XMIT_RWND_FULL;
			goto finish;
		}
	}
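	/* Note the fall-through above: with nothing in flight the chunk is
	 * accepted even if it exceeds rwnd (including rwnd of 0), giving
	 * the one-chunk window probe allowed by rule 6.1 A).
	 */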

	/* sctpimpguide-05 2.14.2
	 * D) When the time comes for the sender to
	 * transmit new DATA chunks, the protocol parameter Max.Burst MUST
	 * first be applied to limit how many new DATA chunks may be sent.
	 * The limit is applied by adjusting cwnd as follows:
	 * 	if ((flightsize + Max.Burst * MTU) < cwnd)
	 *		cwnd = flightsize + Max.Burst * MTU
	 */
	max_burst_bytes = asoc->max_burst * asoc->pmtu;
	if ((transport->flight_size + max_burst_bytes) < transport->cwnd) {
		transport->cwnd = transport->flight_size + max_burst_bytes;
		SCTP_DEBUG_PRINTK("%s: cwnd limited by max_burst: "
				  "transport: %p, cwnd: %d, "
				  "ssthresh: %d, flight_size: %d, "
				  "pba: %d\n",
				  __FUNCTION__, transport,
				  transport->cwnd,
				  transport->ssthresh,
				  transport->flight_size,
				  transport->partial_bytes_acked);
	}
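	/* For example, with a 1500-byte PMTU and a Max.Burst of 4, a
	 * transport with 2000 bytes in flight has its cwnd clamped to
	 * 2000 + 4 * 1500 = 8000 bytes, however large cwnd had grown.
	 */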

	/* RFC 2960 6.1  Transmission of DATA Chunks
	 *
	 * B) At any given time, the sender MUST NOT transmit new data
	 * to a given transport address if it has cwnd or more bytes
	 * of data outstanding to that transport address.
	 */
	/* RFC 2960 7.2.4 & the Implementers Guide 2.8.
	 *
	 * 3) ...
	 *    When a Fast Retransmit is being performed the sender SHOULD
	 *    ignore the value of cwnd and SHOULD NOT delay retransmission.
	 */
	if (!chunk->fast_retransmit &&
	    transport->flight_size >= transport->cwnd) {
		retval = SCTP_XMIT_RWND_FULL;
		goto finish;
	}

	/* Nagle's algorithm to solve small-packet problem:
	 * Inhibit the sending of new chunks when new outgoing data arrives
	 * if any previously transmitted data on the connection remains
	 * unacknowledged.
	 */
	if (!sp->nodelay && sctp_packet_empty(packet) &&
	    q->outstanding_bytes && sctp_state(asoc, ESTABLISHED)) {
		unsigned len = datasize + q->out_qlen;

		/* Check whether this chunk and all the rest of the pending
		 * data would fill a packet; if not, delay in hopes of
		 * bundling a full-sized packet.
		 */
		if (len < asoc->pmtu - packet->overhead) {
			retval = SCTP_XMIT_NAGLE_DELAY;
			goto finish;
		}
	}
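	/* In other words, with a 1500-byte PMTU and a packet overhead of,
	 * say, 52 bytes (IPv6 plus SCTP headers), the send is delayed while
	 * the chunk and all other queued data total less than 1448 bytes;
	 * once they amount to a full payload, the data goes out immediately.
	 */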

	/* Keep track of how many bytes are in flight over this transport. */
	transport->flight_size += datasize;

	/* Keep track of how many bytes are in flight to the receiver. */
	asoc->outqueue.outstanding_bytes += datasize;

	/* Update our view of the receiver's rwnd. */
	if (datasize < rwnd)
		rwnd -= datasize;
	else
		rwnd = 0;

	asoc->peer.rwnd = rwnd;
	/* Has been accepted for transmission. */
	if (!asoc->peer.prsctp_capable)
		chunk->msg->can_abandon = 0;

finish:
	return retval;
}