From 4479ff76c43607b680f9349128d8493228b49dce Mon Sep 17 00:00:00 2001 From: Steffen Klassert Date: Mon, 9 Sep 2013 09:39:01 +0200 Subject: xfrm: Fix replay size checking on async events We pass the wrong netlink attribute to xfrm_replay_verify_len(). It should be XFRMA_REPLAY_ESN_VAL and not XFRMA_REPLAY_VAL as we currently doing. This causes memory corruptions if the replay esn attribute has incorrect length. Fix this by passing the right attribute to xfrm_replay_verify_len(). Reported-by: Michael Rossberg Signed-off-by: Steffen Klassert --- net/xfrm/xfrm_user.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net') diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index 3f565e495ac6..4b26ceedff26 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c @@ -1856,7 +1856,7 @@ static int xfrm_new_ae(struct sk_buff *skb, struct nlmsghdr *nlh, if (x->km.state != XFRM_STATE_VALID) goto out; - err = xfrm_replay_verify_len(x->replay_esn, rp); + err = xfrm_replay_verify_len(x->replay_esn, re); if (err) goto out; -- cgit v1.2.3-59-g8ed1b From bafd4bd4dcfa13145db7f951251eef3e10f8c278 Mon Sep 17 00:00:00 2001 From: Steffen Klassert Date: Mon, 9 Sep 2013 10:38:38 +0200 Subject: xfrm: Decode sessions with output interface. The output interface matching does not work on forward policy lookups, the output interface of the flowi is always 0. Fix this by setting the output interface when we decode the session. Signed-off-by: Steffen Klassert --- net/ipv4/xfrm4_policy.c | 1 + net/ipv6/xfrm6_policy.c | 1 + 2 files changed, 2 insertions(+) (limited to 'net') diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c index 9a459be24af7..ccde54248c8c 100644 --- a/net/ipv4/xfrm4_policy.c +++ b/net/ipv4/xfrm4_policy.c @@ -107,6 +107,7 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse) memset(fl4, 0, sizeof(struct flowi4)); fl4->flowi4_mark = skb->mark; + fl4->flowi4_oif = skb_dst(skb)->dev->ifindex; if (!ip_is_fragment(iph)) { switch (iph->protocol) { diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c index 23ed03d786c8..08ed2772b7aa 100644 --- a/net/ipv6/xfrm6_policy.c +++ b/net/ipv6/xfrm6_policy.c @@ -138,6 +138,7 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse) memset(fl6, 0, sizeof(struct flowi6)); fl6->flowi6_mark = skb->mark; + fl6->flowi6_oif = skb_dst(skb)->dev->ifindex; fl6->daddr = reverse ? hdr->saddr : hdr->daddr; fl6->saddr = reverse ? hdr->daddr : hdr->saddr; -- cgit v1.2.3-59-g8ed1b From b3b2b9e192d5811f91f9cd92aeec489cecabc92e Mon Sep 17 00:00:00 2001 From: Steffen Klassert Date: Tue, 10 Sep 2013 13:43:09 +0200 Subject: ipsec: Don't update the pmtu on ICMPV6_DEST_UNREACH Currently we update the pmtu in the IPsec protocol error handlers if icmpv6 message type is either ICMPV6_DEST_UNREACH or ICMPV6_PKT_TOOBIG. Updating the pmtu on ICMPV6_DEST_UNREACH is wrong in any case, it causes strangely fragmented packets. Only ICMPV6_PKT_TOOBIG signalizes pmtu discovery, so remove the ICMPV6_DEST_UNREACH check in the IPsec protocol error handlers. 
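For illustration only (not part of the patch), a minimal user-space sketch of the type filter the error handlers are left with; the numeric values are the standard ICMPv6/NDISC type numbers and the helper name is made up:

#include <stdbool.h>
#include <stdio.h>

#define ICMPV6_DEST_UNREACH    1    /* must no longer touch the PMTU */
#define ICMPV6_PKT_TOOBIG      2    /* the only real PMTU signal */
#define NDISC_REDIRECT         137

/* Hypothetical helper: should an IPsec protocol error handler react? */
static bool ipsec6_err_relevant(unsigned char type)
{
    return type == ICMPV6_PKT_TOOBIG || type == NDISC_REDIRECT;
}

int main(void)
{
    printf("DEST_UNREACH handled: %d\n", ipsec6_err_relevant(ICMPV6_DEST_UNREACH)); /* 0 */
    printf("PKT_TOOBIG handled:   %d\n", ipsec6_err_relevant(ICMPV6_PKT_TOOBIG));   /* 1 */
    return 0;
}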
Signed-off-by: Steffen Klassert --- net/ipv6/ah6.c | 3 +-- net/ipv6/esp6.c | 3 +-- net/ipv6/ipcomp6.c | 3 +-- 3 files changed, 3 insertions(+), 6 deletions(-) (limited to 'net') diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c index 73784c3d4642..82e1da3a40b9 100644 --- a/net/ipv6/ah6.c +++ b/net/ipv6/ah6.c @@ -618,8 +618,7 @@ static void ah6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, struct ip_auth_hdr *ah = (struct ip_auth_hdr*)(skb->data+offset); struct xfrm_state *x; - if (type != ICMPV6_DEST_UNREACH && - type != ICMPV6_PKT_TOOBIG && + if (type != ICMPV6_PKT_TOOBIG && type != NDISC_REDIRECT) return; diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c index d3618a78fcac..e67e63f9858d 100644 --- a/net/ipv6/esp6.c +++ b/net/ipv6/esp6.c @@ -436,8 +436,7 @@ static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data + offset); struct xfrm_state *x; - if (type != ICMPV6_DEST_UNREACH && - type != ICMPV6_PKT_TOOBIG && + if (type != ICMPV6_PKT_TOOBIG && type != NDISC_REDIRECT) return; diff --git a/net/ipv6/ipcomp6.c b/net/ipv6/ipcomp6.c index 5636a912074a..ce507d9e1c90 100644 --- a/net/ipv6/ipcomp6.c +++ b/net/ipv6/ipcomp6.c @@ -64,8 +64,7 @@ static void ipcomp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, (struct ip_comp_hdr *)(skb->data + offset); struct xfrm_state *x; - if (type != ICMPV6_DEST_UNREACH && - type != ICMPV6_PKT_TOOBIG && + if (type != ICMPV6_PKT_TOOBIG && type != NDISC_REDIRECT) return; -- cgit v1.2.3-59-g8ed1b From 33fce60d6a6e137035f8e23a89d7fd55f3a24cda Mon Sep 17 00:00:00 2001 From: Fan Du Date: Tue, 17 Sep 2013 15:14:13 +0800 Subject: xfrm: Guard IPsec anti replay window against replay bitmap For legacy IPsec anti replay mechanism: bitmap in struct xfrm_replay_state could only provide a 32 bits window size limit in current design, thus user level parameter sadb_sa_replay should honor this limit, otherwise misleading outputs("replay=244") by setkey -D will be: 192.168.25.2 192.168.22.2 esp mode=transport spi=147561170(0x08cb9ad2) reqid=0(0x00000000) E: aes-cbc 9a8d7468 7655cf0b 719d27be b0ddaac2 A: hmac-sha1 2d2115c2 ebf7c126 1c54f186 3b139b58 264a7331 seq=0x00000000 replay=244 flags=0x00000000 state=mature created: Sep 17 14:00:00 2013 current: Sep 17 14:00:22 2013 diff: 22(s) hard: 30(s) soft: 26(s) last: Sep 17 14:00:00 2013 hard: 0(s) soft: 0(s) current: 1408(bytes) hard: 0(bytes) soft: 0(bytes) allocated: 22 hard: 0 soft: 0 sadb_seq=1 pid=4854 refcnt=0 192.168.22.2 192.168.25.2 esp mode=transport spi=255302123(0x0f3799eb) reqid=0(0x00000000) E: aes-cbc 6485d990 f61a6bd5 e5660252 608ad282 A: hmac-sha1 0cca811a eb4fa893 c47ae56c 98f6e413 87379a88 seq=0x00000000 replay=244 flags=0x00000000 state=mature created: Sep 17 14:00:00 2013 current: Sep 17 14:00:22 2013 diff: 22(s) hard: 30(s) soft: 26(s) last: Sep 17 14:00:00 2013 hard: 0(s) soft: 0(s) current: 1408(bytes) hard: 0(bytes) soft: 0(bytes) allocated: 22 hard: 0 soft: 0 sadb_seq=0 pid=4854 refcnt=0 And also, optimizing xfrm_replay_check window checking by setting the desirable x->props.replay_window with only doing the comparison once for all when xfrm_state is first born. 
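As an illustration of the clamp (a stand-alone user-space sketch, not the kernel code; the function name is made up), the requested window is limited to the 32 bits that the legacy bitmap can actually track, which is why a sadb_sa_replay of 244 must not be accepted verbatim:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper mirroring the min_t() clamp added in af_key.c and
 * xfrm_user.c: a 32-bit bitmap can only cover a 32-packet window.
 */
static unsigned int clamp_replay_window(unsigned int requested)
{
    uint32_t bitmap;                        /* stands in for x->replay.bitmap */
    unsigned int max = sizeof(bitmap) * 8;  /* 32 */

    return requested < max ? requested : max;
}

int main(void)
{
    printf("requested 244 -> window %u\n", clamp_replay_window(244)); /* 32 */
    printf("requested  16 -> window %u\n", clamp_replay_window(16));  /* 16 */
    return 0;
}

With the window clamped once at state creation time, the per-packet comparison can use x->props.replay_window directly, which is exactly the simplification in the xfrm_replay.c hunk of this patch.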
Signed-off-by: Fan Du Signed-off-by: Steffen Klassert --- net/key/af_key.c | 3 ++- net/xfrm/xfrm_replay.c | 3 +-- net/xfrm/xfrm_user.c | 3 ++- 3 files changed, 5 insertions(+), 4 deletions(-) (limited to 'net') diff --git a/net/key/af_key.c b/net/key/af_key.c index 9d585370c5b4..911ef03bf8fb 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c @@ -1098,7 +1098,8 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net, x->id.proto = proto; x->id.spi = sa->sadb_sa_spi; - x->props.replay_window = sa->sadb_sa_replay; + x->props.replay_window = min_t(unsigned int, sa->sadb_sa_replay, + (sizeof(x->replay.bitmap) * 8)); if (sa->sadb_sa_flags & SADB_SAFLAGS_NOECN) x->props.flags |= XFRM_STATE_NOECN; if (sa->sadb_sa_flags & SADB_SAFLAGS_DECAP_DSCP) diff --git a/net/xfrm/xfrm_replay.c b/net/xfrm/xfrm_replay.c index 8dafe6d3c6e4..eeca388effc7 100644 --- a/net/xfrm/xfrm_replay.c +++ b/net/xfrm/xfrm_replay.c @@ -129,8 +129,7 @@ static int xfrm_replay_check(struct xfrm_state *x, return 0; diff = x->replay.seq - seq; - if (diff >= min_t(unsigned int, x->props.replay_window, - sizeof(x->replay.bitmap) * 8)) { + if (diff >= x->props.replay_window) { x->stats.replay_window++; goto err; } diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index 4b26ceedff26..f964d4c00ffb 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c @@ -446,7 +446,8 @@ static void copy_from_user_state(struct xfrm_state *x, struct xfrm_usersa_info * memcpy(&x->sel, &p->sel, sizeof(x->sel)); memcpy(&x->lft, &p->lft, sizeof(x->lft)); x->props.mode = p->mode; - x->props.replay_window = p->replay_window; + x->props.replay_window = min_t(unsigned int, p->replay_window, + sizeof(x->replay.bitmap) * 8); x->props.reqid = p->reqid; x->props.family = p->family; memcpy(&x->props.saddr, &p->saddr, sizeof(x->props.saddr)); -- cgit v1.2.3-59-g8ed1b From cd808fc9a6c7cd3a4311d9d2cffc4adbeaef5f6c Mon Sep 17 00:00:00 2001 From: Thomas Egerer Date: Thu, 19 Sep 2013 13:19:19 +0200 Subject: xfrm: Fix aevent generation for each received packet If asynchronous events are enabled for a particular netlink socket, the notify function is called by the advance function. The notify function creates and dispatches a km_event if a replay timeout occurred, or at least replay_maxdiff packets have been received since the last asynchronous event has been sent. The function is supposed to return if neither of the two events were detected for a state, or replay_maxdiff is equal to zero. Replay_maxdiff is initialized in xfrm_state_construct to the value of the xfrm.sysctl_aevent_rseqth (2 by default), and updated if for a state if the netlink attribute XFRMA_REPLAY_THRESH is set. If, however, replay_maxdiff is set to zero, then all of the three notify implementations perform a break from the switch statement instead of checking whether a timeout occurred, and -- if not -- return. As a result an asynchronous event is generated for every replay update of a state that has a zero replay_maxdiff value. This patch modifies the notify functions such that they immediately return if replay_maxdiff has the value zero, unless a timeout occurred. 
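For illustration, a stand-alone sketch of the intended decision (user-space model, names made up): an asynchronous update event is only generated when replay_maxdiff is non-zero and at least replay_maxdiff packets have been processed since the last event; otherwise the update is either converted to a timeout event (XFRM_TIME_DEFER) or suppressed:

#include <stdbool.h>
#include <stdio.h>

enum aevent { AEVENT_NONE, AEVENT_TIMEOUT, AEVENT_UPDATE };

static enum aevent decide_aevent(unsigned int seq_diff, unsigned int oseq_diff,
                                 unsigned int replay_maxdiff, bool time_defer)
{
    if (!replay_maxdiff ||
        (seq_diff < replay_maxdiff && oseq_diff < replay_maxdiff))
        return time_defer ? AEVENT_TIMEOUT : AEVENT_NONE;

    return AEVENT_UPDATE;
}

int main(void)
{
    /* replay_maxdiff == 0: no update event no matter how far we advanced */
    printf("%d\n", decide_aevent(100, 100, 0, false));  /* 0 = AEVENT_NONE */
    /* threshold reached on the receive side: send the update event */
    printf("%d\n", decide_aevent(3, 0, 2, false));      /* 2 = AEVENT_UPDATE */
    return 0;
}

In the kernel the same condition is written inline in each of the three notify implementations, with the ESN variant additionally handling sequence number wrap via seq_hi/oseq_hi.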
Signed-off-by: Thomas Egerer Signed-off-by: Steffen Klassert --- net/xfrm/xfrm_replay.c | 51 ++++++++++++++++++++++++++------------------------ 1 file changed, 27 insertions(+), 24 deletions(-) (limited to 'net') diff --git a/net/xfrm/xfrm_replay.c b/net/xfrm/xfrm_replay.c index eeca388effc7..dab57daae408 100644 --- a/net/xfrm/xfrm_replay.c +++ b/net/xfrm/xfrm_replay.c @@ -61,9 +61,9 @@ static void xfrm_replay_notify(struct xfrm_state *x, int event) switch (event) { case XFRM_REPLAY_UPDATE: - if (x->replay_maxdiff && - (x->replay.seq - x->preplay.seq < x->replay_maxdiff) && - (x->replay.oseq - x->preplay.oseq < x->replay_maxdiff)) { + if (!x->replay_maxdiff || + ((x->replay.seq - x->preplay.seq < x->replay_maxdiff) && + (x->replay.oseq - x->preplay.oseq < x->replay_maxdiff))) { if (x->xflags & XFRM_TIME_DEFER) event = XFRM_REPLAY_TIMEOUT; else @@ -301,9 +301,10 @@ static void xfrm_replay_notify_bmp(struct xfrm_state *x, int event) switch (event) { case XFRM_REPLAY_UPDATE: - if (x->replay_maxdiff && - (replay_esn->seq - preplay_esn->seq < x->replay_maxdiff) && - (replay_esn->oseq - preplay_esn->oseq < x->replay_maxdiff)) { + if (!x->replay_maxdiff || + ((replay_esn->seq - preplay_esn->seq < x->replay_maxdiff) && + (replay_esn->oseq - preplay_esn->oseq + < x->replay_maxdiff))) { if (x->xflags & XFRM_TIME_DEFER) event = XFRM_REPLAY_TIMEOUT; else @@ -352,28 +353,30 @@ static void xfrm_replay_notify_esn(struct xfrm_state *x, int event) switch (event) { case XFRM_REPLAY_UPDATE: - if (!x->replay_maxdiff) - break; - - if (replay_esn->seq_hi == preplay_esn->seq_hi) - seq_diff = replay_esn->seq - preplay_esn->seq; - else - seq_diff = ~preplay_esn->seq + replay_esn->seq + 1; - - if (replay_esn->oseq_hi == preplay_esn->oseq_hi) - oseq_diff = replay_esn->oseq - preplay_esn->oseq; - else - oseq_diff = ~preplay_esn->oseq + replay_esn->oseq + 1; - - if (seq_diff < x->replay_maxdiff && - oseq_diff < x->replay_maxdiff) { + if (x->replay_maxdiff) { + if (replay_esn->seq_hi == preplay_esn->seq_hi) + seq_diff = replay_esn->seq - preplay_esn->seq; + else + seq_diff = ~preplay_esn->seq + replay_esn->seq + + 1; - if (x->xflags & XFRM_TIME_DEFER) - event = XFRM_REPLAY_TIMEOUT; + if (replay_esn->oseq_hi == preplay_esn->oseq_hi) + oseq_diff = replay_esn->oseq + - preplay_esn->oseq; else - return; + oseq_diff = ~preplay_esn->oseq + + replay_esn->oseq + 1; + + if (seq_diff >= x->replay_maxdiff || + oseq_diff >= x->replay_maxdiff) + break; } + if (x->xflags & XFRM_TIME_DEFER) + event = XFRM_REPLAY_TIMEOUT; + else + return; + break; case XFRM_REPLAY_TIMEOUT: -- cgit v1.2.3-59-g8ed1b From 03bb7f42765ce596604f03d179f3137d7df05bba Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Sun, 29 Sep 2013 21:39:33 +0200 Subject: mac80211: use sta_info_get_bss() for nl80211 tx and client probing This allows calls for clients in AP_VLANs (e.g. 
for 4-addr) to succeed Cc: stable@vger.kernel.org Signed-off-by: Felix Fietkau Signed-off-by: Johannes Berg --- net/mac80211/cfg.c | 2 +- net/mac80211/tx.c | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) (limited to 'net') diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 2e7855a1b10d..629dee7ec9bf 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -3518,7 +3518,7 @@ static int ieee80211_probe_client(struct wiphy *wiphy, struct net_device *dev, return -EINVAL; } band = chanctx_conf->def.chan->band; - sta = sta_info_get(sdata, peer); + sta = sta_info_get_bss(sdata, peer); if (sta) { qos = test_sta_flag(sta, WLAN_STA_WME); } else { diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 3456c0486b48..70b5a05c0a4e 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -1120,7 +1120,8 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata, tx->sta = rcu_dereference(sdata->u.vlan.sta); if (!tx->sta && sdata->dev->ieee80211_ptr->use_4addr) return TX_DROP; - } else if (info->flags & IEEE80211_TX_CTL_INJECTED || + } else if (info->flags & (IEEE80211_TX_CTL_INJECTED | + IEEE80211_TX_INTFL_NL80211_FRAME_TX) || tx->sdata->control_port_protocol == tx->skb->protocol) { tx->sta = sta_info_get_bss(sdata, hdr->addr1); } -- cgit v1.2.3-59-g8ed1b From 0c5b93290b2f3c7a376567c03ae8d385b0e99851 Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Sun, 29 Sep 2013 21:39:34 +0200 Subject: mac80211: update sta->last_rx on acked tx frames When clients are idle for too long, hostapd sends nullfunc frames for probing. When those are acked by the client, the idle time needs to be updated. To make this work (and to avoid unnecessary probing), update sta->last_rx whenever an ACK was received for a tx packet. Only do this if the flag IEEE80211_HW_REPORTS_TX_ACK_STATUS is set. Cc: stable@vger.kernel.org Signed-off-by: Felix Fietkau Signed-off-by: Johannes Berg --- net/mac80211/status.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'net') diff --git a/net/mac80211/status.c b/net/mac80211/status.c index 368837fe3b80..78dc2e99027e 100644 --- a/net/mac80211/status.c +++ b/net/mac80211/status.c @@ -180,6 +180,9 @@ static void ieee80211_frame_acked(struct sta_info *sta, struct sk_buff *skb) struct ieee80211_local *local = sta->local; struct ieee80211_sub_if_data *sdata = sta->sdata; + if (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) + sta->last_rx = jiffies; + if (ieee80211_is_data_qos(mgmt->frame_control)) { struct ieee80211_hdr *hdr = (void *) skb->data; u8 *qc = ieee80211_get_qos_ctl(hdr); -- cgit v1.2.3-59-g8ed1b From 22c4ceed0184318ec5a6182c6d75d398452c2e39 Mon Sep 17 00:00:00 2001 From: Jouni Malinen Date: Mon, 30 Sep 2013 12:36:05 +0300 Subject: mac80211: Run deferred scan if last roc_list item is not started mac80211 scan processing could get stuck if roc work for pending, but not started when a scan request was deferred due to such roc item. Normally the deferred scan would be started from ieee80211_start_next_roc(), but ieee80211_sw_roc_work() calls that only if the finished ROC was started. Fix this by calling ieee80211_run_deferred_scan() in the case the last ROC was not actually started. This issue was hit relatively easily in P2P find operations where Listen state (remain-on-channel) and Search state (scan) are repeated in a loop. 
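A stand-alone model of the completion path after this change (purely illustrative, names made up): when the finished item was never started and no further ROC item is queued, the deferred scan is run instead of being left pending:

#include <stdbool.h>
#include <stdio.h>

static void sw_roc_work_done(bool started, bool roc_list_empty)
{
    if (started)
        printf("start next remain-on-channel item\n");
    else if (roc_list_empty)
        printf("run deferred scan\n");          /* the case this patch adds */
    else
        printf("wait for the remaining ROC items\n");
}

int main(void)
{
    sw_roc_work_done(true, false);   /* old behaviour, unchanged */
    sw_roc_work_done(false, true);   /* previously left the scan stuck */
    return 0;
}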
Signed-off-by: Jouni Malinen Signed-off-by: Johannes Berg --- net/mac80211/offchannel.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'net') diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c index acd1f71adc03..0c2a29484c07 100644 --- a/net/mac80211/offchannel.c +++ b/net/mac80211/offchannel.c @@ -394,6 +394,8 @@ void ieee80211_sw_roc_work(struct work_struct *work) if (started) ieee80211_start_next_roc(local); + else if (list_empty(&local->roc_list)) + ieee80211_run_deferred_scan(local); } out_unlock: -- cgit v1.2.3-59-g8ed1b From e7d8f6cb2f8735693396872f4608bbe305e8baee Mon Sep 17 00:00:00 2001 From: Steffen Klassert Date: Tue, 8 Oct 2013 10:49:45 +0200 Subject: xfrm: Add refcount handling to queued policies We need to ensure that policies can't go away as long as the hold timer is armed, so take a refcont when we arm the timer and drop one if we delete it. Bug was introduced with git commit a0073fe18 ("xfrm: Add a state resolution packet queue") Signed-off-by: Steffen Klassert --- net/xfrm/xfrm_policy.c | 24 +++++++++++++++++------- 1 file changed, 17 insertions(+), 7 deletions(-) (limited to 'net') diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index ed38d5d81f9e..5f9be976770e 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -334,7 +334,8 @@ static void xfrm_policy_kill(struct xfrm_policy *policy) atomic_inc(&policy->genid); - del_timer(&policy->polq.hold_timer); + if (del_timer(&policy->polq.hold_timer)) + xfrm_pol_put(policy); xfrm_queue_purge(&policy->polq.hold_queue); if (del_timer(&policy->timer)) @@ -589,7 +590,8 @@ static void xfrm_policy_requeue(struct xfrm_policy *old, spin_lock_bh(&pq->hold_queue.lock); skb_queue_splice_init(&pq->hold_queue, &list); - del_timer(&pq->hold_timer); + if (del_timer(&pq->hold_timer)) + xfrm_pol_put(old); spin_unlock_bh(&pq->hold_queue.lock); if (skb_queue_empty(&list)) @@ -600,7 +602,8 @@ static void xfrm_policy_requeue(struct xfrm_policy *old, spin_lock_bh(&pq->hold_queue.lock); skb_queue_splice(&list, &pq->hold_queue); pq->timeout = XFRM_QUEUE_TMO_MIN; - mod_timer(&pq->hold_timer, jiffies); + if (!mod_timer(&pq->hold_timer, jiffies)) + xfrm_pol_hold(new); spin_unlock_bh(&pq->hold_queue.lock); } @@ -1787,8 +1790,9 @@ static void xfrm_policy_queue_process(unsigned long arg) goto purge_queue; pq->timeout = pq->timeout << 1; - mod_timer(&pq->hold_timer, jiffies + pq->timeout); - return; + if (!mod_timer(&pq->hold_timer, jiffies + pq->timeout)) + xfrm_pol_hold(pol); + goto out; } dst_release(dst); @@ -1819,11 +1823,14 @@ static void xfrm_policy_queue_process(unsigned long arg) err = dst_output(skb); } +out: + xfrm_pol_put(pol); return; purge_queue: pq->timeout = 0; xfrm_queue_purge(&pq->hold_queue); + xfrm_pol_put(pol); } static int xdst_queue_output(struct sk_buff *skb) @@ -1831,7 +1838,8 @@ static int xdst_queue_output(struct sk_buff *skb) unsigned long sched_next; struct dst_entry *dst = skb_dst(skb); struct xfrm_dst *xdst = (struct xfrm_dst *) dst; - struct xfrm_policy_queue *pq = &xdst->pols[0]->polq; + struct xfrm_policy *pol = xdst->pols[0]; + struct xfrm_policy_queue *pq = &pol->polq; if (pq->hold_queue.qlen > XFRM_MAX_QUEUE_LEN) { kfree_skb(skb); @@ -1850,10 +1858,12 @@ static int xdst_queue_output(struct sk_buff *skb) if (del_timer(&pq->hold_timer)) { if (time_before(pq->hold_timer.expires, sched_next)) sched_next = pq->hold_timer.expires; + xfrm_pol_put(pol); } __skb_queue_tail(&pq->hold_queue, skb); - mod_timer(&pq->hold_timer, sched_next); + if (!mod_timer(&pq->hold_timer, 
sched_next)) + xfrm_pol_hold(pol); spin_unlock_bh(&pq->hold_queue.lock); -- cgit v1.2.3-59-g8ed1b From 2bb53e2557964c2c5368a0392cf3b3b63a288cd0 Mon Sep 17 00:00:00 2001 From: Steffen Klassert Date: Tue, 8 Oct 2013 10:49:51 +0200 Subject: xfrm: check for a vaild skb in xfrm_policy_queue_process We might dreference a NULL pointer if the hold_queue is empty, so add a check to avoid this. Bug was introduced with git commit a0073fe18 ("xfrm: Add a state resolution packet queue") Signed-off-by: Steffen Klassert --- net/xfrm/xfrm_policy.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'net') diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 5f9be976770e..76e1873811d4 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -1772,6 +1772,10 @@ static void xfrm_policy_queue_process(unsigned long arg) spin_lock(&pq->hold_queue.lock); skb = skb_peek(&pq->hold_queue); + if (!skb) { + spin_unlock(&pq->hold_queue.lock); + goto out; + } dst = skb_dst(skb); sk = skb->sk; xfrm_decode_session(skb, &fl, dst->ops->family); -- cgit v1.2.3-59-g8ed1b From a754055a1296fcbe6f32de3a5eaca6efb2fd1865 Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Mon, 16 Sep 2013 11:12:07 +0300 Subject: mac80211: correctly close cancelled scans __ieee80211_scan_completed is called from a worker. This means that the following flow is possible. * driver calls ieee80211_scan_completed * mac80211 cancels the scan (that is already complete) * __ieee80211_scan_completed runs When scan_work will finally run, it will see that the scan hasn't been aborted and might even trigger another scan on another band. This leads to a situation where cfg80211's scan is not done and no further scan can be issued. Fix this by setting a new flag when a HW scan is being cancelled so that no other scan will be triggered. Cc: stable@vger.kernel.org Signed-off-by: Emmanuel Grumbach Signed-off-by: Johannes Berg --- net/mac80211/ieee80211_i.h | 3 +++ net/mac80211/scan.c | 19 +++++++++++++++++++ 2 files changed, 22 insertions(+) (limited to 'net') diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index b6186517ec56..611abfcfb5eb 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -893,6 +893,8 @@ struct tpt_led_trigger { * that the scan completed. * @SCAN_ABORTED: Set for our scan work function when the driver reported * a scan complete for an aborted scan. + * @SCAN_HW_CANCELLED: Set for our scan work function when the scan is being + * cancelled. */ enum { SCAN_SW_SCANNING, @@ -900,6 +902,7 @@ enum { SCAN_ONCHANNEL_SCANNING, SCAN_COMPLETED, SCAN_ABORTED, + SCAN_HW_CANCELLED, }; /** diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c index 08afe74b98f4..d2d17a449224 100644 --- a/net/mac80211/scan.c +++ b/net/mac80211/scan.c @@ -238,6 +238,9 @@ static bool ieee80211_prep_hw_scan(struct ieee80211_local *local) enum ieee80211_band band; int i, ielen, n_chans; + if (test_bit(SCAN_HW_CANCELLED, &local->scanning)) + return false; + do { if (local->hw_scan_band == IEEE80211_NUM_BANDS) return false; @@ -940,7 +943,23 @@ void ieee80211_scan_cancel(struct ieee80211_local *local) if (!local->scan_req) goto out; + /* + * We have a scan running and the driver already reported completion, + * but the worker hasn't run yet or is stuck on the mutex - mark it as + * cancelled. 
+ */ + if (test_bit(SCAN_HW_SCANNING, &local->scanning) && + test_bit(SCAN_COMPLETED, &local->scanning)) { + set_bit(SCAN_HW_CANCELLED, &local->scanning); + goto out; + } + if (test_bit(SCAN_HW_SCANNING, &local->scanning)) { + /* + * Make sure that __ieee80211_scan_completed doesn't trigger a + * scan on another band. + */ + set_bit(SCAN_HW_CANCELLED, &local->scanning); if (local->ops->cancel_hw_scan) drv_cancel_hw_scan(local, rcu_dereference_protected(local->scan_sdata, -- cgit v1.2.3-59-g8ed1b From f38dd58ccca0d612e62509f75e99952dcf316cb2 Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Tue, 17 Sep 2013 15:20:13 +0300 Subject: cfg80211: don't add p2p device while in RFKILL Since P2P device doesn't have a netdev associated to it, we cannot prevent the user to start it when in RFKILL. So refuse to even add it when in RFKILL. Signed-off-by: Emmanuel Grumbach Signed-off-by: Johannes Berg --- net/wireless/core.c | 2 -- net/wireless/core.h | 3 +++ 2 files changed, 3 insertions(+), 2 deletions(-) (limited to 'net') diff --git a/net/wireless/core.c b/net/wireless/core.c index fe8d4f2be49b..aff959e5a1b3 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c @@ -958,8 +958,6 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb, case NETDEV_PRE_UP: if (!(wdev->wiphy->interface_modes & BIT(wdev->iftype))) return notifier_from_errno(-EOPNOTSUPP); - if (rfkill_blocked(rdev->rfkill)) - return notifier_from_errno(-ERFKILL); ret = cfg80211_can_add_interface(rdev, wdev->iftype); if (ret) return notifier_from_errno(ret); diff --git a/net/wireless/core.h b/net/wireless/core.h index 9ad43c619c54..3159e9c284c5 100644 --- a/net/wireless/core.h +++ b/net/wireless/core.h @@ -411,6 +411,9 @@ static inline int cfg80211_can_add_interface(struct cfg80211_registered_device *rdev, enum nl80211_iftype iftype) { + if (rfkill_blocked(rdev->rfkill)) + return -ERFKILL; + return cfg80211_can_change_interface(rdev, NULL, iftype); } -- cgit v1.2.3-59-g8ed1b From cb03db9d0e964568407fb08ea46cc2b6b7f67587 Mon Sep 17 00:00:00 2001 From: Fabio Estevam Date: Sat, 5 Oct 2013 17:56:59 -0300 Subject: net: secure_seq: Fix warning when CONFIG_IPV6 and CONFIG_INET are not selected net_secret() is only used when CONFIG_IPV6 or CONFIG_INET are selected. Building a defconfig with both of these symbols unselected (Using the ARM at91sam9rl_defconfig, for example) leads to the following build warning: $ make at91sam9rl_defconfig # # configuration written to .config # $ make net/core/secure_seq.o scripts/kconfig/conf --silentoldconfig Kconfig CHK include/config/kernel.release CHK include/generated/uapi/linux/version.h CHK include/generated/utsrelease.h make[1]: `include/generated/mach-types.h' is up to date. CALL scripts/checksyscalls.sh CC net/core/secure_seq.o net/core/secure_seq.c:17:13: warning: 'net_secret_init' defined but not used [-Wunused-function] Fix this warning by protecting the definition of net_secret() with these symbols. Reported-by: Olof Johansson Signed-off-by: Fabio Estevam Signed-off-by: David S. 
Miller --- net/core/secure_seq.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'net') diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c index 3f1ec1586ae1..8d9d05edd2eb 100644 --- a/net/core/secure_seq.c +++ b/net/core/secure_seq.c @@ -10,6 +10,7 @@ #include +#if IS_ENABLED(CONFIG_IPV6) || IS_ENABLED(CONFIG_INET) #define NET_SECRET_SIZE (MD5_MESSAGE_BYTES / 4) static u32 net_secret[NET_SECRET_SIZE] ____cacheline_aligned; @@ -29,6 +30,7 @@ static void net_secret_init(void) cmpxchg(&net_secret[--i], 0, tmp); } } +#endif #ifdef CONFIG_INET static u32 seq_scale(u32 seq) -- cgit v1.2.3-59-g8ed1b From f144febd93d5ee534fdf23505ab091b2b9088edc Mon Sep 17 00:00:00 2001 From: Vlad Yasevich Date: Thu, 10 Oct 2013 15:57:59 -0400 Subject: bridge: update mdb expiration timer upon reports. commit 9f00b2e7cf241fa389733d41b615efdaa2cb0f5b bridge: only expire the mdb entry when query is received changed the mdb expiration timer to be armed only when QUERY is received. Howerver, this causes issues in an environment where the multicast server socket comes and goes very fast while a client is trying to send traffic to it. The root cause is a race where a sequence of LEAVE followed by REPORT messages can race against QUERY messages generated in response to LEAVE. The QUERY ends up starting the expiration timer, and that timer can potentially expire after the new REPORT message has been received signaling the new join operation. This leads to a significant drop in multicast traffic and possible complete stall. The solution is to have REPORT messages update the expiration timer on entries that already exist. CC: Cong Wang CC: Herbert Xu CC: Stephen Hemminger Signed-off-by: Vlad Yasevich Acked-by: Herbert Xu Signed-off-by: David S. Miller --- net/bridge/br_multicast.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) (limited to 'net') diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index d1c578630678..1085f2180f3a 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c @@ -611,6 +611,9 @@ rehash: break; default: + /* If we have an existing entry, update it's expire timer */ + mod_timer(&mp->timer, + jiffies + br->multicast_membership_interval); goto out; } @@ -680,8 +683,12 @@ static int br_multicast_add_group(struct net_bridge *br, for (pp = &mp->ports; (p = mlock_dereference(*pp, br)) != NULL; pp = &p->next) { - if (p->port == port) + if (p->port == port) { + /* We already have a portgroup, update the timer. */ + mod_timer(&p->timer, + jiffies + br->multicast_membership_interval); goto out; + } if ((unsigned long)p->port < (unsigned long)port) break; } -- cgit v1.2.3-59-g8ed1b From 7263a5187f9e9de45fcb51349cf0e031142c19a1 Mon Sep 17 00:00:00 2001 From: Christophe Gouault Date: Tue, 8 Oct 2013 17:21:22 +0200 Subject: vti: get rid of nf mark rule in prerouting This patch fixes and improves the use of vti interfaces (while lightly changing the way of configuring them). Currently: - it is necessary to identify and mark inbound IPsec packets destined to each vti interface, via netfilter rules in the mangle table at prerouting hook. - the vti module cannot retrieve the right tunnel in input since commit b9959fd3: vti tunnels all have an i_key, but the tunnel lookup is done with flag TUNNEL_NO_KEY, so there no chance to retrieve them. - the i_key is used by the outbound processing as a mark to lookup for the right SP and SA bundle. 
This patch uses the o_key to store the vti mark (instead of i_key) and enables: - to avoid the need for previously marking the inbound skbuffs via a netfilter rule. - to properly retrieve the right tunnel in input, only based on the IPsec packet outer addresses. - to properly perform an inbound policy check (using the tunnel o_key as a mark). - to properly perform an outbound SPD and SAD lookup (using the tunnel o_key as a mark). - to keep the current mark of the skbuff. The skbuff mark is neither used nor changed by the vti interface. Only the vti interface o_key is used. SAs have a wildcard mark. SPs have a mark equal to the vti interface o_key. The vti interface must be created as follows (i_key = 0, o_key = mark): ip link add vti1 mode vti local 1.1.1.1 remote 2.2.2.2 okey 1 The SPs attached to vti1 must be created as follows (mark = vti1 o_key): ip xfrm policy add dir out mark 1 tmpl src 1.1.1.1 dst 2.2.2.2 \ proto esp mode tunnel ip xfrm policy add dir in mark 1 tmpl src 2.2.2.2 dst 1.1.1.1 \ proto esp mode tunnel The SAs are created with the default wildcard mark. There is no distinction between global vs. vti SAs. Just their addresses will possibly link them to a vti interface: ip xfrm state add src 1.1.1.1 dst 2.2.2.2 proto esp spi 1000 mode tunnel \ enc "cbc(aes)" "azertyuiopqsdfgh" ip xfrm state add src 2.2.2.2 dst 1.1.1.1 proto esp spi 2000 mode tunnel \ enc "cbc(aes)" "sqbdhgqsdjqjsdfh" To avoid matching "global" (not vti) SPs in vti interfaces, global SPs should no use the default wildcard mark, but explicitly match mark 0. To avoid a double SPD lookup in input and output (in global and vti SPDs), the NOPOLICY and NOXFRM options should be set on the vti interfaces: echo 1 > /proc/sys/net/ipv4/conf/vti1/disable_policy echo 1 > /proc/sys/net/ipv4/conf/vti1/disable_xfrm The outgoing traffic is steered to vti1 by a route via the vti interface: ip route add 192.168.0.0/16 dev vti1 The incoming IPsec traffic is steered to vti1 because its outer addresses match the vti1 tunnel configuration. Signed-off-by: Christophe Gouault Signed-off-by: David S. Miller --- net/ipv4/ip_vti.c | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) (limited to 'net') diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c index e805e7b3030e..6e87f853d033 100644 --- a/net/ipv4/ip_vti.c +++ b/net/ipv4/ip_vti.c @@ -125,8 +125,17 @@ static int vti_rcv(struct sk_buff *skb) iph->saddr, iph->daddr, 0); if (tunnel != NULL) { struct pcpu_tstats *tstats; + u32 oldmark = skb->mark; + int ret; - if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) + + /* temporarily mark the skb with the tunnel o_key, to + * only match policies with this mark. 
+ */ + skb->mark = be32_to_cpu(tunnel->parms.o_key); + ret = xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb); + skb->mark = oldmark; + if (!ret) return -1; tstats = this_cpu_ptr(tunnel->dev->tstats); @@ -135,7 +144,6 @@ static int vti_rcv(struct sk_buff *skb) tstats->rx_bytes += skb->len; u64_stats_update_end(&tstats->syncp); - skb->mark = 0; secpath_reset(skb); skb->dev = tunnel->dev; return 1; @@ -167,7 +175,7 @@ static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) memset(&fl4, 0, sizeof(fl4)); flowi4_init_output(&fl4, tunnel->parms.link, - be32_to_cpu(tunnel->parms.i_key), RT_TOS(tos), + be32_to_cpu(tunnel->parms.o_key), RT_TOS(tos), RT_SCOPE_UNIVERSE, IPPROTO_IPIP, 0, dst, tiph->saddr, 0, 0); -- cgit v1.2.3-59-g8ed1b From 455cc32bf128e114455d11ad919321ab89a2c312 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 10 Oct 2013 06:30:09 -0700 Subject: l2tp: must disable bh before calling l2tp_xmit_skb() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit François Cachereul made a very nice bug report and suspected the bh_lock_sock() / bh_unlok_sock() pair used in l2tp_xmit_skb() from process context was not good. This problem was added by commit 6af88da14ee284aaad6e4326da09a89191ab6165 ("l2tp: Fix locking in l2tp_core.c"). l2tp_eth_dev_xmit() runs from BH context, so we must disable BH from other l2tp_xmit_skb() users. [ 452.060011] BUG: soft lockup - CPU#1 stuck for 23s! [accel-pppd:6662] [ 452.061757] Modules linked in: l2tp_ppp l2tp_netlink l2tp_core pppoe pppox ppp_generic slhc ipv6 ext3 mbcache jbd virtio_balloon xfs exportfs dm_mod virtio_blk ata_generic virtio_net floppy ata_piix libata virtio_pci virtio_ring virtio [last unloaded: scsi_wait_scan] [ 452.064012] CPU 1 [ 452.080015] BUG: soft lockup - CPU#2 stuck for 23s! [accel-pppd:6643] [ 452.080015] CPU 2 [ 452.080015] [ 452.080015] Pid: 6643, comm: accel-pppd Not tainted 3.2.46.mini #1 Bochs Bochs [ 452.080015] RIP: 0010:[] [] do_raw_spin_lock+0x17/0x1f [ 452.080015] RSP: 0018:ffff88007125fc18 EFLAGS: 00000293 [ 452.080015] RAX: 000000000000aba9 RBX: ffffffff811d0703 RCX: 0000000000000000 [ 452.080015] RDX: 00000000000000ab RSI: ffff8800711f6896 RDI: ffff8800745c8110 [ 452.080015] RBP: ffff88007125fc18 R08: 0000000000000020 R09: 0000000000000000 [ 452.080015] R10: 0000000000000000 R11: 0000000000000280 R12: 0000000000000286 [ 452.080015] R13: 0000000000000020 R14: 0000000000000240 R15: 0000000000000000 [ 452.080015] FS: 00007fdc0cc24700(0000) GS:ffff8800b6f00000(0000) knlGS:0000000000000000 [ 452.080015] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 452.080015] CR2: 00007fdb054899b8 CR3: 0000000074404000 CR4: 00000000000006a0 [ 452.080015] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 [ 452.080015] DR3: 0000000000000000 DR6: 00000000ffff0ff0 DR7: 0000000000000400 [ 452.080015] Process accel-pppd (pid: 6643, threadinfo ffff88007125e000, task ffff8800b27e6dd0) [ 452.080015] Stack: [ 452.080015] ffff88007125fc28 ffffffff81256559 ffff88007125fc98 ffffffffa01b2bd1 [ 452.080015] ffff88007125fc58 000000000000000c 00000000029490d0 0000009c71dbe25e [ 452.080015] 000000000000005c 000000080000000e 0000000000000000 ffff880071170600 [ 452.080015] Call Trace: [ 452.080015] [] _raw_spin_lock+0xe/0x10 [ 452.080015] [] l2tp_xmit_skb+0x189/0x4ac [l2tp_core] [ 452.080015] [] pppol2tp_sendmsg+0x15e/0x19c [l2tp_ppp] [ 452.080015] [] __sock_sendmsg_nosec+0x22/0x24 [ 452.080015] [] sock_sendmsg+0xa1/0xb6 [ 452.080015] [] ? 
__schedule+0x5c1/0x616 [ 452.080015] [] ? __dequeue_signal+0xb7/0x10c [ 452.080015] [] ? fget_light+0x75/0x89 [ 452.080015] [] ? sockfd_lookup_light+0x20/0x56 [ 452.080015] [] sys_sendto+0x10c/0x13b [ 452.080015] [] system_call_fastpath+0x16/0x1b [ 452.080015] Code: 81 48 89 e5 72 0c 31 c0 48 81 ff 45 66 25 81 0f 92 c0 5d c3 55 b8 00 01 00 00 48 89 e5 f0 66 0f c1 07 0f b6 d4 38 d0 74 06 f3 90 <8a> 07 eb f6 5d c3 90 90 55 48 89 e5 9c 58 0f 1f 44 00 00 5d c3 [ 452.080015] Call Trace: [ 452.080015] [] _raw_spin_lock+0xe/0x10 [ 452.080015] [] l2tp_xmit_skb+0x189/0x4ac [l2tp_core] [ 452.080015] [] pppol2tp_sendmsg+0x15e/0x19c [l2tp_ppp] [ 452.080015] [] __sock_sendmsg_nosec+0x22/0x24 [ 452.080015] [] sock_sendmsg+0xa1/0xb6 [ 452.080015] [] ? __schedule+0x5c1/0x616 [ 452.080015] [] ? __dequeue_signal+0xb7/0x10c [ 452.080015] [] ? fget_light+0x75/0x89 [ 452.080015] [] ? sockfd_lookup_light+0x20/0x56 [ 452.080015] [] sys_sendto+0x10c/0x13b [ 452.080015] [] system_call_fastpath+0x16/0x1b [ 452.064012] [ 452.064012] Pid: 6662, comm: accel-pppd Not tainted 3.2.46.mini #1 Bochs Bochs [ 452.064012] RIP: 0010:[] [] do_raw_spin_lock+0x19/0x1f [ 452.064012] RSP: 0018:ffff8800b6e83ba0 EFLAGS: 00000297 [ 452.064012] RAX: 000000000000aaa9 RBX: ffff8800b6e83b40 RCX: 0000000000000002 [ 452.064012] RDX: 00000000000000aa RSI: 000000000000000a RDI: ffff8800745c8110 [ 452.064012] RBP: ffff8800b6e83ba0 R08: 000000000000c802 R09: 000000000000001c [ 452.064012] R10: ffff880071096c4e R11: 0000000000000006 R12: ffff8800b6e83b18 [ 452.064012] R13: ffffffff8125d51e R14: ffff8800b6e83ba0 R15: ffff880072a589c0 [ 452.064012] FS: 00007fdc0b81e700(0000) GS:ffff8800b6e80000(0000) knlGS:0000000000000000 [ 452.064012] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 452.064012] CR2: 0000000000625208 CR3: 0000000074404000 CR4: 00000000000006a0 [ 452.064012] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 [ 452.064012] DR3: 0000000000000000 DR6: 00000000ffff0ff0 DR7: 0000000000000400 [ 452.064012] Process accel-pppd (pid: 6662, threadinfo ffff88007129a000, task ffff8800744f7410) [ 452.064012] Stack: [ 452.064012] ffff8800b6e83bb0 ffffffff81256559 ffff8800b6e83bc0 ffffffff8121c64a [ 452.064012] ffff8800b6e83bf0 ffffffff8121ec7a ffff880072a589c0 ffff880071096c62 [ 452.064012] 0000000000000011 ffffffff81430024 ffff8800b6e83c80 ffffffff8121f276 [ 452.064012] Call Trace: [ 452.064012] [ 452.064012] [] _raw_spin_lock+0xe/0x10 [ 452.064012] [] spin_lock+0x9/0xb [ 452.064012] [] udp_queue_rcv_skb+0x186/0x269 [ 452.064012] [] __udp4_lib_rcv+0x297/0x4ae [ 452.064012] [] ? raw_rcv+0xe9/0xf0 [ 452.064012] [] udp_rcv+0x1a/0x1c [ 452.064012] [] ip_local_deliver_finish+0x12b/0x1a5 [ 452.064012] [] ip_local_deliver+0x53/0x84 [ 452.064012] [] ip_rcv_finish+0x2bc/0x2f3 [ 452.064012] [] ip_rcv+0x210/0x269 [ 452.064012] [] ? kvm_clock_get_cycles+0x9/0xb [ 452.064012] [] __netif_receive_skb+0x3a5/0x3f7 [ 452.064012] [] netif_receive_skb+0x57/0x5e [ 452.064012] [] ? __netdev_alloc_skb+0x1f/0x3b [ 452.064012] [] virtnet_poll+0x4ba/0x5a4 [virtio_net] [ 452.064012] [] net_rx_action+0x73/0x184 [ 452.064012] [] ? l2tp_xmit_skb+0x27a/0x4ac [l2tp_core] [ 452.064012] [] __do_softirq+0xc3/0x1a8 [ 452.064012] [] ? ack_APIC_irq+0x10/0x12 [ 452.064012] [] ? _raw_spin_lock+0xe/0x10 [ 452.064012] [] call_softirq+0x1c/0x26 [ 452.064012] [] do_softirq+0x45/0x82 [ 452.064012] [] irq_exit+0x42/0x9c [ 452.064012] [] do_IRQ+0x8e/0xa5 [ 452.064012] [] common_interrupt+0x6e/0x6e [ 452.064012] [ 452.064012] [] ? kfree+0x8a/0xa3 [ 452.064012] [] ? 
l2tp_xmit_skb+0x27a/0x4ac [l2tp_core] [ 452.064012] [] ? l2tp_xmit_skb+0x1dd/0x4ac [l2tp_core] [ 452.064012] [] pppol2tp_sendmsg+0x15e/0x19c [l2tp_ppp] [ 452.064012] [] __sock_sendmsg_nosec+0x22/0x24 [ 452.064012] [] sock_sendmsg+0xa1/0xb6 [ 452.064012] [] ? __schedule+0x5c1/0x616 [ 452.064012] [] ? __dequeue_signal+0xb7/0x10c [ 452.064012] [] ? fget_light+0x75/0x89 [ 452.064012] [] ? sockfd_lookup_light+0x20/0x56 [ 452.064012] [] sys_sendto+0x10c/0x13b [ 452.064012] [] system_call_fastpath+0x16/0x1b [ 452.064012] Code: 89 e5 72 0c 31 c0 48 81 ff 45 66 25 81 0f 92 c0 5d c3 55 b8 00 01 00 00 48 89 e5 f0 66 0f c1 07 0f b6 d4 38 d0 74 06 f3 90 8a 07 f6 5d c3 90 90 55 48 89 e5 9c 58 0f 1f 44 00 00 5d c3 55 48 [ 452.064012] Call Trace: [ 452.064012] [] _raw_spin_lock+0xe/0x10 [ 452.064012] [] spin_lock+0x9/0xb [ 452.064012] [] udp_queue_rcv_skb+0x186/0x269 [ 452.064012] [] __udp4_lib_rcv+0x297/0x4ae [ 452.064012] [] ? raw_rcv+0xe9/0xf0 [ 452.064012] [] udp_rcv+0x1a/0x1c [ 452.064012] [] ip_local_deliver_finish+0x12b/0x1a5 [ 452.064012] [] ip_local_deliver+0x53/0x84 [ 452.064012] [] ip_rcv_finish+0x2bc/0x2f3 [ 452.064012] [] ip_rcv+0x210/0x269 [ 452.064012] [] ? kvm_clock_get_cycles+0x9/0xb [ 452.064012] [] __netif_receive_skb+0x3a5/0x3f7 [ 452.064012] [] netif_receive_skb+0x57/0x5e [ 452.064012] [] ? __netdev_alloc_skb+0x1f/0x3b [ 452.064012] [] virtnet_poll+0x4ba/0x5a4 [virtio_net] [ 452.064012] [] net_rx_action+0x73/0x184 [ 452.064012] [] ? l2tp_xmit_skb+0x27a/0x4ac [l2tp_core] [ 452.064012] [] __do_softirq+0xc3/0x1a8 [ 452.064012] [] ? ack_APIC_irq+0x10/0x12 [ 452.064012] [] ? _raw_spin_lock+0xe/0x10 [ 452.064012] [] call_softirq+0x1c/0x26 [ 452.064012] [] do_softirq+0x45/0x82 [ 452.064012] [] irq_exit+0x42/0x9c [ 452.064012] [] do_IRQ+0x8e/0xa5 [ 452.064012] [] common_interrupt+0x6e/0x6e [ 452.064012] [] ? kfree+0x8a/0xa3 [ 452.064012] [] ? l2tp_xmit_skb+0x27a/0x4ac [l2tp_core] [ 452.064012] [] ? l2tp_xmit_skb+0x1dd/0x4ac [l2tp_core] [ 452.064012] [] pppol2tp_sendmsg+0x15e/0x19c [l2tp_ppp] [ 452.064012] [] __sock_sendmsg_nosec+0x22/0x24 [ 452.064012] [] sock_sendmsg+0xa1/0xb6 [ 452.064012] [] ? __schedule+0x5c1/0x616 [ 452.064012] [] ? __dequeue_signal+0xb7/0x10c [ 452.064012] [] ? fget_light+0x75/0x89 [ 452.064012] [] ? sockfd_lookup_light+0x20/0x56 [ 452.064012] [] sys_sendto+0x10c/0x13b [ 452.064012] [] system_call_fastpath+0x16/0x1b Reported-by: François Cachereul Tested-by: François Cachereul Signed-off-by: Eric Dumazet Cc: James Chapman Signed-off-by: David S. 
Miller --- net/l2tp/l2tp_ppp.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'net') diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c index 5ebee2ded9e9..8c46b271064a 100644 --- a/net/l2tp/l2tp_ppp.c +++ b/net/l2tp/l2tp_ppp.c @@ -353,7 +353,9 @@ static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msgh goto error_put_sess_tun; } + local_bh_disable(); l2tp_xmit_skb(session, skb, session->hdr_len); + local_bh_enable(); sock_put(ps->tunnel_sock); sock_put(sk); @@ -422,7 +424,9 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb) skb->data[0] = ppph[0]; skb->data[1] = ppph[1]; + local_bh_disable(); l2tp_xmit_skb(session, skb, session->hdr_len); + local_bh_enable(); sock_put(sk_tun); sock_put(sk); -- cgit v1.2.3-59-g8ed1b From 638a52b801e40ed276ceb69b73579ad99365361a Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Sun, 6 Oct 2013 15:15:33 -0700 Subject: netem: update backlog after drop MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When packet is dropped from rb-tree netem the backlog statistic should also be updated. Reported-by: Сергеев Сергей Signed-off-by: Stephen Hemminger Acked-by: Eric Dumazet Signed-off-by: David S. Miller --- net/sched/sch_netem.c | 1 + 1 file changed, 1 insertion(+) (limited to 'net') diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c index a6d788d45216..6b75b6733e8e 100644 --- a/net/sched/sch_netem.c +++ b/net/sched/sch_netem.c @@ -520,6 +520,7 @@ static unsigned int netem_drop(struct Qdisc *sch) skb->next = NULL; skb->prev = NULL; len = qdisc_pkt_len(skb); + sch->qstats.backlog -= len; kfree_skb(skb); } } -- cgit v1.2.3-59-g8ed1b From ff704050f2fc0f3382b5a70bba56a51a3feca79d Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Sun, 6 Oct 2013 15:16:49 -0700 Subject: netem: free skb's in tree on reset MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Netem can leak memory because packets get stored in red-black tree and it is not cleared on reset. Reported by: Сергеев Сергей Signed-off-by: Stephen Hemminger Signed-off-by: David S. 
Miller --- net/sched/sch_netem.c | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) (limited to 'net') diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c index 6b75b6733e8e..b87e83d07478 100644 --- a/net/sched/sch_netem.c +++ b/net/sched/sch_netem.c @@ -358,6 +358,21 @@ static psched_time_t packet_len_2_sched_time(unsigned int len, struct netem_sche return PSCHED_NS2TICKS(ticks); } +static void tfifo_reset(struct Qdisc *sch) +{ + struct netem_sched_data *q = qdisc_priv(sch); + struct rb_node *p; + + while ((p = rb_first(&q->t_root))) { + struct sk_buff *skb = netem_rb_to_skb(p); + + rb_erase(p, &q->t_root); + skb->next = NULL; + skb->prev = NULL; + kfree_skb(skb); + } +} + static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch) { struct netem_sched_data *q = qdisc_priv(sch); @@ -610,6 +625,7 @@ static void netem_reset(struct Qdisc *sch) struct netem_sched_data *q = qdisc_priv(sch); qdisc_reset_queue(sch); + tfifo_reset(sch); if (q->qdisc) qdisc_reset(q->qdisc); qdisc_watchdog_cancel(&q->watchdog); -- cgit v1.2.3-59-g8ed1b From bf58175954f2d390300df00f96070312d1b15096 Mon Sep 17 00:00:00 2001 From: Oussama Ghorbel Date: Thu, 10 Oct 2013 18:50:27 +0100 Subject: ipv6: Initialize ip6_tnl.hlen in gre tunnel even if no route is found The ip6_tnl.hlen (gre and ipv6 headers length) is independent from the outgoing interface, so it would be better to initialize it even when no route is found, otherwise its value will be zero. While I'm not sure if this could happen in real life, but doing that will avoid to call the skb_push function with a zero in ip6gre_header function. Suggested-by: Hannes Frederic Sowa Signed-off-by: Oussama Ghorbel Acked-by: Hannes Frederic Sowa Signed-off-by: David S. Miller --- net/ipv6/ip6_gre.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'net') diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index 1ef1fa2b22a6..bf4a9a084de5 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c @@ -976,6 +976,7 @@ static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu) if (t->parms.o_flags&GRE_SEQ) addend += 4; } + t->hlen = addend; if (p->flags & IP6_TNL_F_CAP_XMIT) { int strict = (ipv6_addr_type(&p->raddr) & @@ -1002,8 +1003,6 @@ static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu) } ip6_rt_put(rt); } - - t->hlen = addend; } static int ip6gre_tnl_change(struct ip6_tnl *t, -- cgit v1.2.3-59-g8ed1b From f5563318ff1bde15b10e736e97ffce13be08bc1a Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Fri, 11 Oct 2013 14:47:05 +0200 Subject: wireless: radiotap: fix parsing buffer overrun When parsing an invalid radiotap header, the parser can overrun the buffer that is passed in because it doesn't correctly check 1) the minimum radiotap header size 2) the space for extended bitmaps The first issue doesn't affect any in-kernel user as they all check the minimum size before calling the radiotap function. The second issue could potentially affect the kernel if an skb is passed in that consists only of the radiotap header with a lot of extended bitmaps that extend past the SKB. In that case a read-only buffer overrun by at most 4 bytes is possible. Fix this by adding the appropriate checks to the parser. 
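To show the shape of the two checks in isolation, here is a stand-alone user-space sketch (simplified, hypothetical helper; the real parser lives in net/wireless/radiotap.c): the buffer must be at least as large as the fixed radiotap header, and every extended it_present word the parser walks over must still lie fully inside the buffer:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct radiotap_header {            /* fixed 8-byte radiotap header */
    uint8_t  it_version;
    uint8_t  it_pad;
    uint16_t it_len;                /* little endian on the wire */
    uint32_t it_present;            /* bit 31: another bitmap follows */
};

static uint32_t get_le32(const uint8_t *p)
{
    return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
           ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

/* Hypothetical validation helper: reject buffers that cannot even hold
 * the header, and it_present chains that run past the end of the buffer.
 */
static bool radiotap_bitmaps_fit(const uint8_t *buf, size_t buf_len)
{
    size_t off = offsetof(struct radiotap_header, it_present);
    uint32_t present;

    if (buf_len < sizeof(struct radiotap_header))
        return false;                    /* header itself is truncated */

    do {
        if (off + sizeof(uint32_t) > buf_len)
            return false;                /* extended bitmap would overrun */
        present = get_le32(buf + off);
        off += sizeof(uint32_t);
    } while (present & 0x80000000u);     /* "ext" bit, field 31 */

    return true;
}

int main(void)
{
    uint8_t plain[8] = { 0, 0, 8, 0, 0, 0, 0, 0 };
    uint8_t bad[8]   = { 0, 0, 8, 0, 0, 0, 0, 0x80 }; /* promises a bitmap it lacks */

    printf("plain header ok:        %d\n", radiotap_bitmaps_fit(plain, sizeof(plain))); /* 1 */
    printf("truncated extension ok: %d\n", radiotap_bitmaps_fit(bad, sizeof(bad)));     /* 0 */
    return 0;
}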
Cc: stable@vger.kernel.org Reported-by: Evan Huus Signed-off-by: Johannes Berg --- net/wireless/radiotap.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'net') diff --git a/net/wireless/radiotap.c b/net/wireless/radiotap.c index 7d604c06c3dc..a271c27fac77 100644 --- a/net/wireless/radiotap.c +++ b/net/wireless/radiotap.c @@ -97,6 +97,10 @@ int ieee80211_radiotap_iterator_init( struct ieee80211_radiotap_header *radiotap_header, int max_length, const struct ieee80211_radiotap_vendor_namespaces *vns) { + /* check the radiotap header can actually be present */ + if (max_length < sizeof(struct ieee80211_radiotap_header)) + return -EINVAL; + /* Linux only supports version 0 radiotap format */ if (radiotap_header->it_version) return -EINVAL; @@ -131,7 +135,8 @@ int ieee80211_radiotap_iterator_init( */ if ((unsigned long)iterator->_arg - - (unsigned long)iterator->_rtheader > + (unsigned long)iterator->_rtheader + + sizeof(uint32_t) > (unsigned long)iterator->_max_length) return -EINVAL; } -- cgit v1.2.3-59-g8ed1b From d86aa4f8ca58898ec6a94c0635da20b948171ed7 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Fri, 11 Oct 2013 15:47:06 +0200 Subject: mac80211: fix crash if bitrate calculation goes wrong If a frame's timestamp is calculated, and the bitrate calculation goes wrong and returns zero, the system will attempt to divide by zero and crash. Catch this case and print the rate information that the driver reported when this happens. Cc: stable@vger.kernel.org Reported-by: Thomas Lindroth Signed-off-by: Johannes Berg --- net/mac80211/util.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'net') diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 9c3200bcfc02..69e4ef5348a0 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -2238,6 +2238,10 @@ u64 ieee80211_calculate_rx_timestamp(struct ieee80211_local *local, } rate = cfg80211_calculate_bitrate(&ri); + if (WARN_ONCE(!rate, + "Invalid bitrate: flags=0x%x, idx=%d, vht_nss=%d\n", + status->flag, status->rate_idx, status->vht_nss)) + return 0; /* rewind from end of MPDU */ if (status->flag & RX_FLAG_MACTIME_END) -- cgit v1.2.3-59-g8ed1b From 27127a82561a2a3ed955ce207048e1b066a80a2a Mon Sep 17 00:00:00 2001 From: Fan Du Date: Tue, 15 Oct 2013 22:01:30 -0400 Subject: sctp: Use software crc32 checksum when xfrm transform will happen. igb/ixgbe have hardware sctp checksum support, when this feature is enabled and also IPsec is armed to protect sctp traffic, ugly things happened as xfrm_output checks CHECKSUM_PARTIAL to do checksum operation(sum every thing up and pack the 16bits result in the checksum field). The result is fail establishment of sctp communication. Cc: Neil Horman Cc: Steffen Klassert Signed-off-by: Fan Du Signed-off-by: Vlad Yasevich Acked-by: Neil Horman Signed-off-by: David S. Miller --- net/sctp/output.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'net') diff --git a/net/sctp/output.c b/net/sctp/output.c index 0ac3a65daccb..d35b54cb3020 100644 --- a/net/sctp/output.c +++ b/net/sctp/output.c @@ -536,7 +536,8 @@ int sctp_packet_transmit(struct sctp_packet *packet) * by CRC32-C as described in . 
*/ if (!sctp_checksum_disable) { - if (!(dst->dev->features & NETIF_F_SCTP_CSUM)) { + if (!(dst->dev->features & NETIF_F_SCTP_CSUM) || + (dst_xfrm(dst) != NULL)) { __u32 crc32 = sctp_start_cksum((__u8 *)sh, cksum_buf_len); /* 3) Put the resultant value into the checksum field in the -- cgit v1.2.3-59-g8ed1b From d2dbbba77e95dff4b4f901fee236fef6d9552072 Mon Sep 17 00:00:00 2001 From: Vlad Yasevich Date: Tue, 15 Oct 2013 22:01:31 -0400 Subject: sctp: Perform software checksum if packet has to be fragmented. IP/IPv6 fragmentation knows how to compute only TCP/UDP checksum. This causes problems if SCTP packets has to be fragmented and ipsummed has been set to PARTIAL due to checksum offload support. This condition can happen when retransmitting after MTU discover, or when INIT or other control chunks are larger then MTU. Check for the rare fragmentation condition in SCTP and use software checksum calculation in this case. CC: Fan Du Signed-off-by: Vlad Yasevich Acked-by: Neil Horman Signed-off-by: David S. Miller --- net/sctp/output.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net') diff --git a/net/sctp/output.c b/net/sctp/output.c index d35b54cb3020..319137340d15 100644 --- a/net/sctp/output.c +++ b/net/sctp/output.c @@ -537,7 +537,7 @@ int sctp_packet_transmit(struct sctp_packet *packet) */ if (!sctp_checksum_disable) { if (!(dst->dev->features & NETIF_F_SCTP_CSUM) || - (dst_xfrm(dst) != NULL)) { + (dst_xfrm(dst) != NULL) || packet->ipfragok) { __u32 crc32 = sctp_start_cksum((__u8 *)sh, cksum_buf_len); /* 3) Put the resultant value into the checksum field in the -- cgit v1.2.3-59-g8ed1b From 031afe4990a7c9dbff41a3a742c44d3e740ea0a1 Mon Sep 17 00:00:00 2001 From: Yuchung Cheng Date: Sat, 12 Oct 2013 10:16:27 -0700 Subject: tcp: fix incorrect ca_state in tail loss probe On receiving an ACK that covers the loss probe sequence, TLP immediately sets the congestion state to Open, even though some packets are not recovered and retransmisssion are on the way. The later ACks may trigger a WARN_ON check in step D of tcp_fastretrans_alert(), e.g., https://bugzilla.redhat.com/show_bug.cgi?id=989251 The fix is to follow the similar procedure in recovery by calling tcp_try_keep_open(). The sender switches to Open state if no packets are retransmissted. Otherwise it goes to Disorder and let subsequent ACKs move the state to Recovery or Open. Reported-By: Michael Sterrett Tested-By: Dormando Signed-off-by: Yuchung Cheng Acked-by: Neal Cardwell Signed-off-by: David S. Miller --- net/ipv4/tcp_input.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net') diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 113dc5f17d47..53974c7c04fe 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -3291,7 +3291,7 @@ static void tcp_process_tlp_ack(struct sock *sk, u32 ack, int flag) tcp_init_cwnd_reduction(sk, true); tcp_set_ca_state(sk, TCP_CA_CWR); tcp_end_cwnd_reduction(sk); - tcp_set_ca_state(sk, TCP_CA_Open); + tcp_try_keep_open(sk); NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSSPROBERECOVERY); } -- cgit v1.2.3-59-g8ed1b From c52e2421f7368fd36cbe330d2cf41b10452e39a9 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Tue, 15 Oct 2013 11:54:30 -0700 Subject: tcp: must unclone packets before mangling them TCP stack should make sure it owns skbs before mangling them. We had various crashes using bnx2x, and it turned out gso_size was cleared right before bnx2x driver was populating TC descriptor of the _previous_ packet send. 
TCP stack can sometime retransmit packets that are still in Qdisc. Of course we could make bnx2x driver more robust (using ACCESS_ONCE(shinfo->gso_size) for example), but the bug is TCP stack. We have identified two points where skb_unclone() was needed. This patch adds a WARN_ON_ONCE() to warn us if we missed another fix of this kind. Kudos to Neal for finding the root cause of this bug. Its visible using small MSS. Signed-off-by: Eric Dumazet Signed-off-by: Neal Cardwell Cc: Yuchung Cheng Signed-off-by: David S. Miller --- net/ipv4/tcp_output.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) (limited to 'net') diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index c6f01f2cdb32..8fad1c155688 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -986,6 +986,9 @@ static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb) static void tcp_set_skb_tso_segs(const struct sock *sk, struct sk_buff *skb, unsigned int mss_now) { + /* Make sure we own this skb before messing gso_size/gso_segs */ + WARN_ON_ONCE(skb_cloned(skb)); + if (skb->len <= mss_now || !sk_can_gso(sk) || skb->ip_summed == CHECKSUM_NONE) { /* Avoid the costly divide in the normal @@ -1067,9 +1070,7 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, if (nsize < 0) nsize = 0; - if (skb_cloned(skb) && - skb_is_nonlinear(skb) && - pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) + if (skb_unclone(skb, GFP_ATOMIC)) return -ENOMEM; /* Get a new skb... force flag on. */ @@ -2344,6 +2345,8 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb) int oldpcount = tcp_skb_pcount(skb); if (unlikely(oldpcount > 1)) { + if (skb_unclone(skb, GFP_ATOMIC)) + return -ENOMEM; tcp_init_tso_segs(sk, skb, cur_mss); tcp_adjust_pcount(sk, skb, oldpcount - tcp_skb_pcount(skb)); } -- cgit v1.2.3-59-g8ed1b From 8f26fb1c1ed81c33f5d87c5936f4d9d1b4118918 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Tue, 15 Oct 2013 12:24:54 -0700 Subject: tcp: remove the sk_can_gso() check from tcp_set_skb_tso_segs() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit sk_can_gso() should only be used as a hint in tcp_sendmsg() to build GSO packets in the first place. (As a performance hint) Once we have GSO packets in write queue, we can not decide they are no longer GSO only because flow now uses a route which doesn't handle TSO/GSO. Core networking stack handles the case very well for us, all we need is keeping track of packet counts in MSS terms, regardless of segmentation done later (in GSO or hardware) Right now, if tcp_fragment() splits a GSO packet in two parts, @left and @right, and route changed through a non GSO device, both @left and @right have pcount set to 1, which is wrong, and leads to incorrect packet_count tracking. This problem was added in commit d5ac99a648 ("[TCP]: skb pcount with MTU discovery") Signed-off-by: Eric Dumazet Signed-off-by: Neal Cardwell Signed-off-by: Yuchung Cheng Reported-by: Maciej Żenczykowski Signed-off-by: David S. 
Miller --- net/ipv4/tcp_output.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'net') diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 8fad1c155688..d46f2143305c 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -989,8 +989,7 @@ static void tcp_set_skb_tso_segs(const struct sock *sk, struct sk_buff *skb, /* Make sure we own this skb before messing gso_size/gso_segs */ WARN_ON_ONCE(skb_cloned(skb)); - if (skb->len <= mss_now || !sk_can_gso(sk) || - skb->ip_summed == CHECKSUM_NONE) { + if (skb->len <= mss_now || skb->ip_summed == CHECKSUM_NONE) { /* Avoid the costly divide in the normal * non-TSO case. */ -- cgit v1.2.3-59-g8ed1b From 4b6c7879d84ad06a2ac5b964808ed599187a188d Mon Sep 17 00:00:00 2001 From: Vlad Yasevich Date: Tue, 15 Oct 2013 14:57:45 -0400 Subject: bridge: Correctly clamp MAX forward_delay when enabling STP Commit be4f154d5ef0ca147ab6bcd38857a774133f5450 bridge: Clamp forward_delay when enabling STP had a typo when attempting to clamp maximum forward delay. It is possible to set bridge_forward_delay to be higher then permitted maximum when STP is off. When turning STP on, the higher then allowed delay has to be clamed down to max value. CC: Herbert Xu CC: Stephen Hemminger Signed-off-by: Vlad Yasevich Reviewed-by: Veaceslav Falico Acked-by: Herbert Xu Signed-off-by: David S. Miller --- net/bridge/br_stp_if.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net') diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c index 108084a04671..656a6f3e40de 100644 --- a/net/bridge/br_stp_if.c +++ b/net/bridge/br_stp_if.c @@ -134,7 +134,7 @@ static void br_stp_start(struct net_bridge *br) if (br->bridge_forward_delay < BR_MIN_FORWARD_DELAY) __br_set_forward_delay(br, BR_MIN_FORWARD_DELAY); - else if (br->bridge_forward_delay < BR_MAX_FORWARD_DELAY) + else if (br->bridge_forward_delay > BR_MAX_FORWARD_DELAY) __br_set_forward_delay(br, BR_MAX_FORWARD_DELAY); if (r == 0) { -- cgit v1.2.3-59-g8ed1b From 8adff41c3d259eb5e313b7b04669eee545925154 Mon Sep 17 00:00:00 2001 From: Toshiaki Makita Date: Wed, 16 Oct 2013 17:07:13 +0900 Subject: bridge: Don't use VID 0 and 4095 in vlan filtering IEEE 802.1Q says that: - VID 0 shall not be configured as a PVID, or configured in any Filtering Database entry. - VID 4095 shall not be configured as a PVID, or transmitted in a tag header. This VID value may be used to indicate a wildcard match for the VID in management operations or Filtering Database entries. (See IEEE 802.1Q-2011 6.9.1 and Table 9-2) Don't accept adding these VIDs in the vlan_filtering implementation. Signed-off-by: Toshiaki Makita Reviewed-by: Vlad Yasevich Acked-by: Stephen Hemminger Signed-off-by: David S. 
Miller --- net/bridge/br_fdb.c | 4 +- net/bridge/br_netlink.c | 2 +- net/bridge/br_vlan.c | 97 +++++++++++++++++++++++-------------------------- 3 files changed, 49 insertions(+), 54 deletions(-) (limited to 'net') diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c index ffd5874f2592..33e8f23acddd 100644 --- a/net/bridge/br_fdb.c +++ b/net/bridge/br_fdb.c @@ -700,7 +700,7 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], vid = nla_get_u16(tb[NDA_VLAN]); - if (vid >= VLAN_N_VID) { + if (!vid || vid >= VLAN_VID_MASK) { pr_info("bridge: RTM_NEWNEIGH with invalid vlan id %d\n", vid); return -EINVAL; @@ -794,7 +794,7 @@ int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[], vid = nla_get_u16(tb[NDA_VLAN]); - if (vid >= VLAN_N_VID) { + if (!vid || vid >= VLAN_VID_MASK) { pr_info("bridge: RTM_NEWNEIGH with invalid vlan id %d\n", vid); return -EINVAL; diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c index e74ddc1c29a8..f75d92e4f96b 100644 --- a/net/bridge/br_netlink.c +++ b/net/bridge/br_netlink.c @@ -243,7 +243,7 @@ static int br_afspec(struct net_bridge *br, vinfo = nla_data(tb[IFLA_BRIDGE_VLAN_INFO]); - if (vinfo->vid >= VLAN_N_VID) + if (!vinfo->vid || vinfo->vid >= VLAN_VID_MASK) return -EINVAL; switch (cmd) { diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c index 9a9ffe7e4019..21b6d217872b 100644 --- a/net/bridge/br_vlan.c +++ b/net/bridge/br_vlan.c @@ -45,37 +45,34 @@ static int __vlan_add(struct net_port_vlans *v, u16 vid, u16 flags) return 0; } - if (vid) { - if (v->port_idx) { - p = v->parent.port; - br = p->br; - dev = p->dev; - } else { - br = v->parent.br; - dev = br->dev; - } - ops = dev->netdev_ops; - - if (p && (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) { - /* Add VLAN to the device filter if it is supported. - * Stricly speaking, this is not necessary now, since - * devices are made promiscuous by the bridge, but if - * that ever changes this code will allow tagged - * traffic to enter the bridge. - */ - err = ops->ndo_vlan_rx_add_vid(dev, htons(ETH_P_8021Q), - vid); - if (err) - return err; - } - - err = br_fdb_insert(br, p, dev->dev_addr, vid); - if (err) { - br_err(br, "failed insert local address into bridge " - "forwarding table\n"); - goto out_filt; - } + if (v->port_idx) { + p = v->parent.port; + br = p->br; + dev = p->dev; + } else { + br = v->parent.br; + dev = br->dev; + } + ops = dev->netdev_ops; + + if (p && (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) { + /* Add VLAN to the device filter if it is supported. + * Stricly speaking, this is not necessary now, since + * devices are made promiscuous by the bridge, but if + * that ever changes this code will allow tagged + * traffic to enter the bridge. + */ + err = ops->ndo_vlan_rx_add_vid(dev, htons(ETH_P_8021Q), + vid); + if (err) + return err; + } + err = br_fdb_insert(br, p, dev->dev_addr, vid); + if (err) { + br_err(br, "failed insert local address into bridge " + "forwarding table\n"); + goto out_filt; } set_bit(vid, v->vlan_bitmap); @@ -98,7 +95,7 @@ static int __vlan_del(struct net_port_vlans *v, u16 vid) __vlan_delete_pvid(v, vid); clear_bit(vid, v->untagged_bitmap); - if (v->port_idx && vid) { + if (v->port_idx) { struct net_device *dev = v->parent.port->dev; const struct net_device_ops *ops = dev->netdev_ops; @@ -248,7 +245,9 @@ bool br_allowed_egress(struct net_bridge *br, return false; } -/* Must be protected by RTNL */ +/* Must be protected by RTNL. + * Must be called with vid in range from 1 to 4094 inclusive. 
+ */ int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags) { struct net_port_vlans *pv = NULL; @@ -278,7 +277,9 @@ out: return err; } -/* Must be protected by RTNL */ +/* Must be protected by RTNL. + * Must be called with vid in range from 1 to 4094 inclusive. + */ int br_vlan_delete(struct net_bridge *br, u16 vid) { struct net_port_vlans *pv; @@ -289,14 +290,9 @@ int br_vlan_delete(struct net_bridge *br, u16 vid) if (!pv) return -EINVAL; - if (vid) { - /* If the VID !=0 remove fdb for this vid. VID 0 is special - * in that it's the default and is always there in the fdb. - */ - spin_lock_bh(&br->hash_lock); - fdb_delete_by_addr(br, br->dev->dev_addr, vid); - spin_unlock_bh(&br->hash_lock); - } + spin_lock_bh(&br->hash_lock); + fdb_delete_by_addr(br, br->dev->dev_addr, vid); + spin_unlock_bh(&br->hash_lock); __vlan_del(pv, vid); return 0; @@ -329,7 +325,9 @@ unlock: return 0; } -/* Must be protected by RTNL */ +/* Must be protected by RTNL. + * Must be called with vid in range from 1 to 4094 inclusive. + */ int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags) { struct net_port_vlans *pv = NULL; @@ -363,7 +361,9 @@ clean_up: return err; } -/* Must be protected by RTNL */ +/* Must be protected by RTNL. + * Must be called with vid in range from 1 to 4094 inclusive. + */ int nbp_vlan_delete(struct net_bridge_port *port, u16 vid) { struct net_port_vlans *pv; @@ -374,14 +374,9 @@ int nbp_vlan_delete(struct net_bridge_port *port, u16 vid) if (!pv) return -EINVAL; - if (vid) { - /* If the VID !=0 remove fdb for this vid. VID 0 is special - * in that it's the default and is always there in the fdb. - */ - spin_lock_bh(&port->br->hash_lock); - fdb_delete_by_addr(port->br, port->dev->dev_addr, vid); - spin_unlock_bh(&port->br->hash_lock); - } + spin_lock_bh(&port->br->hash_lock); + fdb_delete_by_addr(port->br, port->dev->dev_addr, vid); + spin_unlock_bh(&port->br->hash_lock); return __vlan_del(pv, vid); } -- cgit v1.2.3-59-g8ed1b From b90356ce17c2b199cd55530cb9c3cfabe18dbdc3 Mon Sep 17 00:00:00 2001 From: Toshiaki Makita Date: Wed, 16 Oct 2013 17:07:14 +0900 Subject: bridge: Apply the PVID to priority-tagged frames IEEE 802.1Q says that when we receive priority-tagged (VID 0) frames use the PVID for the port as its VID. (See IEEE 802.1Q-2011 6.9.1 and Table 9-2) Apply the PVID to not only untagged frames but also priority-tagged frames. Signed-off-by: Toshiaki Makita Reviewed-by: Vlad Yasevich Signed-off-by: David S. Miller --- net/bridge/br_vlan.c | 27 ++++++++++++++++++++------- 1 file changed, 20 insertions(+), 7 deletions(-) (limited to 'net') diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c index 21b6d217872b..5a9c44a0c306 100644 --- a/net/bridge/br_vlan.c +++ b/net/bridge/br_vlan.c @@ -189,6 +189,8 @@ out: bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v, struct sk_buff *skb, u16 *vid) { + int err; + /* If VLAN filtering is disabled on the bridge, all packets are * permitted. */ @@ -201,20 +203,31 @@ bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v, if (!v) return false; - if (br_vlan_get_tag(skb, vid)) { + err = br_vlan_get_tag(skb, vid); + if (!*vid) { u16 pvid = br_get_pvid(v); - /* Frame did not have a tag. See if pvid is set - * on this port. That tells us which vlan untagged - * traffic belongs to. + /* Frame had a tag with VID 0 or did not have a tag. + * See if pvid is set on this port. That tells us which + * vlan untagged or priority-tagged traffic belongs to. 
*/ if (pvid == VLAN_N_VID) return false; - /* PVID is set on this port. Any untagged ingress - * frame is considered to belong to this vlan. + /* PVID is set on this port. Any untagged or priority-tagged + * ingress frame is considered to belong to this vlan. */ - __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), pvid); + if (likely(err)) + /* Untagged Frame. */ + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), pvid); + else + /* Priority-tagged Frame. + * At this point, We know that skb->vlan_tci had + * VLAN_TAG_PRESENT bit and its VID field was 0x000. + * We update only VID field and preserve PCP field. + */ + skb->vlan_tci |= pvid; + return true; } -- cgit v1.2.3-59-g8ed1b From d1c6c708c4da9d104e0b7c116654cb449bff9b5f Mon Sep 17 00:00:00 2001 From: Toshiaki Makita Date: Wed, 16 Oct 2013 17:07:15 +0900 Subject: bridge: Fix the way the PVID is referenced We are using the VLAN_TAG_PRESENT bit to detect whether the PVID is set or not at br_get_pvid(), while we don't care about the bit in adding/deleting the PVID, which makes it impossible to forward any incomming untagged frame with vlan_filtering enabled. Since vid 0 cannot be used for the PVID, we can use vid 0 to indicate that the PVID is not set, which is slightly more efficient than using the VLAN_TAG_PRESENT. Fix the problem by getting rid of using the VLAN_TAG_PRESENT. Signed-off-by: Toshiaki Makita Reviewed-by: Vlad Yasevich Signed-off-by: David S. Miller --- net/bridge/br_private.h | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'net') diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index efb57d911569..7ca2ae45b2d5 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h @@ -643,9 +643,7 @@ static inline u16 br_get_pvid(const struct net_port_vlans *v) * vid wasn't set */ smp_rmb(); - return (v->pvid & VLAN_TAG_PRESENT) ? - (v->pvid & ~VLAN_TAG_PRESENT) : - VLAN_N_VID; + return v->pvid ?: VLAN_N_VID; } #else -- cgit v1.2.3-59-g8ed1b From dfb5fa32c66496a53ec6a45302d902416b51ade2 Mon Sep 17 00:00:00 2001 From: Toshiaki Makita Date: Wed, 16 Oct 2013 17:07:16 +0900 Subject: bridge: Fix updating FDB entries when the PVID is applied We currently set the value that variable vid is pointing, which will be used in FDB later, to 0 at br_allowed_ingress() when we receive untagged or priority-tagged frames, even though the PVID is valid. This leads to FDB updates in such a wrong way that they are learned with VID 0. Update the value to that of PVID if the PVID is applied. Signed-off-by: Toshiaki Makita Reviewed-by: Vlad Yasevich Signed-off-by: David S. Miller --- net/bridge/br_vlan.c | 1 + 1 file changed, 1 insertion(+) (limited to 'net') diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c index 5a9c44a0c306..53f0990eab58 100644 --- a/net/bridge/br_vlan.c +++ b/net/bridge/br_vlan.c @@ -217,6 +217,7 @@ bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v, /* PVID is set on this port. Any untagged or priority-tagged * ingress frame is considered to belong to this vlan. */ + *vid = pvid; if (likely(err)) /* Untagged Frame. 
*/ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), pvid); -- cgit v1.2.3-59-g8ed1b From 90c6bd34f884cd9cee21f1d152baf6c18bcac949 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Thu, 17 Oct 2013 22:51:31 +0200 Subject: net: unix: inherit SOCK_PASS{CRED, SEC} flags from socket to fix race In the case of credentials passing in unix stream sockets (dgram sockets seem not affected), we get a rather sparse race after commit 16e5726 ("af_unix: dont send SCM_CREDENTIALS by default"). We have a stream server on receiver side that requests credential passing from senders (e.g. nc -U). Since we need to set SO_PASSCRED on each spawned/accepted socket on server side to 1 first (as it's not inherited), it can happen that in the time between accept() and setsockopt() we get interrupted, the sender is being scheduled and continues with passing data to our receiver. At that time SO_PASSCRED is neither set on sender nor receiver side, hence in cmsg's SCM_CREDENTIALS we get eventually pid:0, uid:65534, gid:65534 (== overflow{u,g}id) instead of what we actually would like to see. On the sender side, here nc -U, the tests in maybe_add_creds() invoked through unix_stream_sendmsg() would fail, as at that exact time, as mentioned, the sender has neither SO_PASSCRED on his side nor sees it on the server side, and we have a valid 'other' socket in place. Thus, sender believes it would just look like a normal connection, not needing/requesting SO_PASSCRED at that time. As reverting 16e5726 would not be an option due to the significant performance regression reported when having creds always passed, one way/trade-off to prevent that would be to set SO_PASSCRED on the listener socket and allow inheriting these flags to the spawned socket on server side in accept(). It seems also logical to do so if we'd tell the listener socket to pass those flags onwards, and would fix the race. Before, strace: recvmsg(4, {msg_name(0)=NULL, msg_iov(1)=[{"blub\n", 4096}], msg_controllen=32, {cmsg_len=28, cmsg_level=SOL_SOCKET, cmsg_type=SCM_CREDENTIALS{pid=0, uid=65534, gid=65534}}, msg_flags=0}, 0) = 5 After, strace: recvmsg(4, {msg_name(0)=NULL, msg_iov(1)=[{"blub\n", 4096}], msg_controllen=32, {cmsg_len=28, cmsg_level=SOL_SOCKET, cmsg_type=SCM_CREDENTIALS{pid=11580, uid=1000, gid=1000}}, msg_flags=0}, 0) = 5 Signed-off-by: Daniel Borkmann Cc: Eric Dumazet Cc: Eric W. Biederman Signed-off-by: David S. 
Miller --- net/unix/af_unix.c | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'net') diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 86de99ad2976..c1f403bed683 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -1246,6 +1246,15 @@ static int unix_socketpair(struct socket *socka, struct socket *sockb) return 0; } +static void unix_sock_inherit_flags(const struct socket *old, + struct socket *new) +{ + if (test_bit(SOCK_PASSCRED, &old->flags)) + set_bit(SOCK_PASSCRED, &new->flags); + if (test_bit(SOCK_PASSSEC, &old->flags)) + set_bit(SOCK_PASSSEC, &new->flags); +} + static int unix_accept(struct socket *sock, struct socket *newsock, int flags) { struct sock *sk = sock->sk; @@ -1280,6 +1289,7 @@ static int unix_accept(struct socket *sock, struct socket *newsock, int flags) /* attach accepted sock to socket */ unix_state_lock(tsk); newsock->state = SS_CONNECTED; + unix_sock_inherit_flags(sock, newsock); sock_graft(tsk, newsock); unix_state_unlock(tsk); return 0; -- cgit v1.2.3-59-g8ed1b From e36d3ff91130002c7c2d1d6a55556991da1daecc Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Sat, 19 Oct 2013 12:29:15 +0200 Subject: udp6: respect IPV6_DONTFRAG sockopt in case there are pending frames if up->pending != 0 dontfrag is left with default value -1. That causes that application that do: sendto len>mtu flag MSG_MORE sendto len>mtu flag 0 will receive EMSGSIZE errno as the result of the second sendto. This patch fixes it by respecting IPV6_DONTFRAG socket option. introduced by: commit 4b340ae20d0e2366792abe70f46629e576adaf5e "IPv6: Complete IPV6_DONTFRAG support" Signed-off-by: Jiri Pirko Acked-by: Hannes Frederic Sowa Signed-off-by: David S. Miller --- net/ipv6/udp.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'net') diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 72b7eaaf3ca0..18786098fd41 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -1225,9 +1225,6 @@ do_udp_sendmsg: if (tclass < 0) tclass = np->tclass; - if (dontfrag < 0) - dontfrag = np->dontfrag; - if (msg->msg_flags&MSG_CONFIRM) goto do_confirm; back_from_confirm: @@ -1246,6 +1243,8 @@ back_from_confirm: up->pending = AF_INET6; do_append_data: + if (dontfrag < 0) + dontfrag = np->dontfrag; up->len += ulen; getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag; err = ip6_append_data(sk, getfrag, msg->msg_iov, ulen, -- cgit v1.2.3-59-g8ed1b From c547dbf55d5f8cf615ccc0e7265e98db27d3fb8b Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Sat, 19 Oct 2013 12:29:16 +0200 Subject: ip6_output: do skb ufo init for peeked non ufo skb as well Now, if user application does: sendto lenmtu flag 0 The skb is not treated as fragmented one because it is not initialized that way. So move the initialization to fix this. introduced by: commit e89e9cf539a28df7d0eb1d0a545368e9920b34ac "[IPv4/IPv6]: UFO Scatter-gather approach" Signed-off-by: Jiri Pirko Acked-by: Hannes Frederic Sowa Signed-off-by: David S. 
Miller --- net/ipv6/ip6_output.c | 25 ++++++++++++++----------- 1 file changed, 14 insertions(+), 11 deletions(-) (limited to 'net') diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index a54c45ce4a48..975624b8d2ea 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -1008,6 +1008,7 @@ static inline int ip6_ufo_append_data(struct sock *sk, { struct sk_buff *skb; + struct frag_hdr fhdr; int err; /* There is support for UDP large send offload by network @@ -1015,8 +1016,6 @@ static inline int ip6_ufo_append_data(struct sock *sk, * udp datagram */ if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) { - struct frag_hdr fhdr; - skb = sock_alloc_send_skb(sk, hh_len + fragheaderlen + transhdrlen + 20, (flags & MSG_DONTWAIT), &err); @@ -1036,20 +1035,24 @@ static inline int ip6_ufo_append_data(struct sock *sk, skb->transport_header = skb->network_header + fragheaderlen; skb->protocol = htons(ETH_P_IPV6); - skb->ip_summed = CHECKSUM_PARTIAL; skb->csum = 0; - /* Specify the length of each IPv6 datagram fragment. - * It has to be a multiple of 8. - */ - skb_shinfo(skb)->gso_size = (mtu - fragheaderlen - - sizeof(struct frag_hdr)) & ~7; - skb_shinfo(skb)->gso_type = SKB_GSO_UDP; - ipv6_select_ident(&fhdr, rt); - skb_shinfo(skb)->ip6_frag_id = fhdr.identification; __skb_queue_tail(&sk->sk_write_queue, skb); + } else if (skb_is_gso(skb)) { + goto append; } + skb->ip_summed = CHECKSUM_PARTIAL; + /* Specify the length of each IPv6 datagram fragment. + * It has to be a multiple of 8. + */ + skb_shinfo(skb)->gso_size = (mtu - fragheaderlen - + sizeof(struct frag_hdr)) & ~7; + skb_shinfo(skb)->gso_type = SKB_GSO_UDP; + ipv6_select_ident(&fhdr, rt); + skb_shinfo(skb)->ip6_frag_id = fhdr.identification; + +append: return skb_append_datato_frags(sk, skb, getfrag, from, (length - transhdrlen)); } -- cgit v1.2.3-59-g8ed1b From e93b7d748be887cd7639b113ba7d7ef792a7efb9 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Sat, 19 Oct 2013 12:29:17 +0200 Subject: ip_output: do skb ufo init for peeked non ufo skb as well Now, if user application does: sendto lenmtu flag 0 The skb is not treated as fragmented one because it is not initialized that way. So move the initialization to fix this. introduced by: commit e89e9cf539a28df7d0eb1d0a545368e9920b34ac "[IPv4/IPv6]: UFO Scatter-gather approach" Signed-off-by: Jiri Pirko Acked-by: Hannes Frederic Sowa Signed-off-by: David S. 
Miller --- net/ipv4/ip_output.c | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) (limited to 'net') diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index a04d872c54f9..3982eabf61e1 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -772,15 +772,20 @@ static inline int ip_ufo_append_data(struct sock *sk, /* initialize protocol header pointer */ skb->transport_header = skb->network_header + fragheaderlen; - skb->ip_summed = CHECKSUM_PARTIAL; skb->csum = 0; - /* specify the length of each IP datagram fragment */ - skb_shinfo(skb)->gso_size = maxfraglen - fragheaderlen; - skb_shinfo(skb)->gso_type = SKB_GSO_UDP; + __skb_queue_tail(queue, skb); + } else if (skb_is_gso(skb)) { + goto append; } + skb->ip_summed = CHECKSUM_PARTIAL; + /* specify the length of each IP datagram fragment */ + skb_shinfo(skb)->gso_size = maxfraglen - fragheaderlen; + skb_shinfo(skb)->gso_type = SKB_GSO_UDP; + +append: return skb_append_datato_frags(sk, skb, getfrag, from, (length - transhdrlen)); } -- cgit v1.2.3-59-g8ed1b From 550bab42f83308c9d6ab04a980cc4333cef1c8fa Mon Sep 17 00:00:00 2001 From: Julian Anastasov Date: Sun, 20 Oct 2013 15:43:04 +0300 Subject: ipv6: fill rt6i_gateway with nexthop address Make sure rt6i_gateway contains nexthop information in all routes returned from lookup or when routes are directly attached to skb for generated ICMP packets. The effect of this patch should be a faster version of rt6_nexthop() and the consideration of local addresses as nexthop. Signed-off-by: Julian Anastasov Acked-by: Hannes Frederic Sowa Signed-off-by: David S. Miller --- include/net/ip6_route.h | 6 ++---- net/ipv6/ip6_output.c | 4 ++-- net/ipv6/route.c | 8 ++++++-- 3 files changed, 10 insertions(+), 8 deletions(-) (limited to 'net') diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h index 481404abdf65..2b786b7e3585 100644 --- a/include/net/ip6_route.h +++ b/include/net/ip6_route.h @@ -194,11 +194,9 @@ static inline int ip6_skb_dst_mtu(struct sk_buff *skb) skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb)); } -static inline struct in6_addr *rt6_nexthop(struct rt6_info *rt, struct in6_addr *dest) +static inline struct in6_addr *rt6_nexthop(struct rt6_info *rt) { - if (rt->rt6i_flags & RTF_GATEWAY || !ipv6_addr_any(&rt->rt6i_gateway)) - return &rt->rt6i_gateway; - return dest; + return &rt->rt6i_gateway; } #endif diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 975624b8d2ea..91fb4e8212f5 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -105,7 +105,7 @@ static int ip6_finish_output2(struct sk_buff *skb) } rcu_read_lock_bh(); - nexthop = rt6_nexthop((struct rt6_info *)dst, &ipv6_hdr(skb)->daddr); + nexthop = rt6_nexthop((struct rt6_info *)dst); neigh = __ipv6_neigh_lookup_noref(dst->dev, nexthop); if (unlikely(!neigh)) neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false); @@ -874,7 +874,7 @@ static int ip6_dst_lookup_tail(struct sock *sk, */ rt = (struct rt6_info *) *dst; rcu_read_lock_bh(); - n = __ipv6_neigh_lookup_noref(rt->dst.dev, rt6_nexthop(rt, &fl6->daddr)); + n = __ipv6_neigh_lookup_noref(rt->dst.dev, rt6_nexthop(rt)); err = n && !(n->nud_state & NUD_VALID) ? 
-EINVAL : 0; rcu_read_unlock_bh(); diff --git a/net/ipv6/route.c b/net/ipv6/route.c index c979dd96d82a..c1ee3813e1ae 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -851,7 +851,6 @@ static struct rt6_info *rt6_alloc_cow(struct rt6_info *ort, if (ort->rt6i_dst.plen != 128 && ipv6_addr_equal(&ort->rt6i_dst.addr, daddr)) rt->rt6i_flags |= RTF_ANYCAST; - rt->rt6i_gateway = *daddr; } rt->rt6i_flags |= RTF_CACHE; @@ -1338,6 +1337,7 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev, rt->dst.flags |= DST_HOST; rt->dst.output = ip6_output; atomic_set(&rt->dst.__refcnt, 1); + rt->rt6i_gateway = fl6->daddr; rt->rt6i_dst.addr = fl6->daddr; rt->rt6i_dst.plen = 128; rt->rt6i_idev = idev; @@ -1873,7 +1873,10 @@ static struct rt6_info *ip6_rt_copy(struct rt6_info *ort, in6_dev_hold(rt->rt6i_idev); rt->dst.lastuse = jiffies; - rt->rt6i_gateway = ort->rt6i_gateway; + if (ort->rt6i_flags & RTF_GATEWAY) + rt->rt6i_gateway = ort->rt6i_gateway; + else + rt->rt6i_gateway = *dest; rt->rt6i_flags = ort->rt6i_flags; if ((ort->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF)) == (RTF_DEFAULT | RTF_ADDRCONF)) @@ -2160,6 +2163,7 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev, else rt->rt6i_flags |= RTF_LOCAL; + rt->rt6i_gateway = *addr; rt->rt6i_dst.addr = *addr; rt->rt6i_dst.plen = 128; rt->rt6i_table = fib6_get_table(net, RT6_TABLE_LOCAL); -- cgit v1.2.3-59-g8ed1b From 56e42441ed54b092d6c7411138ce60d049e7c731 Mon Sep 17 00:00:00 2001 From: Julian Anastasov Date: Sun, 20 Oct 2013 15:43:05 +0300 Subject: netfilter: nf_conntrack: fix rt6i_gateway checks for H.323 helper Now when rt6_nexthop() can return nexthop address we can use it for proper nexthop comparison of directly connected destinations. For more information refer to commit bbb5823cf742a7 ("netfilter: nf_conntrack: fix rt_gateway checks for H.323 helper"). Signed-off-by: Julian Anastasov Acked-by: Hannes Frederic Sowa Signed-off-by: David S. Miller --- net/netfilter/nf_conntrack_h323_main.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'net') diff --git a/net/netfilter/nf_conntrack_h323_main.c b/net/netfilter/nf_conntrack_h323_main.c index bdebd03bc8cd..70866d192efc 100644 --- a/net/netfilter/nf_conntrack_h323_main.c +++ b/net/netfilter/nf_conntrack_h323_main.c @@ -778,8 +778,8 @@ static int callforward_do_filter(const union nf_inet_addr *src, flowi6_to_flowi(&fl1), false)) { if (!afinfo->route(&init_net, (struct dst_entry **)&rt2, flowi6_to_flowi(&fl2), false)) { - if (!memcmp(&rt1->rt6i_gateway, &rt2->rt6i_gateway, - sizeof(rt1->rt6i_gateway)) && + if (ipv6_addr_equal(rt6_nexthop(rt1), + rt6_nexthop(rt2)) && rt1->dst.dev == rt2->dst.dev) ret = 1; dst_release(&rt2->dst); -- cgit v1.2.3-59-g8ed1b From c2f17e827b419918c856131f592df9521e1a38e3 Mon Sep 17 00:00:00 2001 From: Hannes Frederic Sowa Date: Mon, 21 Oct 2013 06:17:15 +0200 Subject: ipv6: probe routes asynchronous in rt6_probe Routes need to be probed asynchronous otherwise the call stack gets exhausted when the kernel attemps to deliver another skb inline, like e.g. xt_TEE does, and we probe at the same time. We update neigh->updated still at once, otherwise we would send to many probes. Cc: Julian Anastasov Signed-off-by: Hannes Frederic Sowa Signed-off-by: David S. 
Miller --- net/ipv6/route.c | 38 +++++++++++++++++++++++++++++++------- 1 file changed, 31 insertions(+), 7 deletions(-) (limited to 'net') diff --git a/net/ipv6/route.c b/net/ipv6/route.c index c1ee3813e1ae..f54e3a101098 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -476,6 +476,24 @@ out: } #ifdef CONFIG_IPV6_ROUTER_PREF +struct __rt6_probe_work { + struct work_struct work; + struct in6_addr target; + struct net_device *dev; +}; + +static void rt6_probe_deferred(struct work_struct *w) +{ + struct in6_addr mcaddr; + struct __rt6_probe_work *work = + container_of(w, struct __rt6_probe_work, work); + + addrconf_addr_solict_mult(&work->target, &mcaddr); + ndisc_send_ns(work->dev, NULL, &work->target, &mcaddr, NULL); + dev_put(work->dev); + kfree(w); +} + static void rt6_probe(struct rt6_info *rt) { struct neighbour *neigh; @@ -499,17 +517,23 @@ static void rt6_probe(struct rt6_info *rt) if (!neigh || time_after(jiffies, neigh->updated + rt->rt6i_idev->cnf.rtr_probe_interval)) { - struct in6_addr mcaddr; - struct in6_addr *target; + struct __rt6_probe_work *work; + + work = kmalloc(sizeof(*work), GFP_ATOMIC); - if (neigh) { + if (neigh && work) neigh->updated = jiffies; + + if (neigh) write_unlock(&neigh->lock); - } - target = (struct in6_addr *)&rt->rt6i_gateway; - addrconf_addr_solict_mult(target, &mcaddr); - ndisc_send_ns(rt->dst.dev, NULL, target, &mcaddr, NULL); + if (work) { + INIT_WORK(&work->work, rt6_probe_deferred); + work->target = rt->rt6i_gateway; + dev_hold(rt->dst.dev); + work->dev = rt->dst.dev; + schedule_work(&work->work); + } } else { out: write_unlock(&neigh->lock); -- cgit v1.2.3-59-g8ed1b From 02cf4ebd82ff0ac7254b88e466820a290ed8289a Mon Sep 17 00:00:00 2001 From: Neal Cardwell Date: Mon, 21 Oct 2013 15:40:19 -0400 Subject: tcp: initialize passive-side sk_pacing_rate after 3WHS For passive TCP connections, upon receiving the ACK that completes the 3WHS, make sure we set our pacing rate after we get our first RTT sample. On passive TCP connections, when we receive the ACK completing the 3WHS we do not take an RTT sample in tcp_ack(), but rather in tcp_synack_rtt_meas(). So upon receiving the ACK that completes the 3WHS, tcp_ack() leaves sk_pacing_rate at its initial value. Originally the initial sk_pacing_rate value was 0, so passive-side connections defaulted to sysctl_tcp_min_tso_segs (2 segs) in skbuffs made in the first RTT. With a default initial cwnd of 10 packets, this happened to be correct for RTTs 5ms or bigger, so it was hard to see problems in WAN or emulated WAN testing. Since 7eec4174ff ("pkt_sched: fq: fix non TCP flows pacing"), the initial sk_pacing_rate is 0xffffffff. So after that change, passive TCP connections were keeping this value (and using large numbers of segments per skbuff) until receiving an ACK for data. Signed-off-by: Neal Cardwell Cc: Eric Dumazet Cc: Yuchung Cheng Acked-by: Eric Dumazet Signed-off-by: David S. 
Miller --- net/ipv4/tcp_input.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'net') diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 53974c7c04fe..a16b01b537ba 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -5712,6 +5712,8 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, } else tcp_init_metrics(sk); + tcp_update_pacing_rate(sk); + /* Prevent spurious tcp_cwnd_restart() on first data packet */ tp->lsndtime = tcp_time_stamp; -- cgit v1.2.3-59-g8ed1b From 454594f3b93a49ef568cd190c5af31376b105a7b Mon Sep 17 00:00:00 2001 From: Linus Lüssing Date: Sun, 20 Oct 2013 00:58:57 +0200 Subject: Revert "bridge: only expire the mdb entry when query is received" MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit While this commit was a good attempt to fix issues occuring when no multicast querier is present, this commit still has two more issues: 1) There are cases where mdb entries do not expire even if there is a querier present. The bridge will unnecessarily continue flooding multicast packets on the according ports. 2) Never removing an mdb entry could be exploited for a Denial of Service by an attacker on the local link, slowly, but steadily eating up all memory. Actually, this commit became obsolete with "bridge: disable snooping if there is no querier" (b00589af3b) which included fixes for a few more cases. Therefore reverting the following commits (the commit stated in the commit message plus three of its follow up fixes): ==================== Revert "bridge: update mdb expiration timer upon reports." This reverts commit f144febd93d5ee534fdf23505ab091b2b9088edc. Revert "bridge: do not call setup_timer() multiple times" This reverts commit 1faabf2aab1fdaa1ace4e8c829d1b9cf7bfec2f1. Revert "bridge: fix some kernel warning in multicast timer" This reverts commit c7e8e8a8f7a70b343ca1e0f90a31e35ab2d16de1. Revert "bridge: only expire the mdb entry when query is received" This reverts commit 9f00b2e7cf241fa389733d41b615efdaa2cb0f5b. ==================== CC: Cong Wang Signed-off-by: Linus Lüssing Reviewed-by: Vlad Yasevich Signed-off-by: David S. 
Miller --- net/bridge/br_mdb.c | 2 +- net/bridge/br_multicast.c | 47 +++++++++++++++++++++++++++-------------------- net/bridge/br_private.h | 1 - 3 files changed, 28 insertions(+), 22 deletions(-) (limited to 'net') diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c index 85a09bb5ca51..b7b1914dfa25 100644 --- a/net/bridge/br_mdb.c +++ b/net/bridge/br_mdb.c @@ -453,7 +453,7 @@ static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry) call_rcu_bh(&p->rcu, br_multicast_free_pg); err = 0; - if (!mp->ports && !mp->mglist && mp->timer_armed && + if (!mp->ports && !mp->mglist && netif_running(br->dev)) mod_timer(&mp->timer, jiffies); break; diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index 1085f2180f3a..8b0b610ca2c9 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c @@ -272,7 +272,7 @@ static void br_multicast_del_pg(struct net_bridge *br, del_timer(&p->timer); call_rcu_bh(&p->rcu, br_multicast_free_pg); - if (!mp->ports && !mp->mglist && mp->timer_armed && + if (!mp->ports && !mp->mglist && netif_running(br->dev)) mod_timer(&mp->timer, jiffies); @@ -611,9 +611,6 @@ rehash: break; default: - /* If we have an existing entry, update it's expire timer */ - mod_timer(&mp->timer, - jiffies + br->multicast_membership_interval); goto out; } @@ -623,7 +620,6 @@ rehash: mp->br = br; mp->addr = *group; - setup_timer(&mp->timer, br_multicast_group_expired, (unsigned long)mp); @@ -663,6 +659,7 @@ static int br_multicast_add_group(struct net_bridge *br, struct net_bridge_mdb_entry *mp; struct net_bridge_port_group *p; struct net_bridge_port_group __rcu **pp; + unsigned long now = jiffies; int err; spin_lock(&br->multicast_lock); @@ -677,18 +674,15 @@ static int br_multicast_add_group(struct net_bridge *br, if (!port) { mp->mglist = true; + mod_timer(&mp->timer, now + br->multicast_membership_interval); goto out; } for (pp = &mp->ports; (p = mlock_dereference(*pp, br)) != NULL; pp = &p->next) { - if (p->port == port) { - /* We already have a portgroup, update the timer. */ - mod_timer(&p->timer, - jiffies + br->multicast_membership_interval); - goto out; - } + if (p->port == port) + goto found; if ((unsigned long)p->port < (unsigned long)port) break; } @@ -699,6 +693,8 @@ static int br_multicast_add_group(struct net_bridge *br, rcu_assign_pointer(*pp, p); br_mdb_notify(br->dev, port, group, RTM_NEWMDB); +found: + mod_timer(&p->timer, now + br->multicast_membership_interval); out: err = 0; @@ -1198,9 +1194,6 @@ static int br_ip4_multicast_query(struct net_bridge *br, if (!mp) goto out; - mod_timer(&mp->timer, now + br->multicast_membership_interval); - mp->timer_armed = true; - max_delay *= br->multicast_last_member_count; if (mp->mglist && @@ -1277,9 +1270,6 @@ static int br_ip6_multicast_query(struct net_bridge *br, if (!mp) goto out; - mod_timer(&mp->timer, now + br->multicast_membership_interval); - mp->timer_armed = true; - max_delay *= br->multicast_last_member_count; if (mp->mglist && (timer_pending(&mp->timer) ? 
@@ -1365,7 +1355,7 @@ static void br_multicast_leave_group(struct net_bridge *br, call_rcu_bh(&p->rcu, br_multicast_free_pg); br_mdb_notify(br->dev, port, group, RTM_DELMDB); - if (!mp->ports && !mp->mglist && mp->timer_armed && + if (!mp->ports && !mp->mglist && netif_running(br->dev)) mod_timer(&mp->timer, jiffies); } @@ -1377,12 +1367,30 @@ static void br_multicast_leave_group(struct net_bridge *br, br->multicast_last_member_interval; if (!port) { - if (mp->mglist && mp->timer_armed && + if (mp->mglist && (timer_pending(&mp->timer) ? time_after(mp->timer.expires, time) : try_to_del_timer_sync(&mp->timer) >= 0)) { mod_timer(&mp->timer, time); } + + goto out; + } + + for (p = mlock_dereference(mp->ports, br); + p != NULL; + p = mlock_dereference(p->next, br)) { + if (p->port != port) + continue; + + if (!hlist_unhashed(&p->mglist) && + (timer_pending(&p->timer) ? + time_after(p->timer.expires, time) : + try_to_del_timer_sync(&p->timer) >= 0)) { + mod_timer(&p->timer, time); + } + + break; } out: spin_unlock(&br->multicast_lock); @@ -1805,7 +1813,6 @@ void br_multicast_stop(struct net_bridge *br) hlist_for_each_entry_safe(mp, n, &mdb->mhash[i], hlist[ver]) { del_timer(&mp->timer); - mp->timer_armed = false; call_rcu_bh(&mp->rcu, br_multicast_free_group); } } diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index 7ca2ae45b2d5..e14c33b42f75 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h @@ -126,7 +126,6 @@ struct net_bridge_mdb_entry struct timer_list timer; struct br_ip addr; bool mglist; - bool timer_armed; }; struct net_bridge_mdb_htable -- cgit v1.2.3-59-g8ed1b
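The bridge VLAN filtering changes above restrict configurable VIDs to the range 1-4094 (VID 0 is reserved for priority-tagged traffic, VID 4095 is the wildcard). The snippet below is a standalone restatement of that accepted range, not the kernel code itself; the constant is redefined locally so the example compiles on its own, and the sample values are arbitrary.

/* Minimal sketch of the VID range accepted by the bridge vlan_filtering
 * code after "bridge: Don't use VID 0 and 4095 in vlan filtering".
 * VLAN_VID_MASK is redefined here only so this compiles standalone.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define VLAN_VID_MASK 0x0fff	/* 4095, per IEEE 802.1Q */

static bool br_vid_is_configurable(uint16_t vid)
{
	/* Reject VID 0 (priority tag) and VID 4095 (reserved/wildcard). */
	return vid != 0 && vid < VLAN_VID_MASK;
}

int main(void)
{
	const uint16_t samples[] = { 0, 1, 100, 4094, 4095 };

	for (size_t i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("vid %4u -> %s\n", (unsigned)samples[i],
		       br_vid_is_configurable(samples[i]) ? "ok" : "rejected");
	return 0;
}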
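The af_unix credential-passing race above is easiest to see from userspace. The sketch below is a minimal, hypothetical receiver that relies on the behaviour the SOCK_PASS{CRED,SEC} patch introduces: SO_PASSCRED is set once on the listening socket and the accepted socket inherits it, so SCM_CREDENTIALS is populated even for data the peer queued before the server could run setsockopt() on the accepted fd. The socket path is a placeholder and most error handling is trimmed for brevity.

/* Minimal sketch (not the kernel patch): a unix stream server that sets
 * SO_PASSCRED on the *listening* socket and expects the accepted socket
 * to inherit it, closing the accept()/setsockopt() window described in
 * the commit above.  Error handling is mostly omitted.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <sys/un.h>

int main(void)
{
	int lsk, csk, one = 1;
	struct sockaddr_un addr = { .sun_family = AF_UNIX };
	char data[4096];
	union {
		char buf[CMSG_SPACE(sizeof(struct ucred))];
		struct cmsghdr align;
	} ctrl;
	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = ctrl.buf, .msg_controllen = sizeof(ctrl.buf),
	};
	struct cmsghdr *cmsg;

	strncpy(addr.sun_path, "/tmp/creds.sock", sizeof(addr.sun_path) - 1);
	unlink(addr.sun_path);

	lsk = socket(AF_UNIX, SOCK_STREAM, 0);
	bind(lsk, (struct sockaddr *)&addr, sizeof(addr));
	listen(lsk, 1);

	/* Request credentials on the listener; with the inheritance patch
	 * the accepted socket carries the flag from the moment it exists.
	 */
	setsockopt(lsk, SOL_SOCKET, SO_PASSCRED, &one, sizeof(one));

	csk = accept(lsk, NULL, NULL);
	if (csk >= 0 && recvmsg(csk, &msg, 0) > 0) {
		for (cmsg = CMSG_FIRSTHDR(&msg); cmsg;
		     cmsg = CMSG_NXTHDR(&msg, cmsg)) {
			if (cmsg->cmsg_level == SOL_SOCKET &&
			    cmsg->cmsg_type == SCM_CREDENTIALS) {
				struct ucred uc;

				memcpy(&uc, CMSG_DATA(cmsg), sizeof(uc));
				printf("pid=%d uid=%u gid=%u\n",
				       (int)uc.pid, uc.uid, uc.gid);
			}
		}
	}
	close(csk);
	close(lsk);
	return 0;
}

With the pre-patch kernels described above, the same receiver could report pid=0, uid=65534, gid=65534 for data sent in the window between accept() and a per-socket setsockopt(SO_PASSCRED); setting the option on the listener sidesteps that window entirely.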
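The udp6 IPV6_DONTFRAG fix above is also driven by a simple userspace pattern: two oversized sendto() calls on the same socket, the first corked with MSG_MORE. The sketch below reproduces that calling pattern only; the destination address (a 2001:db8::/32 documentation prefix), port, and payload size are placeholders, and without a matching route the sends will simply fail. Per the commit message, the second sendto() used to return EMSGSIZE even though the application never enabled IPV6_DONTFRAG; with the fix the socket's setting is honoured for the corked datagram as well.

/* Minimal sketch of the "sendto len>mtu MSG_MORE; sendto len>mtu 0"
 * pattern from the udp6 commit above.  Address, port and sizes are
 * illustrative only.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>

int main(void)
{
	int fd = socket(AF_INET6, SOCK_DGRAM, 0);
	struct sockaddr_in6 dst = { .sin6_family = AF_INET6,
				    .sin6_port = htons(9999) };
	static char payload[3000];	/* larger than a typical 1500-byte MTU */

	inet_pton(AF_INET6, "2001:db8::1", &dst.sin6_addr);

	/* First send is corked: the kernel keeps the data pending. */
	if (sendto(fd, payload, sizeof(payload), MSG_MORE,
		   (struct sockaddr *)&dst, sizeof(dst)) < 0)
		perror("sendto(MSG_MORE)");

	/* Second send flushes the pending datagram.  Before the fix this
	 * append path ignored np->dontfrag and could fail with EMSGSIZE;
	 * after it, IPV6_DONTFRAG is consulted here too.
	 */
	if (sendto(fd, payload, sizeof(payload), 0,
		   (struct sockaddr *)&dst, sizeof(dst)) < 0)
		perror("sendto(flush)");

	close(fd);
	return 0;
}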