Update to 4.8.8

This commit is contained in:
Salvatore Bonaccorso 2016-11-15 22:07:17 +01:00
parent 343333fea3
commit 26676cdca0
3 changed files with 45 additions and 202 deletions

45
debian/changelog vendored
View File

@@ -1,3 +1,48 @@
linux (4.8.8-1) UNRELEASED; urgency=medium
* New upstream stable update:
https://www.kernel.org/pub/linux/kernel/v4.x/ChangeLog-4.8.8
- net: fec: set mac address unconditionally
- net: pktgen: fix pkt_size
- net/sched: act_vlan: Push skb->data to mac_header prior calling
skb_vlan_*() functions
- net: Add netdev all_adj_list refcnt propagation to fix panic
- packet: call fanout_release, while UNREGISTERING a netdev
- netlink: do not enter direct reclaim from netlink_dump()
- drivers/ptp: Fix kernel memory disclosure
- net_sched: reorder pernet ops and act ops registrations
- ipv6: tcp: restore IP6CB for pktoptions skbs
- net: phy: Trigger state machine on state change and not polling.
- ip6_tunnel: fix ip6_tnl_lookup
- ipv6: correctly add local routes when lo goes up
- IB/ipoib: move back IB LL address into the hard header
- net/mlx4_en: fixup xdp tx irq to match rx
- net: pktgen: remove rcu locking in pktgen_change_name()
- bridge: multicast: restore perm router ports on multicast enable
- switchdev: Execute bridge ndos only for bridge ports
- rtnetlink: Add rtnexthop offload flag to compare mask
- net: core: Correctly iterate over lower adjacency list
- net: add recursion limit to GRO
- ipv4: disable BH in set_ping_group_range()
- ipv4: use the right lock for ping_group_range
- net: fec: Call swap_buffer() prior to IP header alignment
- net: sctp, forbid negative length
- sctp: fix the panic caused by route update
- udp: fix IP_CHECKSUM handling
- [x86] netvsc: fix incorrect receive checksum offloading
- net: ipv6: Do not consider link state for nexthop validation
- net sched filters: fix notification of filter delete with proper handle
- sctp: validate chunk len before actually using it
- ip6_tunnel: Update skb->protocol to ETH_P_IPV6 in ip6_tnl_xmit()
- packet: on direct_xmit, limit tso and csum to supported devices
- [powerpc] Update parameters for csum_tcpudp_magic & csum_tcpudp_nofold
- [arm64, armhf] usb: dwc3: gadget: properly account queued requests
- scsi: megaraid_sas: Fix data integrity failure for JBOD (passthrough)
devices
- scsi: megaraid_sas: fix macro MEGASAS_IS_LOGICAL to avoid regression
-- Salvatore Bonaccorso <carnil@debian.org> Tue, 15 Nov 2016 22:01:08 +0100
linux (4.8.7-1) unstable; urgency=medium
* New upstream stable update:

View File

@@ -1,201 +0,0 @@
From: Sabrina Dubroca <sd@queasysnail.net>
Date: Mon, 10 Oct 2016 15:43:46 +0200
Subject: net: add recursion limit to GRO
Origin: https://patchwork.ozlabs.org/patch/680412/
Currently, GRO can do unlimited recursion through the gro_receive
handlers. This was fixed for tunneling protocols by limiting tunnel GRO
to one level with encap_mark, but both VLAN and TEB still have this
problem. Thus, the kernel is vulnerable to a stack overflow, if we
receive a packet composed entirely of VLAN headers.
This patch adds a recursion counter to the GRO layer to prevent stack
overflow. When a gro_receive function hits the recursion limit, GRO is
aborted for this skb and it is processed normally.
Thanks to Vladimír Beneš <vbenes@redhat.com> for the initial bug report.
Fixes: CVE-2016-7039
Fixes: 9b174d88c257 ("net: Add Transparent Ethernet Bridging GRO support.")
Fixes: 66e5133f19e9 ("vlan: Add GRO support for non hardware accelerated vlan")
Signed-off-by: Sabrina Dubroca <sd@queasysnail.net>
Reviewed-by: Jiri Benc <jbenc@redhat.com>
Acked-by: Hannes Frederic Sowa <hannes@stressinduktion.org>
---
drivers/net/geneve.c | 2 +-
drivers/net/vxlan.c | 2 +-
include/linux/netdevice.h | 24 +++++++++++++++++++++++-
net/8021q/vlan.c | 2 +-
net/core/dev.c | 1 +
net/ethernet/eth.c | 2 +-
net/ipv4/af_inet.c | 2 +-
net/ipv4/fou.c | 4 ++--
net/ipv4/gre_offload.c | 2 +-
net/ipv4/udp_offload.c | 8 +++++++-
net/ipv6/ip6_offload.c | 2 +-
11 files changed, 40 insertions(+), 11 deletions(-)
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -471,7 +471,7 @@ static struct sk_buff **geneve_gro_recei
skb_gro_pull(skb, gh_len);
skb_gro_postpull_rcsum(skb, gh, gh_len);
- pp = ptype->callbacks.gro_receive(head, skb);
+ pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
flush = 0;
out_unlock:
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -601,7 +601,7 @@ static struct sk_buff **vxlan_gro_receiv
}
}
- pp = eth_gro_receive(head, skb);
+ pp = call_gro_receive(eth_gro_receive, head, skb);
flush = 0;
out:
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -2114,7 +2114,10 @@ struct napi_gro_cb {
/* Used to determine if flush_id can be ignored */
u8 is_atomic:1;
- /* 5 bit hole */
+ /* Number of gro_receive callbacks this packet already went through */
+ u8 recursion_counter:4;
+
+ /* 1 bit hole */
/* used to support CHECKSUM_COMPLETE for tunneling protocols */
__wsum csum;
@@ -2125,6 +2128,25 @@ struct napi_gro_cb {
#define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
+#define GRO_RECURSION_LIMIT 15
+static inline int gro_recursion_inc_test(struct sk_buff *skb)
+{
+ return ++NAPI_GRO_CB(skb)->recursion_counter == GRO_RECURSION_LIMIT;
+}
+
+typedef struct sk_buff **(*gro_receive_t)(struct sk_buff **, struct sk_buff *);
+static inline struct sk_buff **call_gro_receive(gro_receive_t cb,
+ struct sk_buff **head,
+ struct sk_buff *skb)
+{
+ if (gro_recursion_inc_test(skb)) {
+ NAPI_GRO_CB(skb)->flush |= 1;
+ return NULL;
+ }
+
+ return cb(head, skb);
+}
+
struct packet_type {
__be16 type; /* This is really htons(ether_type). */
struct net_device *dev; /* NULL is wildcarded here */
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -664,7 +664,7 @@ static struct sk_buff **vlan_gro_receive
skb_gro_pull(skb, sizeof(*vhdr));
skb_gro_postpull_rcsum(skb, vhdr, sizeof(*vhdr));
- pp = ptype->callbacks.gro_receive(head, skb);
+ pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
out_unlock:
rcu_read_unlock();
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4500,6 +4500,7 @@ static enum gro_result dev_gro_receive(s
NAPI_GRO_CB(skb)->flush = 0;
NAPI_GRO_CB(skb)->free = 0;
NAPI_GRO_CB(skb)->encap_mark = 0;
+ NAPI_GRO_CB(skb)->recursion_counter = 0;
NAPI_GRO_CB(skb)->is_fou = 0;
NAPI_GRO_CB(skb)->is_atomic = 1;
NAPI_GRO_CB(skb)->gro_remcsum_start = 0;
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -439,7 +439,7 @@ struct sk_buff **eth_gro_receive(struct
skb_gro_pull(skb, sizeof(*eh));
skb_gro_postpull_rcsum(skb, eh, sizeof(*eh));
- pp = ptype->callbacks.gro_receive(head, skb);
+ pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
out_unlock:
rcu_read_unlock();
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1388,7 +1388,7 @@ struct sk_buff **inet_gro_receive(struct
skb_gro_pull(skb, sizeof(*iph));
skb_set_transport_header(skb, skb_gro_offset(skb));
- pp = ops->callbacks.gro_receive(head, skb);
+ pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
out_unlock:
rcu_read_unlock();
--- a/net/ipv4/fou.c
+++ b/net/ipv4/fou.c
@@ -219,7 +219,7 @@ static struct sk_buff **fou_gro_receive(
if (!ops || !ops->callbacks.gro_receive)
goto out_unlock;
- pp = ops->callbacks.gro_receive(head, skb);
+ pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
out_unlock:
rcu_read_unlock();
@@ -387,7 +387,7 @@ static struct sk_buff **gue_gro_receive(
if (WARN_ON_ONCE(!ops || !ops->callbacks.gro_receive))
goto out_unlock;
- pp = ops->callbacks.gro_receive(head, skb);
+ pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
flush = 0;
out_unlock:
--- a/net/ipv4/gre_offload.c
+++ b/net/ipv4/gre_offload.c
@@ -227,7 +227,7 @@ static struct sk_buff **gre_gro_receive(
/* Adjusted NAPI_GRO_CB(skb)->csum after skb_gro_pull()*/
skb_gro_postpull_rcsum(skb, greh, grehlen);
- pp = ptype->callbacks.gro_receive(head, skb);
+ pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
flush = 0;
out_unlock:
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -293,7 +293,13 @@ unflush:
skb_gro_pull(skb, sizeof(struct udphdr)); /* pull encapsulating udp header */
skb_gro_postpull_rcsum(skb, uh, sizeof(struct udphdr));
- pp = udp_sk(sk)->gro_receive(sk, head, skb);
+
+ if (gro_recursion_inc_test(skb)) {
+ flush = 1;
+ pp = NULL;
+ } else {
+ pp = udp_sk(sk)->gro_receive(sk, head, skb);
+ }
out_unlock:
rcu_read_unlock();
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -243,7 +243,7 @@ static struct sk_buff **ipv6_gro_receive
skb_gro_postpull_rcsum(skb, iph, nlen);
- pp = ops->callbacks.gro_receive(head, skb);
+ pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
out_unlock:
rcu_read_unlock();

View File

@@ -93,7 +0,0 @@ features/all/securelevel/arm64-add-kernel-config-option-to-set-securelevel-wh.pa
# Security fixes
bugfix/all/ptrace-being-capable-wrt-a-process-requires-mapped-uids-gids.patch
debian/i386-686-pae-pci-set-pci-nobios-by-default.patch
bugfix/all/net-add-recursion-limit-to-gro.patch
# ABI maintenance