DivestOS/Patches/Linux_CVEs/Untracked/ANY/0004-net-add-recursion-limit-to-GRO.patch

From 8a069274d823319273dab097e865faa01bee2451 Mon Sep 17 00:00:00 2001
From: Sabrina Dubroca <sd@queasysnail.net>
Date: Fri, 26 May 2017 15:25:08 +0530
Subject: net: add recursion limit to GRO
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

[ Backported upstream commit: fcd91dd449867c6bfe56a81cabba76b829fd05cd ]
[ Files without GRO support have not been updated ]

Currently, GRO can do unlimited recursion through the gro_receive
handlers. This was fixed for tunneling protocols by limiting tunnel GRO
to one level with encap_mark, but both VLAN and TEB still have this
problem. Thus, the kernel is vulnerable to a stack overflow if we
receive a packet composed entirely of VLAN headers.

This patch adds a recursion counter to the GRO layer to prevent stack
overflow. When a gro_receive function hits the recursion limit, GRO is
aborted for this skb and it is processed normally. This recursion
counter is put in the GRO CB, but could be turned into a percpu counter
if we run out of space in the CB.

Thanks to Vladimír Beneš <vbenes@redhat.com> for the initial bug report.

Change-Id: Iec7b958d843c5d8214a36be8187d03f9e86ef079
Fixes: CVE-2016-7039
Fixes: 9b174d88c257 ("net: Add Transparent Ethernet Bridging GRO support.")
Fixes: 66e5133f19e9 ("vlan: Add GRO support for non hardware accelerated vlan")
Signed-off-by: Sabrina Dubroca <sd@queasysnail.net>
Reviewed-by: Jiri Benc <jbenc@redhat.com>
Acked-by: Hannes Frederic Sowa <hannes@stressinduktion.org>
Acked-by: Tom Herbert <tom@herbertland.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Akshaya <akshayab@codeaurora.org>
---
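A note for readers, placed below the "---" fold so that git am ignores it:
the guard this patch adds is small enough to model in user space. The
sketch below is illustrative only; gro_cb_model, nested_receive and main()
are stand-ins for the real skb/NAPI machinery, and only the
GRO_RECURSION_LIMIT value and the counter/flush logic mirror the hunks
that follow.

#include <stdio.h>

#define GRO_RECURSION_LIMIT 15

/* Stand-in for the per-skb control block (NAPI_GRO_CB in the patch). */
struct gro_cb_model {
	unsigned int recursion_counter;
	unsigned int flush;
};

typedef int (*gro_receive_model_t)(struct gro_cb_model *cb);

/* Mirrors gro_recursion_inc_test(): bump the per-packet counter and
 * report whether this call reached the limit. */
static int gro_recursion_inc_test_model(struct gro_cb_model *cb)
{
	return ++cb->recursion_counter == GRO_RECURSION_LIMIT;
}

/* Mirrors call_gro_receive(): refuse to enter another gro_receive
 * handler past the limit; mark the packet for normal processing. */
static int call_gro_receive_model(gro_receive_model_t cb_fn,
				  struct gro_cb_model *cb)
{
	if (gro_recursion_inc_test_model(cb)) {
		cb->flush |= 1;	/* abort GRO for this packet */
		return -1;
	}
	return cb_fn(cb);
}

/* A handler that always recurses, the way a packet built purely of
 * stacked VLAN headers drives the real gro_receive handlers. */
static int nested_receive(struct gro_cb_model *cb)
{
	return call_gro_receive_model(nested_receive, cb);
}

int main(void)
{
	struct gro_cb_model cb = { 0, 0 };

	call_gro_receive_model(nested_receive, &cb);
	printf("aborted after %u nested handlers, flush=%u\n",
	       cb.recursion_counter, cb.flush);
	return 0;
}

Without the counter the chain above never terminates; with it, the
recursion stops after 15 nested handlers and the packet is flushed, which
is the bounded stack depth the hunks below enforce. A 4-bit counter is
enough because the limit itself is 15, the largest value the field holds.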
drivers/net/vxlan.c | 2 +-
include/linux/netdevice.h | 24 ++++++++++++++++++++++++
net/core/dev.c | 1 +
net/ipv4/af_inet.c | 2 +-
net/ipv4/fou.c | 4 ++--
net/ipv4/gre_offload.c | 2 +-
net/ipv4/udp_offload.c | 2 +-
net/ipv6/ip6_offload.c | 2 +-
8 files changed, 32 insertions(+), 7 deletions(-)
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 59282dd..d7cdfad 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -600,7 +600,7 @@ static struct sk_buff **vxlan_gro_receive(struct sk_buff **head, struct sk_buff
 	skb_gro_pull(skb, sizeof(*eh)); /* pull inner eth header */
 	skb_gro_postpull_rcsum(skb, eh, sizeof(*eh));
 
-	pp = ptype->callbacks.gro_receive(head, skb);
+	pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
 
 out_unlock:
 	rcu_read_unlock();
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 173b250..cbcd056 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1914,6 +1914,11 @@ struct napi_gro_cb {
 	/* Used in foo-over-udp, set in udp[46]_gro_receive */
 	u8	is_ipv6:1;
 
+	/* Number of gro_receive callbacks this packet already went through */
+	u8 recursion_counter:4;
+
+	/* 1 bit hole */
+
 	/* used to support CHECKSUM_COMPLETE for tunneling protocols */
 	__wsum	csum;
 
@@ -1923,6 +1928,25 @@ struct napi_gro_cb {
 
 #define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
 
+#define GRO_RECURSION_LIMIT 15
+static inline int gro_recursion_inc_test(struct sk_buff *skb)
+{
+	return ++NAPI_GRO_CB(skb)->recursion_counter == GRO_RECURSION_LIMIT;
+}
+
+typedef struct sk_buff **(*gro_receive_t)(struct sk_buff **, struct sk_buff *);
+static inline struct sk_buff **call_gro_receive(gro_receive_t cb,
+						struct sk_buff **head,
+						struct sk_buff *skb)
+{
+	if (unlikely(gro_recursion_inc_test(skb))) {
+		NAPI_GRO_CB(skb)->flush |= 1;
+		return NULL;
+	}
+
+	return cb(head, skb);
+}
+
 struct packet_type {
 	__be16			type;	/* This is really htons(ether_type). */
 	struct net_device	*dev;	/* NULL is wildcarded here */
diff --git a/net/core/dev.c b/net/core/dev.c
index 99e2387..836e4f0 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4117,6 +4117,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
 	NAPI_GRO_CB(skb)->flush = 0;
 	NAPI_GRO_CB(skb)->free = 0;
 	NAPI_GRO_CB(skb)->encap_mark = 0;
+	NAPI_GRO_CB(skb)->recursion_counter = 0;
 
 	/* Setup for GRO checksum validation */
 	switch (skb->ip_summed) {
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 5589a7c..4fc2ca4 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1409,7 +1409,7 @@ static struct sk_buff **inet_gro_receive(struct sk_buff **head,
 	skb_gro_pull(skb, sizeof(*iph));
 	skb_set_transport_header(skb, skb_gro_offset(skb));
 
-	pp = ops->callbacks.gro_receive(head, skb);
+	pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
 
 out_unlock:
 	rcu_read_unlock();
diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
index 8ce8e82..7b5b280 100644
--- a/net/ipv4/fou.c
+++ b/net/ipv4/fou.c
@@ -119,7 +119,7 @@ static struct sk_buff **fou_gro_receive(struct sk_buff **head,
 	if (!ops || !ops->callbacks.gro_receive)
 		goto out_unlock;
 
-	pp = ops->callbacks.gro_receive(head, skb);
+	pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
 
 out_unlock:
 	rcu_read_unlock();
@@ -220,7 +220,7 @@ static struct sk_buff **gue_gro_receive(struct sk_buff **head,
 	/* Adjusted NAPI_GRO_CB(skb)->csum after skb_gro_pull()*/
 	skb_gro_postpull_rcsum(skb, guehdr, guehlen);
 
-	pp = ops->callbacks.gro_receive(head, skb);
+	pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
 
 out_unlock:
 	rcu_read_unlock();
diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c
index cc7b082..370c057 100644
--- a/net/ipv4/gre_offload.c
+++ b/net/ipv4/gre_offload.c
@@ -219,7 +219,7 @@ static struct sk_buff **gre_gro_receive(struct sk_buff **head,
 	/* Adjusted NAPI_GRO_CB(skb)->csum after skb_gro_pull()*/
 	skb_gro_postpull_rcsum(skb, greh, grehlen);
 
-	pp = ptype->callbacks.gro_receive(head, skb);
+	pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
 
 out_unlock:
 	rcu_read_unlock();
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index e6d05ae..f805597 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -306,7 +306,7 @@ unflush:
 	skb_gro_pull(skb, sizeof(struct udphdr)); /* pull encapsulating udp header */
 	skb_gro_postpull_rcsum(skb, uh, sizeof(struct udphdr));
 	NAPI_GRO_CB(skb)->proto = uo_priv->offload->ipproto;
-	pp = uo_priv->offload->callbacks.gro_receive(head, skb);
+	pp = call_gro_receive(uo_priv->offload->callbacks.gro_receive, head, skb);
 
 out_unlock:
 	rcu_read_unlock();
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index dbc528e..b10c0c6 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -244,7 +244,7 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
 
 	skb_gro_postpull_rcsum(skb, iph, nlen);
 
-	pp = ops->callbacks.gro_receive(head, skb);
+	pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
 
 out_unlock:
 	rcu_read_unlock();
--
1.9.1