kernel: bump 3.18 to 3.18.130

Refreshed all patches.

Compile-tested on: adm5120
Runtime-tested on: none

Signed-off-by: Koen Vandeputte <koen.vandeputte@ncentric.com>
Branch: master
Author: Koen Vandeputte <koen.vandeputte@ncentric.com>
Parent: 09004e6e13
Commit: 902a9f23d6
 include/kernel-version.mk                                          |  4
 target/linux/generic/pending-3.18/001-mtdsplit_backport.patch      |  2
 target/linux/generic/pending-3.18/760-8139cp-fixes-from-4.3.patch  | 28
 3 files changed, 17 insertions(+), 17 deletions(-)
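Note on the checksum change below: the LINUX_KERNEL_HASH value is the sha256 of the released kernel tarball. As a minimal sketch (a hypothetical helper, not part of the OpenWrt tree or of this commit), a GNU make rule like the following could print the checksum of a locally downloaded linux-3.18.130.tar.xz for comparison against the value added in include/kernel-version.mk:

# Hypothetical helper for illustration only (not part of this commit):
# print the sha256 of the downloaded tarball so it can be compared against
# LINUX_KERNEL_HASH-3.18.130 below. The path and target name are assumptions.
KERNEL_TARBALL ?= dl/linux-3.18.130.tar.xz

.PHONY: check-kernel-hash
check-kernel-hash:
	sha256sum $(KERNEL_TARBALL)

Its output should match the LINUX_KERNEL_HASH-3.18.130 value recorded in the hunk below.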

--- a/include/kernel-version.mk
+++ b/include/kernel-version.mk
@@ -2,12 +2,12 @@
 
 LINUX_RELEASE?=1
 
-LINUX_VERSION-3.18 = .129
+LINUX_VERSION-3.18 = .130
 LINUX_VERSION-4.9 = .145
 LINUX_VERSION-4.14 = .88
 LINUX_VERSION-4.19 = .9
 
-LINUX_KERNEL_HASH-3.18.129 = 8d420b58593a74109175be781da2320a341baec9aaa186da31ad508d3f377e72
+LINUX_KERNEL_HASH-3.18.130 = d1bf85ed3fd0067b1134178ed5492ae0053cb3fdd5361986fe0b85234fc82723
 LINUX_KERNEL_HASH-4.9.145 = 6901ca37e8c305a2f26c598952338b4dc2481ca5a9d0bf71e2b71730a5b5bc5e
 LINUX_KERNEL_HASH-4.14.88 = b0f0b8c76708eab6caf3009702e531d40a243b152922ee1f9a441316f226f52d
 LINUX_KERNEL_HASH-4.19.9 = fc116cc6829c73944215d3b3ac0fc368dde9e8235b456744afffde001269dbf2

--- a/target/linux/generic/pending-3.18/001-mtdsplit_backport.patch
+++ b/target/linux/generic/pending-3.18/001-mtdsplit_backport.patch
@@ -67,7 +67,7 @@
struct mtd_partition *part;
--- a/drivers/mtd/mtdsplit/mtdsplit_tplink.c
+++ b/drivers/mtd/mtdsplit/mtdsplit_tplink.c
-@@ -84,8 +84,8 @@ struct tplink_fw_header {
+@@ -83,8 +83,8 @@ struct tplink_fw_header {
};
static int mtdsplit_parse_tplink(struct mtd_info *master,

--- a/target/linux/generic/pending-3.18/760-8139cp-fixes-from-4.3.patch
+++ b/target/linux/generic/pending-3.18/760-8139cp-fixes-from-4.3.patch
@@ -131,7 +131,7 @@ Date: Fri Sep 18 00:19:08 2015 +0100
unsigned rx_buf_sz;
unsigned wol_enabled : 1; /* Is Wake-on-LAN enabled? */
-@@ -665,7 +667,7 @@ static void cp_tx (struct cp_private *cp
+@@ -670,7 +672,7 @@ static void cp_tx (struct cp_private *cp
BUG_ON(!skb);
dma_unmap_single(&cp->pdev->dev, le64_to_cpu(txd->addr),
@@ -140,7 +140,7 @@ Date: Fri Sep 18 00:19:08 2015 +0100
PCI_DMA_TODEVICE);
if (status & LastFrag) {
-@@ -733,7 +735,7 @@ static netdev_tx_t cp_start_xmit (struct
+@@ -738,7 +740,7 @@ static netdev_tx_t cp_start_xmit (struct
{
struct cp_private *cp = netdev_priv(dev);
unsigned entry;
@@ -149,7 +149,7 @@ Date: Fri Sep 18 00:19:08 2015 +0100
unsigned long intr_flags;
__le32 opts2;
int mss = 0;
-@@ -753,6 +755,21 @@ static netdev_tx_t cp_start_xmit (struct
+@@ -758,6 +760,21 @@ static netdev_tx_t cp_start_xmit (struct
mss = skb_shinfo(skb)->gso_size;
opts2 = cpu_to_le32(cp_tx_vlan_tag(skb));
@@ -171,7 +171,7 @@ Date: Fri Sep 18 00:19:08 2015 +0100
if (skb_shinfo(skb)->nr_frags == 0) {
struct cp_desc *txd = &cp->tx_ring[entry];
-@@ -768,31 +785,20 @@ static netdev_tx_t cp_start_xmit (struct
+@@ -773,31 +790,20 @@ static netdev_tx_t cp_start_xmit (struct
txd->addr = cpu_to_le64(mapping);
wmb();
@@ -209,7 +209,7 @@ Date: Fri Sep 18 00:19:08 2015 +0100
/* We must give this initial chunk to the device last.
* Otherwise we could race with the device.
-@@ -805,14 +811,14 @@ static netdev_tx_t cp_start_xmit (struct
+@@ -810,14 +816,14 @@ static netdev_tx_t cp_start_xmit (struct
goto out_dma_error;
cp->tx_skb[entry] = skb;
@@ -226,7 +226,7 @@ Date: Fri Sep 18 00:19:08 2015 +0100
len = skb_frag_size(this_frag);
mapping = dma_map_single(&cp->pdev->dev,
skb_frag_address(this_frag),
-@@ -824,19 +830,7 @@ static netdev_tx_t cp_start_xmit (struct
+@@ -829,19 +835,7 @@ static netdev_tx_t cp_start_xmit (struct
eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
@@ -247,7 +247,7 @@ Date: Fri Sep 18 00:19:08 2015 +0100
if (frag == skb_shinfo(skb)->nr_frags - 1)
ctrl |= LastFrag;
-@@ -849,8 +843,8 @@ static netdev_tx_t cp_start_xmit (struct
+@@ -854,8 +848,8 @@ static netdev_tx_t cp_start_xmit (struct
txd->opts1 = cpu_to_le32(ctrl);
wmb();
@@ -257,7 +257,7 @@ Date: Fri Sep 18 00:19:08 2015 +0100
}
txd = &cp->tx_ring[first_entry];
-@@ -858,27 +852,17 @@ static netdev_tx_t cp_start_xmit (struct
+@@ -863,27 +857,17 @@ static netdev_tx_t cp_start_xmit (struct
txd->addr = cpu_to_le64(first_mapping);
wmb();
@@ -292,7 +292,7 @@ Date: Fri Sep 18 00:19:08 2015 +0100
if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1))
netif_stop_queue(dev);
-@@ -1115,6 +1099,7 @@ static int cp_init_rings (struct cp_priv
+@@ -1120,6 +1104,7 @@ static int cp_init_rings (struct cp_priv
{
memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
cp->tx_ring[CP_TX_RING_SIZE - 1].opts1 = cpu_to_le32(RingEnd);
@@ -300,7 +300,7 @@ Date: Fri Sep 18 00:19:08 2015 +0100
cp_init_rings_index(cp);
-@@ -1151,7 +1136,7 @@ static void cp_clean_rings (struct cp_pr
+@@ -1156,7 +1141,7 @@ static void cp_clean_rings (struct cp_pr
desc = cp->rx_ring + i;
dma_unmap_single(&cp->pdev->dev,le64_to_cpu(desc->addr),
cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
@@ -309,7 +309,7 @@ Date: Fri Sep 18 00:19:08 2015 +0100
}
}
-@@ -1164,7 +1149,7 @@ static void cp_clean_rings (struct cp_pr
+@@ -1169,7 +1154,7 @@ static void cp_clean_rings (struct cp_pr
le32_to_cpu(desc->opts1) & 0xffff,
PCI_DMA_TODEVICE);
if (le32_to_cpu(desc->opts1) & LastFrag)
@@ -318,7 +318,7 @@ Date: Fri Sep 18 00:19:08 2015 +0100
cp->dev->stats.tx_dropped++;
}
}
-@@ -1172,6 +1157,7 @@ static void cp_clean_rings (struct cp_pr
+@@ -1177,6 +1162,7 @@ static void cp_clean_rings (struct cp_pr
memset(cp->rx_ring, 0, sizeof(struct cp_desc) * CP_RX_RING_SIZE);
memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
@@ -326,7 +326,7 @@ Date: Fri Sep 18 00:19:08 2015 +0100
memset(cp->rx_skb, 0, sizeof(struct sk_buff *) * CP_RX_RING_SIZE);
memset(cp->tx_skb, 0, sizeof(struct sk_buff *) * CP_TX_RING_SIZE);
-@@ -1249,7 +1235,7 @@ static void cp_tx_timeout(struct net_dev
+@@ -1254,7 +1240,7 @@ static void cp_tx_timeout(struct net_dev
{
struct cp_private *cp = netdev_priv(dev);
unsigned long flags;
@@ -335,7 +335,7 @@ Date: Fri Sep 18 00:19:08 2015 +0100
netdev_warn(dev, "Transmit timeout, status %2x %4x %4x %4x\n",
cpr8(Cmd), cpr16(CpCmd),
-@@ -1257,13 +1243,26 @@ static void cp_tx_timeout(struct net_dev
+@@ -1262,13 +1248,26 @@ static void cp_tx_timeout(struct net_dev
spin_lock_irqsave(&cp->lock, flags);
