This adds the patch submitted to upstream that adds BQL to the mvneta driver: https://patchwork.kernel.org/patch/9328413/. It improves latency under load when the physical link is saturated.

Signed-off-by: Toke Høiland-Jørgensen <toke@toke.dk>
parent a72e1692b8
commit 8aa9f6bd71
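For readers unfamiliar with BQL (Byte Queue Limits): a driver reports bytes handed to the hardware with netdev_tx_sent_queue(), reports completed transmissions with netdev_tx_completed_queue(), and clears the accounting with netdev_tx_reset_queue() when a queue is torn down or reset; the kernel then dynamically limits how many bytes may sit in the hardware ring, which is what reduces latency under saturation. The sketch below shows that generic pattern, not the mvneta code itself; my_xmit and my_tx_clean are hypothetical driver hooks used only for illustration:

/* Minimal sketch of the generic BQL accounting pattern this patch
 * applies to mvneta. my_xmit and my_tx_clean are hypothetical names,
 * not functions from the driver.
 */
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_queue *nq =
		netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

	/* ...map the skb and queue it to the hardware descriptors... */

	/* Tell BQL how many bytes were handed to the hardware. */
	netdev_tx_sent_queue(nq, skb->len);

	return NETDEV_TX_OK;
}

static void my_tx_clean(struct net_device *dev, int qid, int done)
{
	struct netdev_queue *nq = netdev_get_tx_queue(dev, qid);
	unsigned int bytes_compl = 0, pkts_compl = 0;

	/* ...for each of the 'done' completed descriptors:
	 *    bytes_compl += skb->len; pkts_compl++; free the skb...
	 */

	/* Report completions; BQL sizes the in-flight byte limit from this. */
	netdev_tx_completed_queue(nq, pkts_compl, bytes_compl);
}

The patch below applies exactly this pattern: sent accounting in the transmit path, completion accounting in mvneta_txq_bufs_free(), and a netdev_tx_reset_queue() in mvneta_txq_deinit() so the sent/completed counters cannot go stale across a queue teardown.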
@@ -0,0 +1,83 @@
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -1695,8 +1695,10 @@ static struct mvneta_tx_queue *mvneta_tx
 
 /* Free tx queue skbuffs */
 static void mvneta_txq_bufs_free(struct mvneta_port *pp,
-				 struct mvneta_tx_queue *txq, int num)
+				 struct mvneta_tx_queue *txq, int num,
+				 struct netdev_queue *nq)
 {
+	unsigned int bytes_compl = 0, pkts_compl = 0;
 	int i;
 
 	for (i = 0; i < num; i++) {
@@ -1704,6 +1706,11 @@ static void mvneta_txq_bufs_free(struct
 			txq->txq_get_index;
 		struct sk_buff *skb = txq->tx_skb[txq->txq_get_index];
 
+		if (skb) {
+			bytes_compl += skb->len;
+			pkts_compl++;
+		}
+
 		mvneta_txq_inc_get(txq);
 
 		if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
@@ -1714,6 +1721,8 @@ static void mvneta_txq_bufs_free(struct
 			continue;
 		dev_kfree_skb_any(skb);
 	}
+
+	netdev_tx_completed_queue(nq, pkts_compl, bytes_compl);
 }
 
 /* Handle end of transmission */
@@ -1727,7 +1736,7 @@ static void mvneta_txq_done(struct mvnet
 	if (!tx_done)
 		return;
 
-	mvneta_txq_bufs_free(pp, txq, tx_done);
+	mvneta_txq_bufs_free(pp, txq, tx_done, nq);
 
 	txq->count -= tx_done;
 
@@ -2334,6 +2343,8 @@ out:
 		struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
 		struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
 
+		netdev_tx_sent_queue(nq, len);
+
 		txq->count += frags;
 		mvneta_txq_pend_desc_add(pp, txq, frags);
 
@@ -2358,9 +2369,10 @@ static void mvneta_txq_done_force(struct
 				  struct mvneta_tx_queue *txq)
 
 {
+	struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
 	int tx_done = txq->count;
 
-	mvneta_txq_bufs_free(pp, txq, tx_done);
+	mvneta_txq_bufs_free(pp, txq, tx_done, nq);
 
 	/* reset txq */
 	txq->count = 0;
@@ -2841,6 +2853,8 @@ static int mvneta_txq_init(struct mvneta
 static void mvneta_txq_deinit(struct mvneta_port *pp,
 			      struct mvneta_tx_queue *txq)
 {
+	struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
+
 	kfree(txq->tx_skb);
 
 	if (txq->tso_hdrs)
@@ -2852,6 +2866,8 @@ static void mvneta_txq_deinit(struct mvn
 			  txq->size * MVNETA_DESC_ALIGNED_SIZE,
 			  txq->descs, txq->descs_phys);
 
+	netdev_tx_reset_queue(nq);
+
 	txq->descs = NULL;
 	txq->last_desc = 0;
 	txq->next_desc_to_proc = 0;