author     Michal Suchanek <msuchanek@suse.de>  2019-08-20 16:58:04 +0200
committer  Michal Suchanek <msuchanek@suse.de>  2019-08-20 16:58:04 +0200
commit     3b35ec5faed5dd6dee060797bbf70864b4431f4c
tree       bf92e8cfce2b963d2859eb8781510e3d6dc4450d
parent     6cd670e51920d45eebd34064fc2701e74a0be6b6
ibmvnic: Unmap DMA address of TX descriptor buffers after use (bsc#1146351 ltc#180726).
-rw-r--r--  patches.drivers/ibmvnic-Unmap-DMA-address-of-TX-descriptor-buffers-a.patch  63
-rw-r--r--  series.conf                                                                  1
2 files changed, 64 insertions, 0 deletions
diff --git a/patches.drivers/ibmvnic-Unmap-DMA-address-of-TX-descriptor-buffers-a.patch b/patches.drivers/ibmvnic-Unmap-DMA-address-of-TX-descriptor-buffers-a.patch
new file mode 100644
index 0000000000..ddee90a1c9
--- /dev/null
+++ b/patches.drivers/ibmvnic-Unmap-DMA-address-of-TX-descriptor-buffers-a.patch
@@ -0,0 +1,63 @@
+From 80f0fe0934cd3daa13a5e4d48a103f469115b160 Mon Sep 17 00:00:00 2001
+From: Thomas Falcon <tlfalcon@linux.ibm.com>
+Date: Wed, 14 Aug 2019 14:57:05 -0500
+Subject: [PATCH] ibmvnic: Unmap DMA address of TX descriptor buffers after use
+
+References: bsc#1146351 ltc#180726
+Patch-mainline: v5.3 or v5.3-rc6 (next release)
+Git-commit: 80f0fe0934cd3daa13a5e4d48a103f469115b160
+
+There's no need to wait until a completion is received to unmap
+TX descriptor buffers that have been passed to the hypervisor.
+Instead unmap it when the hypervisor call has completed. This patch
+avoids the possibility that a buffer will not be unmapped because
+a TX completion is lost or mishandled.
+
+Reported-by: Abdul Haleem <abdhalee@linux.vnet.ibm.com>
+Tested-by: Devesh K. Singh <devesh_singh@in.ibm.com>
+Signed-off-by: Thomas Falcon <tlfalcon@linux.ibm.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Michal Suchanek <msuchanek@suse.de>
+---
+ drivers/net/ethernet/ibm/ibmvnic.c | 11 ++---------
+ 1 file changed, 2 insertions(+), 9 deletions(-)
+
+diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
+index 3da680073265..cebd20f3128d 100644
+--- a/drivers/net/ethernet/ibm/ibmvnic.c
++++ b/drivers/net/ethernet/ibm/ibmvnic.c
+@@ -1568,6 +1568,8 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
+ lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num],
+ (u64)tx_buff->indir_dma,
+ (u64)num_entries);
++ dma_unmap_single(dev, tx_buff->indir_dma,
++ sizeof(tx_buff->indir_arr), DMA_TO_DEVICE);
+ } else {
+ tx_buff->num_entries = num_entries;
+ lpar_rc = send_subcrq(adapter, handle_array[queue_num],
+@@ -2788,7 +2790,6 @@ static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
+ union sub_crq *next;
+ int index;
+ int i, j;
+- u8 *first;
+
+ restart_loop:
+ while (pending_scrq(adapter, scrq)) {
+@@ -2818,14 +2819,6 @@ static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
+
+ txbuff->data_dma[j] = 0;
+ }
+- /* if sub_crq was sent indirectly */
+- first = &txbuff->indir_arr[0].generic.first;
+- if (*first == IBMVNIC_CRQ_CMD) {
+- dma_unmap_single(dev, txbuff->indir_dma,
+- sizeof(txbuff->indir_arr),
+- DMA_TO_DEVICE);
+- *first = 0;
+- }
+
+ if (txbuff->last_frag) {
+ dev_kfree_skb_any(txbuff->skb);
+--
+2.22.0
+
diff --git a/series.conf b/series.conf
index eb633c197c..4ec5ba79ca 100644
--- a/series.conf
+++ b/series.conf
@@ -23414,6 +23414,7 @@
patches.fixes/nvme-multipath-revalidate-nvme_ns_head-gendisk-in-nv.patch
patches.fixes/0001-usb-cdc-acm-make-sure-a-refcount-is-taken-early-enou.patch
patches.fixes/0001-USB-CDC-fix-sanity-checks-in-CDC-union-parser.patch
+ patches.drivers/ibmvnic-Unmap-DMA-address-of-TX-descriptor-buffers-a.patch
# dhowells/linux-fs keys-uefi
patches.suse/0001-KEYS-Allow-unrestricted-boot-time-addition-of-keys-t.patch
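
For illustration, below is a minimal standalone C model of the pattern the patch adopts: map the indirect descriptor buffer, hand it to the hypervisor call, and unmap it as soon as the call returns instead of deferring the unmap to the TX-completion path. The helpers fake_dma_map(), fake_dma_unmap(), fake_send_indirect() and xmit_one() are hypothetical stand-ins for dma_map_single(), dma_unmap_single(), send_subcrq_indirect() and ibmvnic_xmit(); only the ordering they demonstrate comes from the patch.

/*
 * Illustrative sketch, not part of the commit: a userspace model of the
 * change above. The stand-in helpers only count mappings; the point is
 * the ordering -- unmap right after the send call returns, not in the
 * completion handler where a lost completion would leak the mapping.
 */
#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

static int mappings_outstanding;                  /* live "DMA" mappings */

static uintptr_t fake_dma_map(void *buf)          /* stand-in for dma_map_single() */
{
	mappings_outstanding++;
	return (uintptr_t)buf;
}

static void fake_dma_unmap(uintptr_t addr)        /* stand-in for dma_unmap_single() */
{
	(void)addr;
	mappings_outstanding--;
}

static int fake_send_indirect(uintptr_t addr)     /* stand-in for send_subcrq_indirect() */
{
	(void)addr;
	return 0;                                 /* hypervisor accepted the descriptors */
}

static void xmit_one(bool completion_arrives)
{
	char indir_arr[64];                       /* models tx_buff->indir_arr */
	uintptr_t indir_dma = fake_dma_map(indir_arr);

	fake_send_indirect(indir_dma);

	/* Post-patch behaviour: release the mapping as soon as the call
	 * returns, so a lost or mishandled completion cannot leak it. */
	fake_dma_unmap(indir_dma);

	/* Pre-patch behaviour deferred this unmap to the completion
	 * handler; with completion_arrives == false the mapping would
	 * have stayed outstanding forever. */
	(void)completion_arrives;
}

int main(void)
{
	xmit_one(false);                          /* completion never arrives ... */
	printf("outstanding mappings: %d\n", mappings_outstanding); /* ... yet prints 0 */
	return 0;
}

As the commit message notes, the hypervisor no longer needs the descriptor buffer once the indirect send call has completed, which is what makes the immediate unmap safe and lets the completion handler drop its unmap logic entirely.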