author     Denis Kirjanov <dkirjanov@suse.com>    2019-02-22 13:43:30 +0100
committer  Denis Kirjanov <dkirjanov@suse.com>    2019-02-22 14:13:33 +0100
commit     5bcf5c8f36ac99470cb7cf4a32eeb62cdfef595d (patch)
tree       6a86a859378cbc2305b0a985935a368e899fbf10
parent     e395d2e5212889f10413f9e2ada7305c77f253d9 (diff)
- Refresh patches.drivers/0020-RDMA-bnxt_re-Allow-posting-when-QPs-are-in-error.patch.
- Refresh patches.drivers/RDMA-bnxt_re-Allocate-multiple-notification-queues.patch.

Sync the patches to the upstream version.

suse-commit: 99230ca6f4a641acfac0152369c7fad62186acf6
-rw-r--r--  drivers/infiniband/hw/bnxt_re/bnxt_re.h    |   2
-rw-r--r--  drivers/infiniband/hw/bnxt_re/ib_verbs.c   |  28
-rw-r--r--  drivers/infiniband/hw/bnxt_re/main.c       |  21
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_fp.c   | 356
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_fp.h   |  27
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_rcfw.c |  18
6 files changed, 269 insertions, 183 deletions
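
The substance of this refresh is visible in the qplib_fp.c hunks below: the per-CQ flush_lock is removed, and the SQ/RQ flush lists are instead manipulated while holding the hwq locks of both of the QP's completion queues, via the new bnxt_qplib_acquire_cq_locks()/bnxt_qplib_release_cq_locks() helpers. A minimal caller sketch of that pattern follows, mirroring the use in bnxt_qplib_process_qp_event() in qplib_rcfw.c; the wrapper function is hypothetical, only the helpers it calls come from this diff.

    /* Sketch only, not part of the commit: example_qp_error() is a
     * hypothetical wrapper around the helpers introduced below.
     */
    static void example_qp_error(struct bnxt_qplib_qp *qp)
    {
            unsigned long flags;

            /* Takes qp->scq->hwq.lock, then qp->rcq->hwq.lock if the
             * send and receive CQs differ; a shared CQ is locked once.
             */
            bnxt_qplib_acquire_cq_locks(qp, &flags);

            /* Moves the QP to the ERR state and, via
             * __bnxt_qplib_add_flush_qp(), onto the CQ flush lists,
             * which now must only be touched with the CQ locks held.
             */
            bnxt_qplib_mark_qp_error(qp);

            bnxt_qplib_release_cq_locks(qp, &flags);
    }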
diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
index dba63f598a6f..35814c6b33ce 100644
--- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h
+++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
@@ -137,7 +137,7 @@ struct bnxt_re_dev {
struct bnxt_re_qp *qp1_sqp;
struct bnxt_re_ah *sqp_ah;
struct bnxt_re_sqp_entries sqp_tbl[1024];
- atomic_t nq_alloc_cnt;
+ atomic_t nq_alloc_cnt;
};
#define to_bnxt_re_dev(ptr, member) \
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index 56a673b4f19a..afa8f65272fc 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -1374,18 +1374,18 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
qp->qplib_qp.state = __from_ib_qp_state(qp_attr->qp_state);
if (!qp->sumem &&
- qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
- dev_dbg(rdev_to_dev(rdev),
- "Move QP = %p to flush list\n",
- qp);
- bnxt_qplib_add_flush_qp(&qp->qplib_qp);
- }
+ qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
+ dev_dbg(rdev_to_dev(rdev),
+ "Move QP = %p to flush list\n",
+ qp);
+ bnxt_qplib_add_flush_qp(&qp->qplib_qp);
+ }
if (!qp->sumem &&
- qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_RESET) {
- dev_dbg(rdev_to_dev(rdev),
- "Move QP = %p out of flush list\n",
- qp);
- bnxt_qplib_del_flush_qp(&qp->qplib_qp);
+ qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_RESET) {
+ dev_dbg(rdev_to_dev(rdev),
+ "Move QP = %p out of flush list\n",
+ qp);
+ bnxt_qplib_del_flush_qp(&qp->qplib_qp);
}
}
if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) {
@@ -2409,7 +2409,7 @@ struct ib_cq *bnxt_re_create_cq(struct ib_device *ibdev,
nq = &rdev->nq[nq_alloc_cnt % (rdev->num_msix - 1)];
cq->qplib_cq.max_wqe = entries;
cq->qplib_cq.cnq_hw_ring_id = nq->ring_id;
- cq->qplib_cq.nq = nq;
+ cq->qplib_cq.nq = nq;
rc = bnxt_qplib_create_cq(&rdev->qplib_res, &cq->qplib_cq);
if (rc) {
@@ -2918,8 +2918,8 @@ int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
}
if (ncqe < budget)
ncqe += bnxt_qplib_process_flush_list(&cq->qplib_cq,
- cqe + ncqe,
- budget - ncqe);
+ cqe + ncqe,
+ budget - ncqe);
if (!ncqe)
break;
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index c952be846140..1dce6dfbaab6 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -678,13 +678,12 @@ static int bnxt_re_init_res(struct bnxt_re_dev *rdev)
for (i = 1; i < rdev->num_msix ; i++) {
rc = bnxt_qplib_enable_nq(rdev->en_dev->pdev, &rdev->nq[i - 1],
- i - 1, rdev->msix_entries[i].vector,
- rdev->msix_entries[i].db_offset,
- &bnxt_re_cqn_handler, NULL);
-
+ i - 1, rdev->msix_entries[i].vector,
+ rdev->msix_entries[i].db_offset,
+ &bnxt_re_cqn_handler, NULL);
if (rc) {
dev_err(rdev_to_dev(rdev),
- "Failed to enable NQ with rc = 0x%x", rc);
+ "Failed to enable NQ with rc = 0x%x", rc);
goto fail;
}
}
@@ -745,7 +744,7 @@ static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev)
rc = bnxt_qplib_alloc_nq(rdev->en_dev->pdev, &rdev->nq[i]);
if (rc) {
dev_err(rdev_to_dev(rdev), "Alloc Failed NQ%d rc:%#x",
- i, rc);
+ i, rc);
goto dealloc_dpi;
}
rc = bnxt_re_net_ring_alloc
@@ -757,8 +756,8 @@ static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev)
&rdev->nq[i].ring_id);
if (rc) {
dev_err(rdev_to_dev(rdev),
- "Failed to allocate NQ fw id with rc = 0x%x",
- rc);
+ "Failed to allocate NQ fw id with rc = 0x%x",
+ rc);
goto free_nq;
}
}
@@ -768,8 +767,8 @@ free_nq:
bnxt_qplib_free_nq(&rdev->nq[i]);
dealloc_dpi:
bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
- &rdev->qplib_res.dpi_tbl,
- &rdev->dpi_privileged);
+ &rdev->qplib_res.dpi_tbl,
+ &rdev->dpi_privileged);
dealloc_res:
bnxt_qplib_free_res(&rdev->qplib_res);
@@ -1072,7 +1071,7 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
* memory for the function and all child VFs
*/
rc = bnxt_qplib_alloc_rcfw_channel(rdev->en_dev->pdev, &rdev->rcfw,
- BNXT_RE_MAX_QPC_COUNT);
+ BNXT_RE_MAX_QPC_COUNT);
if (rc)
goto fail;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
index 78eb67260651..5f6e067f56f8 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -51,113 +51,167 @@
#include "qplib_fp.h"
static void bnxt_qplib_arm_cq_enable(struct bnxt_qplib_cq *cq);
+static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp);
+
+static void bnxt_qplib_cancel_phantom_processing(struct bnxt_qplib_qp *qp)
+{
+ qp->sq.condition = false;
+ qp->sq.send_phantom = false;
+ qp->sq.single = false;
+}
/* Flush list */
+static void __bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
+{
+ struct bnxt_qplib_cq *scq, *rcq;
+
+ scq = qp->scq;
+ rcq = qp->rcq;
+
+ if (!qp->sq.flushed) {
+ dev_dbg(&scq->hwq.pdev->dev,
+ "QPLIB: FP: Adding to SQ Flush list = %p",
+ qp);
+ bnxt_qplib_cancel_phantom_processing(qp);
+ list_add_tail(&qp->sq_flush, &scq->sqf_head);
+ qp->sq.flushed = true;
+ }
+ if (!qp->srq) {
+ if (!qp->rq.flushed) {
+ dev_dbg(&rcq->hwq.pdev->dev,
+ "QPLIB: FP: Adding to RQ Flush list = %p",
+ qp);
+ list_add_tail(&qp->rq_flush, &rcq->rqf_head);
+ qp->rq.flushed = true;
+ }
+ }
+}
-/* To avoid processing completions if QP is already in flush list */
-static bool bnxt_qplib_is_qp_in_rq_flushlist(struct bnxt_qplib_qp *qp)
+void bnxt_qplib_acquire_cq_locks(struct bnxt_qplib_qp *qp,
+ unsigned long *flags)
+ __acquires(&qp->scq->hwq.lock) __acquires(&qp->rcq->hwq.lock)
{
- bool flushed = false;
- unsigned long flags;
+ spin_lock_irqsave(&qp->scq->hwq.lock, *flags);
+ if (qp->scq == qp->rcq)
+ __acquire(&qp->rcq->hwq.lock);
+ else
+ spin_lock(&qp->rcq->hwq.lock);
+}
- spin_lock_irqsave(&qp->rcq->flush_lock, flags);
- flushed = qp->rq.flushed;
- spin_unlock_irqrestore(&qp->rcq->flush_lock, flags);
- return flushed;
+void bnxt_qplib_release_cq_locks(struct bnxt_qplib_qp *qp,
+ unsigned long *flags)
+ __releases(&qp->scq->hwq.lock) __releases(&qp->rcq->hwq.lock)
+{
+ if (qp->scq == qp->rcq)
+ __release(&qp->rcq->hwq.lock);
+ else
+ spin_unlock(&qp->rcq->hwq.lock);
+ spin_unlock_irqrestore(&qp->scq->hwq.lock, *flags);
}
-static bool bnxt_qplib_is_qp_in_sq_flushlist(struct bnxt_qplib_qp *qp)
+static struct bnxt_qplib_cq *bnxt_qplib_find_buddy_cq(struct bnxt_qplib_qp *qp,
+ struct bnxt_qplib_cq *cq)
{
- bool flushed = false;
- unsigned long flags;
+ struct bnxt_qplib_cq *buddy_cq = NULL;
+
+ if (qp->scq == qp->rcq)
+ buddy_cq = NULL;
+ else if (qp->scq == cq)
+ buddy_cq = qp->rcq;
+ else
+ buddy_cq = qp->scq;
+ return buddy_cq;
+}
- spin_lock_irqsave(&qp->scq->flush_lock, flags);
- flushed = qp->sq.flushed;
- spin_unlock_irqrestore(&qp->scq->flush_lock, flags);
- return flushed;
+static void bnxt_qplib_lock_buddy_cq(struct bnxt_qplib_qp *qp,
+ struct bnxt_qplib_cq *cq)
+ __acquires(&buddy_cq->hwq.lock)
+{
+ struct bnxt_qplib_cq *buddy_cq = NULL;
+
+ buddy_cq = bnxt_qplib_find_buddy_cq(qp, cq);
+ if (!buddy_cq)
+ __acquire(&cq->hwq.lock);
+ else
+ spin_lock(&buddy_cq->hwq.lock);
}
-static void bnxt_qplib_cancel_phantom_processing(struct bnxt_qplib_qp *qp)
+static void bnxt_qplib_unlock_buddy_cq(struct bnxt_qplib_qp *qp,
+ struct bnxt_qplib_cq *cq)
+ __releases(&buddy_cq->hwq.lock)
{
- qp->sq.condition = false;
- qp->sq.send_phantom = false;
- qp->sq.single = false;
+ struct bnxt_qplib_cq *buddy_cq = NULL;
+
+ buddy_cq = bnxt_qplib_find_buddy_cq(qp, cq);
+ if (!buddy_cq)
+ __release(&cq->hwq.lock);
+ else
+ spin_unlock(&buddy_cq->hwq.lock);
}
void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
{
- struct bnxt_qplib_cq *scq, *rcq;
- unsigned long flags;
+ unsigned long flags;
- scq = qp->scq;
- rcq = qp->rcq;
-
- spin_lock_irqsave(&scq->flush_lock, flags);
- if (!qp->sq.flushed) {
- dev_dbg(&scq->hwq.pdev->dev,
- "QPLIB: FP: Adding to SQ Flush list = %p",
- qp);
- bnxt_qplib_cancel_phantom_processing(qp);
- list_add_tail(&qp->sq_flush, &scq->sqf_head);
- qp->sq.flushed = true;
- }
- spin_unlock_irqrestore(&scq->flush_lock, flags);
- if (!qp->srq) {
- spin_lock_irqsave(&rcq->flush_lock, flags);
- if (!qp->rq.flushed) {
- dev_dbg(&rcq->hwq.pdev->dev,
- "QPLIB: FP: Adding to RQ Flush list = %p",
- qp);
- list_add_tail(&qp->rq_flush, &rcq->rqf_head);
- qp->rq.flushed = true;
- }
- spin_unlock_irqrestore(&rcq->flush_lock, flags);
- }
+ bnxt_qplib_acquire_cq_locks(qp, &flags);
+ __bnxt_qplib_add_flush_qp(qp);
+ bnxt_qplib_release_cq_locks(qp, &flags);
}
-void bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp)
+static void __bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp)
{
- struct bnxt_qplib_cq *scq, *rcq;
- unsigned long flags;
-
- scq = qp->scq;
- rcq = qp->rcq;
+ struct bnxt_qplib_cq *scq, *rcq;
+
+ scq = qp->scq;
+ rcq = qp->rcq;
+
+ if (qp->sq.flushed) {
+ qp->sq.flushed = false;
+ list_del(&qp->sq_flush);
+ }
+ if (!qp->srq) {
+ if (qp->rq.flushed) {
+ qp->rq.flushed = false;
+ list_del(&qp->rq_flush);
+ }
+ }
+}
- spin_lock_irqsave(&scq->flush_lock, flags);
- if (qp->sq.flushed) {
- qp->sq.flushed = false;
- list_del(&qp->sq_flush);
- }
- spin_unlock_irqrestore(&scq->flush_lock, flags);
- if (!qp->srq) {
- spin_lock_irqsave(&rcq->flush_lock, flags);
- if (qp->rq.flushed) {
- qp->rq.flushed = false;
- list_del(&qp->rq_flush);
- }
- spin_unlock_irqrestore(&rcq->flush_lock, flags);
- }
+void bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp)
+{
+ unsigned long flags;
+
+ bnxt_qplib_acquire_cq_locks(qp, &flags);
+ __clean_cq(qp->scq, (u64)(unsigned long)qp);
+ qp->sq.hwq.prod = 0;
+ qp->sq.hwq.cons = 0;
+ __clean_cq(qp->rcq, (u64)(unsigned long)qp);
+ qp->rq.hwq.prod = 0;
+ qp->rq.hwq.cons = 0;
+
+ __bnxt_qplib_del_flush_qp(qp);
+ bnxt_qplib_release_cq_locks(qp, &flags);
}
static void bnxt_qpn_cqn_sched_task(struct work_struct *work)
{
- struct bnxt_qplib_nq_work *nq_work =
- container_of(work, struct bnxt_qplib_nq_work, work);
-
- struct bnxt_qplib_cq *cq = nq_work->cq;
- struct bnxt_qplib_nq *nq = nq_work->nq;
-
- if (cq && nq) {
- spin_lock_bh(&cq->compl_lock);
- if (atomic_read(&cq->arm_state) && nq->cqn_handler) {
- dev_dbg(&nq->pdev->dev,
- "%s:Trigger cq = %p event nq = %p\n",
- __func__, cq, nq);
- nq->cqn_handler(nq, cq);
- }
- spin_unlock_bh(&cq->compl_lock);
- }
- kfree(nq_work);
+ struct bnxt_qplib_nq_work *nq_work =
+ container_of(work, struct bnxt_qplib_nq_work, work);
+
+ struct bnxt_qplib_cq *cq = nq_work->cq;
+ struct bnxt_qplib_nq *nq = nq_work->nq;
+
+ if (cq && nq) {
+ spin_lock_bh(&cq->compl_lock);
+ if (atomic_read(&cq->arm_state) && nq->cqn_handler) {
+ dev_dbg(&nq->pdev->dev,
+ "%s:Trigger cq = %p event nq = %p\n",
+ __func__, cq, nq);
+ nq->cqn_handler(nq, cq);
+ }
+ spin_unlock_bh(&cq->compl_lock);
+ }
+ kfree(nq_work);
}
static void bnxt_qplib_free_qp_hdr_buf(struct bnxt_qplib_res *res,
@@ -364,8 +418,8 @@ int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
rc = irq_set_affinity_hint(nq->vector, &nq->mask);
if (rc) {
dev_warn(&nq->pdev->dev,
- "QPLIB: set affinity failed; vector: %d nq_idx: %d\n",
- nq->vector, nq_idx);
+ "QPLIB: set affinity failed; vector: %d nq_idx: %d\n",
+ nq->vector, nq_idx);
}
nq->requested = true;
@@ -1451,6 +1505,7 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
((swq->next_psn << SQ_PSN_SEARCH_NEXT_PSN_SFT) &
SQ_PSN_SEARCH_NEXT_PSN_MASK));
}
+
queue_err:
if (sch_handler) {
/* Store the ULP info in the software structures */
@@ -1464,6 +1519,7 @@ queue_err:
swq->start_psn = sq->psn & BTH_PSN_MASK;
}
sq->hwq.prod++;
+
qp->wqe_cnt++;
done:
@@ -1476,7 +1532,7 @@ done:
queue_work(qp->scq->nq->cqn_wq, &nq_work->work);
} else {
dev_err(&sq->hwq.pdev->dev,
- "QPLIB: FP: Failed to allocate SQ nq_work!");
+ "QPLIB: FP: Failed to allocate SQ nq_work!");
rc = -ENOMEM;
}
}
@@ -1515,8 +1571,8 @@ int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
sch_handler = true;
dev_dbg(&rq->hwq.pdev->dev,
- "%s Error QP. Scheduling for poll_cq\n",
- __func__);
+ "%s Error QP. Scheduling for poll_cq\n",
+ __func__);
goto queue_err;
}
if (bnxt_qplib_queue_full(rq)) {
@@ -1570,7 +1626,7 @@ queue_err:
queue_work(qp->rcq->nq->cqn_wq, &nq_work->work);
} else {
dev_err(&rq->hwq.pdev->dev,
- "QPLIB: FP: Failed to allocate RQ nq_work!");
+ "QPLIB: FP: Failed to allocate RQ nq_work!");
rc = -ENOMEM;
}
}
@@ -1668,7 +1724,6 @@ int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
init_waitqueue_head(&cq->waitq);
INIT_LIST_HEAD(&cq->sqf_head);
INIT_LIST_HEAD(&cq->rqf_head);
- spin_lock_init(&cq->flush_lock);
spin_lock_init(&cq->compl_lock);
bnxt_qplib_arm_cq_enable(cq);
@@ -1786,17 +1841,17 @@ static int __flush_rq(struct bnxt_qplib_q *rq, struct bnxt_qplib_qp *qp,
void bnxt_qplib_mark_qp_error(void *qp_handle)
{
- struct bnxt_qplib_qp *qp = qp_handle;
+ struct bnxt_qplib_qp *qp = qp_handle;
- if (!qp)
- return;
+ if (!qp)
+ return;
- /* Must block new posting of SQ and RQ */
- qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
- bnxt_qplib_cancel_phantom_processing(qp);
+ /* Must block new posting of SQ and RQ */
+ qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
+ bnxt_qplib_cancel_phantom_processing(qp);
- /* Add qp to flush list of the CQ */
- bnxt_qplib_add_flush_qp(qp);
+ /* Add qp to flush list of the CQ */
+ __bnxt_qplib_add_flush_qp(qp);
}
/* Note: SQE is valid from sw_sq_cons up to cqe_sq_cons (exclusive)
@@ -1926,11 +1981,12 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
return -EINVAL;
}
- if (bnxt_qplib_is_qp_in_sq_flushlist(qp)) {
+ if (qp->sq.flushed) {
dev_dbg(&cq->hwq.pdev->dev,
- "%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
+ "%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
goto done;
}
+
/* Require to walk the sq's swq to fabricate CQEs for all previously
* signaled SWQEs due to CQE aggregation from the current sq cons
* to the cqe_sq_cons
@@ -1966,7 +2022,9 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
sw_sq_cons, cqe->wr_id, cqe->status);
cqe++;
(*budget)--;
+ bnxt_qplib_lock_buddy_cq(qp, cq);
bnxt_qplib_mark_qp_error(qp);
+ bnxt_qplib_unlock_buddy_cq(qp, cq);
} else {
if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
/* Before we complete, do WA 9060 */
@@ -2018,12 +2076,11 @@ static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
dev_err(&cq->hwq.pdev->dev, "QPLIB: process_cq RC qp is NULL");
return -EINVAL;
}
- if (bnxt_qplib_is_qp_in_rq_flushlist(qp)) {
+ if (qp->rq.flushed) {
dev_dbg(&cq->hwq.pdev->dev,
- "%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
+ "%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
goto done;
}
-
cqe = *pcqe;
cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
cqe->length = le32_to_cpu(hwcqe->length);
@@ -2050,9 +2107,12 @@ static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
rq->hwq.cons++;
*pcqe = cqe;
- if (hwcqe->status != CQ_RES_RC_STATUS_OK)
- /* Add qp to flush list of the CQ */
- bnxt_qplib_add_flush_qp(qp);
+ if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
+ /* Add qp to flush list of the CQ */
+ bnxt_qplib_lock_buddy_cq(qp, cq);
+ __bnxt_qplib_add_flush_qp(qp);
+ bnxt_qplib_unlock_buddy_cq(qp, cq);
+ }
done:
return rc;
@@ -2075,9 +2135,9 @@ static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
dev_err(&cq->hwq.pdev->dev, "QPLIB: process_cq UD qp is NULL");
return -EINVAL;
}
- if (bnxt_qplib_is_qp_in_rq_flushlist(qp)) {
+ if (qp->rq.flushed) {
dev_dbg(&cq->hwq.pdev->dev,
- "%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
+ "%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
goto done;
}
cqe = *pcqe;
@@ -2110,10 +2170,12 @@ static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
rq->hwq.cons++;
*pcqe = cqe;
- if (hwcqe->status != CQ_RES_RC_STATUS_OK)
+ if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
/* Add qp to flush list of the CQ */
- bnxt_qplib_add_flush_qp(qp);
-
+ bnxt_qplib_lock_buddy_cq(qp, cq);
+ __bnxt_qplib_add_flush_qp(qp);
+ bnxt_qplib_unlock_buddy_cq(qp, cq);
+ }
done:
return rc;
}
@@ -2136,9 +2198,9 @@ static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
"QPLIB: process_cq Raw/QP1 qp is NULL");
return -EINVAL;
}
- if (bnxt_qplib_is_qp_in_rq_flushlist(qp)) {
+ if (qp->rq.flushed) {
dev_dbg(&cq->hwq.pdev->dev,
- "%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
+ "%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
goto done;
}
cqe = *pcqe;
@@ -2176,9 +2238,12 @@ static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
rq->hwq.cons++;
*pcqe = cqe;
- if (hwcqe->status != CQ_RES_RC_STATUS_OK)
- /* Add qp to flush list of the CQ */
- bnxt_qplib_add_flush_qp(qp);
+ if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
+ /* Add qp to flush list of the CQ */
+ bnxt_qplib_lock_buddy_cq(qp, cq);
+ __bnxt_qplib_add_flush_qp(qp);
+ bnxt_qplib_unlock_buddy_cq(qp, cq);
+ }
done:
return rc;
@@ -2208,6 +2273,7 @@ static int bnxt_qplib_cq_process_terminal(struct bnxt_qplib_cq *cq,
"QPLIB: FP: CQ Process terminal qp is NULL");
return -EINVAL;
}
+
/* Must block new posting of SQ and RQ */
qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
@@ -2227,9 +2293,9 @@ static int bnxt_qplib_cq_process_terminal(struct bnxt_qplib_cq *cq,
goto do_rq;
}
- if (bnxt_qplib_is_qp_in_sq_flushlist(qp)) {
+ if (qp->sq.flushed) {
dev_dbg(&cq->hwq.pdev->dev,
- "%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
+ "%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
goto sq_done;
}
@@ -2277,20 +2343,21 @@ do_rq:
goto done;
}
- if (bnxt_qplib_is_qp_in_rq_flushlist(qp)) {
+ if (qp->rq.flushed) {
dev_dbg(&cq->hwq.pdev->dev,
- "%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
+ "%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
rc = 0;
goto done;
}
-
/* Terminal CQE requires all posted RQEs to complete with FLUSHED_ERR
* from the current rq->cons to the rq->prod regardless what the
* rq->cons the terminal CQE indicates
*/
/* Add qp to flush list of the CQ */
- bnxt_qplib_add_flush_qp(qp);
+ bnxt_qplib_lock_buddy_cq(qp, cq);
+ __bnxt_qplib_add_flush_qp(qp);
+ bnxt_qplib_unlock_buddy_cq(qp, cq);
done:
return rc;
}
@@ -2312,30 +2379,30 @@ static int bnxt_qplib_cq_process_cutoff(struct bnxt_qplib_cq *cq,
}
int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq,
- struct bnxt_qplib_cqe *cqe,
- int num_cqes)
+ struct bnxt_qplib_cqe *cqe,
+ int num_cqes)
{
- struct bnxt_qplib_qp *qp = NULL;
- u32 budget = num_cqes;
- unsigned long flags;
-
- spin_lock_irqsave(&cq->flush_lock, flags);
- list_for_each_entry(qp, &cq->sqf_head, sq_flush) {
- dev_dbg(&cq->hwq.pdev->dev,
- "QPLIB: FP: Flushing SQ QP= %p",
- qp);
- __flush_sq(&qp->sq, qp, &cqe, &budget);
- }
-
- list_for_each_entry(qp, &cq->rqf_head, rq_flush) {
- dev_dbg(&cq->hwq.pdev->dev,
- "QPLIB: FP: Flushing RQ QP= %p",
- qp);
- __flush_rq(&qp->rq, qp, &cqe, &budget);
- }
- spin_unlock_irqrestore(&cq->flush_lock, flags);
-
- return num_cqes - budget;
+ struct bnxt_qplib_qp *qp = NULL;
+ u32 budget = num_cqes;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cq->hwq.lock, flags);
+ list_for_each_entry(qp, &cq->sqf_head, sq_flush) {
+ dev_dbg(&cq->hwq.pdev->dev,
+ "QPLIB: FP: Flushing SQ QP= %p",
+ qp);
+ __flush_sq(&qp->sq, qp, &cqe, &budget);
+ }
+
+ list_for_each_entry(qp, &cq->rqf_head, rq_flush) {
+ dev_dbg(&cq->hwq.pdev->dev,
+ "QPLIB: FP: Flushing RQ QP= %p",
+ qp);
+ __flush_rq(&qp->rq, qp, &cqe, &budget);
+ }
+ spin_unlock_irqrestore(&cq->hwq.lock, flags);
+
+ return num_cqes - budget;
}
int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
@@ -2428,6 +2495,7 @@ void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type)
spin_lock_irqsave(&cq->hwq.lock, flags);
if (arm_type)
bnxt_qplib_arm_cq(cq, arm_type);
+
/* Using cq->arm_state variable to track whether to issue cq handler */
atomic_set(&cq->arm_state, 1);
spin_unlock_irqrestore(&cq->hwq.lock, flags);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
index 74f4f988b362..f00feefbd383 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
@@ -226,7 +226,7 @@ struct bnxt_qplib_q {
u32 phantom_wqe_cnt;
u32 phantom_cqe_cnt;
u32 next_cq_cons;
- bool flushed;
+ bool flushed;
};
struct bnxt_qplib_qp {
@@ -297,8 +297,8 @@ struct bnxt_qplib_qp {
dma_addr_t sq_hdr_buf_map;
void *rq_hdr_buf;
dma_addr_t rq_hdr_buf_map;
- struct list_head sq_flush;
- struct list_head rq_flush;
+ struct list_head sq_flush;
+ struct list_head rq_flush;
};
#define BNXT_QPLIB_MAX_CQE_ENTRY_SIZE sizeof(struct cq_base)
@@ -354,7 +354,7 @@ struct bnxt_qplib_cq {
u16 period;
struct bnxt_qplib_hwq hwq;
u32 cnq_hw_ring_id;
- struct bnxt_qplib_nq *nq;
+ struct bnxt_qplib_nq *nq;
bool resize_in_progress;
struct scatterlist *sghead;
u32 nmap;
@@ -364,10 +364,9 @@ struct bnxt_qplib_cq {
unsigned long flags;
#define CQ_FLAGS_RESIZE_IN_PROG 1
wait_queue_head_t waitq;
- spinlock_t flush_lock; /* lock flush queue list */
struct list_head sqf_head, rqf_head;
- atomic_t arm_state;
- spinlock_t compl_lock; /* synch CQ handlers */
+ atomic_t arm_state;
+ spinlock_t compl_lock; /* synch CQ handlers */
};
#define BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE sizeof(struct xrrq_irrq)
@@ -408,7 +407,7 @@ struct bnxt_qplib_nq {
struct pci_dev *pdev;
int vector;
- cpumask_t mask;
+ cpumask_t mask;
int budget;
bool requested;
struct tasklet_struct worker;
@@ -426,8 +425,8 @@ struct bnxt_qplib_nq {
(struct bnxt_qplib_nq *nq,
void *srq,
u8 event);
- struct workqueue_struct *cqn_wq;
- char name[32];
+ struct workqueue_struct *cqn_wq;
+ char name[32];
};
struct bnxt_qplib_nq_work {
@@ -471,7 +470,11 @@ void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq);
int bnxt_qplib_alloc_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq);
void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp);
void bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp);
+void bnxt_qplib_acquire_cq_locks(struct bnxt_qplib_qp *qp,
+ unsigned long *flags);
+void bnxt_qplib_release_cq_locks(struct bnxt_qplib_qp *qp,
+ unsigned long *flags);
int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq,
- struct bnxt_qplib_cqe *cqe,
- int num_cqes);
+ struct bnxt_qplib_cqe *cqe,
+ int num_cqes);
#endif /* __BNXT_QPLIB_FP_H__ */
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
index cef528b4a5ce..4ffe34265360 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
@@ -44,6 +44,9 @@
#include "roce_hsi.h"
#include "qplib_res.h"
#include "qplib_rcfw.h"
+#include "qplib_sp.h"
+#include "qplib_fp.h"
+
static void bnxt_qplib_service_creq(unsigned long data);
/* Hardware communication channel */
@@ -283,16 +286,29 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
struct creq_qp_event *qp_event)
{
struct bnxt_qplib_hwq *cmdq = &rcfw->cmdq;
+ struct creq_qp_error_notification *err_event;
struct bnxt_qplib_crsq *crsqe;
unsigned long flags;
+ struct bnxt_qplib_qp *qp;
u16 cbit, blocked = 0;
u16 cookie;
__le16 mcookie;
+ u32 qp_id;
switch (qp_event->event) {
case CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION:
+ err_event = (struct creq_qp_error_notification *)qp_event;
+ qp_id = le32_to_cpu(err_event->xid);
+ qp = rcfw->qp_tbl[qp_id].qp_handle;
dev_dbg(&rcfw->pdev->dev,
"QPLIB: Received QP error notification");
+ dev_dbg(&rcfw->pdev->dev,
+ "QPLIB: qpid 0x%x, req_err=0x%x, resp_err=0x%x\n",
+ qp_id, err_event->req_err_state_reason,
+ err_event->res_err_state_reason);
+ bnxt_qplib_acquire_cq_locks(qp, &flags);
+ bnxt_qplib_mark_qp_error(qp);
+ bnxt_qplib_release_cq_locks(qp, &flags);
break;
default:
/* Command Response */
@@ -549,7 +565,7 @@ int bnxt_qplib_alloc_rcfw_channel(struct pci_dev *pdev,
rcfw->qp_tbl_size = qp_tbl_sz;
rcfw->qp_tbl = kcalloc(qp_tbl_sz, sizeof(struct bnxt_qplib_qp_node),
- GFP_KERNEL);
+ GFP_KERNEL);
if (!rcfw->qp_tbl)
goto fail;