* [PATCH net-next] qed: Add srq core support for RoCE and iWARP
@ 2018-05-30 13:11 Yuval Bason
2018-05-31 17:33 ` Leon Romanovsky
` (3 more replies)
0 siblings, 4 replies; 7+ messages in thread
From: Yuval Bason @ 2018-05-30 13:11 UTC (permalink / raw)
To: yuval.bason, davem
Cc: netdev, jgg, dledford, linux-rdma, Michal Kalderon, Ariel Elior
This patch adds support for configuring SRQ and provides the necessary
APIs for rdma upper layer driver (qedr) to enable the SRQ feature.
Signed-off-by: Michal Kalderon <michal.kalderon@cavium.com>
Signed-off-by: Ariel Elior <ariel.elior@cavium.com>
Signed-off-by: Yuval Bason <yuval.bason@cavium.com>
---
drivers/net/ethernet/qlogic/qed/qed_cxt.c | 5 +-
drivers/net/ethernet/qlogic/qed/qed_cxt.h | 1 +
drivers/net/ethernet/qlogic/qed/qed_hsi.h | 2 +
drivers/net/ethernet/qlogic/qed/qed_iwarp.c | 23 ++++
drivers/net/ethernet/qlogic/qed/qed_main.c | 2 +
drivers/net/ethernet/qlogic/qed/qed_rdma.c | 179 +++++++++++++++++++++++++++-
drivers/net/ethernet/qlogic/qed/qed_rdma.h | 2 +
drivers/net/ethernet/qlogic/qed/qed_roce.c | 17 ++-
include/linux/qed/qed_rdma_if.h | 12 +-
9 files changed, 235 insertions(+), 8 deletions(-)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index 820b226..7ed6aa0 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -47,6 +47,7 @@
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
+#include "qed_rdma.h"
#include "qed_reg_addr.h"
#include "qed_sriov.h"
@@ -426,7 +427,7 @@ static void qed_cxt_set_srq_count(struct qed_hwfn *p_hwfn, u32 num_srqs)
p_mgr->srq_count = num_srqs;
}
-static u32 qed_cxt_get_srq_count(struct qed_hwfn *p_hwfn)
+u32 qed_cxt_get_srq_count(struct qed_hwfn *p_hwfn)
{
struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
@@ -2071,7 +2072,7 @@ static void qed_rdma_set_pf_params(struct qed_hwfn *p_hwfn,
u32 num_cons, num_qps, num_srqs;
enum protocol_type proto;
- num_srqs = min_t(u32, 32 * 1024, p_params->num_srqs);
+ num_srqs = min_t(u32, QED_RDMA_MAX_SRQS, p_params->num_srqs);
if (p_hwfn->mcp_info->func_info.protocol == QED_PCI_ETH_RDMA) {
DP_NOTICE(p_hwfn,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.h b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
index a4e9586..758a8b4 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
@@ -235,6 +235,7 @@ u32 qed_cxt_get_proto_tid_count(struct qed_hwfn *p_hwfn,
enum protocol_type type);
u32 qed_cxt_get_proto_cid_start(struct qed_hwfn *p_hwfn,
enum protocol_type type);
+u32 qed_cxt_get_srq_count(struct qed_hwfn *p_hwfn);
int qed_cxt_free_proto_ilt(struct qed_hwfn *p_hwfn, enum protocol_type proto);
#define QED_CTX_WORKING_MEM 0
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
index 8e1e6e1..82ce401 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -9725,6 +9725,8 @@ enum iwarp_eqe_async_opcode {
IWARP_EVENT_TYPE_ASYNC_EXCEPTION_DETECTED,
IWARP_EVENT_TYPE_ASYNC_QP_IN_ERROR_STATE,
IWARP_EVENT_TYPE_ASYNC_CQ_OVERFLOW,
+ IWARP_EVENT_TYPE_ASYNC_SRQ_EMPTY,
+ IWARP_EVENT_TYPE_ASYNC_SRQ_LIMIT,
MAX_IWARP_EQE_ASYNC_OPCODE
};
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
index 2a2b101..474e6cf 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
@@ -271,6 +271,8 @@ int qed_iwarp_create_qp(struct qed_hwfn *p_hwfn,
p_ramrod->sq_num_pages = qp->sq_num_pages;
p_ramrod->rq_num_pages = qp->rq_num_pages;
+ p_ramrod->srq_id.srq_idx = cpu_to_le16(qp->srq_id);
+ p_ramrod->srq_id.opaque_fid = cpu_to_le16(p_hwfn->hw_info.opaque_fid);
p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi);
p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo);
@@ -3004,8 +3006,11 @@ static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn,
union event_ring_data *data,
u8 fw_return_code)
{
+ struct qed_rdma_events events = p_hwfn->p_rdma_info->events;
struct regpair *fw_handle = &data->rdma_data.async_handle;
struct qed_iwarp_ep *ep = NULL;
+ u16 srq_offset;
+ u16 srq_id;
u16 cid;
ep = (struct qed_iwarp_ep *)(uintptr_t)HILO_64(fw_handle->hi,
@@ -3067,6 +3072,24 @@ static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn,
qed_iwarp_cid_cleaned(p_hwfn, cid);
break;
+ case IWARP_EVENT_TYPE_ASYNC_SRQ_EMPTY:
+ DP_NOTICE(p_hwfn, "IWARP_EVENT_TYPE_ASYNC_SRQ_EMPTY\n");
+ srq_offset = p_hwfn->p_rdma_info->srq_id_offset;
+ /* FW assigns value that is no greater than u16 */
+ srq_id = ((u16)le32_to_cpu(fw_handle->lo)) - srq_offset;
+ events.affiliated_event(events.context,
+ QED_IWARP_EVENT_SRQ_EMPTY,
+ &srq_id);
+ break;
+ case IWARP_EVENT_TYPE_ASYNC_SRQ_LIMIT:
+ DP_NOTICE(p_hwfn, "IWARP_EVENT_TYPE_ASYNC_SRQ_LIMIT\n");
+ srq_offset = p_hwfn->p_rdma_info->srq_id_offset;
+ /* FW assigns value that is no greater than u16 */
+ srq_id = ((u16)le32_to_cpu(fw_handle->lo)) - srq_offset;
+ events.affiliated_event(events.context,
+ QED_IWARP_EVENT_SRQ_LIMIT,
+ &srq_id);
+ break;
case IWARP_EVENT_TYPE_ASYNC_CQ_OVERFLOW:
DP_NOTICE(p_hwfn, "IWARP_EVENT_TYPE_ASYNC_CQ_OVERFLOW\n");
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 68c4399..b04d57c 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -64,6 +64,7 @@
#define QED_ROCE_QPS (8192)
#define QED_ROCE_DPIS (8)
+#define QED_RDMA_SRQS QED_ROCE_QPS
static char version[] =
"QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n";
@@ -922,6 +923,7 @@ static void qed_update_pf_params(struct qed_dev *cdev,
if (IS_ENABLED(CONFIG_QED_RDMA)) {
params->rdma_pf_params.num_qps = QED_ROCE_QPS;
params->rdma_pf_params.min_dpis = QED_ROCE_DPIS;
+ params->rdma_pf_params.num_srqs = QED_RDMA_SRQS;
/* divide by 3 the MRs to avoid MF ILT overflow */
params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX;
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
index a411f9c..bd23659 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
@@ -259,15 +259,29 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
goto free_cid_map;
}
+ /* Allocate bitmap for srqs */
+ p_rdma_info->num_srqs = qed_cxt_get_srq_count(p_hwfn);
+ rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->srq_map,
+ p_rdma_info->num_srqs, "SRQ");
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Failed to allocate srq bitmap, rc = %d\n", rc);
+ goto free_real_cid_map;
+ }
+
if (QED_IS_IWARP_PERSONALITY(p_hwfn))
rc = qed_iwarp_alloc(p_hwfn);
if (rc)
- goto free_cid_map;
+ goto free_srq_map;
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocation successful\n");
return 0;
+free_srq_map:
+ kfree(p_rdma_info->srq_map.bitmap);
+free_real_cid_map:
+ kfree(p_rdma_info->real_cid_map.bitmap);
free_cid_map:
kfree(p_rdma_info->cid_map.bitmap);
free_tid_map:
@@ -351,6 +365,8 @@ static void qed_rdma_resc_free(struct qed_hwfn *p_hwfn)
qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->cq_map, 1);
qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->toggle_bits, 0);
qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->tid_map, 1);
+ qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->srq_map, 1);
+ qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->real_cid_map, 1);
kfree(p_rdma_info->port);
kfree(p_rdma_info->dev);
@@ -431,6 +447,12 @@ static void qed_rdma_init_devinfo(struct qed_hwfn *p_hwfn,
if (cdev->rdma_max_sge)
dev->max_sge = min_t(u32, cdev->rdma_max_sge, dev->max_sge);
+ dev->max_srq_sge = QED_RDMA_MAX_SGE_PER_SRQ_WQE;
+ if (p_hwfn->cdev->rdma_max_srq_sge) {
+ dev->max_srq_sge = min_t(u32,
+ p_hwfn->cdev->rdma_max_srq_sge,
+ dev->max_srq_sge);
+ }
dev->max_inline = ROCE_REQ_MAX_INLINE_DATA_SIZE;
dev->max_inline = (cdev->rdma_max_inline) ?
@@ -474,6 +496,8 @@ static void qed_rdma_init_devinfo(struct qed_hwfn *p_hwfn,
dev->max_mr_mw_fmr_size = dev->max_mr_mw_fmr_pbl * PAGE_SIZE;
dev->max_pkey = QED_RDMA_MAX_P_KEY;
+ dev->max_srq = p_hwfn->p_rdma_info->num_srqs;
+ dev->max_srq_wr = QED_RDMA_MAX_SRQ_WQE_ELEM;
dev->max_qp_resp_rd_atomic_resc = RDMA_RING_PAGE_SIZE /
(RDMA_RESP_RD_ATOMIC_ELM_SIZE * 2);
dev->max_qp_req_rd_atomic_resc = RDMA_RING_PAGE_SIZE /
@@ -1628,6 +1652,156 @@ static void *qed_rdma_get_rdma_ctx(struct qed_dev *cdev)
return QED_LEADING_HWFN(cdev);
}
+int qed_rdma_modify_srq(void *rdma_cxt,
+ struct qed_rdma_modify_srq_in_params *in_params)
+{
+ struct rdma_srq_modify_ramrod_data *p_ramrod;
+ struct qed_hwfn *p_hwfn = rdma_cxt;
+ struct qed_sp_init_data init_data;
+ struct qed_spq_entry *p_ent;
+ u16 opaque_fid;
+ int rc;
+
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ RDMA_RAMROD_MODIFY_SRQ,
+ p_hwfn->p_rdma_info->proto, &init_data);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.rdma_modify_srq;
+ p_ramrod->srq_id.srq_idx = cpu_to_le16(in_params->srq_id);
+ opaque_fid = p_hwfn->hw_info.opaque_fid;
+ p_ramrod->srq_id.opaque_fid = cpu_to_le16(opaque_fid);
+ p_ramrod->wqe_limit = cpu_to_le16(in_params->wqe_limit);
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+ if (rc)
+ return rc;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "modified SRQ id = %x",
+ in_params->srq_id);
+
+ return rc;
+}
+
+int qed_rdma_destroy_srq(void *rdma_cxt,
+ struct qed_rdma_destroy_srq_in_params *in_params)
+{
+ struct rdma_srq_destroy_ramrod_data *p_ramrod;
+ struct qed_hwfn *p_hwfn = rdma_cxt;
+ struct qed_sp_init_data init_data;
+ struct qed_spq_entry *p_ent;
+ struct qed_bmap *bmap;
+ u16 opaque_fid;
+ int rc;
+
+ opaque_fid = p_hwfn->hw_info.opaque_fid;
+
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.opaque_fid = opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ RDMA_RAMROD_DESTROY_SRQ,
+ p_hwfn->p_rdma_info->proto, &init_data);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.rdma_destroy_srq;
+ p_ramrod->srq_id.srq_idx = cpu_to_le16(in_params->srq_id);
+ p_ramrod->srq_id.opaque_fid = cpu_to_le16(opaque_fid);
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+ if (rc)
+ return rc;
+
+ bmap = &p_hwfn->p_rdma_info->srq_map;
+
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+ qed_bmap_release_id(p_hwfn, bmap, in_params->srq_id);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "SRQ destroyed Id = %x",
+ in_params->srq_id);
+
+ return rc;
+}
+
+int qed_rdma_create_srq(void *rdma_cxt,
+ struct qed_rdma_create_srq_in_params *in_params,
+ struct qed_rdma_create_srq_out_params *out_params)
+{
+ struct rdma_srq_create_ramrod_data *p_ramrod;
+ struct qed_hwfn *p_hwfn = rdma_cxt;
+ struct qed_sp_init_data init_data;
+ enum qed_cxt_elem_type elem_type;
+ struct qed_spq_entry *p_ent;
+ u16 opaque_fid, srq_id;
+ struct qed_bmap *bmap;
+ u32 returned_id;
+ int rc;
+
+ bmap = &p_hwfn->p_rdma_info->srq_map;
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+ rc = qed_rdma_bmap_alloc_id(p_hwfn, bmap, &returned_id);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+
+ if (rc) {
+ DP_NOTICE(p_hwfn, "failed to allocate srq id\n");
+ return rc;
+ }
+
+ elem_type = QED_ELEM_SRQ;
+ rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, elem_type, returned_id);
+ if (rc)
+ goto err;
+ /* returned id is no greater than u16 */
+ srq_id = (u16)returned_id;
+ opaque_fid = p_hwfn->hw_info.opaque_fid;
+
+ memset(&init_data, 0, sizeof(init_data));
+ opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.opaque_fid = opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ RDMA_RAMROD_CREATE_SRQ,
+ p_hwfn->p_rdma_info->proto, &init_data);
+ if (rc)
+ goto err;
+
+ p_ramrod = &p_ent->ramrod.rdma_create_srq;
+ DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, in_params->pbl_base_addr);
+ p_ramrod->pages_in_srq_pbl = cpu_to_le16(in_params->num_pages);
+ p_ramrod->pd_id = cpu_to_le16(in_params->pd_id);
+ p_ramrod->srq_id.srq_idx = cpu_to_le16(srq_id);
+ p_ramrod->srq_id.opaque_fid = cpu_to_le16(opaque_fid);
+ p_ramrod->page_size = cpu_to_le16(in_params->page_size);
+ DMA_REGPAIR_LE(p_ramrod->producers_addr, in_params->prod_pair_addr);
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+ if (rc)
+ goto err;
+
+ out_params->srq_id = srq_id;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "SRQ created Id = %x\n", out_params->srq_id);
+
+ return rc;
+
+err:
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+ qed_bmap_release_id(p_hwfn, bmap, returned_id);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+
+ return rc;
+}
+
bool qed_rdma_allocated_qps(struct qed_hwfn *p_hwfn)
{
bool result;
@@ -1773,6 +1947,9 @@ static int qed_roce_ll2_set_mac_filter(struct qed_dev *cdev,
.rdma_free_tid = &qed_rdma_free_tid,
.rdma_register_tid = &qed_rdma_register_tid,
.rdma_deregister_tid = &qed_rdma_deregister_tid,
+ .rdma_create_srq = &qed_rdma_create_srq,
+ .rdma_modify_srq = &qed_rdma_modify_srq,
+ .rdma_destroy_srq = &qed_rdma_destroy_srq,
.ll2_acquire_connection = &qed_ll2_acquire_connection,
.ll2_establish_connection = &qed_ll2_establish_connection,
.ll2_terminate_connection = &qed_ll2_terminate_connection,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.h b/drivers/net/ethernet/qlogic/qed/qed_rdma.h
index 18ec9cb..6f722ee 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_rdma.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.h
@@ -96,6 +96,8 @@ struct qed_rdma_info {
u8 num_cnqs;
u32 num_qps;
u32 num_mrs;
+ u32 num_srqs;
+ u16 srq_id_offset;
u16 queue_zone_base;
u16 max_queue_zones;
enum protocol_type proto;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c
index 6acfd43..ee57fcd 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_roce.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c
@@ -65,6 +65,8 @@
u8 fw_event_code,
u16 echo, union event_ring_data *data, u8 fw_return_code)
{
+ struct qed_rdma_events events = p_hwfn->p_rdma_info->events;
+
if (fw_event_code == ROCE_ASYNC_EVENT_DESTROY_QP_DONE) {
u16 icid =
(u16)le32_to_cpu(data->rdma_data.rdma_destroy_qp_data.cid);
@@ -75,11 +77,18 @@
*/
qed_roce_free_real_icid(p_hwfn, icid);
} else {
- struct qed_rdma_events *events = &p_hwfn->p_rdma_info->events;
+ if (fw_event_code == ROCE_ASYNC_EVENT_SRQ_EMPTY ||
+ fw_event_code == ROCE_ASYNC_EVENT_SRQ_LIMIT) {
+ u16 srq_id = (u16)data->rdma_data.async_handle.lo;
+
+ events.affiliated_event(events.context, fw_event_code,
+ &srq_id);
+ } else {
+ union rdma_eqe_data rdata = data->rdma_data;
- events->affiliated_event(p_hwfn->p_rdma_info->events.context,
- fw_event_code,
- (void *)&data->rdma_data.async_handle);
+ events.affiliated_event(events.context, fw_event_code,
+ (void *)&rdata.async_handle);
+ }
}
return 0;
diff --git a/include/linux/qed/qed_rdma_if.h b/include/linux/qed/qed_rdma_if.h
index 4dd72ba..e05e320 100644
--- a/include/linux/qed/qed_rdma_if.h
+++ b/include/linux/qed/qed_rdma_if.h
@@ -485,7 +485,9 @@ enum qed_iwarp_event_type {
QED_IWARP_EVENT_ACTIVE_MPA_REPLY,
QED_IWARP_EVENT_LOCAL_ACCESS_ERROR,
QED_IWARP_EVENT_REMOTE_OPERATION_ERROR,
- QED_IWARP_EVENT_TERMINATE_RECEIVED
+ QED_IWARP_EVENT_TERMINATE_RECEIVED,
+ QED_IWARP_EVENT_SRQ_LIMIT,
+ QED_IWARP_EVENT_SRQ_EMPTY,
};
enum qed_tcp_ip_version {
@@ -646,6 +648,14 @@ struct qed_rdma_ops {
int (*rdma_alloc_tid)(void *rdma_cxt, u32 *itid);
void (*rdma_free_tid)(void *rdma_cxt, u32 itid);
+ int (*rdma_create_srq)(void *rdma_cxt,
+ struct qed_rdma_create_srq_in_params *iparams,
+ struct qed_rdma_create_srq_out_params *oparams);
+ int (*rdma_destroy_srq)(void *rdma_cxt,
+ struct qed_rdma_destroy_srq_in_params *iparams);
+ int (*rdma_modify_srq)(void *rdma_cxt,
+ struct qed_rdma_modify_srq_in_params *iparams);
+
int (*ll2_acquire_connection)(void *rdma_cxt,
struct qed_ll2_acquire_data *data);
--
1.8.3.1
^ permalink raw reply related [flat|nested] 7+ messages in thread
* Re: [PATCH net-next] qed: Add srq core support for RoCE and iWARP
2018-05-30 13:11 [PATCH net-next] qed: Add srq core support for RoCE and iWARP Yuval Bason
@ 2018-05-31 17:33 ` Leon Romanovsky
2018-06-03 16:10 ` Bason, Yuval
2018-06-01 3:41 ` [RFC PATCH] qed: qed_rdma_modify_srq() can be static kbuild test robot
` (2 subsequent siblings)
3 siblings, 1 reply; 7+ messages in thread
From: Leon Romanovsky @ 2018-05-31 17:33 UTC (permalink / raw)
To: Yuval Bason
Cc: davem, netdev, jgg, dledford, linux-rdma, Michal Kalderon, Ariel Elior
[-- Attachment #1: Type: text/plain, Size: 1179 bytes --]
On Wed, May 30, 2018 at 04:11:37PM +0300, Yuval Bason wrote:
> This patch adds support for configuring SRQ and provides the necessary
> APIs for rdma upper layer driver (qedr) to enable the SRQ feature.
>
> Signed-off-by: Michal Kalderon <michal.kalderon@cavium.com>
> Signed-off-by: Ariel Elior <ariel.elior@cavium.com>
> Signed-off-by: Yuval Bason <yuval.bason@cavium.com>
> ---
> drivers/net/ethernet/qlogic/qed/qed_cxt.c | 5 +-
> drivers/net/ethernet/qlogic/qed/qed_cxt.h | 1 +
> drivers/net/ethernet/qlogic/qed/qed_hsi.h | 2 +
> drivers/net/ethernet/qlogic/qed/qed_iwarp.c | 23 ++++
> drivers/net/ethernet/qlogic/qed/qed_main.c | 2 +
> drivers/net/ethernet/qlogic/qed/qed_rdma.c | 179 +++++++++++++++++++++++++++-
> drivers/net/ethernet/qlogic/qed/qed_rdma.h | 2 +
> drivers/net/ethernet/qlogic/qed/qed_roce.c | 17 ++-
> include/linux/qed/qed_rdma_if.h | 12 +-
> 9 files changed, 235 insertions(+), 8 deletions(-)
>
...
> + struct qed_sp_init_data init_data;
...
> + memset(&init_data, 0, sizeof(init_data));
This pattern is so common in this patch, why?
"struct qed_sp_init_data init_data = {};" will do the trick.
Thanks
[-- Attachment #2: signature.asc --]
[-- Type: application/pgp-signature, Size: 801 bytes --]
^ permalink raw reply [flat|nested] 7+ messages in thread
* [RFC PATCH] qed: qed_rdma_modify_srq() can be static
2018-05-30 13:11 [PATCH net-next] qed: Add srq core support for RoCE and iWARP Yuval Bason
2018-05-31 17:33 ` Leon Romanovsky
@ 2018-06-01 3:41 ` kbuild test robot
2018-06-01 3:41 ` [PATCH net-next] qed: Add srq core support for RoCE and iWARP kbuild test robot
2018-07-09 16:42 ` Jason Gunthorpe
3 siblings, 0 replies; 7+ messages in thread
From: kbuild test robot @ 2018-06-01 3:41 UTC (permalink / raw)
To: Yuval Bason
Cc: kbuild-all, yuval.bason, davem, netdev, jgg, dledford,
linux-rdma, Michal Kalderon, Ariel Elior
Fixes: 27c50d39911b ("qed: Add srq core support for RoCE and iWARP")
Signed-off-by: kbuild test robot <fengguang.wu@intel.com>
---
qed_rdma.c | 14 +++++++-------
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
index bd23659..f118328 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
@@ -1652,8 +1652,8 @@ static void *qed_rdma_get_rdma_ctx(struct qed_dev *cdev)
return QED_LEADING_HWFN(cdev);
}
-int qed_rdma_modify_srq(void *rdma_cxt,
- struct qed_rdma_modify_srq_in_params *in_params)
+static int qed_rdma_modify_srq(void *rdma_cxt,
+ struct qed_rdma_modify_srq_in_params *in_params)
{
struct rdma_srq_modify_ramrod_data *p_ramrod;
struct qed_hwfn *p_hwfn = rdma_cxt;
@@ -1688,8 +1688,8 @@ int qed_rdma_modify_srq(void *rdma_cxt,
return rc;
}
-int qed_rdma_destroy_srq(void *rdma_cxt,
- struct qed_rdma_destroy_srq_in_params *in_params)
+static int qed_rdma_destroy_srq(void *rdma_cxt,
+ struct qed_rdma_destroy_srq_in_params *in_params)
{
struct rdma_srq_destroy_ramrod_data *p_ramrod;
struct qed_hwfn *p_hwfn = rdma_cxt;
@@ -1731,9 +1731,9 @@ int qed_rdma_destroy_srq(void *rdma_cxt,
return rc;
}
-int qed_rdma_create_srq(void *rdma_cxt,
- struct qed_rdma_create_srq_in_params *in_params,
- struct qed_rdma_create_srq_out_params *out_params)
+static int qed_rdma_create_srq(void *rdma_cxt,
+ struct qed_rdma_create_srq_in_params *in_params,
+ struct qed_rdma_create_srq_out_params *out_params)
{
struct rdma_srq_create_ramrod_data *p_ramrod;
struct qed_hwfn *p_hwfn = rdma_cxt;
^ permalink raw reply related [flat|nested] 7+ messages in thread
* Re: [PATCH net-next] qed: Add srq core support for RoCE and iWARP
2018-05-30 13:11 [PATCH net-next] qed: Add srq core support for RoCE and iWARP Yuval Bason
2018-05-31 17:33 ` Leon Romanovsky
2018-06-01 3:41 ` [RFC PATCH] qed: qed_rdma_modify_srq() can be static kbuild test robot
@ 2018-06-01 3:41 ` kbuild test robot
2018-07-09 16:42 ` Jason Gunthorpe
3 siblings, 0 replies; 7+ messages in thread
From: kbuild test robot @ 2018-06-01 3:41 UTC (permalink / raw)
To: Yuval Bason
Cc: kbuild-all, yuval.bason, davem, netdev, jgg, dledford,
linux-rdma, Michal Kalderon, Ariel Elior
Hi Yuval,
Thank you for the patch! Perhaps something to improve:
[auto build test WARNING on net-next/master]
url: https://github.com/0day-ci/linux/commits/Yuval-Bason/qed-Add-srq-core-support-for-RoCE-and-iWARP/20180601-073407
reproduce:
# apt-get install sparse
make ARCH=x86_64 allmodconfig
make C=1 CF=-D__CHECK_ENDIAN__
sparse warnings: (new ones prefixed by >>)
drivers/net/ethernet/qlogic/qed/qed_rdma.c:137:5: sparse: symbol 'qed_rdma_get_sb_id' was not declared. Should it be static?
drivers/net/ethernet/qlogic/qed/qed_rdma.c:448:32: sparse: expression using sizeof(void)
drivers/net/ethernet/qlogic/qed/qed_rdma.c:448:32: sparse: expression using sizeof(void)
drivers/net/ethernet/qlogic/qed/qed_rdma.c:452:36: sparse: expression using sizeof(void)
drivers/net/ethernet/qlogic/qed/qed_rdma.c:452:36: sparse: expression using sizeof(void)
drivers/net/ethernet/qlogic/qed/qed_rdma.c:459:27: sparse: expression using sizeof(void)
drivers/net/ethernet/qlogic/qed/qed_rdma.c:459:27: sparse: expression using sizeof(void)
drivers/net/ethernet/qlogic/qed/qed_rdma.c:471:19: sparse: expression using sizeof(void)
drivers/net/ethernet/qlogic/qed/qed_rdma.c:471:19: sparse: expression using sizeof(void)
drivers/net/ethernet/qlogic/qed/qed_rdma.c:544:30: sparse: expression using sizeof(void)
drivers/net/ethernet/qlogic/qed/qed_rdma.c:709:5: sparse: symbol 'qed_rdma_stop' was not declared. Should it be static?
drivers/net/ethernet/qlogic/qed/qed_rdma.c:796:33: sparse: cast removes address space of expression
drivers/net/ethernet/qlogic/qed/qed_rdma.c:899:16: sparse: expression using sizeof(void)
drivers/net/ethernet/qlogic/qed/qed_rdma.c:899:16: sparse: expression using sizeof(void)
drivers/net/ethernet/qlogic/qed/qed_rdma.c:921:16: sparse: expression using sizeof(void)
drivers/net/ethernet/qlogic/qed/qed_rdma.c:921:16: sparse: expression using sizeof(void)
drivers/net/ethernet/qlogic/qed/qed_rdma.c:1063:31: sparse: incorrect type in assignment (different base types) @@ expected restricted __le16 [usertype] int_timeout @@ got unsignedrestricted __le16 [usertype] int_timeout @@
drivers/net/ethernet/qlogic/qed/qed_rdma.c:1063:31: expected restricted __le16 [usertype] int_timeout
drivers/net/ethernet/qlogic/qed/qed_rdma.c:1063:31: got unsigned short [unsigned] [usertype] int_timeout
drivers/net/ethernet/qlogic/qed/qed_rdma.c:1165:21: sparse: incorrect type in assignment (different base types) @@ expected unsigned short [unsigned] [short] [usertype] <noident> @@ got unsigned] [short] [usertype] <noident> @@
drivers/net/ethernet/qlogic/qed/qed_rdma.c:1165:21: expected unsigned short [unsigned] [short] [usertype] <noident>
drivers/net/ethernet/qlogic/qed/qed_rdma.c:1165:21: got restricted __le16 [usertype] <noident>
drivers/net/ethernet/qlogic/qed/qed_rdma.c:1166:21: sparse: incorrect type in assignment (different base types) @@ expected unsigned short [unsigned] [short] [usertype] <noident> @@ got unsigned] [short] [usertype] <noident> @@
drivers/net/ethernet/qlogic/qed/qed_rdma.c:1166:21: expected unsigned short [unsigned] [short] [usertype] <noident>
drivers/net/ethernet/qlogic/qed/qed_rdma.c:1166:21: got restricted __le16 [usertype] <noident>
drivers/net/ethernet/qlogic/qed/qed_rdma.c:1167:21: sparse: incorrect type in assignment (different base types) @@ expected unsigned short [unsigned] [short] [usertype] <noident> @@ got unsigned] [short] [usertype] <noident> @@
drivers/net/ethernet/qlogic/qed/qed_rdma.c:1167:21: expected unsigned short [unsigned] [short] [usertype] <noident>
drivers/net/ethernet/qlogic/qed/qed_rdma.c:1167:21: got restricted __le16 [usertype] <noident>
drivers/net/ethernet/qlogic/qed/qed_rdma.c:1458:9: sparse: invalid assignment: &=
drivers/net/ethernet/qlogic/qed/qed_rdma.c:1458:9: left side has type restricted __le16
drivers/net/ethernet/qlogic/qed/qed_rdma.c:1458:9: right side has type int
drivers/net/ethernet/qlogic/qed/qed_rdma.c:1458:9: sparse: invalid assignment: |=
drivers/net/ethernet/qlogic/qed/qed_rdma.c:1458:9: left side has type restricted __le16
drivers/net/ethernet/qlogic/qed/qed_rdma.c:1458:9: right side has type unsigned long long
drivers/net/ethernet/qlogic/qed/qed_rdma.c:1462:9: sparse: invalid assignment: &=
drivers/net/ethernet/qlogic/qed/qed_rdma.c:1462:9: left side has type restricted __le16
drivers/net/ethernet/qlogic/qed/qed_rdma.c:1462:9: right side has type int
drivers/net/ethernet/qlogic/qed/qed_rdma.c:1462:9: sparse: invalid assignment: |=
drivers/net/ethernet/qlogic/qed/qed_rdma.c:1462:9: left side has type restricted __le16
drivers/net/ethernet/qlogic/qed/qed_rdma.c:1462:9: right side has type unsigned long long
drivers/net/ethernet/qlogic/qed/qed_rdma.c:1465:9: sparse: invalid assignment: &=
drivers/net/ethernet/qlogic/qed/qed_rdma.c:1465:9: left side has type restricted __le16
drivers/net/ethernet/qlogic/qed/qed_rdma.c:1465:9: right side has type int
drivers/net/ethernet/qlogic/qed/qed_rdma.c:1465:9: sparse: invalid assignment: |=
drivers/net/ethernet/qlogic/qed/qed_rdma.c:1465:9: left side has type restricted __le16
drivers/net/ethernet/qlogic/qed/qed_rdma.c:1465:9: right side has type unsigned long long
drivers/net/ethernet/qlogic/qed/qed_rdma.c:1470:17: sparse: invalid assignment: &=
drivers/net/ethernet/qlogic/qed/qed_rdma.c:1470:17: left side has type restricted __le16
drivers/net/ethernet/qlogic/qed/qed_rdma.c:1470:17: right side has type int
drivers/net/ethernet/qlogic/qed/qed_rdma.c:1470:17: sparse: invalid assignment: |=
drivers/net/ethernet/qlogic/qed/qed_rdma.c:1470:17: left side has type restricted __le16
drivers/net/ethernet/qlogic/qed/qed_rdma.c:1470:17: right side has type unsigned long long
drivers/net/ethernet/qlogic/qed/qed_rdma.c:1474:9: sparse: invalid assignment: &=
drivers/net/ethernet/qlogic/qed/qed_rdma.c:1474:9: left side has type restricted __le16
drivers/net/ethernet/qlogic/qed/qed_rdma.c:1474:9: right side has type int
drivers/net/ethernet/qlogic/qed/qed_rdma.c:1474:9: sparse: invalid assignment: |=
drivers/net/ethernet/qlogic/qed/qed_rdma.c:1474:9: left side has type restricted __le16
drivers/net/ethernet/qlogic/qed/qed_rdma.c:1474:9: right side has type unsigned long long
drivers/net/ethernet/qlogic/qed/qed_rdma.c:1478:9: sparse: invalid assignment: &=
drivers/net/ethernet/qlogic/qed/qed_rdma.c:1478:9: left side has type restricted __le16
drivers/net/ethernet/qlogic/qed/qed_rdma.c:1478:9: right side has type int
drivers/net/ethernet/qlogic/qed/qed_rdma.c:1478:9: sparse: invalid assignment: |=
drivers/net/ethernet/qlogic/qed/qed_rdma.c:1478:9: left side has type restricted __le16
drivers/net/ethernet/qlogic/qed/qed_rdma.c:1478:9: right side has type unsigned long long
drivers/net/ethernet/qlogic/qed/qed_rdma.c:1482:9: sparse: invalid assignment: &=
drivers/net/ethernet/qlogic/qed/qed_rdma.c:1482:9: left side has type restricted __le16
drivers/net/ethernet/qlogic/qed/qed_rdma.c:1482:9: right side has type int
drivers/net/ethernet/qlogic/qed/qed_rdma.c:1482:9: sparse: invalid assignment: |=
drivers/net/ethernet/qlogic/qed/qed_rdma.c:1482:9: left side has type restricted __le16
drivers/net/ethernet/qlogic/qed/qed_rdma.c:1482:9: right side has type unsigned long long
drivers/net/ethernet/qlogic/qed/qed_rdma.c:1486:9: sparse: invalid assignment: &=
drivers/net/ethernet/qlogic/qed/qed_rdma.c:1486:9: left side has type restricted __le16
drivers/net/ethernet/qlogic/qed/qed_rdma.c:1486:9: right side has type int
drivers/net/ethernet/qlogic/qed/qed_rdma.c:1486:9: sparse: invalid assignment: |=
drivers/net/ethernet/qlogic/qed/qed_rdma.c:1486:9: left side has type restricted __le16
drivers/net/ethernet/qlogic/qed/qed_rdma.c:1486:9: right side has type unsigned long long
drivers/net/ethernet/qlogic/qed/qed_rdma.c:1490:9: sparse: invalid assignment: &=
drivers/net/ethernet/qlogic/qed/qed_rdma.c:1490:9: left side has type restricted __le16
drivers/net/ethernet/qlogic/qed/qed_rdma.c:1490:9: right side has type int
drivers/net/ethernet/qlogic/qed/qed_rdma.c:1490:9: sparse: invalid assignment: |=
drivers/net/ethernet/qlogic/qed/qed_rdma.c:1490:9: left side has type restricted __le16
drivers/net/ethernet/qlogic/qed/qed_rdma.c:1490:9: right side has type unsigned long long
drivers/net/ethernet/qlogic/qed/qed_rdma.c:1493:9: sparse: invalid assignment: &=
drivers/net/ethernet/qlogic/qed/qed_rdma.c:1493:9: left side has type restricted __le16
drivers/net/ethernet/qlogic/qed/qed_rdma.c:1493:9: right side has type int
drivers/net/ethernet/qlogic/qed/qed_rdma.c:1493:9: sparse: invalid assignment: |=
drivers/net/ethernet/qlogic/qed/qed_rdma.c:1493:9: left side has type restricted __le16
drivers/net/ethernet/qlogic/qed/qed_rdma.c:1493:9: right side has type unsigned long long
>> drivers/net/ethernet/qlogic/qed/qed_rdma.c:1679:29: sparse: incorrect type in assignment (different base types) @@ expected restricted __le32 [usertype] wqe_limit @@ got restricted __le32 [usertype] wqe_limit @@
drivers/net/ethernet/qlogic/qed/qed_rdma.c:1679:29: expected restricted __le32 [usertype] wqe_limit
drivers/net/ethernet/qlogic/qed/qed_rdma.c:1679:29: got restricted __le16 [usertype] <noident>
>> drivers/net/ethernet/qlogic/qed/qed_rdma.c:1655:5: sparse: symbol 'qed_rdma_modify_srq' was not declared. Should it be static?
>> drivers/net/ethernet/qlogic/qed/qed_rdma.c:1691:5: sparse: symbol 'qed_rdma_destroy_srq' was not declared. Should it be static?
>> drivers/net/ethernet/qlogic/qed/qed_rdma.c:1734:5: sparse: symbol 'qed_rdma_create_srq' was not declared. Should it be static?
Please review and possibly fold the followup patch.
vim +1679 drivers/net/ethernet/qlogic/qed/qed_rdma.c
1425
1426 static int
1427 qed_rdma_register_tid(void *rdma_cxt,
1428 struct qed_rdma_register_tid_in_params *params)
1429 {
1430 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
1431 struct rdma_register_tid_ramrod_data *p_ramrod;
1432 struct qed_sp_init_data init_data;
1433 struct qed_spq_entry *p_ent;
1434 enum rdma_tid_type tid_type;
1435 u8 fw_return_code;
1436 int rc;
1437
1438 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", params->itid);
1439
1440 /* Get SPQ entry */
1441 memset(&init_data, 0, sizeof(init_data));
1442 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
1443 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
1444
1445 rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_REGISTER_MR,
1446 p_hwfn->p_rdma_info->proto, &init_data);
1447 if (rc) {
1448 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
1449 return rc;
1450 }
1451
1452 if (p_hwfn->p_rdma_info->last_tid < params->itid)
1453 p_hwfn->p_rdma_info->last_tid = params->itid;
1454
1455 p_ramrod = &p_ent->ramrod.rdma_register_tid;
1456
1457 p_ramrod->flags = 0;
1458 SET_FIELD(p_ramrod->flags,
1459 RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL,
1460 params->pbl_two_level);
1461
1462 SET_FIELD(p_ramrod->flags,
1463 RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED, params->zbva);
1464
1465 SET_FIELD(p_ramrod->flags,
1466 RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR, params->phy_mr);
1467
1468 /* Don't initialize D/C field, as it may override other bits. */
1469 if (!(params->tid_type == QED_RDMA_TID_FMR) && !(params->dma_mr))
1470 SET_FIELD(p_ramrod->flags,
1471 RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG,
1472 params->page_size_log - 12);
1473
1474 SET_FIELD(p_ramrod->flags,
1475 RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ,
1476 params->remote_read);
1477
1478 SET_FIELD(p_ramrod->flags,
1479 RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE,
1480 params->remote_write);
1481
1482 SET_FIELD(p_ramrod->flags,
1483 RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_ATOMIC,
1484 params->remote_atomic);
1485
1486 SET_FIELD(p_ramrod->flags,
1487 RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE,
1488 params->local_write);
1489
> 1490 SET_FIELD(p_ramrod->flags,
1491 RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ, params->local_read);
1492
> 1493 SET_FIELD(p_ramrod->flags,
1494 RDMA_REGISTER_TID_RAMROD_DATA_ENABLE_MW_BIND,
1495 params->mw_bind);
1496
1497 SET_FIELD(p_ramrod->flags1,
1498 RDMA_REGISTER_TID_RAMROD_DATA_PBL_PAGE_SIZE_LOG,
1499 params->pbl_page_size_log - 12);
1500
1501 SET_FIELD(p_ramrod->flags2,
1502 RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR, params->dma_mr);
1503
1504 switch (params->tid_type) {
1505 case QED_RDMA_TID_REGISTERED_MR:
1506 tid_type = RDMA_TID_REGISTERED_MR;
1507 break;
1508 case QED_RDMA_TID_FMR:
1509 tid_type = RDMA_TID_FMR;
1510 break;
1511 case QED_RDMA_TID_MW_TYPE1:
1512 tid_type = RDMA_TID_MW_TYPE1;
1513 break;
1514 case QED_RDMA_TID_MW_TYPE2A:
1515 tid_type = RDMA_TID_MW_TYPE2A;
1516 break;
1517 default:
1518 rc = -EINVAL;
1519 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
1520 return rc;
1521 }
1522 SET_FIELD(p_ramrod->flags1,
1523 RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE, tid_type);
1524
1525 p_ramrod->itid = cpu_to_le32(params->itid);
1526 p_ramrod->key = params->key;
1527 p_ramrod->pd = cpu_to_le16(params->pd);
1528 p_ramrod->length_hi = (u8)(params->length >> 32);
1529 p_ramrod->length_lo = DMA_LO_LE(params->length);
1530 if (params->zbva) {
1531 /* Lower 32 bits of the registered MR address.
1532 * In case of zero based MR, will hold FBO
1533 */
1534 p_ramrod->va.hi = 0;
1535 p_ramrod->va.lo = cpu_to_le32(params->fbo);
1536 } else {
1537 DMA_REGPAIR_LE(p_ramrod->va, params->vaddr);
1538 }
1539 DMA_REGPAIR_LE(p_ramrod->pbl_base, params->pbl_ptr);
1540
1541 /* DIF */
1542 if (params->dif_enabled) {
1543 SET_FIELD(p_ramrod->flags2,
1544 RDMA_REGISTER_TID_RAMROD_DATA_DIF_ON_HOST_FLG, 1);
1545 DMA_REGPAIR_LE(p_ramrod->dif_error_addr,
1546 params->dif_error_addr);
1547 DMA_REGPAIR_LE(p_ramrod->dif_runt_addr, params->dif_runt_addr);
1548 }
1549
1550 rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
1551 if (rc)
1552 return rc;
1553
1554 if (fw_return_code != RDMA_RETURN_OK) {
1555 DP_NOTICE(p_hwfn, "fw_return_code = %d\n", fw_return_code);
1556 return -EINVAL;
1557 }
1558
1559 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Register TID, rc = %d\n", rc);
1560 return rc;
1561 }
1562
1563 static int qed_rdma_deregister_tid(void *rdma_cxt, u32 itid)
1564 {
1565 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
1566 struct rdma_deregister_tid_ramrod_data *p_ramrod;
1567 struct qed_sp_init_data init_data;
1568 struct qed_spq_entry *p_ent;
1569 struct qed_ptt *p_ptt;
1570 u8 fw_return_code;
1571 int rc;
1572
1573 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", itid);
1574
1575 /* Get SPQ entry */
1576 memset(&init_data, 0, sizeof(init_data));
1577 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
1578 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
1579
1580 rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_DEREGISTER_MR,
1581 p_hwfn->p_rdma_info->proto, &init_data);
1582 if (rc) {
1583 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
1584 return rc;
1585 }
1586
1587 p_ramrod = &p_ent->ramrod.rdma_deregister_tid;
1588 p_ramrod->itid = cpu_to_le32(itid);
1589
1590 rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
1591 if (rc) {
1592 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
1593 return rc;
1594 }
1595
1596 if (fw_return_code == RDMA_RETURN_DEREGISTER_MR_BAD_STATE_ERR) {
1597 DP_NOTICE(p_hwfn, "fw_return_code = %d\n", fw_return_code);
1598 return -EINVAL;
1599 } else if (fw_return_code == RDMA_RETURN_NIG_DRAIN_REQ) {
1600 /* Bit indicating that the TID is in use and a nig drain is
1601 * required before sending the ramrod again
1602 */
1603 p_ptt = qed_ptt_acquire(p_hwfn);
1604 if (!p_ptt) {
1605 rc = -EBUSY;
1606 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
1607 "Failed to acquire PTT\n");
1608 return rc;
1609 }
1610
1611 rc = qed_mcp_drain(p_hwfn, p_ptt);
1612 if (rc) {
1613 qed_ptt_release(p_hwfn, p_ptt);
1614 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
1615 "Drain failed\n");
1616 return rc;
1617 }
1618
1619 qed_ptt_release(p_hwfn, p_ptt);
1620
1621 /* Resend the ramrod */
1622 rc = qed_sp_init_request(p_hwfn, &p_ent,
1623 RDMA_RAMROD_DEREGISTER_MR,
1624 p_hwfn->p_rdma_info->proto,
1625 &init_data);
1626 if (rc) {
1627 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
1628 "Failed to init sp-element\n");
1629 return rc;
1630 }
1631
1632 rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
1633 if (rc) {
1634 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
1635 "Ramrod failed\n");
1636 return rc;
1637 }
1638
1639 if (fw_return_code != RDMA_RETURN_OK) {
1640 DP_NOTICE(p_hwfn, "fw_return_code = %d\n",
1641 fw_return_code);
1642 return rc;
1643 }
1644 }
1645
1646 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "De-registered TID, rc = %d\n", rc);
1647 return rc;
1648 }
1649
1650 static void *qed_rdma_get_rdma_ctx(struct qed_dev *cdev)
1651 {
1652 return QED_LEADING_HWFN(cdev);
1653 }
1654
> 1655 int qed_rdma_modify_srq(void *rdma_cxt,
1656 struct qed_rdma_modify_srq_in_params *in_params)
1657 {
1658 struct rdma_srq_modify_ramrod_data *p_ramrod;
1659 struct qed_hwfn *p_hwfn = rdma_cxt;
1660 struct qed_sp_init_data init_data;
1661 struct qed_spq_entry *p_ent;
1662 u16 opaque_fid;
1663 int rc;
1664
1665 memset(&init_data, 0, sizeof(init_data));
1666 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
1667 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
1668
1669 rc = qed_sp_init_request(p_hwfn, &p_ent,
1670 RDMA_RAMROD_MODIFY_SRQ,
1671 p_hwfn->p_rdma_info->proto, &init_data);
1672 if (rc)
1673 return rc;
1674
1675 p_ramrod = &p_ent->ramrod.rdma_modify_srq;
1676 p_ramrod->srq_id.srq_idx = cpu_to_le16(in_params->srq_id);
1677 opaque_fid = p_hwfn->hw_info.opaque_fid;
1678 p_ramrod->srq_id.opaque_fid = cpu_to_le16(opaque_fid);
> 1679 p_ramrod->wqe_limit = cpu_to_le16(in_params->wqe_limit);
1680
1681 rc = qed_spq_post(p_hwfn, p_ent, NULL);
1682 if (rc)
1683 return rc;
1684
1685 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "modified SRQ id = %x",
1686 in_params->srq_id);
1687
1688 return rc;
1689 }
1690
> 1691 int qed_rdma_destroy_srq(void *rdma_cxt,
1692 struct qed_rdma_destroy_srq_in_params *in_params)
1693 {
1694 struct rdma_srq_destroy_ramrod_data *p_ramrod;
1695 struct qed_hwfn *p_hwfn = rdma_cxt;
1696 struct qed_sp_init_data init_data;
1697 struct qed_spq_entry *p_ent;
1698 struct qed_bmap *bmap;
1699 u16 opaque_fid;
1700 int rc;
1701
1702 opaque_fid = p_hwfn->hw_info.opaque_fid;
1703
1704 memset(&init_data, 0, sizeof(init_data));
1705 init_data.opaque_fid = opaque_fid;
1706 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
1707
1708 rc = qed_sp_init_request(p_hwfn, &p_ent,
1709 RDMA_RAMROD_DESTROY_SRQ,
1710 p_hwfn->p_rdma_info->proto, &init_data);
1711 if (rc)
1712 return rc;
1713
1714 p_ramrod = &p_ent->ramrod.rdma_destroy_srq;
1715 p_ramrod->srq_id.srq_idx = cpu_to_le16(in_params->srq_id);
1716 p_ramrod->srq_id.opaque_fid = cpu_to_le16(opaque_fid);
1717
1718 rc = qed_spq_post(p_hwfn, p_ent, NULL);
1719 if (rc)
1720 return rc;
1721
1722 bmap = &p_hwfn->p_rdma_info->srq_map;
1723
1724 spin_lock_bh(&p_hwfn->p_rdma_info->lock);
1725 qed_bmap_release_id(p_hwfn, bmap, in_params->srq_id);
1726 spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
1727
1728 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "SRQ destroyed Id = %x",
1729 in_params->srq_id);
1730
1731 return rc;
1732 }
1733
> 1734 int qed_rdma_create_srq(void *rdma_cxt,
1735 struct qed_rdma_create_srq_in_params *in_params,
1736 struct qed_rdma_create_srq_out_params *out_params)
1737 {
1738 struct rdma_srq_create_ramrod_data *p_ramrod;
1739 struct qed_hwfn *p_hwfn = rdma_cxt;
1740 struct qed_sp_init_data init_data;
1741 enum qed_cxt_elem_type elem_type;
1742 struct qed_spq_entry *p_ent;
1743 u16 opaque_fid, srq_id;
1744 struct qed_bmap *bmap;
1745 u32 returned_id;
1746 int rc;
1747
1748 bmap = &p_hwfn->p_rdma_info->srq_map;
1749 spin_lock_bh(&p_hwfn->p_rdma_info->lock);
1750 rc = qed_rdma_bmap_alloc_id(p_hwfn, bmap, &returned_id);
1751 spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
1752
1753 if (rc) {
1754 DP_NOTICE(p_hwfn, "failed to allocate srq id\n");
1755 return rc;
1756 }
1757
1758 elem_type = QED_ELEM_SRQ;
1759 rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, elem_type, returned_id);
1760 if (rc)
1761 goto err;
1762 /* returned id is no greater than u16 */
1763 srq_id = (u16)returned_id;
1764 opaque_fid = p_hwfn->hw_info.opaque_fid;
1765
1766 memset(&init_data, 0, sizeof(init_data));
1767 opaque_fid = p_hwfn->hw_info.opaque_fid;
1768 init_data.opaque_fid = opaque_fid;
1769 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
1770
1771 rc = qed_sp_init_request(p_hwfn, &p_ent,
1772 RDMA_RAMROD_CREATE_SRQ,
1773 p_hwfn->p_rdma_info->proto, &init_data);
1774 if (rc)
1775 goto err;
1776
1777 p_ramrod = &p_ent->ramrod.rdma_create_srq;
1778 DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, in_params->pbl_base_addr);
1779 p_ramrod->pages_in_srq_pbl = cpu_to_le16(in_params->num_pages);
1780 p_ramrod->pd_id = cpu_to_le16(in_params->pd_id);
1781 p_ramrod->srq_id.srq_idx = cpu_to_le16(srq_id);
1782 p_ramrod->srq_id.opaque_fid = cpu_to_le16(opaque_fid);
1783 p_ramrod->page_size = cpu_to_le16(in_params->page_size);
1784 DMA_REGPAIR_LE(p_ramrod->producers_addr, in_params->prod_pair_addr);
1785
1786 rc = qed_spq_post(p_hwfn, p_ent, NULL);
1787 if (rc)
1788 goto err;
1789
1790 out_params->srq_id = srq_id;
1791
1792 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
1793 "SRQ created Id = %x\n", out_params->srq_id);
1794
1795 return rc;
1796
1797 err:
1798 spin_lock_bh(&p_hwfn->p_rdma_info->lock);
1799 qed_bmap_release_id(p_hwfn, bmap, returned_id);
1800 spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
1801
1802 return rc;
1803 }
1804
---
0-DAY kernel test infrastructure Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all Intel Corporation
^ permalink raw reply [flat|nested] 7+ messages in thread
* RE: [PATCH net-next] qed: Add srq core support for RoCE and iWARP
2018-05-31 17:33 ` Leon Romanovsky
@ 2018-06-03 16:10 ` Bason, Yuval
0 siblings, 0 replies; 7+ messages in thread
From: Bason, Yuval @ 2018-06-03 16:10 UTC (permalink / raw)
To: Leon Romanovsky
Cc: davem, netdev, jgg, dledford, linux-rdma, Kalderon, Michal, Elior, Ariel
From: Leon Romanovsky [mailto:leon@kernel.org]
Sent: Thursday, May 31, 2018 8:33 PM
> On Wed, May 30, 2018 at 04:11:37PM +0300, Yuval Bason wrote:
> > This patch adds support for configuring SRQ and provides the necessary
> > APIs for rdma upper layer driver (qedr) to enable the SRQ feature.
> >
> > Signed-off-by: Michal Kalderon <michal.kalderon@cavium.com>
> > Signed-off-by: Ariel Elior <ariel.elior@cavium.com>
> > Signed-off-by: Yuval Bason <yuval.bason@cavium.com>
> > ---
> > drivers/net/ethernet/qlogic/qed/qed_cxt.c | 5 +-
> > drivers/net/ethernet/qlogic/qed/qed_cxt.h | 1 +
> > drivers/net/ethernet/qlogic/qed/qed_hsi.h | 2 +
> > drivers/net/ethernet/qlogic/qed/qed_iwarp.c | 23 ++++
> > drivers/net/ethernet/qlogic/qed/qed_main.c | 2 +
> > drivers/net/ethernet/qlogic/qed/qed_rdma.c | 179
> +++++++++++++++++++++++++++-
> > drivers/net/ethernet/qlogic/qed/qed_rdma.h | 2 +
> > drivers/net/ethernet/qlogic/qed/qed_roce.c | 17 ++-
> > include/linux/qed/qed_rdma_if.h | 12 +-
> > 9 files changed, 235 insertions(+), 8 deletions(-)
> >
>
> ...
>
> > + struct qed_sp_init_data init_data;
>
> ...
>
> > + memset(&init_data, 0, sizeof(init_data));
>
> This pattern is so common in this patch, why?
>
> "struct qed_sp_init_data init_data = {};" will do the trick.
>
Thanks for pointing out, will be fixed in v2.
> Thanks
^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: [PATCH net-next] qed: Add srq core support for RoCE and iWARP
2018-05-30 13:11 [PATCH net-next] qed: Add srq core support for RoCE and iWARP Yuval Bason
` (2 preceding siblings ...)
2018-06-01 3:41 ` [PATCH net-next] qed: Add srq core support for RoCE and iWARP kbuild test robot
@ 2018-07-09 16:42 ` Jason Gunthorpe
2018-07-10 9:20 ` Bason, Yuval
3 siblings, 1 reply; 7+ messages in thread
From: Jason Gunthorpe @ 2018-07-09 16:42 UTC (permalink / raw)
To: Yuval Bason
Cc: davem, netdev, dledford, linux-rdma, Michal Kalderon, Ariel Elior
On Wed, May 30, 2018 at 04:11:37PM +0300, Yuval Bason wrote:
> This patch adds support for configuring SRQ and provides the necessary
> APIs for rdma upper layer driver (qedr) to enable the SRQ feature.
>
> Signed-off-by: Michal Kalderon <michal.kalderon@cavium.com>
> Signed-off-by: Ariel Elior <ariel.elior@cavium.com>
> Signed-off-by: Yuval Bason <yuval.bason@cavium.com>
> ---
> drivers/net/ethernet/qlogic/qed/qed_cxt.c | 5 +-
> drivers/net/ethernet/qlogic/qed/qed_cxt.h | 1 +
> drivers/net/ethernet/qlogic/qed/qed_hsi.h | 2 +
> drivers/net/ethernet/qlogic/qed/qed_iwarp.c | 23 ++++
> drivers/net/ethernet/qlogic/qed/qed_main.c | 2 +
> drivers/net/ethernet/qlogic/qed/qed_rdma.c | 179 +++++++++++++++++++++++++++-
> drivers/net/ethernet/qlogic/qed/qed_rdma.h | 2 +
> drivers/net/ethernet/qlogic/qed/qed_roce.c | 17 ++-
> include/linux/qed/qed_rdma_if.h | 12 +-
> 9 files changed, 235 insertions(+), 8 deletions(-)
Is this a pre-requisite for your related RDMA patches?
If yes, are you proposing that this patch should go via the RDMA tree?
Jason
^ permalink raw reply [flat|nested] 7+ messages in thread
* RE: [PATCH net-next] qed: Add srq core support for RoCE and iWARP
2018-07-09 16:42 ` Jason Gunthorpe
@ 2018-07-10 9:20 ` Bason, Yuval
0 siblings, 0 replies; 7+ messages in thread
From: Bason, Yuval @ 2018-07-10 9:20 UTC (permalink / raw)
To: Jason Gunthorpe
Cc: davem, netdev, dledford, linux-rdma, Kalderon, Michal, Elior, Ariel
> From: Jason Gunthorpe [mailto:jgg@ziepe.ca]
> Sent: Monday, July 9, 2018 7:42 PM
>
> On Wed, May 30, 2018 at 04:11:37PM +0300, Yuval Bason wrote:
> > This patch adds support for configuring SRQ and provides the necessary
> > APIs for rdma upper layer driver (qedr) to enable the SRQ feature.
> >
> > Signed-off-by: Michal Kalderon <michal.kalderon@cavium.com>
> > Signed-off-by: Ariel Elior <ariel.elior@cavium.com>
> > Signed-off-by: Yuval Bason <yuval.bason@cavium.com>
> > ---
> > drivers/net/ethernet/qlogic/qed/qed_cxt.c | 5 +-
> > drivers/net/ethernet/qlogic/qed/qed_cxt.h | 1 +
> > drivers/net/ethernet/qlogic/qed/qed_hsi.h | 2 +
> > drivers/net/ethernet/qlogic/qed/qed_iwarp.c | 23 ++++
> > drivers/net/ethernet/qlogic/qed/qed_main.c | 2 +
> > drivers/net/ethernet/qlogic/qed/qed_rdma.c | 179
> +++++++++++++++++++++++++++-
> > drivers/net/ethernet/qlogic/qed/qed_rdma.h | 2 +
> > drivers/net/ethernet/qlogic/qed/qed_roce.c | 17 ++-
> > include/linux/qed/qed_rdma_if.h | 12 +-
> > 9 files changed, 235 insertions(+), 8 deletions(-)
>
> Is this a pre-requisite for your related RDMA patches?
>
Yes, but this was already accepted to net-next in previous cycle and is part of the rdma-next tree:
https://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma.git/commit/drivers/net/ethernet/qlogic/qed?h=for-next&id=39dbc646fd2c67ee9b71450ce172cbd714d4e7fb
> If yes, are you proposing that this patch should go via the RDMA tree?
>
> Jason
^ permalink raw reply [flat|nested] 7+ messages in thread
end of thread, other threads:[~2018-07-10 9:20 UTC | newest]
Thread overview: 7+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2018-05-30 13:11 [PATCH net-next] qed: Add srq core support for RoCE and iWARP Yuval Bason
2018-05-31 17:33 ` Leon Romanovsky
2018-06-03 16:10 ` Bason, Yuval
2018-06-01 3:41 ` [RFC PATCH] qed: qed_rdma_modify_srq() can be static kbuild test robot
2018-06-01 3:41 ` [PATCH net-next] qed: Add srq core support for RoCE and iWARP kbuild test robot
2018-07-09 16:42 ` Jason Gunthorpe
2018-07-10 9:20 ` Bason, Yuval
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).