* [PATCH rdma-next v1 1/4] RDMA/core: Create and destroy counters in the ib_core
From: Leon Romanovsky @ 2020-06-30 10:18 UTC
To: Doug Ledford, Jason Gunthorpe; +Cc: Leon Romanovsky, linux-rdma
From: Leon Romanovsky <leonro@mellanox.com>
Move the allocation and destruction of counters objects under ib_core
responsibility.
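In uverbs this gives the usual flow for core-allocated objects (condensed
from the diff below; error unwinding omitted):

	counters = rdma_zalloc_drv_obj(ib_dev, ib_counters);
	if (!counters)
		return -ENOMEM;

	/* ib_core owns the shared fields */
	counters->device = ib_dev;
	counters->uobject = uobj;
	uobj->object = counters;
	atomic_set(&counters->usecnt, 0);

	/* the driver op only initializes its private part */
	ret = ib_dev->ops.create_counters(counters, attrs);
	if (ret)
		kfree(counters);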
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
---
drivers/infiniband/core/device.c | 1 +
.../core/uverbs_std_types_counters.c | 17 ++++++++--------
drivers/infiniband/hw/mlx5/main.c | 20 ++++++-------------
include/rdma/ib_verbs.h | 7 ++++---
4 files changed, 20 insertions(+), 25 deletions(-)
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 1da1b0731f25..088559d72d57 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -2682,6 +2682,7 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
SET_DEVICE_OP(dev_ops, set_vf_link_state);
SET_OBJ_SIZE(dev_ops, ib_ah);
+ SET_OBJ_SIZE(dev_ops, ib_counters);
SET_OBJ_SIZE(dev_ops, ib_cq);
SET_OBJ_SIZE(dev_ops, ib_pd);
SET_OBJ_SIZE(dev_ops, ib_srq);
diff --git a/drivers/infiniband/core/uverbs_std_types_counters.c b/drivers/infiniband/core/uverbs_std_types_counters.c
index 9f013304e677..c7e7438752bc 100644
--- a/drivers/infiniband/core/uverbs_std_types_counters.c
+++ b/drivers/infiniband/core/uverbs_std_types_counters.c
@@ -46,7 +46,9 @@ static int uverbs_free_counters(struct ib_uobject *uobject,
if (ret)
return ret;
- return counters->device->ops.destroy_counters(counters);
+ counters->device->ops.destroy_counters(counters);
+ kfree(counters);
+ return 0;
}
static int UVERBS_HANDLER(UVERBS_METHOD_COUNTERS_CREATE)(
@@ -66,20 +68,19 @@ static int UVERBS_HANDLER(UVERBS_METHOD_COUNTERS_CREATE)(
if (!ib_dev->ops.create_counters)
return -EOPNOTSUPP;
- counters = ib_dev->ops.create_counters(ib_dev, attrs);
- if (IS_ERR(counters)) {
- ret = PTR_ERR(counters);
- goto err_create_counters;
- }
+ counters = rdma_zalloc_drv_obj(ib_dev, ib_counters);
+ if (!counters)
+ return -ENOMEM;
counters->device = ib_dev;
counters->uobject = uobj;
uobj->object = counters;
atomic_set(&counters->usecnt, 0);
- return 0;
+ ret = ib_dev->ops.create_counters(counters, attrs);
+ if (ret)
+ kfree(counters);
-err_create_counters:
return ret;
}
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 46c596a855e7..ca35394a181a 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -6458,7 +6458,7 @@ static int mlx5_ib_read_counters(struct ib_counters *counters,
return ret;
}
-static int mlx5_ib_destroy_counters(struct ib_counters *counters)
+static void mlx5_ib_destroy_counters(struct ib_counters *counters)
{
struct mlx5_ib_mcounters *mcounters = to_mcounters(counters);
@@ -6466,24 +6466,15 @@ static int mlx5_ib_destroy_counters(struct ib_counters *counters)
if (mcounters->hw_cntrs_hndl)
mlx5_fc_destroy(to_mdev(counters->device)->mdev,
mcounters->hw_cntrs_hndl);
-
- kfree(mcounters);
-
- return 0;
}
-static struct ib_counters *mlx5_ib_create_counters(struct ib_device *device,
- struct uverbs_attr_bundle *attrs)
+static int mlx5_ib_create_counters(struct ib_counters *counters,
+ struct uverbs_attr_bundle *attrs)
{
- struct mlx5_ib_mcounters *mcounters;
-
- mcounters = kzalloc(sizeof(*mcounters), GFP_KERNEL);
- if (!mcounters)
- return ERR_PTR(-ENOMEM);
+ struct mlx5_ib_mcounters *mcounters = to_mcounters(counters);
mutex_init(&mcounters->mcntrs_mutex);
-
- return &mcounters->ibcntrs;
+ return 0;
}
static void mlx5_ib_stage_init_cleanup(struct mlx5_ib_dev *dev)
@@ -6651,6 +6642,7 @@ static const struct ib_device_ops mlx5_ib_dev_ops = {
.resize_cq = mlx5_ib_resize_cq,
INIT_RDMA_OBJ_SIZE(ib_ah, mlx5_ib_ah, ibah),
+ INIT_RDMA_OBJ_SIZE(ib_counters, mlx5_ib_mcounters, ibcntrs),
INIT_RDMA_OBJ_SIZE(ib_cq, mlx5_ib_cq, ibcq),
INIT_RDMA_OBJ_SIZE(ib_pd, mlx5_ib_pd, ibpd),
INIT_RDMA_OBJ_SIZE(ib_srq, mlx5_ib_srq, ibsrq),
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 55762f0458a6..9b69bc0a53cf 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -2541,9 +2541,9 @@ struct ib_device_ops {
struct ib_mr *(*reg_dm_mr)(struct ib_pd *pd, struct ib_dm *dm,
struct ib_dm_mr_attr *attr,
struct uverbs_attr_bundle *attrs);
- struct ib_counters *(*create_counters)(
- struct ib_device *device, struct uverbs_attr_bundle *attrs);
- int (*destroy_counters)(struct ib_counters *counters);
+ int (*create_counters)(struct ib_counters *counters,
+ struct uverbs_attr_bundle *attrs);
+ void (*destroy_counters)(struct ib_counters *counters);
int (*read_counters)(struct ib_counters *counters,
struct ib_counters_read_attr *counters_read_attr,
struct uverbs_attr_bundle *attrs);
@@ -2651,6 +2651,7 @@ struct ib_device_ops {
struct uverbs_attr_bundle *attrs);
DECLARE_RDMA_OBJ_SIZE(ib_ah);
+ DECLARE_RDMA_OBJ_SIZE(ib_counters);
DECLARE_RDMA_OBJ_SIZE(ib_cq);
DECLARE_RDMA_OBJ_SIZE(ib_pd);
DECLARE_RDMA_OBJ_SIZE(ib_srq);
--
2.26.2
* [PATCH rdma-next v1 2/4] RDMA: Clean MW allocation and free flows
From: Leon Romanovsky @ 2020-06-30 10:18 UTC
To: Doug Ledford, Jason Gunthorpe
Cc: Leon Romanovsky, Lijun Ou, linux-rdma, Potnuri Bharat Teja,
Weihang Li, Wei Hu(Xavier),
Yishai Hadas
From: Leon Romanovsky <leonro@mellanox.com>
Move the allocation and destruction of memory windows under ib_core
responsibility and clean up the drivers so that no updates to the MW
ib_core structures are done in the driver layer.
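The driver-facing contract changes as follows (taken from the ib_verbs.h
hunk below):

	/* before: the driver allocates and returns the object */
	struct ib_mw *(*alloc_mw)(struct ib_pd *pd, enum ib_mw_type type,
				  struct ib_udata *udata);
	int (*dealloc_mw)(struct ib_mw *mw);

	/* after: ib_core allocates, the driver only reports the rkey */
	int (*alloc_mw)(struct ib_mw *mw, u32 *rkey, struct ib_udata *udata);
	void (*dealloc_mw)(struct ib_mw *mw);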
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
---
drivers/infiniband/core/device.c | 1 +
drivers/infiniband/core/uverbs.h | 2 +-
drivers/infiniband/core/uverbs_cmd.c | 26 ++++++++-----
drivers/infiniband/core/uverbs_main.c | 10 ++---
drivers/infiniband/core/uverbs_std_types.c | 3 +-
drivers/infiniband/hw/cxgb4/iw_cxgb4.h | 5 +--
drivers/infiniband/hw/cxgb4/mem.c | 35 ++++++-----------
drivers/infiniband/hw/cxgb4/provider.c | 4 +-
drivers/infiniband/hw/hns/hns_roce_device.h | 5 +--
drivers/infiniband/hw/hns/hns_roce_main.c | 2 +
drivers/infiniband/hw/hns/hns_roce_mr.c | 31 +++++----------
drivers/infiniband/hw/mlx4/main.c | 2 +
drivers/infiniband/hw/mlx4/mlx4_ib.h | 5 +--
drivers/infiniband/hw/mlx4/mr.c | 32 +++++-----------
drivers/infiniband/hw/mlx5/main.c | 2 +
drivers/infiniband/hw/mlx5/mlx5_ib.h | 5 +--
drivers/infiniband/hw/mlx5/mr.c | 42 ++++++++-------------
include/rdma/ib_verbs.h | 6 +--
18 files changed, 91 insertions(+), 127 deletions(-)
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 088559d72d57..257e8a667977 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -2684,6 +2684,7 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
SET_OBJ_SIZE(dev_ops, ib_ah);
SET_OBJ_SIZE(dev_ops, ib_counters);
SET_OBJ_SIZE(dev_ops, ib_cq);
+ SET_OBJ_SIZE(dev_ops, ib_mw);
SET_OBJ_SIZE(dev_ops, ib_pd);
SET_OBJ_SIZE(dev_ops, ib_srq);
SET_OBJ_SIZE(dev_ops, ib_ucontext);
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
index 53a10479958b..072bfe4e1b5b 100644
--- a/drivers/infiniband/core/uverbs.h
+++ b/drivers/infiniband/core/uverbs.h
@@ -242,7 +242,7 @@ int ib_uverbs_dealloc_xrcd(struct ib_uobject *uobject, struct ib_xrcd *xrcd,
enum rdma_remove_reason why,
struct uverbs_attr_bundle *attrs);
-int uverbs_dealloc_mw(struct ib_mw *mw);
+void uverbs_dealloc_mw(struct ib_mw *mw);
void ib_uverbs_detach_umcast(struct ib_qp *qp,
struct ib_uqp_object *uobj);
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 68c9a0210220..a5301f3d3059 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -890,7 +890,7 @@ static int ib_uverbs_dereg_mr(struct uverbs_attr_bundle *attrs)
static int ib_uverbs_alloc_mw(struct uverbs_attr_bundle *attrs)
{
struct ib_uverbs_alloc_mw cmd;
- struct ib_uverbs_alloc_mw_resp resp;
+ struct ib_uverbs_alloc_mw_resp resp = {};
struct ib_uobject *uobj;
struct ib_pd *pd;
struct ib_mw *mw;
@@ -916,21 +916,24 @@ static int ib_uverbs_alloc_mw(struct uverbs_attr_bundle *attrs)
goto err_put;
}
- mw = pd->device->ops.alloc_mw(pd, cmd.mw_type, &attrs->driver_udata);
- if (IS_ERR(mw)) {
- ret = PTR_ERR(mw);
+ mw = rdma_zalloc_drv_obj(ib_dev, ib_mw);
+ if (!mw) {
+ ret = -ENOMEM;
goto err_put;
}
- mw->device = pd->device;
- mw->pd = pd;
+ mw->device = ib_dev;
+ mw->pd = pd;
mw->uobject = uobj;
- atomic_inc(&pd->usecnt);
-
uobj->object = mw;
+ mw->type = cmd.mw_type;
- memset(&resp, 0, sizeof(resp));
- resp.rkey = mw->rkey;
+ ret = pd->device->ops.alloc_mw(mw, &mw->rkey, &attrs->driver_udata);
+ if (ret)
+ goto err_alloc;
+
+ atomic_inc(&pd->usecnt);
+ resp.rkey = mw->rkey;
resp.mw_handle = uobj->id;
ret = uverbs_response(attrs, &resp, sizeof(resp));
@@ -943,6 +946,9 @@ static int ib_uverbs_alloc_mw(struct uverbs_attr_bundle *attrs)
err_copy:
uverbs_dealloc_mw(mw);
+ mw = NULL;
+err_alloc:
+ kfree(mw);
err_put:
uobj_put_obj_read(pd);
err_free:
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 69e4755cc04b..706c972ea3a1 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -102,15 +102,13 @@ struct ib_ucontext *ib_uverbs_get_ucontext_file(struct ib_uverbs_file *ufile)
}
EXPORT_SYMBOL(ib_uverbs_get_ucontext_file);
-int uverbs_dealloc_mw(struct ib_mw *mw)
+void uverbs_dealloc_mw(struct ib_mw *mw)
{
struct ib_pd *pd = mw->pd;
- int ret;
- ret = mw->device->ops.dealloc_mw(mw);
- if (!ret)
- atomic_dec(&pd->usecnt);
- return ret;
+ mw->device->ops.dealloc_mw(mw);
+ atomic_dec(&pd->usecnt);
+ kfree(mw);
}
static void ib_uverbs_release_dev(struct device *device)
diff --git a/drivers/infiniband/core/uverbs_std_types.c b/drivers/infiniband/core/uverbs_std_types.c
index 08c39cfb1bd9..e4994cc4cc51 100644
--- a/drivers/infiniband/core/uverbs_std_types.c
+++ b/drivers/infiniband/core/uverbs_std_types.c
@@ -72,7 +72,8 @@ static int uverbs_free_mw(struct ib_uobject *uobject,
enum rdma_remove_reason why,
struct uverbs_attr_bundle *attrs)
{
- return uverbs_dealloc_mw((struct ib_mw *)uobject->object);
+ uverbs_dealloc_mw((struct ib_mw *)uobject->object);
+ return 0;
}
static int uverbs_free_rwq_ind_tbl(struct ib_uobject *uobject,
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 27da0705c88a..5545bcd0043e 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -983,10 +983,9 @@ struct ib_mr *c4iw_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
u32 max_num_sg, struct ib_udata *udata);
int c4iw_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
unsigned int *sg_offset);
-int c4iw_dealloc_mw(struct ib_mw *mw);
+void c4iw_dealloc_mw(struct ib_mw *mw);
void c4iw_dealloc(struct uld_ctx *ctx);
-struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
- struct ib_udata *udata);
+int c4iw_alloc_mw(struct ib_mw *mw, u32 *rkey, struct ib_udata *udata);
struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start,
u64 length, u64 virt, int acc,
struct ib_udata *udata);
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index 1e4f4e525598..edb62d9cf404 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -611,30 +611,23 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
return ERR_PTR(err);
}
-struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
- struct ib_udata *udata)
+int c4iw_alloc_mw(struct ib_mw *ibmw, u32 *rkey, struct ib_udata *udata)
{
+ struct c4iw_mw *mhp = to_c4iw_mw(ibmw);
struct c4iw_dev *rhp;
struct c4iw_pd *php;
- struct c4iw_mw *mhp;
u32 mmid;
u32 stag = 0;
int ret;
- if (type != IB_MW_TYPE_1)
- return ERR_PTR(-EINVAL);
+ if (ibmw->type != IB_MW_TYPE_1)
+ return -EINVAL;
- php = to_c4iw_pd(pd);
+ php = to_c4iw_pd(ibmw->pd);
rhp = php->rhp;
- mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
- if (!mhp)
- return ERR_PTR(-ENOMEM);
-
mhp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
- if (!mhp->wr_waitp) {
- ret = -ENOMEM;
- goto free_mhp;
- }
+ if (!mhp->wr_waitp)
+ return -ENOMEM;
mhp->dereg_skb = alloc_skb(SGE_MAX_WR_LEN, GFP_KERNEL);
if (!mhp->dereg_skb) {
@@ -645,18 +638,19 @@ struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
ret = allocate_window(&rhp->rdev, &stag, php->pdid, mhp->wr_waitp);
if (ret)
goto free_skb;
+
mhp->rhp = rhp;
mhp->attr.pdid = php->pdid;
mhp->attr.type = FW_RI_STAG_MW;
mhp->attr.stag = stag;
mmid = (stag) >> 8;
- mhp->ibmw.rkey = stag;
+ *rkey = stag;
if (xa_insert_irq(&rhp->mrs, mmid, mhp, GFP_KERNEL)) {
ret = -ENOMEM;
goto dealloc_win;
}
pr_debug("mmid 0x%x mhp %p stag 0x%x\n", mmid, mhp, stag);
- return &(mhp->ibmw);
+ return 0;
dealloc_win:
deallocate_window(&rhp->rdev, mhp->attr.stag, mhp->dereg_skb,
@@ -665,12 +659,10 @@ struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
kfree_skb(mhp->dereg_skb);
free_wr_wait:
c4iw_put_wr_wait(mhp->wr_waitp);
-free_mhp:
- kfree(mhp);
- return ERR_PTR(ret);
+ return ret;
}
-int c4iw_dealloc_mw(struct ib_mw *mw)
+void c4iw_dealloc_mw(struct ib_mw *mw)
{
struct c4iw_dev *rhp;
struct c4iw_mw *mhp;
@@ -684,9 +676,6 @@ int c4iw_dealloc_mw(struct ib_mw *mw)
mhp->wr_waitp);
kfree_skb(mhp->dereg_skb);
c4iw_put_wr_wait(mhp->wr_waitp);
- pr_debug("ib_mw %p mmid 0x%x ptr %p\n", mw, mmid, mhp);
- kfree(mhp);
- return 0;
}
struct ib_mr *c4iw_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index 1d3ff59e4060..7ec74e2fa885 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -508,8 +508,10 @@ static const struct ib_device_ops c4iw_dev_ops = {
.query_qp = c4iw_ib_query_qp,
.reg_user_mr = c4iw_reg_user_mr,
.req_notify_cq = c4iw_arm_cq,
- INIT_RDMA_OBJ_SIZE(ib_pd, c4iw_pd, ibpd),
+
INIT_RDMA_OBJ_SIZE(ib_cq, c4iw_cq, ibcq),
+ INIT_RDMA_OBJ_SIZE(ib_mw, c4iw_mw, ibmw),
+ INIT_RDMA_OBJ_SIZE(ib_pd, c4iw_pd, ibpd),
INIT_RDMA_OBJ_SIZE(ib_srq, c4iw_srq, ibsrq),
INIT_RDMA_OBJ_SIZE(ib_ucontext, c4iw_ucontext, ibucontext),
};
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index b890ff63f24d..e745e5c41770 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -1201,9 +1201,8 @@ int hns_roce_hw_destroy_mpt(struct hns_roce_dev *hr_dev,
unsigned long mpt_index);
unsigned long key_to_hw_index(u32 key);
-struct ib_mw *hns_roce_alloc_mw(struct ib_pd *pd, enum ib_mw_type,
- struct ib_udata *udata);
-int hns_roce_dealloc_mw(struct ib_mw *ibmw);
+int hns_roce_alloc_mw(struct ib_mw *ibmw, u32 *rkey, struct ib_udata *udata);
+void hns_roce_dealloc_mw(struct ib_mw *ibmw);
void hns_roce_buf_free(struct hns_roce_dev *hr_dev, struct hns_roce_buf *buf);
int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index 5907cfd878a6..45f7353bd348 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -454,6 +454,8 @@ static const struct ib_device_ops hns_roce_dev_mr_ops = {
static const struct ib_device_ops hns_roce_dev_mw_ops = {
.alloc_mw = hns_roce_alloc_mw,
.dealloc_mw = hns_roce_dealloc_mw,
+
+ INIT_RDMA_OBJ_SIZE(ib_mw, hns_roce_mw, ibmw),
};
static const struct ib_device_ops hns_roce_dev_frmr_ops = {
diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
index 0e71ebee9e52..4af2797c3b95 100644
--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
+++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
@@ -589,28 +589,22 @@ static int hns_roce_mw_enable(struct hns_roce_dev *hr_dev,
return ret;
}
-struct ib_mw *hns_roce_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type,
- struct ib_udata *udata)
+int hns_roce_alloc_mw(struct ib_mw *ibmw, u32 *rkey, struct ib_udata *udata)
{
- struct hns_roce_dev *hr_dev = to_hr_dev(ib_pd->device);
- struct hns_roce_mw *mw;
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibmw->device);
+ struct hns_roce_mw *mw = to_hr_mw(ibmw);
unsigned long index = 0;
int ret;
- mw = kmalloc(sizeof(*mw), GFP_KERNEL);
- if (!mw)
- return ERR_PTR(-ENOMEM);
-
/* Allocate a key for mw from bitmap */
ret = hns_roce_bitmap_alloc(&hr_dev->mr_table.mtpt_bitmap, &index);
if (ret)
- goto err_bitmap;
+ return ret;
mw->rkey = hw_index_to_key(index);
- mw->ibmw.rkey = mw->rkey;
- mw->ibmw.type = type;
- mw->pdn = to_hr_pd(ib_pd)->pdn;
+ *rkey = mw->rkey;
+ mw->pdn = to_hr_pd(ibmw->pd)->pdn;
mw->pbl_hop_num = hr_dev->caps.pbl_hop_num;
mw->pbl_ba_pg_sz = hr_dev->caps.pbl_ba_pg_sz;
mw->pbl_buf_pg_sz = hr_dev->caps.pbl_buf_pg_sz;
@@ -619,26 +613,19 @@ struct ib_mw *hns_roce_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type,
if (ret)
goto err_mw;
- return &mw->ibmw;
+ return 0;
err_mw:
hns_roce_mw_free(hr_dev, mw);
-
-err_bitmap:
- kfree(mw);
-
- return ERR_PTR(ret);
+ return ret;
}
-int hns_roce_dealloc_mw(struct ib_mw *ibmw)
+void hns_roce_dealloc_mw(struct ib_mw *ibmw)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibmw->device);
struct hns_roce_mw *mw = to_hr_mw(ibmw);
hns_roce_mw_free(hr_dev, mw);
- kfree(mw);
-
- return 0;
}
static int mtr_map_region(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 816d28854a8e..6471f47bd365 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -2602,6 +2602,8 @@ static const struct ib_device_ops mlx4_ib_dev_wq_ops = {
static const struct ib_device_ops mlx4_ib_dev_mw_ops = {
.alloc_mw = mlx4_ib_alloc_mw,
.dealloc_mw = mlx4_ib_dealloc_mw,
+
+ INIT_RDMA_OBJ_SIZE(ib_mw, mlx4_ib_mw, ibmw),
};
static const struct ib_device_ops mlx4_ib_dev_xrc_ops = {
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index 6f4ea1067095..5fbe766aef6b 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -725,9 +725,8 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u64 virt_addr, int access_flags,
struct ib_udata *udata);
int mlx4_ib_dereg_mr(struct ib_mr *mr, struct ib_udata *udata);
-struct ib_mw *mlx4_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
- struct ib_udata *udata);
-int mlx4_ib_dealloc_mw(struct ib_mw *mw);
+int mlx4_ib_alloc_mw(struct ib_mw *mw, u32 *rkey, struct ib_udata *udata);
+void mlx4_ib_dealloc_mw(struct ib_mw *mw);
struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
u32 max_num_sg, struct ib_udata *udata);
int mlx4_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index d7c78f841d2f..82d7d8dcc9d4 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -610,47 +610,35 @@ int mlx4_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
return 0;
}
-struct ib_mw *mlx4_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
- struct ib_udata *udata)
+int mlx4_ib_alloc_mw(struct ib_mw *ibmw, u32 *rkey, struct ib_udata *udata)
{
- struct mlx4_ib_dev *dev = to_mdev(pd->device);
- struct mlx4_ib_mw *mw;
+ struct mlx4_ib_dev *dev = to_mdev(ibmw->device);
+ struct mlx4_ib_mw *mw = to_mmw(ibmw);
int err;
- mw = kmalloc(sizeof(*mw), GFP_KERNEL);
- if (!mw)
- return ERR_PTR(-ENOMEM);
-
- err = mlx4_mw_alloc(dev->dev, to_mpd(pd)->pdn,
- to_mlx4_type(type), &mw->mmw);
+ err = mlx4_mw_alloc(dev->dev, to_mpd(ibmw->pd)->pdn,
+ to_mlx4_type(ibmw->type), &mw->mmw);
if (err)
- goto err_free;
+ return err;
err = mlx4_mw_enable(dev->dev, &mw->mmw);
if (err)
goto err_mw;
- mw->ibmw.rkey = mw->mmw.key;
+ *rkey = mw->mmw.key;
- return &mw->ibmw;
+ return 0;
err_mw:
mlx4_mw_free(dev->dev, &mw->mmw);
-
-err_free:
- kfree(mw);
-
- return ERR_PTR(err);
+ return err;
}
-int mlx4_ib_dealloc_mw(struct ib_mw *ibmw)
+void mlx4_ib_dealloc_mw(struct ib_mw *ibmw)
{
struct mlx4_ib_mw *mw = to_mmw(ibmw);
mlx4_mw_free(to_mdev(ibmw->device)->dev, &mw->mmw);
- kfree(mw);
-
- return 0;
}
struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index ca35394a181a..8b0b1f95af2f 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -6669,6 +6669,8 @@ static const struct ib_device_ops mlx5_ib_dev_sriov_ops = {
static const struct ib_device_ops mlx5_ib_dev_mw_ops = {
.alloc_mw = mlx5_ib_alloc_mw,
.dealloc_mw = mlx5_ib_dealloc_mw,
+
+ INIT_RDMA_OBJ_SIZE(ib_mw, mlx5_ib_mw, ibmw),
};
static const struct ib_device_ops mlx5_ib_dev_xrc_ops = {
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 26545e88709d..d84f56517caa 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -1195,9 +1195,8 @@ int mlx5_ib_advise_mr(struct ib_pd *pd,
struct ib_sge *sg_list,
u32 num_sge,
struct uverbs_attr_bundle *attrs);
-struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
- struct ib_udata *udata);
-int mlx5_ib_dealloc_mw(struct ib_mw *mw);
+int mlx5_ib_alloc_mw(struct ib_mw *mw, u32 *rkey, struct ib_udata *udata);
+void mlx5_ib_dealloc_mw(struct ib_mw *mw);
int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
int page_shift, int flags);
struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 44683073be0c..58a3d62937a1 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -1973,12 +1973,11 @@ struct ib_mr *mlx5_ib_alloc_mr_integrity(struct ib_pd *pd,
max_num_meta_sg);
}
-struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
- struct ib_udata *udata)
+int mlx5_ib_alloc_mw(struct ib_mw *ibmw, u32 *rkey, struct ib_udata *udata)
{
- struct mlx5_ib_dev *dev = to_mdev(pd->device);
+ struct mlx5_ib_dev *dev = to_mdev(ibmw->device);
int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
- struct mlx5_ib_mw *mw = NULL;
+ struct mlx5_ib_mw *mw = to_mmw(ibmw);
u32 *in = NULL;
void *mkc;
int ndescs;
@@ -1991,21 +1990,20 @@ struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
err = ib_copy_from_udata(&req, udata, min(udata->inlen, sizeof(req)));
if (err)
- return ERR_PTR(err);
+ return err;
if (req.comp_mask || req.reserved1 || req.reserved2)
- return ERR_PTR(-EOPNOTSUPP);
+ return -EOPNOTSUPP;
if (udata->inlen > sizeof(req) &&
!ib_is_udata_cleared(udata, sizeof(req),
udata->inlen - sizeof(req)))
- return ERR_PTR(-EOPNOTSUPP);
+ return -EOPNOTSUPP;
ndescs = req.num_klms ? roundup(req.num_klms, 4) : roundup(1, 4);
- mw = kzalloc(sizeof(*mw), GFP_KERNEL);
in = kzalloc(inlen, GFP_KERNEL);
- if (!mw || !in) {
+ if (!in) {
err = -ENOMEM;
goto free;
}
@@ -2014,11 +2012,11 @@ struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
MLX5_SET(mkc, mkc, free, 1);
MLX5_SET(mkc, mkc, translations_octword_size, ndescs);
- MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
+ MLX5_SET(mkc, mkc, pd, to_mpd(ibmw->pd)->pdn);
MLX5_SET(mkc, mkc, umr_en, 1);
MLX5_SET(mkc, mkc, lr, 1);
MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_KLMS);
- MLX5_SET(mkc, mkc, en_rinval, !!((type == IB_MW_TYPE_2)));
+ MLX5_SET(mkc, mkc, en_rinval, !!((ibmw->type == IB_MW_TYPE_2)));
MLX5_SET(mkc, mkc, qpn, 0xffffff);
err = mlx5_ib_create_mkey(dev, &mw->mmkey, in, inlen);
@@ -2026,17 +2024,15 @@ struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
goto free;
mw->mmkey.type = MLX5_MKEY_MW;
- mw->ibmw.rkey = mw->mmkey.key;
+ *rkey = mw->mmkey.key;
mw->ndescs = ndescs;
resp.response_length = min(offsetof(typeof(resp), response_length) +
sizeof(resp.response_length), udata->outlen);
if (resp.response_length) {
err = ib_copy_to_udata(udata, &resp, resp.response_length);
- if (err) {
- mlx5_core_destroy_mkey(dev->mdev, &mw->mmkey);
- goto free;
- }
+ if (err)
+ goto free_mkey;
}
if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
@@ -2048,21 +2044,19 @@ struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
}
kfree(in);
- return &mw->ibmw;
+ return 0;
free_mkey:
mlx5_core_destroy_mkey(dev->mdev, &mw->mmkey);
free:
- kfree(mw);
kfree(in);
- return ERR_PTR(err);
+ return err;
}
-int mlx5_ib_dealloc_mw(struct ib_mw *mw)
+void mlx5_ib_dealloc_mw(struct ib_mw *mw)
{
struct mlx5_ib_dev *dev = to_mdev(mw->device);
struct mlx5_ib_mw *mmw = to_mmw(mw);
- int err;
if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
xa_erase(&dev->odp_mkeys, mlx5_base_mkey(mmw->mmkey.key));
@@ -2073,11 +2067,7 @@ int mlx5_ib_dealloc_mw(struct ib_mw *mw)
synchronize_srcu(&dev->odp_srcu);
}
- err = mlx5_core_destroy_mkey(dev->mdev, &mmw->mmkey);
- if (err)
- return err;
- kfree(mmw);
- return 0;
+ mlx5_core_destroy_mkey(dev->mdev, &mmw->mmkey);
}
int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 9b69bc0a53cf..5241a32193db 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -2490,9 +2490,8 @@ struct ib_device_ops {
unsigned int *sg_offset);
int (*check_mr_status)(struct ib_mr *mr, u32 check_mask,
struct ib_mr_status *mr_status);
- struct ib_mw *(*alloc_mw)(struct ib_pd *pd, enum ib_mw_type type,
- struct ib_udata *udata);
- int (*dealloc_mw)(struct ib_mw *mw);
+ int (*alloc_mw)(struct ib_mw *mw, u32 *rkey, struct ib_udata *udata);
+ void (*dealloc_mw)(struct ib_mw *mw);
int (*attach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid);
int (*detach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid);
struct ib_xrcd *(*alloc_xrcd)(struct ib_device *device,
@@ -2653,6 +2652,7 @@ struct ib_device_ops {
DECLARE_RDMA_OBJ_SIZE(ib_ah);
DECLARE_RDMA_OBJ_SIZE(ib_counters);
DECLARE_RDMA_OBJ_SIZE(ib_cq);
+ DECLARE_RDMA_OBJ_SIZE(ib_mw);
DECLARE_RDMA_OBJ_SIZE(ib_pd);
DECLARE_RDMA_OBJ_SIZE(ib_srq);
DECLARE_RDMA_OBJ_SIZE(ib_ucontext);
--
2.26.2
* Re: [PATCH rdma-next v1 2/4] RDMA: Clean MW allocation and free flows
From: Jason Gunthorpe @ 2020-07-06 23:04 UTC
To: Leon Romanovsky
Cc: Doug Ledford, Leon Romanovsky, Lijun Ou, linux-rdma,
Potnuri Bharat Teja, Weihang Li, Wei Hu(Xavier),
Yishai Hadas
On Tue, Jun 30, 2020 at 01:18:53PM +0300, Leon Romanovsky wrote:
> @@ -916,21 +916,24 @@ static int ib_uverbs_alloc_mw(struct uverbs_attr_bundle *attrs)
> goto err_put;
> }
>
> - mw = pd->device->ops.alloc_mw(pd, cmd.mw_type, &attrs->driver_udata);
> - if (IS_ERR(mw)) {
> - ret = PTR_ERR(mw);
> + mw = rdma_zalloc_drv_obj(ib_dev, ib_mw);
> + if (!mw) {
> + ret = -ENOMEM;
> goto err_put;
> }
>
> - mw->device = pd->device;
> - mw->pd = pd;
> + mw->device = ib_dev;
> + mw->pd = pd;
> mw->uobject = uobj;
> - atomic_inc(&pd->usecnt);
> -
> uobj->object = mw;
> + mw->type = cmd.mw_type;
>
> - memset(&resp, 0, sizeof(resp));
> - resp.rkey = mw->rkey;
> + ret = pd->device->ops.alloc_mw(mw, &mw->rkey, &attrs->driver_udata);
Why the strange &mw->rkey ? Can't the drivers just do mw->rkey = foo ?
Jason
* Re: [PATCH rdma-next v1 2/4] RDMA: Clean MW allocation and free flows
From: Leon Romanovsky @ 2020-07-07 4:42 UTC
To: Jason Gunthorpe
Cc: Doug Ledford, Lijun Ou, linux-rdma, Potnuri Bharat Teja,
Weihang Li, Wei Hu(Xavier),
Yishai Hadas
On Mon, Jul 06, 2020 at 08:04:16PM -0300, Jason Gunthorpe wrote:
> On Tue, Jun 30, 2020 at 01:18:53PM +0300, Leon Romanovsky wrote:
> > @@ -916,21 +916,24 @@ static int ib_uverbs_alloc_mw(struct uverbs_attr_bundle *attrs)
> > goto err_put;
> > }
> >
> > - mw = pd->device->ops.alloc_mw(pd, cmd.mw_type, &attrs->driver_udata);
> > - if (IS_ERR(mw)) {
> > - ret = PTR_ERR(mw);
> > + mw = rdma_zalloc_drv_obj(ib_dev, ib_mw);
> > + if (!mw) {
> > + ret = -ENOMEM;
> > goto err_put;
> > }
> >
> > - mw->device = pd->device;
> > - mw->pd = pd;
> > + mw->device = ib_dev;
> > + mw->pd = pd;
> > mw->uobject = uobj;
> > - atomic_inc(&pd->usecnt);
> > -
> > uobj->object = mw;
> > + mw->type = cmd.mw_type;
> >
> > - memset(&resp, 0, sizeof(resp));
> > - resp.rkey = mw->rkey;
> > + ret = pd->device->ops.alloc_mw(mw, &mw->rkey, &attrs->driver_udata);
>
> Why the strange &mw->rkey ? Can't the drivers just do mw->rkey = foo ?
We can, if we want to allow drivers to set fields in ib_* structures
that are passed as part of the alloc_* flows. It doesn't feel right to
me to mix different layers.
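For example, in the cxgb4 hunk the driver now does:

	*rkey = stag;

instead of reaching into the ib_core-owned structure with:

	mhp->ibmw.rkey = stag;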
Thanks
>
> Jason
* Re: [PATCH rdma-next v1 2/4] RDMA: Clean MW allocation and free flows
From: Jason Gunthorpe @ 2020-07-07 11:21 UTC
To: Leon Romanovsky
Cc: Doug Ledford, Lijun Ou, linux-rdma, Potnuri Bharat Teja,
Weihang Li, Wei Hu(Xavier),
Yishai Hadas
On Tue, Jul 07, 2020 at 07:42:03AM +0300, Leon Romanovsky wrote:
> On Mon, Jul 06, 2020 at 08:04:16PM -0300, Jason Gunthorpe wrote:
> > On Tue, Jun 30, 2020 at 01:18:53PM +0300, Leon Romanovsky wrote:
> > > @@ -916,21 +916,24 @@ static int ib_uverbs_alloc_mw(struct uverbs_attr_bundle *attrs)
> > > goto err_put;
> > > }
> > >
> > > - mw = pd->device->ops.alloc_mw(pd, cmd.mw_type, &attrs->driver_udata);
> > > - if (IS_ERR(mw)) {
> > > - ret = PTR_ERR(mw);
> > > + mw = rdma_zalloc_drv_obj(ib_dev, ib_mw);
> > > + if (!mw) {
> > > + ret = -ENOMEM;
> > > goto err_put;
> > > }
> > >
> > > - mw->device = pd->device;
> > > - mw->pd = pd;
> > > + mw->device = ib_dev;
> > > + mw->pd = pd;
> > > mw->uobject = uobj;
> > > - atomic_inc(&pd->usecnt);
> > > -
> > > uobj->object = mw;
> > > + mw->type = cmd.mw_type;
> > >
> > > - memset(&resp, 0, sizeof(resp));
> > > - resp.rkey = mw->rkey;
> > > + ret = pd->device->ops.alloc_mw(mw, &mw->rkey, &attrs->driver_udata);
> >
> > Why the strange &mw->rkey ? Can't the drivers just do mw->rkey = foo ?
>
> > We can, if we want to allow drivers to set fields in ib_* structures
> > that are passed as part of the alloc_* flows.
This is better than passing weird loose pointers around
Jason
* Re: [PATCH rdma-next v1 2/4] RDMA: Clean MW allocation and free flows
From: Leon Romanovsky @ 2020-07-07 12:15 UTC
To: Jason Gunthorpe
Cc: Doug Ledford, Lijun Ou, linux-rdma, Potnuri Bharat Teja,
Weihang Li, Wei Hu(Xavier),
Yishai Hadas
On Tue, Jul 07, 2020 at 08:21:09AM -0300, Jason Gunthorpe wrote:
> On Tue, Jul 07, 2020 at 07:42:03AM +0300, Leon Romanovsky wrote:
> > On Mon, Jul 06, 2020 at 08:04:16PM -0300, Jason Gunthorpe wrote:
> > > On Tue, Jun 30, 2020 at 01:18:53PM +0300, Leon Romanovsky wrote:
> > > > @@ -916,21 +916,24 @@ static int ib_uverbs_alloc_mw(struct uverbs_attr_bundle *attrs)
> > > > goto err_put;
> > > > }
> > > >
> > > > - mw = pd->device->ops.alloc_mw(pd, cmd.mw_type, &attrs->driver_udata);
> > > > - if (IS_ERR(mw)) {
> > > > - ret = PTR_ERR(mw);
> > > > + mw = rdma_zalloc_drv_obj(ib_dev, ib_mw);
> > > > + if (!mw) {
> > > > + ret = -ENOMEM;
> > > > goto err_put;
> > > > }
> > > >
> > > > - mw->device = pd->device;
> > > > - mw->pd = pd;
> > > > + mw->device = ib_dev;
> > > > + mw->pd = pd;
> > > > mw->uobject = uobj;
> > > > - atomic_inc(&pd->usecnt);
> > > > -
> > > > uobj->object = mw;
> > > > + mw->type = cmd.mw_type;
> > > >
> > > > - memset(&resp, 0, sizeof(resp));
> > > > - resp.rkey = mw->rkey;
> > > > + ret = pd->device->ops.alloc_mw(mw, &mw->rkey, &attrs->driver_udata);
> > >
> > > Why the strange &mw->rkey ? Can't the drivers just do mw->rkey = foo ?
> >
> > We can, if we want to allow drivers to set fields in ib_* structures
> > that are passed as part of the alloc_* flows.
>
> This is better than passing weird loose pointers around
I don't think that it is the right approach, but I'll change.
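So v2 will look something like:

	int (*alloc_mw)(struct ib_mw *mw, struct ib_udata *udata);

with each driver assigning mw->rkey directly.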
Thanks
>
> Jason
* [PATCH rdma-next v1 3/4] RDMA: Move XRCD to be under ib_core responsibility
From: Leon Romanovsky @ 2020-06-30 10:18 UTC
To: Doug Ledford, Jason Gunthorpe; +Cc: Leon Romanovsky, linux-rdma, Yishai Hadas
From: Leon Romanovsky <leonro@mellanox.com>
Update the code to allocate and free the ib_xrcd structure in
ib_core instead of inside the drivers.
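The resulting flow in ib_alloc_xrcd_user() is condensed below (error
unwinding trimmed):

	xrcd = rdma_zalloc_drv_obj(device, ib_xrcd);
	if (!xrcd)
		return ERR_PTR(-ENOMEM);

	/* ib_core initializes the shared state */
	xrcd->device = device;
	xrcd->inode = inode;
	atomic_set(&xrcd->usecnt, 0);
	init_rwsem(&xrcd->tgt_qps_rwsem);
	xa_init(&xrcd->tgt_qps);

	/* the driver op returns int and fills only its private part */
	ret = device->ops.alloc_xrcd(xrcd, udata);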
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
---
drivers/infiniband/core/device.c | 1 +
drivers/infiniband/core/verbs.c | 28 ++++++++++++++-------
drivers/infiniband/hw/mlx4/main.c | 37 +++++++++++-----------------
drivers/infiniband/hw/mlx5/main.c | 2 ++
drivers/infiniband/hw/mlx5/mlx5_ib.h | 5 ++--
drivers/infiniband/hw/mlx5/qp.c | 34 +++++++------------------
include/rdma/ib_verbs.h | 6 ++---
7 files changed, 51 insertions(+), 62 deletions(-)
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 257e8a667977..b15fa3fa09ac 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -2688,6 +2688,7 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
SET_OBJ_SIZE(dev_ops, ib_pd);
SET_OBJ_SIZE(dev_ops, ib_srq);
SET_OBJ_SIZE(dev_ops, ib_ucontext);
+ SET_OBJ_SIZE(dev_ops, ib_xrcd);
}
EXPORT_SYMBOL(ib_set_device_ops);
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 8f320c1a2a9d..c41e3fdef5d6 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -2294,20 +2294,28 @@ struct ib_xrcd *ib_alloc_xrcd_user(struct ib_device *device,
struct inode *inode, struct ib_udata *udata)
{
struct ib_xrcd *xrcd;
+ int ret;
if (!device->ops.alloc_xrcd)
return ERR_PTR(-EOPNOTSUPP);
- xrcd = device->ops.alloc_xrcd(device, udata);
- if (!IS_ERR(xrcd)) {
- xrcd->device = device;
- xrcd->inode = inode;
- atomic_set(&xrcd->usecnt, 0);
- init_rwsem(&xrcd->tgt_qps_rwsem);
- xa_init(&xrcd->tgt_qps);
- }
+ xrcd = rdma_zalloc_drv_obj(device, ib_xrcd);
+ if (!xrcd)
+ return ERR_PTR(-ENOMEM);
+ xrcd->device = device;
+ xrcd->inode = inode;
+ atomic_set(&xrcd->usecnt, 0);
+ init_rwsem(&xrcd->tgt_qps_rwsem);
+ xa_init(&xrcd->tgt_qps);
+
+ ret = device->ops.alloc_xrcd(xrcd, udata);
+ if (ret)
+ goto err;
return xrcd;
+err:
+ kfree(xrcd);
+ return ERR_PTR(ret);
}
EXPORT_SYMBOL(ib_alloc_xrcd_user);
@@ -2317,7 +2325,9 @@ int ib_dealloc_xrcd_user(struct ib_xrcd *xrcd, struct ib_udata *udata)
return -EBUSY;
WARN_ON(!xa_empty(&xrcd->tgt_qps));
- return xrcd->device->ops.dealloc_xrcd(xrcd, udata);
+ xrcd->device->ops.dealloc_xrcd(xrcd, udata);
+ kfree(xrcd);
+ return 0;
}
EXPORT_SYMBOL(ib_dealloc_xrcd_user);
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 6471f47bd365..0ad584a3b8d6 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -1219,56 +1219,47 @@ static void mlx4_ib_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
mlx4_pd_free(to_mdev(pd->device)->dev, to_mpd(pd)->pdn);
}
-static struct ib_xrcd *mlx4_ib_alloc_xrcd(struct ib_device *ibdev,
- struct ib_udata *udata)
+static int mlx4_ib_alloc_xrcd(struct ib_xrcd *ibxrcd, struct ib_udata *udata)
{
- struct mlx4_ib_xrcd *xrcd;
+ struct mlx4_ib_dev *dev = to_mdev(ibxrcd->device);
+ struct mlx4_ib_xrcd *xrcd = to_mxrcd(ibxrcd);
struct ib_cq_init_attr cq_attr = {};
int err;
- if (!(to_mdev(ibdev)->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC))
- return ERR_PTR(-ENOSYS);
-
- xrcd = kmalloc(sizeof *xrcd, GFP_KERNEL);
- if (!xrcd)
- return ERR_PTR(-ENOMEM);
+ if (!(dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC))
+ return -EOPNOTSUPP;
- err = mlx4_xrcd_alloc(to_mdev(ibdev)->dev, &xrcd->xrcdn);
+ err = mlx4_xrcd_alloc(dev->dev, &xrcd->xrcdn);
if (err)
- goto err1;
+ return err;
- xrcd->pd = ib_alloc_pd(ibdev, 0);
+ xrcd->pd = ib_alloc_pd(ibxrcd->device, 0);
if (IS_ERR(xrcd->pd)) {
err = PTR_ERR(xrcd->pd);
goto err2;
}
cq_attr.cqe = 1;
- xrcd->cq = ib_create_cq(ibdev, NULL, NULL, xrcd, &cq_attr);
+ xrcd->cq = ib_create_cq(ibxrcd->device, NULL, NULL, xrcd, &cq_attr);
if (IS_ERR(xrcd->cq)) {
err = PTR_ERR(xrcd->cq);
goto err3;
}
- return &xrcd->ibxrcd;
+ return 0;
err3:
ib_dealloc_pd(xrcd->pd);
err2:
- mlx4_xrcd_free(to_mdev(ibdev)->dev, xrcd->xrcdn);
-err1:
- kfree(xrcd);
- return ERR_PTR(err);
+ mlx4_xrcd_free(dev->dev, xrcd->xrcdn);
+ return err;
}
-static int mlx4_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata)
+static void mlx4_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata)
{
ib_destroy_cq(to_mxrcd(xrcd)->cq);
ib_dealloc_pd(to_mxrcd(xrcd)->pd);
mlx4_xrcd_free(to_mdev(xrcd->device)->dev, to_mxrcd(xrcd)->xrcdn);
- kfree(xrcd);
-
- return 0;
}
static int add_gid_entry(struct ib_qp *ibqp, union ib_gid *gid)
@@ -2609,6 +2600,8 @@ static const struct ib_device_ops mlx4_ib_dev_mw_ops = {
static const struct ib_device_ops mlx4_ib_dev_xrc_ops = {
.alloc_xrcd = mlx4_ib_alloc_xrcd,
.dealloc_xrcd = mlx4_ib_dealloc_xrcd,
+
+ INIT_RDMA_OBJ_SIZE(ib_xrcd, mlx4_ib_xrcd, ibxrcd),
};
static const struct ib_device_ops mlx4_ib_dev_fs_ops = {
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 8b0b1f95af2f..a6d5c35a2845 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -6676,6 +6676,8 @@ static const struct ib_device_ops mlx5_ib_dev_mw_ops = {
static const struct ib_device_ops mlx5_ib_dev_xrc_ops = {
.alloc_xrcd = mlx5_ib_alloc_xrcd,
.dealloc_xrcd = mlx5_ib_dealloc_xrcd,
+
+ INIT_RDMA_OBJ_SIZE(ib_xrcd, mlx5_ib_xrcd, ibxrcd),
};
static const struct ib_device_ops mlx5_ib_dev_dm_ops = {
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index d84f56517caa..726386fe4440 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -1223,9 +1223,8 @@ int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
const struct ib_wc *in_wc, const struct ib_grh *in_grh,
const struct ib_mad *in, struct ib_mad *out,
size_t *out_mad_size, u16 *out_mad_pkey_index);
-struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
- struct ib_udata *udata);
-int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata);
+int mlx5_ib_alloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata);
+void mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata);
int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset);
int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port);
int mlx5_query_mad_ifc_smp_attr_node_info(struct ib_device *ibdev,
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index f939c9b769f0..0ae73f4e28a3 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -4700,41 +4700,25 @@ int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
return err;
}
-struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
- struct ib_udata *udata)
+int mlx5_ib_alloc_xrcd(struct ib_xrcd *ibxrcd, struct ib_udata *udata)
{
- struct mlx5_ib_dev *dev = to_mdev(ibdev);
- struct mlx5_ib_xrcd *xrcd;
- int err;
+ struct mlx5_ib_dev *dev = to_mdev(ibxrcd->device);
+ struct mlx5_ib_xrcd *xrcd = to_mxrcd(ibxrcd);
+ int ret;
if (!MLX5_CAP_GEN(dev->mdev, xrc))
- return ERR_PTR(-ENOSYS);
-
- xrcd = kmalloc(sizeof(*xrcd), GFP_KERNEL);
- if (!xrcd)
- return ERR_PTR(-ENOMEM);
-
- err = mlx5_cmd_xrcd_alloc(dev->mdev, &xrcd->xrcdn, 0);
- if (err) {
- kfree(xrcd);
- return ERR_PTR(-ENOMEM);
- }
+ return -EOPNOTSUPP;
- return &xrcd->ibxrcd;
+ ret = mlx5_cmd_xrcd_alloc(dev->mdev, &xrcd->xrcdn, 0);
+ return ret;
}
-int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata)
+void mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata)
{
struct mlx5_ib_dev *dev = to_mdev(xrcd->device);
u32 xrcdn = to_mxrcd(xrcd)->xrcdn;
- int err;
- err = mlx5_cmd_xrcd_dealloc(dev->mdev, xrcdn, 0);
- if (err)
- mlx5_ib_warn(dev, "failed to dealloc xrcdn 0x%x\n", xrcdn);
-
- kfree(xrcd);
- return 0;
+ mlx5_cmd_xrcd_dealloc(dev->mdev, xrcdn, 0);
}
static void mlx5_ib_wq_event(struct mlx5_core_qp *core_qp, int type)
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 5241a32193db..6e2cb69fe90b 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -2494,9 +2494,8 @@ struct ib_device_ops {
void (*dealloc_mw)(struct ib_mw *mw);
int (*attach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid);
int (*detach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid);
- struct ib_xrcd *(*alloc_xrcd)(struct ib_device *device,
- struct ib_udata *udata);
- int (*dealloc_xrcd)(struct ib_xrcd *xrcd, struct ib_udata *udata);
+ int (*alloc_xrcd)(struct ib_xrcd *xrcd, struct ib_udata *udata);
+ void (*dealloc_xrcd)(struct ib_xrcd *xrcd, struct ib_udata *udata);
struct ib_flow *(*create_flow)(struct ib_qp *qp,
struct ib_flow_attr *flow_attr,
int domain, struct ib_udata *udata);
@@ -2656,6 +2655,7 @@ struct ib_device_ops {
DECLARE_RDMA_OBJ_SIZE(ib_pd);
DECLARE_RDMA_OBJ_SIZE(ib_srq);
DECLARE_RDMA_OBJ_SIZE(ib_ucontext);
+ DECLARE_RDMA_OBJ_SIZE(ib_xrcd);
};
struct ib_core_device {
--
2.26.2
* [PATCH rdma-next v1 4/4] RDMA/core: Convert RWQ table logic to ib_core allocation scheme
From: Leon Romanovsky @ 2020-06-30 10:18 UTC
To: Doug Ledford, Jason Gunthorpe; +Cc: Leon Romanovsky, linux-rdma, Yishai Hadas
From: Leon Romanovsky <leonro@mellanox.com>
Move struct ib_rwq_ind_table allocation to ib_core.
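The op signatures change as follows (taken from the ib_verbs.h hunk below);
the driver reports the table number through the new ind_tbl_num argument
instead of setting it in the ib_core structure:

	int (*create_rwq_ind_table)(struct ib_rwq_ind_table *ib_rwq_ind_table,
				    struct ib_rwq_ind_table_init_attr *init_attr,
				    u32 *ind_tbl_num, struct ib_udata *udata);
	void (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *wq_ind_table);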
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
---
drivers/infiniband/core/device.c | 1 +
drivers/infiniband/core/uverbs_cmd.c | 37 ++++++++++++-------
drivers/infiniband/core/uverbs_std_types.c | 16 +++++---
drivers/infiniband/core/verbs.c | 23 ------------
drivers/infiniband/hw/mlx4/main.c | 4 +-
drivers/infiniband/hw/mlx4/mlx4_ib.h | 12 +++---
drivers/infiniband/hw/mlx4/qp.c | 40 ++++++--------------
drivers/infiniband/hw/mlx5/main.c | 3 ++
drivers/infiniband/hw/mlx5/mlx5_ib.h | 8 ++--
drivers/infiniband/hw/mlx5/qp.c | 43 +++++++++-------------
include/rdma/ib_verbs.h | 11 +++---
11 files changed, 85 insertions(+), 113 deletions(-)
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index b15fa3fa09ac..85d921c8e2b5 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -2686,6 +2686,7 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
SET_OBJ_SIZE(dev_ops, ib_cq);
SET_OBJ_SIZE(dev_ops, ib_mw);
SET_OBJ_SIZE(dev_ops, ib_pd);
+ SET_OBJ_SIZE(dev_ops, ib_rwq_ind_table);
SET_OBJ_SIZE(dev_ops, ib_srq);
SET_OBJ_SIZE(dev_ops, ib_ucontext);
SET_OBJ_SIZE(dev_ops, ib_xrcd);
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index a5301f3d3059..a83d11d1e3ee 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -3090,13 +3090,13 @@ static int ib_uverbs_ex_create_rwq_ind_table(struct uverbs_attr_bundle *attrs)
{
struct ib_uverbs_ex_create_rwq_ind_table cmd;
struct ib_uverbs_ex_create_rwq_ind_table_resp resp = {};
- struct ib_uobject *uobj;
+ struct ib_uobject *uobj;
int err;
struct ib_rwq_ind_table_init_attr init_attr = {};
struct ib_rwq_ind_table *rwq_ind_tbl;
- struct ib_wq **wqs = NULL;
+ struct ib_wq **wqs = NULL;
u32 *wqs_handles = NULL;
- struct ib_wq *wq = NULL;
+ struct ib_wq *wq = NULL;
int i, j, num_read_wqs;
u32 num_wq_handles;
struct uverbs_req_iter iter;
@@ -3151,17 +3151,15 @@ static int ib_uverbs_ex_create_rwq_ind_table(struct uverbs_attr_bundle *attrs)
goto put_wqs;
}
- init_attr.log_ind_tbl_size = cmd.log_ind_tbl_size;
- init_attr.ind_tbl = wqs;
-
- rwq_ind_tbl = ib_dev->ops.create_rwq_ind_table(ib_dev, &init_attr,
- &attrs->driver_udata);
-
- if (IS_ERR(rwq_ind_tbl)) {
- err = PTR_ERR(rwq_ind_tbl);
+ rwq_ind_tbl = rdma_zalloc_drv_obj(ib_dev, ib_rwq_ind_table);
+ if (!rwq_ind_tbl) {
+ err = -ENOMEM;
goto err_uobj;
}
+ init_attr.log_ind_tbl_size = cmd.log_ind_tbl_size;
+ init_attr.ind_tbl = wqs;
+
rwq_ind_tbl->ind_tbl = wqs;
rwq_ind_tbl->log_ind_tbl_size = init_attr.log_ind_tbl_size;
rwq_ind_tbl->uobject = uobj;
@@ -3169,6 +3167,12 @@ static int ib_uverbs_ex_create_rwq_ind_table(struct uverbs_attr_bundle *attrs)
rwq_ind_tbl->device = ib_dev;
atomic_set(&rwq_ind_tbl->usecnt, 0);
+ err = ib_dev->ops.create_rwq_ind_table(rwq_ind_tbl, &init_attr,
+ &rwq_ind_tbl->ind_tbl_num,
+ &attrs->driver_udata);
+ if (err)
+ goto err_create;
+
for (i = 0; i < num_wq_handles; i++)
atomic_inc(&wqs[i]->usecnt);
@@ -3190,7 +3194,13 @@ static int ib_uverbs_ex_create_rwq_ind_table(struct uverbs_attr_bundle *attrs)
return 0;
err_copy:
- ib_destroy_rwq_ind_table(rwq_ind_tbl);
+ for (i = 0; i < num_wq_handles; i++)
+ atomic_dec(&wqs[i]->usecnt);
+
+ if (ib_dev->ops.destroy_rwq_ind_table)
+ ib_dev->ops.destroy_rwq_ind_table(rwq_ind_tbl);
+err_create:
+ kfree(rwq_ind_tbl);
err_uobj:
uobj_alloc_abort(uobj, attrs);
put_wqs:
@@ -4018,8 +4028,7 @@ const struct uapi_definition uverbs_def_write_intf[] = {
IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL,
ib_uverbs_ex_destroy_rwq_ind_table,
UAPI_DEF_WRITE_I(
- struct ib_uverbs_ex_destroy_rwq_ind_table),
- UAPI_DEF_METHOD_NEEDS_FN(destroy_rwq_ind_table))),
+ struct ib_uverbs_ex_destroy_rwq_ind_table))),
DECLARE_UVERBS_OBJECT(
UVERBS_OBJECT_WQ,
diff --git a/drivers/infiniband/core/uverbs_std_types.c b/drivers/infiniband/core/uverbs_std_types.c
index e4994cc4cc51..13c6b3066e4f 100644
--- a/drivers/infiniband/core/uverbs_std_types.c
+++ b/drivers/infiniband/core/uverbs_std_types.c
@@ -82,12 +82,21 @@ static int uverbs_free_rwq_ind_tbl(struct ib_uobject *uobject,
{
struct ib_rwq_ind_table *rwq_ind_tbl = uobject->object;
struct ib_wq **ind_tbl = rwq_ind_tbl->ind_tbl;
- int ret;
+ u32 table_size = (1 << rwq_ind_tbl->log_ind_tbl_size);
+ int ret = 0, i;
+
+ if (atomic_read(&rwq_ind_tbl->usecnt))
+ ret = -EBUSY;
- ret = ib_destroy_rwq_ind_table(rwq_ind_tbl);
if (ib_is_destroy_retryable(ret, why, uobject))
return ret;
+ for (i = 0; i < table_size; i++)
+ atomic_dec(&ind_tbl[i]->usecnt);
+
+ if (rwq_ind_tbl->device->ops.destroy_rwq_ind_table)
+ rwq_ind_tbl->device->ops.destroy_rwq_ind_table(rwq_ind_tbl);
+ kfree(rwq_ind_tbl);
kfree(ind_tbl);
return ret;
}
@@ -258,9 +267,6 @@ const struct uapi_definition uverbs_def_obj_intf[] = {
UAPI_DEF_OBJ_NEEDS_FN(dealloc_mw)),
UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_FLOW,
UAPI_DEF_OBJ_NEEDS_FN(destroy_flow)),
- UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
- UVERBS_OBJECT_RWQ_IND_TBL,
- UAPI_DEF_OBJ_NEEDS_FN(destroy_rwq_ind_table)),
UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_XRCD,
UAPI_DEF_OBJ_NEEDS_FN(dealloc_xrcd)),
{}
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index c41e3fdef5d6..be4c6ee3804a 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -2412,29 +2412,6 @@ int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
}
EXPORT_SYMBOL(ib_modify_wq);
-/*
- * ib_destroy_rwq_ind_table - Destroys the specified Indirection Table.
- * @wq_ind_table: The Indirection Table to destroy.
-*/
-int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *rwq_ind_table)
-{
- int err, i;
- u32 table_size = (1 << rwq_ind_table->log_ind_tbl_size);
- struct ib_wq **ind_tbl = rwq_ind_table->ind_tbl;
-
- if (atomic_read(&rwq_ind_table->usecnt))
- return -EBUSY;
-
- err = rwq_ind_table->device->ops.destroy_rwq_ind_table(rwq_ind_table);
- if (!err) {
- for (i = 0; i < table_size; i++)
- atomic_dec(&ind_tbl[i]->usecnt);
- }
-
- return err;
-}
-EXPORT_SYMBOL(ib_destroy_rwq_ind_table);
-
int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
struct ib_mr_status *mr_status)
{
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 0ad584a3b8d6..88a3a9a88bee 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -2585,9 +2585,11 @@ static const struct ib_device_ops mlx4_ib_dev_ops = {
static const struct ib_device_ops mlx4_ib_dev_wq_ops = {
.create_rwq_ind_table = mlx4_ib_create_rwq_ind_table,
.create_wq = mlx4_ib_create_wq,
- .destroy_rwq_ind_table = mlx4_ib_destroy_rwq_ind_table,
.destroy_wq = mlx4_ib_destroy_wq,
.modify_wq = mlx4_ib_modify_wq,
+
+ INIT_RDMA_OBJ_SIZE(ib_rwq_ind_table, mlx4_ib_rwq_ind_table,
+ ib_rwq_ind_tbl),
};
static const struct ib_device_ops mlx4_ib_dev_mw_ops = {
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index 5fbe766aef6b..2730a0f65f47 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -366,6 +366,10 @@ struct mlx4_ib_ah {
union mlx4_ext_av av;
};
+struct mlx4_ib_rwq_ind_table {
+ struct ib_rwq_ind_table ib_rwq_ind_tbl;
+};
+
/****************************************/
/* alias guid support */
/****************************************/
@@ -893,11 +897,9 @@ void mlx4_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata);
int mlx4_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
u32 wq_attr_mask, struct ib_udata *udata);
-struct ib_rwq_ind_table
-*mlx4_ib_create_rwq_ind_table(struct ib_device *device,
- struct ib_rwq_ind_table_init_attr *init_attr,
- struct ib_udata *udata);
-int mlx4_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
+int mlx4_ib_create_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl,
+ struct ib_rwq_ind_table_init_attr *init_attr,
+ u32 *ind_tbl_num, struct ib_udata *udata);
int mlx4_ib_umem_calc_optimal_mtt_size(struct ib_umem *umem, u64 start_va,
int *num_of_mtts);
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index cf51e3cbd969..5795bfc1a512 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -4340,34 +4340,32 @@ void mlx4_ib_destroy_wq(struct ib_wq *ibwq, struct ib_udata *udata)
kfree(qp);
}
-struct ib_rwq_ind_table
-*mlx4_ib_create_rwq_ind_table(struct ib_device *device,
- struct ib_rwq_ind_table_init_attr *init_attr,
- struct ib_udata *udata)
+int mlx4_ib_create_rwq_ind_table(struct ib_rwq_ind_table *rwq_ind_table,
+ struct ib_rwq_ind_table_init_attr *init_attr,
+ u32 *ind_tbl_num, struct ib_udata *udata)
{
- struct ib_rwq_ind_table *rwq_ind_table;
struct mlx4_ib_create_rwq_ind_tbl_resp resp = {};
unsigned int ind_tbl_size = 1 << init_attr->log_ind_tbl_size;
+ struct ib_device *device = rwq_ind_table->device;
unsigned int base_wqn;
size_t min_resp_len;
- int i;
- int err;
+ int i, err = 0;
if (udata->inlen > 0 &&
!ib_is_udata_cleared(udata, 0,
udata->inlen))
- return ERR_PTR(-EOPNOTSUPP);
+ return -EOPNOTSUPP;
min_resp_len = offsetof(typeof(resp), reserved) + sizeof(resp.reserved);
if (udata->outlen && udata->outlen < min_resp_len)
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
if (ind_tbl_size >
device->attrs.rss_caps.max_rwq_indirection_table_size) {
pr_debug("log_ind_tbl_size = %d is bigger than supported = %d\n",
ind_tbl_size,
device->attrs.rss_caps.max_rwq_indirection_table_size);
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
}
base_wqn = init_attr->ind_tbl[0]->wq_num;
@@ -4375,39 +4373,23 @@ struct ib_rwq_ind_table
if (base_wqn % ind_tbl_size) {
pr_debug("WQN=0x%x isn't aligned with indirection table size\n",
base_wqn);
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
}
for (i = 1; i < ind_tbl_size; i++) {
if (++base_wqn != init_attr->ind_tbl[i]->wq_num) {
pr_debug("indirection table's WQNs aren't consecutive\n");
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
}
}
- rwq_ind_table = kzalloc(sizeof(*rwq_ind_table), GFP_KERNEL);
- if (!rwq_ind_table)
- return ERR_PTR(-ENOMEM);
-
if (udata->outlen) {
resp.response_length = offsetof(typeof(resp), response_length) +
sizeof(resp.response_length);
err = ib_copy_to_udata(udata, &resp, resp.response_length);
- if (err)
- goto err;
}
- return rwq_ind_table;
-
-err:
- kfree(rwq_ind_table);
- return ERR_PTR(err);
-}
-
-int mlx4_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl)
-{
- kfree(ib_rwq_ind_tbl);
- return 0;
+ return err;
}
struct mlx4_ib_drain_cqe {
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index a6d5c35a2845..682ba1b6f34a 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -6844,6 +6844,9 @@ static const struct ib_device_ops mlx5_ib_dev_common_roce_ops = {
.destroy_wq = mlx5_ib_destroy_wq,
.get_netdev = mlx5_ib_get_netdev,
.modify_wq = mlx5_ib_modify_wq,
+
+ INIT_RDMA_OBJ_SIZE(ib_rwq_ind_table, mlx5_ib_rwq_ind_table,
+ ib_rwq_ind_tbl),
};
static int mlx5_ib_stage_common_roce_init(struct mlx5_ib_dev *dev)
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 726386fe4440..c4a285e1950d 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -1272,10 +1272,10 @@ struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
void mlx5_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata);
int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
u32 wq_attr_mask, struct ib_udata *udata);
-struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
- struct ib_rwq_ind_table_init_attr *init_attr,
- struct ib_udata *udata);
-int mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
+int mlx5_ib_create_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_table,
+ struct ib_rwq_ind_table_init_attr *init_attr,
+ u32 *ind_tbl_num, struct ib_udata *udata);
+void mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
struct ib_dm *mlx5_ib_alloc_dm(struct ib_device *ibdev,
struct ib_ucontext *context,
struct ib_dm_alloc_attr *attr,
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 0ae73f4e28a3..3124c80169fb 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -5063,12 +5063,13 @@ void mlx5_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata)
kfree(rwq);
}
-struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
- struct ib_rwq_ind_table_init_attr *init_attr,
- struct ib_udata *udata)
+int mlx5_ib_create_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_table,
+ struct ib_rwq_ind_table_init_attr *init_attr,
+ u32 *ind_tbl_num, struct ib_udata *udata)
{
- struct mlx5_ib_dev *dev = to_mdev(device);
- struct mlx5_ib_rwq_ind_table *rwq_ind_tbl;
+ struct mlx5_ib_rwq_ind_table *rwq_ind_tbl =
+ to_mrwq_ind_table(ib_rwq_ind_table);
+ struct mlx5_ib_dev *dev = to_mdev(ib_rwq_ind_table->device);
int sz = 1 << init_attr->log_ind_tbl_size;
struct mlx5_ib_create_rwq_ind_tbl_resp resp = {};
size_t min_resp_len;
@@ -5081,30 +5082,24 @@ struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
if (udata->inlen > 0 &&
!ib_is_udata_cleared(udata, 0,
udata->inlen))
- return ERR_PTR(-EOPNOTSUPP);
+ return -EOPNOTSUPP;
if (init_attr->log_ind_tbl_size >
MLX5_CAP_GEN(dev->mdev, log_max_rqt_size)) {
mlx5_ib_dbg(dev, "log_ind_tbl_size = %d is bigger than supported = %d\n",
init_attr->log_ind_tbl_size,
MLX5_CAP_GEN(dev->mdev, log_max_rqt_size));
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
}
min_resp_len = offsetof(typeof(resp), reserved) + sizeof(resp.reserved);
if (udata->outlen && udata->outlen < min_resp_len)
- return ERR_PTR(-EINVAL);
-
- rwq_ind_tbl = kzalloc(sizeof(*rwq_ind_tbl), GFP_KERNEL);
- if (!rwq_ind_tbl)
- return ERR_PTR(-ENOMEM);
+ return -EINVAL;
inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
in = kvzalloc(inlen, GFP_KERNEL);
- if (!in) {
- err = -ENOMEM;
- goto err;
- }
+ if (!in)
+ return -ENOMEM;
rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
@@ -5118,12 +5113,10 @@ struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
MLX5_SET(create_rqt_in, in, uid, rwq_ind_tbl->uid);
err = mlx5_core_create_rqt(dev->mdev, in, inlen, &rwq_ind_tbl->rqtn);
- kvfree(in);
-
if (err)
goto err;
- rwq_ind_tbl->ib_rwq_ind_tbl.ind_tbl_num = rwq_ind_tbl->rqtn;
+ *ind_tbl_num = rwq_ind_tbl->rqtn;
if (udata->outlen) {
resp.response_length = offsetof(typeof(resp), response_length) +
sizeof(resp.response_length);
@@ -5132,24 +5125,22 @@ struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
goto err_copy;
}
- return &rwq_ind_tbl->ib_rwq_ind_tbl;
+ kvfree(in);
+ return 0;
err_copy:
mlx5_cmd_destroy_rqt(dev->mdev, rwq_ind_tbl->rqtn, rwq_ind_tbl->uid);
err:
- kfree(rwq_ind_tbl);
- return ERR_PTR(err);
+ kvfree(in);
+ return err;
}
-int mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl)
+void mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl)
{
struct mlx5_ib_rwq_ind_table *rwq_ind_tbl = to_mrwq_ind_table(ib_rwq_ind_tbl);
struct mlx5_ib_dev *dev = to_mdev(ib_rwq_ind_tbl->device);
mlx5_cmd_destroy_rqt(dev->mdev, rwq_ind_tbl->rqtn, rwq_ind_tbl->uid);
-
- kfree(rwq_ind_tbl);
- return 0;
}
int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 6e2cb69fe90b..ed948946e443 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -2526,11 +2526,10 @@ struct ib_device_ops {
void (*destroy_wq)(struct ib_wq *wq, struct ib_udata *udata);
int (*modify_wq)(struct ib_wq *wq, struct ib_wq_attr *attr,
u32 wq_attr_mask, struct ib_udata *udata);
- struct ib_rwq_ind_table *(*create_rwq_ind_table)(
- struct ib_device *device,
- struct ib_rwq_ind_table_init_attr *init_attr,
- struct ib_udata *udata);
- int (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *wq_ind_table);
+ int (*create_rwq_ind_table)(struct ib_rwq_ind_table *ib_rwq_ind_table,
+ struct ib_rwq_ind_table_init_attr *init_attr,
+ u32 *ind_tbl_num, struct ib_udata *udata);
+ void (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *wq_ind_table);
struct ib_dm *(*alloc_dm)(struct ib_device *device,
struct ib_ucontext *context,
struct ib_dm_alloc_attr *attr,
@@ -2653,6 +2652,7 @@ struct ib_device_ops {
DECLARE_RDMA_OBJ_SIZE(ib_cq);
DECLARE_RDMA_OBJ_SIZE(ib_mw);
DECLARE_RDMA_OBJ_SIZE(ib_pd);
+ DECLARE_RDMA_OBJ_SIZE(ib_rwq_ind_table);
DECLARE_RDMA_OBJ_SIZE(ib_srq);
DECLARE_RDMA_OBJ_SIZE(ib_ucontext);
DECLARE_RDMA_OBJ_SIZE(ib_xrcd);
@@ -4430,7 +4430,6 @@ struct ib_wq *ib_create_wq(struct ib_pd *pd,
int ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata);
int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *attr,
u32 wq_attr_mask);
-int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
unsigned int *sg_offset, unsigned int page_size);
--
2.26.2
* Re: [PATCH rdma-next v1 4/4] RDMA/core: Convert RWQ table logic to ib_core allocation scheme
2020-06-30 10:18 ` [PATCH rdma-next v1 4/4] RDMA/core: Convert RWQ table logic to ib_core allocation scheme Leon Romanovsky
@ 2020-06-30 11:31 ` Yishai Hadas
2020-06-30 11:42 ` Leon Romanovsky
0 siblings, 1 reply; 13+ messages in thread
From: Yishai Hadas @ 2020-06-30 11:31 UTC (permalink / raw)
To: Leon Romanovsky, Jason Gunthorpe, Leon Romanovsky
Cc: Doug Ledford, linux-rdma, Yishai Hadas, Maor Gottlieb
On 6/30/2020 1:18 PM, Leon Romanovsky wrote:
> From: Leon Romanovsky <leonro@mellanox.com>
>
> Move struct ib_rwq_ind_table allocation to ib_core.
>
> Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
> ---
> drivers/infiniband/core/device.c | 1 +
> drivers/infiniband/core/uverbs_cmd.c | 37 ++++++++++++-------
> drivers/infiniband/core/uverbs_std_types.c | 16 +++++---
> drivers/infiniband/core/verbs.c | 23 ------------
> drivers/infiniband/hw/mlx4/main.c | 4 +-
> drivers/infiniband/hw/mlx4/mlx4_ib.h | 12 +++---
> drivers/infiniband/hw/mlx4/qp.c | 40 ++++++--------------
> drivers/infiniband/hw/mlx5/main.c | 3 ++
> drivers/infiniband/hw/mlx5/mlx5_ib.h | 8 ++--
> drivers/infiniband/hw/mlx5/qp.c | 43 +++++++++-------------
> include/rdma/ib_verbs.h | 11 +++---
> 11 files changed, 85 insertions(+), 113 deletions(-)
>
> diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
> index b15fa3fa09ac..85d921c8e2b5 100644
> --- a/drivers/infiniband/core/device.c
> +++ b/drivers/infiniband/core/device.c
> @@ -2686,6 +2686,7 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
> SET_OBJ_SIZE(dev_ops, ib_cq);
> SET_OBJ_SIZE(dev_ops, ib_mw);
> SET_OBJ_SIZE(dev_ops, ib_pd);
> + SET_OBJ_SIZE(dev_ops, ib_rwq_ind_table);
> SET_OBJ_SIZE(dev_ops, ib_srq);
> SET_OBJ_SIZE(dev_ops, ib_ucontext);
> SET_OBJ_SIZE(dev_ops, ib_xrcd);
> diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
> index a5301f3d3059..a83d11d1e3ee 100644
> --- a/drivers/infiniband/core/uverbs_cmd.c
> +++ b/drivers/infiniband/core/uverbs_cmd.c
> @@ -3090,13 +3090,13 @@ static int ib_uverbs_ex_create_rwq_ind_table(struct uverbs_attr_bundle *attrs)
> {
> struct ib_uverbs_ex_create_rwq_ind_table cmd;
> struct ib_uverbs_ex_create_rwq_ind_table_resp resp = {};
> - struct ib_uobject *uobj;
> + struct ib_uobject *uobj;
> int err;
> struct ib_rwq_ind_table_init_attr init_attr = {};
> struct ib_rwq_ind_table *rwq_ind_tbl;
> - struct ib_wq **wqs = NULL;
> + struct ib_wq **wqs = NULL;
> u32 *wqs_handles = NULL;
> - struct ib_wq *wq = NULL;
> + struct ib_wq *wq = NULL;
> int i, j, num_read_wqs;
> u32 num_wq_handles;
> struct uverbs_req_iter iter;
> @@ -3151,17 +3151,15 @@ static int ib_uverbs_ex_create_rwq_ind_table(struct uverbs_attr_bundle *attrs)
> goto put_wqs;
> }
>
> - init_attr.log_ind_tbl_size = cmd.log_ind_tbl_size;
> - init_attr.ind_tbl = wqs;
> -
> - rwq_ind_tbl = ib_dev->ops.create_rwq_ind_table(ib_dev, &init_attr,
> - &attrs->driver_udata);
> -
> - if (IS_ERR(rwq_ind_tbl)) {
> - err = PTR_ERR(rwq_ind_tbl);
> + rwq_ind_tbl = rdma_zalloc_drv_obj(ib_dev, ib_rwq_ind_table);
> + if (!rwq_ind_tbl) {
> + err = -ENOMEM;
> goto err_uobj;
> }
>
> + init_attr.log_ind_tbl_size = cmd.log_ind_tbl_size;
> + init_attr.ind_tbl = wqs;
> +
> rwq_ind_tbl->ind_tbl = wqs;
> rwq_ind_tbl->log_ind_tbl_size = init_attr.log_ind_tbl_size;
> rwq_ind_tbl->uobject = uobj;
> @@ -3169,6 +3167,12 @@ static int ib_uverbs_ex_create_rwq_ind_table(struct uverbs_attr_bundle *attrs)
> rwq_ind_tbl->device = ib_dev;
> atomic_set(&rwq_ind_tbl->usecnt, 0);
>
> + err = ib_dev->ops.create_rwq_ind_table(rwq_ind_tbl, &init_attr,
> + &rwq_ind_tbl->ind_tbl_num,
> + &attrs->driver_udata);
> + if (err)
> + goto err_create;
> +
> for (i = 0; i < num_wq_handles; i++)
> atomic_inc(&wqs[i]->usecnt);
>
> @@ -3190,7 +3194,13 @@ static int ib_uverbs_ex_create_rwq_ind_table(struct uverbs_attr_bundle *attrs)
> return 0;
>
> err_copy:
> - ib_destroy_rwq_ind_table(rwq_ind_tbl);
> + for (i = 0; i < num_wq_handles; i++)
> + atomic_dec(&wqs[i]->usecnt);
> +
> + if (ib_dev->ops.destroy_rwq_ind_table)
> + ib_dev->ops.destroy_rwq_ind_table(rwq_ind_tbl);
> +err_create:
> + kfree(rwq_ind_tbl);
> err_uobj:
> uobj_alloc_abort(uobj, attrs);
> put_wqs:
> @@ -4018,8 +4028,7 @@ const struct uapi_definition uverbs_def_write_intf[] = {
> IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL,
> ib_uverbs_ex_destroy_rwq_ind_table,
> UAPI_DEF_WRITE_I(
> - struct ib_uverbs_ex_destroy_rwq_ind_table),
> - UAPI_DEF_METHOD_NEEDS_FN(destroy_rwq_ind_table))),
> + struct ib_uverbs_ex_destroy_rwq_ind_table))),
>
> DECLARE_UVERBS_OBJECT(
> UVERBS_OBJECT_WQ,
> diff --git a/drivers/infiniband/core/uverbs_std_types.c b/drivers/infiniband/core/uverbs_std_types.c
> index e4994cc4cc51..13c6b3066e4f 100644
> --- a/drivers/infiniband/core/uverbs_std_types.c
> +++ b/drivers/infiniband/core/uverbs_std_types.c
> @@ -82,12 +82,21 @@ static int uverbs_free_rwq_ind_tbl(struct ib_uobject *uobject,
> {
> struct ib_rwq_ind_table *rwq_ind_tbl = uobject->object;
> struct ib_wq **ind_tbl = rwq_ind_tbl->ind_tbl;
> - int ret;
> + u32 table_size = (1 << rwq_ind_tbl->log_ind_tbl_size);
> + int ret = 0, i;
> +
> + if (atomic_read(&rwq_ind_tbl->usecnt))
> + ret = -EBUSY;
>
> - ret = ib_destroy_rwq_ind_table(rwq_ind_tbl);
> if (ib_is_destroy_retryable(ret, why, uobject))
> return ret;
>
> + for (i = 0; i < table_size; i++)
> + atomic_dec(&ind_tbl[i]->usecnt);
> +
> + if (rwq_ind_tbl->device->ops.destroy_rwq_ind_table)
> + rwq_ind_tbl->device->ops.destroy_rwq_ind_table(rwq_ind_tbl);
We had two notes here from the previous review that still need to be
settled: the refcount decrement before object destruction (high
priority), and whether drivers should keep both alloc and destroy
callbacks for symmetry reasons (low priority).

Let's get Jason's feedback here, please.
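For the high-priority note, the concern is only the ordering; something
along these lines would restore the old ib_destroy_rwq_ind_table()
behavior (untested sketch, names as in this patch):

	if (rwq_ind_tbl->device->ops.destroy_rwq_ind_table)
		rwq_ind_tbl->device->ops.destroy_rwq_ind_table(rwq_ind_tbl);

	/* Drop the WQ references only after the table is really gone */
	for (i = 0; i < table_size; i++)
		atomic_dec(&ind_tbl[i]->usecnt);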
> + kfree(rwq_ind_tbl);
> kfree(ind_tbl);
> return ret;
> }
> @@ -258,9 +267,6 @@ const struct uapi_definition uverbs_def_obj_intf[] = {
> UAPI_DEF_OBJ_NEEDS_FN(dealloc_mw)),
> UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_FLOW,
> UAPI_DEF_OBJ_NEEDS_FN(destroy_flow)),
> - UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
> - UVERBS_OBJECT_RWQ_IND_TBL,
> - UAPI_DEF_OBJ_NEEDS_FN(destroy_rwq_ind_table)),
> UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_XRCD,
> UAPI_DEF_OBJ_NEEDS_FN(dealloc_xrcd)),
> {}
> diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
> index c41e3fdef5d6..be4c6ee3804a 100644
> --- a/drivers/infiniband/core/verbs.c
> +++ b/drivers/infiniband/core/verbs.c
> @@ -2412,29 +2412,6 @@ int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
> }
> EXPORT_SYMBOL(ib_modify_wq);
>
> -/*
> - * ib_destroy_rwq_ind_table - Destroys the specified Indirection Table.
> - * @wq_ind_table: The Indirection Table to destroy.
> -*/
> -int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *rwq_ind_table)
> -{
> - int err, i;
> - u32 table_size = (1 << rwq_ind_table->log_ind_tbl_size);
> - struct ib_wq **ind_tbl = rwq_ind_table->ind_tbl;
> -
> - if (atomic_read(&rwq_ind_table->usecnt))
> - return -EBUSY;
> -
> - err = rwq_ind_table->device->ops.destroy_rwq_ind_table(rwq_ind_table);
> - if (!err) {
> - for (i = 0; i < table_size; i++)
> - atomic_dec(&ind_tbl[i]->usecnt);
> - }
> -
> - return err;
> -}
> -EXPORT_SYMBOL(ib_destroy_rwq_ind_table);
> -
> int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
> struct ib_mr_status *mr_status)
> {
> diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
> index 0ad584a3b8d6..88a3a9a88bee 100644
> --- a/drivers/infiniband/hw/mlx4/main.c
> +++ b/drivers/infiniband/hw/mlx4/main.c
> @@ -2585,9 +2585,11 @@ static const struct ib_device_ops mlx4_ib_dev_ops = {
> static const struct ib_device_ops mlx4_ib_dev_wq_ops = {
> .create_rwq_ind_table = mlx4_ib_create_rwq_ind_table,
> .create_wq = mlx4_ib_create_wq,
> - .destroy_rwq_ind_table = mlx4_ib_destroy_rwq_ind_table,
> .destroy_wq = mlx4_ib_destroy_wq,
> .modify_wq = mlx4_ib_modify_wq,
> +
> + INIT_RDMA_OBJ_SIZE(ib_rwq_ind_table, mlx4_ib_rwq_ind_table,
> + ib_rwq_ind_tbl),
> };
>
> static const struct ib_device_ops mlx4_ib_dev_mw_ops = {
> diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
> index 5fbe766aef6b..2730a0f65f47 100644
> --- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
> +++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
> @@ -366,6 +366,10 @@ struct mlx4_ib_ah {
> union mlx4_ext_av av;
> };
>
> +struct mlx4_ib_rwq_ind_table {
> + struct ib_rwq_ind_table ib_rwq_ind_tbl;
> +};
> +
> /****************************************/
> /* alias guid support */
> /****************************************/
> @@ -893,11 +897,9 @@ void mlx4_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata);
> int mlx4_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
> u32 wq_attr_mask, struct ib_udata *udata);
>
> -struct ib_rwq_ind_table
> -*mlx4_ib_create_rwq_ind_table(struct ib_device *device,
> - struct ib_rwq_ind_table_init_attr *init_attr,
> - struct ib_udata *udata);
> -int mlx4_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
> +int mlx4_ib_create_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl,
> + struct ib_rwq_ind_table_init_attr *init_attr,
> + u32 *ind_tbl_num, struct ib_udata *udata);
> int mlx4_ib_umem_calc_optimal_mtt_size(struct ib_umem *umem, u64 start_va,
> int *num_of_mtts);
>
> diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
> index cf51e3cbd969..5795bfc1a512 100644
> --- a/drivers/infiniband/hw/mlx4/qp.c
> +++ b/drivers/infiniband/hw/mlx4/qp.c
> @@ -4340,34 +4340,32 @@ void mlx4_ib_destroy_wq(struct ib_wq *ibwq, struct ib_udata *udata)
> kfree(qp);
> }
>
> -struct ib_rwq_ind_table
> -*mlx4_ib_create_rwq_ind_table(struct ib_device *device,
> - struct ib_rwq_ind_table_init_attr *init_attr,
> - struct ib_udata *udata)
> +int mlx4_ib_create_rwq_ind_table(struct ib_rwq_ind_table *rwq_ind_table,
> + struct ib_rwq_ind_table_init_attr *init_attr,
> + u32 *ind_tbl_num, struct ib_udata *udata)
> {
> - struct ib_rwq_ind_table *rwq_ind_table;
> struct mlx4_ib_create_rwq_ind_tbl_resp resp = {};
> unsigned int ind_tbl_size = 1 << init_attr->log_ind_tbl_size;
> + struct ib_device *device = rwq_ind_table->device;
> unsigned int base_wqn;
> size_t min_resp_len;
> - int i;
> - int err;
> + int i, err = 0;
>
> if (udata->inlen > 0 &&
> !ib_is_udata_cleared(udata, 0,
> udata->inlen))
> - return ERR_PTR(-EOPNOTSUPP);
> + return -EOPNOTSUPP;
>
> min_resp_len = offsetof(typeof(resp), reserved) + sizeof(resp.reserved);
> if (udata->outlen && udata->outlen < min_resp_len)
> - return ERR_PTR(-EINVAL);
> + return -EINVAL;
>
> if (ind_tbl_size >
> device->attrs.rss_caps.max_rwq_indirection_table_size) {
> pr_debug("log_ind_tbl_size = %d is bigger than supported = %d\n",
> ind_tbl_size,
> device->attrs.rss_caps.max_rwq_indirection_table_size);
> - return ERR_PTR(-EINVAL);
> + return -EINVAL;
> }
>
> base_wqn = init_attr->ind_tbl[0]->wq_num;
> @@ -4375,39 +4373,23 @@ struct ib_rwq_ind_table
> if (base_wqn % ind_tbl_size) {
> pr_debug("WQN=0x%x isn't aligned with indirection table size\n",
> base_wqn);
> - return ERR_PTR(-EINVAL);
> + return -EINVAL;
> }
>
> for (i = 1; i < ind_tbl_size; i++) {
> if (++base_wqn != init_attr->ind_tbl[i]->wq_num) {
> pr_debug("indirection table's WQNs aren't consecutive\n");
> - return ERR_PTR(-EINVAL);
> + return -EINVAL;
> }
> }
>
> - rwq_ind_table = kzalloc(sizeof(*rwq_ind_table), GFP_KERNEL);
> - if (!rwq_ind_table)
> - return ERR_PTR(-ENOMEM);
> -
> if (udata->outlen) {
> resp.response_length = offsetof(typeof(resp), response_length) +
> sizeof(resp.response_length);
> err = ib_copy_to_udata(udata, &resp, resp.response_length);
> - if (err)
> - goto err;
> }
>
> - return rwq_ind_table;
> -
> -err:
> - kfree(rwq_ind_table);
> - return ERR_PTR(err);
> -}
> -
> -int mlx4_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl)
> -{
> - kfree(ib_rwq_ind_tbl);
> - return 0;
> + return err;
> }
>
> struct mlx4_ib_drain_cqe {
> diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
> index a6d5c35a2845..682ba1b6f34a 100644
> --- a/drivers/infiniband/hw/mlx5/main.c
> +++ b/drivers/infiniband/hw/mlx5/main.c
> @@ -6844,6 +6844,9 @@ static const struct ib_device_ops mlx5_ib_dev_common_roce_ops = {
> .destroy_wq = mlx5_ib_destroy_wq,
> .get_netdev = mlx5_ib_get_netdev,
> .modify_wq = mlx5_ib_modify_wq,
> +
> + INIT_RDMA_OBJ_SIZE(ib_rwq_ind_table, mlx5_ib_rwq_ind_table,
> + ib_rwq_ind_tbl),
> };
>
> static int mlx5_ib_stage_common_roce_init(struct mlx5_ib_dev *dev)
> diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
> index 726386fe4440..c4a285e1950d 100644
> --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
> +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
> @@ -1272,10 +1272,10 @@ struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
> void mlx5_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata);
> int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
> u32 wq_attr_mask, struct ib_udata *udata);
> -struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
> - struct ib_rwq_ind_table_init_attr *init_attr,
> - struct ib_udata *udata);
> -int mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
> +int mlx5_ib_create_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_table,
> + struct ib_rwq_ind_table_init_attr *init_attr,
> + u32 *ind_tbl_num, struct ib_udata *udata);
> +void mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
> struct ib_dm *mlx5_ib_alloc_dm(struct ib_device *ibdev,
> struct ib_ucontext *context,
> struct ib_dm_alloc_attr *attr,
> diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
> index 0ae73f4e28a3..3124c80169fb 100644
> --- a/drivers/infiniband/hw/mlx5/qp.c
> +++ b/drivers/infiniband/hw/mlx5/qp.c
> @@ -5063,12 +5063,13 @@ void mlx5_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata)
> kfree(rwq);
> }
>
> -struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
> - struct ib_rwq_ind_table_init_attr *init_attr,
> - struct ib_udata *udata)
> +int mlx5_ib_create_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_table,
> + struct ib_rwq_ind_table_init_attr *init_attr,
> + u32 *ind_tbl_num, struct ib_udata *udata)
> {
> - struct mlx5_ib_dev *dev = to_mdev(device);
> - struct mlx5_ib_rwq_ind_table *rwq_ind_tbl;
> + struct mlx5_ib_rwq_ind_table *rwq_ind_tbl =
> + to_mrwq_ind_table(ib_rwq_ind_table);
> + struct mlx5_ib_dev *dev = to_mdev(ib_rwq_ind_table->device);
> int sz = 1 << init_attr->log_ind_tbl_size;
> struct mlx5_ib_create_rwq_ind_tbl_resp resp = {};
> size_t min_resp_len;
> @@ -5081,30 +5082,24 @@ struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
> if (udata->inlen > 0 &&
> !ib_is_udata_cleared(udata, 0,
> udata->inlen))
> - return ERR_PTR(-EOPNOTSUPP);
> + return -EOPNOTSUPP;
>
> if (init_attr->log_ind_tbl_size >
> MLX5_CAP_GEN(dev->mdev, log_max_rqt_size)) {
> mlx5_ib_dbg(dev, "log_ind_tbl_size = %d is bigger than supported = %d\n",
> init_attr->log_ind_tbl_size,
> MLX5_CAP_GEN(dev->mdev, log_max_rqt_size));
> - return ERR_PTR(-EINVAL);
> + return -EINVAL;
> }
>
> min_resp_len = offsetof(typeof(resp), reserved) + sizeof(resp.reserved);
> if (udata->outlen && udata->outlen < min_resp_len)
> - return ERR_PTR(-EINVAL);
> -
> - rwq_ind_tbl = kzalloc(sizeof(*rwq_ind_tbl), GFP_KERNEL);
> - if (!rwq_ind_tbl)
> - return ERR_PTR(-ENOMEM);
> + return -EINVAL;
>
> inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
> in = kvzalloc(inlen, GFP_KERNEL);
> - if (!in) {
> - err = -ENOMEM;
> - goto err;
> - }
> + if (!in)
> + return -ENOMEM;
>
> rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
>
> @@ -5118,12 +5113,10 @@ struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
> MLX5_SET(create_rqt_in, in, uid, rwq_ind_tbl->uid);
>
> err = mlx5_core_create_rqt(dev->mdev, in, inlen, &rwq_ind_tbl->rqtn);
> - kvfree(in);
> -
> if (err)
> goto err;
>
> - rwq_ind_tbl->ib_rwq_ind_tbl.ind_tbl_num = rwq_ind_tbl->rqtn;
> + *ind_tbl_num = rwq_ind_tbl->rqtn;
> if (udata->outlen) {
> resp.response_length = offsetof(typeof(resp), response_length) +
> sizeof(resp.response_length);
> @@ -5132,24 +5125,22 @@ struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
> goto err_copy;
> }
>
> - return &rwq_ind_tbl->ib_rwq_ind_tbl;
> + kvfree(in);
> + return 0;
>
> err_copy:
> mlx5_cmd_destroy_rqt(dev->mdev, rwq_ind_tbl->rqtn, rwq_ind_tbl->uid);
> err:
> - kfree(rwq_ind_tbl);
> - return ERR_PTR(err);
> + kvfree(in);
> + return err;
> }
>
> -int mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl)
> +void mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl)
> {
> struct mlx5_ib_rwq_ind_table *rwq_ind_tbl = to_mrwq_ind_table(ib_rwq_ind_tbl);
> struct mlx5_ib_dev *dev = to_mdev(ib_rwq_ind_tbl->device);
>
> mlx5_cmd_destroy_rqt(dev->mdev, rwq_ind_tbl->rqtn, rwq_ind_tbl->uid);
> -
> - kfree(rwq_ind_tbl);
> - return 0;
> }
>
> int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
> diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
> index 6e2cb69fe90b..ed948946e443 100644
> --- a/include/rdma/ib_verbs.h
> +++ b/include/rdma/ib_verbs.h
> @@ -2526,11 +2526,10 @@ struct ib_device_ops {
> void (*destroy_wq)(struct ib_wq *wq, struct ib_udata *udata);
> int (*modify_wq)(struct ib_wq *wq, struct ib_wq_attr *attr,
> u32 wq_attr_mask, struct ib_udata *udata);
> - struct ib_rwq_ind_table *(*create_rwq_ind_table)(
> - struct ib_device *device,
> - struct ib_rwq_ind_table_init_attr *init_attr,
> - struct ib_udata *udata);
> - int (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *wq_ind_table);
> + int (*create_rwq_ind_table)(struct ib_rwq_ind_table *ib_rwq_ind_table,
> + struct ib_rwq_ind_table_init_attr *init_attr,
> + u32 *ind_tbl_num, struct ib_udata *udata);
> + void (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *wq_ind_table);
> struct ib_dm *(*alloc_dm)(struct ib_device *device,
> struct ib_ucontext *context,
> struct ib_dm_alloc_attr *attr,
> @@ -2653,6 +2652,7 @@ struct ib_device_ops {
> DECLARE_RDMA_OBJ_SIZE(ib_cq);
> DECLARE_RDMA_OBJ_SIZE(ib_mw);
> DECLARE_RDMA_OBJ_SIZE(ib_pd);
> + DECLARE_RDMA_OBJ_SIZE(ib_rwq_ind_table);
> DECLARE_RDMA_OBJ_SIZE(ib_srq);
> DECLARE_RDMA_OBJ_SIZE(ib_ucontext);
> DECLARE_RDMA_OBJ_SIZE(ib_xrcd);
> @@ -4430,7 +4430,6 @@ struct ib_wq *ib_create_wq(struct ib_pd *pd,
> int ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata);
> int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *attr,
> u32 wq_attr_mask);
> -int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
>
> int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
> unsigned int *sg_offset, unsigned int page_size);
>
* Re: [PATCH rdma-next v1 4/4] RDMA/core: Convert RWQ table logic to ib_core allocation scheme
2020-06-30 11:31 ` Yishai Hadas
@ 2020-06-30 11:42 ` Leon Romanovsky
0 siblings, 0 replies; 13+ messages in thread
From: Leon Romanovsky @ 2020-06-30 11:42 UTC (permalink / raw)
To: Yishai Hadas
Cc: Jason Gunthorpe, Doug Ledford, linux-rdma, Yishai Hadas, Maor Gottlieb
On Tue, Jun 30, 2020 at 02:31:16PM +0300, Yishai Hadas wrote:
> On 6/30/2020 1:18 PM, Leon Romanovsky wrote:
> > From: Leon Romanovsky <leonro@mellanox.com>
> >
> > Move struct ib_rwq_ind_table allocation to ib_core.
> >
> > Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
> > ---
> > drivers/infiniband/core/device.c | 1 +
> > drivers/infiniband/core/uverbs_cmd.c | 37 ++++++++++++-------
> > drivers/infiniband/core/uverbs_std_types.c | 16 +++++---
> > drivers/infiniband/core/verbs.c | 23 ------------
> > drivers/infiniband/hw/mlx4/main.c | 4 +-
> > drivers/infiniband/hw/mlx4/mlx4_ib.h | 12 +++---
> > drivers/infiniband/hw/mlx4/qp.c | 40 ++++++--------------
> > drivers/infiniband/hw/mlx5/main.c | 3 ++
> > drivers/infiniband/hw/mlx5/mlx5_ib.h | 8 ++--
> > drivers/infiniband/hw/mlx5/qp.c | 43 +++++++++-------------
> > include/rdma/ib_verbs.h | 11 +++---
> > 11 files changed, 85 insertions(+), 113 deletions(-)
<...>
> > +
> > + if (rwq_ind_tbl->device->ops.destroy_rwq_ind_table)
> > + rwq_ind_tbl->device->ops.destroy_rwq_ind_table(rwq_ind_tbl);
>
>
> We had two notes here from the previous review that still need to be
> settled: the refcount decrement before object destruction (high
> priority), and whether drivers should keep both alloc and destroy
> callbacks for symmetry reasons (low priority).
>
> Let's get Jason's feedback here, please.
I'm confident that Jason will give his feedback, as he always does
when accepting or declining patches. It goes without saying.

From my point of view, there is nothing to change.
Thanks
* Re: [PATCH rdma-next v1 0/4] ib_core allocation patches
2020-06-30 10:18 [PATCH rdma-next v1 0/4] ib_core allocation patches Leon Romanovsky
` (3 preceding siblings ...)
2020-06-30 10:18 ` [PATCH rdma-next v1 4/4] RDMA/core: Convert RWQ table logic to ib_core allocation scheme Leon Romanovsky
@ 2020-06-30 14:59 ` Leon Romanovsky
2020-07-06 23:13 ` Jason Gunthorpe
5 siblings, 0 replies; 13+ messages in thread
From: Leon Romanovsky @ 2020-06-30 14:59 UTC (permalink / raw)
To: Doug Ledford, Jason Gunthorpe
Cc: Lijun Ou, linux-kernel, linux-rdma, Potnuri Bharat Teja,
Weihang Li, Wei Hu(Xavier),
Yishai Hadas
On Tue, Jun 30, 2020 at 01:18:51PM +0300, Leon Romanovsky wrote:
> From: Leon Romanovsky <leonro@mellanox.com>
>
> Changelog
> v1:
> * Removed empty "//" comment
> * Deleted destroy_rwq_ind_table from object tree
> * One patch was accepted, so rebased on latest for-upstream
> v0:
> https://lore.kernel.org/lkml/20200624105422.1452290-1-leon@kernel.org
>
> ----------------------------------------------------------------------
>
> Let's continue my allocation work.
>
> Leon Romanovsky (4):
> RDMA/core: Create and destroy counters in the ib_core
> RDMA: Clean MW allocation and free flows
> RDMA: Move XRCD to be under ib_core responsibility
> RDMA/core: Convert RWQ table logic to ib_core allocation scheme
Jason,
Please drop this patch; I will resubmit it together with fixes to the
RWQ table create/destroy flows, such as proper error unwinding and
proper usecnt handling.
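To illustrate, the free path will likely keep the old
ib_destroy_rwq_ind_table() ordering, roughly as below (untested
sketch, the final form may differ):

static int uverbs_free_rwq_ind_tbl(struct ib_uobject *uobject,
				   enum rdma_remove_reason why,
				   struct uverbs_attr_bundle *attrs)
{
	struct ib_rwq_ind_table *rwq_ind_tbl = uobject->object;
	struct ib_wq **ind_tbl = rwq_ind_tbl->ind_tbl;
	u32 table_size = (1 << rwq_ind_tbl->log_ind_tbl_size);
	int ret = 0, i;

	if (atomic_read(&rwq_ind_tbl->usecnt))
		ret = -EBUSY;

	if (ib_is_destroy_retryable(ret, why, uobject))
		return ret;

	rwq_ind_tbl->device->ops.destroy_rwq_ind_table(rwq_ind_tbl);

	/* Decrement the WQ refcounts only after the table is destroyed */
	for (i = 0; i < table_size; i++)
		atomic_dec(&ind_tbl[i]->usecnt);

	kfree(rwq_ind_tbl);
	kfree(ind_tbl);
	return 0;
}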
Thanks
* Re: [PATCH rdma-next v1 0/4] ib_core allocation patches
2020-06-30 10:18 [PATCH rdma-next v1 0/4] ib_core allocation patches Leon Romanovsky
` (4 preceding siblings ...)
2020-06-30 14:59 ` [PATCH rdma-next v1 0/4] ib_core allocation patches Leon Romanovsky
@ 2020-07-06 23:13 ` Jason Gunthorpe
5 siblings, 0 replies; 13+ messages in thread
From: Jason Gunthorpe @ 2020-07-06 23:13 UTC (permalink / raw)
To: Leon Romanovsky
Cc: Doug Ledford, Leon Romanovsky, Lijun Ou, linux-kernel,
linux-rdma, Potnuri Bharat Teja, Weihang Li, Wei Hu(Xavier),
Yishai Hadas
On Tue, Jun 30, 2020 at 01:18:51PM +0300, Leon Romanovsky wrote:
> From: Leon Romanovsky <leonro@mellanox.com>
>
> Changelog
> v1:
> * Removed empty "//" comment
> * Deleted destroy_rwq_ind_table from object tree
> * One patch was accepted, so rebased on latest for-upstream
> v0:
> https://lore.kernel.org/lkml/20200624105422.1452290-1-leon@kernel.org
>
>
> Let's continue my allocation work.
>
> Leon Romanovsky (4):
> RDMA/core: Create and destroy counters in the ib_core
> RDMA: Move XRCD to be under ib_core responsibility
These two applied to for-next
Thanks,
Jason