linux-rdma.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
From: Michal Kalderon <mkalderon@marvell.com>
To: Gal Pressman <galpress@amazon.com>
Cc: Ariel Elior <aelior@marvell.com>, "jgg@ziepe.ca" <jgg@ziepe.ca>,
	"dledford@redhat.com" <dledford@redhat.com>,
	"bmt@zurich.ibm.com" <bmt@zurich.ibm.com>,
	"sleybo@amazon.com" <sleybo@amazon.com>,
	"leon@kernel.org" <leon@kernel.org>,
	"linux-rdma@vger.kernel.org" <linux-rdma@vger.kernel.org>,
	Ariel Elior <aelior@marvell.com>
Subject: RE: [PATCH v8 rdma-next 3/7] RDMA/efa: Use the common mmap_xa helpers
Date: Fri, 30 Aug 2019 06:15:41 +0000	[thread overview]
Message-ID: <MN2PR18MB31820C96A5906F9054AE23D6A1BD0@MN2PR18MB3182.namprd18.prod.outlook.com> (raw)
In-Reply-To: <c8781373-53ec-649e-1f1d-56cc17c229f9@amazon.com>

> From: Gal Pressman <galpress@amazon.com>
> Sent: Thursday, August 29, 2019 5:21 PM
> 
> On 27/08/2019 16:28, Michal Kalderon wrote:
> > +static void efa_qp_user_mmap_entries_remove(struct efa_ucontext
> *ucontext,
> > +					    struct efa_qp *qp)
> > +{
> > +	rdma_user_mmap_entry_remove(&ucontext->ibucontext, qp-
> >sq_db_mmap_key);
> > +	rdma_user_mmap_entry_remove(&ucontext->ibucontext,
> > +				    qp->llq_desc_mmap_key);
> > +	rdma_user_mmap_entry_remove(&ucontext->ibucontext, qp-
> >rq_mmap_key);
> > +	rdma_user_mmap_entry_remove(&ucontext->ibucontext,
> > +qp->rq_db_mmap_key);
> 
> Please remove the entries in reverse insertion order.
I don't mind fixing, but why?

> 
> > +}
> > +
> >  static int qp_mmap_entries_setup(struct efa_qp *qp,
> >  				 struct efa_dev *dev,
> >  				 struct efa_ucontext *ucontext,
> >  				 struct efa_com_create_qp_params
> *params,
> >  				 struct efa_ibv_create_qp_resp *resp)  {
> > -	/*
> > -	 * Once an entry is inserted it might be mmapped, hence cannot be
> > -	 * cleaned up until dealloc_ucontext.
> > -	 */
> > -	resp->sq_db_mmap_key =
> > -		mmap_entry_insert(dev, ucontext, qp,
> > -				  dev->db_bar_addr + resp->sq_db_offset,
> > -				  PAGE_SIZE, EFA_MMAP_IO_NC);
> > -	if (resp->sq_db_mmap_key == EFA_MMAP_INVALID)
> > -		return -ENOMEM;
> > +	u64 address;
> > +	u64 length;
> > +	int err;
> > +
> > +	err = efa_user_mmap_entry_insert(&ucontext->ibucontext,
> > +					 dev->db_bar_addr +
> > +					 resp->sq_db_offset,
> > +					 PAGE_SIZE, EFA_MMAP_IO_NC,
> > +					 &qp->sq_db_mmap_key);
> > +	if (err)
> > +		return err;
> >
> > +	resp->sq_db_mmap_key = qp->sq_db_mmap_key;
> >  	resp->sq_db_offset &= ~PAGE_MASK;
> >
> > -	resp->llq_desc_mmap_key =
> > -		mmap_entry_insert(dev, ucontext, qp,
> > -				  dev->mem_bar_addr + resp-
> >llq_desc_offset,
> > -				  PAGE_ALIGN(params-
> >sq_ring_size_in_bytes +
> > -					     (resp->llq_desc_offset &
> ~PAGE_MASK)),
> > -				  EFA_MMAP_IO_WC);
> > -	if (resp->llq_desc_mmap_key == EFA_MMAP_INVALID)
> > -		return -ENOMEM;
> > +	address = dev->mem_bar_addr + resp->llq_desc_offset;
> > +	length = PAGE_ALIGN(params->sq_ring_size_in_bytes +
> > +			    (resp->llq_desc_offset & ~PAGE_MASK));
> > +
> > +	err = efa_user_mmap_entry_insert(&ucontext->ibucontext,
> > +					 address,
> > +					 length,
> > +					 EFA_MMAP_IO_WC,
> > +					 &qp->llq_desc_mmap_key);
> > +	if (err)
> > +		goto err1;
> >
> > +	resp->llq_desc_mmap_key = qp->llq_desc_mmap_key;
> >  	resp->llq_desc_offset &= ~PAGE_MASK;
> >
> >  	if (qp->rq_size) {
> > -		resp->rq_db_mmap_key =
> > -			mmap_entry_insert(dev, ucontext, qp,
> > -					  dev->db_bar_addr + resp-
> >rq_db_offset,
> > -					  PAGE_SIZE, EFA_MMAP_IO_NC);
> > -		if (resp->rq_db_mmap_key == EFA_MMAP_INVALID)
> > -			return -ENOMEM;
> > +		address = dev->db_bar_addr + resp->rq_db_offset;
> >
> > +		err = efa_user_mmap_entry_insert(&ucontext->ibucontext,
> > +						 address, PAGE_SIZE,
> > +						 EFA_MMAP_IO_NC,
> > +						 &qp->rq_db_mmap_key);
> > +		if (err)
> > +			goto err2;
> > +
> > +		resp->rq_db_mmap_key = qp->rq_db_mmap_key;
> >  		resp->rq_db_offset &= ~PAGE_MASK;
> >
> > -		resp->rq_mmap_key =
> > -			mmap_entry_insert(dev, ucontext, qp,
> > -					  virt_to_phys(qp->rq_cpu_addr),
> > -					  qp->rq_size,
> EFA_MMAP_DMA_PAGE);
> > -		if (resp->rq_mmap_key == EFA_MMAP_INVALID)
> > -			return -ENOMEM;
> > +		address = virt_to_phys(qp->rq_cpu_addr);
> > +		err = efa_user_mmap_entry_insert(&ucontext->ibucontext,
> > +						 address, qp->rq_size,
> > +						 EFA_MMAP_DMA_PAGE,
> > +						 &qp->rq_mmap_key);
> > +		if (err)
> > +			goto err3;
> >
> > +		resp->rq_mmap_key = qp->rq_mmap_key;
> >  		resp->rq_mmap_size = qp->rq_size;
> >  	}
> >
> >  	return 0;
> > +
> > +err3:
> > +	rdma_user_mmap_entry_remove(&ucontext->ibucontext,
> > +qp->rq_db_mmap_key);
> > +
> > +err2:
> > +	rdma_user_mmap_entry_remove(&ucontext->ibucontext,
> > +				    qp->llq_desc_mmap_key);
> > +
> > +err1:
> > +	rdma_user_mmap_entry_remove(&ucontext->ibucontext,
> > +qp->sq_db_mmap_key);
> 
> I prefer meaningful goto labels, e.g err_remove_sq_db instead of err1.
Ok

> 
> > +
> > +	/* If any error occurred, we init the keys back to invalid */
> > +	efa_qp_init_keys(qp);
> > +
> > +	return err;
> >  }
> >
> >  static int efa_qp_validate_cap(struct efa_dev *dev, @@ -634,7 +594,6
> > @@ struct ib_qp *efa_create_qp(struct ib_pd *ibpd,
> >  	struct efa_dev *dev = to_edev(ibpd->device);
> >  	struct efa_ibv_create_qp_resp resp = {};
> >  	struct efa_ibv_create_qp cmd = {};
> > -	bool rq_entry_inserted = false;
> >  	struct efa_ucontext *ucontext;
> >  	struct efa_qp *qp;
> >  	int err;
> > @@ -687,6 +646,7 @@ struct ib_qp *efa_create_qp(struct ib_pd *ibpd,
> >  		goto err_out;
> >  	}
> >
> > +	efa_qp_init_keys(qp);
> >  	create_qp_params.uarn = ucontext->uarn;
> >  	create_qp_params.pd = to_epd(ibpd)->pdn;
> >
> > @@ -742,7 +702,6 @@ struct ib_qp *efa_create_qp(struct ib_pd *ibpd,
> >  	if (err)
> >  		goto err_destroy_qp;
> >
> > -	rq_entry_inserted = true;
> >  	qp->qp_handle = create_qp_resp.qp_handle;
> >  	qp->ibqp.qp_num = create_qp_resp.qp_num;
> >  	qp->ibqp.qp_type = init_attr->qp_type; @@ -759,7 +718,7 @@
> struct
> > ib_qp *efa_create_qp(struct ib_pd *ibpd,
> >  			ibdev_dbg(&dev->ibdev,
> >  				  "Failed to copy udata for qp[%u]\n",
> >  				  create_qp_resp.qp_num);
> > -			goto err_destroy_qp;
> > +			goto err_remove_mmap_entries;
> >  		}
> >  	}
> >
> > @@ -767,15 +726,17 @@ struct ib_qp *efa_create_qp(struct ib_pd *ibpd,
> >
> >  	return &qp->ibqp;
> >
> > +err_remove_mmap_entries:
> > +	efa_qp_user_mmap_entries_remove(ucontext, qp);
> >  err_destroy_qp:
> >  	efa_destroy_qp_handle(dev, create_qp_resp.qp_handle);
> >  err_free_mapped:
> > -	if (qp->rq_size) {
> > +	if (qp->rq_dma_addr)
> 
> What's the difference?
Seemed a better check since it now only covers the rq_dma_addr unmapping.

> 
> >  		dma_unmap_single(&dev->pdev->dev, qp->rq_dma_addr,
> qp->rq_size,
> >  				 DMA_TO_DEVICE);
> > -		if (!rq_entry_inserted)
> > -			free_pages_exact(qp->rq_cpu_addr, qp->rq_size);
> > -	}
> > +
> > +	if (qp->rq_mmap_key == RDMA_USER_MMAP_INVALID)
> > +		free_pages_exact(qp->rq_cpu_addr, qp->rq_size);
> 
> This should be inside the previous if statement, otherwise it might try to free
> pages that weren't allocated.
If they weren't allocated, the key will be INVALID and they won't be freed.

> 
> >  err_free_qp:
> >  	kfree(qp);
> >  err_out:
> > @@ -887,6 +848,7 @@ static int efa_destroy_cq_idx(struct efa_dev *dev,
> > int cq_idx)
> >
> >  void efa_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)  {
> > +	struct efa_ucontext *ucontext;
> 
> Reverse xmas tree.
ok
> 
> >  	struct efa_dev *dev = to_edev(ibcq->device);
> >  	struct efa_cq *cq = to_ecq(ibcq);
> >
> > @@ -894,20 +856,33 @@ void efa_destroy_cq(struct ib_cq *ibcq, struct
> ib_udata *udata)
> >  		  "Destroy cq[%d] virt[0x%p] freed: size[%lu], dma[%pad]\n",
> >  		  cq->cq_idx, cq->cpu_addr, cq->size, &cq->dma_addr);
> >
> > +	ucontext = rdma_udata_to_drv_context(udata, struct efa_ucontext,
> > +					     ibucontext);
> >  	efa_destroy_cq_idx(dev, cq->cq_idx);
> >  	dma_unmap_single(&dev->pdev->dev, cq->dma_addr, cq->size,
> >  			 DMA_FROM_DEVICE);
> > +	rdma_user_mmap_entry_remove(&ucontext->ibucontext,
> > +				    cq->mmap_key);
> 
> Entry removal should be first.
Why? Removing can lead to freeing; why would we want that before unmapping?

> 
> >  }
> >
> >  static int __efa_mmap(struct efa_dev *dev, struct efa_ucontext
> *ucontext,
> >  		      struct vm_area_struct *vma, u64 key, u64 length)  {
> > -	struct efa_mmap_entry *entry;
> > +	struct rdma_user_mmap_entry *rdma_entry;
> > +	struct efa_user_mmap_entry *entry;
> >  	unsigned long va;
> >  	u64 pfn;
> >  	int err;
> >
> > -	entry = mmap_entry_get(dev, ucontext, key, length);
> > -	if (!entry) {
> > +	rdma_entry = rdma_user_mmap_entry_get(&ucontext-
> >ibucontext, key,
> > +					      length, vma);
> > +	if (!rdma_entry) {
> >  		ibdev_dbg(&dev->ibdev, "key[%#llx] does not have valid
> entry\n",
> >  			  key);
> >  		return -EINVAL;
> >  	}
> > +	entry = to_emmap(rdma_entry);
> > +	if (entry->length != length) {
> > +		ibdev_dbg(&dev->ibdev,
> > +			  "key[%#llx] does not have valid length[%#llx]
> expected[%#llx]\n",
> > +			  key, length, entry->length);
> 
> Need to put the entry.
Right thanks, will also fix in qedr and siw.

> 
> > +		return -EINVAL;
> > +	}
> >
> >  	ibdev_dbg(&dev->ibdev,
> >  		  "Mapping address[%#llx], length[%#llx],
> mmap_flag[%d]\n",
> > -		  entry->address, length, entry->mmap_flag);
> > +		  entry->address, entry->length, entry->mmap_flag);
> >
> >  	pfn = entry->address >> PAGE_SHIFT;
> >  	switch (entry->mmap_flag) {
> > @@ -1637,6 +1630,10 @@ static int __efa_mmap(struct efa_dev *dev,
> struct efa_ucontext *ucontext,
> >  			&dev->ibdev,
> >  			"Couldn't mmap address[%#llx] length[%#llx]
> mmap_flag[%d] err[%d]\n",
> >  			entry->address, length, entry->mmap_flag, err);
> > +
> > +		rdma_user_mmap_entry_put(&ucontext->ibucontext,
> > +					 rdma_entry);
> > +
> >  		return err;
> >  	}
> >

  reply	other threads:[~2019-08-30  6:15 UTC|newest]

Thread overview: 25+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2019-08-27 13:28 [PATCH v8 rdma-next 0/7] RDMA/qedr: Use the doorbell overflow recovery mechanism for RDMA Michal Kalderon
2019-08-27 13:28 ` [PATCH v8 rdma-next 1/7] RDMA/core: Move core content from ib_uverbs to ib_core Michal Kalderon
2019-08-27 13:28 ` [PATCH v8 rdma-next 2/7] RDMA/core: Create mmap database and cookie helper functions Michal Kalderon
2019-08-29 11:35   ` Gal Pressman
2019-08-29 12:40     ` Jason Gunthorpe
2019-08-30  6:10       ` [EXT] " Michal Kalderon
2019-08-27 13:28 ` [PATCH v8 rdma-next 3/7] RDMA/efa: Use the common mmap_xa helpers Michal Kalderon
2019-08-29 14:20   ` Gal Pressman
2019-08-30  6:15     ` Michal Kalderon [this message]
2019-09-01  6:56       ` Gal Pressman
2019-09-02  7:47         ` Michal Kalderon
2019-08-27 13:28 ` [PATCH v8 rdma-next 4/7] RDMA/siw: " Michal Kalderon
2019-08-27 13:28 ` [PATCH v8 rdma-next 5/7] RDMA/qedr: Use the common mmap API Michal Kalderon
2019-08-27 13:28 ` [PATCH v8 rdma-next 6/7] RDMA/qedr: Add doorbell overflow recovery support Michal Kalderon
2019-08-27 13:28 ` [PATCH v8 rdma-next 7/7] RDMA/qedr: Add iWARP doorbell " Michal Kalderon
2019-08-28 14:41 ` [PATCH v8 rdma-next 0/7] RDMA/qedr: Use the doorbell overflow recovery mechanism for RDMA Gal Pressman
2019-08-30 12:07 ` [PATCH v8 rdma-next 4/7] RDMA/siw: Use the common mmap_xa helpers Bernard Metzler
2019-08-30 12:42   ` [EXT] " Michal Kalderon
2019-08-30 13:19   ` Bernard Metzler
2019-09-02  8:00     ` Michal Kalderon
2019-09-02 10:55     ` Bernard Metzler
2019-09-02 11:21       ` Michal Kalderon
2019-09-02 11:18     ` Bernard Metzler
2019-09-02 13:16       ` Michal Kalderon
2019-09-02 14:17       ` Bernard Metzler

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=MN2PR18MB31820C96A5906F9054AE23D6A1BD0@MN2PR18MB3182.namprd18.prod.outlook.com \
    --to=mkalderon@marvell.com \
    --cc=aelior@marvell.com \
    --cc=bmt@zurich.ibm.com \
    --cc=dledford@redhat.com \
    --cc=galpress@amazon.com \
    --cc=jgg@ziepe.ca \
    --cc=leon@kernel.org \
    --cc=linux-rdma@vger.kernel.org \
    --cc=sleybo@amazon.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).