From: Christoph Hellwig <hch@lst.de>
To: Guenter Roeck <linux@roeck-us.net>
Cc: linux-kernel@vger.kernel.org, iommu@lists.linux-foundation.org,
Geert Uytterhoeven <geert@linux-m68k.org>,
David Rientjes <rientjes@google.com>,
Christophe JAILLET <christophe.jaillet@wanadoo.fr>,
Robin Murphy <robin.murphy@arm.com>,
Christoph Hellwig <hch@lst.de>
Subject: Re: [PATCH v2] dma-pool: Fix too large DMA pools on medium systems
Date: Wed, 24 Jun 2020 09:38:15 +0200 [thread overview]
Message-ID: <20200624073815.GE18609@lst.de> (raw)
In-Reply-To: <20200620200936.GA106151@roeck-us.net>
Hi Guenter,
can you try the patch below? It converts the huge allocations in mptbase
from pci_alloc_consistent to dma_alloc_coherent with GFP_KERNEL, so they
no longer need to be served from the small atomic DMA pools. Christophe
(added to Cc) has a scripted conversion for the rest of the driver that
he hasn't posted yet, so I'll aim for the minimal version here.
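For background, pci_alloc_consistent() is only a compat wrapper that
hard-codes GFP_ATOMIC, which is what pushes these huge allocations into the
atomic pools on configurations that back non-blocking coherent allocations
from them. From memory, the wrapper in include/linux/pci-dma-compat.h looks
roughly like this (shown for context only, not part of the patch):

static inline void *
pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
                     dma_addr_t *dma_handle)
{
        /* legacy API has no gfp argument, so it always uses GFP_ATOMIC */
        return dma_alloc_coherent(hwdev == NULL ? NULL : &hwdev->dev,
                                  size, dma_handle, GFP_ATOMIC);
}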
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index 68aea22f2b8978..5216487db4fbea 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -1324,13 +1324,13 @@ mpt_host_page_alloc(MPT_ADAPTER *ioc, pIOCInit_t ioc_init)
                         return 0; /* fw doesn't need any host buffers */
 
                 /* spin till we get enough memory */
-                while(host_page_buffer_sz > 0) {
-
-                        if((ioc->HostPageBuffer = pci_alloc_consistent(
-                            ioc->pcidev,
-                            host_page_buffer_sz,
-                            &ioc->HostPageBuffer_dma)) != NULL) {
-
+                while (host_page_buffer_sz > 0) {
+                        ioc->HostPageBuffer =
+                                dma_alloc_coherent(&ioc->pcidev->dev,
+                                        host_page_buffer_sz,
+                                        &ioc->HostPageBuffer_dma,
+                                        GFP_KERNEL);
+                        if (ioc->HostPageBuffer) {
                                 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT
                                         "host_page_buffer @ %p, dma @ %x, sz=%d bytes\n",
                                         ioc->name, ioc->HostPageBuffer,
@@ -2741,8 +2741,8 @@ mpt_adapter_disable(MPT_ADAPTER *ioc)
                 sz = ioc->alloc_sz;
                 dexitprintk(ioc, printk(MYIOC_s_INFO_FMT "free @ %p, sz=%d bytes\n",
                     ioc->name, ioc->alloc, ioc->alloc_sz));
-                pci_free_consistent(ioc->pcidev, sz,
-                        ioc->alloc, ioc->alloc_dma);
+                dma_free_coherent(&ioc->pcidev->dev, sz, ioc->alloc,
+                        ioc->alloc_dma);
                 ioc->reply_frames = NULL;
                 ioc->req_frames = NULL;
                 ioc->alloc = NULL;
@@ -2751,8 +2751,8 @@ mpt_adapter_disable(MPT_ADAPTER *ioc)
 
         if (ioc->sense_buf_pool != NULL) {
                 sz = (ioc->req_depth * MPT_SENSE_BUFFER_ALLOC);
-                pci_free_consistent(ioc->pcidev, sz,
-                        ioc->sense_buf_pool, ioc->sense_buf_pool_dma);
+                dma_free_coherent(&ioc->pcidev->dev, sz, ioc->sense_buf_pool,
+                        ioc->sense_buf_pool_dma);
                 ioc->sense_buf_pool = NULL;
                 ioc->alloc_total -= sz;
         }
@@ -2802,7 +2802,7 @@ mpt_adapter_disable(MPT_ADAPTER *ioc)
                         "HostPageBuffer free @ %p, sz=%d bytes\n",
                         ioc->name, ioc->HostPageBuffer,
                         ioc->HostPageBuffer_sz));
-                pci_free_consistent(ioc->pcidev, ioc->HostPageBuffer_sz,
+                dma_free_coherent(&ioc->pcidev->dev, ioc->HostPageBuffer_sz,
                         ioc->HostPageBuffer, ioc->HostPageBuffer_dma);
                 ioc->HostPageBuffer = NULL;
                 ioc->HostPageBuffer_sz = 0;
@@ -4497,7 +4497,8 @@ PrimeIocFifos(MPT_ADAPTER *ioc)
                         ioc->name, sz, sz, num_chain));
 
         total_size += sz;
-        mem = pci_alloc_consistent(ioc->pcidev, total_size, &alloc_dma);
+        mem = dma_alloc_coherent(&ioc->pcidev->dev, total_size,
+                &alloc_dma, GFP_KERNEL);
         if (mem == NULL) {
                 printk(MYIOC_s_ERR_FMT "Unable to allocate Reply, Request, Chain Buffers!\n",
                         ioc->name);
@@ -4574,8 +4575,8 @@ PrimeIocFifos(MPT_ADAPTER *ioc)
         spin_unlock_irqrestore(&ioc->FreeQlock, flags);
 
         sz = (ioc->req_depth * MPT_SENSE_BUFFER_ALLOC);
-        ioc->sense_buf_pool =
-                pci_alloc_consistent(ioc->pcidev, sz, &ioc->sense_buf_pool_dma);
+        ioc->sense_buf_pool = dma_alloc_coherent(&ioc->pcidev->dev, sz,
+                &ioc->sense_buf_pool_dma, GFP_KERNEL);
         if (ioc->sense_buf_pool == NULL) {
                 printk(MYIOC_s_ERR_FMT "Unable to allocate Sense Buffers!\n",
                         ioc->name);
@@ -4613,18 +4614,16 @@ PrimeIocFifos(MPT_ADAPTER *ioc)
         if (ioc->alloc != NULL) {
                 sz = ioc->alloc_sz;
-                pci_free_consistent(ioc->pcidev,
-                        sz,
-                        ioc->alloc, ioc->alloc_dma);
+                dma_free_coherent(&ioc->pcidev->dev, sz, ioc->alloc,
+                        ioc->alloc_dma);
                 ioc->reply_frames = NULL;
                 ioc->req_frames = NULL;
                 ioc->alloc_total -= sz;
         }
         if (ioc->sense_buf_pool != NULL) {
                 sz = (ioc->req_depth * MPT_SENSE_BUFFER_ALLOC);
-                pci_free_consistent(ioc->pcidev,
-                        sz,
-                        ioc->sense_buf_pool, ioc->sense_buf_pool_dma);
+                dma_free_coherent(&ioc->pcidev->dev, sz, ioc->sense_buf_pool,
+                        ioc->sense_buf_pool_dma);
                 ioc->sense_buf_pool = NULL;
         }
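For the remaining pci_*_consistent callers that Christophe's script will
handle, the conversion is the same mechanical pattern as above. A minimal
sketch, with made-up names (example_bufs, example_alloc and example_free
are illustrative only, not mptbase code):

#include <linux/dma-mapping.h>
#include <linux/pci.h>

struct example_bufs {
        void            *cpu;
        dma_addr_t      dma;
        size_t          sz;
};

/*
 * Was: b->cpu = pci_alloc_consistent(pdev, sz, &b->dma);
 * Now: use the underlying struct device and pass GFP_KERNEL explicitly,
 * which is fine because these paths run in sleepable context.
 */
static int example_alloc(struct pci_dev *pdev, struct example_bufs *b,
                         size_t sz)
{
        b->cpu = dma_alloc_coherent(&pdev->dev, sz, &b->dma, GFP_KERNEL);
        if (!b->cpu)
                return -ENOMEM;
        b->sz = sz;
        return 0;
}

/* Was: pci_free_consistent(pdev, b->sz, b->cpu, b->dma); */
static void example_free(struct pci_dev *pdev, struct example_bufs *b)
{
        if (!b->cpu)
                return;
        dma_free_coherent(&pdev->dev, b->sz, b->cpu, b->dma);
        b->cpu = NULL;
}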
Thread overview: 13+ messages
2020-06-08 13:22 [PATCH v2] dma-pool: Fix too large DMA pools on medium systems Geert Uytterhoeven
2020-06-08 21:04 ` David Rientjes via iommu
2020-06-09 13:26 ` Christoph Hellwig
2020-06-20 20:09 ` Guenter Roeck
2020-06-21 8:35 ` Geert Uytterhoeven
2020-06-21 13:11 ` Guenter Roeck
2020-06-21 20:20 ` David Rientjes via iommu
2020-06-22 16:07 ` Robin Murphy
2020-06-22 17:31 ` Christoph Hellwig
2020-06-24 7:38 ` Christoph Hellwig [this message]
2020-06-24 16:20 ` Guenter Roeck
2020-06-27 16:13 ` Marion & Christophe JAILLET
2020-06-29 8:12 ` Christoph Hellwig