From: Jianxiong Gao <jxgao@google.com>
To: stable@vger.kernel.org, hch@lst.de, marcorr@google.com,
	sashal@kernel.org
Cc: Jianxiong Gao <jxgao@google.com>,
	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Subject: [PATCH 5.4 v2 8/9] swiotlb: respect min_align_mask
Date: Tue, 18 May 2021 22:18:18 +0000	[thread overview]
Message-ID: <20210518221818.2963918-9-jxgao@google.com> (raw)
In-Reply-To: <20210518221818.2963918-1-jxgao@google.com>

swiotlb: respect min_align_mask

Respect the min_align_mask in struct device_dma_parameters in swiotlb.

There are two parts to it (illustrated below):
 1) for the lower bits of the alignment inside the io tlb slot, just
    extend the size of the allocation and leave the start of the slot
    empty
 2) for the high bits, ensure we find a slot that matches the high bits
    of the alignment to avoid wasting too much memory
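
A rough illustration of the two parts (illustrative values, not part
of the patch): assume 2k IO TLB slots (IO_TLB_SHIFT == 11, so
IO_TLB_SIZE - 1 == 0x7ff) and a device with a 4k min_align_mask
(0xfff), mapping a buffer at orig_addr == 0x12345678:

	/* part 1: the low bits become an offset into the slot */
	unsigned int offset = 0x12345678 & 0xfff & 0x7ff;  /* 0x678 */
	/* part 2: the bits between IO_TLB_SIZE and the mask must match
	 * between the original and the bounce buffer address */
	unsigned int high = 0xfff & ~0x7ff;                /* 0x800 */

The buffer is bounced to a slot that agrees with orig_addr in bit 11
and is used starting at offset 0x678 into the slot, so the
device-visible low 12 bits are preserved.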

Based on an earlier patch from Jianxiong Gao <jxgao@google.com>.
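
For reference, the only driver-side hook is the helper added in patch
1/9 of this series; a hypothetical driver with a 4k alignment
requirement would do something like:

	/* hypothetical probe code, not part of this patch */
	dma_set_min_align_mask(&pdev->dev, 4096 - 1);

Patch 9/9 wires this up for nvme-pci.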

Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Jianxiong Gao <jxgao@google.com>
Tested-by: Jianxiong Gao <jxgao@google.com>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>

Upstream: 1f221a0d0dbf0e48ef3a9c62871281d6a7819f05
Signed-off-by: Jianxiong Gao <jxgao@google.com>
---
 kernel/dma/swiotlb.c | 42 ++++++++++++++++++++++++++++++++----------
 1 file changed, 32 insertions(+), 10 deletions(-)
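
A quick worked example of the new stride computation in find_slots()
(editorial illustration, reusing the 4k mask and 2k slots from above):

	iotlb_align_mask = 0xfff & ~(IO_TLB_SIZE - 1);   /* 0x800 */
	stride = (iotlb_align_mask >> IO_TLB_SHIFT) + 1; /* 2 */

so candidate runs are tried at a two-slot granularity, and the in-loop
check skips any slot whose bit 11 does not match orig_addr.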

diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index f4e18ae33507..743bf7e36385 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -454,6 +454,15 @@ static void swiotlb_bounce(phys_addr_t orig_addr, phys_addr_t tlb_addr,
 }
 
 #define slot_addr(start, idx)  ((start) + ((idx) << IO_TLB_SHIFT))
+
+/*
+ * Return the offset into a iotlb slot required to keep the device happy.
+ */
+static unsigned int swiotlb_align_offset(struct device *dev, u64 addr)
+{
+	return addr & dma_get_min_align_mask(dev) & (IO_TLB_SIZE - 1);
+}
+
 /*
  * Carefully handle integer overflow which can occur when boundary_mask == ~0UL.
  */
@@ -475,24 +484,29 @@ static unsigned int wrap_index(unsigned int index)
  * Find a suitable number of IO TLB entries size that will fit this request and
  * allocate a buffer from that IO TLB pool.
  */
-static int find_slots(struct device *dev, size_t alloc_size)
+static int find_slots(struct device *dev, phys_addr_t orig_addr,
+		      size_t alloc_size)
 {
 	unsigned long boundary_mask = dma_get_seg_boundary(dev);
 	dma_addr_t tbl_dma_addr =
 		__phys_to_dma(dev, io_tlb_start) & boundary_mask;
 	unsigned long max_slots = get_max_slots(boundary_mask);
-	unsigned int nslots = nr_slots(alloc_size), stride = 1;
+	unsigned int iotlb_align_mask =
+	    dma_get_min_align_mask(dev) & ~(IO_TLB_SIZE - 1);
+	unsigned int nslots = nr_slots(alloc_size), stride;
 	unsigned int index, wrap, count = 0, i;
 	unsigned long flags;
 
 	BUG_ON(!nslots);
 
 	/*
-	 * For mappings greater than or equal to a page, we limit the stride
-	 * (and hence alignment) to a page size.
+	 * For mappings with an alignment requirement don't bother looping to
+	 * unaligned slots once we found an aligned one.  For allocations of
+	 * PAGE_SIZE or larger only look for page aligned allocations.
 	 */
+	stride = (iotlb_align_mask >> IO_TLB_SHIFT) + 1;
 	if (alloc_size >= PAGE_SIZE)
-		stride <<= (PAGE_SHIFT - IO_TLB_SHIFT);
+		stride = max(stride, stride << (PAGE_SHIFT - IO_TLB_SHIFT));
 
 	spin_lock_irqsave(&io_tlb_lock, flags);
 	if (unlikely(nslots > io_tlb_nslabs - io_tlb_used))
@@ -500,6 +514,12 @@ static int find_slots(struct device *dev, size_t alloc_size)
 
 	index = wrap = wrap_index(ALIGN(io_tlb_index, stride));
 	do {
+		if ((slot_addr(tbl_dma_addr, index) & iotlb_align_mask) !=
+		    (orig_addr & iotlb_align_mask)) {
+			index = wrap_index(index + 1);
+			continue;
+		}
+
 		/*
 		 * If we find a slot that indicates we have 'nslots' number of
 		 * contiguous buffers, we allocate the buffers from that slot
@@ -545,6 +565,7 @@ phys_addr_t swiotlb_tbl_map_single(struct device *dev, dma_addr_t dma_addr,
 				   size_t alloc_size, enum dma_data_direction dir,
 				   unsigned long attrs)
 {
+	unsigned int offset = swiotlb_align_offset(dev, orig_addr);
 	unsigned int index, i;
 	phys_addr_t tlb_addr;
 
@@ -560,7 +581,7 @@ phys_addr_t swiotlb_tbl_map_single(struct device *dev, dma_addr_t dma_addr,
 		return (phys_addr_t)DMA_MAPPING_ERROR;
 	}
 
-	index = find_slots(dev, alloc_size);
+	index = find_slots(dev, orig_addr, alloc_size + offset);
 	if (index == -1) {
 		if (!(attrs & DMA_ATTR_NO_WARN))
 			dev_warn_ratelimited(dev,
@@ -574,10 +595,10 @@ phys_addr_t swiotlb_tbl_map_single(struct device *dev, dma_addr_t dma_addr,
 	 * This is needed when we sync the memory.  Then we sync the buffer if
 	 * needed.
 	 */
-	for (i = 0; i < nr_slots(alloc_size); i++)
+	for (i = 0; i < nr_slots(alloc_size + offset); i++)
 		io_tlb_orig_addr[index + i] = slot_addr(orig_addr, i);
 
-	tlb_addr = slot_addr(io_tlb_start, index);
+	tlb_addr = slot_addr(io_tlb_start, index) + offset;
 	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
 	    (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
 		swiotlb_bounce(orig_addr, tlb_addr, mapping_size, DMA_TO_DEVICE);
@@ -593,8 +614,9 @@ void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
 			      enum dma_data_direction dir, unsigned long attrs)
 {
 	unsigned long flags;
-	int i, count, nslots = nr_slots(alloc_size);
-	int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT;
+	unsigned int offset = swiotlb_align_offset(hwdev, tlb_addr);
+	int i, count, nslots = nr_slots(alloc_size + offset);
+	int index = (tlb_addr - offset - io_tlb_start) >> IO_TLB_SHIFT;
 	phys_addr_t orig_addr = io_tlb_orig_addr[index];
 
 	/*
-- 
2.31.1.751.gd2f1c929bd-goog



Thread overview: 17+ messages
2021-05-18 22:18 [PATCH 5.4 v2 0/9] preserve DMA offsets when using swiotlb Jianxiong Gao
2021-05-18 22:18 ` [PATCH 5.4 v2 1/9] driver core: add a min_align_mask field to struct device_dma_parameters Jianxiong Gao
2021-05-18 22:18 ` [PATCH 5.4 v2 2/9] swiotlb: add a IO_TLB_SIZE define Jianxiong Gao
2021-05-18 22:18 ` [PATCH 5.4 v2 3/9] swiotlb: factor out an io_tlb_offset helper Jianxiong Gao
2021-05-18 22:18 ` [PATCH 5.4 v2 4/9] swiotlb: factor out a nr_slots helper Jianxiong Gao
2021-05-18 22:18 ` [PATCH 5.4 v2 5/9] swiotlb: clean up swiotlb_tbl_unmap_single Jianxiong Gao
2021-05-18 22:18 ` [PATCH 5.4 v2 6/9] swiotlb: refactor swiotlb_tbl_map_single Jianxiong Gao
2021-05-18 22:18 ` [PATCH 5.4 v2 7/9] swiotlb: don't modify orig_addr in swiotlb_tbl_sync_single Jianxiong Gao
2021-05-18 22:18 ` Jianxiong Gao [this message]
2021-05-18 22:18 ` [PATCH 5.4 v2 9/9] nvme-pci: set min_align_mask Jianxiong Gao
2021-05-19  8:11 ` [PATCH 5.4 v2 0/9] preserve DMA offsets when using swiotlb Greg KH
2021-05-19 16:42   ` Jianxiong Gao
2021-05-19 17:03     ` Greg KH
2021-05-19 17:18       ` Marc Orr
2021-05-19 17:25         ` Greg KH
2021-05-19 20:01           ` Marc Orr
2021-05-20  8:32             ` Greg KH
