* [PATCH 0/5] ALSA: More abstraction of memory alloc helpers
@ 2021-06-09 16:25 Takashi Iwai
  2021-06-09 16:25 ` [PATCH 1/5] ALSA: trident: Drop shadow TLB pointer table Takashi Iwai
                   ` (4 more replies)
  0 siblings, 5 replies; 6+ messages in thread
From: Takashi Iwai @ 2021-06-09 16:25 UTC (permalink / raw)
  To: alsa-devel

Hi,

this is a patch set for code cleanup and refactoring around the ALSA
core memory allocation helpers.  It begins with the removal of unused
code, followed by the refactoring with a new ops table, and finally
moves the memory-type-specific mmap handling code from the PCM core
into the common memalloc code.


Takashi

===

Takashi Iwai (5):
  ALSA: trident: Drop shadow TLB pointer table
  ALSA: core: Drop snd_sgbuf_get_ptr()
  ALSA: core: Abstract memory alloc helpers
  ALSA: core: Move mmap handler into memalloc ops
  ALSA: core: Add continuous and vmalloc mmap ops

 include/sound/memalloc.h           |  83 +-----
 include/sound/pcm.h                |  19 --
 sound/core/memalloc.c              | 444 +++++++++++++++++++----------
 sound/core/memalloc_local.h        |  19 ++
 sound/core/pcm_local.h             |   5 -
 sound/core/pcm_memory.c            |  21 --
 sound/core/pcm_native.c            |  39 +--
 sound/core/sgbuf.c                 |  90 ++++--
 sound/pci/trident/trident.h        |   1 -
 sound/pci/trident/trident_main.c   |  11 +-
 sound/pci/trident/trident_memory.c |  53 ++--
 11 files changed, 405 insertions(+), 380 deletions(-)
 create mode 100644 sound/core/memalloc_local.h

-- 
2.26.2


* [PATCH 1/5] ALSA: trident: Drop shadow TLB pointer table
  2021-06-09 16:25 [PATCH 0/5] ALSA: More abstraction of memory alloc helpers Takashi Iwai
@ 2021-06-09 16:25 ` Takashi Iwai
  2021-06-09 16:25 ` [PATCH 2/5] ALSA: core: Drop snd_sgbuf_get_ptr() Takashi Iwai
                   ` (3 subsequent siblings)
  4 siblings, 0 replies; 6+ messages in thread
From: Takashi Iwai @ 2021-06-09 16:25 UTC (permalink / raw)
  To: alsa-devel

The shadow TLB pointer table is allocated and set up, but it is no
longer actually used by the driver.  Let's drop it.

Since this is the only user of snd_pcm_sgbuf_get_ptr(), we can clean
up the API after this change.

Signed-off-by: Takashi Iwai <tiwai@suse.de>
---
 sound/pci/trident/trident.h        |  1 -
 sound/pci/trident/trident_main.c   | 11 +------
 sound/pci/trident/trident_memory.c | 53 +++++++++---------------------
 3 files changed, 17 insertions(+), 48 deletions(-)

diff --git a/sound/pci/trident/trident.h b/sound/pci/trident/trident.h
index c7567edbe4c4..c579a44bb9ae 100644
--- a/sound/pci/trident/trident.h
+++ b/sound/pci/trident/trident.h
@@ -251,7 +251,6 @@ struct snd_trident_memblk_arg {
 struct snd_trident_tlb {
 	__le32 *entries;		/* 16k-aligned TLB table */
 	dma_addr_t entries_dmaaddr;	/* 16k-aligned PCI address to TLB table */
-	unsigned long * shadow_entries;	/* shadow entries with virtual addresses */
 	struct snd_dma_buffer buffer;
 	struct snd_util_memhdr * memhdr;	/* page allocation list */
 	struct snd_dma_buffer silent_page;
diff --git a/sound/pci/trident/trident_main.c b/sound/pci/trident/trident_main.c
index 281ea7143b1c..cfbca3bd60ed 100644
--- a/sound/pci/trident/trident_main.c
+++ b/sound/pci/trident/trident_main.c
@@ -3331,12 +3331,6 @@ static int snd_trident_tlb_alloc(struct snd_trident *trident)
 	}
 	trident->tlb.entries = (__le32 *)ALIGN((unsigned long)trident->tlb.buffer.area, SNDRV_TRIDENT_MAX_PAGES * 4);
 	trident->tlb.entries_dmaaddr = ALIGN(trident->tlb.buffer.addr, SNDRV_TRIDENT_MAX_PAGES * 4);
-	/* allocate shadow TLB page table (virtual addresses) */
-	trident->tlb.shadow_entries =
-		vmalloc(array_size(SNDRV_TRIDENT_MAX_PAGES,
-				   sizeof(unsigned long)));
-	if (!trident->tlb.shadow_entries)
-		return -ENOMEM;
 
 	/* allocate and setup silent page and initialise TLB entries */
 	if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, &trident->pci->dev,
@@ -3345,10 +3339,8 @@ static int snd_trident_tlb_alloc(struct snd_trident *trident)
 		return -ENOMEM;
 	}
 	memset(trident->tlb.silent_page.area, 0, SNDRV_TRIDENT_PAGE_SIZE);
-	for (i = 0; i < SNDRV_TRIDENT_MAX_PAGES; i++) {
+	for (i = 0; i < SNDRV_TRIDENT_MAX_PAGES; i++)
 		trident->tlb.entries[i] = cpu_to_le32(trident->tlb.silent_page.addr & ~(SNDRV_TRIDENT_PAGE_SIZE-1));
-		trident->tlb.shadow_entries[i] = (unsigned long)trident->tlb.silent_page.area;
-	}
 
 	/* use emu memory block manager code to manage tlb page allocation */
 	trident->tlb.memhdr = snd_util_memhdr_new(SNDRV_TRIDENT_PAGE_SIZE * SNDRV_TRIDENT_MAX_PAGES);
@@ -3665,7 +3657,6 @@ static int snd_trident_free(struct snd_trident *trident)
 		snd_util_memhdr_free(trident->tlb.memhdr);
 		if (trident->tlb.silent_page.area)
 			snd_dma_free_pages(&trident->tlb.silent_page);
-		vfree(trident->tlb.shadow_entries);
 		snd_dma_free_pages(&trident->tlb.buffer);
 	}
 	pci_release_regions(trident->pci);
diff --git a/sound/pci/trident/trident_memory.c b/sound/pci/trident/trident_memory.c
index bb24dbf0530d..f831ec02702f 100644
--- a/sound/pci/trident/trident_memory.c
+++ b/sound/pci/trident/trident_memory.c
@@ -19,11 +19,8 @@
 /* page arguments of these two macros are Trident page (4096 bytes), not like
  * aligned pages in others
  */
-#define __set_tlb_bus(trident,page,ptr,addr) \
-	do { (trident)->tlb.entries[page] = cpu_to_le32((addr) & ~(SNDRV_TRIDENT_PAGE_SIZE-1)); \
-	     (trident)->tlb.shadow_entries[page] = (ptr); } while (0)
-#define __tlb_to_ptr(trident,page) \
-	(void*)((trident)->tlb.shadow_entries[page])
+#define __set_tlb_bus(trident,page,addr) \
+	(trident)->tlb.entries[page] = cpu_to_le32((addr) & ~(SNDRV_TRIDENT_PAGE_SIZE-1))
 #define __tlb_to_addr(trident,page) \
 	(dma_addr_t)le32_to_cpu((trident->tlb.entries[page]) & ~(SNDRV_TRIDENT_PAGE_SIZE - 1))
 
@@ -32,15 +29,13 @@
 #define ALIGN_PAGE_SIZE		PAGE_SIZE	/* minimum page size for allocation */
 #define MAX_ALIGN_PAGES		SNDRV_TRIDENT_MAX_PAGES	/* maxmium aligned pages */
 /* fill TLB entrie(s) corresponding to page with ptr */
-#define set_tlb_bus(trident,page,ptr,addr) __set_tlb_bus(trident,page,ptr,addr)
+#define set_tlb_bus(trident,page,addr) __set_tlb_bus(trident,page,addr)
 /* fill TLB entrie(s) corresponding to page with silence pointer */
-#define set_silent_tlb(trident,page)	__set_tlb_bus(trident, page, (unsigned long)trident->tlb.silent_page.area, trident->tlb.silent_page.addr)
+#define set_silent_tlb(trident,page)	__set_tlb_bus(trident, page, trident->tlb.silent_page.addr)
 /* get aligned page from offset address */
 #define get_aligned_page(offset)	((offset) >> 12)
 /* get offset address from aligned page */
 #define aligned_page_offset(page)	((page) << 12)
-/* get buffer address from aligned page */
-#define page_to_ptr(trident,page)	__tlb_to_ptr(trident, page)
 /* get PCI physical address from aligned page */
 #define page_to_addr(trident,page)	__tlb_to_addr(trident, page)
 
@@ -50,22 +45,21 @@
 #define MAX_ALIGN_PAGES		(SNDRV_TRIDENT_MAX_PAGES / 2)
 #define get_aligned_page(offset)	((offset) >> 13)
 #define aligned_page_offset(page)	((page) << 13)
-#define page_to_ptr(trident,page)	__tlb_to_ptr(trident, (page) << 1)
 #define page_to_addr(trident,page)	__tlb_to_addr(trident, (page) << 1)
 
 /* fill TLB entries -- we need to fill two entries */
 static inline void set_tlb_bus(struct snd_trident *trident, int page,
-			       unsigned long ptr, dma_addr_t addr)
+			       dma_addr_t addr)
 {
 	page <<= 1;
-	__set_tlb_bus(trident, page, ptr, addr);
-	__set_tlb_bus(trident, page+1, ptr + SNDRV_TRIDENT_PAGE_SIZE, addr + SNDRV_TRIDENT_PAGE_SIZE);
+	__set_tlb_bus(trident, page, addr);
+	__set_tlb_bus(trident, page+1, addr + SNDRV_TRIDENT_PAGE_SIZE);
 }
 static inline void set_silent_tlb(struct snd_trident *trident, int page)
 {
 	page <<= 1;
-	__set_tlb_bus(trident, page, (unsigned long)trident->tlb.silent_page.area, trident->tlb.silent_page.addr);
-	__set_tlb_bus(trident, page+1, (unsigned long)trident->tlb.silent_page.area, trident->tlb.silent_page.addr);
+	__set_tlb_bus(trident, page, trident->tlb.silent_page.addr);
+	__set_tlb_bus(trident, page+1, trident->tlb.silent_page.addr);
 }
 
 #else
@@ -80,18 +74,16 @@ static inline void set_silent_tlb(struct snd_trident *trident, int page)
  */
 #define get_aligned_page(offset)	((offset) / ALIGN_PAGE_SIZE)
 #define aligned_page_offset(page)	((page) * ALIGN_PAGE_SIZE)
-#define page_to_ptr(trident,page)	__tlb_to_ptr(trident, (page) * UNIT_PAGES)
 #define page_to_addr(trident,page)	__tlb_to_addr(trident, (page) * UNIT_PAGES)
 
 /* fill TLB entries -- UNIT_PAGES entries must be filled */
 static inline void set_tlb_bus(struct snd_trident *trident, int page,
-			       unsigned long ptr, dma_addr_t addr)
+			       dma_addr_t addr)
 {
 	int i;
 	page *= UNIT_PAGES;
-	for (i = 0; i < UNIT_PAGES; i++, page++) {
-		__set_tlb_bus(trident, page, ptr, addr);
-		ptr += SNDRV_TRIDENT_PAGE_SIZE;
+	for (i = 0; i < UNIT_PAGES; i++, page++) {
+		__set_tlb_bus(trident, page, addr);
 		addr += SNDRV_TRIDENT_PAGE_SIZE;
 	}
 }
@@ -100,20 +92,11 @@ static inline void set_silent_tlb(struct snd_trident *trident, int page)
 	int i;
 	page *= UNIT_PAGES;
 	for (i = 0; i < UNIT_PAGES; i++, page++)
-		__set_tlb_bus(trident, page, (unsigned long)trident->tlb.silent_page.area, trident->tlb.silent_page.addr);
+		__set_tlb_bus(trident, page, trident->tlb.silent_page.addr);
 }
 
 #endif /* PAGE_SIZE */
 
-/* calculate buffer pointer from offset address */
-static inline void *offset_ptr(struct snd_trident *trident, int offset)
-{
-	char *ptr;
-	ptr = page_to_ptr(trident, get_aligned_page(offset));
-	ptr += offset % ALIGN_PAGE_SIZE;
-	return (void*)ptr;
-}
-
 /* first and last (aligned) pages of memory block */
 #define firstpg(blk)	(((struct snd_trident_memblk_arg *)snd_util_memblk_argptr(blk))->first_page)
 #define lastpg(blk)	(((struct snd_trident_memblk_arg *)snd_util_memblk_argptr(blk))->last_page)
@@ -201,14 +184,12 @@ snd_trident_alloc_sg_pages(struct snd_trident *trident,
 	for (page = firstpg(blk); page <= lastpg(blk); page++, idx++) {
 		unsigned long ofs = idx << PAGE_SHIFT;
 		dma_addr_t addr = snd_pcm_sgbuf_get_addr(substream, ofs);
-		unsigned long ptr = (unsigned long)
-			snd_pcm_sgbuf_get_ptr(substream, ofs);
 		if (! is_valid_page(addr)) {
 			__snd_util_mem_free(hdr, blk);
 			mutex_unlock(&hdr->block_mutex);
 			return NULL;
 		}
-		set_tlb_bus(trident, page, ptr, addr);
+		set_tlb_bus(trident, page, addr);
 	}
 	mutex_unlock(&hdr->block_mutex);
 	return blk;
@@ -226,7 +207,6 @@ snd_trident_alloc_cont_pages(struct snd_trident *trident,
 	int page;
 	struct snd_pcm_runtime *runtime = substream->runtime;
 	dma_addr_t addr;
-	unsigned long ptr;
 
 	if (snd_BUG_ON(runtime->dma_bytes <= 0 ||
 		       runtime->dma_bytes > SNDRV_TRIDENT_MAX_PAGES *
@@ -245,15 +225,14 @@ snd_trident_alloc_cont_pages(struct snd_trident *trident,
 			   
 	/* set TLB entries */
 	addr = runtime->dma_addr;
-	ptr = (unsigned long)runtime->dma_area;
 	for (page = firstpg(blk); page <= lastpg(blk); page++,
-	     ptr += SNDRV_TRIDENT_PAGE_SIZE, addr += SNDRV_TRIDENT_PAGE_SIZE) {
+	     addr += SNDRV_TRIDENT_PAGE_SIZE) {
 		if (! is_valid_page(addr)) {
 			__snd_util_mem_free(hdr, blk);
 			mutex_unlock(&hdr->block_mutex);
 			return NULL;
 		}
-		set_tlb_bus(trident, page, ptr, addr);
+		set_tlb_bus(trident, page, addr);
 	}
 	mutex_unlock(&hdr->block_mutex);
 	return blk;
-- 
2.26.2


* [PATCH 2/5] ALSA: core: Drop snd_sgbuf_get_ptr()
  2021-06-09 16:25 [PATCH 0/5] ALSA: More abstraction of memory alloc helpers Takashi Iwai
  2021-06-09 16:25 ` [PATCH 1/5] ALSA: trident: Drop shadow TLB pointer table Takashi Iwai
@ 2021-06-09 16:25 ` Takashi Iwai
  2021-06-09 16:25 ` [PATCH 3/5] ALSA: core: Abstract memory alloc helpers Takashi Iwai
                   ` (2 subsequent siblings)
  4 siblings, 0 replies; 6+ messages in thread
From: Takashi Iwai @ 2021-06-09 16:25 UTC (permalink / raw)
  To: alsa-devel

snd_sgbuf_get_ptr() and its sibling snd_pcm_sgbuf_get_ptr() are no
longer used by any drivers.  Let's drop them.

Signed-off-by: Takashi Iwai <tiwai@suse.de>
---
 include/sound/memalloc.h | 19 -------------------
 include/sound/pcm.h      | 11 -----------
 2 files changed, 30 deletions(-)

diff --git a/include/sound/memalloc.h b/include/sound/memalloc.h
index 5daa937684a4..3ab084438fdc 100644
--- a/include/sound/memalloc.h
+++ b/include/sound/memalloc.h
@@ -103,19 +103,6 @@ static inline dma_addr_t snd_sgbuf_get_addr(struct snd_dma_buffer *dmab,
 	return addr + offset % PAGE_SIZE;
 }
 
-/*
- * return the virtual address at the corresponding offset
- */
-static inline void *snd_sgbuf_get_ptr(struct snd_dma_buffer *dmab,
-				     size_t offset)
-{
-	struct snd_sg_buf *sgbuf = dmab->private_data;
-
-	if (!sgbuf)
-		return dmab->area + offset;
-	return sgbuf->table[offset >> PAGE_SHIFT].buf + offset % PAGE_SIZE;
-}
-
 unsigned int snd_sgbuf_get_chunk_size(struct snd_dma_buffer *dmab,
 				      unsigned int ofs, unsigned int size);
 #else
@@ -126,12 +113,6 @@ static inline dma_addr_t snd_sgbuf_get_addr(struct snd_dma_buffer *dmab,
 	return dmab->addr + offset;
 }
 
-static inline void *snd_sgbuf_get_ptr(struct snd_dma_buffer *dmab,
-				      size_t offset)
-{
-	return dmab->area + offset;
-}
-
 #define snd_sgbuf_get_chunk_size(dmab, ofs, size)	(size)
 
 #endif /* CONFIG_SND_DMA_SGBUF */
diff --git a/include/sound/pcm.h b/include/sound/pcm.h
index 2e1200d17d0c..11e0a68335bd 100644
--- a/include/sound/pcm.h
+++ b/include/sound/pcm.h
@@ -1272,17 +1272,6 @@ snd_pcm_sgbuf_get_addr(struct snd_pcm_substream *substream, unsigned int ofs)
 	return snd_sgbuf_get_addr(snd_pcm_get_dma_buf(substream), ofs);
 }
 
-/**
- * snd_pcm_sgbuf_get_ptr - Get the virtual address at the corresponding offset
- * @substream: PCM substream
- * @ofs: byte offset
- */
-static inline void *
-snd_pcm_sgbuf_get_ptr(struct snd_pcm_substream *substream, unsigned int ofs)
-{
-	return snd_sgbuf_get_ptr(snd_pcm_get_dma_buf(substream), ofs);
-}
-
 /**
  * snd_pcm_sgbuf_get_chunk_size - Compute the max size that fits within the
  * contig. page from the given size
-- 
2.26.2


* [PATCH 3/5] ALSA: core: Abstract memory alloc helpers
  2021-06-09 16:25 [PATCH 0/5] ALSA: More abstraction of memory alloc helpers Takashi Iwai
  2021-06-09 16:25 ` [PATCH 1/5] ALSA: trident: Drop shadow TLB pointer table Takashi Iwai
  2021-06-09 16:25 ` [PATCH 2/5] ALSA: core: Drop snd_sgbuf_get_ptr() Takashi Iwai
@ 2021-06-09 16:25 ` Takashi Iwai
  2021-06-09 16:25 ` [PATCH 4/5] ALSA: core: Move mmap handler into memalloc ops Takashi Iwai
  2021-06-09 16:25 ` [PATCH 5/5] ALSA: core: Add continuous and vmalloc mmap ops Takashi Iwai
  4 siblings, 0 replies; 6+ messages in thread
From: Takashi Iwai @ 2021-06-09 16:25 UTC (permalink / raw)
  To: alsa-devel

This patch introduces an ops table for each memory allocation type
(SNDRV_DMA_TYPE_XXX) and abstracts the handling for better code
management.  This separates the page allocation, release and other
tasks per type, especially for the SG buffer.

Each buffer type now has its callbacks in struct snd_malloc_ops, and
the common helper functions call those ops accordingly.  The former
inline code that is specific to the SG buffer is moved into the local
sgbuf.c, and the PCM code can be simplified without knowing the
details of the memory handling.
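
As a rough illustration (not part of this patch; the *_foo names below
are made up), adding a new buffer type now boils down to providing an
ops entry, while callbacks that are left out fall back to the defaults
in the common helpers:

  static int snd_dma_foo_alloc(struct snd_dma_buffer *dmab, size_t size)
  {
	/* fill dmab->area (and dmab->addr where applicable) */
	dmab->area = alloc_pages_exact(size, GFP_KERNEL);
	return 0;	/* the caller checks dmab->area for NULL */
  }

  static void snd_dma_foo_free(struct snd_dma_buffer *dmab)
  {
	free_pages_exact(dmab->area, dmab->bytes);
  }

  static const struct snd_malloc_ops snd_dma_foo_ops = {
	.alloc = snd_dma_foo_alloc,
	.free = snd_dma_foo_free,
  };

  /* ... plus one more entry in the dma_ops[] table:
   *	[SNDRV_DMA_TYPE_FOO] = &snd_dma_foo_ops,
   */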

Signed-off-by: Takashi Iwai <tiwai@suse.de>
---
 include/sound/memalloc.h    |  61 +-----
 include/sound/pcm.h         |   8 -
 sound/core/memalloc.c       | 391 ++++++++++++++++++++++--------------
 sound/core/memalloc_local.h |  18 ++
 sound/core/pcm_local.h      |   5 -
 sound/core/pcm_memory.c     |  21 --
 sound/core/pcm_native.c     |  20 +-
 sound/core/sgbuf.c          |  90 ++++++---
 8 files changed, 328 insertions(+), 286 deletions(-)
 create mode 100644 sound/core/memalloc_local.h

diff --git a/include/sound/memalloc.h b/include/sound/memalloc.h
index 3ab084438fdc..6dc85a7f44ad 100644
--- a/include/sound/memalloc.h
+++ b/include/sound/memalloc.h
@@ -9,9 +9,8 @@
 #ifndef __SOUND_MEMALLOC_H
 #define __SOUND_MEMALLOC_H
 
-#include <asm/page.h>
-
 struct device;
+struct page;
 
 /*
  * buffer device info
@@ -64,59 +63,6 @@ static inline unsigned int snd_sgbuf_aligned_pages(size_t size)
 	return (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
 }
 
-#ifdef CONFIG_SND_DMA_SGBUF
-/*
- * Scatter-Gather generic device pages
- */
-void *snd_malloc_sgbuf_pages(struct device *device,
-			     size_t size, struct snd_dma_buffer *dmab,
-			     size_t *res_size);
-int snd_free_sgbuf_pages(struct snd_dma_buffer *dmab);
-
-struct snd_sg_page {
-	void *buf;
-	dma_addr_t addr;
-};
-
-struct snd_sg_buf {
-	int size;	/* allocated byte size */
-	int pages;	/* allocated pages */
-	int tblsize;	/* allocated table size */
-	struct snd_sg_page *table;	/* address table */
-	struct page **page_table;	/* page table (for vmap/vunmap) */
-	struct device *dev;
-};
-
-/*
- * return the physical address at the corresponding offset
- */
-static inline dma_addr_t snd_sgbuf_get_addr(struct snd_dma_buffer *dmab,
-					   size_t offset)
-{
-	struct snd_sg_buf *sgbuf = dmab->private_data;
-	dma_addr_t addr;
-
-	if (!sgbuf)
-		return dmab->addr + offset;
-	addr = sgbuf->table[offset >> PAGE_SHIFT].addr;
-	addr &= ~((dma_addr_t)PAGE_SIZE - 1);
-	return addr + offset % PAGE_SIZE;
-}
-
-unsigned int snd_sgbuf_get_chunk_size(struct snd_dma_buffer *dmab,
-				      unsigned int ofs, unsigned int size);
-#else
-/* non-SG versions */
-static inline dma_addr_t snd_sgbuf_get_addr(struct snd_dma_buffer *dmab,
-					    size_t offset)
-{
-	return dmab->addr + offset;
-}
-
-#define snd_sgbuf_get_chunk_size(dmab, ofs, size)	(size)
-
-#endif /* CONFIG_SND_DMA_SGBUF */
-
 /* allocate/release a buffer */
 int snd_dma_alloc_pages(int type, struct device *dev, size_t size,
 			struct snd_dma_buffer *dmab);
@@ -124,5 +70,10 @@ int snd_dma_alloc_pages_fallback(int type, struct device *dev, size_t size,
                                  struct snd_dma_buffer *dmab);
 void snd_dma_free_pages(struct snd_dma_buffer *dmab);
 
+dma_addr_t snd_sgbuf_get_addr(struct snd_dma_buffer *dmab, size_t offset);
+struct page *snd_sgbuf_get_page(struct snd_dma_buffer *dmab, size_t offset);
+unsigned int snd_sgbuf_get_chunk_size(struct snd_dma_buffer *dmab,
+				      unsigned int ofs, unsigned int size);
+
 #endif /* __SOUND_MEMALLOC_H */
 
diff --git a/include/sound/pcm.h b/include/sound/pcm.h
index 11e0a68335bd..d8f5f85ebd3f 100644
--- a/include/sound/pcm.h
+++ b/include/sound/pcm.h
@@ -1253,14 +1253,6 @@ static inline int snd_pcm_lib_alloc_vmalloc_32_buffer
 
 #define snd_pcm_get_dma_buf(substream) ((substream)->runtime->dma_buffer_p)
 
-#ifdef CONFIG_SND_DMA_SGBUF
-/*
- * SG-buffer handling
- */
-#define snd_pcm_substream_sgbuf(substream) \
-	snd_pcm_get_dma_buf(substream)->private_data
-#endif /* SND_DMA_SGBUF */
-
 /**
  * snd_pcm_sgbuf_get_addr - Get the DMA address at the corresponding offset
  * @substream: PCM substream
diff --git a/sound/core/memalloc.c b/sound/core/memalloc.c
index 966bef5acc75..ad68bcdf82cf 100644
--- a/sound/core/memalloc.c
+++ b/sound/core/memalloc.c
@@ -15,99 +15,27 @@
 #include <asm/set_memory.h>
 #endif
 #include <sound/memalloc.h>
+#include "memalloc_local.h"
 
-/*
- *
- *  Bus-specific memory allocators
- *
- */
+static const struct snd_malloc_ops *snd_dma_get_ops(struct snd_dma_buffer *dmab);
 
-#ifdef CONFIG_HAS_DMA
-/* allocate the coherent DMA pages */
-static void snd_malloc_dev_pages(struct snd_dma_buffer *dmab, size_t size)
-{
-	gfp_t gfp_flags;
-
-	gfp_flags = GFP_KERNEL
-		| __GFP_COMP	/* compound page lets parts be mapped */
-		| __GFP_NORETRY /* don't trigger OOM-killer */
-		| __GFP_NOWARN; /* no stack trace print - this call is non-critical */
-	dmab->area = dma_alloc_coherent(dmab->dev.dev, size, &dmab->addr,
-					gfp_flags);
-#ifdef CONFIG_X86
-	if (dmab->area && dmab->dev.type == SNDRV_DMA_TYPE_DEV_UC)
-		set_memory_wc((unsigned long)dmab->area,
-			      PAGE_ALIGN(size) >> PAGE_SHIFT);
-#endif
-}
-
-/* free the coherent DMA pages */
-static void snd_free_dev_pages(struct snd_dma_buffer *dmab)
+/* a cast to gfp flag from the dev pointer; for CONTINUOUS and VMALLOC types */
+static inline gfp_t snd_mem_get_gfp_flags(const struct snd_dma_buffer *dmab,
+					  gfp_t default_gfp)
 {
-#ifdef CONFIG_X86
-	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_UC)
-		set_memory_wb((unsigned long)dmab->area,
-			      PAGE_ALIGN(dmab->bytes) >> PAGE_SHIFT);
-#endif
-	dma_free_coherent(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
+	if (!dmab->dev.dev)
+		return default_gfp;
+	else
+		return (__force gfp_t)(unsigned long)dmab->dev.dev;
 }
 
-#ifdef CONFIG_GENERIC_ALLOCATOR
-/**
- * snd_malloc_dev_iram - allocate memory from on-chip internal ram
- * @dmab: buffer allocation record to store the allocated data
- * @size: number of bytes to allocate from the iram
- *
- * This function requires iram phandle provided via of_node
- */
-static void snd_malloc_dev_iram(struct snd_dma_buffer *dmab, size_t size)
+static int __snd_dma_alloc_pages(struct snd_dma_buffer *dmab, size_t size)
 {
-	struct device *dev = dmab->dev.dev;
-	struct gen_pool *pool = NULL;
-
-	dmab->area = NULL;
-	dmab->addr = 0;
-
-	if (dev->of_node)
-		pool = of_gen_pool_get(dev->of_node, "iram", 0);
-
-	if (!pool)
-		return;
-
-	/* Assign the pool into private_data field */
-	dmab->private_data = pool;
+	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);
 
-	dmab->area = gen_pool_dma_alloc_align(pool, size, &dmab->addr,
-					PAGE_SIZE);
-}
-
-/**
- * snd_free_dev_iram - free allocated specific memory from on-chip internal ram
- * @dmab: buffer allocation record to store the allocated data
- */
-static void snd_free_dev_iram(struct snd_dma_buffer *dmab)
-{
-	struct gen_pool *pool = dmab->private_data;
-
-	if (pool && dmab->area)
-		gen_pool_free(pool, (unsigned long)dmab->area, dmab->bytes);
-}
-#endif /* CONFIG_GENERIC_ALLOCATOR */
-#endif /* CONFIG_HAS_DMA */
-
-/*
- *
- *  ALSA generic memory management
- *
- */
-
-static inline gfp_t snd_mem_get_gfp_flags(const struct device *dev,
-					  gfp_t default_gfp)
-{
-	if (!dev)
-		return default_gfp;
-	else
-		return (__force gfp_t)(unsigned long)dev;
+	if (WARN_ON_ONCE(!ops || !ops->alloc))
+		return -EINVAL;
+	return ops->alloc(dmab, size);
 }
 
 /**
@@ -126,7 +54,7 @@ static inline gfp_t snd_mem_get_gfp_flags(const struct device *dev,
 int snd_dma_alloc_pages(int type, struct device *device, size_t size,
 			struct snd_dma_buffer *dmab)
 {
-	gfp_t gfp;
+	int err;
 
 	if (WARN_ON(!size))
 		return -ENXIO;
@@ -140,43 +68,10 @@ int snd_dma_alloc_pages(int type, struct device *device, size_t size,
 	dmab->area = NULL;
 	dmab->addr = 0;
 	dmab->private_data = NULL;
-	switch (type) {
-	case SNDRV_DMA_TYPE_CONTINUOUS:
-		gfp = snd_mem_get_gfp_flags(device, GFP_KERNEL);
-		dmab->area = alloc_pages_exact(size, gfp);
-		break;
-	case SNDRV_DMA_TYPE_VMALLOC:
-		gfp = snd_mem_get_gfp_flags(device, GFP_KERNEL | __GFP_HIGHMEM);
-		dmab->area = __vmalloc(size, gfp);
-		break;
-#ifdef CONFIG_HAS_DMA
-#ifdef CONFIG_GENERIC_ALLOCATOR
-	case SNDRV_DMA_TYPE_DEV_IRAM:
-		snd_malloc_dev_iram(dmab, size);
-		if (dmab->area)
-			break;
-		/* Internal memory might have limited size and no enough space,
-		 * so if we fail to malloc, try to fetch memory traditionally.
-		 */
-		dmab->dev.type = SNDRV_DMA_TYPE_DEV;
-		fallthrough;
-#endif /* CONFIG_GENERIC_ALLOCATOR */
-	case SNDRV_DMA_TYPE_DEV:
-	case SNDRV_DMA_TYPE_DEV_UC:
-		snd_malloc_dev_pages(dmab, size);
-		break;
-#endif
-#ifdef CONFIG_SND_DMA_SGBUF
-	case SNDRV_DMA_TYPE_DEV_SG:
-	case SNDRV_DMA_TYPE_DEV_UC_SG:
-		snd_malloc_sgbuf_pages(device, size, dmab, NULL);
-		break;
-#endif
-	default:
-		pr_err("snd-malloc: invalid device type %d\n", type);
-		return -ENXIO;
-	}
-	if (! dmab->area)
+	err = __snd_dma_alloc_pages(dmab, size);
+	if (err < 0)
+		return err;
+	if (!dmab->area)
 		return -ENOMEM;
 	dmab->bytes = size;
 	return 0;
@@ -217,7 +112,6 @@ int snd_dma_alloc_pages_fallback(int type, struct device *device, size_t size,
 }
 EXPORT_SYMBOL(snd_dma_alloc_pages_fallback);
 
-
 /**
  * snd_dma_free_pages - release the allocated buffer
  * @dmab: the buffer allocation record to release
@@ -226,32 +120,235 @@ EXPORT_SYMBOL(snd_dma_alloc_pages_fallback);
  */
 void snd_dma_free_pages(struct snd_dma_buffer *dmab)
 {
-	switch (dmab->dev.type) {
-	case SNDRV_DMA_TYPE_CONTINUOUS:
-		free_pages_exact(dmab->area, dmab->bytes);
-		break;
-	case SNDRV_DMA_TYPE_VMALLOC:
-		vfree(dmab->area);
-		break;
+	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);
+
+	if (ops && ops->free)
+		ops->free(dmab);
+}
+EXPORT_SYMBOL(snd_dma_free_pages);
+
+/**
+ * snd_sgbuf_get_addr - return the physical address at the corresponding offset
+ * @dmab: buffer allocation information
+ * @offset: offset in the ring buffer
+ */
+dma_addr_t snd_sgbuf_get_addr(struct snd_dma_buffer *dmab, size_t offset)
+{
+	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);
+
+	if (ops && ops->get_addr)
+		return ops->get_addr(dmab, offset);
+	else
+		return dmab->addr + offset;
+}
+EXPORT_SYMBOL(snd_sgbuf_get_addr);
+
+/**
+ * snd_sgbuf_get_page - return the physical page at the corresponding offset
+ * @dmab: buffer allocation information
+ * @offset: offset in the ring buffer
+ */
+struct page *snd_sgbuf_get_page(struct snd_dma_buffer *dmab, size_t offset)
+{
+	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);
+
+	if (ops && ops->get_page)
+		return ops->get_page(dmab, offset);
+	else
+		return virt_to_page(dmab->area + offset);
+}
+EXPORT_SYMBOL(snd_sgbuf_get_page);
+
+/**
+ * snd_sgbuf_get_chunk_size - compute the max chunk size with continuous pages
+ *	on sg-buffer
+ * @dmab: buffer allocation information
+ * @ofs: offset in the ring buffer
+ * @size: the requested size
+ */
+unsigned int snd_sgbuf_get_chunk_size(struct snd_dma_buffer *dmab,
+				      unsigned int ofs, unsigned int size)
+{
+	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);
+
+	if (ops && ops->get_chunk_size)
+		return ops->get_chunk_size(dmab, ofs, size);
+	else
+		return size;
+}
+EXPORT_SYMBOL(snd_sgbuf_get_chunk_size);
+
+/*
+ * Continuous pages allocator
+ */
+static int snd_dma_continuous_alloc(struct snd_dma_buffer *dmab, size_t size)
+{
+	gfp_t gfp = snd_mem_get_gfp_flags(dmab, GFP_KERNEL);
+
+	dmab->area = alloc_pages_exact(size, gfp);
+	return 0;
+}
+
+static void snd_dma_continuous_free(struct snd_dma_buffer *dmab)
+{
+	free_pages_exact(dmab->area, dmab->bytes);
+}
+
+static const struct snd_malloc_ops snd_dma_continuous_ops = {
+	.alloc = snd_dma_continuous_alloc,
+	.free = snd_dma_continuous_free,
+};
+
+/*
+ * VMALLOC allocator
+ */
+static int snd_dma_vmalloc_alloc(struct snd_dma_buffer *dmab, size_t size)
+{
+	gfp_t gfp = snd_mem_get_gfp_flags(dmab, GFP_KERNEL | __GFP_HIGHMEM);
+
+	dmab->area = __vmalloc(size, gfp);
+	return 0;
+}
+
+static void snd_dma_vmalloc_free(struct snd_dma_buffer *dmab)
+{
+	vfree(dmab->area);
+}
+
+static dma_addr_t snd_dma_vmalloc_get_addr(struct snd_dma_buffer *dmab,
+					   size_t offset)
+{
+	return page_to_phys(vmalloc_to_page(dmab->area + offset)) +
+		offset % PAGE_SIZE;
+}
+
+static struct page *snd_dma_vmalloc_get_page(struct snd_dma_buffer *dmab,
+					     size_t offset)
+{
+	return vmalloc_to_page(dmab->area + offset);
+}
+
+static unsigned int
+snd_dma_vmalloc_get_chunk_size(struct snd_dma_buffer *dmab,
+			       unsigned int ofs, unsigned int size)
+{
+	ofs %= PAGE_SIZE;
+	size += ofs;
+	if (size > PAGE_SIZE)
+		size = PAGE_SIZE;
+	return size - ofs;
+}
+
+static const struct snd_malloc_ops snd_dma_vmalloc_ops = {
+	.alloc = snd_dma_vmalloc_alloc,
+	.free = snd_dma_vmalloc_free,
+	.get_addr = snd_dma_vmalloc_get_addr,
+	.get_page = snd_dma_vmalloc_get_page,
+	.get_chunk_size = snd_dma_vmalloc_get_chunk_size,
+};
+
 #ifdef CONFIG_HAS_DMA
+/*
+ * IRAM allocator
+ */
 #ifdef CONFIG_GENERIC_ALLOCATOR
-	case SNDRV_DMA_TYPE_DEV_IRAM:
-		snd_free_dev_iram(dmab);
-		break;
+static int snd_dma_iram_alloc(struct snd_dma_buffer *dmab, size_t size)
+{
+	struct device *dev = dmab->dev.dev;
+	struct gen_pool *pool;
+
+	if (dev->of_node) {
+		pool = of_gen_pool_get(dev->of_node, "iram", 0);
+		/* Assign the pool into private_data field */
+		dmab->private_data = pool;
+
+		dmab->area = gen_pool_dma_alloc_align(pool, size, &dmab->addr,
+						      PAGE_SIZE);
+		if (dmab->area)
+			return 0;
+	}
+
+	/* Internal memory might have limited size and no enough space,
+	 * so if we fail to malloc, try to fetch memory traditionally.
+	 */
+	dmab->dev.type = SNDRV_DMA_TYPE_DEV;
+	return __snd_dma_alloc_pages(dmab, size);
+}
+
+static void snd_dma_iram_free(struct snd_dma_buffer *dmab)
+{
+	struct gen_pool *pool = dmab->private_data;
+
+	if (pool && dmab->area)
+		gen_pool_free(pool, (unsigned long)dmab->area, dmab->bytes);
+}
+
+static const struct snd_malloc_ops snd_dma_iram_ops = {
+	.alloc = snd_dma_iram_alloc,
+	.free = snd_dma_iram_free,
+};
 #endif /* CONFIG_GENERIC_ALLOCATOR */
-	case SNDRV_DMA_TYPE_DEV:
-	case SNDRV_DMA_TYPE_DEV_UC:
-		snd_free_dev_pages(dmab);
-		break;
+
+/*
+ * Coherent device pages allocator
+ */
+static int snd_dma_dev_alloc(struct snd_dma_buffer *dmab, size_t size)
+{
+	gfp_t gfp_flags;
+
+	gfp_flags = GFP_KERNEL
+		| __GFP_COMP	/* compound page lets parts be mapped */
+		| __GFP_NORETRY /* don't trigger OOM-killer */
+		| __GFP_NOWARN; /* no stack trace print - this call is non-critical */
+	dmab->area = dma_alloc_coherent(dmab->dev.dev, size, &dmab->addr,
+					gfp_flags);
+#ifdef CONFIG_X86
+	if (dmab->area && dmab->dev.type == SNDRV_DMA_TYPE_DEV_UC)
+		set_memory_wc((unsigned long)dmab->area,
+			      PAGE_ALIGN(size) >> PAGE_SHIFT);
 #endif
+	return 0;
+}
+
+static void snd_dma_dev_free(struct snd_dma_buffer *dmab)
+{
+#ifdef CONFIG_X86
+	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_UC)
+		set_memory_wb((unsigned long)dmab->area,
+			      PAGE_ALIGN(dmab->bytes) >> PAGE_SHIFT);
+#endif
+	dma_free_coherent(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
+}
+
+static const struct snd_malloc_ops snd_dma_dev_ops = {
+	.alloc = snd_dma_dev_alloc,
+	.free = snd_dma_dev_free,
+};
+#endif /* CONFIG_HAS_DMA */
+
+/*
+ * Entry points
+ */
+static const struct snd_malloc_ops *dma_ops[] = {
+	[SNDRV_DMA_TYPE_CONTINUOUS] = &snd_dma_continuous_ops,
+	[SNDRV_DMA_TYPE_VMALLOC] = &snd_dma_vmalloc_ops,
+#ifdef CONFIG_HAS_DMA
+	[SNDRV_DMA_TYPE_DEV] = &snd_dma_dev_ops,
+	[SNDRV_DMA_TYPE_DEV_UC] = &snd_dma_dev_ops,
+#ifdef CONFIG_GENERIC_ALLOCATOR
+	[SNDRV_DMA_TYPE_DEV_IRAM] = &snd_dma_iram_ops,
+#endif /* CONFIG_GENERIC_ALLOCATOR */
+#endif /* CONFIG_HAS_DMA */
 #ifdef CONFIG_SND_DMA_SGBUF
-	case SNDRV_DMA_TYPE_DEV_SG:
-	case SNDRV_DMA_TYPE_DEV_UC_SG:
-		snd_free_sgbuf_pages(dmab);
-		break;
+	[SNDRV_DMA_TYPE_DEV_SG] = &snd_dma_sg_ops,
+	[SNDRV_DMA_TYPE_DEV_UC_SG] = &snd_dma_sg_ops,
 #endif
-	default:
-		pr_err("snd-malloc: invalid device type %d\n", dmab->dev.type);
-	}
+};
+
+static const struct snd_malloc_ops *snd_dma_get_ops(struct snd_dma_buffer *dmab)
+{
+	if (WARN_ON_ONCE(dmab->dev.type <= SNDRV_DMA_TYPE_UNKNOWN ||
+			 dmab->dev.type >= ARRAY_SIZE(dma_ops)))
+		return NULL;
+	return dma_ops[dmab->dev.type];
 }
-EXPORT_SYMBOL(snd_dma_free_pages);
diff --git a/sound/core/memalloc_local.h b/sound/core/memalloc_local.h
new file mode 100644
index 000000000000..fe55416253bf
--- /dev/null
+++ b/sound/core/memalloc_local.h
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#ifndef __MEMALLOC_LOCAL_H
+#define __MEMALLOC_LOCAL_H
+
+struct snd_malloc_ops {
+	int (*alloc)(struct snd_dma_buffer *dmab, size_t size);
+	void (*free)(struct snd_dma_buffer *dmab);
+	dma_addr_t (*get_addr)(struct snd_dma_buffer *dmab, size_t offset);
+	struct page *(*get_page)(struct snd_dma_buffer *dmab, size_t offset);
+	unsigned int (*get_chunk_size)(struct snd_dma_buffer *dmab,
+				       unsigned int ofs, unsigned int size);
+};
+
+#ifdef CONFIG_SND_DMA_SGBUF
+extern const struct snd_malloc_ops snd_dma_sg_ops;
+#endif
+
+#endif /* __MEMALLOC_LOCAL_H */
diff --git a/sound/core/pcm_local.h b/sound/core/pcm_local.h
index e3b3558aeab6..fe9689b8a6a6 100644
--- a/sound/core/pcm_local.h
+++ b/sound/core/pcm_local.h
@@ -65,11 +65,6 @@ void __snd_pcm_xrun(struct snd_pcm_substream *substream);
 void snd_pcm_group_init(struct snd_pcm_group *group);
 void snd_pcm_sync_stop(struct snd_pcm_substream *substream, bool sync_irq);
 
-#ifdef CONFIG_SND_DMA_SGBUF
-struct page *snd_pcm_sgbuf_ops_page(struct snd_pcm_substream *substream,
-				    unsigned long offset);
-#endif
-
 #define PCM_RUNTIME_CHECK(sub) snd_BUG_ON(!(sub) || !(sub)->runtime)
 
 /* loop over all PCM substreams */
diff --git a/sound/core/pcm_memory.c b/sound/core/pcm_memory.c
index 542a75babdee..d7621ed105bd 100644
--- a/sound/core/pcm_memory.c
+++ b/sound/core/pcm_memory.c
@@ -337,27 +337,6 @@ void snd_pcm_set_managed_buffer_all(struct snd_pcm *pcm, int type,
 }
 EXPORT_SYMBOL(snd_pcm_set_managed_buffer_all);
 
-#ifdef CONFIG_SND_DMA_SGBUF
-/*
- * snd_pcm_sgbuf_ops_page - get the page struct at the given offset
- * @substream: the pcm substream instance
- * @offset: the buffer offset
- *
- * Used as the page callback of PCM ops.
- *
- * Return: The page struct at the given buffer offset. %NULL on failure.
- */
-struct page *snd_pcm_sgbuf_ops_page(struct snd_pcm_substream *substream, unsigned long offset)
-{
-	struct snd_sg_buf *sgbuf = snd_pcm_substream_sgbuf(substream);
-
-	unsigned int idx = offset >> PAGE_SHIFT;
-	if (idx >= (unsigned int)sgbuf->pages)
-		return NULL;
-	return sgbuf->page_table[idx];
-}
-#endif /* CONFIG_SND_DMA_SGBUF */
-
 /**
  * snd_pcm_lib_malloc_pages - allocate the DMA buffer
  * @substream: the substream to allocate the DMA buffer to
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index eb468573f070..48d63dbb17ba 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -3644,24 +3644,6 @@ static int snd_pcm_mmap_control(struct snd_pcm_substream *substream, struct file
 }
 #endif /* coherent mmap */
 
-static inline struct page *
-snd_pcm_default_page_ops(struct snd_pcm_substream *substream, unsigned long ofs)
-{
-	void *vaddr = substream->runtime->dma_area + ofs;
-
-	switch (substream->dma_buffer.dev.type) {
-#ifdef CONFIG_SND_DMA_SGBUF
-	case SNDRV_DMA_TYPE_DEV_SG:
-	case SNDRV_DMA_TYPE_DEV_UC_SG:
-		return snd_pcm_sgbuf_ops_page(substream, ofs);
-#endif /* CONFIG_SND_DMA_SGBUF */
-	case SNDRV_DMA_TYPE_VMALLOC:
-		return vmalloc_to_page(vaddr);
-	default:
-		return virt_to_page(vaddr);
-	}
-}
-
 /*
  * fault callback for mmapping a RAM page
  */
@@ -3683,7 +3665,7 @@ static vm_fault_t snd_pcm_mmap_data_fault(struct vm_fault *vmf)
 	if (substream->ops->page)
 		page = substream->ops->page(substream, offset);
 	else
-		page = snd_pcm_default_page_ops(substream, offset);
+		page = snd_sgbuf_get_page(snd_pcm_get_dma_buf(substream), offset);
 	if (!page)
 		return VM_FAULT_SIGBUS;
 	get_page(page);
diff --git a/sound/core/sgbuf.c b/sound/core/sgbuf.c
index 29ddb76187e5..232cf3f1bcb3 100644
--- a/sound/core/sgbuf.c
+++ b/sound/core/sgbuf.c
@@ -10,20 +10,34 @@
 #include <linux/vmalloc.h>
 #include <linux/export.h>
 #include <sound/memalloc.h>
-
+#include "memalloc_local.h"
+
+struct snd_sg_page {
+	void *buf;
+	dma_addr_t addr;
+};
+
+struct snd_sg_buf {
+	int size;	/* allocated byte size */
+	int pages;	/* allocated pages */
+	int tblsize;	/* allocated table size */
+	struct snd_sg_page *table;	/* address table */
+	struct page **page_table;	/* page table (for vmap/vunmap) */
+	struct device *dev;
+};
 
 /* table entries are align to 32 */
 #define SGBUF_TBL_ALIGN		32
 #define sgbuf_align_table(tbl)	ALIGN((tbl), SGBUF_TBL_ALIGN)
 
-int snd_free_sgbuf_pages(struct snd_dma_buffer *dmab)
+static void snd_dma_sg_free(struct snd_dma_buffer *dmab)
 {
 	struct snd_sg_buf *sgbuf = dmab->private_data;
 	struct snd_dma_buffer tmpb;
 	int i;
 
-	if (! sgbuf)
-		return -EINVAL;
+	if (!sgbuf)
+		return;
 
 	vunmap(dmab->area);
 	dmab->area = NULL;
@@ -45,15 +59,11 @@ int snd_free_sgbuf_pages(struct snd_dma_buffer *dmab)
 	kfree(sgbuf->page_table);
 	kfree(sgbuf);
 	dmab->private_data = NULL;
-	
-	return 0;
 }
 
 #define MAX_ALLOC_PAGES		32
 
-void *snd_malloc_sgbuf_pages(struct device *device,
-			     size_t size, struct snd_dma_buffer *dmab,
-			     size_t *res_size)
+static int snd_dma_sg_alloc(struct snd_dma_buffer *dmab, size_t size)
 {
 	struct snd_sg_buf *sgbuf;
 	unsigned int i, pages, chunk, maxpages;
@@ -63,18 +73,16 @@ void *snd_malloc_sgbuf_pages(struct device *device,
 	int type = SNDRV_DMA_TYPE_DEV;
 	pgprot_t prot = PAGE_KERNEL;
 
-	dmab->area = NULL;
-	dmab->addr = 0;
 	dmab->private_data = sgbuf = kzalloc(sizeof(*sgbuf), GFP_KERNEL);
-	if (! sgbuf)
-		return NULL;
+	if (!sgbuf)
+		return -ENOMEM;
 	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_UC_SG) {
 		type = SNDRV_DMA_TYPE_DEV_UC;
 #ifdef pgprot_noncached
 		prot = pgprot_noncached(PAGE_KERNEL);
 #endif
 	}
-	sgbuf->dev = device;
+	sgbuf->dev = dmab->dev.dev;
 	pages = snd_sgbuf_aligned_pages(size);
 	sgbuf->tblsize = sgbuf_align_table(pages);
 	table = kcalloc(sgbuf->tblsize, sizeof(*table), GFP_KERNEL);
@@ -94,12 +102,10 @@ void *snd_malloc_sgbuf_pages(struct device *device,
 		if (chunk > maxpages)
 			chunk = maxpages;
 		chunk <<= PAGE_SHIFT;
-		if (snd_dma_alloc_pages_fallback(type, device,
+		if (snd_dma_alloc_pages_fallback(type, dmab->dev.dev,
 						 chunk, &tmpb) < 0) {
 			if (!sgbuf->pages)
 				goto _failed;
-			if (!res_size)
-				goto _failed;
 			size = sgbuf->pages * PAGE_SIZE;
 			break;
 		}
@@ -124,27 +130,42 @@ void *snd_malloc_sgbuf_pages(struct device *device,
 	dmab->area = vmap(sgbuf->page_table, sgbuf->pages, VM_MAP, prot);
 	if (! dmab->area)
 		goto _failed;
-	if (res_size)
-		*res_size = sgbuf->size;
-	return dmab->area;
+	return 0;
 
  _failed:
-	snd_free_sgbuf_pages(dmab); /* free the table */
-	return NULL;
+	snd_dma_sg_free(dmab); /* free the table */
+	return -ENOMEM;
 }
 
-/*
- * compute the max chunk size with continuous pages on sg-buffer
- */
-unsigned int snd_sgbuf_get_chunk_size(struct snd_dma_buffer *dmab,
-				      unsigned int ofs, unsigned int size)
+static dma_addr_t snd_dma_sg_get_addr(struct snd_dma_buffer *dmab,
+				      size_t offset)
+{
+	struct snd_sg_buf *sgbuf = dmab->private_data;
+	dma_addr_t addr;
+
+	addr = sgbuf->table[offset >> PAGE_SHIFT].addr;
+	addr &= ~((dma_addr_t)PAGE_SIZE - 1);
+	return addr + offset % PAGE_SIZE;
+}
+
+static struct page *snd_dma_sg_get_page(struct snd_dma_buffer *dmab,
+					size_t offset)
+{
+	struct snd_sg_buf *sgbuf = dmab->private_data;
+	unsigned int idx = offset >> PAGE_SHIFT;
+
+	if (idx >= (unsigned int)sgbuf->pages)
+		return NULL;
+	return sgbuf->page_table[idx];
+}
+
+static unsigned int snd_dma_sg_get_chunk_size(struct snd_dma_buffer *dmab,
+					      unsigned int ofs,
+					      unsigned int size)
 {
 	struct snd_sg_buf *sg = dmab->private_data;
 	unsigned int start, end, pg;
 
-	if (!sg)
-		return size;
-
 	start = ofs >> PAGE_SHIFT;
 	end = (ofs + size - 1) >> PAGE_SHIFT;
 	/* check page continuity */
@@ -160,4 +181,11 @@ unsigned int snd_sgbuf_get_chunk_size(struct snd_dma_buffer *dmab,
 	/* ok, all on continuous pages */
 	return size;
 }
-EXPORT_SYMBOL(snd_sgbuf_get_chunk_size);
+
+const struct snd_malloc_ops snd_dma_sg_ops = {
+	.alloc = snd_dma_sg_alloc,
+	.free = snd_dma_sg_free,
+	.get_addr = snd_dma_sg_get_addr,
+	.get_page = snd_dma_sg_get_page,
+	.get_chunk_size = snd_dma_sg_get_chunk_size,
+};
-- 
2.26.2


* [PATCH 4/5] ALSA: core: Move mmap handler into memalloc ops
  2021-06-09 16:25 [PATCH 0/5] ALSA: More abstraction of memory alloc helpers Takashi Iwai
                   ` (2 preceding siblings ...)
  2021-06-09 16:25 ` [PATCH 3/5] ALSA: core: Abstract memory alloc helpers Takashi Iwai
@ 2021-06-09 16:25 ` Takashi Iwai
  2021-06-09 16:25 ` [PATCH 5/5] ALSA: core: Add continuous and vmalloc mmap ops Takashi Iwai
  4 siblings, 0 replies; 6+ messages in thread
From: Takashi Iwai @ 2021-06-09 16:25 UTC (permalink / raw)
  To: alsa-devel

This patch moves the mmap handling code into the common memalloc
handler.  It allows us to reduce the memory-type-specific code in the
PCM code gracefully.
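
For illustration only (foo_pcm_mmap is a made-up example, not taken
from the tree), a driver that implements its own mmap callback could
now simply forward to the new helper:

  static int foo_pcm_mmap(struct snd_pcm_substream *substream,
			  struct vm_area_struct *area)
  {
	/* dispatch to the mmap op of the buffer's allocation type;
	 * returns -ENOENT if the type provides no mmap handler
	 */
	return snd_dma_buffer_mmap(snd_pcm_get_dma_buf(substream), area);
  }

The PCM default mmap handler below does essentially the same and keeps
the fault handler only as a fallback.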

Signed-off-by: Takashi Iwai <tiwai@suse.de>
---
 include/sound/memalloc.h    |  3 +++
 sound/core/memalloc.c       | 36 ++++++++++++++++++++++++++++++++++++
 sound/core/memalloc_local.h |  1 +
 sound/core/pcm_native.c     | 19 +++----------------
 4 files changed, 43 insertions(+), 16 deletions(-)

diff --git a/include/sound/memalloc.h b/include/sound/memalloc.h
index 6dc85a7f44ad..1918c60f1f35 100644
--- a/include/sound/memalloc.h
+++ b/include/sound/memalloc.h
@@ -11,6 +11,7 @@
 
 struct device;
 struct page;
+struct vm_area_struct;
 
 /*
  * buffer device info
@@ -69,6 +70,8 @@ int snd_dma_alloc_pages(int type, struct device *dev, size_t size,
 int snd_dma_alloc_pages_fallback(int type, struct device *dev, size_t size,
                                  struct snd_dma_buffer *dmab);
 void snd_dma_free_pages(struct snd_dma_buffer *dmab);
+int snd_dma_buffer_mmap(struct snd_dma_buffer *dmab,
+			struct vm_area_struct *area);
 
 dma_addr_t snd_sgbuf_get_addr(struct snd_dma_buffer *dmab, size_t offset);
 struct page *snd_sgbuf_get_page(struct snd_dma_buffer *dmab, size_t offset);
diff --git a/sound/core/memalloc.c b/sound/core/memalloc.c
index ad68bcdf82cf..bdcb9230acf8 100644
--- a/sound/core/memalloc.c
+++ b/sound/core/memalloc.c
@@ -127,6 +127,23 @@ void snd_dma_free_pages(struct snd_dma_buffer *dmab)
 }
 EXPORT_SYMBOL(snd_dma_free_pages);
 
+/**
+ * snd_dma_buffer_mmap - perform mmap of the given DMA buffer
+ * @dmab: buffer allocation information
+ * @area: VM area information
+ */
+int snd_dma_buffer_mmap(struct snd_dma_buffer *dmab,
+			struct vm_area_struct *area)
+{
+	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);
+
+	if (ops && ops->mmap)
+		return ops->mmap(dmab, area);
+	else
+		return -ENOENT;
+}
+EXPORT_SYMBOL(snd_dma_buffer_mmap);
+
 /**
  * snd_sgbuf_get_addr - return the physical address at the corresponding offset
  * @dmab: buffer allocation information
@@ -283,9 +300,20 @@ static void snd_dma_iram_free(struct snd_dma_buffer *dmab)
 		gen_pool_free(pool, (unsigned long)dmab->area, dmab->bytes);
 }
 
+static int snd_dma_iram_mmap(struct snd_dma_buffer *dmab,
+			     struct vm_area_struct *area)
+{
+	area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
+	return remap_pfn_range(area, area->vm_start,
+			       dmab->addr >> PAGE_SHIFT,
+			       area->vm_end - area->vm_start,
+			       area->vm_page_prot);
+}
+
 static const struct snd_malloc_ops snd_dma_iram_ops = {
 	.alloc = snd_dma_iram_alloc,
 	.free = snd_dma_iram_free,
+	.mmap = snd_dma_iram_mmap,
 };
 #endif /* CONFIG_GENERIC_ALLOCATOR */
 
@@ -320,9 +348,17 @@ static void snd_dma_dev_free(struct snd_dma_buffer *dmab)
 	dma_free_coherent(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
 }
 
+static int snd_dma_dev_mmap(struct snd_dma_buffer *dmab,
+			    struct vm_area_struct *area)
+{
+	return dma_mmap_coherent(dmab->dev.dev, area,
+				 dmab->area, dmab->addr, dmab->bytes);
+}
+
 static const struct snd_malloc_ops snd_dma_dev_ops = {
 	.alloc = snd_dma_dev_alloc,
 	.free = snd_dma_dev_free,
+	.mmap = snd_dma_dev_mmap,
 };
 #endif /* CONFIG_HAS_DMA */
 
diff --git a/sound/core/memalloc_local.h b/sound/core/memalloc_local.h
index fe55416253bf..dbea7f2aed07 100644
--- a/sound/core/memalloc_local.h
+++ b/sound/core/memalloc_local.h
@@ -9,6 +9,7 @@ struct snd_malloc_ops {
 	struct page *(*get_page)(struct snd_dma_buffer *dmab, size_t offset);
 	unsigned int (*get_chunk_size)(struct snd_dma_buffer *dmab,
 				       unsigned int ofs, unsigned int size);
+	int (*mmap)(struct snd_dma_buffer *dmab, struct vm_area_struct *area);
 };
 
 #ifdef CONFIG_SND_DMA_SGBUF
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index 48d63dbb17ba..14e32825c339 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -3700,22 +3700,9 @@ int snd_pcm_lib_default_mmap(struct snd_pcm_substream *substream,
 			     struct vm_area_struct *area)
 {
 	area->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
-#ifdef CONFIG_GENERIC_ALLOCATOR
-	if (substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV_IRAM) {
-		area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
-		return remap_pfn_range(area, area->vm_start,
-				substream->dma_buffer.addr >> PAGE_SHIFT,
-				area->vm_end - area->vm_start, area->vm_page_prot);
-	}
-#endif /* CONFIG_GENERIC_ALLOCATOR */
-	if (IS_ENABLED(CONFIG_HAS_DMA) && !substream->ops->page &&
-	    (substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV ||
-	     substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV_UC))
-		return dma_mmap_coherent(substream->dma_buffer.dev.dev,
-					 area,
-					 substream->runtime->dma_area,
-					 substream->runtime->dma_addr,
-					 substream->runtime->dma_bytes);
+	if (!substream->ops->page &&
+	    !snd_dma_buffer_mmap(snd_pcm_get_dma_buf(substream), area))
+		return 0;
 	/* mmap with fault handler */
 	area->vm_ops = &snd_pcm_vm_ops_data_fault;
 	return 0;
-- 
2.26.2


* [PATCH 5/5] ALSA: core: Add continuous and vmalloc mmap ops
  2021-06-09 16:25 [PATCH 0/5] ALSA: More abstraction of memory alloc helpers Takashi Iwai
                   ` (3 preceding siblings ...)
  2021-06-09 16:25 ` [PATCH 4/5] ALSA: core: Move mmap handler into memalloc ops Takashi Iwai
@ 2021-06-09 16:25 ` Takashi Iwai
  4 siblings, 0 replies; 6+ messages in thread
From: Takashi Iwai @ 2021-06-09 16:25 UTC (permalink / raw)
  To: alsa-devel

The mmap of continuous pages and vmalloc'ed pages can be done
relatively easily in one shot with the existing helper functions.
Implement the mmap ops for those types, so that mmap works without
relying on the page fault handling.
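
As an illustrative sketch (not part of the patch; error handling is
trimmed, "size" and "area" come from the surrounding context, and a
NULL device argument is assumed to select the default GFP flags for
this type):

  struct snd_dma_buffer dmab;

  snd_dma_alloc_pages(SNDRV_DMA_TYPE_VMALLOC, NULL, size, &dmab);
  /* formerly no mmap op -> fall back to the page fault handler;
   * now the whole buffer is mapped at once via remap_vmalloc_range()
   */
  snd_dma_buffer_mmap(&dmab, area);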

Signed-off-by: Takashi Iwai <tiwai@suse.de>
---
 sound/core/memalloc.c | 17 +++++++++++++++++
 1 file changed, 17 insertions(+)

diff --git a/sound/core/memalloc.c b/sound/core/memalloc.c
index bdcb9230acf8..83b79edfa52d 100644
--- a/sound/core/memalloc.c
+++ b/sound/core/memalloc.c
@@ -211,9 +211,19 @@ static void snd_dma_continuous_free(struct snd_dma_buffer *dmab)
 	free_pages_exact(dmab->area, dmab->bytes);
 }
 
+static int snd_dma_continuous_mmap(struct snd_dma_buffer *dmab,
+				   struct vm_area_struct *area)
+{
+	return remap_pfn_range(area, area->vm_start,
+			       dmab->addr >> PAGE_SHIFT,
+			       area->vm_end - area->vm_start,
+			       area->vm_page_prot);
+}
+
 static const struct snd_malloc_ops snd_dma_continuous_ops = {
 	.alloc = snd_dma_continuous_alloc,
 	.free = snd_dma_continuous_free,
+	.mmap = snd_dma_continuous_mmap,
 };
 
 /*
@@ -232,6 +242,12 @@ static void snd_dma_vmalloc_free(struct snd_dma_buffer *dmab)
 	vfree(dmab->area);
 }
 
+static int snd_dma_vmalloc_mmap(struct snd_dma_buffer *dmab,
+				struct vm_area_struct *area)
+{
+	return remap_vmalloc_range(area, dmab->area, 0);
+}
+
 static dma_addr_t snd_dma_vmalloc_get_addr(struct snd_dma_buffer *dmab,
 					   size_t offset)
 {
@@ -259,6 +275,7 @@ snd_dma_vmalloc_get_chunk_size(struct snd_dma_buffer *dmab,
 static const struct snd_malloc_ops snd_dma_vmalloc_ops = {
 	.alloc = snd_dma_vmalloc_alloc,
 	.free = snd_dma_vmalloc_free,
+	.mmap = snd_dma_vmalloc_mmap,
 	.get_addr = snd_dma_vmalloc_get_addr,
 	.get_page = snd_dma_vmalloc_get_page,
 	.get_chunk_size = snd_dma_vmalloc_get_chunk_size,
-- 
2.26.2

