* [PATCH v8] swiotlb: Adjust SWIOTBL bounce buffer size for SEV guests.
@ 2020-12-07 23:10 ` Ashish Kalra
  0 siblings, 0 replies; 24+ messages in thread
From: Ashish Kalra @ 2020-12-07 23:10 UTC (permalink / raw)
  To: konrad.wilk
  Cc: hch, tglx, mingo, hpa, x86, luto, peterz, dave.hansen, iommu,
	linux-kernel, brijesh.singh, Thomas.Lendacky, Jon.Grimm,
	rientjes

From: Ashish Kalra <ashish.kalra@amd.com>

For SEV, all DMA to and from guest has to use shared (un-encrypted) pages.
SEV uses SWIOTLB to make this happen without requiring changes to device
drivers.  However, depending on workload being run, the default 64MB of
SWIOTLB might not be enough and SWIOTLB may run out of buffers to use
for DMA, resulting in I/O errors and/or performance degradation for
high I/O workloads.

Adjust the default size of SWIOTLB for SEV guests using a
percentage of the total memory available to guest for SWIOTLB buffers.

Using late_initcall() interface to invoke swiotlb_adjust() does not
work as the size adjustment needs to be done before mem_encrypt_init()
and reserve_crashkernel() which use the allocated SWIOTLB buffer size,
hence call it explicitly from setup_arch().

The SWIOTLB default size adjustment needs to be added as an architecture
specific interface/callback to allow architectures such as those supporting
memory encryption to adjust/expand SWIOTLB size for their use.

v5 fixed build errors and warnings as
Reported-by: kbuild test robot <lkp@intel.com>

Signed-off-by: Ashish Kalra <ashish.kalra@amd.com>
---
 arch/x86/kernel/setup.c   |  2 ++
 arch/x86/mm/mem_encrypt.c | 37 +++++++++++++++++++++++++++++++++++++
 include/linux/swiotlb.h   |  6 ++++++
 kernel/dma/swiotlb.c      | 22 ++++++++++++++++++++++
 4 files changed, 67 insertions(+)

diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 84f581c91db4..31e24e198061 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -1149,6 +1149,8 @@ void __init setup_arch(char **cmdline_p)
 	if (boot_cpu_has(X86_FEATURE_GBPAGES))
 		hugetlb_cma_reserve(PUD_SHIFT - PAGE_SHIFT);
 
+	swiotlb_adjust();
+
 	/*
 	 * Reserve memory for crash kernel after SRAT is parsed so that it
 	 * won't consume hotpluggable memory.
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index 1bcfbcd2bfd7..d1b8d60040cf 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -485,7 +485,44 @@ static void print_mem_encrypt_feature_info(void)
 	pr_cont("\n");
 }
 
+/*
+ * The percentage of guest memory used here for SWIOTLB buffers
+ * is more of an approximation of the static adjustment which
+ * is 128M for <1G guests, 256M for 1G-4G guests and 512M for >4G guests.
+ */
+#define SEV_ADJUST_SWIOTLB_SIZE_PERCENT	6
+
 /* Architecture __weak replacement functions */
+unsigned long __init arch_swiotlb_adjust(unsigned long iotlb_default_size)
+{
+	unsigned long size = iotlb_default_size;
+
+	/*
+	 * For SEV, all DMA has to occur via shared/unencrypted pages.
+	 * SEV uses SWIOTLB to make this happen without changing device
+	 * drivers. However, depending on the workload being run, the
+	 * default 64MB of SWIOTLB may not be enough and SWIOTLB may
+	 * run out of buffers for DMA, resulting in I/O errors and/or
+	 * performance degradation especially with high I/O workloads.
+	 * Adjust the default size of SWIOTLB for SEV guests using
+	 * a percentage of guest memory for SWIOTLB buffers.
+	 * Also as the SWIOTLB bounce buffer memory is allocated
+	 * from low memory, ensure that the adjusted size is within
+	 * the limits of low available memory.
+	 *
+	 */
+	if (sev_active()) {
+		phys_addr_t total_mem = memblock_phys_mem_size();
+
+		size = total_mem * SEV_ADJUST_SWIOTLB_SIZE_PERCENT / 100;
+		size = clamp_val(size, iotlb_default_size, SZ_1G);
+		pr_info("SWIOTLB bounce buffer size adjusted to %luMB for SEV",
+			size >> 20);
+	}
+
+	return size;
+}
+
 void __init mem_encrypt_init(void)
 {
 	if (!sme_me_mask)
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index 3bb72266a75a..b5904fa4b67c 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -33,6 +33,7 @@ extern void swiotlb_init(int verbose);
 int swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose);
 extern unsigned long swiotlb_nr_tbl(void);
 unsigned long swiotlb_size_or_default(void);
+unsigned long __init arch_swiotlb_adjust(unsigned long size);
 extern int swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs);
 extern int swiotlb_late_init_with_default_size(size_t default_size);
 extern void __init swiotlb_update_mem_attributes(void);
@@ -77,6 +78,7 @@ void __init swiotlb_exit(void);
 unsigned int swiotlb_max_segment(void);
 size_t swiotlb_max_mapping_size(struct device *dev);
 bool is_swiotlb_active(void);
+void __init swiotlb_adjust(void);
 #else
 #define swiotlb_force SWIOTLB_NO_FORCE
 static inline bool is_swiotlb_buffer(phys_addr_t paddr)
@@ -99,6 +101,10 @@ static inline bool is_swiotlb_active(void)
 {
 	return false;
 }
+
+static inline void swiotlb_adjust(void)
+{
+}
 #endif /* CONFIG_SWIOTLB */
 
 extern void swiotlb_print_info(void);
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index 781b9dca197c..0150ca2336bc 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -163,6 +163,28 @@ unsigned long swiotlb_size_or_default(void)
 	return size ? size : (IO_TLB_DEFAULT_SIZE);
 }
 
+unsigned long __init __weak arch_swiotlb_adjust(unsigned long size)
+{
+	return size;
+}
+
+void __init swiotlb_adjust(void)
+{
+	unsigned long size;
+
+	/*
+	 * If swiotlb parameter has not been specified, give a chance to
+	 * architectures such as those supporting memory encryption to
+	 * adjust/expand SWIOTLB size for their use.
+	 */
+	if (!io_tlb_nslabs) {
+		size = arch_swiotlb_adjust(IO_TLB_DEFAULT_SIZE);
+		size = ALIGN(size, 1 << IO_TLB_SHIFT);
+		io_tlb_nslabs = size >> IO_TLB_SHIFT;
+		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
+	}
+}
+
 void swiotlb_print_info(void)
 {
 	unsigned long bytes = io_tlb_nslabs << IO_TLB_SHIFT;
-- 
2.17.1


^ permalink raw reply related	[flat|nested] 24+ messages in thread
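
For reference, the percentage-based sizing in the patch above works out
as follows. This is a standalone userspace sketch, not part of the
patch: the 6% figure and the [64MB, 1GB] clamp are taken from the diff,
while clamp_ul() stands in for the kernel's clamp_val().

#include <stdio.h>

#define SZ_64M (64UL << 20)	/* IO_TLB_DEFAULT_SIZE */
#define SZ_1G  (1UL << 30)
#define SEV_ADJUST_SWIOTLB_SIZE_PERCENT 6UL

static unsigned long clamp_ul(unsigned long v, unsigned long lo,
			      unsigned long hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
	/* hypothetical guest memory sizes: 512MB, 4GB, 64GB */
	unsigned long guests[] = { 512UL << 20, 4UL << 30, 64UL << 30 };

	for (int i = 0; i < 3; i++) {
		unsigned long size =
			guests[i] * SEV_ADJUST_SWIOTLB_SIZE_PERCENT / 100;

		size = clamp_ul(size, SZ_64M, SZ_1G);
		printf("%5luMB guest -> %4luMB SWIOTLB\n",
		       guests[i] >> 20, size >> 20);
	}
	return 0;
}

A 512MB guest lands on the 64MB floor, a 4GB guest gets ~245MB, and
guests above roughly 17GB hit the 1GB ceiling.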

* Re: [PATCH v8] swiotlb: Adjust SWIOTBL bounce buffer size for SEV guests.
  2020-12-07 23:10 ` Ashish Kalra
@ 2020-12-08 22:22   ` Konrad Rzeszutek Wilk
  -1 siblings, 0 replies; 24+ messages in thread
From: Konrad Rzeszutek Wilk @ 2020-12-08 22:22 UTC (permalink / raw)
  To: Ashish Kalra
  Cc: hch, tglx, mingo, hpa, x86, luto, peterz, dave.hansen, iommu,
	linux-kernel, brijesh.singh, Thomas.Lendacky, Jon.Grimm,
	rientjes

On Mon, Dec 07, 2020 at 11:10:57PM +0000, Ashish Kalra wrote:
> From: Ashish Kalra <ashish.kalra@amd.com>
> 
> For SEV, all DMA to and from guest has to use shared (un-encrypted) pages.
> SEV uses SWIOTLB to make this happen without requiring changes to device
> drivers.  However, depending on workload being run, the default 64MB of
> SWIOTLB might not be enough and SWIOTLB may run out of buffers to use
> for DMA, resulting in I/O errors and/or performance degradation for
> high I/O workloads.
> 
> Adjust the default size of SWIOTLB for SEV guests using a
> percentage of the total memory available to guest for SWIOTLB buffers.
> 
> Using late_initcall() interface to invoke swiotlb_adjust() does not
> work as the size adjustment needs to be done before mem_encrypt_init()
> and reserve_crashkernel() which use the allocated SWIOTLB buffer size,
> hence call it explicitly from setup_arch().
> 
> The SWIOTLB default size adjustment needs to be added as an architecture
> specific interface/callback to allow architectures such as those supporting
> memory encryption to adjust/expand SWIOTLB size for their use.
> 
> v5 fixed build errors and warnings as
> Reported-by: kbuild test robot <lkp@intel.com>
> 
> Signed-off-by: Ashish Kalra <ashish.kalra@amd.com>
> ---
>  arch/x86/kernel/setup.c   |  2 ++
>  arch/x86/mm/mem_encrypt.c | 37 +++++++++++++++++++++++++++++++++++++
>  include/linux/swiotlb.h   |  6 ++++++
>  kernel/dma/swiotlb.c      | 22 ++++++++++++++++++++++
>  4 files changed, 67 insertions(+)
> 
> diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
> index 84f581c91db4..31e24e198061 100644
> --- a/arch/x86/kernel/setup.c
> +++ b/arch/x86/kernel/setup.c
> @@ -1149,6 +1149,8 @@ void __init setup_arch(char **cmdline_p)
>  	if (boot_cpu_has(X86_FEATURE_GBPAGES))
>  		hugetlb_cma_reserve(PUD_SHIFT - PAGE_SHIFT);
>  
> +	swiotlb_adjust();
> +
>  	/*
>  	 * Reserve memory for crash kernel after SRAT is parsed so that it
>  	 * won't consume hotpluggable memory.
> diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
> index 1bcfbcd2bfd7..d1b8d60040cf 100644
> --- a/arch/x86/mm/mem_encrypt.c
> +++ b/arch/x86/mm/mem_encrypt.c
> @@ -485,7 +485,44 @@ static void print_mem_encrypt_feature_info(void)
>  	pr_cont("\n");
>  }
>  
> +/*
> + * The percentage of guest memory used here for SWIOTLB buffers
> + * is more of an approximation of the static adjustment which
> + * is 128M for <1G guests, 256M for 1G-4G guests and 512M for >4G guests.

No?

it is 64MB for <1G, and ~128M to 256M for 1G-to-4G

I will fix it up.
> + */
> +#define SEV_ADJUST_SWIOTLB_SIZE_PERCENT	6
> +
>  /* Architecture __weak replacement functions */
> +unsigned long __init arch_swiotlb_adjust(unsigned long iotlb_default_size)
> +{
> +	unsigned long size = iotlb_default_size;
> +
> +	/*
> +	 * For SEV, all DMA has to occur via shared/unencrypted pages.
> +	 * SEV uses SWIOTLB to make this happen without changing device
> +	 * drivers. However, depending on the workload being run, the
> +	 * default 64MB of SWIOTLB may not be enough and SWIOTLB may
> +	 * run out of buffers for DMA, resulting in I/O errors and/or
> +	 * performance degradation especially with high I/O workloads.
> +	 * Adjust the default size of SWIOTLB for SEV guests using
> +	 * a percentage of guest memory for SWIOTLB buffers.
> +	 * Also as the SWIOTLB bounce buffer memory is allocated
> +	 * from low memory, ensure that the adjusted size is within
> +	 * the limits of low available memory.
> +	 *
> +	 */
> +	if (sev_active()) {
> +		phys_addr_t total_mem = memblock_phys_mem_size();
> +
> +		size = total_mem * SEV_ADJUST_SWIOTLB_SIZE_PERCENT / 100;
> +		size = clamp_val(size, iotlb_default_size, SZ_1G);
> +		pr_info("SWIOTLB bounce buffer size adjusted to %luMB for SEV",
> +			size >> 20);
> +	}
> +
> +	return size;
> +}
> +
>  void __init mem_encrypt_init(void)
>  {
>  	if (!sme_me_mask)
> diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
> index 3bb72266a75a..b5904fa4b67c 100644
> --- a/include/linux/swiotlb.h
> +++ b/include/linux/swiotlb.h
> @@ -33,6 +33,7 @@ extern void swiotlb_init(int verbose);
>  int swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose);
>  extern unsigned long swiotlb_nr_tbl(void);
>  unsigned long swiotlb_size_or_default(void);
> +unsigned long __init arch_swiotlb_adjust(unsigned long size);
>  extern int swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs);
>  extern int swiotlb_late_init_with_default_size(size_t default_size);
>  extern void __init swiotlb_update_mem_attributes(void);
> @@ -77,6 +78,7 @@ void __init swiotlb_exit(void);
>  unsigned int swiotlb_max_segment(void);
>  size_t swiotlb_max_mapping_size(struct device *dev);
>  bool is_swiotlb_active(void);
> +void __init swiotlb_adjust(void);
>  #else
>  #define swiotlb_force SWIOTLB_NO_FORCE
>  static inline bool is_swiotlb_buffer(phys_addr_t paddr)
> @@ -99,6 +101,10 @@ static inline bool is_swiotlb_active(void)
>  {
>  	return false;
>  }
> +
> +static inline void swiotlb_adjust(void)
> +{
> +}
>  #endif /* CONFIG_SWIOTLB */
>  
>  extern void swiotlb_print_info(void);
> diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
> index 781b9dca197c..0150ca2336bc 100644
> --- a/kernel/dma/swiotlb.c
> +++ b/kernel/dma/swiotlb.c
> @@ -163,6 +163,28 @@ unsigned long swiotlb_size_or_default(void)
>  	return size ? size : (IO_TLB_DEFAULT_SIZE);
>  }
>  
> +unsigned long __init __weak arch_swiotlb_adjust(unsigned long size)
> +{
> +	return size;
> +}
> +
> +void __init swiotlb_adjust(void)
> +{
> +	unsigned long size;
> +
> +	/*
> +	 * If swiotlb parameter has not been specified, give a chance to
> +	 * architectures such as those supporting memory encryption to
> +	 * adjust/expand SWIOTLB size for their use.
> +	 */
> +	if (!io_tlb_nslabs) {
> +		size = arch_swiotlb_adjust(IO_TLB_DEFAULT_SIZE);
> +		size = ALIGN(size, 1 << IO_TLB_SHIFT);
> +		io_tlb_nslabs = size >> IO_TLB_SHIFT;
> +		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
> +	}
> +}
> +
>  void swiotlb_print_info(void)
>  {
>  	unsigned long bytes = io_tlb_nslabs << IO_TLB_SHIFT;
> -- 
> 2.17.1
> 

^ permalink raw reply	[flat|nested] 24+ messages in thread
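
For reference, the slab rounding in the kernel/dma/swiotlb.c hunk quoted
above can be checked in isolation. A sketch under the assumption that
IO_TLB_SHIFT is 11 (2KB slabs) and IO_TLB_SEGSIZE is 128, the values in
kernels of this era; ALIGN() is reimplemented here for power-of-two
alignments as the kernel defines it.

#include <stdio.h>

#define IO_TLB_SHIFT   11	/* assumed: 2KB per slab */
#define IO_TLB_SEGSIZE 128	/* assumed: slabs per segment */
#define ALIGN(x, a) (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

int main(void)
{
	unsigned long size = 245UL << 20;	/* e.g. ~6% of a 4GB guest */
	unsigned long nslabs;

	size   = ALIGN(size, 1UL << IO_TLB_SHIFT);	/* whole slabs */
	nslabs = size >> IO_TLB_SHIFT;			/* bytes -> slab count */
	nslabs = ALIGN(nslabs, IO_TLB_SEGSIZE);		/* whole segments */

	printf("nslabs=%lu (%luMB)\n", nslabs,
	       (nslabs << IO_TLB_SHIFT) >> 20);
	return 0;
}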

* Re: [PATCH v8] swiotlb: Adjust SWIOTBL bounce buffer size for SEV guests.
  2020-12-08 22:22   ` Konrad Rzeszutek Wilk
@ 2020-12-08 23:01     ` Borislav Petkov
  -1 siblings, 0 replies; 24+ messages in thread
From: Borislav Petkov @ 2020-12-08 23:01 UTC (permalink / raw)
  To: Konrad Rzeszutek Wilk
  Cc: Ashish Kalra, hch, tglx, mingo, hpa, x86, luto, peterz,
	dave.hansen, iommu, linux-kernel, brijesh.singh, Thomas.Lendacky,
	Jon.Grimm, rientjes

On Tue, Dec 08, 2020 at 05:22:20PM -0500, Konrad Rzeszutek Wilk wrote:
> I will fix it up.

So who's picking this up? If not me then I probably should have a
detailed look at the x86 bits before it goes in...

-- 
Regards/Gruss,
    Boris.

https://people.kernel.org/tglx/notes-about-netiquette

^ permalink raw reply	[flat|nested] 24+ messages in thread

* Re: [PATCH v8] swiotlb: Adjust SWIOTBL bounce buffer size for SEV guests.
  2020-12-08 23:01     ` Borislav Petkov
@ 2020-12-08 23:27       ` Konrad Rzeszutek Wilk
  -1 siblings, 0 replies; 24+ messages in thread
From: Konrad Rzeszutek Wilk @ 2020-12-08 23:27 UTC (permalink / raw)
  To: Borislav Petkov
  Cc: Ashish Kalra, hch, tglx, mingo, hpa, x86, luto, peterz,
	dave.hansen, iommu, linux-kernel, brijesh.singh, Thomas.Lendacky,
	Jon.Grimm, rientjes

On December 8, 2020 6:01:19 PM EST, Borislav Petkov <bp@alien8.de> wrote:
>On Tue, Dec 08, 2020 at 05:22:20PM -0500, Konrad Rzeszutek Wilk wrote:
>> I will fix it up.
>
>So who's picking this up? If not me then I probably should have a
>detailed look at the x86 bits before it goes in...

I was planning to pick this up (got one more SWIOTLB-related patch).

That said, if you have the time to take a peek at the x86 bits - that would be awesome!



^ permalink raw reply	[flat|nested] 24+ messages in thread

* Re: [PATCH v8] swiotlb: Adjust SWIOTBL bounce buffer size for SEV guests.
  2020-12-08 23:27       ` Konrad Rzeszutek Wilk
@ 2020-12-08 23:33         ` Borislav Petkov
  -1 siblings, 0 replies; 24+ messages in thread
From: Borislav Petkov @ 2020-12-08 23:33 UTC (permalink / raw)
  To: Konrad Rzeszutek Wilk
  Cc: Ashish Kalra, hch, tglx, mingo, hpa, x86, luto, peterz,
	dave.hansen, iommu, linux-kernel, brijesh.singh, Thomas.Lendacky,
	Jon.Grimm, rientjes

On Tue, Dec 08, 2020 at 06:27:39PM -0500, Konrad Rzeszutek Wilk wrote:
> That said if you have the time to take a peek at the x86 bits - that
> would be awesome!

Sure, tomorrow.

Good night. :-)

-- 
Regards/Gruss,
    Boris.

https://people.kernel.org/tglx/notes-about-netiquette

^ permalink raw reply	[flat|nested] 24+ messages in thread

* Re: [PATCH v8] swiotlb: Adjust SWIOTBL bounce buffer size for SEV guests.
  2020-12-07 23:10 ` Ashish Kalra
@ 2020-12-09 11:01   ` Borislav Petkov
  -1 siblings, 0 replies; 24+ messages in thread
From: Borislav Petkov @ 2020-12-09 11:01 UTC (permalink / raw)
  To: Ashish Kalra
  Cc: konrad.wilk, hch, tglx, mingo, hpa, x86, luto, peterz,
	dave.hansen, iommu, linux-kernel, brijesh.singh, Thomas.Lendacky,
	Jon.Grimm, rientjes

> Subject: Re: [PATCH v8] swiotlb: Adjust SWIOTBL bounce buffer size for SEV guests.

Fix subject prefix to "x86, swiotlb: ... SWIOTLB ... for SEV guests".

Fix typo and no fullstop at the end.

On Mon, Dec 07, 2020 at 11:10:57PM +0000, Ashish Kalra wrote:
> From: Ashish Kalra <ashish.kalra@amd.com>
> 
> For SEV, all DMA to and from guest has to use shared (un-encrypted) pages.
> SEV uses SWIOTLB to make this happen without requiring changes to device
> drivers.  However, depending on workload being run, the default 64MB of
				 ^
				 the

> SWIOTLB might not be enough and SWIOTLB may run out of buffers to use

				s/SWIOTLB/it/

> for DMA, resulting in I/O errors and/or performance degradation for
> high I/O workloads.
> 
> Adjust the default size of SWIOTLB for SEV guests using a
> percentage of the total memory available to guest for SWIOTLB buffers.
					     ^
					     the

> 
> Using late_initcall() interface to invoke swiotlb_adjust() does not
> work as the size adjustment needs to be done before mem_encrypt_init()
> and reserve_crashkernel() which use the allocated SWIOTLB buffer size,
> hence call it explicitly from setup_arch().

So setup_arch() is x86-specific and already a dumping ground for all
kinds of init stuff.

Why don't you call swiotlb_adjust() in mem_encrypt_init() where it
already does swiotlb stuff - swiotlb_update_mem_attributes() - and avoid
all the arch-agnostic function glue?

That is, unless Konrad wants to do other swiotlb adjusting on !x86 too...

> The SWIOTLB default size adjustment needs to be added as an architecture
> specific interface/callback to allow architectures such as those supporting
> memory encryption to adjust/expand SWIOTLB size for their use.

So are other arches wanting this or is this just an assumption? If
latter, you can do x86 only now and let the others extend it when they
really need it.

> v5 fixed build errors and warnings as
> Reported-by: kbuild test robot <lkp@intel.com>
> 
> Signed-off-by: Ashish Kalra <ashish.kalra@amd.com>
> ---
>  arch/x86/kernel/setup.c   |  2 ++
>  arch/x86/mm/mem_encrypt.c | 37 +++++++++++++++++++++++++++++++++++++
>  include/linux/swiotlb.h   |  6 ++++++
>  kernel/dma/swiotlb.c      | 22 ++++++++++++++++++++++
>  4 files changed, 67 insertions(+)
> 
> diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
> index 1bcfbcd2bfd7..d1b8d60040cf 100644
> --- a/arch/x86/mm/mem_encrypt.c
> +++ b/arch/x86/mm/mem_encrypt.c
> @@ -485,7 +485,44 @@ static void print_mem_encrypt_feature_info(void)
>  	pr_cont("\n");
>  }
>  
> +/*
> + * The percentage of guest memory used here for SWIOTLB buffers
> + * is more of an approximation of the static adjustment which
> + * is 128M for <1G guests, 256M for 1G-4G guests and 512M for >4G guests.
> + */
> +#define SEV_ADJUST_SWIOTLB_SIZE_PERCENT	6
> +
>  /* Architecture __weak replacement functions */
> +unsigned long __init arch_swiotlb_adjust(unsigned long iotlb_default_size)
> +{
> +	unsigned long size = iotlb_default_size;
> +
> +	/*
> +	 * For SEV, all DMA has to occur via shared/unencrypted pages.
> +	 * SEV uses SWIOTLB to make this happen without changing device
> +	 * drivers. However, depending on the workload being run, the
> +	 * default 64MB of SWIOTLB may not be enough and SWIOTLB may
> +	 * run out of buffers for DMA, resulting in I/O errors and/or
> +	 * performance degradation especially with high I/O workloads.

<--- newline in the comment here.

> +	 * Adjust the default size of SWIOTLB for SEV guests using
> +	 * a percentage of guest memory for SWIOTLB buffers.
> +	 * Also as the SWIOTLB bounce buffer memory is allocated
	       ^
	       ,

> +	 * from low memory, ensure that the adjusted size is within
> +	 * the limits of low available memory.
> +	 *
> +	 */
> +	if (sev_active()) {
> +		phys_addr_t total_mem = memblock_phys_mem_size();
> +
> +		size = total_mem * SEV_ADJUST_SWIOTLB_SIZE_PERCENT / 100;
> +		size = clamp_val(size, iotlb_default_size, SZ_1G);
> +		pr_info("SWIOTLB bounce buffer size adjusted to %luMB for SEV",
> +			size >> 20);
> +	}
> +
> +	return size;
> +}
> +
>  void __init mem_encrypt_init(void)
>  {
>  	if (!sme_me_mask)

Thx.

-- 
Regards/Gruss,
    Boris.

https://people.kernel.org/tglx/notes-about-netiquette

^ permalink raw reply	[flat|nested] 24+ messages in thread

* Re: [PATCH v8] swiotlb: Adjust SWIOTBL bounce buffer size for SEV guests.
  2020-12-09 11:01   ` Borislav Petkov
@ 2020-12-09 12:29     ` Ashish Kalra
  -1 siblings, 0 replies; 24+ messages in thread
From: Ashish Kalra @ 2020-12-09 12:29 UTC (permalink / raw)
  To: Borislav Petkov
  Cc: konrad.wilk, hch, tglx, mingo, hpa, x86, luto, peterz,
	dave.hansen, iommu, linux-kernel, brijesh.singh, Thomas.Lendacky,
	Jon.Grimm, rientjes

On Wed, Dec 09, 2020 at 12:01:15PM +0100, Borislav Petkov wrote:
> > Subject: Re: [PATCH v8] swiotlb: Adjust SWIOTBL bounce buffer size for SEV guests.
> 
> Fix subject prefix to "x86, swiotlb: ... SWIOTLB ... for SEV guests
> 
> Fix typo and no fullstop at the end.
> 
> On Mon, Dec 07, 2020 at 11:10:57PM +0000, Ashish Kalra wrote:
> > From: Ashish Kalra <ashish.kalra@amd.com>
> > 
> > For SEV, all DMA to and from guest has to use shared (un-encrypted) pages.
> > SEV uses SWIOTLB to make this happen without requiring changes to device
> > drivers.  However, depending on workload being run, the default 64MB of
> 				 ^
> 				 the
> 
> > SWIOTLB might not be enough and SWIOTLB may run out of buffers to use
> 
> 				s/SWIOTLB/it/
> 
> > for DMA, resulting in I/O errors and/or performance degradation for
> > high I/O workloads.
> > 
> > Adjust the default size of SWIOTLB for SEV guests using a
> > percentage of the total memory available to guest for SWIOTLB buffers.
> 					     ^
> 					     the
> 
> > 
> > Using late_initcall() interface to invoke swiotlb_adjust() does not
> > work as the size adjustment needs to be done before mem_encrypt_init()
> > and reserve_crashkernel() which use the allocated SWIOTLB buffer size,
> > hence call it explicitly from setup_arch().
> 
> So setup_arch() is x86-specific and already a dumping ground for all
> kinds of init stuff.
> 
> Why don't you call swiotlb_adjust() in mem_encrypt_init() where it
> already does swiotlb stuff - swiotlb_update_mem_attributes() - and avoid
> all the arch-agnostic function glue?
> 

As I mentioned in the main comments above, this cannot be called in
mem_encrypt_init() as that breaks reserve_crashkernel() which depends on
SWIOTLB buffer size and is called before mem_encrypt_init(), therefore,
it needs to be called from setup_arch() before reserve_crashkernel().

> That is, unless Konrad wants to do other swiotlb adjusting on !x86 too...
> 
> > The SWIOTLB default size adjustment needs to be added as an architecture
> > specific interface/callback to allow architectures such as those supporting
> > memory encryption to adjust/expand SWIOTLB size for their use.
> 
> So are other arches wanting this or is this just an assumption? If
> latter, you can do x86 only now and let the others extend it when they
> really need it.

I believe that other memory encryption architectures such as s390 are
also looking for something similar to be available. 

Thanks,
Ashish


^ permalink raw reply	[flat|nested] 24+ messages in thread
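
The overlap Ashish describes can be illustrated with a toy model. All
addresses below are hypothetical and the placement policy is greatly
simplified; the point is only that reserving the crashkernel against
the default SWIOTLB size and then growing the SWIOTLB afterwards makes
the two low-memory regions collide.

#include <stdio.h>

#define LOW_MEM_TOP  (896UL << 20)	/* hypothetical top of low memory */
#define DEFAULT_TLB   (64UL << 20)
#define ADJUSTED_TLB (256UL << 20)
#define CRASH_SIZE   (128UL << 20)

int main(void)
{
	/* crashkernel placed assuming the default SWIOTLB above it */
	unsigned long crash_base = LOW_MEM_TOP - DEFAULT_TLB - CRASH_SIZE;
	/* SWIOTLB grown later, allocated at the top of low memory */
	unsigned long tlb_base = LOW_MEM_TOP - ADJUSTED_TLB;

	printf("crashkernel: [%luMB, %luMB)\n",
	       crash_base >> 20, (crash_base + CRASH_SIZE) >> 20);
	printf("swiotlb:     [%luMB, %luMB)\n",
	       tlb_base >> 20, LOW_MEM_TOP >> 20);
	if (crash_base + CRASH_SIZE > tlb_base)
		printf("-> overlap, hence swiotlb_adjust() must run first\n");
	return 0;
}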

* Re: [PATCH v8] swiotlb: Adjust SWIOTBL bounce buffer size for SEV guests.
  2020-12-09 12:29     ` Ashish Kalra
@ 2020-12-09 12:54       ` Borislav Petkov
  -1 siblings, 0 replies; 24+ messages in thread
From: Borislav Petkov @ 2020-12-09 12:54 UTC (permalink / raw)
  To: Ashish Kalra
  Cc: konrad.wilk, hch, tglx, mingo, hpa, x86, luto, peterz,
	dave.hansen, iommu, linux-kernel, brijesh.singh, Thomas.Lendacky,
	Jon.Grimm, rientjes

On Wed, Dec 09, 2020 at 12:29:07PM +0000, Ashish Kalra wrote:
> As I mentioned in the main comments above, this cannot be called in
> mem_encrypt_init() as that breaks reserve_crashkernel() which depends
> on SWIOTLB buffer size

Please elaborate on how it breaks.

> and is called before mem_encrypt_init(), therefore, it needs to be
> called from setup_arch() before reserve_crashkernel().

I know you have your requirements what needs to be called when like all
the other vendors who want to run stuff early in a particular order but
our boot init order is a single fragile mess. So this better be done
right!

Also,

[    0.016630] software IO TLB: swiotlb_adjust:
[    0.017005] reserve_crashkernel:
[    0.050523] software IO TLB: swiotlb_init:

this looks strange - we're doing a swiotlb size adjust before init.

It probably makes sense as in: adjust the size before the SWIOTLB is
initialized so that it uses the correct size but this better be spelled
out.

> I believe that other memory encryption architectures such as s390 are
> also looking for something similar to be available.

Until you have something more palpable than belief, "let the others
extend it when they really need it." as I already mentioned.

Thx.

-- 
Regards/Gruss,
    Boris.

https://people.kernel.org/tglx/notes-about-netiquette

^ permalink raw reply	[flat|nested] 24+ messages in thread

* Re: [PATCH v8] swiotlb: Adjust SWIOTBL bounce buffer size for SEV guests.
  2020-12-09 12:54       ` Borislav Petkov
@ 2020-12-09 13:19         ` Ashish Kalra
  -1 siblings, 0 replies; 24+ messages in thread
From: Ashish Kalra @ 2020-12-09 13:19 UTC (permalink / raw)
  To: Borislav Petkov
  Cc: konrad.wilk, hch, tglx, mingo, hpa, x86, luto, peterz,
	dave.hansen, iommu, linux-kernel, brijesh.singh, Thomas.Lendacky,
	Jon.Grimm, rientjes

On Wed, Dec 09, 2020 at 01:54:42PM +0100, Borislav Petkov wrote:
> On Wed, Dec 09, 2020 at 12:29:07PM +0000, Ashish Kalra wrote:
> > As i mentioned in the main comments above, this cannot be called in
> > mem_encrypt_init() as that breaks reserve_crashkernel() which depends
> > on SWIOTLB buffer size
> 
> Please elaborate on how it breaks.
> 

reserve_crashkernel() calls swiotlb_size_or_default() to get SWIOTLB
buffer size and then accordingly allocates low memory for crashkernel. 
If SWIOTLB buffer size is adjusted after reserve_crashkernel() and
swiotlb_size_or_default(), then SWIOTLB buffers will overlap the memory
reserved for crashkernel. Hence any SWIOTLB buffer adjustment needs to
be done before or in swiotlb_size_or_default(), but Konrad is not in
favor of modifying swiotlb_size_or_default(), hence this separate
swiotlb_adjust() interface is introduced. 

> > and is called before mem_encrypt_init(), therefore, it needs to be
> > called from setup_atch() before reserve_crashkernel().
> 
> I know you have your requirements what needs to be called when like all
> the other vendors who want to run stuff early in a particular order but
> our boot init order is a single fragile mess. So this better be done
> right!
> 
> Also,
> 
> [    0.016630] software IO TLB: swiotlb_adjust:
> [    0.017005] reserve_crashkernel:
> [    0.050523] software IO TLB: swiotlb_init:
> 
> this looks strange - we're doing a swiotlb size adjust before init.
> 
> It probably makes sense as in: adjust the size before the SWIOTLB is
> initialized so that it uses the correct size but this better be spelled
> out.
> 

Yes, the adjustment is done before init.

> > I believe that other memory encryption architectures such as s390 are
> > also looking for something similar to be available.
> 
> Until you have something more palpable than belief, "let the others
> extend it when they really need it." as I already mentioned.

There is a need to introduce an architecture-specific callback
for swiotlb_adjust() for the following reason:

The sev_active() function is only available to x86, so this will break
other archs if we use this function in generic swiotlb code.

Therefore, we need arch-specific callback/interface to be invoked from
generic swiotlb code to do the SEV specific actions such as SWIOTLB
buffer size adjustment.

Thanks,
Ashish


^ permalink raw reply	[flat|nested] 24+ messages in thread
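
The __weak mechanism behind the callback Ashish describes can be seen
in miniature: a weak default is replaced by any strong definition of
the same symbol at link time, so generic code never has to reference
sev_active() itself. A minimal sketch, split into the files named in
the comments (build with e.g. cc main.c generic.c arch.c):

/* generic.c - fallback, analogous to the __weak arch_swiotlb_adjust() */
unsigned long __attribute__((weak)) arch_swiotlb_adjust(unsigned long size)
{
	return size;			/* default: leave size untouched */
}

/* arch.c - strong definition; when linked in, it silently replaces the
 * weak fallback, standing in for the x86 SEV sizing logic */
unsigned long arch_swiotlb_adjust(unsigned long size)
{
	return size * 2;		/* placeholder adjustment */
}

/* main.c */
#include <stdio.h>

extern unsigned long arch_swiotlb_adjust(unsigned long size);

int main(void)
{
	/* prints 128 when arch.c is linked in, 64 without it */
	printf("%lu\n", arch_swiotlb_adjust(64));
	return 0;
}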

* Re: [PATCH v8] swiotlb: Adjust SWIOTBL bounce buffer size for SEV guests.
  2020-12-09 13:19         ` Ashish Kalra
@ 2020-12-09 17:51           ` Borislav Petkov
  -1 siblings, 0 replies; 24+ messages in thread
From: Borislav Petkov @ 2020-12-09 17:51 UTC (permalink / raw)
  To: Ashish Kalra
  Cc: konrad.wilk, hch, tglx, mingo, hpa, x86, luto, peterz,
	dave.hansen, iommu, linux-kernel, brijesh.singh, Thomas.Lendacky,
	Jon.Grimm, rientjes

On Wed, Dec 09, 2020 at 01:19:46PM +0000, Ashish Kalra wrote:
> reserve_crashkernel() calls swiotlb_size_or_default() to get SWIOTLB
...

Thanks for explaining.
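
The constraint, concretely: reserve_crashkernel() reads
swiotlb_size_or_default() to size its low-memory reservation, so any
adjustment has to land before that call. A compilable toy sketch of the
ordering (stub functions and made-up values, not the real kernel code):

#include <stdio.h>

static unsigned long io_tlb_nslabs;	/* 0 until adjusted */

static unsigned long swiotlb_size_or_default(void)
{
	return io_tlb_nslabs ? io_tlb_nslabs << 11 : 64UL << 20;
}

static void swiotlb_adjust(void)
{
	io_tlb_nslabs = (256UL << 20) >> 11;	/* pretend SEV wants 256MB */
}

static void reserve_crashkernel(void)
{
	/* The low reservation must leave room for the final SWIOTLB size. */
	printf("reserving low memory around a %luMB SWIOTLB\n",
	       swiotlb_size_or_default() >> 20);
}

int main(void)
{
	swiotlb_adjust();	/* must run first, as in setup_arch()  */
	reserve_crashkernel();	/* now sees 256MB, not the stale 64MB  */
	return 0;
}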

> There is a need to introduce an architecture specific callback
> for swiotlb_adjust() because of the following reason :

So what your version currently does is:

1. from arch code, call generic code - swiotlb_adjust

2. in generic code, call back into arch code - arch_swiotlb_adjust

But that's twice the work needed to get you where you wanna go.

What you wanna do is, from arch code, call into swiotlb generic code.
That's it, no more.

Just like mem_encrypt.c calls swiotlb_update_mem_attributes(), for
example.

And other architectures can simply do the same thing and you have it all
solved and other architectures don't even need to refactor - they simply
copy what x86 does.

IOW, something like this:

---
diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h
index 2f62bbdd9d12..31c4df123aa0 100644
--- a/arch/x86/include/asm/mem_encrypt.h
+++ b/arch/x86/include/asm/mem_encrypt.h
@@ -37,6 +37,7 @@ void __init sme_map_bootdata(char *real_mode_data);
 void __init sme_unmap_bootdata(char *real_mode_data);
 
 void __init sme_early_init(void);
+void __init sev_setup_arch(void);
 
 void __init sme_encrypt_kernel(struct boot_params *bp);
 void __init sme_enable(struct boot_params *bp);
@@ -69,6 +70,7 @@ static inline void __init sme_map_bootdata(char *real_mode_data) { }
 static inline void __init sme_unmap_bootdata(char *real_mode_data) { }
 
 static inline void __init sme_early_init(void) { }
+static inline void __init sev_setup_arch(void) { }
 
 static inline void __init sme_encrypt_kernel(struct boot_params *bp) { }
 static inline void __init sme_enable(struct boot_params *bp) { }
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index a23130c86bdd..740f3bdb3f61 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -1049,6 +1049,12 @@ void __init setup_arch(char **cmdline_p)
 	memblock_set_current_limit(ISA_END_ADDRESS);
 	e820__memblock_setup();
 
+	/*
+	 * Needs to run after memblock setup because it needs the physical
+	 * memory size.
+	 */
+	sev_setup_arch();
+
 	reserve_bios_regions();
 
 	efi_fake_memmap();
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index bc0833713be9..f3db85673eae 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -198,6 +198,37 @@ void __init sme_early_init(void)
 		swiotlb_force = SWIOTLB_FORCE;
 }
 
+void __init sev_setup_arch(void)
+{
+	phys_addr_t total_mem = memblock_phys_mem_size();
+	unsigned long size;
+
+	if (!sev_active())
+		return;
+
+	/*
+	 * For SEV, all DMA has to occur via shared/unencrypted pages.
+	 * SEV uses SWIOTLB to make this happen without changing device
+	 * drivers. However, depending on the workload being run, the
+	 * default 64MB of SWIOTLB may not be enough and SWIOTLB may
+	 * run out of buffers for DMA, resulting in I/O errors and/or
+	 * performance degradation especially with high I/O workloads.
+	 *
+	 * Adjust the default size of SWIOTLB for SEV guests using
+	 * a percentage of guest memory for SWIOTLB buffers.
+	 * Also as the SWIOTLB bounce buffer memory is allocated
+	 * from low memory, ensure that the adjusted size is within
+	 * the limits of low available memory.
+	 *
+	 * The percentage of guest memory used here for SWIOTLB buffers
+	 * is more of an approximation of the static adjustment, which is
+	 * 64MB for <1G and ~128MB to 256MB for 1G-to-4G guests, i.e., ~6%.
+	 */
+	size = total_mem * 6 / 100;
+	size = clamp_val(size, IO_TLB_DEFAULT_SIZE, SZ_1G);
+	swiotlb_adjust_size(size);
+}
+
 static void __init __set_clr_pte_enc(pte_t *kpte, int level, bool enc)
 {
 	pgprot_t old_prot, new_prot;
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index fbdc65782195..7aa94e2f99c6 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -30,6 +30,9 @@ enum swiotlb_force {
  */
 #define IO_TLB_SHIFT 11
 
+/* default to 64MB */
+#define IO_TLB_DEFAULT_SIZE (64UL<<20)
+
 extern void swiotlb_init(int verbose);
 int swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose);
 extern unsigned long swiotlb_nr_tbl(void);
@@ -78,6 +81,7 @@ void __init swiotlb_exit(void);
 unsigned int swiotlb_max_segment(void);
 size_t swiotlb_max_mapping_size(struct device *dev);
 bool is_swiotlb_active(void);
+void __init swiotlb_adjust_size(unsigned long new_size);
 #else
 #define swiotlb_force SWIOTLB_NO_FORCE
 static inline bool is_swiotlb_buffer(phys_addr_t paddr)
@@ -100,6 +104,10 @@ static inline bool is_swiotlb_active(void)
 {
 	return false;
 }
+
+static inline void swiotlb_adjust_size(unsigned long new_size)
+{
+}
 #endif /* CONFIG_SWIOTLB */
 
 extern void swiotlb_print_info(void);
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index 781b9dca197c..7c42df6e6100 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -152,8 +152,6 @@ void swiotlb_set_max_segment(unsigned int val)
 		max_segment = rounddown(val, PAGE_SIZE);
 }
 
-/* default to 64MB */
-#define IO_TLB_DEFAULT_SIZE (64UL<<20)
 unsigned long swiotlb_size_or_default(void)
 {
 	unsigned long size;
@@ -163,6 +161,24 @@ unsigned long swiotlb_size_or_default(void)
 	return size ? size : (IO_TLB_DEFAULT_SIZE);
 }
 
+void __init swiotlb_adjust_size(unsigned long new_size)
+{
+	unsigned long size;
+
+	/*
+	 * If swiotlb parameter has not been specified, give a chance to
+	 * architectures such as those supporting memory encryption to
+	 * adjust/expand SWIOTLB size for their use.
+	 */
+	if (!io_tlb_nslabs) {
+		size = ALIGN(new_size, 1 << IO_TLB_SHIFT);
+		io_tlb_nslabs = size >> IO_TLB_SHIFT;
+		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
+
+		pr_info("SWIOTLB bounce buffer size adjusted to %luMB\n", size >> 20);
+	}
+}
+
 void swiotlb_print_info(void)
 {
 	unsigned long bytes = io_tlb_nslabs << IO_TLB_SHIFT;
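
To sanity-check the sizing math above: size = total_mem * 6 / 100,
clamped to [IO_TLB_DEFAULT_SIZE, SZ_1G], works out to ~61MB for a 1GB
guest (clamped up to the 64MB default), ~246MB for a 4GB guest and
~983MB for a 16GB guest; guests beyond roughly 17GB hit the 1GB ceiling.
swiotlb_adjust_size() then rounds the byte count up to whole 2KB slabs
(1 << IO_TLB_SHIFT, with IO_TLB_SHIFT = 11) and rounds io_tlb_nslabs up
to a multiple of IO_TLB_SEGSIZE (128), so the final size can end up
slightly above the raw 6% figure.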

-- 
Regards/Gruss,
    Boris.

https://people.kernel.org/tglx/notes-about-netiquette

^ permalink raw reply related	[flat|nested] 24+ messages in thread

* Re: [PATCH v8] swiotlb: Adjust SWIOTLB bounce buffer size for SEV guests.
  2020-12-09 17:51           ` Borislav Petkov
@ 2020-12-09 19:34             ` Ashish Kalra
  -1 siblings, 0 replies; 24+ messages in thread
From: Ashish Kalra @ 2020-12-09 19:34 UTC (permalink / raw)
  To: Borislav Petkov
  Cc: konrad.wilk, hch, tglx, mingo, hpa, x86, luto, peterz,
	dave.hansen, iommu, linux-kernel, brijesh.singh, Thomas.Lendacky,
	Jon.Grimm, rientjes

On Wed, Dec 09, 2020 at 06:51:05PM +0100, Borislav Petkov wrote:
> On Wed, Dec 09, 2020 at 01:19:46PM +0000, Ashish Kalra wrote:
> > reserve_crashkernel() calls swiotlb_size_or_default() to get SWIOTLB
> ...
> 
> Thanks for explaining.
> 
> > There is a need to introduce an architecture specific callback
> > for swiotlb_adjust() because of the following reason :
> 
> So what your version currently does is:
> 
> 1. from arch code, call generic code - swiotlb_adjust
> 
> 2. in generic code, call back into arch code - arch_swiotlb_adjust
> 
> But that's twice the work needed to get you where you wanna go.
> 
> What you wanna do is, from arch code, call into swiotlb generic code.
> That's it, no more.
> 
> Just like mem_encrypt.c calls swiotlb_update_mem_attributes(), for
> example.
> 
> And other architectures can simply do the same thing and you have it all
> solved and other architectures don't even need to refactor - they simply
> copy what x86 does.
> 
> IOW, something like this:
> 

This should work, but I am concerned about making IO_TLB_DEFAULT_SIZE
(which is pretty much private to the generic swiotlb code) visible
externally; are there any concerns with that?
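
For comparison, one hypothetical way to keep the define private would be
a small accessor instead of a public macro (swiotlb_default_size() is an
invented name here, not an existing kernel interface):

/* kernel/dma/swiotlb.c: the define stays file-local (sketch only) */
#define IO_TLB_DEFAULT_SIZE (64UL << 20)

unsigned long swiotlb_default_size(void)
{
	return IO_TLB_DEFAULT_SIZE;
}

The SEV code would then clamp with clamp_val(size,
swiotlb_default_size(), SZ_1G) rather than referencing the macro
directly.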

Thanks,
Ashish

> ...

^ permalink raw reply	[flat|nested] 24+ messages in thread

* Re: [PATCH v8] swiotlb: Adjust SWIOTLB bounce buffer size for SEV guests.
  2020-12-09 19:34             ` Ashish Kalra
@ 2020-12-09 19:43               ` Borislav Petkov
  -1 siblings, 0 replies; 24+ messages in thread
From: Borislav Petkov @ 2020-12-09 19:43 UTC (permalink / raw)
  To: Ashish Kalra
  Cc: konrad.wilk, hch, tglx, mingo, hpa, x86, luto, peterz,
	dave.hansen, iommu, linux-kernel, brijesh.singh, Thomas.Lendacky,
	Jon.Grimm, rientjes

On Wed, Dec 09, 2020 at 07:34:16PM +0000, Ashish Kalra wrote:
> This should work, but I am concerned about making IO_TLB_DEFAULT_SIZE
> (which is pretty much private to the generic swiotlb code) visible
> externally; are there any concerns with that?

Meh, it's just a define and it is not a secret that swiotlb size by
default is 64M.

Btw, pls trim your reply by removing quoted text you're not responding
to.

Thx.

-- 
Regards/Gruss,
    Boris.

https://people.kernel.org/tglx/notes-about-netiquette

^ permalink raw reply	[flat|nested] 24+ messages in thread

end of thread, other threads:[~2020-12-09 19:45 UTC | newest]

Thread overview: 24+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-12-07 23:10 [PATCH v8] swiotlb: Adjust SWIOTLB bounce buffer size for SEV guests Ashish Kalra
2020-12-08 22:22 ` Konrad Rzeszutek Wilk
2020-12-08 23:01   ` Borislav Petkov
2020-12-08 23:27     ` Konrad Rzeszutek Wilk
2020-12-08 23:33       ` Borislav Petkov
2020-12-09 11:01 ` Borislav Petkov
2020-12-09 12:29   ` Ashish Kalra
2020-12-09 12:54     ` Borislav Petkov
2020-12-09 13:19       ` Ashish Kalra
2020-12-09 17:51         ` Borislav Petkov
2020-12-09 19:34           ` Ashish Kalra
2020-12-09 19:43             ` Borislav Petkov
