* [PATCH v6 1/7] xen/arm: introduce new helper device_tree_get_meminfo
2021-09-08 9:52 [PATCH v6 0/7] Domain on Static Allocation Penny Zheng
@ 2021-09-08 9:52 ` Penny Zheng
2021-09-08 23:29 ` Stefano Stabellini
2021-09-08 9:52 ` [PATCH v6 2/7] xen/arm: introduce domain on Static Allocation Penny Zheng
` (5 subsequent siblings)
6 siblings, 1 reply; 18+ messages in thread
From: Penny Zheng @ 2021-09-08 9:52 UTC (permalink / raw)
To: xen-devel, sstabellini, julien; +Cc: Bertrand.Marquis, Wei.Chen, jbeulich
This commit creates a new helper device_tree_get_meminfo to iterate over a
device tree property to get memory info, like "reg".
Signed-off-by: Penny Zheng <penny.zheng@arm.com>
---
xen/arch/arm/bootfdt.c | 83 ++++++++++++++++++++++++------------------
1 file changed, 47 insertions(+), 36 deletions(-)
diff --git a/xen/arch/arm/bootfdt.c b/xen/arch/arm/bootfdt.c
index 476e32e0f5..b01badda3e 100644
--- a/xen/arch/arm/bootfdt.c
+++ b/xen/arch/arm/bootfdt.c
@@ -63,6 +63,52 @@ void __init device_tree_get_reg(const __be32 **cell, u32 address_cells,
*size = dt_next_cell(size_cells, cell);
}
+static int __init device_tree_get_meminfo(const void *fdt, int node,
+ const char *prop_name,
+ u32 address_cells, u32 size_cells,
+ void *data)
+{
+ const struct fdt_property *prop;
+ unsigned int i, banks;
+ const __be32 *cell;
+ u32 reg_cells = address_cells + size_cells;
+ paddr_t start, size;
+ struct meminfo *mem = data;
+
+ if ( address_cells < 1 || size_cells < 1 )
+ {
+ printk("fdt: property `%s': invalid #address-cells or #size-cells",
+ prop_name);
+ return -EINVAL;
+ }
+
+ prop = fdt_get_property(fdt, node, prop_name, NULL);
+ if ( !prop )
+ return -ENOENT;
+
+ cell = (const __be32 *)prop->data;
+ banks = fdt32_to_cpu(prop->len) / (reg_cells * sizeof (u32));
+
+ for ( i = 0; i < banks && mem->nr_banks < NR_MEM_BANKS; i++ )
+ {
+ device_tree_get_reg(&cell, address_cells, size_cells, &start, &size);
+ /* Some DT may describe empty bank, ignore them */
+ if ( !size )
+ continue;
+ mem->bank[mem->nr_banks].start = start;
+ mem->bank[mem->nr_banks].size = size;
+ mem->nr_banks++;
+ }
+
+ if ( i < banks )
+ {
+ printk("Warning: Max number of supported memory regions reached.\n");
+ return -ENOSPC;
+ }
+
+ return 0;
+}
+
u32 __init device_tree_get_u32(const void *fdt, int node,
const char *prop_name, u32 dflt)
{
@@ -139,42 +185,7 @@ static int __init process_memory_node(const void *fdt, int node,
u32 address_cells, u32 size_cells,
void *data)
{
- const struct fdt_property *prop;
- int i;
- int banks;
- const __be32 *cell;
- paddr_t start, size;
- u32 reg_cells = address_cells + size_cells;
- struct meminfo *mem = data;
-
- if ( address_cells < 1 || size_cells < 1 )
- {
- printk("fdt: node `%s': invalid #address-cells or #size-cells",
- name);
- return -EINVAL;
- }
-
- prop = fdt_get_property(fdt, node, "reg", NULL);
- if ( !prop )
- return -ENOENT;
-
- cell = (const __be32 *)prop->data;
- banks = fdt32_to_cpu(prop->len) / (reg_cells * sizeof (u32));
-
- for ( i = 0; i < banks && mem->nr_banks < NR_MEM_BANKS; i++ )
- {
- device_tree_get_reg(&cell, address_cells, size_cells, &start, &size);
- /* Some DT may describe empty bank, ignore them */
- if ( !size )
- continue;
- mem->bank[mem->nr_banks].start = start;
- mem->bank[mem->nr_banks].size = size;
- mem->nr_banks++;
- }
-
- if ( i < banks )
- return -ENOSPC;
- return 0;
+ return device_tree_get_meminfo(fdt, node, "reg", address_cells, size_cells, data);
}
static int __init process_reserved_memory_node(const void *fdt, int node,
--
2.25.1
^ permalink raw reply related [flat|nested] 18+ messages in thread
* Re: [PATCH v6 1/7] xen/arm: introduce new helper device_tree_get_meminfo
2021-09-08 9:52 ` [PATCH v6 1/7] xen/arm: introduce new helper device_tree_get_meminfo Penny Zheng
@ 2021-09-08 23:29 ` Stefano Stabellini
0 siblings, 0 replies; 18+ messages in thread
From: Stefano Stabellini @ 2021-09-08 23:29 UTC (permalink / raw)
To: Penny Zheng
Cc: xen-devel, sstabellini, julien, Bertrand.Marquis, Wei.Chen, jbeulich
On Wed, 8 Sep 2021, Penny Zheng wrote:
> This commit creates a new helper device_tree_get_meminfo to iterate over a
> device tree property to get memory info, like "reg".
>
> Signed-off-by: Penny Zheng <penny.zheng@arm.com>
Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>
> ---
> xen/arch/arm/bootfdt.c | 83 ++++++++++++++++++++++++------------------
> 1 file changed, 47 insertions(+), 36 deletions(-)
>
> diff --git a/xen/arch/arm/bootfdt.c b/xen/arch/arm/bootfdt.c
> index 476e32e0f5..b01badda3e 100644
> --- a/xen/arch/arm/bootfdt.c
> +++ b/xen/arch/arm/bootfdt.c
> @@ -63,6 +63,52 @@ void __init device_tree_get_reg(const __be32 **cell, u32 address_cells,
> *size = dt_next_cell(size_cells, cell);
> }
>
> +static int __init device_tree_get_meminfo(const void *fdt, int node,
> + const char *prop_name,
> + u32 address_cells, u32 size_cells,
> + void *data)
> +{
> + const struct fdt_property *prop;
> + unsigned int i, banks;
> + const __be32 *cell;
> + u32 reg_cells = address_cells + size_cells;
> + paddr_t start, size;
> + struct meminfo *mem = data;
> +
> + if ( address_cells < 1 || size_cells < 1 )
> + {
> + printk("fdt: property `%s': invalid #address-cells or #size-cells",
> + prop_name);
> + return -EINVAL;
> + }
> +
> + prop = fdt_get_property(fdt, node, prop_name, NULL);
> + if ( !prop )
> + return -ENOENT;
> +
> + cell = (const __be32 *)prop->data;
> + banks = fdt32_to_cpu(prop->len) / (reg_cells * sizeof (u32));
> +
> + for ( i = 0; i < banks && mem->nr_banks < NR_MEM_BANKS; i++ )
> + {
> + device_tree_get_reg(&cell, address_cells, size_cells, &start, &size);
> + /* Some DT may describe empty bank, ignore them */
> + if ( !size )
> + continue;
> + mem->bank[mem->nr_banks].start = start;
> + mem->bank[mem->nr_banks].size = size;
> + mem->nr_banks++;
> + }
> +
> + if ( i < banks )
> + {
> + printk("Warning: Max number of supported memory regions reached.\n");
> + return -ENOSPC;
> + }
> +
> + return 0;
> +}
> +
> u32 __init device_tree_get_u32(const void *fdt, int node,
> const char *prop_name, u32 dflt)
> {
> @@ -139,42 +185,7 @@ static int __init process_memory_node(const void *fdt, int node,
> u32 address_cells, u32 size_cells,
> void *data)
> {
> - const struct fdt_property *prop;
> - int i;
> - int banks;
> - const __be32 *cell;
> - paddr_t start, size;
> - u32 reg_cells = address_cells + size_cells;
> - struct meminfo *mem = data;
> -
> - if ( address_cells < 1 || size_cells < 1 )
> - {
> - printk("fdt: node `%s': invalid #address-cells or #size-cells",
> - name);
> - return -EINVAL;
> - }
> -
> - prop = fdt_get_property(fdt, node, "reg", NULL);
> - if ( !prop )
> - return -ENOENT;
> -
> - cell = (const __be32 *)prop->data;
> - banks = fdt32_to_cpu(prop->len) / (reg_cells * sizeof (u32));
> -
> - for ( i = 0; i < banks && mem->nr_banks < NR_MEM_BANKS; i++ )
> - {
> - device_tree_get_reg(&cell, address_cells, size_cells, &start, &size);
> - /* Some DT may describe empty bank, ignore them */
> - if ( !size )
> - continue;
> - mem->bank[mem->nr_banks].start = start;
> - mem->bank[mem->nr_banks].size = size;
> - mem->nr_banks++;
> - }
> -
> - if ( i < banks )
> - return -ENOSPC;
> - return 0;
> + return device_tree_get_meminfo(fdt, node, "reg", address_cells, size_cells, data);
> }
>
> static int __init process_reserved_memory_node(const void *fdt, int node,
> --
> 2.25.1
>
^ permalink raw reply [flat|nested] 18+ messages in thread
* [PATCH v6 2/7] xen/arm: introduce domain on Static Allocation
2021-09-08 9:52 [PATCH v6 0/7] Domain on Static Allocation Penny Zheng
2021-09-08 9:52 ` [PATCH v6 1/7] xen/arm: introduce new helper device_tree_get_meminfo Penny Zheng
@ 2021-09-08 9:52 ` Penny Zheng
2021-09-08 23:54 ` Stefano Stabellini
2021-09-08 9:52 ` [PATCH v6 3/7] xen: introduce mark_page_free Penny Zheng
` (4 subsequent siblings)
6 siblings, 1 reply; 18+ messages in thread
From: Penny Zheng @ 2021-09-08 9:52 UTC (permalink / raw)
To: xen-devel, sstabellini, julien; +Cc: Bertrand.Marquis, Wei.Chen, jbeulich
Static Allocation refers to systems or sub-systems (domains) for which memory
areas are pre-defined by configuration using physical address ranges.
This pre-defined memory -- Static Memory -- consists of parts of RAM reserved
at boot, and shall never go to the heap allocator or boot allocator for any use.
Memory can be statically allocated to a domain using the property "xen,static-
mem" defined in the domain configuration. The number of cells for the address
and the size must be defined using respectively the properties
"#xen,static-mem-address-cells" and "#xen,static-mem-size-cells".
The property 'memory' is still needed and should match the amount of memory
given to the guest. Currently, it either comes from static memory or lets Xen
allocate from heap. *Mixing* is not supported.
The static memory will be mapped in the guest at the usual guest memory
addresses (GUEST_RAM0_BASE, GUEST_RAM1_BASE) defined by
xen/include/public/arch-arm.h.
This patch introduces this new `xen,static-mem` feature, and also documents
and parses this new attribute at boot time.
This patch also introduces a new field "bool xen_domain" in "struct membank"
to tell whether the memory bank is reserved as a whole hardware resource,
or bound to a xen domain node through "xen,static-mem".
Signed-off-by: Penny Zheng <penny.zheng@arm.com>
---
docs/misc/arm/device-tree/booting.txt | 42 +++++++++++++++++++++++++++
xen/arch/arm/bootfdt.c | 30 +++++++++++++++++--
xen/include/asm-arm/setup.h | 1 +
3 files changed, 71 insertions(+), 2 deletions(-)
diff --git a/docs/misc/arm/device-tree/booting.txt b/docs/misc/arm/device-tree/booting.txt
index 5243bc7fd3..44cd9e1a9a 100644
--- a/docs/misc/arm/device-tree/booting.txt
+++ b/docs/misc/arm/device-tree/booting.txt
@@ -268,3 +268,45 @@ The DTB fragment is loaded at 0xc000000 in the example above. It should
follow the convention explained in docs/misc/arm/passthrough.txt. The
DTB fragment will be added to the guest device tree, so that the guest
kernel will be able to discover the device.
+
+
+Static Allocation
+=============
+
+Static Allocation refers to system or sub-system(domains) for which memory
+areas are pre-defined by configuration using physical address ranges.
+
+Memory can be statically allocated to a domain using the property "xen,static-
+mem" defined in the domain configuration. The number of cells for the address
+and the size must be defined using respectively the properties
+"#xen,static-mem-address-cells" and "#xen,static-mem-size-cells".
+
+The property 'memory' is still needed and should match the amount of memory
+given to the guest. Currently, it either comes from static memory or lets Xen
+allocate from heap. *Mixing* is not supported.
+
+The static memory will be mapped in the guest at the usual guest memory
+addresses (GUEST_RAM0_BASE, GUEST_RAM1_BASE) defined by
+xen/include/public/arch-arm.h.
+
+Below is an example on how to specify the static memory region in the
+device-tree:
+
+ / {
+ chosen {
+ domU1 {
+ compatible = "xen,domain";
+ #address-cells = <0x2>;
+ #size-cells = <0x2>;
+ cpus = <2>;
+ memory = <0x0 0x80000>;
+ #xen,static-mem-address-cells = <0x1>;
+ #xen,static-mem-size-cells = <0x1>;
+ xen,static-mem = <0x30000000 0x20000000>;
+ ...
+ };
+ };
+ };
+
+This will reserve a 512MB region starting at the host physical address
+0x30000000 to be exclusively used by DomU1.
diff --git a/xen/arch/arm/bootfdt.c b/xen/arch/arm/bootfdt.c
index b01badda3e..afaa0e249b 100644
--- a/xen/arch/arm/bootfdt.c
+++ b/xen/arch/arm/bootfdt.c
@@ -66,7 +66,7 @@ void __init device_tree_get_reg(const __be32 **cell, u32 address_cells,
static int __init device_tree_get_meminfo(const void *fdt, int node,
const char *prop_name,
u32 address_cells, u32 size_cells,
- void *data)
+ void *data, bool xen_domain)
{
const struct fdt_property *prop;
unsigned int i, banks;
@@ -97,6 +97,7 @@ static int __init device_tree_get_meminfo(const void *fdt, int node,
continue;
mem->bank[mem->nr_banks].start = start;
mem->bank[mem->nr_banks].size = size;
+ mem->bank[mem->nr_banks].xen_domain = xen_domain;
mem->nr_banks++;
}
@@ -185,7 +186,8 @@ static int __init process_memory_node(const void *fdt, int node,
u32 address_cells, u32 size_cells,
void *data)
{
- return device_tree_get_meminfo(fdt, node, "reg", address_cells, size_cells, data);
+ return device_tree_get_meminfo(fdt, node, "reg", address_cells, size_cells,
+ data, false);
}
static int __init process_reserved_memory_node(const void *fdt, int node,
@@ -339,6 +341,28 @@ static void __init process_chosen_node(const void *fdt, int node,
add_boot_module(BOOTMOD_RAMDISK, start, end-start, false);
}
+static int __init process_domain_node(const void *fdt, int node,
+ const char *name,
+ u32 address_cells, u32 size_cells)
+{
+ const struct fdt_property *prop;
+
+ printk("Checking for \"xen,static-mem\" in domain node\n");
+
+ prop = fdt_get_property(fdt, node, "xen,static-mem", NULL);
+ if ( !prop )
+ /* No "xen,static-mem" present. */
+ return 0;
+
+ address_cells = device_tree_get_u32(fdt, node,
+ "#xen,static-mem-address-cells", 0);
+ size_cells = device_tree_get_u32(fdt, node,
+ "#xen,static-mem-size-cells", 0);
+
+ return device_tree_get_meminfo(fdt, node, "xen,static-mem", address_cells,
+ size_cells, &bootinfo.reserved_mem, true);
+}
+
static int __init early_scan_node(const void *fdt,
int node, const char *name, int depth,
u32 address_cells, u32 size_cells,
@@ -357,6 +381,8 @@ static int __init early_scan_node(const void *fdt,
process_multiboot_node(fdt, node, name, address_cells, size_cells);
else if ( depth == 1 && device_tree_node_matches(fdt, node, "chosen") )
process_chosen_node(fdt, node, name, address_cells, size_cells);
+ else if ( depth == 2 && device_tree_node_compatible(fdt, node, "xen,domain") )
+ rc = process_domain_node(fdt, node, name, address_cells, size_cells);
if ( rc < 0 )
printk("fdt: node `%s': parsing failed\n", name);
diff --git a/xen/include/asm-arm/setup.h b/xen/include/asm-arm/setup.h
index c4b6af6029..95da0b7ab9 100644
--- a/xen/include/asm-arm/setup.h
+++ b/xen/include/asm-arm/setup.h
@@ -24,6 +24,7 @@ typedef enum {
struct membank {
paddr_t start;
paddr_t size;
+ bool xen_domain; /* whether the memory bank is bound to a Xen domain. */
};
struct meminfo {
--
2.25.1
^ permalink raw reply related [flat|nested] 18+ messages in thread
* Re: [PATCH v6 2/7] xen/arm: introduce domain on Static Allocation
2021-09-08 9:52 ` [PATCH v6 2/7] xen/arm: introduce domain on Static Allocation Penny Zheng
@ 2021-09-08 23:54 ` Stefano Stabellini
0 siblings, 0 replies; 18+ messages in thread
From: Stefano Stabellini @ 2021-09-08 23:54 UTC (permalink / raw)
To: Penny Zheng
Cc: xen-devel, sstabellini, julien, Bertrand.Marquis, Wei.Chen, jbeulich
On Wed, 8 Sep 2021, Penny Zheng wrote:
> Static Allocation refers to system or sub-system(domains) for which memory
> areas are pre-defined by configuration using physical address ranges.
>
> Those pre-defined memory, -- Static Memory, as parts of RAM reserved in the
> beginning, shall never go to heap allocator or boot allocator for any use.
>
> Memory can be statically allocated to a domain using the property "xen,static-
> mem" defined in the domain configuration. The number of cells for the address
> and the size must be defined using respectively the properties
> "#xen,static-mem-address-cells" and "#xen,static-mem-size-cells".
>
> The property 'memory' is still needed and should match the amount of memory
> given to the guest. Currently, it either comes from static memory or lets Xen
> allocate from heap. *Mixing* is not supported.
>
> The static memory will be mapped in the guest at the usual guest memory
> addresses (GUEST_RAM0_BASE, GUEST_RAM1_BASE) defined by
> xen/include/public/arch-arm.h.
>
> This patch introduces this new `xen,static-mem` feature, and also documents
> and parses this new attribute at boot time.
>
> This patch also introduces a new field "bool xen_domain" in "struct membank"
> to tell whether the memory bank is reserved as the whole hardware resource,
> or bind to a xen domain node, through "xen,static-mem"
>
> Signed-off-by: Penny Zheng <penny.zheng@arm.com>
Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>
> ---
> docs/misc/arm/device-tree/booting.txt | 42 +++++++++++++++++++++++++++
> xen/arch/arm/bootfdt.c | 30 +++++++++++++++++--
> xen/include/asm-arm/setup.h | 1 +
> 3 files changed, 71 insertions(+), 2 deletions(-)
>
> diff --git a/docs/misc/arm/device-tree/booting.txt b/docs/misc/arm/device-tree/booting.txt
> index 5243bc7fd3..44cd9e1a9a 100644
> --- a/docs/misc/arm/device-tree/booting.txt
> +++ b/docs/misc/arm/device-tree/booting.txt
> @@ -268,3 +268,45 @@ The DTB fragment is loaded at 0xc000000 in the example above. It should
> follow the convention explained in docs/misc/arm/passthrough.txt. The
> DTB fragment will be added to the guest device tree, so that the guest
> kernel will be able to discover the device.
> +
> +
> +Static Allocation
> +=============
> +
> +Static Allocation refers to system or sub-system(domains) for which memory
> +areas are pre-defined by configuration using physical address ranges.
> +
> +Memory can be statically allocated to a domain using the property "xen,static-
> +mem" defined in the domain configuration. The number of cells for the address
> +and the size must be defined using respectively the properties
> +"#xen,static-mem-address-cells" and "#xen,static-mem-size-cells".
> +
> +The property 'memory' is still needed and should match the amount of memory
> +given to the guest. Currently, it either comes from static memory or lets Xen
> +allocate from heap. *Mixing* is not supported.
> +
> +The static memory will be mapped in the guest at the usual guest memory
> +addresses (GUEST_RAM0_BASE, GUEST_RAM1_BASE) defined by
> +xen/include/public/arch-arm.h.
> +
> +Below is an example on how to specify the static memory region in the
> +device-tree:
> +
> + / {
> + chosen {
> + domU1 {
> + compatible = "xen,domain";
> + #address-cells = <0x2>;
> + #size-cells = <0x2>;
> + cpus = <2>;
> + memory = <0x0 0x80000>;
> + #xen,static-mem-address-cells = <0x1>;
> + #xen,static-mem-size-cells = <0x1>;
> + xen,static-mem = <0x30000000 0x20000000>;
> + ...
> + };
> + };
> + };
> +
> +This will reserve a 512MB region starting at the host physical address
> +0x30000000 to be exclusively used by DomU1.
> diff --git a/xen/arch/arm/bootfdt.c b/xen/arch/arm/bootfdt.c
> index b01badda3e..afaa0e249b 100644
> --- a/xen/arch/arm/bootfdt.c
> +++ b/xen/arch/arm/bootfdt.c
> @@ -66,7 +66,7 @@ void __init device_tree_get_reg(const __be32 **cell, u32 address_cells,
> static int __init device_tree_get_meminfo(const void *fdt, int node,
> const char *prop_name,
> u32 address_cells, u32 size_cells,
> - void *data)
> + void *data, bool xen_domain)
> {
> const struct fdt_property *prop;
> unsigned int i, banks;
> @@ -97,6 +97,7 @@ static int __init device_tree_get_meminfo(const void *fdt, int node,
> continue;
> mem->bank[mem->nr_banks].start = start;
> mem->bank[mem->nr_banks].size = size;
> + mem->bank[mem->nr_banks].xen_domain = xen_domain;
> mem->nr_banks++;
> }
>
> @@ -185,7 +186,8 @@ static int __init process_memory_node(const void *fdt, int node,
> u32 address_cells, u32 size_cells,
> void *data)
> {
> - return device_tree_get_meminfo(fdt, node, "reg", address_cells, size_cells, data);
> + return device_tree_get_meminfo(fdt, node, "reg", address_cells, size_cells,
> + data, false);
> }
>
> static int __init process_reserved_memory_node(const void *fdt, int node,
> @@ -339,6 +341,28 @@ static void __init process_chosen_node(const void *fdt, int node,
> add_boot_module(BOOTMOD_RAMDISK, start, end-start, false);
> }
>
> +static int __init process_domain_node(const void *fdt, int node,
> + const char *name,
> + u32 address_cells, u32 size_cells)
> +{
> + const struct fdt_property *prop;
> +
> + printk("Checking for \"xen,static-mem\" in domain node\n");
> +
> + prop = fdt_get_property(fdt, node, "xen,static-mem", NULL);
> + if ( !prop )
> + /* No "xen,static-mem" present. */
> + return 0;
> +
> + address_cells = device_tree_get_u32(fdt, node,
> + "#xen,static-mem-address-cells", 0);
> + size_cells = device_tree_get_u32(fdt, node,
> + "#xen,static-mem-size-cells", 0);
> +
> + return device_tree_get_meminfo(fdt, node, "xen,static-mem", address_cells,
> + size_cells, &bootinfo.reserved_mem, true);
> +}
> +
> static int __init early_scan_node(const void *fdt,
> int node, const char *name, int depth,
> u32 address_cells, u32 size_cells,
> @@ -357,6 +381,8 @@ static int __init early_scan_node(const void *fdt,
> process_multiboot_node(fdt, node, name, address_cells, size_cells);
> else if ( depth == 1 && device_tree_node_matches(fdt, node, "chosen") )
> process_chosen_node(fdt, node, name, address_cells, size_cells);
> + else if ( depth == 2 && device_tree_node_compatible(fdt, node, "xen,domain") )
> + rc = process_domain_node(fdt, node, name, address_cells, size_cells);
>
> if ( rc < 0 )
> printk("fdt: node `%s': parsing failed\n", name);
> diff --git a/xen/include/asm-arm/setup.h b/xen/include/asm-arm/setup.h
> index c4b6af6029..95da0b7ab9 100644
> --- a/xen/include/asm-arm/setup.h
> +++ b/xen/include/asm-arm/setup.h
> @@ -24,6 +24,7 @@ typedef enum {
> struct membank {
> paddr_t start;
> paddr_t size;
> + bool xen_domain; /* whether the memory bank is bound to a Xen domain. */
> };
>
> struct meminfo {
> --
> 2.25.1
>
^ permalink raw reply [flat|nested] 18+ messages in thread
* [PATCH v6 3/7] xen: introduce mark_page_free
2021-09-08 9:52 [PATCH v6 0/7] Domain on Static Allocation Penny Zheng
2021-09-08 9:52 ` [PATCH v6 1/7] xen/arm: introduce new helper device_tree_get_meminfo Penny Zheng
2021-09-08 9:52 ` [PATCH v6 2/7] xen/arm: introduce domain on Static Allocation Penny Zheng
@ 2021-09-08 9:52 ` Penny Zheng
2021-09-08 9:52 ` [PATCH v6 4/7] xen/arm: static memory initialization Penny Zheng
` (3 subsequent siblings)
6 siblings, 0 replies; 18+ messages in thread
From: Penny Zheng @ 2021-09-08 9:52 UTC (permalink / raw)
To: xen-devel, sstabellini, julien; +Cc: Bertrand.Marquis, Wei.Chen, jbeulich
This commit defines a new helper mark_page_free to extract common code,
like following the same cache/TLB coherency policy, between free_heap_pages
and the new function free_staticmem_pages, which will be introduced later.
The PDX compression makes that conversion between the MFN and the page can
be potentially non-trivial. As the function is internal, pass the MFN and
the page. They are both expected to match.
Signed-off-by: Penny Zheng <penny.zheng@arm.com>
Acked-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Julien Grall <jgrall@amazon.com>
---
xen/common/page_alloc.c | 89 ++++++++++++++++++++++-------------------
1 file changed, 48 insertions(+), 41 deletions(-)
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index 958ba0cd92..a3ee5eca9e 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -1376,6 +1376,53 @@ bool scrub_free_pages(void)
return node_to_scrub(false) != NUMA_NO_NODE;
}
+static void mark_page_free(struct page_info *pg, mfn_t mfn)
+{
+ ASSERT(mfn_x(mfn) == mfn_x(page_to_mfn(pg)));
+
+ /*
+ * Cannot assume that count_info == 0, as there are some corner cases
+ * where it isn't the case and yet it isn't a bug:
+ * 1. page_get_owner() is NULL
+ * 2. page_get_owner() is a domain that was never accessible by
+ * its domid (e.g., failed to fully construct the domain).
+ * 3. page was never addressable by the guest (e.g., it's an
+ * auto-translate-physmap guest and the page was never included
+ * in its pseudophysical address space).
+ * In all the above cases there can be no guest mappings of this page.
+ */
+ switch ( pg->count_info & PGC_state )
+ {
+ case PGC_state_inuse:
+ BUG_ON(pg->count_info & PGC_broken);
+ pg->count_info = PGC_state_free;
+ break;
+
+ case PGC_state_offlining:
+ pg->count_info = (pg->count_info & PGC_broken) |
+ PGC_state_offlined;
+ tainted = 1;
+ break;
+
+ default:
+ printk(XENLOG_ERR
+ "pg MFN %"PRI_mfn" c=%#lx o=%u v=%#lx t=%#x\n",
+ mfn_x(mfn),
+ pg->count_info, pg->v.free.order,
+ pg->u.free.val, pg->tlbflush_timestamp);
+ BUG();
+ }
+
+ /* If a page has no owner it will need no safety TLB flush. */
+ pg->u.free.need_tlbflush = (page_get_owner(pg) != NULL);
+ if ( pg->u.free.need_tlbflush )
+ page_set_tlbflush_timestamp(pg);
+
+ /* This page is not a guest frame any more. */
+ page_set_owner(pg, NULL); /* set_gpfn_from_mfn snoops pg owner */
+ set_gpfn_from_mfn(mfn_x(mfn), INVALID_M2P_ENTRY);
+}
+
/* Free 2^@order set of pages. */
static void free_heap_pages(
struct page_info *pg, unsigned int order, bool need_scrub)
@@ -1392,47 +1439,7 @@ static void free_heap_pages(
for ( i = 0; i < (1 << order); i++ )
{
- /*
- * Cannot assume that count_info == 0, as there are some corner cases
- * where it isn't the case and yet it isn't a bug:
- * 1. page_get_owner() is NULL
- * 2. page_get_owner() is a domain that was never accessible by
- * its domid (e.g., failed to fully construct the domain).
- * 3. page was never addressable by the guest (e.g., it's an
- * auto-translate-physmap guest and the page was never included
- * in its pseudophysical address space).
- * In all the above cases there can be no guest mappings of this page.
- */
- switch ( pg[i].count_info & PGC_state )
- {
- case PGC_state_inuse:
- BUG_ON(pg[i].count_info & PGC_broken);
- pg[i].count_info = PGC_state_free;
- break;
-
- case PGC_state_offlining:
- pg[i].count_info = (pg[i].count_info & PGC_broken) |
- PGC_state_offlined;
- tainted = 1;
- break;
-
- default:
- printk(XENLOG_ERR
- "pg[%u] MFN %"PRI_mfn" c=%#lx o=%u v=%#lx t=%#x\n",
- i, mfn_x(mfn) + i,
- pg[i].count_info, pg[i].v.free.order,
- pg[i].u.free.val, pg[i].tlbflush_timestamp);
- BUG();
- }
-
- /* If a page has no owner it will need no safety TLB flush. */
- pg[i].u.free.need_tlbflush = (page_get_owner(&pg[i]) != NULL);
- if ( pg[i].u.free.need_tlbflush )
- page_set_tlbflush_timestamp(&pg[i]);
-
- /* This page is not a guest frame any more. */
- page_set_owner(&pg[i], NULL); /* set_gpfn_from_mfn snoops pg owner */
- set_gpfn_from_mfn(mfn_x(mfn) + i, INVALID_M2P_ENTRY);
+ mark_page_free(&pg[i], mfn_add(mfn, i));
if ( need_scrub )
{
--
2.25.1
^ permalink raw reply related [flat|nested] 18+ messages in thread
* [PATCH v6 4/7] xen/arm: static memory initialization
2021-09-08 9:52 [PATCH v6 0/7] Domain on Static Allocation Penny Zheng
` (2 preceding siblings ...)
2021-09-08 9:52 ` [PATCH v6 3/7] xen: introduce mark_page_free Penny Zheng
@ 2021-09-08 9:52 ` Penny Zheng
2021-09-08 23:54 ` Stefano Stabellini
2021-09-08 9:52 ` [PATCH v6 5/7] xen: re-define assign_pages and introduce a new function assign_page Penny Zheng
` (2 subsequent siblings)
6 siblings, 1 reply; 18+ messages in thread
From: Penny Zheng @ 2021-09-08 9:52 UTC (permalink / raw)
To: xen-devel, sstabellini, julien; +Cc: Bertrand.Marquis, Wei.Chen, jbeulich
This patch introduces static memory initialization, during system boot-up.
The new function init_staticmem_pages is responsible for static memory
initialization.
Helper free_staticmem_pages is the equivalent of free_heap_pages, to free
nr_mfns pages of static memory.
This commit also introduces a new CONFIG_STATIC_MEMORY option to wrap all
static-allocation-related code.
Put asynchronously scrubbing pages of static memory in TODO list.
Signed-off-by: Penny Zheng <penny.zheng@arm.com>
---
xen/arch/arm/setup.c | 27 +++++++++++++++++++++++++++
xen/common/Kconfig | 13 +++++++++++++
xen/common/page_alloc.c | 21 +++++++++++++++++++++
xen/include/xen/mm.h | 6 ++++++
4 files changed, 67 insertions(+)
diff --git a/xen/arch/arm/setup.c b/xen/arch/arm/setup.c
index 63a908e325..5be7f2b0c2 100644
--- a/xen/arch/arm/setup.c
+++ b/xen/arch/arm/setup.c
@@ -609,6 +609,29 @@ static void __init init_pdx(void)
}
}
+/* Static memory initialization */
+static void __init init_staticmem_pages(void)
+{
+#ifdef CONFIG_STATIC_MEMORY
+ unsigned int bank;
+
+ for ( bank = 0 ; bank < bootinfo.reserved_mem.nr_banks; bank++ )
+ {
+ if ( bootinfo.reserved_mem.bank[bank].xen_domain )
+ {
+ mfn_t bank_start = _mfn(PFN_UP(bootinfo.reserved_mem.bank[bank].start));
+ unsigned long bank_pages = PFN_DOWN(bootinfo.reserved_mem.bank[bank].size);
+ mfn_t bank_end = mfn_add(bank_start, bank_pages);
+
+ if ( mfn_x(bank_end) <= mfn_x(bank_start) )
+ return;
+
+ free_staticmem_pages(mfn_to_page(bank_start), bank_pages, false);
+ }
+ }
+#endif
+}
+
#ifdef CONFIG_ARM_32
static void __init setup_mm(void)
{
@@ -736,6 +759,8 @@ static void __init setup_mm(void)
/* Add xenheap memory that was not already added to the boot allocator. */
init_xenheap_pages(mfn_to_maddr(xenheap_mfn_start),
mfn_to_maddr(xenheap_mfn_end));
+
+ init_staticmem_pages();
}
#else /* CONFIG_ARM_64 */
static void __init setup_mm(void)
@@ -789,6 +814,8 @@ static void __init setup_mm(void)
setup_frametable_mappings(ram_start, ram_end);
max_page = PFN_DOWN(ram_end);
+
+ init_staticmem_pages();
}
#endif
diff --git a/xen/common/Kconfig b/xen/common/Kconfig
index 0ddd18e11a..3558be0dbc 100644
--- a/xen/common/Kconfig
+++ b/xen/common/Kconfig
@@ -67,6 +67,19 @@ config MEM_ACCESS
config NEEDS_LIBELF
bool
+config STATIC_MEMORY
+ bool "Static Allocation Support (UNSUPPORTED)" if UNSUPPORTED
+ depends on ARM
+ help
+ Static Allocation refers to system or sub-system(domains) for
+ which memory areas are pre-defined by configuration using physical
+ address ranges.
+
+ When enabled, memory can be statically allocated to a domain using
+ the property "xen,static-mem" defined in the domain configuration.
+
+ If unsure, say N.
+
menu "Speculative hardening"
config SPECULATIVE_HARDEN_ARRAY
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index a3ee5eca9e..ba7adc80db 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -2604,6 +2604,27 @@ struct domain *get_pg_owner(domid_t domid)
return pg_owner;
}
+#ifdef CONFIG_STATIC_MEMORY
+/* Equivalent of free_heap_pages to free nr_mfns pages of static memory. */
+void __init free_staticmem_pages(struct page_info *pg, unsigned long nr_mfns,
+ bool need_scrub)
+{
+ mfn_t mfn = page_to_mfn(pg);
+ unsigned long i;
+
+ for ( i = 0; i < nr_mfns; i++ )
+ {
+ mark_page_free(&pg[i], mfn_add(mfn, i));
+
+ if ( need_scrub )
+ {
+ /* TODO: asynchronous scrubbing for pages of static memory. */
+ scrub_one_page(pg);
+ }
+ }
+}
+#endif
+
/*
* Local variables:
* mode: C
diff --git a/xen/include/xen/mm.h b/xen/include/xen/mm.h
index 667f9dac83..8e8fb5a615 100644
--- a/xen/include/xen/mm.h
+++ b/xen/include/xen/mm.h
@@ -85,6 +85,12 @@ bool scrub_free_pages(void);
} while ( false )
#define FREE_XENHEAP_PAGE(p) FREE_XENHEAP_PAGES(p, 0)
+#ifdef CONFIG_STATIC_MEMORY
+/* These functions are for static memory */
+void free_staticmem_pages(struct page_info *pg, unsigned long nr_mfns,
+ bool need_scrub);
+#endif
+
/* Map machine page range in Xen virtual address space. */
int map_pages_to_xen(
unsigned long virt,
--
2.25.1
^ permalink raw reply related [flat|nested] 18+ messages in thread
* Re: [PATCH v6 4/7] xen/arm: static memory initialization
2021-09-08 9:52 ` [PATCH v6 4/7] xen/arm: static memory initialization Penny Zheng
@ 2021-09-08 23:54 ` Stefano Stabellini
0 siblings, 0 replies; 18+ messages in thread
From: Stefano Stabellini @ 2021-09-08 23:54 UTC (permalink / raw)
To: Penny Zheng
Cc: xen-devel, sstabellini, julien, Bertrand.Marquis, Wei.Chen, jbeulich
On Wed, 8 Sep 2021, Penny Zheng wrote:
> This patch introduces static memory initialization, during system boot-up.
>
> The new function init_staticmem_pages is responsible for static memory
> initialization.
>
> Helper free_staticmem_pages is the equivalent of free_heap_pages, to free
> nr_mfns pages of static memory.
>
> This commit also introduces a new CONFIG_STATIC_MEMORY option to wrap all
> static-allocation-related code.
>
> Put asynchronously scrubbing pages of static memory in TODO list.
>
> Signed-off-by: Penny Zheng <penny.zheng@arm.com>
Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>
> ---
> xen/arch/arm/setup.c | 27 +++++++++++++++++++++++++++
> xen/common/Kconfig | 13 +++++++++++++
> xen/common/page_alloc.c | 21 +++++++++++++++++++++
> xen/include/xen/mm.h | 6 ++++++
> 4 files changed, 67 insertions(+)
>
> diff --git a/xen/arch/arm/setup.c b/xen/arch/arm/setup.c
> index 63a908e325..5be7f2b0c2 100644
> --- a/xen/arch/arm/setup.c
> +++ b/xen/arch/arm/setup.c
> @@ -609,6 +609,29 @@ static void __init init_pdx(void)
> }
> }
>
> +/* Static memory initialization */
> +static void __init init_staticmem_pages(void)
> +{
> +#ifdef CONFIG_STATIC_MEMORY
> + unsigned int bank;
> +
> + for ( bank = 0 ; bank < bootinfo.reserved_mem.nr_banks; bank++ )
> + {
> + if ( bootinfo.reserved_mem.bank[bank].xen_domain )
> + {
> + mfn_t bank_start = _mfn(PFN_UP(bootinfo.reserved_mem.bank[bank].start));
> + unsigned long bank_pages = PFN_DOWN(bootinfo.reserved_mem.bank[bank].size);
> + mfn_t bank_end = mfn_add(bank_start, bank_pages);
> +
> + if ( mfn_x(bank_end) <= mfn_x(bank_start) )
> + return;
> +
> + free_staticmem_pages(mfn_to_page(bank_start), bank_pages, false);
> + }
> + }
> +#endif
> +}
> +
> #ifdef CONFIG_ARM_32
> static void __init setup_mm(void)
> {
> @@ -736,6 +759,8 @@ static void __init setup_mm(void)
> /* Add xenheap memory that was not already added to the boot allocator. */
> init_xenheap_pages(mfn_to_maddr(xenheap_mfn_start),
> mfn_to_maddr(xenheap_mfn_end));
> +
> + init_staticmem_pages();
> }
> #else /* CONFIG_ARM_64 */
> static void __init setup_mm(void)
> @@ -789,6 +814,8 @@ static void __init setup_mm(void)
>
> setup_frametable_mappings(ram_start, ram_end);
> max_page = PFN_DOWN(ram_end);
> +
> + init_staticmem_pages();
> }
> #endif
>
> diff --git a/xen/common/Kconfig b/xen/common/Kconfig
> index 0ddd18e11a..3558be0dbc 100644
> --- a/xen/common/Kconfig
> +++ b/xen/common/Kconfig
> @@ -67,6 +67,19 @@ config MEM_ACCESS
> config NEEDS_LIBELF
> bool
>
> +config STATIC_MEMORY
> + bool "Static Allocation Support (UNSUPPORTED)" if UNSUPPORTED
> + depends on ARM
> + help
> + Static Allocation refers to system or sub-system(domains) for
> + which memory areas are pre-defined by configuration using physical
> + address ranges.
> +
> + When enabled, memory can be statically allocated to a domain using
> + the property "xen,static-mem" defined in the domain configuration.
> +
> + If unsure, say N.
> +
> menu "Speculative hardening"
>
> config SPECULATIVE_HARDEN_ARRAY
> diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
> index a3ee5eca9e..ba7adc80db 100644
> --- a/xen/common/page_alloc.c
> +++ b/xen/common/page_alloc.c
> @@ -2604,6 +2604,27 @@ struct domain *get_pg_owner(domid_t domid)
> return pg_owner;
> }
>
> +#ifdef CONFIG_STATIC_MEMORY
> +/* Equivalent of free_heap_pages to free nr_mfns pages of static memory. */
> +void __init free_staticmem_pages(struct page_info *pg, unsigned long nr_mfns,
> + bool need_scrub)
> +{
> + mfn_t mfn = page_to_mfn(pg);
> + unsigned long i;
> +
> + for ( i = 0; i < nr_mfns; i++ )
> + {
> + mark_page_free(&pg[i], mfn_add(mfn, i));
> +
> + if ( need_scrub )
> + {
> + /* TODO: asynchronous scrubbing for pages of static memory. */
> + scrub_one_page(pg);
> + }
> + }
> +}
> +#endif
> +
> /*
> * Local variables:
> * mode: C
> diff --git a/xen/include/xen/mm.h b/xen/include/xen/mm.h
> index 667f9dac83..8e8fb5a615 100644
> --- a/xen/include/xen/mm.h
> +++ b/xen/include/xen/mm.h
> @@ -85,6 +85,12 @@ bool scrub_free_pages(void);
> } while ( false )
> #define FREE_XENHEAP_PAGE(p) FREE_XENHEAP_PAGES(p, 0)
>
> +#ifdef CONFIG_STATIC_MEMORY
> +/* These functions are for static memory */
> +void free_staticmem_pages(struct page_info *pg, unsigned long nr_mfns,
> + bool need_scrub);
> +#endif
> +
> /* Map machine page range in Xen virtual address space. */
> int map_pages_to_xen(
> unsigned long virt,
> --
> 2.25.1
>
^ permalink raw reply [flat|nested] 18+ messages in thread
* [PATCH v6 5/7] xen: re-define assign_pages and introduce a new function assign_page
2021-09-08 9:52 [PATCH v6 0/7] Domain on Static Allocation Penny Zheng
` (3 preceding siblings ...)
2021-09-08 9:52 ` [PATCH v6 4/7] xen/arm: static memory initialization Penny Zheng
@ 2021-09-08 9:52 ` Penny Zheng
2021-09-08 23:57 ` Stefano Stabellini
2021-09-09 9:06 ` Jan Beulich
2021-09-08 9:52 ` [PATCH v6 6/7] xen/arm: introduce acquire_staticmem_pages and acquire_domstatic_pages Penny Zheng
2021-09-08 9:52 ` [PATCH v6 7/7] xen/arm: introduce allocate_static_memory Penny Zheng
6 siblings, 2 replies; 18+ messages in thread
From: Penny Zheng @ 2021-09-08 9:52 UTC (permalink / raw)
To: xen-devel, sstabellini, julien; +Cc: Bertrand.Marquis, Wei.Chen, jbeulich
In order to deal with the trouble of count-to-order conversion when page number
is not in a power-of-two, this commit re-define assign_pages for nr pages and
introduces a new helper assign_page for original page with a single order.
Signed-off-by: Penny Zheng <penny.zheng@arm.com>
---
xen/arch/x86/pv/dom0_build.c | 2 +-
xen/common/grant_table.c | 2 +-
xen/common/memory.c | 6 +++---
xen/common/page_alloc.c | 21 +++++++++++++--------
xen/include/xen/mm.h | 6 ++++++
5 files changed, 24 insertions(+), 13 deletions(-)
diff --git a/xen/arch/x86/pv/dom0_build.c b/xen/arch/x86/pv/dom0_build.c
index d7f9e04b28..7787cc8fca 100644
--- a/xen/arch/x86/pv/dom0_build.c
+++ b/xen/arch/x86/pv/dom0_build.c
@@ -568,7 +568,7 @@ int __init dom0_construct_pv(struct domain *d,
else
{
while ( count-- )
- if ( assign_pages(d, mfn_to_page(_mfn(mfn++)), 0, 0) )
+ if ( assign_pages(d, mfn_to_page(_mfn(mfn++)), 1, 0) )
BUG();
}
initrd->mod_end = 0;
diff --git a/xen/common/grant_table.c b/xen/common/grant_table.c
index ee61603a97..50f5f83023 100644
--- a/xen/common/grant_table.c
+++ b/xen/common/grant_table.c
@@ -2358,7 +2358,7 @@ gnttab_transfer(
* is respected and speculative execution is blocked accordingly
*/
if ( unlikely(!evaluate_nospec(okay)) ||
- unlikely(assign_pages(e, page, 0, MEMF_no_refcount)) )
+ unlikely(assign_pages(e, page, 1, MEMF_no_refcount)) )
{
bool drop_dom_ref;
diff --git a/xen/common/memory.c b/xen/common/memory.c
index 74babb0bd7..9cef8790ff 100644
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -728,8 +728,8 @@ static long memory_exchange(XEN_GUEST_HANDLE_PARAM(xen_memory_exchange_t) arg)
/* Assign each output page to the domain. */
for ( j = 0; (page = page_list_remove_head(&out_chunk_list)); ++j )
{
- if ( assign_pages(d, page, exch.out.extent_order,
- MEMF_no_refcount) )
+ if ( assign_page(page, exch.out.extent_order, d,
+ MEMF_no_refcount) )
{
unsigned long dec_count;
bool_t drop_dom_ref;
@@ -797,7 +797,7 @@ static long memory_exchange(XEN_GUEST_HANDLE_PARAM(xen_memory_exchange_t) arg)
* cleared PGC_allocated.
*/
while ( (page = page_list_remove_head(&in_chunk_list)) )
- if ( assign_pages(d, page, 0, MEMF_no_refcount) )
+ if ( assign_pages(d, page, 1, MEMF_no_refcount) )
{
BUG_ON(!d->is_dying);
free_domheap_page(page);
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index ba7adc80db..bb19bb10ff 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -2261,7 +2261,7 @@ void init_domheap_pages(paddr_t ps, paddr_t pe)
int assign_pages(
struct domain *d,
struct page_info *pg,
- unsigned int order,
+ unsigned long nr,
unsigned int memflags)
{
int rc = 0;
@@ -2281,7 +2281,7 @@ int assign_pages(
{
unsigned int extra_pages = 0;
- for ( i = 0; i < (1ul << order); i++ )
+ for ( i = 0; i < nr; i++ )
{
ASSERT(!(pg[i].count_info & ~PGC_extra));
if ( pg[i].count_info & PGC_extra )
@@ -2290,18 +2290,18 @@ int assign_pages(
ASSERT(!extra_pages ||
((memflags & MEMF_no_refcount) &&
- extra_pages == 1u << order));
+ extra_pages == nr));
}
#endif
if ( pg[0].count_info & PGC_extra )
{
- d->extra_pages += 1u << order;
+ d->extra_pages += nr;
memflags &= ~MEMF_no_refcount;
}
else if ( !(memflags & MEMF_no_refcount) )
{
- unsigned int tot_pages = domain_tot_pages(d) + (1 << order);
+ unsigned int tot_pages = domain_tot_pages(d) + nr;
if ( unlikely(tot_pages > d->max_pages) )
{
@@ -2313,10 +2313,10 @@ int assign_pages(
}
if ( !(memflags & MEMF_no_refcount) &&
- unlikely(domain_adjust_tot_pages(d, 1 << order) == (1 << order)) )
+ unlikely(domain_adjust_tot_pages(d, nr) == nr) )
get_knownalive_domain(d);
- for ( i = 0; i < (1 << order); i++ )
+ for ( i = 0; i < nr; i++ )
{
ASSERT(page_get_owner(&pg[i]) == NULL);
page_set_owner(&pg[i], d);
@@ -2331,6 +2331,11 @@ int assign_pages(
return rc;
}
+int assign_page(struct page_info *pg, unsigned int order, struct domain *d,
+ unsigned int memflags)
+{
+ return assign_pages(d, pg, 1UL << order, memflags);
+}
struct page_info *alloc_domheap_pages(
struct domain *d, unsigned int order, unsigned int memflags)
@@ -2373,7 +2378,7 @@ struct page_info *alloc_domheap_pages(
pg[i].count_info = PGC_extra;
}
}
- if ( assign_pages(d, pg, order, memflags) )
+ if ( assign_page(pg, order, d, memflags) )
{
free_heap_pages(pg, order, memflags & MEMF_no_scrub);
return NULL;
diff --git a/xen/include/xen/mm.h b/xen/include/xen/mm.h
index 8e8fb5a615..a74e93eba8 100644
--- a/xen/include/xen/mm.h
+++ b/xen/include/xen/mm.h
@@ -133,8 +133,14 @@ void heap_init_late(void);
int assign_pages(
struct domain *d,
+ struct page_info *pg,
+ unsigned long nr,
+ unsigned int memflags);
+
+int assign_page(
struct page_info *pg,
unsigned int order,
+ struct domain *d,
unsigned int memflags);
/* Dump info to serial console */
--
2.25.1
^ permalink raw reply related [flat|nested] 18+ messages in thread
* Re: [PATCH v6 5/7] xen: re-define assign_pages and introduce a new function assign_page
2021-09-08 9:52 ` [PATCH v6 5/7] xen: re-define assign_pages and introduce a new function assign_page Penny Zheng
@ 2021-09-08 23:57 ` Stefano Stabellini
2021-09-09 2:20 ` Penny Zheng
2021-09-09 9:06 ` Jan Beulich
1 sibling, 1 reply; 18+ messages in thread
From: Stefano Stabellini @ 2021-09-08 23:57 UTC (permalink / raw)
To: Penny Zheng
Cc: xen-devel, sstabellini, julien, Bertrand.Marquis, Wei.Chen, jbeulich
On Wed, 8 Sep 2021, Penny Zheng wrote:
> In order to deal with the trouble of count-to-order conversion when page number
> is not in a power-of-two, this commit re-define assign_pages for nr pages and
> introduces a new helper assign_page for original page with a single order.
>
> Signed-off-by: Penny Zheng <penny.zheng@arm.com>
The patch looks correct to me, however I don't think I understood Jan's
request to the previous version of this patch, so I can't tell if you
addressed his concerns.
> ---
> xen/arch/x86/pv/dom0_build.c | 2 +-
> xen/common/grant_table.c | 2 +-
> xen/common/memory.c | 6 +++---
> xen/common/page_alloc.c | 21 +++++++++++++--------
> xen/include/xen/mm.h | 6 ++++++
> 5 files changed, 24 insertions(+), 13 deletions(-)
>
> diff --git a/xen/arch/x86/pv/dom0_build.c b/xen/arch/x86/pv/dom0_build.c
> index d7f9e04b28..7787cc8fca 100644
> --- a/xen/arch/x86/pv/dom0_build.c
> +++ b/xen/arch/x86/pv/dom0_build.c
> @@ -568,7 +568,7 @@ int __init dom0_construct_pv(struct domain *d,
> else
> {
> while ( count-- )
> - if ( assign_pages(d, mfn_to_page(_mfn(mfn++)), 0, 0) )
> + if ( assign_pages(d, mfn_to_page(_mfn(mfn++)), 1, 0) )
> BUG();
> }
> initrd->mod_end = 0;
> diff --git a/xen/common/grant_table.c b/xen/common/grant_table.c
> index ee61603a97..50f5f83023 100644
> --- a/xen/common/grant_table.c
> +++ b/xen/common/grant_table.c
> @@ -2358,7 +2358,7 @@ gnttab_transfer(
> * is respected and speculative execution is blocked accordingly
> */
> if ( unlikely(!evaluate_nospec(okay)) ||
> - unlikely(assign_pages(e, page, 0, MEMF_no_refcount)) )
> + unlikely(assign_pages(e, page, 1, MEMF_no_refcount)) )
> {
> bool drop_dom_ref;
>
> diff --git a/xen/common/memory.c b/xen/common/memory.c
> index 74babb0bd7..9cef8790ff 100644
> --- a/xen/common/memory.c
> +++ b/xen/common/memory.c
> @@ -728,8 +728,8 @@ static long memory_exchange(XEN_GUEST_HANDLE_PARAM(xen_memory_exchange_t) arg)
> /* Assign each output page to the domain. */
> for ( j = 0; (page = page_list_remove_head(&out_chunk_list)); ++j )
> {
> - if ( assign_pages(d, page, exch.out.extent_order,
> - MEMF_no_refcount) )
> + if ( assign_page(page, exch.out.extent_order, d,
> + MEMF_no_refcount) )
> {
> unsigned long dec_count;
> bool_t drop_dom_ref;
> @@ -797,7 +797,7 @@ static long memory_exchange(XEN_GUEST_HANDLE_PARAM(xen_memory_exchange_t) arg)
> * cleared PGC_allocated.
> */
> while ( (page = page_list_remove_head(&in_chunk_list)) )
> - if ( assign_pages(d, page, 0, MEMF_no_refcount) )
> + if ( assign_pages(d, page, 1, MEMF_no_refcount) )
> {
> BUG_ON(!d->is_dying);
> free_domheap_page(page);
> diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
> index ba7adc80db..bb19bb10ff 100644
> --- a/xen/common/page_alloc.c
> +++ b/xen/common/page_alloc.c
> @@ -2261,7 +2261,7 @@ void init_domheap_pages(paddr_t ps, paddr_t pe)
> int assign_pages(
> struct domain *d,
> struct page_info *pg,
> - unsigned int order,
> + unsigned long nr,
> unsigned int memflags)
> {
> int rc = 0;
> @@ -2281,7 +2281,7 @@ int assign_pages(
> {
> unsigned int extra_pages = 0;
>
> - for ( i = 0; i < (1ul << order); i++ )
> + for ( i = 0; i < nr; i++ )
> {
> ASSERT(!(pg[i].count_info & ~PGC_extra));
> if ( pg[i].count_info & PGC_extra )
> @@ -2290,18 +2290,18 @@ int assign_pages(
>
> ASSERT(!extra_pages ||
> ((memflags & MEMF_no_refcount) &&
> - extra_pages == 1u << order));
> + extra_pages == nr));
> }
> #endif
>
> if ( pg[0].count_info & PGC_extra )
> {
> - d->extra_pages += 1u << order;
> + d->extra_pages += nr;
> memflags &= ~MEMF_no_refcount;
> }
> else if ( !(memflags & MEMF_no_refcount) )
> {
> - unsigned int tot_pages = domain_tot_pages(d) + (1 << order);
> + unsigned int tot_pages = domain_tot_pages(d) + nr;
>
> if ( unlikely(tot_pages > d->max_pages) )
> {
> @@ -2313,10 +2313,10 @@ int assign_pages(
> }
>
> if ( !(memflags & MEMF_no_refcount) &&
> - unlikely(domain_adjust_tot_pages(d, 1 << order) == (1 << order)) )
> + unlikely(domain_adjust_tot_pages(d, nr) == nr) )
> get_knownalive_domain(d);
>
> - for ( i = 0; i < (1 << order); i++ )
> + for ( i = 0; i < nr; i++ )
> {
> ASSERT(page_get_owner(&pg[i]) == NULL);
> page_set_owner(&pg[i], d);
> @@ -2331,6 +2331,11 @@ int assign_pages(
> return rc;
> }
>
> +int assign_page(struct page_info *pg, unsigned int order, struct domain *d,
> + unsigned int memflags)
> +{
> + return assign_pages(d, pg, 1UL << order, memflags);
> +}
>
> struct page_info *alloc_domheap_pages(
> struct domain *d, unsigned int order, unsigned int memflags)
> @@ -2373,7 +2378,7 @@ struct page_info *alloc_domheap_pages(
> pg[i].count_info = PGC_extra;
> }
> }
> - if ( assign_pages(d, pg, order, memflags) )
> + if ( assign_page(pg, order, d, memflags) )
> {
> free_heap_pages(pg, order, memflags & MEMF_no_scrub);
> return NULL;
> diff --git a/xen/include/xen/mm.h b/xen/include/xen/mm.h
> index 8e8fb5a615..a74e93eba8 100644
> --- a/xen/include/xen/mm.h
> +++ b/xen/include/xen/mm.h
> @@ -133,8 +133,14 @@ void heap_init_late(void);
>
> int assign_pages(
> struct domain *d,
> + struct page_info *pg,
> + unsigned long nr,
> + unsigned int memflags);
> +
> +int assign_page(
> struct page_info *pg,
> unsigned int order,
> + struct domain *d,
> unsigned int memflags);
>
> /* Dump info to serial console */
> --
> 2.25.1
>
^ permalink raw reply [flat|nested] 18+ messages in thread
* RE: [PATCH v6 5/7] xen: re-define assign_pages and introduce a new function assign_page
2021-09-08 23:57 ` Stefano Stabellini
@ 2021-09-09 2:20 ` Penny Zheng
2021-09-09 7:05 ` Jan Beulich
0 siblings, 1 reply; 18+ messages in thread
From: Penny Zheng @ 2021-09-09 2:20 UTC (permalink / raw)
To: Stefano Stabellini, jbeulich
Cc: xen-devel, julien, Bertrand Marquis, Wei Chen
Hi Jan
> -----Original Message-----
> From: Stefano Stabellini <sstabellini@kernel.org>
> Sent: Thursday, September 9, 2021 7:58 AM
> To: Penny Zheng <Penny.Zheng@arm.com>
> Cc: xen-devel@lists.xenproject.org; sstabellini@kernel.org; julien@xen.org;
> Bertrand Marquis <Bertrand.Marquis@arm.com>; Wei Chen
> <Wei.Chen@arm.com>; jbeulich@suse.com
> Subject: Re: [PATCH v6 5/7] xen: re-define assign_pages and introduce a new
> function assign_page
>
> On Wed, 8 Sep 2021, Penny Zheng wrote:
> > In order to deal with the trouble of count-to-order conversion when
> > page number is not in a power-of-two, this commit re-define
> > assign_pages for nr pages and introduces a new helper assign_page for
> original page with a single order.
> >
> > Signed-off-by: Penny Zheng <penny.zheng@arm.com>
>
> The patch looks correct to me, however I don't think I understood Jan's
> request to the previous version of this patch, so I can't tell if you addressed his
> concerns.
>
Would you like to take a look at whether I addressed your concerns in this version? Thx.
>
> > ---
> > xen/arch/x86/pv/dom0_build.c | 2 +-
> > xen/common/grant_table.c | 2 +-
> > xen/common/memory.c | 6 +++---
> > xen/common/page_alloc.c | 21 +++++++++++++--------
> > xen/include/xen/mm.h | 6 ++++++
> > 5 files changed, 24 insertions(+), 13 deletions(-)
> >
> > diff --git a/xen/arch/x86/pv/dom0_build.c
> > b/xen/arch/x86/pv/dom0_build.c index d7f9e04b28..7787cc8fca 100644
> > --- a/xen/arch/x86/pv/dom0_build.c
> > +++ b/xen/arch/x86/pv/dom0_build.c
> > @@ -568,7 +568,7 @@ int __init dom0_construct_pv(struct domain *d,
> > else
> > {
> > while ( count-- )
> > - if ( assign_pages(d, mfn_to_page(_mfn(mfn++)), 0, 0) )
> > + if ( assign_pages(d, mfn_to_page(_mfn(mfn++)), 1, 0)
> > + )
> > BUG();
> > }
> > initrd->mod_end = 0;
> > diff --git a/xen/common/grant_table.c b/xen/common/grant_table.c index
> > ee61603a97..50f5f83023 100644
> > --- a/xen/common/grant_table.c
> > +++ b/xen/common/grant_table.c
> > @@ -2358,7 +2358,7 @@ gnttab_transfer(
> > * is respected and speculative execution is blocked accordingly
> > */
> > if ( unlikely(!evaluate_nospec(okay)) ||
> > - unlikely(assign_pages(e, page, 0, MEMF_no_refcount)) )
> > + unlikely(assign_pages(e, page, 1, MEMF_no_refcount)) )
> > {
> > bool drop_dom_ref;
> >
> > diff --git a/xen/common/memory.c b/xen/common/memory.c index
> > 74babb0bd7..9cef8790ff 100644
> > --- a/xen/common/memory.c
> > +++ b/xen/common/memory.c
> > @@ -728,8 +728,8 @@ static long
> memory_exchange(XEN_GUEST_HANDLE_PARAM(xen_memory_exchange_t)
> arg)
> > /* Assign each output page to the domain. */
> > for ( j = 0; (page = page_list_remove_head(&out_chunk_list)); ++j )
> > {
> > - if ( assign_pages(d, page, exch.out.extent_order,
> > - MEMF_no_refcount) )
> > + if ( assign_page(page, exch.out.extent_order, d,
> > + MEMF_no_refcount) )
> > {
> > unsigned long dec_count;
> > bool_t drop_dom_ref;
> > @@ -797,7 +797,7 @@ static long
> memory_exchange(XEN_GUEST_HANDLE_PARAM(xen_memory_exchange_t)
> arg)
> > * cleared PGC_allocated.
> > */
> > while ( (page = page_list_remove_head(&in_chunk_list)) )
> > - if ( assign_pages(d, page, 0, MEMF_no_refcount) )
> > + if ( assign_pages(d, page, 1, MEMF_no_refcount) )
> > {
> > BUG_ON(!d->is_dying);
> > free_domheap_page(page);
> > diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c index
> > ba7adc80db..bb19bb10ff 100644
> > --- a/xen/common/page_alloc.c
> > +++ b/xen/common/page_alloc.c
> > @@ -2261,7 +2261,7 @@ void init_domheap_pages(paddr_t ps, paddr_t pe)
> > int assign_pages(
> > struct domain *d,
> > struct page_info *pg,
> > - unsigned int order,
> > + unsigned long nr,
> > unsigned int memflags)
> > {
> > int rc = 0;
> > @@ -2281,7 +2281,7 @@ int assign_pages(
> > {
> > unsigned int extra_pages = 0;
> >
> > - for ( i = 0; i < (1ul << order); i++ )
> > + for ( i = 0; i < nr; i++ )
> > {
> > ASSERT(!(pg[i].count_info & ~PGC_extra));
> > if ( pg[i].count_info & PGC_extra ) @@ -2290,18 +2290,18
> > @@ int assign_pages(
> >
> > ASSERT(!extra_pages ||
> > ((memflags & MEMF_no_refcount) &&
> > - extra_pages == 1u << order));
> > + extra_pages == nr));
> > }
> > #endif
> >
> > if ( pg[0].count_info & PGC_extra )
> > {
> > - d->extra_pages += 1u << order;
> > + d->extra_pages += nr;
> > memflags &= ~MEMF_no_refcount;
> > }
> > else if ( !(memflags & MEMF_no_refcount) )
> > {
> > - unsigned int tot_pages = domain_tot_pages(d) + (1 << order);
> > + unsigned int tot_pages = domain_tot_pages(d) + nr;
> >
> > if ( unlikely(tot_pages > d->max_pages) )
> > {
> > @@ -2313,10 +2313,10 @@ int assign_pages(
> > }
> >
> > if ( !(memflags & MEMF_no_refcount) &&
> > - unlikely(domain_adjust_tot_pages(d, 1 << order) == (1 << order)) )
> > + unlikely(domain_adjust_tot_pages(d, nr) == nr) )
> > get_knownalive_domain(d);
> >
> > - for ( i = 0; i < (1 << order); i++ )
> > + for ( i = 0; i < nr; i++ )
> > {
> > ASSERT(page_get_owner(&pg[i]) == NULL);
> > page_set_owner(&pg[i], d);
> > @@ -2331,6 +2331,11 @@ int assign_pages(
> > return rc;
> > }
> >
> > +int assign_page(struct page_info *pg, unsigned int order, struct domain *d,
> > + unsigned int memflags) {
> > + return assign_pages(d, pg, 1UL << order, memflags); }
> >
> > struct page_info *alloc_domheap_pages(
> > struct domain *d, unsigned int order, unsigned int memflags) @@
> > -2373,7 +2378,7 @@ struct page_info *alloc_domheap_pages(
> > pg[i].count_info = PGC_extra;
> > }
> > }
> > - if ( assign_pages(d, pg, order, memflags) )
> > + if ( assign_page(pg, order, d, memflags) )
> > {
> > free_heap_pages(pg, order, memflags & MEMF_no_scrub);
> > return NULL;
> > diff --git a/xen/include/xen/mm.h b/xen/include/xen/mm.h index
> > 8e8fb5a615..a74e93eba8 100644
> > --- a/xen/include/xen/mm.h
> > +++ b/xen/include/xen/mm.h
> > @@ -133,8 +133,14 @@ void heap_init_late(void);
> >
> > int assign_pages(
> > struct domain *d,
> > + struct page_info *pg,
> > + unsigned long nr,
> > + unsigned int memflags);
> > +
> > +int assign_page(
> > struct page_info *pg,
> > unsigned int order,
> > + struct domain *d,
> > unsigned int memflags);
> >
> > /* Dump info to serial console */
> > --
> > 2.25.1
> >
^ permalink raw reply [flat|nested] 18+ messages in thread
* Re: [PATCH v6 5/7] xen: re-define assign_pages and introduce a new function assign_page
2021-09-09 2:20 ` Penny Zheng
@ 2021-09-09 7:05 ` Jan Beulich
0 siblings, 0 replies; 18+ messages in thread
From: Jan Beulich @ 2021-09-09 7:05 UTC (permalink / raw)
To: Penny Zheng
Cc: xen-devel, julien, Bertrand Marquis, Wei Chen, Stefano Stabellini
On 09.09.2021 04:20, Penny Zheng wrote:
>> From: Stefano Stabellini <sstabellini@kernel.org>
>> Sent: Thursday, September 9, 2021 7:58 AM
>>
>> On Wed, 8 Sep 2021, Penny Zheng wrote:
>>> In order to deal with the trouble of count-to-order conversion when
>>> page number is not in a power-of-two, this commit re-define
>>> assign_pages for nr pages and introduces a new helper assign_page for
>> original page with a single order.
>>>
>>> Signed-off-by: Penny Zheng <penny.zheng@arm.com>
>>
>> The patch looks correct to me, however I don't think I understood Jan's
>> request to the previous version of this patch, so I can't tell if you addressed his
>> concerns.
>
> Would you like to take a look at whether I address your concerns in this version? Thx.
I will take a look, independent of Stefano's reply to you. It's been just
yesterday that you've submitted the new version, so I'm afraid I have to
say that I consider it a little early to ping for a response. I'm sorry.
Jan
^ permalink raw reply [flat|nested] 18+ messages in thread
* Re: [PATCH v6 5/7] xen: re-define assign_pages and introduce a new function assign_page
2021-09-08 9:52 ` [PATCH v6 5/7] xen: re-define assign_pages and introduce a new function assign_page Penny Zheng
2021-09-08 23:57 ` Stefano Stabellini
@ 2021-09-09 9:06 ` Jan Beulich
2021-09-09 9:34 ` Penny Zheng
1 sibling, 1 reply; 18+ messages in thread
From: Jan Beulich @ 2021-09-09 9:06 UTC (permalink / raw)
To: Penny Zheng; +Cc: Bertrand.Marquis, Wei.Chen, xen-devel, sstabellini, julien
On 08.09.2021 11:52, Penny Zheng wrote:
> In order to deal with the trouble of count-to-order conversion when page number
> is not in a power-of-two, this commit re-define assign_pages for nr pages and
> introduces a new helper assign_page for original page with a single order.
>
> Signed-off-by: Penny Zheng <penny.zheng@arm.com>
I have to admit that I'm now very puzzled: Instead of restoring the
long agreed upon ordering of parameters (and then keeping my A-b),
you've dropped the ack.
> --- a/xen/arch/x86/pv/dom0_build.c
> +++ b/xen/arch/x86/pv/dom0_build.c
> @@ -568,7 +568,7 @@ int __init dom0_construct_pv(struct domain *d,
> else
> {
> while ( count-- )
> - if ( assign_pages(d, mfn_to_page(_mfn(mfn++)), 0, 0) )
> + if ( assign_pages(d, mfn_to_page(_mfn(mfn++)), 1, 0) )
This change alone demonstrates the problem when it comes to backporting
future changes: If the original patch contained a code addition similar
to what you change to, without the person doing the backporting paying
close attention, the result will be an order-1 request when an order-0
one is wanted. It was explained to you that in order to make people
doing backports aware of this semantic change, the order of parameters
of the function ought to be altered. That way the compiler will
complain, and the person will know to look closely what adjustments are
needed.
In this context I find it further puzzling ...
> --- a/xen/include/xen/mm.h
> +++ b/xen/include/xen/mm.h
> @@ -133,8 +133,14 @@ void heap_init_late(void);
>
> int assign_pages(
> struct domain *d,
> + struct page_info *pg,
> + unsigned long nr,
> + unsigned int memflags);
> +
> +int assign_page(
> struct page_info *pg,
> unsigned int order,
> + struct domain *d,
> unsigned int memflags);
... that you also neglected the request to harmonize the argument order
of both functions. What we want (and what I thought has long been agreed
upon) is e.g.
int assign_pages(
struct page_info *pg,
unsigned long nr,
struct domain *d,
unsigned int memflags);
int assign_page(
struct page_info *pg,
unsigned int order,
struct domain *d,
unsigned int memflags);
Jan
^ permalink raw reply [flat|nested] 18+ messages in thread
* RE: [PATCH v6 5/7] xen: re-define assign_pages and introduce a new function assign_page
2021-09-09 9:06 ` Jan Beulich
@ 2021-09-09 9:34 ` Penny Zheng
0 siblings, 0 replies; 18+ messages in thread
From: Penny Zheng @ 2021-09-09 9:34 UTC (permalink / raw)
To: Jan Beulich, Stefano Stabellini
Cc: Bertrand Marquis, Wei Chen, xen-devel, julien
Hi Jan and Stefano
> -----Original Message-----
> From: Jan Beulich <jbeulich@suse.com>
> Sent: Thursday, September 9, 2021 5:06 PM
> To: Penny Zheng <Penny.Zheng@arm.com>
> Cc: Bertrand Marquis <Bertrand.Marquis@arm.com>; Wei Chen
> <Wei.Chen@arm.com>; xen-devel@lists.xenproject.org;
> sstabellini@kernel.org; julien@xen.org
> Subject: Re: [PATCH v6 5/7] xen: re-define assign_pages and introduce a new
> function assign_page
>
> On 08.09.2021 11:52, Penny Zheng wrote:
> > In order to deal with the trouble of count-to-order conversion when
> > page number is not in a power-of-two, this commit re-define
> > assign_pages for nr pages and introduces a new helper assign_page for
> original page with a single order.
> >
> > Signed-off-by: Penny Zheng <penny.zheng@arm.com>
>
Stefano, since I need to re-commit this one, I'll add in your NIT suggestion in commit 7
("xen/arm: introduce allocate_static_memory"), and push a new series asap. ;)
> I have to admit that I'm now very puzzled: Instead of restoring the long agreed
> upon ordering of parameters (and then keeping my A-b), you've dropped the
> ack.
>
> > --- a/xen/arch/x86/pv/dom0_build.c
> > +++ b/xen/arch/x86/pv/dom0_build.c
> > @@ -568,7 +568,7 @@ int __init dom0_construct_pv(struct domain *d,
> > else
> > {
> > while ( count-- )
> > - if ( assign_pages(d, mfn_to_page(_mfn(mfn++)), 0, 0) )
> > + if ( assign_pages(d, mfn_to_page(_mfn(mfn++)), 1, 0)
> > + )
>
> This change alone demonstrates the problem when it comes to backporting
> future changes: If the original patch contained a code addition similar to what
> you change to, without the person doing the backporting paying close
> attention, the result will be an order-1 request when an order-0 one is wanted.
> It was explained to you that in order to make people doing backports aware of
> this semantic change, the order of parameters of the function ought to be
> altered. That way the compiler will complain, and the person will know to look
> closely what adjustments are needed.
>
> In this context I find it further puzzling ...
>
> > --- a/xen/include/xen/mm.h
> > +++ b/xen/include/xen/mm.h
> > @@ -133,8 +133,14 @@ void heap_init_late(void);
> >
> > int assign_pages(
> > struct domain *d,
> > + struct page_info *pg,
> > + unsigned long nr,
> > + unsigned int memflags);
> > +
> > +int assign_page(
> > struct page_info *pg,
> > unsigned int order,
> > + struct domain *d,
> > unsigned int memflags);
>
> ... that you also neglected the request to harmonize the argument order of
> both functions. What we want (and what I thought has long been agreed
> upon) is e.g.
>
> int assign_pages(
> struct page_info *pg,
> unsigned long nr,
> struct domain *d,
> unsigned int memflags);
>
> int assign_page(
> struct page_info *pg,
> unsigned int order,
> struct domain *d,
> unsigned int memflags);
>
Sorry, my fault — I've wrongly interpreted Julien's harmonization request twice.
> Jan
^ permalink raw reply [flat|nested] 18+ messages in thread
* [PATCH v6 6/7] xen/arm: introduce acquire_staticmem_pages and acquire_domstatic_pages
2021-09-08 9:52 [PATCH v6 0/7] Domain on Static Allocation Penny Zheng
` (4 preceding siblings ...)
2021-09-08 9:52 ` [PATCH v6 5/7] xen: re-define assign_pages and introduce a new function assign_page Penny Zheng
@ 2021-09-08 9:52 ` Penny Zheng
2021-09-09 0:03 ` Stefano Stabellini
2021-09-08 9:52 ` [PATCH v6 7/7] xen/arm: introduce allocate_static_memory Penny Zheng
6 siblings, 1 reply; 18+ messages in thread
From: Penny Zheng @ 2021-09-08 9:52 UTC (permalink / raw)
To: xen-devel, sstabellini, julien; +Cc: Bertrand.Marquis, Wei.Chen, jbeulich
New function acquire_staticmem_pages aims to acquire nr_mfns contiguous pages
of static memory, starting at #smfn. And it is the equivalent of
alloc_heap_pages for static memory.
For each page, it shall check if the page is reserved(PGC_reserved)
and free. It shall also do a set of necessary initialization, which are
mostly the same ones in alloc_heap_pages, like, following the same
cache-coherency policy and turning page status into PGC_state_inuse, etc.
New function acquire_domstatic_pages is the equivalent of alloc_domheap_pages
for static memory, and it is to acquire nr_mfns contiguous pages of
static memory and assign them to one specific domain.
It uses acquire_staticmem_pages to acquire nr_mfns pages of static memory.
Then on success, it will use assign_pages to assign those pages to one
specific domain.
In order to differentiate pages of static memory from those allocated from
heap, this patch introduces a new page flag PGC_reserved, then mark pages of
static memory PGC_reserved when initializing them.
Signed-off-by: Penny Zheng <penny.zheng@arm.com>
---
xen/common/page_alloc.c | 118 ++++++++++++++++++++++++++++++++++++++-
xen/include/asm-arm/mm.h | 3 +
xen/include/xen/mm.h | 2 +
3 files changed, 121 insertions(+), 2 deletions(-)
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index bb19bb10ff..59dffcfa1d 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -151,6 +151,10 @@
#define p2m_pod_offline_or_broken_replace(pg) BUG_ON(pg != NULL)
#endif
+#ifndef PGC_reserved
+#define PGC_reserved 0
+#endif
+
/*
* Comma-separated list of hexadecimal page numbers containing bad bytes.
* e.g. 'badpage=0x3f45,0x8a321'.
@@ -2283,7 +2287,7 @@ int assign_pages(
for ( i = 0; i < nr; i++ )
{
- ASSERT(!(pg[i].count_info & ~PGC_extra));
+ ASSERT(!(pg[i].count_info & ~(PGC_extra | PGC_reserved)));
if ( pg[i].count_info & PGC_extra )
extra_pages++;
}
@@ -2322,7 +2326,8 @@ int assign_pages(
page_set_owner(&pg[i], d);
smp_wmb(); /* Domain pointer must be visible before updating refcnt. */
pg[i].count_info =
- (pg[i].count_info & PGC_extra) | PGC_allocated | 1;
+ (pg[i].count_info & (PGC_extra | PGC_reserved)) | PGC_allocated | 1;
+
page_list_add_tail(&pg[i], page_to_list(d, &pg[i]));
}
@@ -2626,8 +2631,117 @@ void __init free_staticmem_pages(struct page_info *pg, unsigned long nr_mfns,
/* TODO: asynchronous scrubbing for pages of static memory. */
scrub_one_page(pg);
}
+
+ /* In case initializing page of static memory, mark it PGC_reserved. */
+ pg[i].count_info |= PGC_reserved;
}
}
+
+/*
+ * Acquire nr_mfns contiguous reserved pages, starting at #smfn, of
+ * static memory.
+ * This function needs to be reworked if used outside of boot.
+ */
+static struct page_info * __init acquire_staticmem_pages(mfn_t smfn,
+ unsigned long nr_mfns,
+ unsigned int memflags)
+{
+ bool need_tlbflush = false;
+ uint32_t tlbflush_timestamp = 0;
+ unsigned long i;
+ struct page_info *pg;
+
+ ASSERT(nr_mfns);
+ for ( i = 0; i < nr_mfns; i++ )
+ if ( !mfn_valid(mfn_add(smfn, i)) )
+ return NULL;
+
+ pg = mfn_to_page(smfn);
+
+ spin_lock(&heap_lock);
+
+ for ( i = 0; i < nr_mfns; i++ )
+ {
+ /* The page should be reserved and not yet allocated. */
+ if ( pg[i].count_info != (PGC_state_free | PGC_reserved) )
+ {
+ printk(XENLOG_ERR
+ "pg[%lu] Static MFN %"PRI_mfn" c=%#lx t=%#x\n",
+ i, mfn_x(smfn) + i,
+ pg[i].count_info, pg[i].tlbflush_timestamp);
+ goto out_err;
+ }
+
+ if ( !(memflags & MEMF_no_tlbflush) )
+ accumulate_tlbflush(&need_tlbflush, &pg[i],
+ &tlbflush_timestamp);
+
+ /*
+ * Preserve flag PGC_reserved and change page state
+ * to PGC_state_inuse.
+ */
+ pg[i].count_info = PGC_reserved | PGC_state_inuse;
+ /* Initialise fields which have other uses for free pages. */
+ pg[i].u.inuse.type_info = 0;
+ page_set_owner(&pg[i], NULL);
+ }
+
+ spin_unlock(&heap_lock);
+
+ if ( need_tlbflush )
+ filtered_flush_tlb_mask(tlbflush_timestamp);
+
+ /*
+ * Ensure cache and RAM are consistent for platforms where the guest
+ * can control its own visibility of/through the cache.
+ */
+ for ( i = 0; i < nr_mfns; i++ )
+ flush_page_to_ram(mfn_x(smfn) + i, !(memflags & MEMF_no_icache_flush));
+
+ return pg;
+
+ out_err:
+ while ( i-- )
+ pg[i].count_info = PGC_reserved | PGC_state_free;
+
+ spin_unlock(&heap_lock);
+
+ return NULL;
+}
+
+/*
+ * Acquire nr_mfns contiguous pages, starting at #smfn, of static memory,
+ * then assign them to one specific domain #d.
+ */
+int __init acquire_domstatic_pages(struct domain *d, mfn_t smfn,
+ unsigned long nr_mfns, unsigned int memflags)
+{
+ struct page_info *pg;
+
+ ASSERT(!in_irq());
+
+ pg = acquire_staticmem_pages(smfn, nr_mfns, memflags);
+ if ( !pg )
+ return -ENOENT;
+
+ if ( !d || (memflags & (MEMF_no_owner | MEMF_no_refcount)) )
+ {
+ /*
+ * Respective handling omitted here because right now
+ * acquired static memory is only for guest RAM.
+ */
+ ASSERT_UNREACHABLE();
+ return -EINVAL;
+ }
+
+ if ( assign_pages(d, pg, nr_mfns, memflags) )
+ {
+ free_staticmem_pages(pg, nr_mfns, memflags & MEMF_no_scrub);
+ return -EINVAL;
+ }
+
+ return 0;
+}
#endif
/*
diff --git a/xen/include/asm-arm/mm.h b/xen/include/asm-arm/mm.h
index ded74d29da..7b5e7b7f69 100644
--- a/xen/include/asm-arm/mm.h
+++ b/xen/include/asm-arm/mm.h
@@ -108,6 +108,9 @@ struct page_info
/* Page is Xen heap? */
#define _PGC_xen_heap PG_shift(2)
#define PGC_xen_heap PG_mask(1, 2)
+ /* Page is reserved */
+#define _PGC_reserved PG_shift(3)
+#define PGC_reserved PG_mask(1, 3)
/* ... */
/* Page is broken? */
#define _PGC_broken PG_shift(7)
diff --git a/xen/include/xen/mm.h b/xen/include/xen/mm.h
index a74e93eba8..da1b158693 100644
--- a/xen/include/xen/mm.h
+++ b/xen/include/xen/mm.h
@@ -89,6 +89,8 @@ bool scrub_free_pages(void);
/* These functions are for static memory */
void free_staticmem_pages(struct page_info *pg, unsigned long nr_mfns,
bool need_scrub);
+int acquire_domstatic_pages(struct domain *d, mfn_t smfn, unsigned long nr_mfns,
+ unsigned int memflags);
#endif
/* Map machine page range in Xen virtual address space. */
--
2.25.1
^ permalink raw reply related [flat|nested] 18+ messages in thread
* Re: [PATCH v6 6/7] xen/arm: introduce acquire_staticmem_pages and acquire_domstatic_pages
2021-09-08 9:52 ` [PATCH v6 6/7] xen/arm: introduce acquire_staticmem_pages and acquire_domstatic_pages Penny Zheng
@ 2021-09-09 0:03 ` Stefano Stabellini
0 siblings, 0 replies; 18+ messages in thread
From: Stefano Stabellini @ 2021-09-09 0:03 UTC (permalink / raw)
To: Penny Zheng
Cc: xen-devel, sstabellini, julien, Bertrand.Marquis, Wei.Chen, jbeulich
On Wed, 8 Sep 2021, Penny Zheng wrote:
> New function acquire_staticmem_pages aims to acquire nr_mfns contiguous pages
> of static memory, starting at #smfn. And it is the equivalent of
> alloc_heap_pages for static memory.
>
> For each page, it shall check if the page is reserved(PGC_reserved)
> and free. It shall also do a set of necessary initialization, which are
> mostly the same ones in alloc_heap_pages, like, following the same
> cache-coherency policy and turning page status into PGC_state_inuse, etc.
>
> New function acquire_domstatic_pages is the equivalent of alloc_domheap_pages
> for static memory, and it is to acquire nr_mfns contiguous pages of
> static memory and assign them to one specific domain.
>
> It uses acquire_staticmem_pages to acquire nr_mfns pages of static memory.
> Then on success, it will use assign_pages to assign those pages to one
> specific domain.
>
> In order to differentiate pages of static memory from those allocated from
> heap, this patch introduces a new page flag PGC_reserved, then mark pages of
> static memory PGC_reserved when initializing them.
>
> Signed-off-by: Penny Zheng <penny.zheng@arm.com>
Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>
> ---
> xen/common/page_alloc.c | 118 ++++++++++++++++++++++++++++++++++++++-
> xen/include/asm-arm/mm.h | 3 +
> xen/include/xen/mm.h | 2 +
> 3 files changed, 121 insertions(+), 2 deletions(-)
>
> diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
> index bb19bb10ff..59dffcfa1d 100644
> --- a/xen/common/page_alloc.c
> +++ b/xen/common/page_alloc.c
> @@ -151,6 +151,10 @@
> #define p2m_pod_offline_or_broken_replace(pg) BUG_ON(pg != NULL)
> #endif
>
> +#ifndef PGC_reserved
> +#define PGC_reserved 0
> +#endif
> +
> /*
> * Comma-separated list of hexadecimal page numbers containing bad bytes.
> * e.g. 'badpage=0x3f45,0x8a321'.
> @@ -2283,7 +2287,7 @@ int assign_pages(
>
> for ( i = 0; i < nr; i++ )
> {
> - ASSERT(!(pg[i].count_info & ~PGC_extra));
> + ASSERT(!(pg[i].count_info & ~(PGC_extra | PGC_reserved)));
> if ( pg[i].count_info & PGC_extra )
> extra_pages++;
> }
> @@ -2322,7 +2326,8 @@ int assign_pages(
> page_set_owner(&pg[i], d);
> smp_wmb(); /* Domain pointer must be visible before updating refcnt. */
> pg[i].count_info =
> - (pg[i].count_info & PGC_extra) | PGC_allocated | 1;
> + (pg[i].count_info & (PGC_extra | PGC_reserved)) | PGC_allocated | 1;
> +
> page_list_add_tail(&pg[i], page_to_list(d, &pg[i]));
> }
>
> @@ -2626,8 +2631,117 @@ void __init free_staticmem_pages(struct page_info *pg, unsigned long nr_mfns,
> /* TODO: asynchronous scrubbing for pages of static memory. */
> scrub_one_page(pg);
> }
> +
> + /* In case initializing page of static memory, mark it PGC_reserved. */
> + pg[i].count_info |= PGC_reserved;
> }
> }
> +
> +/*
> + * Acquire nr_mfns contiguous reserved pages, starting at #smfn, of
> + * static memory.
> + * This function needs to be reworked if used outside of boot.
> + */
> +static struct page_info * __init acquire_staticmem_pages(mfn_t smfn,
> + unsigned long nr_mfns,
> + unsigned int memflags)
> +{
> + bool need_tlbflush = false;
> + uint32_t tlbflush_timestamp = 0;
> + unsigned long i;
> + struct page_info *pg;
> +
> + ASSERT(nr_mfns);
> + for ( i = 0; i < nr_mfns; i++ )
> + if ( !mfn_valid(mfn_add(smfn, i)) )
> + return NULL;
> +
> + pg = mfn_to_page(smfn);
> +
> + spin_lock(&heap_lock);
> +
> + for ( i = 0; i < nr_mfns; i++ )
> + {
> + /* The page should be reserved and not yet allocated. */
> + if ( pg[i].count_info != (PGC_state_free | PGC_reserved) )
> + {
> + printk(XENLOG_ERR
> + "pg[%lu] Static MFN %"PRI_mfn" c=%#lx t=%#x\n",
> + i, mfn_x(smfn) + i,
> + pg[i].count_info, pg[i].tlbflush_timestamp);
> + goto out_err;
> + }
> +
> + if ( !(memflags & MEMF_no_tlbflush) )
> + accumulate_tlbflush(&need_tlbflush, &pg[i],
> + &tlbflush_timestamp);
> +
> + /*
> + * Preserve flag PGC_reserved and change page state
> + * to PGC_state_inuse.
> + */
> + pg[i].count_info = PGC_reserved | PGC_state_inuse;
> + /* Initialise fields which have other uses for free pages. */
> + pg[i].u.inuse.type_info = 0;
> + page_set_owner(&pg[i], NULL);
> + }
> +
> + spin_unlock(&heap_lock);
> +
> + if ( need_tlbflush )
> + filtered_flush_tlb_mask(tlbflush_timestamp);
> +
> + /*
> + * Ensure cache and RAM are consistent for platforms where the guest
> + * can control its own visibility of/through the cache.
> + */
> + for ( i = 0; i < nr_mfns; i++ )
> + flush_page_to_ram(mfn_x(smfn) + i, !(memflags & MEMF_no_icache_flush));
> +
> + return pg;
> +
> + out_err:
> + while ( i-- )
> + pg[i].count_info = PGC_reserved | PGC_state_free;
> +
> + spin_unlock(&heap_lock);
> +
> + return NULL;
> +}
> +
> +/*
> + * Acquire nr_mfns contiguous pages, starting at #smfn, of static memory,
> + * then assign them to one specific domain #d.
> + */
> +int __init acquire_domstatic_pages(struct domain *d, mfn_t smfn,
> + unsigned long nr_mfns, unsigned int memflags)
> +{
> + struct page_info *pg;
> +
> + ASSERT(!in_irq());
> +
> + pg = acquire_staticmem_pages(smfn, nr_mfns, memflags);
> + if ( !pg )
> + return -ENOENT;
> +
> + if ( !d || (memflags & (MEMF_no_owner | MEMF_no_refcount)) )
> + {
> + /*
> + * Respective handling omitted here because right now
> + * acquired static memory is only for guest RAM.
> + */
> + ASSERT_UNREACHABLE();
> + return -EINVAL;
> + }
> +
> + if ( assign_pages(d, pg, nr_mfns, memflags) )
> + {
> + free_staticmem_pages(pg, nr_mfns, memflags & MEMF_no_scrub);
> + return -EINVAL;
> + }
> +
> + return 0;
> +}
> #endif
>
> /*
> diff --git a/xen/include/asm-arm/mm.h b/xen/include/asm-arm/mm.h
> index ded74d29da..7b5e7b7f69 100644
> --- a/xen/include/asm-arm/mm.h
> +++ b/xen/include/asm-arm/mm.h
> @@ -108,6 +108,9 @@ struct page_info
> /* Page is Xen heap? */
> #define _PGC_xen_heap PG_shift(2)
> #define PGC_xen_heap PG_mask(1, 2)
> + /* Page is reserved */
> +#define _PGC_reserved PG_shift(3)
> +#define PGC_reserved PG_mask(1, 3)
> /* ... */
> /* Page is broken? */
> #define _PGC_broken PG_shift(7)
> diff --git a/xen/include/xen/mm.h b/xen/include/xen/mm.h
> index a74e93eba8..da1b158693 100644
> --- a/xen/include/xen/mm.h
> +++ b/xen/include/xen/mm.h
> @@ -89,6 +89,8 @@ bool scrub_free_pages(void);
> /* These functions are for static memory */
> void free_staticmem_pages(struct page_info *pg, unsigned long nr_mfns,
> bool need_scrub);
> +int acquire_domstatic_pages(struct domain *d, mfn_t smfn, unsigned long nr_mfns,
> + unsigned int memflags);
> #endif
>
> /* Map machine page range in Xen virtual address space. */
> --
> 2.25.1
>
^ permalink raw reply [flat|nested] 18+ messages in thread
* [PATCH v6 7/7] xen/arm: introduce allocate_static_memory
2021-09-08 9:52 [PATCH v6 0/7] Domain on Static Allocation Penny Zheng
` (5 preceding siblings ...)
2021-09-08 9:52 ` [PATCH v6 6/7] xen/arm: introduce acquire_staticmem_pages and acquire_domstatic_pages Penny Zheng
@ 2021-09-08 9:52 ` Penny Zheng
2021-09-09 0:15 ` Stefano Stabellini
6 siblings, 1 reply; 18+ messages in thread
From: Penny Zheng @ 2021-09-08 9:52 UTC (permalink / raw)
To: xen-devel, sstabellini, julien; +Cc: Bertrand.Marquis, Wei.Chen, jbeulich
This commit introduces a new function allocate_static_memory to allocate
static memory as guest RAM for Domain on Static Allocation.
It uses acquire_domstatic_pages to acquire pre-configured static memory
for this domain, and uses guest_physmap_add_pages to set up P2M table.
These pre-defined static memory banks shall be mapped to the usual guest
memory addresses (GUEST_RAM0_BASE, GUEST_RAM1_BASE) defined by
xen/include/public/arch-arm.h.
In order to deal with the trouble of count-to-order conversion when the page
number is not a power of two, this commit exports p2m_insert_mapping and
introduces a new function guest_physmap_add_pages to cope with adding guest RAM p2m
mapping with nr_pages.
Signed-off-by: Penny Zheng <penny.zheng@arm.com>
---
xen/arch/arm/domain_build.c | 161 +++++++++++++++++++++++++++++++++++-
xen/arch/arm/p2m.c | 7 +-
xen/include/asm-arm/p2m.h | 11 +++
3 files changed, 173 insertions(+), 6 deletions(-)
diff --git a/xen/arch/arm/domain_build.c b/xen/arch/arm/domain_build.c
index 206038d1c0..b011cc4789 100644
--- a/xen/arch/arm/domain_build.c
+++ b/xen/arch/arm/domain_build.c
@@ -480,6 +480,162 @@ fail:
(unsigned long)kinfo->unassigned_mem >> 10);
}
+#ifdef CONFIG_STATIC_MEMORY
+static bool __init append_static_memory_to_bank(struct domain *d,
+ struct membank *bank,
+ mfn_t smfn,
+ paddr_t size)
+{
+ int res;
+ unsigned int nr_pages = PFN_DOWN(size);
+ /* Infer next GFN. */
+ gfn_t sgfn = gaddr_to_gfn(bank->start + bank->size);
+
+ res = guest_physmap_add_pages(d, sgfn, smfn, nr_pages);
+ if ( res )
+ {
+ dprintk(XENLOG_ERR, "Failed to map pages to DOMU: %d", res);
+ return false;
+ }
+
+ bank->size = bank->size + size;
+
+ return true;
+}
+
+/* Allocate memory from static memory as RAM for one specific domain d. */
+static void __init allocate_static_memory(struct domain *d,
+ struct kernel_info *kinfo,
+ const struct dt_device_node *node)
+{
+ const struct dt_property *prop;
+ u32 addr_cells, size_cells, reg_cells;
+ unsigned int nr_banks, gbank, bank = 0;
+ const uint64_t rambase[] = GUEST_RAM_BANK_BASES;
+ const uint64_t ramsize[] = GUEST_RAM_BANK_SIZES;
+ const __be32 *cell;
+ u64 tot_size = 0;
+ paddr_t pbase, psize, gsize;
+ mfn_t smfn;
+ int res;
+
+ prop = dt_find_property(node, "xen,static-mem", NULL);
+ if ( !dt_property_read_u32(node, "#xen,static-mem-address-cells",
+ &addr_cells) )
+ {
+ printk(XENLOG_ERR
+ "%pd: failed to read \"#xen,static-mem-address-cells\".\n", d);
+ goto fail;
+ }
+
+ if ( !dt_property_read_u32(node, "#xen,static-mem-size-cells",
+ &size_cells) )
+ {
+ printk(XENLOG_ERR
+ "%pd: failed to read \"#xen,static-mem-size-cells\".\n", d);
+ goto fail;
+ }
+ reg_cells = addr_cells + size_cells;
+
+ /*
+ * The static memory will be mapped in the guest at the usual guest memory
+ * addresses (GUEST_RAM0_BASE, GUEST_RAM1_BASE) defined by
+ * xen/include/public/arch-arm.h.
+ */
+ gbank = 0;
+ gsize = ramsize[gbank];
+ kinfo->mem.bank[gbank].start = rambase[gbank];
+
+ cell = (const __be32 *)prop->value;
+ nr_banks = (prop->length) / (reg_cells * sizeof (u32));
+
+ for ( ; bank < nr_banks; bank++ )
+ {
+ device_tree_get_reg(&cell, addr_cells, size_cells, &pbase, &psize);
+ ASSERT(IS_ALIGNED(pbase, PAGE_SIZE) && IS_ALIGNED(psize, PAGE_SIZE));
+
+ smfn = maddr_to_mfn(pbase);
+ res = acquire_domstatic_pages(d, smfn, PFN_DOWN(psize), 0);
+ if ( res )
+ {
+ printk(XENLOG_ERR
+ "%pd: failed to acquire static memory: %d.\n", d, res);
+ goto fail;
+ }
+
+ printk(XENLOG_INFO "%pd: STATIC BANK[%u] %#"PRIpaddr"-%#"PRIpaddr"\n",
+ d, bank, pbase, pbase + psize);
+
+ while ( 1 )
+ {
+ /* Map as much as possible the static range to the guest bank */
+ if ( !append_static_memory_to_bank(d, &kinfo->mem.bank[gbank], smfn,
+ min(psize, gsize)) )
+ goto fail;
+
+ /*
+ * The current physical bank is fully mapped.
+ * Handle the next physical bank.
+ */
+ if ( gsize >= psize )
+ {
+ gsize = gsize - psize;
+ break;
+ }
+ /*
+ * When current guest bank is not enough to map, exhaust
+ * the current one and seek to the next.
+ * Before seeking to the next, check if we still have available
+ * guest bank.
+ */
+ else if ( (gbank + 1) >= GUEST_RAM_BANKS )
+ {
+ printk(XENLOG_ERR "Exhausted all possible guest banks.\n");
+ goto fail;
+ }
+ else
+ {
+ psize = psize - gsize;
+ smfn = mfn_add(smfn, gsize >> PAGE_SHIFT);
+ /* Update to the next guest bank. */
+ gbank++;
+ gsize = ramsize[gbank];
+ kinfo->mem.bank[gbank].start = rambase[gbank];
+ }
+ }
+
+ tot_size += psize;
+ }
+
+ kinfo->mem.nr_banks = ++gbank;
+
+ kinfo->unassigned_mem -= tot_size;
+ /*
+ * The property 'memory' should match the amount of memory given to the
+ * guest.
+ * Currently, it is only possible to either acquire static memory or let
+ * Xen allocate. *Mixing* is not supported'.
+ */
+ if ( kinfo->unassigned_mem )
+ {
+ printk(XENLOG_ERR
+ "Size of \"memory\" property doesn't match up with the sum-up of \"xen,static-mem\". Unsupported configuration.\n");
+ goto fail;
+ }
+
+ return;
+
+ fail:
+ panic("Failed to allocate requested static memory for domain %pd.", d);
+}
+#else
+static void __init allocate_static_memory(struct domain *d,
+ struct kernel_info *kinfo,
+ const struct dt_device_node *node)
+{
+}
+#endif
+
static int __init write_properties(struct domain *d, struct kernel_info *kinfo,
const struct dt_device_node *node)
{
@@ -2453,7 +2609,10 @@ static int __init construct_domU(struct domain *d,
/* type must be set before allocate memory */
d->arch.type = kinfo.type;
#endif
- allocate_memory(d, &kinfo);
+ if ( !dt_find_property(node, "xen,static-mem", NULL) )
+ allocate_memory(d, &kinfo);
+ else
+ allocate_static_memory(d, &kinfo, node);
rc = prepare_dtb_domU(d, &kinfo);
if ( rc < 0 )
diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
index eff9a105e7..6e01e83967 100644
--- a/xen/arch/arm/p2m.c
+++ b/xen/arch/arm/p2m.c
@@ -1293,11 +1293,8 @@ out:
return resolved;
}
-static inline int p2m_insert_mapping(struct domain *d,
- gfn_t start_gfn,
- unsigned long nr,
- mfn_t mfn,
- p2m_type_t t)
+int p2m_insert_mapping(struct domain *d, gfn_t start_gfn, unsigned long nr,
+ mfn_t mfn, p2m_type_t t)
{
struct p2m_domain *p2m = p2m_get_hostp2m(d);
int rc;
diff --git a/xen/include/asm-arm/p2m.h b/xen/include/asm-arm/p2m.h
index 6a2108398f..f885cc522b 100644
--- a/xen/include/asm-arm/p2m.h
+++ b/xen/include/asm-arm/p2m.h
@@ -300,6 +300,9 @@ int map_dev_mmio_region(struct domain *d,
unsigned long nr,
mfn_t mfn);
+int p2m_insert_mapping(struct domain *d, gfn_t start_gfn, unsigned long nr,
+ mfn_t mfn, p2m_type_t t);
+
int guest_physmap_add_entry(struct domain *d,
gfn_t gfn,
mfn_t mfn,
@@ -315,6 +318,14 @@ static inline int guest_physmap_add_page(struct domain *d,
return guest_physmap_add_entry(d, gfn, mfn, page_order, p2m_ram_rw);
}
+static inline int guest_physmap_add_pages(struct domain *d,
+ gfn_t gfn,
+ mfn_t mfn,
+ unsigned int nr_pages)
+{
+ return p2m_insert_mapping(d, gfn, nr_pages, mfn, p2m_ram_rw);
+}
+
mfn_t gfn_to_mfn(struct domain *d, gfn_t gfn);
/* Look up a GFN and take a reference count on the backing page. */
--
2.25.1
^ permalink raw reply related [flat|nested] 18+ messages in thread
* Re: [PATCH v6 7/7] xen/arm: introduce allocate_static_memory
2021-09-08 9:52 ` [PATCH v6 7/7] xen/arm: introduce allocate_static_memory Penny Zheng
@ 2021-09-09 0:15 ` Stefano Stabellini
0 siblings, 0 replies; 18+ messages in thread
From: Stefano Stabellini @ 2021-09-09 0:15 UTC (permalink / raw)
To: Penny Zheng
Cc: xen-devel, sstabellini, julien, Bertrand.Marquis, Wei.Chen, jbeulich
On Wed, 8 Sep 2021, Penny Zheng wrote:
> This commit introduces a new function allocate_static_memory to allocate
> static memory as guest RAM for Domain on Static Allocation.
^ for domains.
> It uses acquire_domstatic_pages to acquire pre-configured static memory
> for this domain, and uses guest_physmap_add_pages to set up P2M table.
^ the ^the
> These pre-defined static memory banks shall be mapped to the usual guest
> memory addresses (GUEST_RAM0_BASE, GUEST_RAM1_BASE) defined by
> xen/include/public/arch-arm.h.
>
> In order to deal with the trouble of count-to-order conversion when page number
> is not in a power-of-two, this commit exports p2m_insert_mapping and introduce
> a new function guest_physmap_add_pages to cope with adding guest RAM p2m
> mapping with nr_pages.
>
> Signed-off-by: Penny Zheng <penny.zheng@arm.com>
> ---
> xen/arch/arm/domain_build.c | 161 +++++++++++++++++++++++++++++++++++-
> xen/arch/arm/p2m.c | 7 +-
> xen/include/asm-arm/p2m.h | 11 +++
> 3 files changed, 173 insertions(+), 6 deletions(-)
>
> diff --git a/xen/arch/arm/domain_build.c b/xen/arch/arm/domain_build.c
> index 206038d1c0..b011cc4789 100644
> --- a/xen/arch/arm/domain_build.c
> +++ b/xen/arch/arm/domain_build.c
> @@ -480,6 +480,162 @@ fail:
> (unsigned long)kinfo->unassigned_mem >> 10);
> }
>
> +#ifdef CONFIG_STATIC_MEMORY
> +static bool __init append_static_memory_to_bank(struct domain *d,
> + struct membank *bank,
> + mfn_t smfn,
> + paddr_t size)
> +{
> + int res;
> + unsigned int nr_pages = PFN_DOWN(size);
> + /* Infer next GFN. */
> + gfn_t sgfn = gaddr_to_gfn(bank->start + bank->size);
> +
> + res = guest_physmap_add_pages(d, sgfn, smfn, nr_pages);
> + if ( res )
> + {
> + dprintk(XENLOG_ERR, "Failed to map pages to DOMU: %d", res);
> + return false;
> + }
> +
> + bank->size = bank->size + size;
> +
> + return true;
> +}
> +
> +/* Allocate memory from static memory as RAM for one specific domain d. */
> +static void __init allocate_static_memory(struct domain *d,
> + struct kernel_info *kinfo,
> + const struct dt_device_node *node)
> +{
> + const struct dt_property *prop;
> + u32 addr_cells, size_cells, reg_cells;
> + unsigned int nr_banks, gbank, bank = 0;
> + const uint64_t rambase[] = GUEST_RAM_BANK_BASES;
> + const uint64_t ramsize[] = GUEST_RAM_BANK_SIZES;
> + const __be32 *cell;
> + u64 tot_size = 0;
> + paddr_t pbase, psize, gsize;
> + mfn_t smfn;
> + int res;
> +
> + prop = dt_find_property(node, "xen,static-mem", NULL);
> + if ( !dt_property_read_u32(node, "#xen,static-mem-address-cells",
> + &addr_cells) )
> + {
> + printk(XENLOG_ERR
> + "%pd: failed to read \"#xen,static-mem-address-cells\".\n", d);
> + goto fail;
> + }
> +
> + if ( !dt_property_read_u32(node, "#xen,static-mem-size-cells",
> + &size_cells) )
> + {
> + printk(XENLOG_ERR
> + "%pd: failed to read \"#xen,static-mem-size-cells\".\n", d);
> + goto fail;
> + }
> + reg_cells = addr_cells + size_cells;
> +
> + /*
> + * The static memory will be mapped in the guest at the usual guest memory
> + * addresses (GUEST_RAM0_BASE, GUEST_RAM1_BASE) defined by
> + * xen/include/public/arch-arm.h.
> + */
> + gbank = 0;
> + gsize = ramsize[gbank];
> + kinfo->mem.bank[gbank].start = rambase[gbank];
> +
> + cell = (const __be32 *)prop->value;
> + nr_banks = (prop->length) / (reg_cells * sizeof (u32));
> +
> + for ( ; bank < nr_banks; bank++ )
> + {
> + device_tree_get_reg(&cell, addr_cells, size_cells, &pbase, &psize);
> + ASSERT(IS_ALIGNED(pbase, PAGE_SIZE) && IS_ALIGNED(psize, PAGE_SIZE));
> +
> + smfn = maddr_to_mfn(pbase);
> + res = acquire_domstatic_pages(d, smfn, PFN_DOWN(psize), 0);
> + if ( res )
> + {
> + printk(XENLOG_ERR
> + "%pd: failed to acquire static memory: %d.\n", d, res);
> + goto fail;
> + }
> +
> + printk(XENLOG_INFO "%pd: STATIC BANK[%u] %#"PRIpaddr"-%#"PRIpaddr"\n",
> + d, bank, pbase, pbase + psize);
> +
> + while ( 1 )
> + {
> + /* Map as much as possible the static range to the guest bank */
> + if ( !append_static_memory_to_bank(d, &kinfo->mem.bank[gbank], smfn,
> + min(psize, gsize)) )
> + goto fail;
> +
> + /*
> + * The current physical bank is fully mapped.
> + * Handle the next physical bank.
> + */
> + if ( gsize >= psize )
> + {
> + gsize = gsize - psize;
> + break;
> + }
> + /*
> + * When current guest bank is not enough to map, exhaust
> + * the current one and seek to the next.
> + * Before seeking to the next, check if we still have available
> + * guest bank.
> + */
> + else if ( (gbank + 1) >= GUEST_RAM_BANKS )
> + {
> + printk(XENLOG_ERR "Exhausted all possible guest banks.\n");
> + goto fail;
> + }
> + else
> + {
> + psize = psize - gsize;
> + smfn = mfn_add(smfn, gsize >> PAGE_SHIFT);
> + /* Update to the next guest bank. */
> + gbank++;
> + gsize = ramsize[gbank];
> + kinfo->mem.bank[gbank].start = rambase[gbank];
> + }
> + }
> +
> + tot_size += psize;
> + }
> +
> + kinfo->mem.nr_banks = ++gbank;
> +
> + kinfo->unassigned_mem -= tot_size;
> + /*
> + * The property 'memory' should match the amount of memory given to the
> + * guest.
> + * Currently, it is only possible to either acquire static memory or let
> + * Xen allocate. *Mixing* is not supported'.
^ stray '
These are all NITs that I'd be happy to fix on commit if the series
doesn't need another update.
Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>
> + */
> + if ( kinfo->unassigned_mem )
> + {
> + printk(XENLOG_ERR
> + "Size of \"memory\" property doesn't match up with the sum-up of \"xen,static-mem\". Unsupported configuration.\n");
> + goto fail;
> + }
> +
> + return;
> +
> + fail:
> + panic("Failed to allocate requested static memory for domain %pd.", d);
> +}
> +#else
> +static void __init allocate_static_memory(struct domain *d,
> + struct kernel_info *kinfo,
> + const struct dt_device_node *node)
> +{
> +}
> +#endif
> +
> static int __init write_properties(struct domain *d, struct kernel_info *kinfo,
> const struct dt_device_node *node)
> {
> @@ -2453,7 +2609,10 @@ static int __init construct_domU(struct domain *d,
> /* type must be set before allocate memory */
> d->arch.type = kinfo.type;
> #endif
> - allocate_memory(d, &kinfo);
> + if ( !dt_find_property(node, "xen,static-mem", NULL) )
> + allocate_memory(d, &kinfo);
> + else
> + allocate_static_memory(d, &kinfo, node);
>
> rc = prepare_dtb_domU(d, &kinfo);
> if ( rc < 0 )
> diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
> index eff9a105e7..6e01e83967 100644
> --- a/xen/arch/arm/p2m.c
> +++ b/xen/arch/arm/p2m.c
> @@ -1293,11 +1293,8 @@ out:
> return resolved;
> }
>
> -static inline int p2m_insert_mapping(struct domain *d,
> - gfn_t start_gfn,
> - unsigned long nr,
> - mfn_t mfn,
> - p2m_type_t t)
> +int p2m_insert_mapping(struct domain *d, gfn_t start_gfn, unsigned long nr,
> + mfn_t mfn, p2m_type_t t)
> {
> struct p2m_domain *p2m = p2m_get_hostp2m(d);
> int rc;
> diff --git a/xen/include/asm-arm/p2m.h b/xen/include/asm-arm/p2m.h
> index 6a2108398f..f885cc522b 100644
> --- a/xen/include/asm-arm/p2m.h
> +++ b/xen/include/asm-arm/p2m.h
> @@ -300,6 +300,9 @@ int map_dev_mmio_region(struct domain *d,
> unsigned long nr,
> mfn_t mfn);
>
> +int p2m_insert_mapping(struct domain *d, gfn_t start_gfn, unsigned long nr,
> + mfn_t mfn, p2m_type_t t);
> +
> int guest_physmap_add_entry(struct domain *d,
> gfn_t gfn,
> mfn_t mfn,
> @@ -315,6 +318,14 @@ static inline int guest_physmap_add_page(struct domain *d,
> return guest_physmap_add_entry(d, gfn, mfn, page_order, p2m_ram_rw);
> }
>
> +static inline int guest_physmap_add_pages(struct domain *d,
> + gfn_t gfn,
> + mfn_t mfn,
> + unsigned int nr_pages)
> +{
> + return p2m_insert_mapping(d, gfn, nr_pages, mfn, p2m_ram_rw);
> +}
> +
> mfn_t gfn_to_mfn(struct domain *d, gfn_t gfn);
>
> /* Look up a GFN and take a reference count on the backing page. */
> --
> 2.25.1
>
^ permalink raw reply [flat|nested] 18+ messages in thread