From: "Longpeng(Mike)" <longpeng2@huawei.com>
To: <andraprs@amazon.com>, <lexnv@amazon.com>, <alcioa@amazon.com>
Cc: <linux-kernel@vger.kernel.org>, <arei.gonglei@huawei.com>,
	"Longpeng(Mike)" <longpeng2@huawei.com>
Subject: [PATCH] nitro_enclaves: merge contiguous physical memory regions
Date: Tue, 14 Sep 2021 11:14:50 +0800
Message-ID: <20210914031450.919-1-longpeng2@huawei.com>

There may be too many physical memory regions if the memory region
is backed by 2 MiB hugetlb pages.

Let's merge adjacent regions if they are physically contiguous.

Signed-off-by: Longpeng(Mike) <longpeng2@huawei.com>
---
 drivers/virt/nitro_enclaves/ne_misc_dev.c | 64 +++++++++++++++++--------------
 1 file changed, 35 insertions(+), 29 deletions(-)
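
As an aside for reviewers, below is a minimal standalone sketch of the
merge idea (illustrative only, built as a userspace program; the
struct/function names and the sample physical addresses are hypothetical
and are not the driver code). Per-page (paddr, size) entries whose
physical ranges touch are collapsed into a single region:

#include <stdio.h>

struct region {
	unsigned long long paddr;
	unsigned long long size;
};

static unsigned long merge_regions(const struct region *pages,
				   unsigned long nr_pages,
				   struct region *out)
{
	unsigned long nr_out = 0;

	for (unsigned long i = 0; i < nr_pages; i++) {
		if (nr_out &&
		    out[nr_out - 1].paddr + out[nr_out - 1].size == pages[i].paddr) {
			/* Physically contiguous with the previous region: extend it. */
			out[nr_out - 1].size += pages[i].size;
		} else {
			/* Gap in physical space: start a new region. */
			out[nr_out] = pages[i];
			nr_out++;
		}
	}

	return nr_out;
}

int main(void)
{
	/* Three 2 MiB pages; the first two happen to be physically contiguous. */
	struct region pages[] = {
		{ 0x100000000ULL, 0x200000ULL },
		{ 0x100200000ULL, 0x200000ULL },
		{ 0x180000000ULL, 0x200000ULL },
	};
	struct region merged[3];
	unsigned long nr = merge_regions(pages, 3, merged);

	for (unsigned long i = 0; i < nr; i++)
		printf("region %lu: paddr=0x%llx size=0x%llx\n",
		       i, merged[i].paddr, merged[i].size);

	return 0;
}

With this sample data the first two 2 MiB pages collapse into one 4 MiB
region, so only two regions would be submitted instead of three; the
patch applies the same logic to the slot add mem requests.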

diff --git a/drivers/virt/nitro_enclaves/ne_misc_dev.c b/drivers/virt/nitro_enclaves/ne_misc_dev.c
index e21e1e8..2920f26 100644
--- a/drivers/virt/nitro_enclaves/ne_misc_dev.c
+++ b/drivers/virt/nitro_enclaves/ne_misc_dev.c
@@ -824,6 +824,11 @@ static int ne_sanity_check_user_mem_region_page(struct ne_enclave *ne_enclave,
 	return 0;
 }
 
+struct phys_contig_mem_region {
+	u64 paddr;
+	u64 size;
+};
+
 /**
  * ne_set_user_memory_region_ioctl() - Add user space memory region to the slot
  *				       associated with the current enclave.
@@ -843,9 +848,10 @@ static int ne_set_user_memory_region_ioctl(struct ne_enclave *ne_enclave,
 	unsigned long max_nr_pages = 0;
 	unsigned long memory_size = 0;
 	struct ne_mem_region *ne_mem_region = NULL;
-	unsigned long nr_phys_contig_mem_regions = 0;
 	struct pci_dev *pdev = ne_devs.ne_pci_dev->pdev;
-	struct page **phys_contig_mem_regions = NULL;
+	struct phys_contig_mem_region *phys_regions = NULL;
+	unsigned long nr_phys_regions = 0;
+	u64 prev_phys_region_end;
 	int rc = -EINVAL;
 
 	rc = ne_sanity_check_user_mem_region(ne_enclave, mem_region);
@@ -866,9 +872,8 @@ static int ne_set_user_memory_region_ioctl(struct ne_enclave *ne_enclave,
 		goto free_mem_region;
 	}
 
-	phys_contig_mem_regions = kcalloc(max_nr_pages, sizeof(*phys_contig_mem_regions),
-					  GFP_KERNEL);
-	if (!phys_contig_mem_regions) {
+	phys_regions = kcalloc(max_nr_pages, sizeof(*phys_regions), GFP_KERNEL);
+	if (!phys_regions) {
 		rc = -ENOMEM;
 
 		goto free_mem_region;
@@ -903,25 +908,29 @@ static int ne_set_user_memory_region_ioctl(struct ne_enclave *ne_enclave,
 
 		/*
 		 * TODO: Update once handled non-contiguous memory regions
-		 * received from user space or contiguous physical memory regions
-		 * larger than 2 MiB e.g. 8 MiB.
+		 * received from user space.
 		 */
-		phys_contig_mem_regions[i] = ne_mem_region->pages[i];
+		if (nr_phys_regions &&
+		    prev_phys_region_end == page_to_phys(ne_mem_region->pages[i]))
+			phys_regions[nr_phys_regions - 1].size +=
+							page_size(ne_mem_region->pages[i]);
+		else {
+			phys_regions[nr_phys_regions].paddr =
+							page_to_phys(ne_mem_region->pages[i]);
+			phys_regions[nr_phys_regions].size =
+							page_size(ne_mem_region->pages[i]);
+			nr_phys_regions++;
+		}
+
+		prev_phys_region_end = phys_regions[nr_phys_regions - 1].paddr +
+					phys_regions[nr_phys_regions - 1].size;
 
 		memory_size += page_size(ne_mem_region->pages[i]);
 
 		ne_mem_region->nr_pages++;
 	} while (memory_size < mem_region.memory_size);
 
-	/*
-	 * TODO: Update once handled non-contiguous memory regions received
-	 * from user space or contiguous physical memory regions larger than
-	 * 2 MiB e.g. 8 MiB.
-	 */
-	nr_phys_contig_mem_regions = ne_mem_region->nr_pages;
-
-	if ((ne_enclave->nr_mem_regions + nr_phys_contig_mem_regions) >
-	    ne_enclave->max_mem_regions) {
+	if ((ne_enclave->nr_mem_regions + nr_phys_regions) > ne_enclave->max_mem_regions) {
 		dev_err_ratelimited(ne_misc_dev.this_device,
 				    "Reached max memory regions %lld\n",
 				    ne_enclave->max_mem_regions);
@@ -931,11 +940,8 @@ static int ne_set_user_memory_region_ioctl(struct ne_enclave *ne_enclave,
 		goto put_pages;
 	}
 
-	for (i = 0; i < nr_phys_contig_mem_regions; i++) {
-		u64 phys_region_addr = page_to_phys(phys_contig_mem_regions[i]);
-		u64 phys_region_size = page_size(phys_contig_mem_regions[i]);
-
-		if (phys_region_size & (NE_MIN_MEM_REGION_SIZE - 1)) {
+	for (i = 0; i < nr_phys_regions; i++) {
+		if (phys_regions[i].size & (NE_MIN_MEM_REGION_SIZE - 1)) {
 			dev_err_ratelimited(ne_misc_dev.this_device,
 					    "Physical mem region size is not multiple of 2 MiB\n");
 
@@ -944,7 +950,7 @@ static int ne_set_user_memory_region_ioctl(struct ne_enclave *ne_enclave,
 			goto put_pages;
 		}
 
-		if (!IS_ALIGNED(phys_region_addr, NE_MIN_MEM_REGION_SIZE)) {
+		if (!IS_ALIGNED(phys_regions[i].paddr, NE_MIN_MEM_REGION_SIZE)) {
 			dev_err_ratelimited(ne_misc_dev.this_device,
 					    "Physical mem region address is not 2 MiB aligned\n");
 
@@ -959,13 +965,13 @@ static int ne_set_user_memory_region_ioctl(struct ne_enclave *ne_enclave,
 
 	list_add(&ne_mem_region->mem_region_list_entry, &ne_enclave->mem_regions_list);
 
-	for (i = 0; i < nr_phys_contig_mem_regions; i++) {
+	for (i = 0; i < nr_phys_regions; i++) {
 		struct ne_pci_dev_cmd_reply cmd_reply = {};
 		struct slot_add_mem_req slot_add_mem_req = {};
 
 		slot_add_mem_req.slot_uid = ne_enclave->slot_uid;
-		slot_add_mem_req.paddr = page_to_phys(phys_contig_mem_regions[i]);
-		slot_add_mem_req.size = page_size(phys_contig_mem_regions[i]);
+		slot_add_mem_req.paddr = phys_regions[i].paddr;
+		slot_add_mem_req.size = phys_regions[i].size;
 
 		rc = ne_do_request(pdev, SLOT_ADD_MEM,
 				   &slot_add_mem_req, sizeof(slot_add_mem_req),
@@ -974,7 +980,7 @@ static int ne_set_user_memory_region_ioctl(struct ne_enclave *ne_enclave,
 			dev_err_ratelimited(ne_misc_dev.this_device,
 					    "Error in slot add mem [rc=%d]\n", rc);
 
-			kfree(phys_contig_mem_regions);
+			kfree(phys_regions);
 
 			/*
 			 * Exit here without put pages as memory regions may
@@ -987,7 +993,7 @@ static int ne_set_user_memory_region_ioctl(struct ne_enclave *ne_enclave,
 		ne_enclave->nr_mem_regions++;
 	}
 
-	kfree(phys_contig_mem_regions);
+	kfree(phys_regions);
 
 	return 0;
 
@@ -995,7 +1001,7 @@ static int ne_set_user_memory_region_ioctl(struct ne_enclave *ne_enclave,
 	for (i = 0; i < ne_mem_region->nr_pages; i++)
 		put_page(ne_mem_region->pages[i]);
 free_mem_region:
-	kfree(phys_contig_mem_regions);
+	kfree(phys_regions);
 	kfree(ne_mem_region->pages);
 	kfree(ne_mem_region);
 
-- 
1.8.3.1



Thread overview: 5+ messages
2021-09-14  3:14 Longpeng(Mike) [this message]
2021-09-14 17:45 ` [PATCH] nitro_enclaves: merge contiguous physical memory regions Paraschiv, Andra-Irina
2021-09-15  7:28   ` Longpeng (Mike, Cloud Infrastructure Service Product Dept.)
2021-09-15 17:46     ` Paraschiv, Andra-Irina
2021-09-16  2:26       ` Longpeng (Mike, Cloud Infrastructure Service Product Dept.)
