From: "Kalra, Ashish" <ashish.kalra@amd.com>
To: Alper Gun <alpergun@google.com>, Michael Roth <michael.roth@amd.com>
Cc: kvm@vger.kernel.org, linux-coco@lists.linux.dev,
	linux-mm@kvack.org, linux-crypto@vger.kernel.org, x86@kernel.org,
	linux-kernel@vger.kernel.org, tglx@linutronix.de,
	mingo@redhat.com, jroedel@suse.de, thomas.lendacky@amd.com,
	hpa@zytor.com, ardb@kernel.org, pbonzini@redhat.com,
	seanjc@google.com, vkuznets@redhat.com, wanpengli@tencent.com,
	jmattson@google.com, luto@kernel.org,
	dave.hansen@linux.intel.com, slp@redhat.com, pgonda@google.com,
	peterz@infradead.org, srinivas.pandruvada@linux.intel.com,
	rientjes@google.com, dovmurik@linux.ibm.com, tobin@ibm.com,
	bp@alien8.de, vbabka@suse.cz, kirill@shutemov.name,
	ak@linux.intel.com, tony.luck@intel.com, marcorr@google.com,
	sathyanarayanan.kuppuswamy@linux.intel.com, dgilbert@redhat.com,
	jarkko@kernel.org, harald@profian.com,
	Brijesh Singh <brijesh.singh@amd.com>
Subject: Re: [PATCH RFC v7 29/64] crypto: ccp: Handle the legacy SEV command when SNP is enabled
Date: Fri, 13 Jan 2023 16:03:52 -0600	[thread overview]
Message-ID: <7f1a9568-a570-690e-4b74-22644a3e2094@amd.com> (raw)
In-Reply-To: <CABpDEum=-3_izZJYzQb4B2ef699f2R+SDWC-L2Ca-rPE5-ekpw@mail.gmail.com>

Hello Alper,

On 1/12/2023 2:47 PM, Alper Gun wrote:
> On Wed, Dec 14, 2022 at 11:54 AM Michael Roth <michael.roth@amd.com> wrote:
>>
>> From: Brijesh Singh <brijesh.singh@amd.com>
>>
>> The behavior of the SEV-legacy commands is altered when the SNP firmware
>> is in the INIT state. Once SNP is in the INIT state, any memory that the
>> firmware may write to while handling an SEV-legacy command must be placed
>> in the firmware state before issuing the command.
>>
>> A command buffer may contain a system physical address that the firmware
>> may write to. There are two cases that need to be handled:
>>
>> 1) the system physical address points to guest memory
>> 2) the system physical address points to host memory
>>
>> To handle case #1, change the page state to firmware in the RMP
>> table before issuing the command and restore the state to shared after the
>> command completes.
>>
>> For case #2, use a bounce buffer to complete the request.
>>
>> Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
>> Signed-off-by: Ashish Kalra <ashish.kalra@amd.com>
>> Signed-off-by: Michael Roth <michael.roth@amd.com>
>> ---
>>   drivers/crypto/ccp/sev-dev.c | 370 ++++++++++++++++++++++++++++++++++-
>>   drivers/crypto/ccp/sev-dev.h |  12 ++
>>   2 files changed, 372 insertions(+), 10 deletions(-)
>>
>> diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
>> index 4c12e98a1219..5eb2e8f364d4 100644
>> --- a/drivers/crypto/ccp/sev-dev.c
>> +++ b/drivers/crypto/ccp/sev-dev.c
>> @@ -286,6 +286,30 @@ static int rmp_mark_pages_firmware(unsigned long paddr, unsigned int npages, boo
>>          return rc;
>>   }
>>
>> +static int rmp_mark_pages_shared(unsigned long paddr, unsigned int npages)
>> +{
>> +       /* The C-bit may be set in the paddr */
>> +       unsigned long pfn = __sme_clr(paddr) >> PAGE_SHIFT;
>> +       int rc, n = 0, i;
>> +
>> +       for (i = 0; i < npages; i++, pfn++, n++) {
>> +               rc = rmp_make_shared(pfn, PG_LEVEL_4K);
>> +               if (rc)
>> +                       goto cleanup;
>> +       }
>> +
>> +       return 0;
>> +
>> +cleanup:
>> +       /*
>> +        * If we failed to change the page state to shared, then it is not
>> +        * safe to release the pages back to the system, so leak them.
>> +        */
>> +       snp_mark_pages_offline(pfn, npages - n);
>> +
>> +       return rc;
>> +}
>> +
>>   static struct page *__snp_alloc_firmware_pages(gfp_t gfp_mask, int order, bool locked)
>>   {
>>          unsigned long npages = 1ul << order, paddr;
>> @@ -487,12 +511,295 @@ static int sev_write_init_ex_file_if_required(int cmd_id)
>>          return sev_write_init_ex_file();
>>   }
>>
>> +static int alloc_snp_host_map(struct sev_device *sev)
>> +{
>> +       struct page *page;
>> +       int i;
>> +
>> +       for (i = 0; i < MAX_SNP_HOST_MAP_BUFS; i++) {
>> +               struct snp_host_map *map = &sev->snp_host_map[i];
>> +
>> +               memset(map, 0, sizeof(*map));
>> +
>> +               page = alloc_pages(GFP_KERNEL_ACCOUNT, get_order(SEV_FW_BLOB_MAX_SIZE));
>> +               if (!page)
>> +                       return -ENOMEM;
>> +
>> +               map->host = page_address(page);
>> +       }
>> +
>> +       return 0;
>> +}
>> +
>> +static void free_snp_host_map(struct sev_device *sev)
>> +{
>> +       int i;
>> +
>> +       for (i = 0; i < MAX_SNP_HOST_MAP_BUFS; i++) {
>> +               struct snp_host_map *map = &sev->snp_host_map[i];
>> +
>> +               if (map->host) {
>> +                       __free_pages(virt_to_page(map->host), get_order(SEV_FW_BLOB_MAX_SIZE));
>> +                       memset(map, 0, sizeof(*map));
>> +               }
>> +       }
>> +}
>> +
>> +static int map_firmware_writeable(u64 *paddr, u32 len, bool guest, struct snp_host_map *map)
>> +{
>> +       unsigned int npages = PAGE_ALIGN(len) >> PAGE_SHIFT;
>> +
>> +       map->active = false;
>> +
>> +       if (!paddr || !len)
>> +               return 0;
>> +
>> +       map->paddr = *paddr;
>> +       map->len = len;
>> +
>> +       /* If paddr points to guest memory, then change the page state to firmware. */
>> +       if (guest) {
>> +               if (rmp_mark_pages_firmware(*paddr, npages, true))
>> +                       return -EFAULT;
>> +
>> +               goto done;
>> +       }
>> +
>> +       if (!map->host)
>> +               return -ENOMEM;
>> +
>> +       /* Check if the pre-allocated buffer can be used to fulfill the request. */
>> +       if (len > SEV_FW_BLOB_MAX_SIZE)
>> +               return -EINVAL;
>> +
>> +       /* Transition the pre-allocated buffer to the firmware state. */
>> +       if (rmp_mark_pages_firmware(__pa(map->host), npages, true))
>> +               return -EFAULT;
>> +
>> +       /* Set the paddr to use pre-allocated firmware buffer */
>> +       *paddr = __psp_pa(map->host);
>> +
>> +done:
>> +       map->active = true;
>> +       return 0;
>> +}
>> +
>> +static int unmap_firmware_writeable(u64 *paddr, u32 len, bool guest, struct snp_host_map *map)
>> +{
>> +       unsigned int npages = PAGE_ALIGN(len) >> PAGE_SHIFT;
>> +
>> +       if (!map->active)
>> +               return 0;
>> +
>> +       /* If paddr points to guest memory, then restore the page state to hypervisor. */
>> +       if (guest) {
>> +               if (snp_reclaim_pages(*paddr, npages, true))
>> +                       return -EFAULT;
>> +
>> +               goto done;
>> +       }
>> +
>> +       /*
>> +        * Transition the pre-allocated buffer to hypervisor state before the access.
>> +        *
>> +        * This is because while changing the page state to firmware, the kernel unmaps
>> +        * the pages from the direct map, and to restore the direct map the pages must
>> +        * be transitioned back to the shared state.
>> +        */
>> +       if (snp_reclaim_pages(__pa(map->host), npages, true))
>> +               return -EFAULT;
>> +
>> +       /* Copy the response data from the firmware buffer to the caller's buffer. */
>> +       memcpy(__va(__sme_clr(map->paddr)), map->host, min_t(size_t, len, map->len));
>> +       *paddr = map->paddr;
>> +
>> +done:
>> +       map->active = false;
>> +       return 0;
>> +}
>> +
>> +static bool sev_legacy_cmd_buf_writable(int cmd)
>> +{
>> +       switch (cmd) {
>> +       case SEV_CMD_PLATFORM_STATUS:
>> +       case SEV_CMD_GUEST_STATUS:
>> +       case SEV_CMD_LAUNCH_START:
>> +       case SEV_CMD_RECEIVE_START:
>> +       case SEV_CMD_LAUNCH_MEASURE:
>> +       case SEV_CMD_SEND_START:
>> +       case SEV_CMD_SEND_UPDATE_DATA:
>> +       case SEV_CMD_SEND_UPDATE_VMSA:
>> +       case SEV_CMD_PEK_CSR:
>> +       case SEV_CMD_PDH_CERT_EXPORT:
>> +       case SEV_CMD_GET_ID:
>> +       case SEV_CMD_ATTESTATION_REPORT:
>> +               return true;
>> +       default:
>> +               return false;
>> +       }
>> +}
>> +
>> +#define prep_buffer(name, addr, len, guest, map) \
>> +       func(&((typeof(name *))cmd_buf)->addr, ((typeof(name *))cmd_buf)->len, guest, map)
>> +
>> +static int __snp_cmd_buf_copy(int cmd, void *cmd_buf, bool to_fw, int fw_err)
>> +{
>> +       int (*func)(u64 *paddr, u32 len, bool guest, struct snp_host_map *map);
>> +       struct sev_device *sev = psp_master->sev_data;
>> +       bool from_fw = !to_fw;
>> +
>> +       /*
>> +        * After the command is completed, change the command buffer memory to
>> +        * hypervisor state.
>> +        *
>> +        * The immutable bit is automatically cleared by the firmware, so
>> +        * there is no need to reclaim the page.
>> +        */
>> +       if (from_fw && sev_legacy_cmd_buf_writable(cmd)) {
>> +               if (rmp_mark_pages_shared(__pa(cmd_buf), 1))
>> +                       return -EFAULT;
> 
> If we return here, we will skip calling unmap_firmware_writeable and
> we will leak some pages in firmware state.

Do you mean those (guest) pages which were transitioned to the firmware
state as part of
snp_aware_copy_to_firmware()->__snp_cmd_buf_copy()->map_firmware_writeable()?
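
If so, one direction (an untested sketch only, reusing the helpers
introduced by this patch) would be to defer the error in the from_fw
prologue rather than returning early, so that the switch statement below
still runs unmap_firmware_writeable() for the buffers that were mapped
during snp_aware_copy_to_firmware():

	/* Sketch: inside __snp_cmd_buf_copy(), with a local "int ret = 0;". */
	if (from_fw && sev_legacy_cmd_buf_writable(cmd)) {
		/*
		 * Record the failure instead of bailing out, so the switch
		 * statement below still calls unmap_firmware_writeable() and
		 * restores/reclaims the pages mapped in the to_fw pass.
		 */
		if (rmp_mark_pages_shared(__pa(cmd_buf), 1))
			ret = -EFAULT;

		/* No need to go further if firmware failed to execute command. */
		if (fw_err)
			return ret;
	}

	...

	/* And return the deferred error at the end of the function. */
	return ret;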

> 
>> +
>> +               /* No need to go further if firmware failed to execute command. */
>> +               if (fw_err)
>> +                       return 0;
>> +       }
>> +
>> +       if (to_fw)
>> +               func = map_firmware_writeable;
>> +       else
>> +               func = unmap_firmware_writeable;
>> +
>> +       /*
>> +        * A command buffer may contain a system physical address. If the address
>> +        * points to host memory, then use an intermediate firmware page; otherwise,
>> +        * change the page state in the RMP table.
>> +        */
>> +       switch (cmd) {
>> +       case SEV_CMD_PDH_CERT_EXPORT:
>> +               if (prep_buffer(struct sev_data_pdh_cert_export, pdh_cert_address,
>> +                               pdh_cert_len, false, &sev->snp_host_map[0]))
>> +                       goto err;
>> +               if (prep_buffer(struct sev_data_pdh_cert_export, cert_chain_address,
>> +                               cert_chain_len, false, &sev->snp_host_map[1]))
>> +                       goto err;
>> +               break;
>> +       case SEV_CMD_GET_ID:
>> +               if (prep_buffer(struct sev_data_get_id, address, len,
>> +                               false, &sev->snp_host_map[0]))
>> +                       goto err;
>> +               break;
>> +       case SEV_CMD_PEK_CSR:
>> +               if (prep_buffer(struct sev_data_pek_csr, address, len,
>> +                               false, &sev->snp_host_map[0]))
>> +                       goto err;
>> +               break;
>> +       case SEV_CMD_LAUNCH_UPDATE_DATA:
>> +               if (prep_buffer(struct sev_data_launch_update_data, address, len,
>> +                               true, &sev->snp_host_map[0]))
>> +                       goto err;
>> +               break;
>> +       case SEV_CMD_LAUNCH_UPDATE_VMSA:
>> +               if (prep_buffer(struct sev_data_launch_update_vmsa, address, len,
>> +                               true, &sev->snp_host_map[0]))
>> +                       goto err;
>> +               break;
>> +       case SEV_CMD_LAUNCH_MEASURE:
>> +               if (prep_buffer(struct sev_data_launch_measure, address, len,
>> +                               false, &sev->snp_host_map[0]))
>> +                       goto err;
>> +               break;
>> +       case SEV_CMD_LAUNCH_UPDATE_SECRET:
>> +               if (prep_buffer(struct sev_data_launch_secret, guest_address, guest_len,
>> +                               true, &sev->snp_host_map[0]))
>> +                       goto err;
>> +               break;
>> +       case SEV_CMD_DBG_DECRYPT:
>> +               if (prep_buffer(struct sev_data_dbg, dst_addr, len, false,
>> +                               &sev->snp_host_map[0]))
>> +                       goto err;
>> +               break;
>> +       case SEV_CMD_DBG_ENCRYPT:
>> +               if (prep_buffer(struct sev_data_dbg, dst_addr, len, true,
>> +                               &sev->snp_host_map[0]))
>> +                       goto err;
>> +               break;
>> +       case SEV_CMD_ATTESTATION_REPORT:
>> +               if (prep_buffer(struct sev_data_attestation_report, address, len,
>> +                               false, &sev->snp_host_map[0]))
>> +                       goto err;
>> +               break;
>> +       case SEV_CMD_SEND_START:
>> +               if (prep_buffer(struct sev_data_send_start, session_address,
>> +                               session_len, false, &sev->snp_host_map[0]))
>> +                       goto err;
>> +               break;
>> +       case SEV_CMD_SEND_UPDATE_DATA:
>> +               if (prep_buffer(struct sev_data_send_update_data, hdr_address, hdr_len,
>> +                               false, &sev->snp_host_map[0]))
>> +                       goto err;
>> +               if (prep_buffer(struct sev_data_send_update_data, trans_address,
>> +                               trans_len, false, &sev->snp_host_map[1]))
>> +                       goto err;
>> +               break;
>> +       case SEV_CMD_SEND_UPDATE_VMSA:
>> +               if (prep_buffer(struct sev_data_send_update_vmsa, hdr_address, hdr_len,
>> +                               false, &sev->snp_host_map[0]))
>> +                       goto err;
>> +               if (prep_buffer(struct sev_data_send_update_vmsa, trans_address,
>> +                               trans_len, false, &sev->snp_host_map[1]))
>> +                       goto err;
>> +               break;
>> +       case SEV_CMD_RECEIVE_UPDATE_DATA:
>> +               if (prep_buffer(struct sev_data_receive_update_data, guest_address,
>> +                               guest_len, true, &sev->snp_host_map[0]))
>> +                       goto err;
>> +               break;
>> +       case SEV_CMD_RECEIVE_UPDATE_VMSA:
>> +               if (prep_buffer(struct sev_data_receive_update_vmsa, guest_address,
>> +                               guest_len, true, &sev->snp_host_map[0]))
>> +                       goto err;
>> +               break;
>> +       default:
>> +               break;
>> +       }
>> +
>> +       /* The command buffer needs to be in the firmware state. */
>> +       if (to_fw && sev_legacy_cmd_buf_writable(cmd)) {
>> +               if (rmp_mark_pages_firmware(__pa(cmd_buf), 1, true))
>> +                       return -EFAULT;
> 
> This function moves two separate pages to firmware state: first by
> calling map_firmware_writeable and second by calling
> rmp_mark_pages_firmware for cmd_buf.
> In case rmp_mark_pages_firmware fails for cmd_buf, the page which has
> already been moved to firmware state in map_firmware_writeable should
> be reclaimed.
> This is a problem especially if we leak a guest-owned page in firmware
> state. Since this path is used only by legacy SEV VMs, these leaked
> pages will never be reclaimed when those VMs are destroyed.
> 

Yes, this looks to be an inherent issue with the original patch. As you
mentioned, there are two pages involved - the guest-owned page and the HV
cmd_buf - and a failure to transition the cmd_buf back to the HV/shared
state has no corresponding recovery/reclaim path for the already
transitioned guest page.
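
One way to plug that particular hole (rough sketch; snp_unmap_cmd_buf() is
a hypothetical helper, not part of this patch, which would walk the active
snp_host_map entries for the command and call unmap_firmware_writeable()/
snp_reclaim_pages() on them) would be to undo the earlier transition on
the error path:

	/* The command buffer needs to be in the firmware state. */
	if (to_fw && sev_legacy_cmd_buf_writable(cmd)) {
		if (rmp_mark_pages_firmware(__pa(cmd_buf), 1, true)) {
			/*
			 * Undo the transitions performed by
			 * map_firmware_writeable() above so that a guest-owned
			 * page is not leaked in the firmware state.
			 */
			snp_unmap_cmd_buf(cmd, cmd_buf);
			return -EFAULT;
		}
	}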

Thanks,
Ashish

>>
>> +       }
>> +
>> +       return 0;
>> +
>> +err:
>> +       return -EINVAL;
>> +}
>> +
>> +static inline bool need_firmware_copy(int cmd)
>> +{
>> +       struct sev_device *sev = psp_master->sev_data;
>> +
>> +       /* After SNP is INIT'ed, the behavior of legacy SEV commands is changed. */
>> +       return ((cmd < SEV_CMD_SNP_INIT) && sev->snp_initialized) ? true : false;
>> +}
>> +
>> +static int snp_aware_copy_to_firmware(int cmd, void *data)
>> +{
>> +       return __snp_cmd_buf_copy(cmd, data, true, 0);
>> +}
>> +
>> +static int snp_aware_copy_from_firmware(int cmd, void *data, int fw_err)
>> +{
>> +       return __snp_cmd_buf_copy(cmd, data, false, fw_err);
>> +}
>> +
>>   static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
>>   {
>>          struct psp_device *psp = psp_master;
>>          struct sev_device *sev;
>>          unsigned int phys_lsb, phys_msb;
>>          unsigned int reg, ret = 0;
>> +       void *cmd_buf;
>>          int buf_len;
>>
>>          if (!psp || !psp->sev_data)
>> @@ -512,12 +819,28 @@ static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
>>           * work for some memory, e.g. vmalloc'd addresses, and @data may not be
>>           * physically contiguous.
>>           */
>> -       if (data)
>> -               memcpy(sev->cmd_buf, data, buf_len);
>> +       if (data) {
>> +               if (sev->cmd_buf_active > 2)
>> +                       return -EBUSY;
>> +
>> +               cmd_buf = sev->cmd_buf_active ? sev->cmd_buf_backup : sev->cmd_buf;
>> +
>> +               memcpy(cmd_buf, data, buf_len);
>> +               sev->cmd_buf_active++;
>> +
>> +               /*
>> +                * The behavior of the SEV-legacy commands is altered when the
>> +                * SNP firmware is in the INIT state.
>> +                */
>> +               if (need_firmware_copy(cmd) && snp_aware_copy_to_firmware(cmd, sev->cmd_buf))
>> +                       return -EFAULT;
>> +       } else {
>> +               cmd_buf = sev->cmd_buf;
>> +       }
>>
>>          /* Get the physical address of the command buffer */
>> -       phys_lsb = data ? lower_32_bits(__psp_pa(sev->cmd_buf)) : 0;
>> -       phys_msb = data ? upper_32_bits(__psp_pa(sev->cmd_buf)) : 0;
>> +       phys_lsb = data ? lower_32_bits(__psp_pa(cmd_buf)) : 0;
>> +       phys_msb = data ? upper_32_bits(__psp_pa(cmd_buf)) : 0;
>>
>>          dev_dbg(sev->dev, "sev command id %#x buffer 0x%08x%08x timeout %us\n",
>>                  cmd, phys_msb, phys_lsb, psp_timeout);
>> @@ -560,15 +883,24 @@ static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
>>                  ret = sev_write_init_ex_file_if_required(cmd);
>>          }
>>
>> -       print_hex_dump_debug("(out): ", DUMP_PREFIX_OFFSET, 16, 2, data,
>> -                            buf_len, false);
>> -
>>          /*
>>           * Copy potential output from the PSP back to data.  Do this even on
>>           * failure in case the caller wants to glean something from the error.
>>           */
>> -       if (data)
>> -               memcpy(data, sev->cmd_buf, buf_len);
>> +       if (data) {
>> +               /*
>> +                * Restore the page state after the command completes.
>> +                */
>> +               if (need_firmware_copy(cmd) &&
>> +                   snp_aware_copy_from_firmware(cmd, cmd_buf, ret))
>> +                       return -EFAULT;
>> +
>> +               memcpy(data, cmd_buf, buf_len);
>> +               sev->cmd_buf_active--;
>> +       }
>> +
>> +       print_hex_dump_debug("(out): ", DUMP_PREFIX_OFFSET, 16, 2, data,
>> +                            buf_len, false);
>>
>>          return ret;
>>   }
>> @@ -1579,10 +1911,12 @@ int sev_dev_init(struct psp_device *psp)
>>          if (!sev)
>>                  goto e_err;
>>
>> -       sev->cmd_buf = (void *)devm_get_free_pages(dev, GFP_KERNEL, 0);
>> +       sev->cmd_buf = (void *)devm_get_free_pages(dev, GFP_KERNEL, 1);
>>          if (!sev->cmd_buf)
>>                  goto e_sev;
>>
>> +       sev->cmd_buf_backup = (uint8_t *)sev->cmd_buf + PAGE_SIZE;
>> +
>>          psp->sev_data = sev;
>>
>>          sev->dev = dev;
>> @@ -1648,6 +1982,12 @@ static void sev_firmware_shutdown(struct sev_device *sev)
>>                  snp_range_list = NULL;
>>          }
>>
>> +       /*
>> +        * The host map needs to clear the immutable bit, so it must be freed before
>> +        * the SNP firmware shutdown.
>> +        */
>> +       free_snp_host_map(sev);
>> +
>>          sev_snp_shutdown(&error);
>>   }
>>
>> @@ -1722,6 +2062,14 @@ void sev_pci_init(void)
>>                                  dev_err(sev->dev, "SEV-SNP: failed to INIT error %#x\n", error);
>>                          }
>>                  }
>> +
>> +               /*
>> +                * Allocate the intermediate buffers used for the legacy command handling.
>> +                */
>> +               if (alloc_snp_host_map(sev)) {
>> +                       dev_notice(sev->dev, "Failed to alloc host map (disabling legacy SEV)\n");
>> +                       goto skip_legacy;
>> +               }
>>          }
>>
>>          /* Obtain the TMR memory area for SEV-ES use */
>> @@ -1739,12 +2087,14 @@ void sev_pci_init(void)
>>                  dev_err(sev->dev, "SEV: failed to INIT error %#x, rc %d\n",
>>                          error, rc);
>>
>> +skip_legacy:
>>          dev_info(sev->dev, "SEV%s API:%d.%d build:%d\n", sev->snp_initialized ?
>>                  "-SNP" : "", sev->api_major, sev->api_minor, sev->build);
>>
>>          return;
>>
>>   err:
>> +       free_snp_host_map(sev);
>>          psp_master->sev_data = NULL;
>>   }
>>
>> diff --git a/drivers/crypto/ccp/sev-dev.h b/drivers/crypto/ccp/sev-dev.h
>> index 34767657beb5..19d79f9d4212 100644
>> --- a/drivers/crypto/ccp/sev-dev.h
>> +++ b/drivers/crypto/ccp/sev-dev.h
>> @@ -29,11 +29,20 @@
>>   #define SEV_CMDRESP_CMD_SHIFT          16
>>   #define SEV_CMDRESP_IOC                        BIT(0)
>>
>> +#define MAX_SNP_HOST_MAP_BUFS          2
>> +
>>   struct sev_misc_dev {
>>          struct kref refcount;
>>          struct miscdevice misc;
>>   };
>>
>> +struct snp_host_map {
>> +       u64 paddr;
>> +       u32 len;
>> +       void *host;
>> +       bool active;
>> +};
>> +
>>   struct sev_device {
>>          struct device *dev;
>>          struct psp_device *psp;
>> @@ -52,8 +61,11 @@ struct sev_device {
>>          u8 build;
>>
>>          void *cmd_buf;
>> +       void *cmd_buf_backup;
>> +       int cmd_buf_active;
>>
>>          bool snp_initialized;
>> +       struct snp_host_map snp_host_map[MAX_SNP_HOST_MAP_BUFS];
>>   };
>>
>>   int sev_dev_init(struct psp_device *psp);
>> --
>> 2.25.1
>>
