All of lore.kernel.org
 help / color / mirror / Atom feed
From: Quentin Perret <qperret@google.com>
To: Fuad Tabba <tabba@google.com>
Cc: maz@kernel.org, james.morse@arm.com, alexandru.elisei@arm.com,
	suzuki.poulose@arm.com, catalin.marinas@arm.com, will@kernel.org,
	linux-arm-kernel@lists.infradead.org,
	kvmarm@lists.cs.columbia.edu, linux-kernel@vger.kernel.org,
	ardb@kernel.org, qwandor@google.com, dbrazdil@google.com,
	kernel-team@android.com
Subject: Re: [PATCH v3 20/21] KVM: arm64: Restrict EL2 stage-1 changes in protected mode
Date: Tue, 3 Aug 2021 11:43:26 +0100	[thread overview]
Message-ID: <YQkdztIHwXfj7Sbc@google.com> (raw)
In-Reply-To: <CA+EHjTw7W=5JqH+oZAqLPrf_6222eazDnSk24h4EuGE1VLwKYg@mail.gmail.com>

On Tuesday 03 Aug 2021 at 10:22:03 (+0200), Fuad Tabba wrote:
> Hi Quentin,
> 
> > diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
> > index 0ccea58df7e0..1b67f562b6fc 100644
> > --- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c
> > +++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
> > @@ -338,6 +338,95 @@ static int host_stage2_idmap(u64 addr)
> >         return ret;
> >  }
> >
> > +static inline bool check_prot(enum kvm_pgtable_prot prot,
> > +                             enum kvm_pgtable_prot required,
> > +                             enum kvm_pgtable_prot denied)
> > +{
> > +       return (prot & (required | denied)) == required;
> > +}
> > +
> > +int __pkvm_host_share_hyp(u64 pfn)
> > +{
> > +       phys_addr_t addr = hyp_pfn_to_phys(pfn);
> > +       enum kvm_pgtable_prot prot, cur;
> > +       void *virt = __hyp_va(addr);
> > +       enum pkvm_page_state state;
> > +       kvm_pte_t pte;
> > +       u32 level;
> > +       int ret;
> > +
> > +       if (!range_is_memory(addr, addr + PAGE_SIZE))
> > +               return -EINVAL;
> > +
> > +       hyp_spin_lock(&host_kvm.lock);
> > +       hyp_spin_lock(&pkvm_pgd_lock);
> > +
> > +       ret = kvm_pgtable_get_leaf(&host_kvm.pgt, addr, &pte, &level);
> > +       if (ret)
> > +               goto unlock;
> > +       if (!pte)
> > +               goto map_shared;
> 
> Should this check whether kvm_pte_valid as well, is that guaranteed to
> always be the case, or implicitly handled later?

Yep, this is implicitly handled by kvm_pgtable_stage2_pte_prot() which
is guaranteed not to return KVM_PGTABLE_PROT_RWX for an invalid mapping.

> > +
> > +       /*
> > +        * Check attributes in the host stage-2 PTE. We need the page to be:
> > +        *  - mapped RWX as we're sharing memory;
> > +        *  - not borrowed, as that implies absence of ownership.
> > +        * Otherwise, we can't let it go through
> > +        */
> > +       cur = kvm_pgtable_stage2_pte_prot(pte);
> > +       prot = pkvm_mkstate(0, PKVM_PAGE_SHARED_BORROWED);
> > +       if (!check_prot(cur, KVM_PGTABLE_PROT_RWX, prot)) {
> > +               ret = -EPERM;
> > +               goto unlock;
> > +       }
> > +
> > +       state = pkvm_getstate(cur);
> > +       if (state == PKVM_PAGE_OWNED)
> > +               goto map_shared;
> > +
> > +       /*
> > +        * Tolerate double-sharing the same page, but this requires
> > +        * cross-checking the hypervisor stage-1.
> > +        */
> > +       if (state != PKVM_PAGE_SHARED_OWNED) {
> > +               ret = -EPERM;
> > +               goto unlock;
> > +       }
> > +
> > +       ret = kvm_pgtable_get_leaf(&pkvm_pgtable, (u64)virt, &pte, &level);
> > +       if (ret)
> > +               goto unlock;
> > +
> > +       /*
> > +        * If the page has been shared with the hypervisor, it must be
> > +        * SHARED_BORROWED already.
> > +        */
> 
> This comment confused me at first, but then I realized it's referring
> to the page from the hyp's point of view. Could you add something to
> the comment to that effect?

Sure thing.

> It might also make it easier to follow if the variables could be
> annotated to specify whether cur, state, and prot are the host's or
> hyp's (and not reuse the same one for both).
> 
> > +       cur = kvm_pgtable_hyp_pte_prot(pte);
> > +       prot = pkvm_mkstate(PAGE_HYP, PKVM_PAGE_SHARED_BORROWED);
> > +       if (!check_prot(cur, prot, ~prot))
> > +               ret = EPERM;
> > +       goto unlock;
> > +
> > +map_shared:
> > +       /*
> > +        * If the page is not yet shared, adjust mappings in both page-tables
> > +        * while both locks are held.
> > +        */
> > +       prot = pkvm_mkstate(PAGE_HYP, PKVM_PAGE_SHARED_BORROWED);
> > +       ret = pkvm_create_mappings_locked(virt, virt + PAGE_SIZE, prot);
> > +       BUG_ON(ret);
> > +
> > +       prot = pkvm_mkstate(KVM_PGTABLE_PROT_RWX, PKVM_PAGE_SHARED_OWNED);
> > +       ret = host_stage2_idmap_locked(addr, addr + PAGE_SIZE, prot);
> > +       BUG_ON(ret);
> > +
> > +unlock:
> > +       hyp_spin_unlock(&pkvm_pgd_lock);
> > +       hyp_spin_unlock(&host_kvm.lock);
> > +
> > +       return ret;
> > +}
> > +
> >  void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt)
> >  {
> >         struct kvm_vcpu_fault_info fault;
> > diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
> > index 0625bf2353c2..cbab146cda6a 100644
> > --- a/arch/arm64/kvm/mmu.c
> > +++ b/arch/arm64/kvm/mmu.c
> > @@ -259,10 +259,8 @@ static int __create_hyp_mappings(unsigned long start, unsigned long size,
> >  {
> >         int err;
> >
> > -       if (!kvm_host_owns_hyp_mappings()) {
> > -               return kvm_call_hyp_nvhe(__pkvm_create_mappings,
> > -                                        start, size, phys, prot);
> > -       }
> > +       if (WARN_ON(!kvm_host_owns_hyp_mappings()))
> > +               return -EINVAL;
> >
> >         mutex_lock(&kvm_hyp_pgd_mutex);
> >         err = kvm_pgtable_hyp_map(hyp_pgtable, start, size, phys, prot);
> > @@ -282,6 +280,21 @@ static phys_addr_t kvm_kaddr_to_phys(void *kaddr)
> >         }
> >  }
> >
> > +static int pkvm_share_hyp(phys_addr_t start, phys_addr_t end)
> > +{
> > +       phys_addr_t addr;
> > +       int ret;
> > +
> > +       for (addr = ALIGN_DOWN(start, PAGE_SIZE); addr < end; addr += PAGE_SIZE) {
> > +               ret = kvm_call_hyp_nvhe(__pkvm_host_share_hyp,
> > +                                       __phys_to_pfn(addr));
> 
> I guess we don't expect this to happen often, but I wonder if it would
> be better to have the looping in the hyp call rather than here, to
> reduce the number of hyp calls when sharing.

Yes, I was wondering the same thing, but ended up doing the looping here
to avoid spending long periods of time in a non-preemptible state at
EL2. Probably doesn't make a big difference for now, but it might if we
ever need to share large memory regions.

Cheers,
Quentin

> 
> Thanks,
> /fuad
> 
> > +               if (ret)
> > +                       return ret;
> > +       }
> > +
> > +       return 0;
> > +}
> > +
> >  /**
> >   * create_hyp_mappings - duplicate a kernel virtual address range in Hyp mode
> >   * @from:      The virtual kernel start address of the range
> > @@ -302,6 +315,13 @@ int create_hyp_mappings(void *from, void *to, enum kvm_pgtable_prot prot)
> >         if (is_kernel_in_hyp_mode())
> >                 return 0;
> >
> > +       if (!kvm_host_owns_hyp_mappings()) {
> > +               if (WARN_ON(prot != PAGE_HYP))
> > +                       return -EPERM;
> > +               return pkvm_share_hyp(kvm_kaddr_to_phys(from),
> > +                                     kvm_kaddr_to_phys(to));
> > +       }
> > +
> >         start = start & PAGE_MASK;
> >         end = PAGE_ALIGN(end);
> >
> > --
> > 2.32.0.432.gabb21c7263-goog
> >

WARNING: multiple messages have this Message-ID (diff)
From: Quentin Perret <qperret@google.com>
To: Fuad Tabba <tabba@google.com>
Cc: kernel-team@android.com, qwandor@google.com, maz@kernel.org,
	linux-kernel@vger.kernel.org, catalin.marinas@arm.com,
	will@kernel.org, kvmarm@lists.cs.columbia.edu,
	linux-arm-kernel@lists.infradead.org
Subject: Re: [PATCH v3 20/21] KVM: arm64: Restrict EL2 stage-1 changes in protected mode
Date: Tue, 3 Aug 2021 11:43:26 +0100	[thread overview]
Message-ID: <YQkdztIHwXfj7Sbc@google.com> (raw)
In-Reply-To: <CA+EHjTw7W=5JqH+oZAqLPrf_6222eazDnSk24h4EuGE1VLwKYg@mail.gmail.com>

On Tuesday 03 Aug 2021 at 10:22:03 (+0200), Fuad Tabba wrote:
> Hi Quentin,
> 
> > diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
> > index 0ccea58df7e0..1b67f562b6fc 100644
> > --- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c
> > +++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
> > @@ -338,6 +338,95 @@ static int host_stage2_idmap(u64 addr)
> >         return ret;
> >  }
> >
> > +static inline bool check_prot(enum kvm_pgtable_prot prot,
> > +                             enum kvm_pgtable_prot required,
> > +                             enum kvm_pgtable_prot denied)
> > +{
> > +       return (prot & (required | denied)) == required;
> > +}
> > +
> > +int __pkvm_host_share_hyp(u64 pfn)
> > +{
> > +       phys_addr_t addr = hyp_pfn_to_phys(pfn);
> > +       enum kvm_pgtable_prot prot, cur;
> > +       void *virt = __hyp_va(addr);
> > +       enum pkvm_page_state state;
> > +       kvm_pte_t pte;
> > +       u32 level;
> > +       int ret;
> > +
> > +       if (!range_is_memory(addr, addr + PAGE_SIZE))
> > +               return -EINVAL;
> > +
> > +       hyp_spin_lock(&host_kvm.lock);
> > +       hyp_spin_lock(&pkvm_pgd_lock);
> > +
> > +       ret = kvm_pgtable_get_leaf(&host_kvm.pgt, addr, &pte, &level);
> > +       if (ret)
> > +               goto unlock;
> > +       if (!pte)
> > +               goto map_shared;
> 
> Should this check whether kvm_pte_valid as well, is that guaranteed to
> always be the case, or implicitly handled later?

Yep, this is implicitly handled by kvm_pgtable_stage2_pte_prot() which
is guaranteed not to return KVM_PGTABLE_PROT_RWX for an invalid mapping.

> > +
> > +       /*
> > +        * Check attributes in the host stage-2 PTE. We need the page to be:
> > +        *  - mapped RWX as we're sharing memory;
> > +        *  - not borrowed, as that implies absence of ownership.
> > +        * Otherwise, we can't let it go through
> > +        */
> > +       cur = kvm_pgtable_stage2_pte_prot(pte);
> > +       prot = pkvm_mkstate(0, PKVM_PAGE_SHARED_BORROWED);
> > +       if (!check_prot(cur, KVM_PGTABLE_PROT_RWX, prot)) {
> > +               ret = -EPERM;
> > +               goto unlock;
> > +       }
> > +
> > +       state = pkvm_getstate(cur);
> > +       if (state == PKVM_PAGE_OWNED)
> > +               goto map_shared;
> > +
> > +       /*
> > +        * Tolerate double-sharing the same page, but this requires
> > +        * cross-checking the hypervisor stage-1.
> > +        */
> > +       if (state != PKVM_PAGE_SHARED_OWNED) {
> > +               ret = -EPERM;
> > +               goto unlock;
> > +       }
> > +
> > +       ret = kvm_pgtable_get_leaf(&pkvm_pgtable, (u64)virt, &pte, &level);
> > +       if (ret)
> > +               goto unlock;
> > +
> > +       /*
> > +        * If the page has been shared with the hypervisor, it must be
> > +        * SHARED_BORROWED already.
> > +        */
> 
> This comment confused me at first, but then I realized it's referring
> to the page from the hyp's point of view. Could you add something to
> the comment to that effect?

Sure thing.

> It might also make it easier to follow if the variables could be
> annotated to specify whether cur, state, and prot are the host's or
> hyp's (and not reuse the same one for both).
> 
> > +       cur = kvm_pgtable_hyp_pte_prot(pte);
> > +       prot = pkvm_mkstate(PAGE_HYP, PKVM_PAGE_SHARED_BORROWED);
> > +       if (!check_prot(cur, prot, ~prot))
> > +               ret = EPERM;
> > +       goto unlock;
> > +
> > +map_shared:
> > +       /*
> > +        * If the page is not yet shared, adjust mappings in both page-tables
> > +        * while both locks are held.
> > +        */
> > +       prot = pkvm_mkstate(PAGE_HYP, PKVM_PAGE_SHARED_BORROWED);
> > +       ret = pkvm_create_mappings_locked(virt, virt + PAGE_SIZE, prot);
> > +       BUG_ON(ret);
> > +
> > +       prot = pkvm_mkstate(KVM_PGTABLE_PROT_RWX, PKVM_PAGE_SHARED_OWNED);
> > +       ret = host_stage2_idmap_locked(addr, addr + PAGE_SIZE, prot);
> > +       BUG_ON(ret);
> > +
> > +unlock:
> > +       hyp_spin_unlock(&pkvm_pgd_lock);
> > +       hyp_spin_unlock(&host_kvm.lock);
> > +
> > +       return ret;
> > +}
> > +
> >  void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt)
> >  {
> >         struct kvm_vcpu_fault_info fault;
> > diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
> > index 0625bf2353c2..cbab146cda6a 100644
> > --- a/arch/arm64/kvm/mmu.c
> > +++ b/arch/arm64/kvm/mmu.c
> > @@ -259,10 +259,8 @@ static int __create_hyp_mappings(unsigned long start, unsigned long size,
> >  {
> >         int err;
> >
> > -       if (!kvm_host_owns_hyp_mappings()) {
> > -               return kvm_call_hyp_nvhe(__pkvm_create_mappings,
> > -                                        start, size, phys, prot);
> > -       }
> > +       if (WARN_ON(!kvm_host_owns_hyp_mappings()))
> > +               return -EINVAL;
> >
> >         mutex_lock(&kvm_hyp_pgd_mutex);
> >         err = kvm_pgtable_hyp_map(hyp_pgtable, start, size, phys, prot);
> > @@ -282,6 +280,21 @@ static phys_addr_t kvm_kaddr_to_phys(void *kaddr)
> >         }
> >  }
> >
> > +static int pkvm_share_hyp(phys_addr_t start, phys_addr_t end)
> > +{
> > +       phys_addr_t addr;
> > +       int ret;
> > +
> > +       for (addr = ALIGN_DOWN(start, PAGE_SIZE); addr < end; addr += PAGE_SIZE) {
> > +               ret = kvm_call_hyp_nvhe(__pkvm_host_share_hyp,
> > +                                       __phys_to_pfn(addr));
> 
> I guess we don't expect this to happen often, but I wonder if it would
> be better to have the looping in the hyp call rather than here, to
> reduce the number of hyp calls when sharing.

Yes, I was wondering the same thing, but ended up doing the looping here
to avoid spending long periods of time in a non-preemptible state at
EL2. Probably doesn't make a big difference for now, but it might if we
ever need to share large memory regions.

Cheers,
Quentin

> 
> Thanks,
> /fuad
> 
> > +               if (ret)
> > +                       return ret;
> > +       }
> > +
> > +       return 0;
> > +}
> > +
> >  /**
> >   * create_hyp_mappings - duplicate a kernel virtual address range in Hyp mode
> >   * @from:      The virtual kernel start address of the range
> > @@ -302,6 +315,13 @@ int create_hyp_mappings(void *from, void *to, enum kvm_pgtable_prot prot)
> >         if (is_kernel_in_hyp_mode())
> >                 return 0;
> >
> > +       if (!kvm_host_owns_hyp_mappings()) {
> > +               if (WARN_ON(prot != PAGE_HYP))
> > +                       return -EPERM;
> > +               return pkvm_share_hyp(kvm_kaddr_to_phys(from),
> > +                                     kvm_kaddr_to_phys(to));
> > +       }
> > +
> >         start = start & PAGE_MASK;
> >         end = PAGE_ALIGN(end);
> >
> > --
> > 2.32.0.432.gabb21c7263-goog
> >
_______________________________________________
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm

WARNING: multiple messages have this Message-ID (diff)
From: Quentin Perret <qperret@google.com>
To: Fuad Tabba <tabba@google.com>
Cc: maz@kernel.org, james.morse@arm.com, alexandru.elisei@arm.com,
	suzuki.poulose@arm.com, catalin.marinas@arm.com, will@kernel.org,
	linux-arm-kernel@lists.infradead.org,
	kvmarm@lists.cs.columbia.edu, linux-kernel@vger.kernel.org,
	ardb@kernel.org, qwandor@google.com, dbrazdil@google.com,
	kernel-team@android.com
Subject: Re: [PATCH v3 20/21] KVM: arm64: Restrict EL2 stage-1 changes in protected mode
Date: Tue, 3 Aug 2021 11:43:26 +0100	[thread overview]
Message-ID: <YQkdztIHwXfj7Sbc@google.com> (raw)
In-Reply-To: <CA+EHjTw7W=5JqH+oZAqLPrf_6222eazDnSk24h4EuGE1VLwKYg@mail.gmail.com>

On Tuesday 03 Aug 2021 at 10:22:03 (+0200), Fuad Tabba wrote:
> Hi Quentin,
> 
> > diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
> > index 0ccea58df7e0..1b67f562b6fc 100644
> > --- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c
> > +++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
> > @@ -338,6 +338,95 @@ static int host_stage2_idmap(u64 addr)
> >         return ret;
> >  }
> >
> > +static inline bool check_prot(enum kvm_pgtable_prot prot,
> > +                             enum kvm_pgtable_prot required,
> > +                             enum kvm_pgtable_prot denied)
> > +{
> > +       return (prot & (required | denied)) == required;
> > +}
> > +
> > +int __pkvm_host_share_hyp(u64 pfn)
> > +{
> > +       phys_addr_t addr = hyp_pfn_to_phys(pfn);
> > +       enum kvm_pgtable_prot prot, cur;
> > +       void *virt = __hyp_va(addr);
> > +       enum pkvm_page_state state;
> > +       kvm_pte_t pte;
> > +       u32 level;
> > +       int ret;
> > +
> > +       if (!range_is_memory(addr, addr + PAGE_SIZE))
> > +               return -EINVAL;
> > +
> > +       hyp_spin_lock(&host_kvm.lock);
> > +       hyp_spin_lock(&pkvm_pgd_lock);
> > +
> > +       ret = kvm_pgtable_get_leaf(&host_kvm.pgt, addr, &pte, &level);
> > +       if (ret)
> > +               goto unlock;
> > +       if (!pte)
> > +               goto map_shared;
> 
> Should this check whether kvm_pte_valid as well, is that guaranteed to
> always be the case, or implicitly handled later?

Yep, this is implicitly handled by kvm_pgtable_stage2_pte_prot() which
is guaranteed not to return KVM_PGTABLE_PROT_RWX for an invalid mapping.

> > +
> > +       /*
> > +        * Check attributes in the host stage-2 PTE. We need the page to be:
> > +        *  - mapped RWX as we're sharing memory;
> > +        *  - not borrowed, as that implies absence of ownership.
> > +        * Otherwise, we can't let it go through
> > +        */
> > +       cur = kvm_pgtable_stage2_pte_prot(pte);
> > +       prot = pkvm_mkstate(0, PKVM_PAGE_SHARED_BORROWED);
> > +       if (!check_prot(cur, KVM_PGTABLE_PROT_RWX, prot)) {
> > +               ret = -EPERM;
> > +               goto unlock;
> > +       }
> > +
> > +       state = pkvm_getstate(cur);
> > +       if (state == PKVM_PAGE_OWNED)
> > +               goto map_shared;
> > +
> > +       /*
> > +        * Tolerate double-sharing the same page, but this requires
> > +        * cross-checking the hypervisor stage-1.
> > +        */
> > +       if (state != PKVM_PAGE_SHARED_OWNED) {
> > +               ret = -EPERM;
> > +               goto unlock;
> > +       }
> > +
> > +       ret = kvm_pgtable_get_leaf(&pkvm_pgtable, (u64)virt, &pte, &level);
> > +       if (ret)
> > +               goto unlock;
> > +
> > +       /*
> > +        * If the page has been shared with the hypervisor, it must be
> > +        * SHARED_BORROWED already.
> > +        */
> 
> This comment confused me at first, but then I realized it's referring
> to the page from the hyp's point of view. Could you add something to
> the comment to that effect?

Sure thing.

> It might also make it easier to follow if the variables could be
> annotated to specify whether cur, state, and prot are the host's or
> hyp's (and not reuse the same one for both).
> 
> > +       cur = kvm_pgtable_hyp_pte_prot(pte);
> > +       prot = pkvm_mkstate(PAGE_HYP, PKVM_PAGE_SHARED_BORROWED);
> > +       if (!check_prot(cur, prot, ~prot))
> > +               ret = EPERM;
> > +       goto unlock;
> > +
> > +map_shared:
> > +       /*
> > +        * If the page is not yet shared, adjust mappings in both page-tables
> > +        * while both locks are held.
> > +        */
> > +       prot = pkvm_mkstate(PAGE_HYP, PKVM_PAGE_SHARED_BORROWED);
> > +       ret = pkvm_create_mappings_locked(virt, virt + PAGE_SIZE, prot);
> > +       BUG_ON(ret);
> > +
> > +       prot = pkvm_mkstate(KVM_PGTABLE_PROT_RWX, PKVM_PAGE_SHARED_OWNED);
> > +       ret = host_stage2_idmap_locked(addr, addr + PAGE_SIZE, prot);
> > +       BUG_ON(ret);
> > +
> > +unlock:
> > +       hyp_spin_unlock(&pkvm_pgd_lock);
> > +       hyp_spin_unlock(&host_kvm.lock);
> > +
> > +       return ret;
> > +}
> > +
> >  void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt)
> >  {
> >         struct kvm_vcpu_fault_info fault;
> > diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
> > index 0625bf2353c2..cbab146cda6a 100644
> > --- a/arch/arm64/kvm/mmu.c
> > +++ b/arch/arm64/kvm/mmu.c
> > @@ -259,10 +259,8 @@ static int __create_hyp_mappings(unsigned long start, unsigned long size,
> >  {
> >         int err;
> >
> > -       if (!kvm_host_owns_hyp_mappings()) {
> > -               return kvm_call_hyp_nvhe(__pkvm_create_mappings,
> > -                                        start, size, phys, prot);
> > -       }
> > +       if (WARN_ON(!kvm_host_owns_hyp_mappings()))
> > +               return -EINVAL;
> >
> >         mutex_lock(&kvm_hyp_pgd_mutex);
> >         err = kvm_pgtable_hyp_map(hyp_pgtable, start, size, phys, prot);
> > @@ -282,6 +280,21 @@ static phys_addr_t kvm_kaddr_to_phys(void *kaddr)
> >         }
> >  }
> >
> > +static int pkvm_share_hyp(phys_addr_t start, phys_addr_t end)
> > +{
> > +       phys_addr_t addr;
> > +       int ret;
> > +
> > +       for (addr = ALIGN_DOWN(start, PAGE_SIZE); addr < end; addr += PAGE_SIZE) {
> > +               ret = kvm_call_hyp_nvhe(__pkvm_host_share_hyp,
> > +                                       __phys_to_pfn(addr));
> 
> I guess we don't expect this to happen often, but I wonder if it would
> be better to have the looping in the hyp call rather than here, to
> reduce the number of hyp calls when sharing.

Yes, I was wondering the same thing, but ended up doing the looping here
to avoid spending long periods of time in a non-preemptible state at
EL2. Probably doesn't make a big difference for now, but it might if we
ever need to share large memory regions.

Cheers,
Quentin

> 
> Thanks,
> /fuad
> 
> > +               if (ret)
> > +                       return ret;
> > +       }
> > +
> > +       return 0;
> > +}
> > +
> >  /**
> >   * create_hyp_mappings - duplicate a kernel virtual address range in Hyp mode
> >   * @from:      The virtual kernel start address of the range
> > @@ -302,6 +315,13 @@ int create_hyp_mappings(void *from, void *to, enum kvm_pgtable_prot prot)
> >         if (is_kernel_in_hyp_mode())
> >                 return 0;
> >
> > +       if (!kvm_host_owns_hyp_mappings()) {
> > +               if (WARN_ON(prot != PAGE_HYP))
> > +                       return -EPERM;
> > +               return pkvm_share_hyp(kvm_kaddr_to_phys(from),
> > +                                     kvm_kaddr_to_phys(to));
> > +       }
> > +
> >         start = start & PAGE_MASK;
> >         end = PAGE_ALIGN(end);
> >
> > --
> > 2.32.0.432.gabb21c7263-goog
> >

_______________________________________________
linux-arm-kernel mailing list
linux-arm-kernel@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-arm-kernel

  reply	other threads:[~2021-08-03 10:43 UTC|newest]

Thread overview: 135+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2021-07-29 13:27 [PATCH v3 00/21] Track shared pages at EL2 in protected mode Quentin Perret
2021-07-29 13:27 ` Quentin Perret
2021-07-29 13:27 ` Quentin Perret
2021-07-29 13:27 ` [PATCH v3 01/21] KVM: arm64: Add hyp_spin_is_locked() for basic locking assertions at EL2 Quentin Perret
2021-07-29 13:27   ` Quentin Perret
2021-07-29 13:27   ` Quentin Perret
2021-07-29 13:27 ` [PATCH v3 02/21] KVM: arm64: Introduce hyp_assert_lock_held() Quentin Perret
2021-07-29 13:27   ` Quentin Perret
2021-07-29 13:27   ` Quentin Perret
2021-07-29 13:28 ` [PATCH v3 03/21] KVM: arm64: Provide the host_stage2_try() helper macro Quentin Perret
2021-07-29 13:28   ` Quentin Perret
2021-07-29 13:28   ` Quentin Perret
2021-08-02  9:36   ` Fuad Tabba
2021-08-02  9:36     ` Fuad Tabba
2021-08-02  9:36     ` Fuad Tabba
2021-07-29 13:28 ` [PATCH v3 04/21] KVM: arm64: Introduce helper to retrieve a PTE and its level Quentin Perret
2021-07-29 13:28   ` Quentin Perret
2021-07-29 13:28   ` Quentin Perret
2021-07-29 13:28 ` [PATCH v3 05/21] KVM: arm64: Expose page-table helpers Quentin Perret
2021-07-29 13:28   ` Quentin Perret
2021-07-29 13:28   ` Quentin Perret
2021-07-29 13:28 ` [PATCH v3 06/21] KVM: arm64: Optimize host memory aborts Quentin Perret
2021-07-29 13:28   ` Quentin Perret
2021-07-29 13:28   ` Quentin Perret
2021-08-02  9:37   ` Fuad Tabba
2021-08-02  9:37     ` Fuad Tabba
2021-08-02  9:37     ` Fuad Tabba
2021-07-29 13:28 ` [PATCH v3 07/21] KVM: arm64: Rename KVM_PTE_LEAF_ATTR_S2_IGNORED Quentin Perret
2021-07-29 13:28   ` Quentin Perret
2021-07-29 13:28   ` Quentin Perret
2021-08-02  9:37   ` Fuad Tabba
2021-08-02  9:37     ` Fuad Tabba
2021-08-02  9:37     ` Fuad Tabba
2021-07-29 13:28 ` [PATCH v3 08/21] KVM: arm64: Don't overwrite software bits with owner id Quentin Perret
2021-07-29 13:28   ` Quentin Perret
2021-07-29 13:28   ` Quentin Perret
2021-08-02  9:38   ` Fuad Tabba
2021-08-02  9:38     ` Fuad Tabba
2021-08-02  9:38     ` Fuad Tabba
2021-07-29 13:28 ` [PATCH v3 09/21] KVM: arm64: Tolerate re-creating hyp mappings to set software bits Quentin Perret
2021-07-29 13:28   ` Quentin Perret
2021-07-29 13:28   ` Quentin Perret
2021-08-02  9:50   ` Fuad Tabba
2021-08-02  9:50     ` Fuad Tabba
2021-08-02  9:50     ` Fuad Tabba
2021-07-29 13:28 ` [PATCH v3 10/21] KVM: arm64: Enable forcing page-level stage-2 mappings Quentin Perret
2021-07-29 13:28   ` Quentin Perret
2021-07-29 13:28   ` Quentin Perret
2021-08-02  9:49   ` Fuad Tabba
2021-08-02  9:49     ` Fuad Tabba
2021-08-02  9:49     ` Fuad Tabba
2021-08-03 10:13     ` Quentin Perret
2021-08-03 10:13       ` Quentin Perret
2021-08-03 10:13       ` Quentin Perret
2021-08-03 10:43       ` Fuad Tabba
2021-08-03 10:43         ` Fuad Tabba
2021-08-03 10:43         ` Fuad Tabba
2021-07-29 13:28 ` [PATCH v3 11/21] KVM: arm64: Allow populating software bits Quentin Perret
2021-07-29 13:28   ` Quentin Perret
2021-07-29 13:28   ` Quentin Perret
2021-07-29 13:28 ` [PATCH v3 12/21] KVM: arm64: Add helpers to tag shared pages in SW bits Quentin Perret
2021-07-29 13:28   ` Quentin Perret
2021-07-29 13:28   ` Quentin Perret
2021-08-02 10:30   ` Fuad Tabba
2021-08-02 10:30     ` Fuad Tabba
2021-08-02 10:30     ` Fuad Tabba
2021-07-29 13:28 ` [PATCH v3 13/21] KVM: arm64: Expose host stage-2 manipulation helpers Quentin Perret
2021-07-29 13:28   ` Quentin Perret
2021-07-29 13:28   ` Quentin Perret
2021-08-02 11:13   ` Fuad Tabba
2021-08-02 11:13     ` Fuad Tabba
2021-08-02 11:13     ` Fuad Tabba
2021-08-03 10:20     ` Quentin Perret
2021-08-03 10:20       ` Quentin Perret
2021-08-03 10:20       ` Quentin Perret
2021-07-29 13:28 ` [PATCH v3 14/21] KVM: arm64: Expose pkvm_hyp_id Quentin Perret
2021-07-29 13:28   ` Quentin Perret
2021-07-29 13:28   ` Quentin Perret
2021-07-29 13:28 ` [PATCH v3 15/21] KVM: arm64: Introduce addr_is_memory() Quentin Perret
2021-07-29 13:28   ` Quentin Perret
2021-07-29 13:28   ` Quentin Perret
2021-08-02 14:52   ` Fuad Tabba
2021-08-02 14:52     ` Fuad Tabba
2021-08-02 14:52     ` Fuad Tabba
2021-08-03 10:23     ` Quentin Perret
2021-08-03 10:23       ` Quentin Perret
2021-08-03 10:23       ` Quentin Perret
2021-07-29 13:28 ` [PATCH v3 16/21] KVM: arm64: Enable retrieving protections attributes of PTEs Quentin Perret
2021-07-29 13:28   ` Quentin Perret
2021-07-29 13:28   ` Quentin Perret
2021-08-02 14:52   ` Fuad Tabba
2021-08-02 14:52     ` Fuad Tabba
2021-08-02 14:52     ` Fuad Tabba
2021-08-03 10:24     ` Quentin Perret
2021-08-03 10:24       ` Quentin Perret
2021-08-03 10:24       ` Quentin Perret
2021-07-29 13:28 ` [PATCH v3 17/21] KVM: arm64: Mark host bss and rodata section as shared Quentin Perret
2021-07-29 13:28   ` Quentin Perret
2021-07-29 13:28   ` Quentin Perret
2021-08-03  5:02   ` Fuad Tabba
2021-08-03  5:02     ` Fuad Tabba
2021-08-03  5:02     ` Fuad Tabba
2021-08-03 10:34     ` Quentin Perret
2021-08-03 10:34       ` Quentin Perret
2021-08-03 10:34       ` Quentin Perret
2021-08-03 10:54       ` Fuad Tabba
2021-08-03 10:54         ` Fuad Tabba
2021-08-03 10:54         ` Fuad Tabba
2021-07-29 13:28 ` [PATCH v3 18/21] KVM: arm64: Remove __pkvm_mark_hyp Quentin Perret
2021-07-29 13:28   ` Quentin Perret
2021-07-29 13:28   ` Quentin Perret
2021-07-29 13:28 ` [PATCH v3 19/21] KVM: arm64: Refactor protected nVHE stage-1 locking Quentin Perret
2021-07-29 13:28   ` Quentin Perret
2021-07-29 13:28   ` Quentin Perret
2021-08-03  5:31   ` Fuad Tabba
2021-08-03  5:31     ` Fuad Tabba
2021-08-03  5:31     ` Fuad Tabba
2021-08-03 10:37     ` Quentin Perret
2021-08-03 10:37       ` Quentin Perret
2021-08-03 10:37       ` Quentin Perret
2021-08-03 10:51       ` Fuad Tabba
2021-08-03 10:51         ` Fuad Tabba
2021-08-03 10:51         ` Fuad Tabba
2021-07-29 13:28 ` [PATCH v3 20/21] KVM: arm64: Restrict EL2 stage-1 changes in protected mode Quentin Perret
2021-07-29 13:28   ` Quentin Perret
2021-07-29 13:28   ` Quentin Perret
2021-08-03  8:22   ` Fuad Tabba
2021-08-03  8:22     ` Fuad Tabba
2021-08-03  8:22     ` Fuad Tabba
2021-08-03 10:43     ` Quentin Perret [this message]
2021-08-03 10:43       ` Quentin Perret
2021-08-03 10:43       ` Quentin Perret
2021-07-29 13:28 ` [PATCH v3 21/21] KVM: arm64: Make __pkvm_create_mappings static Quentin Perret
2021-07-29 13:28   ` Quentin Perret
2021-07-29 13:28   ` Quentin Perret

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=YQkdztIHwXfj7Sbc@google.com \
    --to=qperret@google.com \
    --cc=alexandru.elisei@arm.com \
    --cc=ardb@kernel.org \
    --cc=catalin.marinas@arm.com \
    --cc=dbrazdil@google.com \
    --cc=james.morse@arm.com \
    --cc=kernel-team@android.com \
    --cc=kvmarm@lists.cs.columbia.edu \
    --cc=linux-arm-kernel@lists.infradead.org \
    --cc=linux-kernel@vger.kernel.org \
    --cc=maz@kernel.org \
    --cc=qwandor@google.com \
    --cc=suzuki.poulose@arm.com \
    --cc=tabba@google.com \
    --cc=will@kernel.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.