From: Andrew Cooper <andrew.cooper3@citrix.com>
To: Kai Huang <kai.huang@linux.intel.com>,
	jbeulich@suse.com, tim@xen.org, kevin.tian@intel.com,
	yang.z.zhang@intel.com, xen-devel@lists.xen.org
Subject: Re: [PATCH 05/10] VMX: add help functions to support PML
Date: Fri, 27 Mar 2015 21:09:41 +0000
Message-ID: <5515C715.3050404@citrix.com>
In-Reply-To: <1427423754-11841-6-git-send-email-kai.huang@linux.intel.com>

On 27/03/15 02:35, Kai Huang wrote:
> This patch adds help functions to enable/disable PML, and flush PML buffer for
> single vcpu and particular domain for further use.
>
> Signed-off-by: Kai Huang <kai.huang@linux.intel.com>
> ---
>  xen/arch/x86/hvm/vmx/vmcs.c        | 190 +++++++++++++++++++++++++++++++++++++
>  xen/include/asm-x86/hvm/vmx/vmcs.h |   9 ++
>  2 files changed, 199 insertions(+)
>
> diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
> index 2798b0b..17cbef4 100644
> --- a/xen/arch/x86/hvm/vmx/vmcs.c
> +++ b/xen/arch/x86/hvm/vmx/vmcs.c
> @@ -1326,6 +1326,196 @@ void vmx_clear_eoi_exit_bitmap(struct vcpu *v, u8 vector)
>                  &v->arch.hvm_vmx.eoi_exitmap_changed);
>  }
>  
> +int vmx_vcpu_pml_enabled(struct vcpu *v)

bool_t vmx_vcpu_pml_enabled(const struct vcpu *v)

> +{
> +    return (v->arch.hvm_vmx.secondary_exec_control &
> +            SECONDARY_EXEC_ENABLE_PML) ? 1 : 0;

This would be slightly shorter as
!!(v->arch.hvm_vmx.secondary_exec_control & SECONDARY_EXEC_ENABLE_PML)
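
Combined with the bool_t/const change above, something along these lines
(untested sketch):

bool_t vmx_vcpu_pml_enabled(const struct vcpu *v)
{
    /* Non-zero iff PML is enabled in the secondary execution controls. */
    return !!(v->arch.hvm_vmx.secondary_exec_control &
              SECONDARY_EXEC_ENABLE_PML);
}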

> +}
> +
> +int vmx_vcpu_enable_pml(struct vcpu *v)
> +{
> +    struct domain *d = v->domain;
> +
> +    ASSERT(!vmx_vcpu_pml_enabled(v));
> +
> +    v->arch.hvm_vmx.pml_pg = d->arch.paging.alloc_page(d);
> +    if ( !v->arch.hvm_vmx.pml_pg )
> +        return -ENOMEM;
> +
> +    vmx_vmcs_enter(v);
> +
> +    __vmwrite(PML_ADDRESS, page_to_mfn(v->arch.hvm_vmx.pml_pg) << PAGE_SHIFT);
> +    __vmwrite(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
> +
> +    v->arch.hvm_vmx.secondary_exec_control |= SECONDARY_EXEC_ENABLE_PML;
> +
> +    __vmwrite(SECONDARY_VM_EXEC_CONTROL,
> +            v->arch.hvm_vmx.secondary_exec_control);

Alignment.
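i.e. the continuation should line up with the first argument, along the lines
of:

    __vmwrite(SECONDARY_VM_EXEC_CONTROL,
              v->arch.hvm_vmx.secondary_exec_control);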

> +
> +    vmx_vmcs_exit(v);
> +
> +    return 0;
> +}
> +
> +void vmx_vcpu_disable_pml(struct vcpu *v)
> +{
> +    ASSERT(vmx_vcpu_pml_enabled(v));
> +
> +    vmx_vmcs_enter(v);
> +
> +    v->arch.hvm_vmx.secondary_exec_control &= ~SECONDARY_EXEC_ENABLE_PML;
> +    __vmwrite(SECONDARY_VM_EXEC_CONTROL,
> +            v->arch.hvm_vmx.secondary_exec_control);
> +
> +    vmx_vmcs_exit(v);
> +
> +    v->domain->arch.paging.free_page(v->domain, v->arch.hvm_vmx.pml_pg);
> +    v->arch.hvm_vmx.pml_pg = NULL;
> +}
> +
> +void vmx_vcpu_flush_pml_buffer(struct vcpu *v)
> +{
> +    uint64_t *pml_buf;
> +    unsigned long pml_idx;
> +
> +    ASSERT(vmx_vcpu_pml_enabled(v));
> +
> +    vmx_vmcs_enter(v);
> +
> +    __vmread(GUEST_PML_INDEX, &pml_idx);
> +
> +    /* Do nothing if PML buffer is empty */
> +    if ( pml_idx == (PML_ENTITY_NUM - 1) )
> +        goto out;
> +
> +    pml_buf = map_domain_page(page_to_mfn(v->arch.hvm_vmx.pml_pg));

__map_domain_page() is a wrapper which takes a struct page_info *.
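i.e. something like:

    pml_buf = __map_domain_page(v->arch.hvm_vmx.pml_pg);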

> +
> +    /*
> +     * PML index can be either 2^16-1 (buffer is full), or 0~511 (buffer is not
> +     * full), and in latter case PML index always points to next available
> +     * entity.
> +     */
> +    if (pml_idx >= PML_ENTITY_NUM)
> +        pml_idx = 0;
> +    else
> +        pml_idx++;
> +
> +    for ( ; pml_idx < PML_ENTITY_NUM; pml_idx++ )
> +    {
> +        struct p2m_domain *p2m = p2m_get_hostp2m(v->domain);

This p2m_get_hostp2m() call should be hoisted out of the loop.
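i.e. something like (sketch; loop body otherwise unchanged):

    /* The host p2m doesn't change across iterations. */
    struct p2m_domain *p2m = p2m_get_hostp2m(v->domain);

    for ( ; pml_idx < PML_ENTITY_NUM; pml_idx++ )
    {
        /* ... existing loop body, minus the per-iteration p2m lookup ... */
    }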

> +        unsigned long gfn;
> +        mfn_t mfn;
> +        p2m_type_t t;
> +        p2m_access_t a;
> +
> +        gfn = pml_buf[pml_idx] >> PAGE_SHIFT;
> +        mfn = p2m->get_entry(p2m, gfn, &t, &a, 0, NULL);
> +        if ( mfn_x(mfn) == INVALID_MFN )
> +        {
> +            /*
> +             * Either EPT table entry for mapping the GFN has been destroyed, or
> +             * there's something wrong with hardware behavior, in both cases we
> +             * should report a warning.
> +             */
> +            dprintk(XENLOG_WARNING, "PML: vcpu %d: invalid GPA 0x%lx logged\n",
> +                    v->vcpu_id, pml_buf[pml_idx]);

It would be shorter to log gfn rather than gpa.
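e.g. something like:

    dprintk(XENLOG_WARNING, "PML: vcpu %d: invalid GFN 0x%lx logged\n",
            v->vcpu_id, gfn);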

> +            continue;
> +        }
> +
> +        /*
> +         * Need to change type from log-dirty to normal memory for logged GFN.
> +         * hap_track_dirty_vram depends on it to work. And we really only need
> +         * to mark GFNs which have been successfully changed from log-dirty to
> +         * normal memory to be dirty.
> +         */
> +        if ( !p2m_change_type_one(v->domain, gfn, p2m_ram_logdirty,
> +                    p2m_ram_rw) )
> +            paging_mark_dirty(v->domain, mfn_x(mfn));
> +    }
> +
> +    unmap_domain_page(pml_buf);
> +
> +    /* Reset PML index */
> +    __vmwrite(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
> +
> +out:
> +    vmx_vmcs_exit(v);
> +}
> +
> +int vmx_domain_pml_enabled(struct domain *d)

bool_t and const, as per the vcpu variant.
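i.e. something like:

bool_t vmx_domain_pml_enabled(const struct domain *d)
{
    return !!(d->arch.hvm_domain.vmx.status & VMX_DOMAIN_PML_ENABLED);
}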

> +{
> +    return (d->arch.hvm_domain.vmx.status & VMX_DOMAIN_PML_ENABLED) ? 1 : 0;
> +}
> +
> +/*
> + * This function enables PML for particular domain. It should be called when
> + * domain is paused.

In which case assert that the domain is paused, or call domain_pause()
yourself to take an extra pause refcount.
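e.g. something like (sketch; assumes d->pause_count is suitable to check
directly here):

    ASSERT(atomic_read(&d->pause_count));  /* domain must already be paused */

or take an extra reference yourself for the duration:

    domain_pause(d);
    /* ... enable PML on each vcpu ... */
    domain_unpause(d);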

> + *
> + * PML needs to be enabled globally for all vcpus of the domain, as PML buffer
> + * and PML index are per-vcpu, but EPT table is shared by vcpus, therefore
> + * enabling PML on partial vcpus won't work.
> + */
> +int vmx_domain_enable_pml(struct domain *d)
> +{
> +    struct vcpu *v;
> +
> +    ASSERT(!vmx_domain_pml_enabled(d));
> +
> +    for_each_vcpu( d, v )
> +    {
> +        if ( vmx_vcpu_enable_pml(v) )
> +            goto error;

Please catch the actual rc from vmx_vcpu_enable_pml() and propagate out
of this function, rather than clobbering -ENOMEM with -EINVAL.

Also, per Xen style, you can drop the braces.
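
Something along these lines (untested sketch):

int vmx_domain_enable_pml(struct domain *d)
{
    struct vcpu *v;
    int rc;

    ASSERT(!vmx_domain_pml_enabled(d));

    for_each_vcpu( d, v )
        if ( (rc = vmx_vcpu_enable_pml(v)) != 0 )
            goto error;

    d->arch.hvm_domain.vmx.status |= VMX_DOMAIN_PML_ENABLED;

    return 0;

error:
    /* Roll back any vcpus which did get PML enabled. */
    for_each_vcpu( d, v )
        if ( vmx_vcpu_pml_enabled(v) )
            vmx_vcpu_disable_pml(v);

    return rc;
}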

~Andrew

> +    }
> +
> +    d->arch.hvm_domain.vmx.status |= VMX_DOMAIN_PML_ENABLED;
> +
> +    return 0;
> +
> +error:
> +    for_each_vcpu( d, v )
> +    {
> +        if ( vmx_vcpu_pml_enabled(v) )
> +            vmx_vcpu_disable_pml(v);
> +    }
> +    return -EINVAL;
> +}
> +
> +/*
> + * Disable PML for particular domain. Called when domain is paused.
> + *
> + * The same as enabling PML for domain, disabling PML should be done for all
> + * vcpus at once.
> + */
> +void vmx_domain_disable_pml(struct domain *d)
> +{
> +    struct vcpu *v;
> +
> +    ASSERT(vmx_domain_pml_enabled(d));
> +
> +    for_each_vcpu( d, v )
> +    {
> +        vmx_vcpu_disable_pml(v);
> +    }
> +
> +    d->arch.hvm_domain.vmx.status &= ~VMX_DOMAIN_PML_ENABLED;
> +}
> +
> +/*
> + * Flush PML buffer of all vcpus, and update the logged dirty pages to log-dirty
> + * radix tree. Called when domain is paused.
> + */
> +void vmx_domain_flush_pml_buffers(struct domain *d)
> +{
> +    struct vcpu *v;
> +
> +    ASSERT(vmx_domain_pml_enabled(d));
> +
> +    for_each_vcpu( d, v )
> +    {
> +        vmx_vcpu_flush_pml_buffer(v);
> +    }
> +}
> +
>  int vmx_create_vmcs(struct vcpu *v)
>  {
>      struct arch_vmx_struct *arch_vmx = &v->arch.hvm_vmx;
> diff --git a/xen/include/asm-x86/hvm/vmx/vmcs.h b/xen/include/asm-x86/hvm/vmx/vmcs.h
> index 8cc1122..939d097 100644
> --- a/xen/include/asm-x86/hvm/vmx/vmcs.h
> +++ b/xen/include/asm-x86/hvm/vmx/vmcs.h
> @@ -499,6 +499,15 @@ static inline int vmx_add_host_load_msr(u32 msr)
>  
>  DECLARE_PER_CPU(bool_t, vmxon);
>  
> +int vmx_vcpu_pml_enabled(struct vcpu *v);
> +int vmx_vcpu_enable_pml(struct vcpu *v);
> +void vmx_vcpu_disable_pml(struct vcpu *v);
> +void vmx_vcpu_flush_pml_buffer(struct vcpu *v);
> +int vmx_domain_pml_enabled(struct domain *d);
> +int vmx_domain_enable_pml(struct domain *d);
> +void vmx_domain_disable_pml(struct domain *d);
> +void vmx_domain_flush_pml_buffers(struct domain *d);
> +
>  #endif /* ASM_X86_HVM_VMX_VMCS_H__ */
>  
>  /*
