From mboxrd@z Thu Jan 1 00:00:00 1970
From: Tamas K Lengyel
Subject: [PATCH for-4.5 v8 08/19] xen/mem_event: Clean out superfluous white-spaces
Date: Tue, 23 Sep 2014 15:14:19 +0200
Message-ID: <1411478070-13836-9-git-send-email-tklengyel@sec.in.tum.de>
References: <1411478070-13836-1-git-send-email-tklengyel@sec.in.tum.de>
Mime-Version: 1.0
Content-Type: text/plain; charset="us-ascii"
Content-Transfer-Encoding: 7bit
Return-path:
In-Reply-To: <1411478070-13836-1-git-send-email-tklengyel@sec.in.tum.de>
List-Unsubscribe: ,
List-Post:
List-Help:
List-Subscribe: ,
Sender: xen-devel-bounces@lists.xen.org
Errors-To: xen-devel-bounces@lists.xen.org
To: xen-devel@lists.xen.org
Cc: ian.campbell@citrix.com, tim@xen.org, julien.grall@linaro.org,
 ian.jackson@eu.citrix.com, stefano.stabellini@citrix.com,
 andres@lagarcavilla.org, jbeulich@suse.com, dgdegra@tycho.nsa.gov,
 Tamas K Lengyel
List-Id: xen-devel@lists.xenproject.org

Signed-off-by: Tamas K Lengyel
Acked-by: Tim Deegan
---
v2: Clean the mem_event header as well.
---
 xen/common/mem_event.c      | 20 ++++++++++----------
 xen/include/xen/mem_event.h |  8 ++++----
 2 files changed, 14 insertions(+), 14 deletions(-)

diff --git a/xen/common/mem_event.c b/xen/common/mem_event.c
index aa39f7b..638b4e3 100644
--- a/xen/common/mem_event.c
+++ b/xen/common/mem_event.c
@@ -64,7 +64,7 @@ static int mem_event_enable(
     if ( med->ring_page )
         return -EBUSY;
 
-    /* The parameter defaults to zero, and it should be 
+    /* The parameter defaults to zero, and it should be
      * set to something */
     if ( ring_gfn == 0 )
         return -ENOSYS;
@@ -72,7 +72,7 @@ static int mem_event_enable(
     mem_event_ring_lock_init(med);
     mem_event_ring_lock(med);
 
-    rc = prepare_ring_for_helper(d, ring_gfn, &med->ring_pg_struct, 
+    rc = prepare_ring_for_helper(d, ring_gfn, &med->ring_pg_struct,
                                     &med->ring_page);
     if ( rc < 0 )
         goto err;
@@ -104,7 +104,7 @@ static int mem_event_enable(
     return 0;
 
  err:
-    destroy_ring_for_helper(&med->ring_page, 
+    destroy_ring_for_helper(&med->ring_page,
                             med->ring_pg_struct);
     mem_event_ring_unlock(med);
 
@@ -233,7 +233,7 @@ static int mem_event_disable(struct domain *d, struct mem_event_domain *med)
             }
         }
 
-        destroy_ring_for_helper(&med->ring_page, 
+        destroy_ring_for_helper(&med->ring_page,
                                 med->ring_pg_struct);
         mem_event_ring_unlock(med);
     }
@@ -495,7 +495,7 @@ void mem_event_cleanup(struct domain *d)
          * the disable routine to complete. It will also drop
          * all domain refs the wait-queued vcpus are holding.
          * Finally, because this code path involves previously
-         * pausing the domain (domain_kill), unpausing the 
+         * pausing the domain (domain_kill), unpausing the
          * vcpus causes no harm. */
         destroy_waitqueue_head(&d->mem_event->paging.wq);
         (void)mem_event_disable(d, &d->mem_event->paging);
@@ -579,7 +579,7 @@ int mem_event_domctl(struct domain *d, xen_domctl_mem_event_op_t *mec,
             if ( p2m->pod.entry_count )
                 break;
 
-            rc = mem_event_enable(d, mec, med, _VPF_mem_paging, 
+            rc = mem_event_enable(d, mec, med, _VPF_mem_paging,
                                     HVM_PARAM_PAGING_RING_PFN,
                                     mem_paging_notification);
         }
@@ -600,7 +600,7 @@ int mem_event_domctl(struct domain *d, xen_domctl_mem_event_op_t *mec,
     break;
 #endif
 
-    case XEN_DOMCTL_MEM_EVENT_OP_ACCESS: 
+    case XEN_DOMCTL_MEM_EVENT_OP_ACCESS:
     {
         struct mem_event_domain *med = &d->mem_event->access;
         rc = -EINVAL;
@@ -619,7 +619,7 @@ int mem_event_domctl(struct domain *d, xen_domctl_mem_event_op_t *mec,
             if ( !cpu_has_vmx )
                 break;
 
-            rc = mem_event_enable(d, mec, med, _VPF_mem_access, 
+            rc = mem_event_enable(d, mec, med, _VPF_mem_access,
                                     HVM_PARAM_ACCESS_RING_PFN,
                                     mem_access_notification);
 
@@ -647,7 +647,7 @@ int mem_event_domctl(struct domain *d, xen_domctl_mem_event_op_t *mec,
     break;
 
 #ifdef HAS_MEM_SHARING
-    case XEN_DOMCTL_MEM_EVENT_OP_SHARING: 
+    case XEN_DOMCTL_MEM_EVENT_OP_SHARING:
     {
         struct mem_event_domain *med = &d->mem_event->share;
         rc = -EINVAL;
@@ -666,7 +666,7 @@ int mem_event_domctl(struct domain *d, xen_domctl_mem_event_op_t *mec,
             if ( !hap_enabled(d) )
                 break;
 
-            rc = mem_event_enable(d, mec, med, _VPF_mem_sharing, 
+            rc = mem_event_enable(d, mec, med, _VPF_mem_sharing,
                                     HVM_PARAM_SHARING_RING_PFN,
                                     mem_sharing_notification);
         }
diff --git a/xen/include/xen/mem_event.h b/xen/include/xen/mem_event.h
index 8612b26..4f3ad8e 100644
--- a/xen/include/xen/mem_event.h
+++ b/xen/include/xen/mem_event.h
@@ -37,19 +37,19 @@ bool_t mem_event_check_ring(struct mem_event_domain *med);
 /* Returns 0 on success, -ENOSYS if there is no ring, -EBUSY if there is no
  * available space and the caller is a foreign domain. If the guest itself
  * is the caller, -EBUSY is avoided by sleeping on a wait queue to ensure
- * that the ring does not lose future events. 
+ * that the ring does not lose future events.
  *
  * However, the allow_sleep flag can be set to false in cases in which it is ok
  * to lose future events, and thus -EBUSY can be returned to guest vcpus
- * (handle with care!). 
+ * (handle with care!).
  *
  * In general, you must follow a claim_slot() call with either put_request() or
  * cancel_slot(), both of which are guaranteed to
- * succeed. 
+ * succeed.
  */
 int __mem_event_claim_slot(struct domain *d, struct mem_event_domain *med,
                             bool_t allow_sleep);
-static inline int mem_event_claim_slot(struct domain *d, 
+static inline int mem_event_claim_slot(struct domain *d,
                                         struct mem_event_domain *med)
 {
     return __mem_event_claim_slot(d, med, 1);
-- 
2.1.0
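For context, the comment touched in xen/include/xen/mem_event.h above describes the
claim_slot() contract. Below is a minimal sketch (not part of this patch) of how a
caller is expected to pair those calls; the helper name example_notify and the
want_to_send flag are purely illustrative, while mem_event_claim_slot(),
mem_event_cancel_slot() and mem_event_put_request() are the interfaces declared in
that header.

#include <xen/sched.h>
#include <xen/mem_event.h>

/* Illustrative sketch: every successful claim_slot() is followed by
 * either put_request() or cancel_slot(), as the header comment requires. */
static void example_notify(struct domain *d, struct mem_event_domain *med,
                           mem_event_request_t *req, bool_t want_to_send)
{
    /* May sleep if the caller is a guest vcpu and the ring is full;
     * returns non-zero if there is no ring, or if a foreign caller
     * finds no space available. */
    if ( mem_event_claim_slot(d, med) != 0 )
        return;

    if ( !want_to_send )
    {
        /* Decided not to send after all: release the claimed slot. */
        mem_event_cancel_slot(d, med);
        return;
    }

    /* Guaranteed to succeed once a slot has been claimed. */
    mem_event_put_request(d, med, req);
}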