Date: Wed, 6 Feb 2013 13:31:54 -0800
From: Mukesh Rathor
To: Konrad Rzeszutek Wilk
Cc: Konrad Rzeszutek Wilk, "Xen-devel@lists.xensource.com",
 "linux-kernel@vger.kernel.org", Ian Campbell,
 "stefano.stabellini@eu.citrix.com"
Subject: Re: [PATCH] PVH linux: Use ballooning to allocate grant table pages
Message-ID: <20130206133154.76830173@mantra.us.oracle.com>
In-Reply-To: <20130206154910.GA31828@konrad-lan.dumpdata.com>
References: <20130131183015.13bc2bff@mantra.us.oracle.com>
 <20130206154910.GA31828@konrad-lan.dumpdata.com>

On Wed, 6 Feb 2013 10:49:13 -0500
Konrad Rzeszutek Wilk wrote:

> On Thu, Jan 31, 2013 at 06:30:15PM -0800, Mukesh Rathor wrote:
> > This patch fixes a fixme in Linux to use alloc_xenballooned_pages()
> > to allocate pfns for grant table pages instead of kmalloc. This also
> > simplifies add to physmap on the xen side a bit.
>
> Pulled this.
>

Konrad, no, there was a follow-up email on this thread asking to discard
this one. Please discard it; I resent the patch yesterday with the proper
fixes. I realize now I should've given yesterday's patch a version number.
My bad, this head cold is crippling my brain :).. Sorry for the confusion.

Mukesh

Following is the latest patch I emailed yesterday:

This patch fixes a FIXME in Linux by using alloc_xenballooned_pages() to
allocate the pfns for grant table pages instead of kmalloc(). This also
simplifies the add-to-physmap on the Xen side a bit.

Signed-off-by: Mukesh Rathor

diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index 9c0019d..fdb1d88 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -47,6 +47,7 @@
 #include
 #include
 #include
+#include <xen/balloon.h>
 #include
 #include
@@ -1026,10 +1027,22 @@ static void gnttab_unmap_frames_v2(void)
 	arch_gnttab_unmap(grstatus, nr_status_frames(nr_grant_frames));
 }
 
+static xen_pfn_t pvh_get_grant_pfn(int grant_idx)
+{
+	unsigned long vaddr;
+	unsigned int level;
+	pte_t *pte;
+
+	vaddr = (unsigned long)(gnttab_shared.addr) + grant_idx * PAGE_SIZE;
+	pte = lookup_address(vaddr, &level);
+	BUG_ON(pte == NULL);
+	return pte_mfn(*pte);
+}
+
 static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
 {
 	struct gnttab_setup_table setup;
-	unsigned long *frames, start_gpfn;
+	unsigned long *frames, start_gpfn = 0;
 	unsigned int nr_gframes = end_idx + 1;
 	int rc;
 
@@ -1040,8 +1053,6 @@ static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
 
 	if (xen_hvm_domain())
 		start_gpfn = xen_hvm_resume_frames >> PAGE_SHIFT;
-	else
-		start_gpfn = virt_to_pfn(gnttab_shared.addr);
 	/*
 	 * Loop backwards, so that the first hypercall has the largest
 	 * index, ensuring that the table will grow only once.
@@ -1050,7 +1061,11 @@ static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
 		xatp.domid = DOMID_SELF;
 		xatp.idx = i;
 		xatp.space = XENMAPSPACE_grant_table;
-		xatp.gpfn = start_gpfn + i;
+		if (xen_hvm_domain())
+			xatp.gpfn = start_gpfn + i;
+		else
+			xatp.gpfn = pvh_get_grant_pfn(i);
+
 		rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp);
 		if (rc != 0) {
 			printk(KERN_WARNING
@@ -1138,27 +1153,51 @@ static void gnttab_request_version(void)
 			grant_table_version);
 }
 
+/*
+ * PVH: we need three things: virtual address, pfns, and mfns. The pfns
+ * are allocated via ballooning, then we call arch_gnttab_map_shared to
+ * allocate the VA and put pfn's in the pte's for the VA. The mfn's are
+ * finally allocated in gnttab_map() by xen which also populates the P2M.
+ */
+static int xlated_setup_gnttab_pages(unsigned long numpages, void **addr)
+{
+	int i, rc;
+	unsigned long pfns[numpages];
+	struct page *pages[numpages];
+
+	rc = alloc_xenballooned_pages(numpages, pages, 0);
+	if (rc != 0) {
+		pr_warn("%s Couldn't balloon alloc %ld pfns rc:%d\n", __func__,
+			numpages, rc);
+		return rc;
+	}
+	for (i = 0; i < numpages; i++)
+		pfns[i] = page_to_pfn(pages[i]);
+
+	rc = arch_gnttab_map_shared(pfns, numpages, numpages, addr);
+	if (rc != 0)
+		free_xenballooned_pages(numpages, pages);
+
+	return rc;
+}
+
 int gnttab_resume(void)
 {
+	int rc;
 	unsigned int max_nr_gframes;
-	char *kmsg = "Failed to kmalloc pages for pv in hvm grant frames\n";
 
 	gnttab_request_version();
	max_nr_gframes = gnttab_max_grant_frames();
 	if (max_nr_gframes < nr_grant_frames)
 		return -ENOSYS;
 
-	/* PVH note: xen will free existing kmalloc'd mfn in
-	 * XENMEM_add_to_physmap. TBD/FIXME: use xen ballooning instead of
-	 * kmalloc(). */
 	if (xen_pv_domain() && xen_feature(XENFEAT_auto_translated_physmap) &&
 	    !gnttab_shared.addr) {
-		gnttab_shared.addr =
-			kmalloc(max_nr_gframes * PAGE_SIZE, GFP_KERNEL);
-		if (!gnttab_shared.addr) {
-			pr_warn("%s", kmsg);
-			return -ENOMEM;
-		}
+
+		rc = xlated_setup_gnttab_pages((unsigned long)max_nr_gframes,
+					       &gnttab_shared.addr);
+		if (rc != 0)
+			return rc;
 	}
 	if (xen_pv_domain())
 		return gnttab_map(0, nr_grant_frames - 1);
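
For reference, here is a condensed sketch of the balloon-backed allocation
that xlated_setup_gnttab_pages() performs above. It is illustrative only and
not part of the patch: the function name example_balloon_backed_pfns() is
made up, while alloc_xenballooned_pages(), free_xenballooned_pages() and
page_to_pfn() are the same interfaces the patch itself relies on.

/*
 * Illustrative sketch only, not part of the patch.  It condenses the
 * balloon-backed allocation done by xlated_setup_gnttab_pages(): reserve
 * pfns with no backing mfn from the balloon driver and record them for a
 * later mapping step.  example_balloon_backed_pfns() is a made-up name.
 */
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <xen/balloon.h>

static int example_balloon_backed_pfns(int nr_pages, unsigned long *pfns)
{
	struct page **pages;
	int i, rc;

	pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	/* Reserve lowmem pfns from the balloon, as the patch does with "0". */
	rc = alloc_xenballooned_pages(nr_pages, pages, false);
	if (rc) {
		kfree(pages);
		return rc;
	}

	for (i = 0; i < nr_pages; i++)
		pfns[i] = page_to_pfn(pages[i]);

	/*
	 * A real caller would now map the pfns into a contiguous VA range
	 * (arch_gnttab_map_shared() in the patch) and, if that fails, undo
	 * the reservation with free_xenballooned_pages(nr_pages, pages).
	 */
	kfree(pages);
	return 0;
}

The point of using ballooned pages rather than kmalloc() is that the pfns
handed to XENMEM_add_to_physmap have no machine frame behind them yet, so
nothing is lost when Xen installs the grant frame mfns; with the kmalloc()
approach, Xen would free the existing backing mfns during the
XENMEM_add_to_physmap call, which is what the removed FIXME comment refers to.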