From f0c7abdf9a17db9e512fc4427c122afcf386704b Mon Sep 17 00:00:00 2001
From: Tiejun Chen
Date: Thu, 16 Jul 2015 14:52:52 +0800
Subject: [PATCH] hvmloader/pci: Try to avoid placing BARs in RMRRs

Try to avoid placing PCI BARs over RMRRs:

- If mmio_hole_size is not specified, and the existing MMIO range has
  RMRRs in it, and there is space to expand the hole in lowmem without
  moving more memory, then make the MMIO hole as large as possible.

- When placing BARs, find the next RMRR higher than the current base
  in the lowmem MMIO hole.  If it overlaps, skip ahead of it and find
  the next one.

This certainly won't work in all cases, but it should work in a
significant number of cases.  Additionally, users should be able to
work around problems by setting mmio_hole_size larger in the guest
config.

Signed-off-by: George Dunlap
---
THIS WILL NOT COMPILE, as it needs check_overlap_all() to be
implemented.  It's just a proof-of-concept for discussion.
---
 tools/firmware/hvmloader/pci.c | 42 ++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 42 insertions(+)

diff --git a/tools/firmware/hvmloader/pci.c b/tools/firmware/hvmloader/pci.c
index 5ff87a7..dcb8cd0 100644
--- a/tools/firmware/hvmloader/pci.c
+++ b/tools/firmware/hvmloader/pci.c
@@ -38,6 +38,25 @@ uint64_t pci_hi_mem_start = 0, pci_hi_mem_end = 0;
 enum virtual_vga virtual_vga = VGA_none;
 unsigned long igd_opregion_pgbase = 0;
 
+/* Find the lowest RMRR above base; return its memory_map index, or -1. */
+int find_next_rmrr(uint32_t base)
+{
+    int i, next_rmrr = -1;
+    uint64_t min_base = (1ull << 32);
+
+    for ( i = 0; i < memory_map.nr_map; i++ )
+    {
+        if ( memory_map.map[i].type == E820_RESERVED &&
+             memory_map.map[i].addr > base &&
+             memory_map.map[i].addr < min_base )
+        {
+            next_rmrr = i;
+            min_base = memory_map.map[i].addr;
+        }
+    }
+    return next_rmrr;
+}
+
 void pci_setup(void)
 {
     uint8_t is_64bar, using_64bar, bar64_relocate = 0;
@@ -299,6 +318,15 @@ void pci_setup(void)
                     || (((pci_mem_start << 1) >> PAGE_SHIFT)
                         >= hvm_info->low_mem_pgend)) )
             pci_mem_start <<= 1;
+
+        /*
+         * Try to accommodate RMRRs in our MMIO region on a best-effort basis.
+         * If we have RMRRs in the range, then make pci_mem_start just after
+         * hvm_info->low_mem_pgend.
+         */
+        if ( pci_mem_start > (hvm_info->low_mem_pgend << PAGE_SHIFT) &&
+             check_overlap_all(pci_mem_start, pci_mem_end - pci_mem_start) )
+            pci_mem_start = (hvm_info->low_mem_pgend + 1) << PAGE_SHIFT;
     }
 
     if ( mmio_total > (pci_mem_end - pci_mem_start) )
@@ -352,6 +380,8 @@ void pci_setup(void)
     io_resource.base = 0xc000;
     io_resource.max = 0x10000;
 
+    next_rmrr = find_next_rmrr(pci_mem_start);
+
     /* Assign iomem and ioport resources in descending order of size. */
    for ( i = 0; i < nr_bars; i++ )
     {
@@ -407,6 +437,18 @@ void pci_setup(void)
         }
 
         base = (resource->base + bar_sz - 1) & ~(uint64_t)(bar_sz - 1);
+
+        /* If we're using mem_resource, check for RMRR conflicts. */
+        while ( resource == &mem_resource &&
+                next_rmrr >= 0 &&
+                check_overlap(base, bar_sz,
+                              memory_map.map[next_rmrr].addr,
+                              memory_map.map[next_rmrr].size) )
+        {
+            base = memory_map.map[next_rmrr].addr + memory_map.map[next_rmrr].size;
+            base = (base + bar_sz - 1) & ~(uint64_t)(bar_sz - 1);
+            next_rmrr = find_next_rmrr(base);
+        }
         bar_data |= (uint32_t)base;
         bar_data_upper = (uint32_t)(base >> 32);
         base += bar_sz;
-- 
1.9.1
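
For completeness, here is a minimal sketch of the pieces the patch
relies on but does not include.  This is an assumption based on how
the patch calls them, not part of the patch itself: check_overlap()
is taken to be the usual range-intersection test, check_overlap_all()
applies it to every reserved (RMRR) entry in memory_map, and
pci_setup() would additionally need "int next_rmrr;" among its locals.

/*
 * Sketch only (assumed helper): returns nonzero if [start, start+size)
 * and [reserved_start, reserved_start+reserved_size) intersect.
 */
static int check_overlap(uint64_t start, uint64_t size,
                         uint64_t reserved_start, uint64_t reserved_size)
{
    return (start + size > reserved_start) &&
           (start < reserved_start + reserved_size);
}

/*
 * Sketch only (assumed helper): returns nonzero if [start, start+size)
 * overlaps any E820_RESERVED (RMRR) entry in the guest memory map.
 */
static int check_overlap_all(uint64_t start, uint64_t size)
{
    int i;

    for ( i = 0; i < memory_map.nr_map; i++ )
        if ( memory_map.map[i].type == E820_RESERVED &&
             check_overlap(start, size,
                           memory_map.map[i].addr,
                           memory_map.map[i].size) )
            return 1;

    return 0;
}

A linear scan should be fine here: memory_map holds at most a handful
of entries, and both helpers run only a few times during BAR placement.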