tree:   https://git.kernel.org/pub/scm/linux/kernel/git/maz/arm-platforms.git kvm-arm64/nv-wip-v5.0-rc5
head:   89419a20770a8ec94400940e3db1c4d5c276b470
commit: 89419a20770a8ec94400940e3db1c4d5c276b470 [96/96] arm64: KVM: Tag shadow s2 entries with nested level
config: arm-axm55xx_defconfig (attached as .config)
compiler: arm-linux-gnueabi-gcc (Debian 8.2.0-11) 8.2.0
reproduce:
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        git checkout 89419a20770a8ec94400940e3db1c4d5c276b470
        # save the attached .config to linux build tree
        GCC_VERSION=8.2.0 make.cross ARCH=arm

All errors (new ones prefixed by >>):

   arch/arm/kvm/../../../virt/kvm/arm/mmu.c: In function 'user_mem_abort':
>> arch/arm/kvm/../../../virt/kvm/arm/mmu.c:1851:17: error: implicit declaration of function 'FIELD_PREP'; did you mean 'FIELD_SIZEOF'? [-Werror=implicit-function-declaration]
      l1_s2_level = FIELD_PREP(KVM_NV_GUEST_MAP_SZ,
                    ^~~~~~~~~~
                    FIELD_SIZEOF
>> arch/arm/kvm/../../../virt/kvm/arm/mmu.c:1851:28: error: 'KVM_NV_GUEST_MAP_SZ' undeclared (first use in this function); did you mean 'KVM_SEV_GUEST_STATUS'?
      l1_s2_level = FIELD_PREP(KVM_NV_GUEST_MAP_SZ,
                               ^~~~~~~~~~~~~~~~~~~
                               KVM_SEV_GUEST_STATUS
   arch/arm/kvm/../../../virt/kvm/arm/mmu.c:1851:28: note: each undeclared identifier is reported only once for each function it appears in
   arch/arm/kvm/../../../virt/kvm/arm/mmu.c: In function 'kvm_handle_guest_abort':
   arch/arm/kvm/../../../virt/kvm/arm/mmu.c:2074:40: warning: passing argument 2 of 'kvm_s2_handle_perm_fault' makes integer from pointer without a cast [-Wint-conversion]
      ret = kvm_s2_handle_perm_fault(vcpu, &nested_trans);
                                           ^~~~~~~~~~~~~
   In file included from arch/arm/kvm/../../../virt/kvm/arm/mmu.c:28:
   arch/arm/include/asm/kvm_mmu.h:468:21: note: expected 'phys_addr_t' {aka 'long long unsigned int'} but argument is of type 'struct kvm_s2_trans *'
        phys_addr_t ipa,
        ~~~~~~~~~~~~^~~
   arch/arm/kvm/../../../virt/kvm/arm/mmu.c:2074:9: error: too few arguments to function 'kvm_s2_handle_perm_fault'
      ret = kvm_s2_handle_perm_fault(vcpu, &nested_trans);
            ^~~~~~~~~~~~~~~~~~~~~~~~
   In file included from arch/arm/kvm/../../../virt/kvm/arm/mmu.c:28:
   arch/arm/include/asm/kvm_mmu.h:467:19: note: declared here
    static inline int kvm_s2_handle_perm_fault(struct kvm_vcpu *vcpu,
                      ^~~~~~~~~~~~~~~~~~~~~~~~
   cc1: some warnings being treated as errors
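Both 1851 errors are 32-bit arm fallout from the shared virt/kvm/arm/mmu.c:
FIELD_PREP() lives in <linux/bitfield.h>, which the file does not include,
and KVM_NV_GUEST_MAP_SZ is presumably defined only in the arm64 nested-virt
headers added by this series. A minimal sketch of the kind of change that
would let the arm build proceed; the header placement and the GENMASK_ULL
bit range are assumptions, not taken from the series:

    /* virt/kvm/arm/mmu.c */
    #include <linux/bitfield.h>	/* for FIELD_PREP() */

    /*
     * arch/arm/include/asm/kvm_mmu.h (placement assumed): 32-bit arm
     * never runs nested guests, so a dummy mask is enough to keep the
     * shared FIELD_PREP() call compiling.
     */
    #ifndef KVM_NV_GUEST_MAP_SZ
    #define KVM_NV_GUEST_MAP_SZ	GENMASK_ULL(56, 55)	/* hypothetical SW bits */
    #endif

The kvm_s2_handle_perm_fault() mismatch at 2074 is a separate problem; a
sketch follows the code excerpt below.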
vim +1851 arch/arm/kvm/../../../virt/kvm/arm/mmu.c

  1716	
  1717	#define set_desc_bits(which, desc, val)				\
  1718		({ desc = __ ## which(which ## _val(desc) | val); })
  1719	
  1720	static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
  1721				  struct kvm_s2_trans *nested,
  1722				  struct kvm_memory_slot *memslot,
  1723				  unsigned long hva, unsigned long fault_status)
  1724	{
  1725		int ret;
  1726		bool write_fault, writable, readable = true;
  1727		bool exec_fault, needs_exec;
  1728		unsigned long mmu_seq;
  1729		phys_addr_t ipa = fault_ipa;
  1730		gfn_t gfn;
  1731		struct kvm *kvm = vcpu->kvm;
  1732		struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
  1733		struct vm_area_struct *vma;
  1734		kvm_pfn_t pfn;
  1735		pgprot_t mem_type = PAGE_S2;
  1736		bool logging_active = memslot_is_logging(memslot);
  1737		unsigned long vma_pagesize, flags = 0;
  1738		struct kvm_s2_mmu *mmu = vcpu->arch.hw_mmu;
  1739		unsigned long max_map_size = PUD_SIZE;
  1740		pteval_t l1_s2_level = 0;
  1741	
  1742		write_fault = kvm_is_write_fault(vcpu);
  1743		exec_fault = kvm_vcpu_trap_is_iabt(vcpu);
  1744		VM_BUG_ON(write_fault && exec_fault);
  1745	
  1746		if (fault_status == FSC_PERM && !write_fault && !exec_fault) {
  1747			kvm_err("Unexpected L2 read permission error\n");
  1748			return -EFAULT;
  1749		}
  1750	
  1751		if (!fault_supports_stage2_pmd_mappings(memslot, hva))
  1752			max_map_size = PAGE_SIZE;
  1753	
  1754		if (logging_active)
  1755			max_map_size = PAGE_SIZE;
  1756	
  1757		/* Let's check if we will get back a huge page backed by hugetlbfs */
  1758		down_read(&current->mm->mmap_sem);
  1759		vma = find_vma_intersection(current->mm, hva, hva + 1);
  1760		if (unlikely(!vma)) {
  1761			kvm_err("Failed to find VMA for hva 0x%lx\n", hva);
  1762			up_read(&current->mm->mmap_sem);
  1763			return -EFAULT;
  1764		}
  1765	
  1766		if (kvm_is_shadow_s2_fault(vcpu)) {
  1767			ipa = kvm_s2_trans_output(nested);
  1768	
  1769			/*
  1770			 * If we're about to create a shadow stage 2 entry, then we
  1771			 * can only create a block mapping if the guest stage 2 page
  1772			 * table uses at least as big a mapping.
  1773			 */
  1774			max_map_size = min(kvm_s2_trans_size(nested), max_map_size);
  1775		}
  1776		gfn = ipa >> PAGE_SHIFT;
  1777	
  1778		vma_pagesize = min(vma_kernel_pagesize(vma), max_map_size);
  1779		/*
  1780		 * The stage2 has a minimum of 2 level table (For arm64 see
  1781		 * kvm_arm_setup_stage2()). Hence, we are guaranteed that we can
  1782		 * use PMD_SIZE huge mappings (even when the PMD is folded into PGD).
  1783		 * As for PUD huge maps, we must make sure that we have at least
  1784		 * 3 levels, i.e, PMD is not folded.
  1785		 */
  1786		if ((vma_pagesize == PMD_SIZE ||
  1787		     (vma_pagesize == PUD_SIZE && kvm_stage2_has_pmd(kvm)))) {
  1788			gfn = (ipa & huge_page_mask(hstate_vma(vma))) >> PAGE_SHIFT;
  1789		}
  1790		up_read(&current->mm->mmap_sem);
  1791	
  1792	
  1793		/* We need minimum second+third level pages */
  1794		ret = mmu_topup_memory_cache(memcache, kvm_mmu_cache_min_pages(kvm),
  1795					     KVM_NR_MEM_OBJS);
  1796		if (ret)
  1797			return ret;
  1798	
  1799		mmu_seq = vcpu->kvm->mmu_notifier_seq;
  1800		/*
  1801		 * Ensure the read of mmu_notifier_seq happens before we call
  1802		 * gfn_to_pfn_prot (which calls get_user_pages), so that we don't risk
  1803		 * the page we just got a reference to gets unmapped before we have a
  1804		 * chance to grab the mmu_lock, which ensures that if the page gets
  1805		 * unmapped afterwards, the call to kvm_unmap_hva will take it away
  1806		 * from us again properly. This smp_rmb() interacts with the smp_wmb()
  1807		 * in kvm_mmu_notifier_invalidate_<page|range_end>().
  1808		 */
  1809		smp_rmb();
  1810	
  1811		pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writable);
  1812		if (pfn == KVM_PFN_ERR_HWPOISON) {
  1813			kvm_send_hwpoison_signal(hva, vma);
  1814			return 0;
  1815		}
  1816		if (is_error_noslot_pfn(pfn))
  1817			return -EFAULT;
  1818	
  1819		if (kvm_is_device_pfn(pfn)) {
  1820			mem_type = PAGE_S2_DEVICE;
  1821			flags |= KVM_S2PTE_FLAG_IS_IOMAP;
  1822		} else if (logging_active) {
  1823			/*
  1824			 * Faults on pages in a memslot with logging enabled
  1825			 * should not be mapped with huge pages (it introduces churn
  1826			 * and performance degradation), so force a pte mapping.
  1827			 */
  1828			flags |= KVM_S2_FLAG_LOGGING_ACTIVE;
  1829	
  1830			/*
  1831			 * Only actually map the page as writable if this was a write
  1832			 * fault.
  1833			 */
  1834			if (!write_fault)
  1835				writable = false;
  1836		}
  1837	
  1838		/*
  1839		 * Potentially reduce shadow S2 permissions to match the guest's own
  1840		 * S2. For exec faults, we'd only reach this point if the guest
  1841		 * actually allowed it (see kvm_s2_handle_perm_fault).
  1842		 *
  1843		 * Also encode the level of the nested translation in the SW bits of
  1844		 * the PTE/PMD/PUD. This will be retrieved on TLB invalidation from
  1845		 * the guest.
  1846		 */
  1847		if (kvm_is_shadow_s2_fault(vcpu)) {
  1848			writable &= kvm_s2_trans_writable(nested);
  1849			readable &= kvm_s2_trans_readable(nested);
  1850	
> 1851			l1_s2_level = FIELD_PREP(KVM_NV_GUEST_MAP_SZ,
  1852						 kvm_s2_trans_level(nested));
  1853		}
  1854	
  1855		spin_lock(&kvm->mmu_lock);
  1856		if (mmu_notifier_retry(kvm, mmu_seq))
  1857			goto out_unlock;
  1858	
  1859		if (vma_pagesize == PAGE_SIZE && max_map_size >= PMD_SIZE) {
  1860			/*
  1861			 * Only PMD_SIZE transparent hugepages(THP) are
  1862			 * currently supported. This code will need to be
  1863			 * updated to support other THP sizes.
  1864			 */
  1865			if (transparent_hugepage_adjust(&pfn, &ipa, &fault_ipa))
  1866				vma_pagesize = PMD_SIZE;
  1867		}
  1868	
  1869		if (writable)
  1870			kvm_set_pfn_dirty(pfn);
  1871	
  1872		if (fault_status != FSC_PERM)
  1873			clean_dcache_guest_page(pfn, vma_pagesize);
  1874	
  1875		if (exec_fault)
  1876			invalidate_icache_guest_page(pfn, vma_pagesize);
  1877	
  1878		/*
  1879		 * If we took an execution fault we have made the
  1880		 * icache/dcache coherent above and should now let the s2
  1881		 * mapping be executable.
  1882		 *
  1883		 * Write faults (!exec_fault && FSC_PERM) are orthogonal to
  1884		 * execute permissions, and we preserve whatever we have.
  1885		 */
  1886		needs_exec = exec_fault ||
  1887			(fault_status == FSC_PERM && stage2_is_exec(mmu, fault_ipa));
  1888	
  1889		if (vma_pagesize == PUD_SIZE) {
  1890			pud_t new_pud = kvm_pfn_pud(pfn, mem_type);
  1891	
  1892			new_pud = kvm_pud_mkhuge(new_pud);
  1893			if (writable)
  1894				new_pud = kvm_s2pud_mkwrite(new_pud);
  1895	
  1896			if (!readable)
  1897				new_pud = kvm_s2pud_revoke_read(new_pud);
  1898	
  1899			if (needs_exec)
  1900				new_pud = kvm_s2pud_mkexec(new_pud);
  1901	
  1902			set_desc_bits(pud, new_pud, l1_s2_level);
  1903	
  1904			ret = stage2_set_pud_huge(mmu, memcache, fault_ipa, &new_pud);
  1905		} else if (vma_pagesize == PMD_SIZE) {
  1906			pmd_t new_pmd = kvm_pfn_pmd(pfn, mem_type);
  1907	
  1908			new_pmd = kvm_pmd_mkhuge(new_pmd);
  1909	
  1910			if (writable)
  1911				new_pmd = kvm_s2pmd_mkwrite(new_pmd);
  1912	
  1913			if (!readable)
  1914				new_pmd = kvm_s2pmd_revoke_read(new_pmd);
  1915	
  1916			if (needs_exec)
  1917				new_pmd = kvm_s2pmd_mkexec(new_pmd);
  1918	
  1919			set_desc_bits(pmd, new_pmd, l1_s2_level);
  1920	
  1921			ret = stage2_set_pmd_huge(mmu, memcache, fault_ipa, &new_pmd);
  1922		} else {
  1923			pte_t new_pte = kvm_pfn_pte(pfn, mem_type);
  1924	
  1925			if (writable) {
  1926				new_pte = kvm_s2pte_mkwrite(new_pte);
  1927				mark_page_dirty(kvm, gfn);
  1928			}
  1929	
  1930			if (!readable)
  1931				new_pte = kvm_s2pte_revoke_read(new_pte);
  1932	
  1933			if (needs_exec)
  1934				new_pte = kvm_s2pte_mkexec(new_pte);
  1935	
  1936			set_desc_bits(pte, new_pte, l1_s2_level);
  1937	
  1938			ret = stage2_set_pte(mmu, memcache, fault_ipa, &new_pte, flags);
  1939		}
  1940	
  1941	out_unlock:
  1942		spin_unlock(&kvm->mmu_lock);
  1943		kvm_set_pfn_accessed(pfn);
  1944		kvm_release_pfn_clean(pfn);
  1945		return ret;
  1946	}
  1947	
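The 2074 diagnostics show the 32-bit arm stub of kvm_s2_handle_perm_fault()
in arch/arm/include/asm/kvm_mmu.h still taking a phys_addr_t ipa (plus at
least one further argument), while the shared caller now passes a
struct kvm_s2_trans * instead. Assuming the arm64 version was reworked to
take the translation descriptor, a plausible shape for the updated arm stub
is sketched below; the return convention (0 = nothing to handle, since
32-bit arm has no shadow stage 2) is inferred from the call site, not taken
from the series:

    /* arch/arm/include/asm/kvm_mmu.h */
    struct kvm_s2_trans;

    static inline int kvm_s2_handle_perm_fault(struct kvm_vcpu *vcpu,
    					       struct kvm_s2_trans *trans)
    {
    	/* No nested virt on 32-bit arm, so never a guest S2 perm fault. */
    	return 0;
    }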
---
0-DAY kernel test infrastructure                Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all                   Intel Corporation