* [linux-next:master 6316/11956] arch/arm64/kvm/mmu.c:1642 user_mem_abort() error: uninitialized symbol 'ret'.
@ 2020-09-30  0:54 kernel test robot
  0 siblings, 0 replies; 3+ messages in thread
From: kernel test robot @ 2020-09-30  0:54 UTC (permalink / raw)
  To: kbuild

CC: kbuild-all@lists.01.org
TO: Will Deacon <will@kernel.org>
CC: Marc Zyngier <maz@kernel.org>
CC: Gavin Shan <gshan@redhat.com>
CC: Alexandru Elisei <alexandru.elisei@arm.com>

tree:   https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git master
head:   49e7e3e905e437a02782019570f70997e2da9101
commit: 6f745f1bb5bf2914be2344b0dd7781d0528b3e42 [6316/11956] KVM: arm64: Convert user_mem_abort() to generic page-table API
:::::: branch date: 15 hours ago
:::::: commit date: 3 weeks ago
config: arm64-randconfig-m031-20200929 (attached as .config)
compiler: aarch64-linux-gcc (GCC) 9.3.0

If you fix the issue, kindly add the following tags as appropriate:
Reported-by: kernel test robot <lkp@intel.com>
Reported-by: Dan Carpenter <dan.carpenter@oracle.com>

smatch warnings:
arch/arm64/kvm/mmu.c:1642 user_mem_abort() error: uninitialized symbol 'ret'.

vim +/ret +1642 arch/arm64/kvm/mmu.c

0529c9021252a5 arch/arm64/kvm/mmu.c Suzuki K Poulose  2020-05-07  1495  
94f8e6418d3915 arch/arm/kvm/mmu.c   Christoffer Dall  2013-01-20  1496  static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
98047888bb9fd5 arch/arm/kvm/mmu.c   Christoffer Dall  2014-08-19  1497  			  struct kvm_memory_slot *memslot, unsigned long hva,
94f8e6418d3915 arch/arm/kvm/mmu.c   Christoffer Dall  2013-01-20  1498  			  unsigned long fault_status)
94f8e6418d3915 arch/arm/kvm/mmu.c   Christoffer Dall  2013-01-20  1499  {
94f8e6418d3915 arch/arm/kvm/mmu.c   Christoffer Dall  2013-01-20  1500  	int ret;
6396b852e46e56 virt/kvm/arm/mmu.c   Punit Agrawal     2018-12-11  1501  	bool write_fault, writable, force_pte = false;
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon       2020-09-11  1502  	bool exec_fault;
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon       2020-09-11  1503  	bool device = false;
94f8e6418d3915 arch/arm/kvm/mmu.c   Christoffer Dall  2013-01-20  1504  	unsigned long mmu_seq;
ad361f093c1e31 arch/arm/kvm/mmu.c   Christoffer Dall  2012-11-01  1505  	struct kvm *kvm = vcpu->kvm;
94f8e6418d3915 arch/arm/kvm/mmu.c   Christoffer Dall  2013-01-20  1506  	struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
ad361f093c1e31 arch/arm/kvm/mmu.c   Christoffer Dall  2012-11-01  1507  	struct vm_area_struct *vma;
1559b7583ff6ed virt/kvm/arm/mmu.c   James Morse       2019-12-17  1508  	short vma_shift;
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon       2020-09-11  1509  	gfn_t gfn;
ba049e93aef7e8 arch/arm/kvm/mmu.c   Dan Williams      2016-01-15  1510  	kvm_pfn_t pfn;
15a49a44fc3620 arch/arm/kvm/mmu.c   Mario Smarduch    2015-01-15  1511  	bool logging_active = memslot_is_logging(memslot);
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon       2020-09-11  1512  	unsigned long vma_pagesize;
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon       2020-09-11  1513  	enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_R;
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon       2020-09-11  1514  	struct kvm_pgtable *pgt;
94f8e6418d3915 arch/arm/kvm/mmu.c   Christoffer Dall  2013-01-20  1515  
a7d079cea2dffb arch/arm/kvm/mmu.c   Ard Biesheuvel    2014-09-09  1516  	write_fault = kvm_is_write_fault(vcpu);
d0e22b4ac3ba23 virt/kvm/arm/mmu.c   Marc Zyngier      2017-10-23  1517  	exec_fault = kvm_vcpu_trap_is_iabt(vcpu);
d0e22b4ac3ba23 virt/kvm/arm/mmu.c   Marc Zyngier      2017-10-23  1518  	VM_BUG_ON(write_fault && exec_fault);
d0e22b4ac3ba23 virt/kvm/arm/mmu.c   Marc Zyngier      2017-10-23  1519  
d0e22b4ac3ba23 virt/kvm/arm/mmu.c   Marc Zyngier      2017-10-23  1520  	if (fault_status == FSC_PERM && !write_fault && !exec_fault) {
94f8e6418d3915 arch/arm/kvm/mmu.c   Christoffer Dall  2013-01-20  1521  		kvm_err("Unexpected L2 read permission error\n");
94f8e6418d3915 arch/arm/kvm/mmu.c   Christoffer Dall  2013-01-20  1522  		return -EFAULT;
94f8e6418d3915 arch/arm/kvm/mmu.c   Christoffer Dall  2013-01-20  1523  	}
94f8e6418d3915 arch/arm/kvm/mmu.c   Christoffer Dall  2013-01-20  1524  
ad361f093c1e31 arch/arm/kvm/mmu.c   Christoffer Dall  2012-11-01  1525  	/* Let's check if we will get back a huge page backed by hugetlbfs */
89154dd5313f77 arch/arm64/kvm/mmu.c Michel Lespinasse 2020-06-08  1526  	mmap_read_lock(current->mm);
ad361f093c1e31 arch/arm/kvm/mmu.c   Christoffer Dall  2012-11-01  1527  	vma = find_vma_intersection(current->mm, hva, hva + 1);
37b544087ef3f6 arch/arm/kvm/mmu.c   Ard Biesheuvel    2014-09-17  1528  	if (unlikely(!vma)) {
37b544087ef3f6 arch/arm/kvm/mmu.c   Ard Biesheuvel    2014-09-17  1529  		kvm_err("Failed to find VMA for hva 0x%lx\n", hva);
89154dd5313f77 arch/arm64/kvm/mmu.c Michel Lespinasse 2020-06-08  1530  		mmap_read_unlock(current->mm);
37b544087ef3f6 arch/arm/kvm/mmu.c   Ard Biesheuvel    2014-09-17  1531  		return -EFAULT;
37b544087ef3f6 arch/arm/kvm/mmu.c   Ard Biesheuvel    2014-09-17  1532  	}
37b544087ef3f6 arch/arm/kvm/mmu.c   Ard Biesheuvel    2014-09-17  1533  
1559b7583ff6ed virt/kvm/arm/mmu.c   James Morse       2019-12-17  1534  	if (is_vm_hugetlb_page(vma))
1559b7583ff6ed virt/kvm/arm/mmu.c   James Morse       2019-12-17  1535  		vma_shift = huge_page_shift(hstate_vma(vma));
1559b7583ff6ed virt/kvm/arm/mmu.c   James Morse       2019-12-17  1536  	else
1559b7583ff6ed virt/kvm/arm/mmu.c   James Morse       2019-12-17  1537  		vma_shift = PAGE_SHIFT;
1559b7583ff6ed virt/kvm/arm/mmu.c   James Morse       2019-12-17  1538  
1559b7583ff6ed virt/kvm/arm/mmu.c   James Morse       2019-12-17  1539  	vma_pagesize = 1ULL << vma_shift;
a80868f3985548 virt/kvm/arm/mmu.c   Suzuki K Poulose  2019-03-12  1540  	if (logging_active ||
6d674e28f642e3 virt/kvm/arm/mmu.c   Marc Zyngier      2019-12-11  1541  	    (vma->vm_flags & VM_PFNMAP) ||
a80868f3985548 virt/kvm/arm/mmu.c   Suzuki K Poulose  2019-03-12  1542  	    !fault_supports_stage2_huge_mapping(memslot, hva, vma_pagesize)) {
a80868f3985548 virt/kvm/arm/mmu.c   Suzuki K Poulose  2019-03-12  1543  		force_pte = true;
a80868f3985548 virt/kvm/arm/mmu.c   Suzuki K Poulose  2019-03-12  1544  		vma_pagesize = PAGE_SIZE;
a80868f3985548 virt/kvm/arm/mmu.c   Suzuki K Poulose  2019-03-12  1545  	}
a80868f3985548 virt/kvm/arm/mmu.c   Suzuki K Poulose  2019-03-12  1546  
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon       2020-09-11  1547  	if (vma_pagesize == PMD_SIZE || vma_pagesize == PUD_SIZE)
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon       2020-09-11  1548  		fault_ipa &= huge_page_mask(hstate_vma(vma));
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon       2020-09-11  1549  
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon       2020-09-11  1550  	gfn = fault_ipa >> PAGE_SHIFT;
89154dd5313f77 arch/arm64/kvm/mmu.c Michel Lespinasse 2020-06-08  1551  	mmap_read_unlock(current->mm);
ad361f093c1e31 arch/arm/kvm/mmu.c   Christoffer Dall  2012-11-01  1552  
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon       2020-09-11  1553  	/*
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon       2020-09-11  1554  	 * Permission faults just need to update the existing leaf entry,
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon       2020-09-11  1555  	 * and so normally don't require allocations from the memcache. The
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon       2020-09-11  1556  	 * only exception to this is when dirty logging is enabled at runtime
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon       2020-09-11  1557  	 * and a write fault needs to collapse a block entry into a table.
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon       2020-09-11  1558  	 */
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon       2020-09-11  1559  	if (fault_status != FSC_PERM || (logging_active && write_fault)) {
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon       2020-09-11  1560  		ret = kvm_mmu_topup_memory_cache(memcache,
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon       2020-09-11  1561  						 kvm_mmu_cache_min_pages(kvm));
94f8e6418d3915 arch/arm/kvm/mmu.c   Christoffer Dall  2013-01-20  1562  		if (ret)
94f8e6418d3915 arch/arm/kvm/mmu.c   Christoffer Dall  2013-01-20  1563  			return ret;
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon       2020-09-11  1564  	}
94f8e6418d3915 arch/arm/kvm/mmu.c   Christoffer Dall  2013-01-20  1565  
94f8e6418d3915 arch/arm/kvm/mmu.c   Christoffer Dall  2013-01-20  1566  	mmu_seq = vcpu->kvm->mmu_notifier_seq;
94f8e6418d3915 arch/arm/kvm/mmu.c   Christoffer Dall  2013-01-20  1567  	/*
94f8e6418d3915 arch/arm/kvm/mmu.c   Christoffer Dall  2013-01-20  1568  	 * Ensure the read of mmu_notifier_seq happens before we call
94f8e6418d3915 arch/arm/kvm/mmu.c   Christoffer Dall  2013-01-20  1569  	 * gfn_to_pfn_prot (which calls get_user_pages), so that we don't risk
94f8e6418d3915 arch/arm/kvm/mmu.c   Christoffer Dall  2013-01-20  1570  	 * the page we just got a reference to gets unmapped before we have a
94f8e6418d3915 arch/arm/kvm/mmu.c   Christoffer Dall  2013-01-20  1571  	 * chance to grab the mmu_lock, which ensure that if the page gets
94f8e6418d3915 arch/arm/kvm/mmu.c   Christoffer Dall  2013-01-20  1572  	 * unmapped afterwards, the call to kvm_unmap_hva will take it away
94f8e6418d3915 arch/arm/kvm/mmu.c   Christoffer Dall  2013-01-20  1573  	 * from us again properly. This smp_rmb() interacts with the smp_wmb()
94f8e6418d3915 arch/arm/kvm/mmu.c   Christoffer Dall  2013-01-20  1574  	 * in kvm_mmu_notifier_invalidate_<page|range_end>.
94f8e6418d3915 arch/arm/kvm/mmu.c   Christoffer Dall  2013-01-20  1575  	 */
94f8e6418d3915 arch/arm/kvm/mmu.c   Christoffer Dall  2013-01-20  1576  	smp_rmb();
94f8e6418d3915 arch/arm/kvm/mmu.c   Christoffer Dall  2013-01-20  1577  
ad361f093c1e31 arch/arm/kvm/mmu.c   Christoffer Dall  2012-11-01  1578  	pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writable);
196f878a7ac2e7 virt/kvm/arm/mmu.c   James Morse       2017-06-20  1579  	if (pfn == KVM_PFN_ERR_HWPOISON) {
1559b7583ff6ed virt/kvm/arm/mmu.c   James Morse       2019-12-17  1580  		kvm_send_hwpoison_signal(hva, vma_shift);
196f878a7ac2e7 virt/kvm/arm/mmu.c   James Morse       2017-06-20  1581  		return 0;
196f878a7ac2e7 virt/kvm/arm/mmu.c   James Morse       2017-06-20  1582  	}
9ac715954682b2 arch/arm/kvm/mmu.c   Christoffer Dall  2016-08-17  1583  	if (is_error_noslot_pfn(pfn))
94f8e6418d3915 arch/arm/kvm/mmu.c   Christoffer Dall  2013-01-20  1584  		return -EFAULT;
94f8e6418d3915 arch/arm/kvm/mmu.c   Christoffer Dall  2013-01-20  1585  
15a49a44fc3620 arch/arm/kvm/mmu.c   Mario Smarduch    2015-01-15  1586  	if (kvm_is_device_pfn(pfn)) {
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon       2020-09-11  1587  		device = true;
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon       2020-09-11  1588  	} else if (logging_active && !write_fault) {
15a49a44fc3620 arch/arm/kvm/mmu.c   Mario Smarduch    2015-01-15  1589  		/*
15a49a44fc3620 arch/arm/kvm/mmu.c   Mario Smarduch    2015-01-15  1590  		 * Only actually map the page as writable if this was a write
15a49a44fc3620 arch/arm/kvm/mmu.c   Mario Smarduch    2015-01-15  1591  		 * fault.
15a49a44fc3620 arch/arm/kvm/mmu.c   Mario Smarduch    2015-01-15  1592  		 */
15a49a44fc3620 arch/arm/kvm/mmu.c   Mario Smarduch    2015-01-15  1593  		writable = false;
15a49a44fc3620 arch/arm/kvm/mmu.c   Mario Smarduch    2015-01-15  1594  	}
b88657674d39fc arch/arm/kvm/mmu.c   Kim Phillips      2014-06-26  1595  
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon       2020-09-11  1596  	if (exec_fault && device)
6d674e28f642e3 virt/kvm/arm/mmu.c   Marc Zyngier      2019-12-11  1597  		return -ENOEXEC;
6d674e28f642e3 virt/kvm/arm/mmu.c   Marc Zyngier      2019-12-11  1598  
ad361f093c1e31 arch/arm/kvm/mmu.c   Christoffer Dall  2012-11-01  1599  	spin_lock(&kvm->mmu_lock);
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon       2020-09-11  1600  	pgt = vcpu->arch.hw_mmu->pgt;
ad361f093c1e31 arch/arm/kvm/mmu.c   Christoffer Dall  2012-11-01  1601  	if (mmu_notifier_retry(kvm, mmu_seq))
94f8e6418d3915 arch/arm/kvm/mmu.c   Christoffer Dall  2013-01-20  1602  		goto out_unlock;
15a49a44fc3620 arch/arm/kvm/mmu.c   Mario Smarduch    2015-01-15  1603  
3f58bf63455588 virt/kvm/arm/mmu.c   Punit Agrawal     2018-12-11  1604  	/*
0529c9021252a5 arch/arm64/kvm/mmu.c Suzuki K Poulose  2020-05-07  1605  	 * If we are not forced to use page mapping, check if we are
0529c9021252a5 arch/arm64/kvm/mmu.c Suzuki K Poulose  2020-05-07  1606  	 * backed by a THP and thus use block mapping if possible.
3f58bf63455588 virt/kvm/arm/mmu.c   Punit Agrawal     2018-12-11  1607  	 */
0529c9021252a5 arch/arm64/kvm/mmu.c Suzuki K Poulose  2020-05-07  1608  	if (vma_pagesize == PAGE_SIZE && !force_pte)
0529c9021252a5 arch/arm64/kvm/mmu.c Suzuki K Poulose  2020-05-07  1609  		vma_pagesize = transparent_hugepage_adjust(memslot, hva,
0529c9021252a5 arch/arm64/kvm/mmu.c Suzuki K Poulose  2020-05-07  1610  							   &pfn, &fault_ipa);
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon       2020-09-11  1611  	if (writable) {
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon       2020-09-11  1612  		prot |= KVM_PGTABLE_PROT_W;
ad361f093c1e31 arch/arm/kvm/mmu.c   Christoffer Dall  2012-11-01  1613  		kvm_set_pfn_dirty(pfn);
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon       2020-09-11  1614  		mark_page_dirty(kvm, gfn);
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon       2020-09-11  1615  	}
a9c0e12ebee56e virt/kvm/arm/mmu.c   Marc Zyngier      2017-10-23  1616  
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon       2020-09-11  1617  	if (fault_status != FSC_PERM && !device)
3f58bf63455588 virt/kvm/arm/mmu.c   Punit Agrawal     2018-12-11  1618  		clean_dcache_guest_page(pfn, vma_pagesize);
3f58bf63455588 virt/kvm/arm/mmu.c   Punit Agrawal     2018-12-11  1619  
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon       2020-09-11  1620  	if (exec_fault) {
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon       2020-09-11  1621  		prot |= KVM_PGTABLE_PROT_X;
3f58bf63455588 virt/kvm/arm/mmu.c   Punit Agrawal     2018-12-11  1622  		invalidate_icache_guest_page(pfn, vma_pagesize);
94f8e6418d3915 arch/arm/kvm/mmu.c   Christoffer Dall  2013-01-20  1623  	}
a9c0e12ebee56e virt/kvm/arm/mmu.c   Marc Zyngier      2017-10-23  1624  
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon       2020-09-11  1625  	if (device)
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon       2020-09-11  1626  		prot |= KVM_PGTABLE_PROT_DEVICE;
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon       2020-09-11  1627  	else if (cpus_have_const_cap(ARM64_HAS_CACHE_DIC))
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon       2020-09-11  1628  		prot |= KVM_PGTABLE_PROT_X;
a15f693935a9f1 virt/kvm/arm/mmu.c   Marc Zyngier      2017-10-23  1629  
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon       2020-09-11  1630  	if (fault_status == FSC_PERM && !(logging_active && writable)) {
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon       2020-09-11  1631  		ret = kvm_pgtable_stage2_relax_perms(pgt, fault_ipa, prot);
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon       2020-09-11  1632  	} else {
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon       2020-09-11  1633  		ret = kvm_pgtable_stage2_map(pgt, fault_ipa, vma_pagesize,
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon       2020-09-11  1634  					     __pfn_to_phys(pfn), prot,
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon       2020-09-11  1635  					     memcache);
ad361f093c1e31 arch/arm/kvm/mmu.c   Christoffer Dall  2012-11-01  1636  	}
ad361f093c1e31 arch/arm/kvm/mmu.c   Christoffer Dall  2012-11-01  1637  
94f8e6418d3915 arch/arm/kvm/mmu.c   Christoffer Dall  2013-01-20  1638  out_unlock:
ad361f093c1e31 arch/arm/kvm/mmu.c   Christoffer Dall  2012-11-01  1639  	spin_unlock(&kvm->mmu_lock);
35307b9a5f7ebc arch/arm/kvm/mmu.c   Marc Zyngier      2015-03-12  1640  	kvm_set_pfn_accessed(pfn);
94f8e6418d3915 arch/arm/kvm/mmu.c   Christoffer Dall  2013-01-20  1641  	kvm_release_pfn_clean(pfn);
ad361f093c1e31 arch/arm/kvm/mmu.c   Christoffer Dall  2012-11-01 @1642  	return ret;
94f8e6418d3915 arch/arm/kvm/mmu.c   Christoffer Dall  2013-01-20  1643  }
94f8e6418d3915 arch/arm/kvm/mmu.c   Christoffer Dall  2013-01-20  1644  

:::::: The code at line 1642 was first introduced by commit
:::::: ad361f093c1e31d0b43946210a32ab4ff5c49850 KVM: ARM: Support hugetlbfs backed huge pages

:::::: TO: Christoffer Dall <christoffer.dall@linaro.org>
:::::: CC: Christoffer Dall <christoffer.dall@linaro.org>
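
For readers following the blame output above: 'ret' is declared at line
1500 but only written on two paths, the conditional memcache top-up at
line 1560 and the stage-2 relax_perms/map calls at lines 1631/1633. For
a permission fault that skips the top-up (fault_status == FSC_PERM and
not an active-logging write fault), a successful mmu_notifier_retry() at
line 1601 jumps straight to out_unlock, so 'return ret' at line 1642
reads a value that was never assigned. A minimal standalone sketch of
the same control-flow shape (hypothetical names, plain C, not kernel
code):

#include <stdio.h>

/* Hypothetical stand-in for mmu_notifier_retry(): nonzero means the
 * notifier sequence changed while the page was being faulted in. */
static int notifier_retry(int seq_changed)
{
	return seq_changed;
}

/* Same shape as user_mem_abort() after 6f745f1bb5bf29: 'ret' is only
 * written on some paths, yet the early goto still reaches 'return ret'. */
static int fault_handler(int need_topup, int seq_changed)
{
	int ret;			/* never initialized */

	if (need_topup) {
		ret = 0;		/* top-up result, this path only */
		if (ret)
			return ret;
	}

	if (notifier_retry(seq_changed))
		goto out_unlock;	/* skips every write to 'ret' */

	ret = 0;			/* stage-2 map/relax result */

out_unlock:
	return ret;	/* indeterminate when the retry path was taken */
}

int main(void)
{
	/* need_topup=0 with a concurrent notifier: the flagged path. */
	printf("ret = %d\n", fault_handler(0, 1));
	return 0;
}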

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org

* [linux-next:master 6316/11956] arch/arm64/kvm/mmu.c:1642 user_mem_abort() error: uninitialized symbol 'ret'.
@ 2020-09-30  6:16 Dan Carpenter
  0 siblings, 0 replies; 3+ messages in thread
From: Dan Carpenter @ 2020-09-30  6:16 UTC (permalink / raw)
  To: kbuild

tree:   https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git master
head:   49e7e3e905e437a02782019570f70997e2da9101
commit: 6f745f1bb5bf2914be2344b0dd7781d0528b3e42 [6316/11956] KVM: arm64: Convert user_mem_abort() to generic page-table API
config: arm64-randconfig-m031-20200929 (attached as .config)
compiler: aarch64-linux-gcc (GCC) 9.3.0

If you fix the issue, kindly add the following tags as appropriate:
Reported-by: kernel test robot <lkp@intel.com>
Reported-by: Dan Carpenter <dan.carpenter@oracle.com>

smatch warnings:
arch/arm64/kvm/mmu.c:1642 user_mem_abort() error: uninitialized symbol 'ret'.

vim +/ret +1642 arch/arm64/kvm/mmu.c

94f8e6418d3915 arch/arm/kvm/mmu.c   Christoffer Dall  2013-01-20  1496  static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
98047888bb9fd5 arch/arm/kvm/mmu.c   Christoffer Dall  2014-08-19  1497  			  struct kvm_memory_slot *memslot, unsigned long hva,
94f8e6418d3915 arch/arm/kvm/mmu.c   Christoffer Dall  2013-01-20  1498  			  unsigned long fault_status)
94f8e6418d3915 arch/arm/kvm/mmu.c   Christoffer Dall  2013-01-20  1499  {
94f8e6418d3915 arch/arm/kvm/mmu.c   Christoffer Dall  2013-01-20  1500  	int ret;
6396b852e46e56 virt/kvm/arm/mmu.c   Punit Agrawal     2018-12-11  1501  	bool write_fault, writable, force_pte = false;
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon       2020-09-11  1502  	bool exec_fault;
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon       2020-09-11  1503  	bool device = false;
94f8e6418d3915 arch/arm/kvm/mmu.c   Christoffer Dall  2013-01-20  1504  	unsigned long mmu_seq;
ad361f093c1e31 arch/arm/kvm/mmu.c   Christoffer Dall  2012-11-01  1505  	struct kvm *kvm = vcpu->kvm;
94f8e6418d3915 arch/arm/kvm/mmu.c   Christoffer Dall  2013-01-20  1506  	struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
ad361f093c1e31 arch/arm/kvm/mmu.c   Christoffer Dall  2012-11-01  1507  	struct vm_area_struct *vma;
1559b7583ff6ed virt/kvm/arm/mmu.c   James Morse       2019-12-17  1508  	short vma_shift;
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon       2020-09-11  1509  	gfn_t gfn;
ba049e93aef7e8 arch/arm/kvm/mmu.c   Dan Williams      2016-01-15  1510  	kvm_pfn_t pfn;
15a49a44fc3620 arch/arm/kvm/mmu.c   Mario Smarduch    2015-01-15  1511  	bool logging_active = memslot_is_logging(memslot);
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon       2020-09-11  1512  	unsigned long vma_pagesize;
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon       2020-09-11  1513  	enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_R;
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon       2020-09-11  1514  	struct kvm_pgtable *pgt;
94f8e6418d3915 arch/arm/kvm/mmu.c   Christoffer Dall  2013-01-20  1515  
a7d079cea2dffb arch/arm/kvm/mmu.c   Ard Biesheuvel    2014-09-09  1516  	write_fault = kvm_is_write_fault(vcpu);
d0e22b4ac3ba23 virt/kvm/arm/mmu.c   Marc Zyngier      2017-10-23  1517  	exec_fault = kvm_vcpu_trap_is_iabt(vcpu);
d0e22b4ac3ba23 virt/kvm/arm/mmu.c   Marc Zyngier      2017-10-23  1518  	VM_BUG_ON(write_fault && exec_fault);
d0e22b4ac3ba23 virt/kvm/arm/mmu.c   Marc Zyngier      2017-10-23  1519  
d0e22b4ac3ba23 virt/kvm/arm/mmu.c   Marc Zyngier      2017-10-23  1520  	if (fault_status == FSC_PERM && !write_fault && !exec_fault) {
94f8e6418d3915 arch/arm/kvm/mmu.c   Christoffer Dall  2013-01-20  1521  		kvm_err("Unexpected L2 read permission error\n");
94f8e6418d3915 arch/arm/kvm/mmu.c   Christoffer Dall  2013-01-20  1522  		return -EFAULT;
94f8e6418d3915 arch/arm/kvm/mmu.c   Christoffer Dall  2013-01-20  1523  	}
94f8e6418d3915 arch/arm/kvm/mmu.c   Christoffer Dall  2013-01-20  1524  
ad361f093c1e31 arch/arm/kvm/mmu.c   Christoffer Dall  2012-11-01  1525  	/* Let's check if we will get back a huge page backed by hugetlbfs */
89154dd5313f77 arch/arm64/kvm/mmu.c Michel Lespinasse 2020-06-08  1526  	mmap_read_lock(current->mm);
ad361f093c1e31 arch/arm/kvm/mmu.c   Christoffer Dall  2012-11-01  1527  	vma = find_vma_intersection(current->mm, hva, hva + 1);
37b544087ef3f6 arch/arm/kvm/mmu.c   Ard Biesheuvel    2014-09-17  1528  	if (unlikely(!vma)) {
37b544087ef3f6 arch/arm/kvm/mmu.c   Ard Biesheuvel    2014-09-17  1529  		kvm_err("Failed to find VMA for hva 0x%lx\n", hva);
89154dd5313f77 arch/arm64/kvm/mmu.c Michel Lespinasse 2020-06-08  1530  		mmap_read_unlock(current->mm);
37b544087ef3f6 arch/arm/kvm/mmu.c   Ard Biesheuvel    2014-09-17  1531  		return -EFAULT;
37b544087ef3f6 arch/arm/kvm/mmu.c   Ard Biesheuvel    2014-09-17  1532  	}
37b544087ef3f6 arch/arm/kvm/mmu.c   Ard Biesheuvel    2014-09-17  1533  
1559b7583ff6ed virt/kvm/arm/mmu.c   James Morse       2019-12-17  1534  	if (is_vm_hugetlb_page(vma))
1559b7583ff6ed virt/kvm/arm/mmu.c   James Morse       2019-12-17  1535  		vma_shift = huge_page_shift(hstate_vma(vma));
1559b7583ff6ed virt/kvm/arm/mmu.c   James Morse       2019-12-17  1536  	else
1559b7583ff6ed virt/kvm/arm/mmu.c   James Morse       2019-12-17  1537  		vma_shift = PAGE_SHIFT;
1559b7583ff6ed virt/kvm/arm/mmu.c   James Morse       2019-12-17  1538  
1559b7583ff6ed virt/kvm/arm/mmu.c   James Morse       2019-12-17  1539  	vma_pagesize = 1ULL << vma_shift;
a80868f3985548 virt/kvm/arm/mmu.c   Suzuki K Poulose  2019-03-12  1540  	if (logging_active ||
6d674e28f642e3 virt/kvm/arm/mmu.c   Marc Zyngier      2019-12-11  1541  	    (vma->vm_flags & VM_PFNMAP) ||
a80868f3985548 virt/kvm/arm/mmu.c   Suzuki K Poulose  2019-03-12  1542  	    !fault_supports_stage2_huge_mapping(memslot, hva, vma_pagesize)) {
a80868f3985548 virt/kvm/arm/mmu.c   Suzuki K Poulose  2019-03-12  1543  		force_pte = true;
a80868f3985548 virt/kvm/arm/mmu.c   Suzuki K Poulose  2019-03-12  1544  		vma_pagesize = PAGE_SIZE;
a80868f3985548 virt/kvm/arm/mmu.c   Suzuki K Poulose  2019-03-12  1545  	}
a80868f3985548 virt/kvm/arm/mmu.c   Suzuki K Poulose  2019-03-12  1546  
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon       2020-09-11  1547  	if (vma_pagesize == PMD_SIZE || vma_pagesize == PUD_SIZE)
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon       2020-09-11  1548  		fault_ipa &= huge_page_mask(hstate_vma(vma));
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon       2020-09-11  1549  
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon       2020-09-11  1550  	gfn = fault_ipa >> PAGE_SHIFT;
89154dd5313f77 arch/arm64/kvm/mmu.c Michel Lespinasse 2020-06-08  1551  	mmap_read_unlock(current->mm);
ad361f093c1e31 arch/arm/kvm/mmu.c   Christoffer Dall  2012-11-01  1552  
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon       2020-09-11  1553  	/*
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon       2020-09-11  1554  	 * Permission faults just need to update the existing leaf entry,
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon       2020-09-11  1555  	 * and so normally don't require allocations from the memcache. The
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon       2020-09-11  1556  	 * only exception to this is when dirty logging is enabled at runtime
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon       2020-09-11  1557  	 * and a write fault needs to collapse a block entry into a table.
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon       2020-09-11  1558  	 */
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon       2020-09-11  1559  	if (fault_status != FSC_PERM || (logging_active && write_fault)) {
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon       2020-09-11  1560  		ret = kvm_mmu_topup_memory_cache(memcache,
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon       2020-09-11  1561  						 kvm_mmu_cache_min_pages(kvm));
94f8e6418d3915 arch/arm/kvm/mmu.c   Christoffer Dall  2013-01-20  1562  		if (ret)
94f8e6418d3915 arch/arm/kvm/mmu.c   Christoffer Dall  2013-01-20  1563  			return ret;
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon       2020-09-11  1564  	}
94f8e6418d3915 arch/arm/kvm/mmu.c   Christoffer Dall  2013-01-20  1565  
94f8e6418d3915 arch/arm/kvm/mmu.c   Christoffer Dall  2013-01-20  1566  	mmu_seq = vcpu->kvm->mmu_notifier_seq;
94f8e6418d3915 arch/arm/kvm/mmu.c   Christoffer Dall  2013-01-20  1567  	/*
94f8e6418d3915 arch/arm/kvm/mmu.c   Christoffer Dall  2013-01-20  1568  	 * Ensure the read of mmu_notifier_seq happens before we call
94f8e6418d3915 arch/arm/kvm/mmu.c   Christoffer Dall  2013-01-20  1569  	 * gfn_to_pfn_prot (which calls get_user_pages), so that we don't risk
94f8e6418d3915 arch/arm/kvm/mmu.c   Christoffer Dall  2013-01-20  1570  	 * the page we just got a reference to gets unmapped before we have a
94f8e6418d3915 arch/arm/kvm/mmu.c   Christoffer Dall  2013-01-20  1571  	 * chance to grab the mmu_lock, which ensure that if the page gets
94f8e6418d3915 arch/arm/kvm/mmu.c   Christoffer Dall  2013-01-20  1572  	 * unmapped afterwards, the call to kvm_unmap_hva will take it away
94f8e6418d3915 arch/arm/kvm/mmu.c   Christoffer Dall  2013-01-20  1573  	 * from us again properly. This smp_rmb() interacts with the smp_wmb()
94f8e6418d3915 arch/arm/kvm/mmu.c   Christoffer Dall  2013-01-20  1574  	 * in kvm_mmu_notifier_invalidate_<page|range_end>.
94f8e6418d3915 arch/arm/kvm/mmu.c   Christoffer Dall  2013-01-20  1575  	 */
94f8e6418d3915 arch/arm/kvm/mmu.c   Christoffer Dall  2013-01-20  1576  	smp_rmb();
94f8e6418d3915 arch/arm/kvm/mmu.c   Christoffer Dall  2013-01-20  1577  
ad361f093c1e31 arch/arm/kvm/mmu.c   Christoffer Dall  2012-11-01  1578  	pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writable);
196f878a7ac2e7 virt/kvm/arm/mmu.c   James Morse       2017-06-20  1579  	if (pfn == KVM_PFN_ERR_HWPOISON) {
1559b7583ff6ed virt/kvm/arm/mmu.c   James Morse       2019-12-17  1580  		kvm_send_hwpoison_signal(hva, vma_shift);
196f878a7ac2e7 virt/kvm/arm/mmu.c   James Morse       2017-06-20  1581  		return 0;
196f878a7ac2e7 virt/kvm/arm/mmu.c   James Morse       2017-06-20  1582  	}
9ac715954682b2 arch/arm/kvm/mmu.c   Christoffer Dall  2016-08-17  1583  	if (is_error_noslot_pfn(pfn))
94f8e6418d3915 arch/arm/kvm/mmu.c   Christoffer Dall  2013-01-20  1584  		return -EFAULT;
94f8e6418d3915 arch/arm/kvm/mmu.c   Christoffer Dall  2013-01-20  1585  
15a49a44fc3620 arch/arm/kvm/mmu.c   Mario Smarduch    2015-01-15  1586  	if (kvm_is_device_pfn(pfn)) {
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon       2020-09-11  1587  		device = true;
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon       2020-09-11  1588  	} else if (logging_active && !write_fault) {
15a49a44fc3620 arch/arm/kvm/mmu.c   Mario Smarduch    2015-01-15  1589  		/*
15a49a44fc3620 arch/arm/kvm/mmu.c   Mario Smarduch    2015-01-15  1590  		 * Only actually map the page as writable if this was a write
15a49a44fc3620 arch/arm/kvm/mmu.c   Mario Smarduch    2015-01-15  1591  		 * fault.
15a49a44fc3620 arch/arm/kvm/mmu.c   Mario Smarduch    2015-01-15  1592  		 */
15a49a44fc3620 arch/arm/kvm/mmu.c   Mario Smarduch    2015-01-15  1593  		writable = false;
15a49a44fc3620 arch/arm/kvm/mmu.c   Mario Smarduch    2015-01-15  1594  	}
b88657674d39fc arch/arm/kvm/mmu.c   Kim Phillips      2014-06-26  1595  
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon       2020-09-11  1596  	if (exec_fault && device)
6d674e28f642e3 virt/kvm/arm/mmu.c   Marc Zyngier      2019-12-11  1597  		return -ENOEXEC;
6d674e28f642e3 virt/kvm/arm/mmu.c   Marc Zyngier      2019-12-11  1598  
ad361f093c1e31 arch/arm/kvm/mmu.c   Christoffer Dall  2012-11-01  1599  	spin_lock(&kvm->mmu_lock);
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon       2020-09-11  1600  	pgt = vcpu->arch.hw_mmu->pgt;
ad361f093c1e31 arch/arm/kvm/mmu.c   Christoffer Dall  2012-11-01  1601  	if (mmu_notifier_retry(kvm, mmu_seq))
94f8e6418d3915 arch/arm/kvm/mmu.c   Christoffer Dall  2013-01-20  1602  		goto out_unlock;

Set the error code?

15a49a44fc3620 arch/arm/kvm/mmu.c   Mario Smarduch    2015-01-15  1603  
3f58bf63455588 virt/kvm/arm/mmu.c   Punit Agrawal     2018-12-11  1604  	/*
0529c9021252a5 arch/arm64/kvm/mmu.c Suzuki K Poulose  2020-05-07  1605  	 * If we are not forced to use page mapping, check if we are
0529c9021252a5 arch/arm64/kvm/mmu.c Suzuki K Poulose  2020-05-07  1606  	 * backed by a THP and thus use block mapping if possible.
3f58bf63455588 virt/kvm/arm/mmu.c   Punit Agrawal     2018-12-11  1607  	 */
0529c9021252a5 arch/arm64/kvm/mmu.c Suzuki K Poulose  2020-05-07  1608  	if (vma_pagesize == PAGE_SIZE && !force_pte)
0529c9021252a5 arch/arm64/kvm/mmu.c Suzuki K Poulose  2020-05-07  1609  		vma_pagesize = transparent_hugepage_adjust(memslot, hva,
0529c9021252a5 arch/arm64/kvm/mmu.c Suzuki K Poulose  2020-05-07  1610  							   &pfn, &fault_ipa);
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon       2020-09-11  1611  	if (writable) {
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon       2020-09-11  1612  		prot |= KVM_PGTABLE_PROT_W;
ad361f093c1e31 arch/arm/kvm/mmu.c   Christoffer Dall  2012-11-01  1613  		kvm_set_pfn_dirty(pfn);
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon       2020-09-11  1614  		mark_page_dirty(kvm, gfn);
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon       2020-09-11  1615  	}
a9c0e12ebee56e virt/kvm/arm/mmu.c   Marc Zyngier      2017-10-23  1616  
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon       2020-09-11  1617  	if (fault_status != FSC_PERM && !device)
3f58bf63455588 virt/kvm/arm/mmu.c   Punit Agrawal     2018-12-11  1618  		clean_dcache_guest_page(pfn, vma_pagesize);
3f58bf63455588 virt/kvm/arm/mmu.c   Punit Agrawal     2018-12-11  1619  
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon       2020-09-11  1620  	if (exec_fault) {
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon       2020-09-11  1621  		prot |= KVM_PGTABLE_PROT_X;
3f58bf63455588 virt/kvm/arm/mmu.c   Punit Agrawal     2018-12-11  1622  		invalidate_icache_guest_page(pfn, vma_pagesize);
94f8e6418d3915 arch/arm/kvm/mmu.c   Christoffer Dall  2013-01-20  1623  	}
a9c0e12ebee56e virt/kvm/arm/mmu.c   Marc Zyngier      2017-10-23  1624  
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon       2020-09-11  1625  	if (device)
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon       2020-09-11  1626  		prot |= KVM_PGTABLE_PROT_DEVICE;
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon       2020-09-11  1627  	else if (cpus_have_const_cap(ARM64_HAS_CACHE_DIC))
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon       2020-09-11  1628  		prot |= KVM_PGTABLE_PROT_X;
a15f693935a9f1 virt/kvm/arm/mmu.c   Marc Zyngier      2017-10-23  1629  
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon       2020-09-11  1630  	if (fault_status == FSC_PERM && !(logging_active && writable)) {
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon       2020-09-11  1631  		ret = kvm_pgtable_stage2_relax_perms(pgt, fault_ipa, prot);
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon       2020-09-11  1632  	} else {
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon       2020-09-11  1633  		ret = kvm_pgtable_stage2_map(pgt, fault_ipa, vma_pagesize,
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon       2020-09-11  1634  					     __pfn_to_phys(pfn), prot,
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon       2020-09-11  1635  					     memcache);
ad361f093c1e31 arch/arm/kvm/mmu.c   Christoffer Dall  2012-11-01  1636  	}
ad361f093c1e31 arch/arm/kvm/mmu.c   Christoffer Dall  2012-11-01  1637  
94f8e6418d3915 arch/arm/kvm/mmu.c   Christoffer Dall  2013-01-20  1638  out_unlock:
ad361f093c1e31 arch/arm/kvm/mmu.c   Christoffer Dall  2012-11-01  1639  	spin_unlock(&kvm->mmu_lock);
35307b9a5f7ebc arch/arm/kvm/mmu.c   Marc Zyngier      2015-03-12  1640  	kvm_set_pfn_accessed(pfn);
94f8e6418d3915 arch/arm/kvm/mmu.c   Christoffer Dall  2013-01-20  1641  	kvm_release_pfn_clean(pfn);
ad361f093c1e31 arch/arm/kvm/mmu.c   Christoffer Dall  2012-11-01 @1642  	return ret;
94f8e6418d3915 arch/arm/kvm/mmu.c   Christoffer Dall  2013-01-20  1643  }
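
One possible answer to the "Set the error code?" question above: on a
notifier retry the handler presumably wants the vCPU to replay the
fault rather than report an error, which is what the pre-conversion
code did implicitly (its unconditional memcache top-up left ret at 0
before the goto was reached). A minimal sketch of that reading, against
6f745f1bb5bf29 (an assumption about the intended semantics, not
necessarily the fix that ends up merged):

--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1497,7 +1497,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 			  struct kvm_memory_slot *memslot, unsigned long hva,
 			  unsigned long fault_status)
 {
-	int ret;
+	int ret = 0;
 	bool write_fault, writable, force_pte = false;
 	bool exec_fault;
 	bool device = false;

The alternative of assigning an explicit error before the goto (say,
ret = -EAGAIN) would, as far as I can tell, propagate a failure out of
kvm_handle_guest_abort() instead of retrying the fault, so the
zero-initialization above looks like the safer reading; which behavior
is intended is for the authors to confirm.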

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org

end of thread, other threads:[~2020-09-30  6:16 UTC | newest]

Thread overview: 3+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-09-30  0:54 [linux-next:master 6316/11956] arch/arm64/kvm/mmu.c:1642 user_mem_abort() error: uninitialized symbol 'ret' kernel test robot
2020-09-30  6:16 Dan Carpenter
