From: kbuild test robot <lkp@intel.com>
To: kbuild-all@lists.01.org
Subject: Re: [PATCH v4 13/22] powerpc/kvm/book3s: Use find_kvm_host_pte in page fault handler
Date: Wed, 06 May 2020 11:29:42 +0800
Message-ID: <202005061129.D6579th6%lkp@intel.com>
In-Reply-To: <20200505071729.54912-14-aneesh.kumar@linux.ibm.com>
Hi "Aneesh,
I love your patch! Yet something to improve:
[auto build test ERROR on v5.7-rc4]
[also build test ERROR on next-20200505]
[cannot apply to powerpc/next kvm-ppc/kvm-ppc-next scottwood/next]
[if your patch is applied to the wrong git tree, please drop us a note to help
improve the system. BTW, we also suggest using the '--base' option to specify the
base tree in git format-patch; please see https://stackoverflow.com/a/37406982]
url: https://github.com/0day-ci/linux/commits/Aneesh-Kumar-K-V/Avoid-IPI-while-updating-page-table-entries/20200506-031839
base: 0e698dfa282211e414076f9dc7e83c1c288314fd
config: powerpc-defconfig (attached as .config)
compiler: powerpc64-linux-gcc (GCC) 9.3.0
reproduce:
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # save the attached .config to linux build tree
        COMPILER_INSTALL_PATH=$HOME/0day GCC_VERSION=9.3.0 make.cross ARCH=powerpc
If you fix the issue, kindly add the following tag as appropriate:
Reported-by: kbuild test robot <lkp@intel.com>
All errors (new ones prefixed by >>):
arch/powerpc/kvm/book3s_64_mmu_hv.c: In function 'kvmppc_book3s_hv_page_fault':
>> arch/powerpc/kvm/book3s_64_mmu_hv.c:606:53: error: expected ';' before 'pte'
     606 |         ptep = find_kvm_host_pte(kvm, mmu_seq, hva, &shift)
         |                                                            ^
         |                                                            ;
     607 |         pte = __pte(0);
         |         ~~~
vim +606 arch/powerpc/kvm/book3s_64_mmu_hv.c
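
The diagnostic points at a missing statement terminator: the find_kvm_host_pte()
call on line 606 lacks its trailing ';', so the parser trips over the 'pte'
assignment on the next line. A minimal sketch of the fix (against the hunk shown
below; untested):

        spin_lock(&kvm->mmu_lock);
        ptep = find_kvm_host_pte(kvm, mmu_seq, hva, &shift);   /* note the added ';' */
        pte = __pte(0);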
   478  
   479  int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
   480                                  unsigned long ea, unsigned long dsisr)
   481  {
   482          struct kvm *kvm = vcpu->kvm;
   483          unsigned long hpte[3], r;
   484          unsigned long hnow_v, hnow_r;
   485          __be64 *hptep;
   486          unsigned long mmu_seq, psize, pte_size;
   487          unsigned long gpa_base, gfn_base;
   488          unsigned long gpa, gfn, hva, pfn, hpa;
   489          struct kvm_memory_slot *memslot;
   490          unsigned long *rmap;
   491          struct revmap_entry *rev;
   492          struct page *page;
   493          long index, ret;
   494          bool is_ci;
   495          bool writing, write_ok;
   496          unsigned int shift;
   497          unsigned long rcbits;
   498          long mmio_update;
   499          pte_t pte, *ptep;
   500  
   501          if (kvm_is_radix(kvm))
   502                  return kvmppc_book3s_radix_page_fault(run, vcpu, ea, dsisr);
   503  
   504          /*
   505           * Real-mode code has already searched the HPT and found the
   506           * entry we're interested in. Lock the entry and check that
   507           * it hasn't changed. If it has, just return and re-execute the
   508           * instruction.
   509           */
   510          if (ea != vcpu->arch.pgfault_addr)
   511                  return RESUME_GUEST;
   512  
   513          if (vcpu->arch.pgfault_cache) {
   514                  mmio_update = atomic64_read(&kvm->arch.mmio_update);
   515                  if (mmio_update == vcpu->arch.pgfault_cache->mmio_update) {
   516                          r = vcpu->arch.pgfault_cache->rpte;
   517                          psize = kvmppc_actual_pgsz(vcpu->arch.pgfault_hpte[0],
   518                                                     r);
   519                          gpa_base = r & HPTE_R_RPN & ~(psize - 1);
   520                          gfn_base = gpa_base >> PAGE_SHIFT;
   521                          gpa = gpa_base | (ea & (psize - 1));
   522                          return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea,
   523                                                        dsisr & DSISR_ISSTORE);
   524                  }
   525          }
   526          index = vcpu->arch.pgfault_index;
   527          hptep = (__be64 *)(kvm->arch.hpt.virt + (index << 4));
   528          rev = &kvm->arch.hpt.rev[index];
   529          preempt_disable();
   530          while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
   531                  cpu_relax();
   532          hpte[0] = be64_to_cpu(hptep[0]) & ~HPTE_V_HVLOCK;
   533          hpte[1] = be64_to_cpu(hptep[1]);
   534          hpte[2] = r = rev->guest_rpte;
   535          unlock_hpte(hptep, hpte[0]);
   536          preempt_enable();
   537  
   538          if (cpu_has_feature(CPU_FTR_ARCH_300)) {
   539                  hpte[0] = hpte_new_to_old_v(hpte[0], hpte[1]);
   540                  hpte[1] = hpte_new_to_old_r(hpte[1]);
   541          }
   542          if (hpte[0] != vcpu->arch.pgfault_hpte[0] ||
   543              hpte[1] != vcpu->arch.pgfault_hpte[1])
   544                  return RESUME_GUEST;
   545  
   546          /* Translate the logical address and get the page */
   547          psize = kvmppc_actual_pgsz(hpte[0], r);
   548          gpa_base = r & HPTE_R_RPN & ~(psize - 1);
   549          gfn_base = gpa_base >> PAGE_SHIFT;
   550          gpa = gpa_base | (ea & (psize - 1));
   551          gfn = gpa >> PAGE_SHIFT;
   552          memslot = gfn_to_memslot(kvm, gfn);
   553  
   554          trace_kvm_page_fault_enter(vcpu, hpte, memslot, ea, dsisr);
   555  
   556          /* No memslot means it's an emulated MMIO region */
   557          if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
   558                  return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea,
   559                                                dsisr & DSISR_ISSTORE);
   560  
   561          /*
   562           * This should never happen, because of the slot_is_aligned()
   563           * check in kvmppc_do_h_enter().
   564           */
   565          if (gfn_base < memslot->base_gfn)
   566                  return -EFAULT;
   567  
   568          /* used to check for invalidations in progress */
   569          mmu_seq = kvm->mmu_notifier_seq;
   570          smp_rmb();
   571  
   572          ret = -EFAULT;
   573          page = NULL;
   574          writing = (dsisr & DSISR_ISSTORE) != 0;
   575          /* If writing != 0, then the HPTE must allow writing, if we get here */
   576          write_ok = writing;
   577          hva = gfn_to_hva_memslot(memslot, gfn);
   578  
   579          /*
   580           * Do a fast check first, since __gfn_to_pfn_memslot doesn't
   581           * do it with !atomic && !async, which is how we call it.
   582           * We always ask for write permission since the common case
   583           * is that the page is writable.
   584           */
   585          if (__get_user_pages_fast(hva, 1, 1, &page) == 1) {
   586                  write_ok = true;
   587          } else {
   588                  /* Call KVM generic code to do the slow-path check */
   589                  pfn = __gfn_to_pfn_memslot(memslot, gfn, false, NULL,
   590                                             writing, &write_ok);
   591                  if (is_error_noslot_pfn(pfn))
   592                          return -EFAULT;
   593                  page = NULL;
   594                  if (pfn_valid(pfn)) {
   595                          page = pfn_to_page(pfn);
   596                          if (PageReserved(page))
   597                                  page = NULL;
   598                  }
   599          }
   600  
   601          /*
   602           * Read the PTE from the process' radix tree and use that
   603           * so we get the shift and attribute bits.
   604           */
   605          spin_lock(&kvm->mmu_lock);
 > 606          ptep = find_kvm_host_pte(kvm, mmu_seq, hva, &shift)
   607          pte = __pte(0);
   608          if (ptep)
   609                  pte = READ_ONCE(*ptep);
   610          spin_unlock(&kvm->mmu_lock);
   611          /*
   612           * If the PTE disappeared temporarily due to a THP
   613           * collapse, just return and let the guest try again.
   614           */
   615          if (!pte_present(pte)) {
   616                  if (page)
   617                          put_page(page);
   618                  return RESUME_GUEST;
   619          }
   620          hpa = pte_pfn(pte) << PAGE_SHIFT;
   621          pte_size = PAGE_SIZE;
   622          if (shift)
   623                  pte_size = 1ul << shift;
   624          is_ci = pte_ci(pte);
   625  
   626          if (psize > pte_size)
   627                  goto out_put;
   628          if (pte_size > psize)
   629                  hpa |= hva & (pte_size - psize);
   630  
   631          /* Check WIMG vs. the actual page we're accessing */
   632          if (!hpte_cache_flags_ok(r, is_ci)) {
   633                  if (is_ci)
   634                          goto out_put;
   635                  /*
   636                   * Allow guest to map emulated device memory as
   637                   * uncacheable, but actually make it cacheable.
   638                   */
   639                  r = (r & ~(HPTE_R_W|HPTE_R_I|HPTE_R_G)) | HPTE_R_M;
   640          }
   641  
   642          /*
   643           * Set the HPTE to point to hpa.
   644           * Since the hpa is at PAGE_SIZE granularity, make sure we
   645           * don't mask out lower-order bits if psize < PAGE_SIZE.
   646           */
   647          if (psize < PAGE_SIZE)
   648                  psize = PAGE_SIZE;
   649          r = (r & HPTE_R_KEY_HI) | (r & ~(HPTE_R_PP0 - psize)) | hpa;
   650          if (hpte_is_writable(r) && !write_ok)
   651                  r = hpte_make_readonly(r);
   652          ret = RESUME_GUEST;
   653          preempt_disable();
   654          while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
   655                  cpu_relax();
   656          hnow_v = be64_to_cpu(hptep[0]);
   657          hnow_r = be64_to_cpu(hptep[1]);
   658          if (cpu_has_feature(CPU_FTR_ARCH_300)) {
   659                  hnow_v = hpte_new_to_old_v(hnow_v, hnow_r);
   660                  hnow_r = hpte_new_to_old_r(hnow_r);
   661          }
   662  
   663          /*
   664           * If the HPT is being resized, don't update the HPTE,
   665           * instead let the guest retry after the resize operation is complete.
   666           * The synchronization for mmu_ready test vs. set is provided
   667           * by the HPTE lock.
   668           */
   669          if (!kvm->arch.mmu_ready)
   670                  goto out_unlock;
   671  
   672          if ((hnow_v & ~HPTE_V_HVLOCK) != hpte[0] || hnow_r != hpte[1] ||
   673              rev->guest_rpte != hpte[2])
   674                  /* HPTE has been changed under us; let the guest retry */
   675                  goto out_unlock;
   676          hpte[0] = (hpte[0] & ~HPTE_V_ABSENT) | HPTE_V_VALID;
   677  
   678          /* Always put the HPTE in the rmap chain for the page base address */
   679          rmap = &memslot->arch.rmap[gfn_base - memslot->base_gfn];
   680          lock_rmap(rmap);
   681  
   682          /* Check if we might have been invalidated; let the guest retry if so */
   683          ret = RESUME_GUEST;
   684          if (mmu_notifier_retry(vcpu->kvm, mmu_seq)) {
   685                  unlock_rmap(rmap);
   686                  goto out_unlock;
   687          }
   688  
   689          /* Only set R/C in real HPTE if set in both *rmap and guest_rpte */
   690          rcbits = *rmap >> KVMPPC_RMAP_RC_SHIFT;
   691          r &= rcbits | ~(HPTE_R_R | HPTE_R_C);
   692  
   693          if (be64_to_cpu(hptep[0]) & HPTE_V_VALID) {
   694                  /* HPTE was previously valid, so we need to invalidate it */
   695                  unlock_rmap(rmap);
   696                  hptep[0] |= cpu_to_be64(HPTE_V_ABSENT);
   697                  kvmppc_invalidate_hpte(kvm, hptep, index);
   698                  /* don't lose previous R and C bits */
   699                  r |= be64_to_cpu(hptep[1]) & (HPTE_R_R | HPTE_R_C);
   700          } else {
   701                  kvmppc_add_revmap_chain(kvm, rev, rmap, index, 0);
   702          }
   703  
   704          if (cpu_has_feature(CPU_FTR_ARCH_300)) {
   705                  r = hpte_old_to_new_r(hpte[0], r);
   706                  hpte[0] = hpte_old_to_new_v(hpte[0]);
   707          }
   708          hptep[1] = cpu_to_be64(r);
   709          eieio();
   710          __unlock_hpte(hptep, hpte[0]);
   711          asm volatile("ptesync" : : : "memory");
   712          preempt_enable();
   713          if (page && hpte_is_writable(r))
   714                  set_page_dirty_lock(page);
   715  
   716   out_put:
   717          trace_kvm_page_fault_exit(vcpu, hpte, ret);
   718  
   719          if (page)
   720                  put_page(page);
   721          return ret;
   722  
   723   out_unlock:
   724          __unlock_hpte(hptep, be64_to_cpu(hptep[0]));
   725          preempt_enable();
   726          goto out_put;
   727  }
   728  
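For context, the lookup introduced by this patch follows KVM's usual mmu_notifier
sequencing, visible in the listing above: snapshot kvm->mmu_notifier_seq before
walking the host page table, hold kvm->mmu_lock across find_kvm_host_pte() and the
PTE read, and let the guest retry if an invalidation raced in (in the full function
the retry check runs later, under the rmap and HPTE locks). A simplified sketch of
that protocol, with names taken from the code above and error handling trimmed:

        unsigned long mmu_seq;
        unsigned int shift;
        pte_t pte, *ptep;

        /* Snapshot the notifier sequence before walking host page tables. */
        mmu_seq = kvm->mmu_notifier_seq;
        smp_rmb();

        /* Walk the host page table and read the PTE under the mmu_lock. */
        spin_lock(&kvm->mmu_lock);
        ptep = find_kvm_host_pte(kvm, mmu_seq, hva, &shift);
        pte = ptep ? READ_ONCE(*ptep) : __pte(0);
        spin_unlock(&kvm->mmu_lock);

        /* If an invalidation ran since the snapshot, let the guest retry. */
        if (mmu_notifier_retry(kvm, mmu_seq))
                return RESUME_GUEST;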
---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org