All of lore.kernel.org
 help / color / mirror / Atom feed
* [tglx-devel:kvm 2/7] arch/x86/kvm/vmx/vmx.c:6776:2: error: implicit declaration of function 'trace_hardirqs_off_prepare'; did you mean
@ 2020-07-06 18:27 kernel test robot
  0 siblings, 0 replies; only message in thread
From: kernel test robot @ 2020-07-06 18:27 UTC (permalink / raw)
  To: kbuild-all

[-- Attachment #1: Type: text/plain, Size: 7410 bytes --]

tree:   https://git.kernel.org/pub/scm/linux/kernel/git/tglx/devel.git kvm
head:   d7d7aa7cdcda59b53f97b9dffcc01bde96110313
commit: b2c3be4393505b9906fbd5419df2080a99d6807f [2/7] x86/kvm/vmx: Add hardirq tracing to guest enter/exit
config: x86_64-rhel-7.6 (attached as .config)
compiler: gcc-9 (Debian 9.3.0-14) 9.3.0
reproduce (this is a W=1 build):
        git checkout b2c3be4393505b9906fbd5419df2080a99d6807f
        # save the attached .config to linux build tree
        make W=1 ARCH=x86_64 

If you fix the issue, kindly add the following tag as appropriate:
Reported-by: kernel test robot <lkp@intel.com>

All errors (new ones prefixed by >>):

   arch/x86/kvm/vmx/vmx.c: In function 'vmx_vcpu_run':
>> arch/x86/kvm/vmx/vmx.c:6776:2: error: implicit declaration of function 'trace_hardirqs_off_prepare'; did you mean 'trace_hardirqs_on_prepare'? [-Werror=implicit-function-declaration]
    6776 |  trace_hardirqs_off_prepare();
         |  ^~~~~~~~~~~~~~~~~~~~~~~~~~
         |  trace_hardirqs_on_prepare
   cc1: some warnings being treated as errors

vim +6776 arch/x86/kvm/vmx/vmx.c

  6656	
  6657	static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
  6658	{
  6659		fastpath_t exit_fastpath;
  6660		struct vcpu_vmx *vmx = to_vmx(vcpu);
  6661		unsigned long cr3, cr4;
  6662	
  6663	reenter_guest:
  6664		/* Record the guest's net vcpu time for enforced NMI injections. */
  6665		if (unlikely(!enable_vnmi &&
  6666			     vmx->loaded_vmcs->soft_vnmi_blocked))
  6667			vmx->loaded_vmcs->entry_time = ktime_get();
  6668	
  6669		/* Don't enter VMX if guest state is invalid, let the exit handler
  6670		   start emulation until we arrive back to a valid state */
  6671		if (vmx->emulation_required)
  6672			return EXIT_FASTPATH_NONE;
  6673	
  6674		if (vmx->ple_window_dirty) {
  6675			vmx->ple_window_dirty = false;
  6676			vmcs_write32(PLE_WINDOW, vmx->ple_window);
  6677		}
  6678	
  6679		/*
  6680		 * We did this in prepare_switch_to_guest, because it needs to
  6681		 * be within srcu_read_lock.
  6682		 */
  6683		WARN_ON_ONCE(vmx->nested.need_vmcs12_to_shadow_sync);
  6684	
  6685		if (kvm_register_is_dirty(vcpu, VCPU_REGS_RSP))
  6686			vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]);
  6687		if (kvm_register_is_dirty(vcpu, VCPU_REGS_RIP))
  6688			vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
  6689	
  6690		cr3 = __get_current_cr3_fast();
  6691		if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) {
  6692			vmcs_writel(HOST_CR3, cr3);
  6693			vmx->loaded_vmcs->host_state.cr3 = cr3;
  6694		}
  6695	
  6696		cr4 = cr4_read_shadow();
  6697		if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) {
  6698			vmcs_writel(HOST_CR4, cr4);
  6699			vmx->loaded_vmcs->host_state.cr4 = cr4;
  6700		}
  6701	
  6702		/* When single-stepping over STI and MOV SS, we must clear the
  6703		 * corresponding interruptibility bits in the guest state. Otherwise
  6704		 * vmentry fails as it then expects bit 14 (BS) in pending debug
  6705		 * exceptions being set, but that's not correct for the guest debugging
  6706		 * case. */
  6707		if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
  6708			vmx_set_interrupt_shadow(vcpu, 0);
  6709	
  6710		kvm_load_guest_xsave_state(vcpu);
  6711	
  6712		pt_guest_enter(vmx);
  6713	
  6714		atomic_switch_perf_msrs(vmx);
  6715	
  6716		if (enable_preemption_timer)
  6717			vmx_update_hv_timer(vcpu);
  6718	
  6719		if (lapic_in_kernel(vcpu) &&
  6720			vcpu->arch.apic->lapic_timer.timer_advance_ns)
  6721			kvm_wait_lapic_expire(vcpu);
  6722	
  6723		/*
  6724		 * If this vCPU has touched SPEC_CTRL, restore the guest's value if
  6725		 * it's non-zero. Since vmentry is serialising on affected CPUs, there
  6726		 * is no need to worry about the conditional branch over the wrmsr
  6727		 * being speculatively taken.
  6728		 */
  6729		x86_spec_ctrl_set_guest(vmx->spec_ctrl, 0);
  6730	
  6731		/*
  6732		 * VMENTER enables interrupts (host state), but the kernel state is
  6733		 * interrupts disabled when this is invoked. Also tell RCU about
  6734		 * it. This is the same logic as for exit_to_user_mode().
  6735		 *
  6736		 * This ensures that e.g. latency analysis on the host observes
  6737		 * guest mode as interrupt enabled.
  6738		 *
  6739		 * guest_enter_irqoff() informs context tracking about the
  6740		 * transition to guest mode and if enabled adjusts RCU state
  6741		 * accordingly.
  6742		 */
  6743		trace_hardirqs_on_prepare();
  6744		lockdep_hardirqs_on_prepare(CALLER_ADDR0);
  6745		guest_enter_irqoff();
  6746		lockdep_hardirqs_on(CALLER_ADDR0);
  6747	
  6748		/* L1D Flush includes CPU buffer clear to mitigate MDS */
  6749		if (static_branch_unlikely(&vmx_l1d_should_flush))
  6750			vmx_l1d_flush(vcpu);
  6751		else if (static_branch_unlikely(&mds_user_clear))
  6752			mds_clear_cpu_buffers();
  6753	
  6754		if (vcpu->arch.cr2 != read_cr2())
  6755			write_cr2(vcpu->arch.cr2);
  6756	
  6757		vmx->fail = __vmx_vcpu_run(vmx, (unsigned long *)&vcpu->arch.regs,
  6758					   vmx->loaded_vmcs->launched);
  6759	
  6760		vcpu->arch.cr2 = read_cr2();
  6761	
  6762		/*
  6763		 * VMEXIT disables interrupts (host state), but tracing and lockdep
  6764		 * have them in state 'on' as recorded before entering guest mode.
  6765		 * Same as enter_from_user_mode().
  6766		 *
  6767		 * guest_exit_irqoff() restores host context and reinstates RCU if
  6768		 * enabled and required.
  6769		 *
  6770		 * This needs to be done before the below as native_read_msr()
  6771		 * contains a tracepoint and x86_spec_ctrl_restore_host() calls
  6772		 * into world and some more.
  6773		 */
  6774		lockdep_hardirqs_off(CALLER_ADDR0);
  6775		guest_exit_irqoff();
> 6776		trace_hardirqs_off_prepare();
  6777	
  6778		/*
  6779		 * We do not use IBRS in the kernel. If this vCPU has used the
  6780		 * SPEC_CTRL MSR it may have left it on; save the value and
  6781		 * turn it off. This is much more efficient than blindly adding
  6782		 * it to the atomic save/restore list. Especially as the former
  6783		 * (Saving guest MSRs on vmexit) doesn't even exist in KVM.
  6784		 *
  6785		 * For non-nested case:
  6786		 * If the L01 MSR bitmap does not intercept the MSR, then we need to
  6787		 * save it.
  6788		 *
  6789		 * For nested case:
  6790		 * If the L02 MSR bitmap does not intercept the MSR, then we need to
  6791		 * save it.
  6792		 */
  6793		if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
  6794			vmx->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
  6795	
  6796		x86_spec_ctrl_restore_host(vmx->spec_ctrl, 0);
  6797	
  6798		/* All fields are clean at this point */
  6799		if (static_branch_unlikely(&enable_evmcs))
  6800			current_evmcs->hv_clean_fields |=
  6801				HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL;
  6802	
  6803		if (static_branch_unlikely(&enable_evmcs))
  6804			current_evmcs->hv_vp_id = vcpu->arch.hyperv.vp_index;
  6805	
  6806		/* MSR_IA32_DEBUGCTLMSR is zeroed on vmexit. Restore it if needed */
  6807		if (vmx->host_debugctlmsr)
  6808			update_debugctlmsr(vmx->host_debugctlmsr);
  6809	

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org

[-- Attachment #2: config.gz --]
[-- Type: application/gzip, Size: 49219 bytes --]

^ permalink raw reply	[flat|nested] only message in thread

only message in thread, other threads:[~2020-07-06 18:27 UTC | newest]

Thread overview: (only message) (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-07-06 18:27 [tglx-devel:kvm 2/7] arch/x86/kvm/vmx/vmx.c:6776:2: error: implicit declaration of function 'trace_hardirqs_off_prepare'; did you mean kernel test robot

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.