Hi Kai,

I love your patch! Yet something to improve:

[auto build test ERROR on kvm/queue]
[cannot apply to vhost/linux-next v5.12-rc7 next-20210409]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch]
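(A minimal illustration of the '--base' suggestion above, not taken from this report: the output directory and the assumption that the series was written on top of a local kvm/queue branch are placeholders.)

        # embed base-commit information in the generated patches
        git format-patch --base=auto -o outgoing/ kvm/queue..HEAD

        # or name the exact commit the series applies to
        git format-patch --base=<base-commit-sha> -o outgoing/ kvm/queue..HEAD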
url:    https://github.com/0day-ci/linux/commits/Kai-Huang/KVM-SGX-virtualization-support-KVM-part/20210412-122425
base:   https://git.kernel.org/pub/scm/virt/kvm/kvm.git queue
config: x86_64-rhel-8.3-kselftests (attached as .config)
compiler: gcc-9 (Debian 9.3.0-22) 9.3.0
reproduce (this is a W=1 build):
        # https://github.com/0day-ci/linux/commit/ee406a5de64531c5ec7886a5097f5a832ad2b1e4
        git remote add linux-review https://github.com/0day-ci/linux
        git fetch --no-tags linux-review Kai-Huang/KVM-SGX-virtualization-support-KVM-part/20210412-122425
        git checkout ee406a5de64531c5ec7886a5097f5a832ad2b1e4
        # save the attached .config to linux build tree
        make W=1 ARCH=x86_64

If you fix the issue, kindly add following tag as appropriate
Reported-by: kernel test robot

All errors (new ones prefixed by >>):

   In file included from arch/x86/kvm/cpuid.c:22:
   arch/x86/kvm/cpuid.h: In function '__feature_translate':
   arch/x86/kvm/cpuid.h:128:21: error: 'X86_FEATURE_SGX1' undeclared (first use in this function); did you mean 'X86_FEATURE_SGX'?
     128 |         if (x86_feature == X86_FEATURE_SGX1)
         |                            ^~~~~~~~~~~~~~~~
         |                            X86_FEATURE_SGX
   arch/x86/kvm/cpuid.h:128:21: note: each undeclared identifier is reported only once for each function it appears in
   arch/x86/kvm/cpuid.h:130:26: error: 'X86_FEATURE_SGX2' undeclared (first use in this function); did you mean 'X86_FEATURE_SGX'?
     130 |         else if (x86_feature == X86_FEATURE_SGX2)
         |                                 ^~~~~~~~~~~~~~~~
         |                                 X86_FEATURE_SGX
   In file included from arch/x86/include/asm/thread_info.h:53,
                    from include/linux/thread_info.h:58,
                    from arch/x86/include/asm/preempt.h:7,
                    from include/linux/preempt.h:78,
                    from include/linux/percpu.h:6,
                    from include/linux/context_tracking_state.h:5,
                    from include/linux/hardirq.h:5,
                    from include/linux/kvm_host.h:7,
                    from arch/x86/kvm/cpuid.c:12:
   arch/x86/kvm/cpuid.c: In function 'kvm_set_cpu_caps':
   arch/x86/kvm/cpuid.c:57:32: error: 'X86_FEATURE_SGX1' undeclared (first use in this function); did you mean 'X86_FEATURE_SGX'?
      57 | #define SF(name) (boot_cpu_has(X86_FEATURE_##name) ? F(name) : 0)
         |                                ^~~~~~~~~~~~
   arch/x86/include/asm/cpufeature.h:121:24: note: in definition of macro 'cpu_has'
     121 |         (__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 1 : \
         |                               ^~~
   arch/x86/kvm/cpuid.c:57:19: note: in expansion of macro 'boot_cpu_has'
      57 | #define SF(name) (boot_cpu_has(X86_FEATURE_##name) ? F(name) : 0)
         |                   ^~~~~~~~~~~~
   arch/x86/kvm/cpuid.c:500:3: note: in expansion of macro 'SF'
     500 |                 SF(SGX1) | SF(SGX2)
         |                 ^~
   arch/x86/kvm/cpuid.c:57:32: error: 'X86_FEATURE_SGX2' undeclared (first use in this function); did you mean 'X86_FEATURE_SGX'?
      57 | #define SF(name) (boot_cpu_has(X86_FEATURE_##name) ? F(name) : 0)
         |                                ^~~~~~~~~~~~
   arch/x86/include/asm/cpufeature.h:121:24: note: in definition of macro 'cpu_has'
     121 |         (__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 1 : \
         |                               ^~~
   arch/x86/kvm/cpuid.c:57:19: note: in expansion of macro 'boot_cpu_has'
      57 | #define SF(name) (boot_cpu_has(X86_FEATURE_##name) ? F(name) : 0)
         |                   ^~~~~~~~~~~~
   arch/x86/kvm/cpuid.c:500:14: note: in expansion of macro 'SF'
     500 |                 SF(SGX1) | SF(SGX2)
         |                            ^~
   arch/x86/kvm/cpuid.c: In function '__do_cpuid_func':
   arch/x86/kvm/cpuid.c:838:17: error: 'SGX_MISC_EXINFO' undeclared (first use in this function)
     838 |                 entry->ebx &= SGX_MISC_EXINFO;
         |                               ^~~~~~~~~~~~~~~
   arch/x86/kvm/cpuid.c:851:17: error: 'SGX_ATTR_DEBUG' undeclared (first use in this function)
     851 |                 entry->eax &= SGX_ATTR_DEBUG | SGX_ATTR_MODE64BIT |
         |                               ^~~~~~~~~~~~~~
   arch/x86/kvm/cpuid.c:851:34: error: 'SGX_ATTR_MODE64BIT' undeclared (first use in this function)
     851 |                 entry->eax &= SGX_ATTR_DEBUG | SGX_ATTR_MODE64BIT |
         |                                                ^~~~~~~~~~~~~~~~~~
>> arch/x86/kvm/cpuid.c:852:10: error: 'SGX_ATTR_PROVISIONKEY' undeclared (first use in this function)
     852 |                               SGX_ATTR_PROVISIONKEY | SGX_ATTR_EINITTOKENKEY |
         |                               ^~~~~~~~~~~~~~~~~~~~~
   arch/x86/kvm/cpuid.c:852:34: error: 'SGX_ATTR_EINITTOKENKEY' undeclared (first use in this function)
     852 |                               SGX_ATTR_PROVISIONKEY | SGX_ATTR_EINITTOKENKEY |
         |                                                       ^~~~~~~~~~~~~~~~~~~~~~
   arch/x86/kvm/cpuid.c:853:10: error: 'SGX_ATTR_KSS' undeclared (first use in this function)
     853 |                               SGX_ATTR_KSS;
         |                               ^~~~~~~~~~~~
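Note: every undeclared identifier above (X86_FEATURE_SGX1, X86_FEATURE_SGX2, SGX_MISC_EXINFO and the SGX_ATTR_* flags) appears to belong to the x86/SGX side of this work, so the failure most likely means the kvm/queue base used for this build does not carry the prerequisite SGX patches, rather than pointing at a bug in arch/x86/kvm/cpuid.c itself. As a rough sketch only, the compiler expects definitions of this shape to be visible before cpuid.c is built; the header locations and bit positions shown here are illustrative assumptions, not values copied from the prerequisite series:

        /* sketch: X86_FEATURE_* bits are declared in arch/x86/include/asm/cpufeatures.h */
        #define X86_FEATURE_SGX1        ( 8*32 + 0)     /* illustrative word/bit only */
        #define X86_FEATURE_SGX2        ( 8*32 + 1)     /* illustrative word/bit only */

        /* sketch: SGX attribute/MISCSELECT masks from an SGX header (assumes <linux/bits.h>) */
        #define SGX_MISC_EXINFO         BIT(0)          /* illustrative bit only */
        #define SGX_ATTR_DEBUG          BIT_ULL(1)      /* illustrative bit only */
        #define SGX_ATTR_MODE64BIT      BIT_ULL(2)      /* illustrative bit only */
        #define SGX_ATTR_PROVISIONKEY   BIT_ULL(4)      /* illustrative bit only */
        #define SGX_ATTR_EINITTOKENKEY  BIT_ULL(5)      /* illustrative bit only */
        #define SGX_ATTR_KSS            BIT_ULL(7)      /* illustrative bit only */

If so, basing the series on a tree that already contains those definitions, or recording the dependency (for example with '--base' as suggested above), should make this report go away.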
vim +/SGX_ATTR_PROVISIONKEY +852 arch/x86/kvm/cpuid.c

   643
   644  static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
   645  {
   646          struct kvm_cpuid_entry2 *entry;
   647          int r, i, max_idx;
   648
   649          /* all calls to cpuid_count() should be made on the same cpu */
   650          get_cpu();
   651
   652          r = -E2BIG;
   653
   654          entry = do_host_cpuid(array, function, 0);
   655          if (!entry)
   656                  goto out;
   657
   658          switch (function) {
   659          case 0:
   660                  /* Limited to the highest leaf implemented in KVM. */
   661                  entry->eax = min(entry->eax, 0x1fU);
   662                  break;
   663          case 1:
   664                  cpuid_entry_override(entry, CPUID_1_EDX);
   665                  cpuid_entry_override(entry, CPUID_1_ECX);
   666                  break;
   667          case 2:
   668                  /*
   669                   * On ancient CPUs, function 2 entries are STATEFUL. That is,
   670                   * CPUID(function=2, index=0) may return different results each
   671                   * time, with the least-significant byte in EAX enumerating the
   672                   * number of times software should do CPUID(2, 0).
   673                   *
   674                   * Modern CPUs, i.e. every CPU KVM has *ever* run on are less
   675                   * idiotic. Intel's SDM states that EAX & 0xff "will always
   676                   * return 01H. Software should ignore this value and not
   677                   * interpret it as an informational descriptor", while AMD's
   678                   * APM states that CPUID(2) is reserved.
   679                   *
   680                   * WARN if a frankenstein CPU that supports virtualization and
   681                   * a stateful CPUID.0x2 is encountered.
   682                   */
   683                  WARN_ON_ONCE((entry->eax & 0xff) > 1);
   684                  break;
   685          /* functions 4 and 0x8000001d have additional index. */
   686          case 4:
   687          case 0x8000001d:
   688                  /*
   689                   * Read entries until the cache type in the previous entry is
   690                   * zero, i.e. indicates an invalid entry.
   691                   */
   692                  for (i = 1; entry->eax & 0x1f; ++i) {
   693                          entry = do_host_cpuid(array, function, i);
   694                          if (!entry)
   695                                  goto out;
   696                  }
   697                  break;
   698          case 6: /* Thermal management */
   699                  entry->eax = 0x4; /* allow ARAT */
   700                  entry->ebx = 0;
   701                  entry->ecx = 0;
   702                  entry->edx = 0;
   703                  break;
   704          /* function 7 has additional index. */
   705          case 7:
   706                  entry->eax = min(entry->eax, 1u);
   707                  cpuid_entry_override(entry, CPUID_7_0_EBX);
   708                  cpuid_entry_override(entry, CPUID_7_ECX);
   709                  cpuid_entry_override(entry, CPUID_7_EDX);
   710
   711                  /* KVM only supports 0x7.0 and 0x7.1, capped above via min(). */
   712                  if (entry->eax == 1) {
   713                          entry = do_host_cpuid(array, function, 1);
   714                          if (!entry)
   715                                  goto out;
   716
   717                          cpuid_entry_override(entry, CPUID_7_1_EAX);
   718                          entry->ebx = 0;
   719                          entry->ecx = 0;
   720                          entry->edx = 0;
   721                  }
   722                  break;
   723          case 9:
   724                  break;
   725          case 0xa: { /* Architectural Performance Monitoring */
   726                  struct x86_pmu_capability cap;
   727                  union cpuid10_eax eax;
   728                  union cpuid10_edx edx;
   729
   730                  perf_get_x86_pmu_capability(&cap);
   731
   732                  /*
   733                   * Only support guest architectural pmu on a host
   734                   * with architectural pmu.
   735                   */
   736                  if (!cap.version)
   737                          memset(&cap, 0, sizeof(cap));
   738
   739                  eax.split.version_id = min(cap.version, 2);
   740                  eax.split.num_counters = cap.num_counters_gp;
   741                  eax.split.bit_width = cap.bit_width_gp;
   742                  eax.split.mask_length = cap.events_mask_len;
   743
   744                  edx.split.num_counters_fixed = min(cap.num_counters_fixed, MAX_FIXED_COUNTERS);
   745                  edx.split.bit_width_fixed = cap.bit_width_fixed;
   746                  edx.split.anythread_deprecated = 1;
   747                  edx.split.reserved1 = 0;
   748                  edx.split.reserved2 = 0;
   749
   750                  entry->eax = eax.full;
   751                  entry->ebx = cap.events_mask;
   752                  entry->ecx = 0;
   753                  entry->edx = edx.full;
   754                  break;
   755          }
   756          /*
   757           * Per Intel's SDM, the 0x1f is a superset of 0xb,
   758           * thus they can be handled by common code.
   759           */
   760          case 0x1f:
   761          case 0xb:
   762                  /*
   763                   * Populate entries until the level type (ECX[15:8]) of the
   764                   * previous entry is zero. Note, CPUID EAX.{0x1f,0xb}.0 is
   765                   * the starting entry, filled by the primary do_host_cpuid().
   766                   */
   767                  for (i = 1; entry->ecx & 0xff00; ++i) {
   768                          entry = do_host_cpuid(array, function, i);
   769                          if (!entry)
   770                                  goto out;
   771                  }
   772                  break;
   773          case 0xd:
   774                  entry->eax &= supported_xcr0;
   775                  entry->ebx = xstate_required_size(supported_xcr0, false);
   776                  entry->ecx = entry->ebx;
   777                  entry->edx &= supported_xcr0 >> 32;
   778                  if (!supported_xcr0)
   779                          break;
   780
   781                  entry = do_host_cpuid(array, function, 1);
   782                  if (!entry)
   783                          goto out;
   784
   785                  cpuid_entry_override(entry, CPUID_D_1_EAX);
   786                  if (entry->eax & (F(XSAVES)|F(XSAVEC)))
   787                          entry->ebx = xstate_required_size(supported_xcr0 | supported_xss,
   788                                                            true);
   789                  else {
   790                          WARN_ON_ONCE(supported_xss != 0);
   791                          entry->ebx = 0;
   792                  }
   793                  entry->ecx &= supported_xss;
   794                  entry->edx &= supported_xss >> 32;
   795
   796                  for (i = 2; i < 64; ++i) {
   797                          bool s_state;
   798                          if (supported_xcr0 & BIT_ULL(i))
   799                                  s_state = false;
   800                          else if (supported_xss & BIT_ULL(i))
   801                                  s_state = true;
   802                          else
   803                                  continue;
   804
   805                          entry = do_host_cpuid(array, function, i);
   806                          if (!entry)
   807                                  goto out;
   808
   809                          /*
   810                           * The supported check above should have filtered out
   811                           * invalid sub-leafs. Only valid sub-leafs should
   812                           * reach this point, and they should have a non-zero
   813                           * save state size. Furthermore, check whether the
   814                           * processor agrees with supported_xcr0/supported_xss
   815                           * on whether this is an XCR0- or IA32_XSS-managed area.
   816                           */
   817                          if (WARN_ON_ONCE(!entry->eax || (entry->ecx & 0x1) != s_state)) {
   818                                  --array->nent;
   819                                  continue;
   820                          }
   821                          entry->edx = 0;
   822                  }
   823                  break;
   824          case 0x12:
   825                  /* Intel SGX */
   826                  if (!kvm_cpu_cap_has(X86_FEATURE_SGX)) {
   827                          entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
   828                          break;
   829                  }
   830
   831                  /*
   832                   * Index 0: Sub-features, MISCSELECT (a.k.a extended features)
   833                   * and max enclave sizes. The SGX sub-features and MISCSELECT
   834                   * are restricted by kernel and KVM capabilities (like most
   835                   * feature flags), while enclave size is unrestricted.
   836                   */
   837                  cpuid_entry_override(entry, CPUID_12_EAX);
   838                  entry->ebx &= SGX_MISC_EXINFO;
   839
   840                  entry = do_host_cpuid(array, function, 1);
   841                  if (!entry)
   842                          goto out;
   843
   844                  /*
   845                   * Index 1: SECS.ATTRIBUTES. ATTRIBUTES are restricted a la
   846                   * feature flags. Advertise all supported flags, including
   847                   * privileged attributes that require explicit opt-in from
   848                   * userspace. ATTRIBUTES.XFRM is not adjusted as userspace is
   849                   * expected to derive it from supported XCR0.
   850                   */
   851                  entry->eax &= SGX_ATTR_DEBUG | SGX_ATTR_MODE64BIT |
  >852                                SGX_ATTR_PROVISIONKEY | SGX_ATTR_EINITTOKENKEY |
   853                                SGX_ATTR_KSS;
   854                  entry->ebx &= 0;
   855                  break;
   856          /* Intel PT */
   857          case 0x14:
   858                  if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT)) {
   859                          entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
   860                          break;
   861                  }
   862
   863                  for (i = 1, max_idx = entry->eax; i <= max_idx; ++i) {
   864                          if (!do_host_cpuid(array, function, i))
   865                                  goto out;
   866                  }
   867                  break;
   868          case KVM_CPUID_SIGNATURE: {
   869                  static const char signature[12] = "KVMKVMKVM\0\0";
   870                  const u32 *sigptr = (const u32 *)signature;
   871                  entry->eax = KVM_CPUID_FEATURES;
   872                  entry->ebx = sigptr[0];
   873                  entry->ecx = sigptr[1];
   874                  entry->edx = sigptr[2];
   875                  break;
   876          }
   877          case KVM_CPUID_FEATURES:
   878                  entry->eax = (1 << KVM_FEATURE_CLOCKSOURCE) |
   879                               (1 << KVM_FEATURE_NOP_IO_DELAY) |
   880                               (1 << KVM_FEATURE_CLOCKSOURCE2) |
   881                               (1 << KVM_FEATURE_ASYNC_PF) |
   882                               (1 << KVM_FEATURE_PV_EOI) |
   883                               (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT) |
   884                               (1 << KVM_FEATURE_PV_UNHALT) |
   885                               (1 << KVM_FEATURE_PV_TLB_FLUSH) |
   886                               (1 << KVM_FEATURE_ASYNC_PF_VMEXIT) |
   887                               (1 << KVM_FEATURE_PV_SEND_IPI) |
   888                               (1 << KVM_FEATURE_POLL_CONTROL) |
   889                               (1 << KVM_FEATURE_PV_SCHED_YIELD) |
   890                               (1 << KVM_FEATURE_ASYNC_PF_INT);
   891
   892                  if (sched_info_on())
   893                          entry->eax |= (1 << KVM_FEATURE_STEAL_TIME);
   894
   895                  entry->ebx = 0;
   896                  entry->ecx = 0;
   897                  entry->edx = 0;
   898                  break;
   899          case 0x80000000:
   900                  entry->eax = min(entry->eax, 0x8000001f);
   901                  break;
   902          case 0x80000001:
   903                  cpuid_entry_override(entry, CPUID_8000_0001_EDX);
   904                  cpuid_entry_override(entry, CPUID_8000_0001_ECX);
   905                  break;
   906          case 0x80000006:
   907                  /* L2 cache and TLB: pass through host info. */
   908                  break;
   909          case 0x80000007: /* Advanced power management */
   910                  /* invariant TSC is CPUID.80000007H:EDX[8] */
   911                  entry->edx &= (1 << 8);
   912                  /* mask against host */
   913                  entry->edx &= boot_cpu_data.x86_power;
   914                  entry->eax = entry->ebx = entry->ecx = 0;
   915                  break;
   916          case 0x80000008: {
   917                  unsigned g_phys_as = (entry->eax >> 16) & 0xff;
   918                  unsigned virt_as = max((entry->eax >> 8) & 0xff, 48U);
   919                  unsigned phys_as = entry->eax & 0xff;
   920
   921                  if (!g_phys_as)
   922                          g_phys_as = phys_as;
   923                  entry->eax = g_phys_as | (virt_as << 8);
   924                  entry->edx = 0;
   925                  cpuid_entry_override(entry, CPUID_8000_0008_EBX);
   926                  break;
   927          }
   928          case 0x8000000A:
   929                  if (!kvm_cpu_cap_has(X86_FEATURE_SVM)) {
   930                          entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
   931                          break;
   932                  }
   933                  entry->eax = 1; /* SVM revision 1 */
   934                  entry->ebx = 8; /* Lets support 8 ASIDs in case we add proper
   935                                     ASID emulation to nested SVM */
   936                  entry->ecx = 0; /* Reserved */
   937                  cpuid_entry_override(entry, CPUID_8000_000A_EDX);
   938                  break;
   939          case 0x80000019:
   940                  entry->ecx = entry->edx = 0;
   941                  break;
   942          case 0x8000001a:
   943          case 0x8000001e:
   944                  break;
   945          /* Support memory encryption cpuid if host supports it */
   946          case 0x8000001F:
   947                  if (!boot_cpu_has(X86_FEATURE_SEV))
   948                          entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
   949                  break;
   950          /*Add support for Centaur's CPUID instruction*/
   951          case 0xC0000000:
   952                  /*Just support up to 0xC0000004 now*/
   953                  entry->eax = min(entry->eax, 0xC0000004);
   954                  break;
   955          case 0xC0000001:
   956                  cpuid_entry_override(entry, CPUID_C000_0001_EDX);
   957                  break;
   958          case 3: /* Processor serial number */
   959          case 5: /* MONITOR/MWAIT */
   960          case 0xC0000002:
   961          case 0xC0000003:
   962          case 0xC0000004:
   963          default:
   964                  entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
   965                  break;
   966          }
   967
   968          r = 0;
   969
   970  out:
   971          put_cpu();
   972
   973          return r;
   974  }
   975

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org