* Re: [PATCH kvm-unit-tests] Update AMD instructions to conform to LLVM assembler
[not found] <20190220202442.145494-1-pshier@google.com>
@ 2019-12-11 20:22 ` Jim Mattson
2019-12-12 0:40 ` Paolo Bonzini
2019-12-18 17:33 ` Paolo Bonzini
2019-12-14 19:05 ` Fangrui Song
1 sibling, 2 replies; 4+ messages in thread
From: Jim Mattson @ 2019-12-11 20:22 UTC (permalink / raw)
To: Peter Shier; +Cc: kvm list, drjones, Paolo Bonzini
On Wed, Feb 20, 2019 at 12:25 PM Peter Shier <pshier@google.com> wrote:
>
> The GNU assembler (gas) allows omitting operands where there is only a
> single choice e.g. "VMRUN rAX". The LLVM assembler requires those operands
> even though they are the default and only choice.
>
> In addition, LLVM does not allow a CLGI instruction with a terminating
> \n\t. Adding a ; separator after the instruction is a workaround.
>
> Signed-off-by: Peter Shier <pshier@google.com>
> Reviewed-by: Marc Orr <marcorr@google.com>
> Reviewed-by: Jim Mattson <jmattson@google.com>
> ---
> x86/svm.c | 16 ++++++++--------
> 1 file changed, 8 insertions(+), 8 deletions(-)
>
> diff --git a/x86/svm.c b/x86/svm.c
> index bc74e7c690a8..e5cb730b08cb 100644
> --- a/x86/svm.c
> +++ b/x86/svm.c
> @@ -154,7 +154,7 @@ static void vmcb_ident(struct vmcb *vmcb)
> struct descriptor_table_ptr desc_table_ptr;
>
> memset(vmcb, 0, sizeof(*vmcb));
> - asm volatile ("vmsave" : : "a"(vmcb_phys) : "memory");
> + asm volatile ("vmsave %0" : : "a"(vmcb_phys) : "memory");
> vmcb_set_seg(&save->es, read_es(), 0, -1U, data_seg_attr);
> vmcb_set_seg(&save->cs, read_cs(), 0, -1U, code_seg_attr);
> vmcb_set_seg(&save->ss, read_ss(), 0, -1U, data_seg_attr);
> @@ -262,20 +262,20 @@ static void test_run(struct test *test, struct vmcb *vmcb)
> do {
> tsc_start = rdtsc();
> asm volatile (
> - "clgi \n\t"
> - "vmload \n\t"
> + "clgi;\n\t" // semi-colon needed for LLVM compatibility
> + "vmload %0\n\t"
> "mov regs+0x80, %%r15\n\t" // rflags
> "mov %%r15, 0x170(%0)\n\t"
> "mov regs, %%r15\n\t" // rax
> "mov %%r15, 0x1f8(%0)\n\t"
> LOAD_GPR_C
> - "vmrun \n\t"
> + "vmrun %0\n\t"
> SAVE_GPR_C
> "mov 0x170(%0), %%r15\n\t" // rflags
> "mov %%r15, regs+0x80\n\t"
> "mov 0x1f8(%0), %%r15\n\t" // rax
> "mov %%r15, regs\n\t"
> - "vmsave \n\t"
> + "vmsave %0\n\t"
> "stgi"
> : : "a"(vmcb_phys)
> : "rbx", "rcx", "rdx", "rsi",
> @@ -330,7 +330,7 @@ static bool check_no_vmrun_int(struct test *test)
>
> static void test_vmrun(struct test *test)
> {
> - asm volatile ("vmrun" : : "a"(virt_to_phys(test->vmcb)));
> + asm volatile ("vmrun %0" : : "a"(virt_to_phys(test->vmcb)));
> }
>
> static bool check_vmrun(struct test *test)
> @@ -1241,7 +1241,7 @@ static bool lat_svm_insn_finished(struct test *test)
>
> for ( ; runs != 0; runs--) {
> tsc_start = rdtsc();
> - asm volatile("vmload\n\t" : : "a"(vmcb_phys) : "memory");
> + asm volatile("vmload %0\n\t" : : "a"(vmcb_phys) : "memory");
> cycles = rdtsc() - tsc_start;
> if (cycles > latvmload_max)
> latvmload_max = cycles;
> @@ -1250,7 +1250,7 @@ static bool lat_svm_insn_finished(struct test *test)
> vmload_sum += cycles;
>
> tsc_start = rdtsc();
> - asm volatile("vmsave\n\t" : : "a"(vmcb_phys) : "memory");
> + asm volatile("vmsave %0\n\t" : : "a"(vmcb_phys) : "memory");
> cycles = rdtsc() - tsc_start;
> if (cycles > latvmsave_max)
> latvmsave_max = cycles;
Ping.
^ permalink raw reply [flat|nested] 4+ messages in thread
* Re: [PATCH kvm-unit-tests] Update AMD instructions to conform to LLVM assembler
2019-12-11 20:22 ` [PATCH kvm-unit-tests] Update AMD instructions to conform to LLVM assembler Jim Mattson
@ 2019-12-12 0:40 ` Paolo Bonzini
2019-12-18 17:33 ` Paolo Bonzini
1 sibling, 0 replies; 4+ messages in thread
From: Paolo Bonzini @ 2019-12-12 0:40 UTC (permalink / raw)
To: Jim Mattson, Peter Shier; +Cc: kvm list, drjones
On 11/12/19 21:22, Jim Mattson wrote:
> On Wed, Feb 20, 2019 at 12:25 PM Peter Shier <pshier@google.com> wrote:
>>
>> The GNU assembler (gas) allows omitting operands where there is only a
>> single choice e.g. "VMRUN rAX". The LLVM assembler requires those operands
>> even though they are the default and only choice.
>>
>> In addition, LLVM does not allow a CLGI instruction with a terminating
>> \n\t. Adding a ; separator after the instruction is a workaround.
>>
>> Signed-off-by: Peter Shier <pshier@google.com>
>> Reviewed-by: Marc Orr <marcorr@google.com>
>> Reviewed-by: Jim Mattson <jmattson@google.com>
>> ---
>> x86/svm.c | 16 ++++++++--------
>> 1 file changed, 8 insertions(+), 8 deletions(-)
>>
>> diff --git a/x86/svm.c b/x86/svm.c
>> index bc74e7c690a8..e5cb730b08cb 100644
>> --- a/x86/svm.c
>> +++ b/x86/svm.c
>> @@ -154,7 +154,7 @@ static void vmcb_ident(struct vmcb *vmcb)
>> struct descriptor_table_ptr desc_table_ptr;
>>
>> memset(vmcb, 0, sizeof(*vmcb));
>> - asm volatile ("vmsave" : : "a"(vmcb_phys) : "memory");
>> + asm volatile ("vmsave %0" : : "a"(vmcb_phys) : "memory");
>> vmcb_set_seg(&save->es, read_es(), 0, -1U, data_seg_attr);
>> vmcb_set_seg(&save->cs, read_cs(), 0, -1U, code_seg_attr);
>> vmcb_set_seg(&save->ss, read_ss(), 0, -1U, data_seg_attr);
>> @@ -262,20 +262,20 @@ static void test_run(struct test *test, struct vmcb *vmcb)
>> do {
>> tsc_start = rdtsc();
>> asm volatile (
>> - "clgi \n\t"
>> - "vmload \n\t"
>> + "clgi;\n\t" // semi-colon needed for LLVM compatibility
>> + "vmload %0\n\t"
>> "mov regs+0x80, %%r15\n\t" // rflags
>> "mov %%r15, 0x170(%0)\n\t"
>> "mov regs, %%r15\n\t" // rax
>> "mov %%r15, 0x1f8(%0)\n\t"
>> LOAD_GPR_C
>> - "vmrun \n\t"
>> + "vmrun %0\n\t"
>> SAVE_GPR_C
>> "mov 0x170(%0), %%r15\n\t" // rflags
>> "mov %%r15, regs+0x80\n\t"
>> "mov 0x1f8(%0), %%r15\n\t" // rax
>> "mov %%r15, regs\n\t"
>> - "vmsave \n\t"
>> + "vmsave %0\n\t"
>> "stgi"
>> : : "a"(vmcb_phys)
>> : "rbx", "rcx", "rdx", "rsi",
>> @@ -330,7 +330,7 @@ static bool check_no_vmrun_int(struct test *test)
>>
>> static void test_vmrun(struct test *test)
>> {
>> - asm volatile ("vmrun" : : "a"(virt_to_phys(test->vmcb)));
>> + asm volatile ("vmrun %0" : : "a"(virt_to_phys(test->vmcb)));
>> }
>>
>> static bool check_vmrun(struct test *test)
>> @@ -1241,7 +1241,7 @@ static bool lat_svm_insn_finished(struct test *test)
>>
>> for ( ; runs != 0; runs--) {
>> tsc_start = rdtsc();
>> - asm volatile("vmload\n\t" : : "a"(vmcb_phys) : "memory");
>> + asm volatile("vmload %0\n\t" : : "a"(vmcb_phys) : "memory");
>> cycles = rdtsc() - tsc_start;
>> if (cycles > latvmload_max)
>> latvmload_max = cycles;
>> @@ -1250,7 +1250,7 @@ static bool lat_svm_insn_finished(struct test *test)
>> vmload_sum += cycles;
>>
>> tsc_start = rdtsc();
>> - asm volatile("vmsave\n\t" : : "a"(vmcb_phys) : "memory");
>> + asm volatile("vmsave %0\n\t" : : "a"(vmcb_phys) : "memory");
>> cycles = rdtsc() - tsc_start;
>> if (cycles > latvmsave_max)
>> latvmsave_max = cycles;
>
> Ping.
>
I am applying it, but I'm seriously puzzled by the clgi one. Can you
open a bug on LLVM?
Paolo
^ permalink raw reply [flat|nested] 4+ messages in thread
* Re: [PATCH kvm-unit-tests] Update AMD instructions to conform to LLVM assembler
[not found] <20190220202442.145494-1-pshier@google.com>
2019-12-11 20:22 ` [PATCH kvm-unit-tests] Update AMD instructions to conform to LLVM assembler Jim Mattson
@ 2019-12-14 19:05 ` Fangrui Song
1 sibling, 0 replies; 4+ messages in thread
From: Fangrui Song @ 2019-12-14 19:05 UTC (permalink / raw)
To: Peter Shier; +Cc: kvm, drjones, Marc Orr, Jim Mattson
> I am applying it, but I'm seriously puzzled by the clgi one. Can you
> open a bug on LLVM?
>
> Paolo
The clgi change does not appear to be needed.
- "clgi \n\t"
+ "clgi;\n\t"
clang 7.0.0, 8.0.0 (downloaded from releases.llvm.org) and HEAD (built from source) compile
"clgi" fine.
% ~/ccls/build/clang+llvm-7.0.0-x86_64-linux-gnu-ubuntu-16.04/bin/clang -mno-red-zone -mno-sse -mno-sse2 -m64 -O1 -g -MMD -MF x86/.emulator.d -Wall -Wwrite-strings -Wempty-body -Wuninitialized -Wignored-qualifiers -fno-omit-frame-pointer -fno-pic -Woverride-init -Wmissing-prototypes -Wstrict-prototypes -std=gnu99 -ffreestanding -I /tmp/p/kvm-unit-tests/lib -I /tmp/p/kvm-unit-tests/lib/x86 -I lib -c -o x86/svm.o x86/svm.c -w
# succeeded
^ permalink raw reply [flat|nested] 4+ messages in thread
* Re: [PATCH kvm-unit-tests] Update AMD instructions to conform to LLVM assembler
2019-12-11 20:22 ` [PATCH kvm-unit-tests] Update AMD instructions to conform to LLVM assembler Jim Mattson
2019-12-12 0:40 ` Paolo Bonzini
@ 2019-12-18 17:33 ` Paolo Bonzini
1 sibling, 0 replies; 4+ messages in thread
From: Paolo Bonzini @ 2019-12-18 17:33 UTC (permalink / raw)
To: Jim Mattson, Peter Shier; +Cc: kvm list, drjones
On 11/12/19 21:22, Jim Mattson wrote:
> On Wed, Feb 20, 2019 at 12:25 PM Peter Shier <pshier@google.com> wrote:
>>
>> The GNU assembler (gas) allows omitting operands where there is only a
>> single choice e.g. "VMRUN rAX". The LLVM assembler requires those operands
>> even though they are the default and only choice.
>>
>> In addition, LLVM does not allow a CLGI instruction with a terminating
>> \n\t. Adding a ; separator after the instruction is a workaround.
>>
>> Signed-off-by: Peter Shier <pshier@google.com>
>> Reviewed-by: Marc Orr <marcorr@google.com>
>> Reviewed-by: Jim Mattson <jmattson@google.com>
>> ---
>> x86/svm.c | 16 ++++++++--------
>> 1 file changed, 8 insertions(+), 8 deletions(-)
>>
>> diff --git a/x86/svm.c b/x86/svm.c
>> index bc74e7c690a8..e5cb730b08cb 100644
>> --- a/x86/svm.c
>> +++ b/x86/svm.c
>> @@ -154,7 +154,7 @@ static void vmcb_ident(struct vmcb *vmcb)
>> struct descriptor_table_ptr desc_table_ptr;
>>
>> memset(vmcb, 0, sizeof(*vmcb));
>> - asm volatile ("vmsave" : : "a"(vmcb_phys) : "memory");
>> + asm volatile ("vmsave %0" : : "a"(vmcb_phys) : "memory");
>> vmcb_set_seg(&save->es, read_es(), 0, -1U, data_seg_attr);
>> vmcb_set_seg(&save->cs, read_cs(), 0, -1U, code_seg_attr);
>> vmcb_set_seg(&save->ss, read_ss(), 0, -1U, data_seg_attr);
>> @@ -262,20 +262,20 @@ static void test_run(struct test *test, struct vmcb *vmcb)
>> do {
>> tsc_start = rdtsc();
>> asm volatile (
>> - "clgi \n\t"
>> - "vmload \n\t"
>> + "clgi;\n\t" // semi-colon needed for LLVM compatibility
>> + "vmload %0\n\t"
>> "mov regs+0x80, %%r15\n\t" // rflags
>> "mov %%r15, 0x170(%0)\n\t"
>> "mov regs, %%r15\n\t" // rax
>> "mov %%r15, 0x1f8(%0)\n\t"
>> LOAD_GPR_C
>> - "vmrun \n\t"
>> + "vmrun %0\n\t"
>> SAVE_GPR_C
>> "mov 0x170(%0), %%r15\n\t" // rflags
>> "mov %%r15, regs+0x80\n\t"
>> "mov 0x1f8(%0), %%r15\n\t" // rax
>> "mov %%r15, regs\n\t"
>> - "vmsave \n\t"
>> + "vmsave %0\n\t"
>> "stgi"
>> : : "a"(vmcb_phys)
>> : "rbx", "rcx", "rdx", "rsi",
>> @@ -330,7 +330,7 @@ static bool check_no_vmrun_int(struct test *test)
>>
>> static void test_vmrun(struct test *test)
>> {
>> - asm volatile ("vmrun" : : "a"(virt_to_phys(test->vmcb)));
>> + asm volatile ("vmrun %0" : : "a"(virt_to_phys(test->vmcb)));
>> }
>>
>> static bool check_vmrun(struct test *test)
>> @@ -1241,7 +1241,7 @@ static bool lat_svm_insn_finished(struct test *test)
>>
>> for ( ; runs != 0; runs--) {
>> tsc_start = rdtsc();
>> - asm volatile("vmload\n\t" : : "a"(vmcb_phys) : "memory");
>> + asm volatile("vmload %0\n\t" : : "a"(vmcb_phys) : "memory");
>> cycles = rdtsc() - tsc_start;
>> if (cycles > latvmload_max)
>> latvmload_max = cycles;
>> @@ -1250,7 +1250,7 @@ static bool lat_svm_insn_finished(struct test *test)
>> vmload_sum += cycles;
>>
>> tsc_start = rdtsc();
>> - asm volatile("vmsave\n\t" : : "a"(vmcb_phys) : "memory");
>> + asm volatile("vmsave %0\n\t" : : "a"(vmcb_phys) : "memory");
>> cycles = rdtsc() - tsc_start;
>> if (cycles > latvmsave_max)
>> latvmsave_max = cycles;
>
> Ping.
>
Applied.
Paolo
^ permalink raw reply [flat|nested] 4+ messages in thread
end of thread, other threads:[~2019-12-18 17:33 UTC | newest]
Thread overview: 4+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
[not found] <20190220202442.145494-1-pshier@google.com>
2019-12-11 20:22 ` [PATCH kvm-unit-tests] Update AMD instructions to conform to LLVM assembler Jim Mattson
2019-12-12 0:40 ` Paolo Bonzini
2019-12-18 17:33 ` Paolo Bonzini
2019-12-14 19:05 ` Fangrui Song
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).