From 8ddb0fcff9ec73aeef20b1288b4ab5e03cd0bd56 Mon Sep 17 00:00:00 2001
From: Freddy Ye
Date: Wed, 27 Dec 2023 09:01:55 +0800
Subject: [PATCH] [X86] Correct operand order of UWRMSR. (#76389)

---
 clang/lib/Headers/usermsrintrin.h           | 21 +++++++++++++++++++++
 llvm/lib/Target/X86/X86InstrSystem.td       |  4 ++--
 llvm/test/CodeGen/X86/usermsr-intrinsics.ll |  8 ++++----
 3 files changed, 27 insertions(+), 6 deletions(-)

diff --git a/clang/lib/Headers/usermsrintrin.h b/clang/lib/Headers/usermsrintrin.h
index 6d1424ad3b2edd..61388376706dc6 100644
--- a/clang/lib/Headers/usermsrintrin.h
+++ b/clang/lib/Headers/usermsrintrin.h
@@ -14,12 +14,33 @@
 #define __USERMSRINTRIN_H
 #ifdef __x86_64__
 
+/// Reads the contents of a 64-bit MSR specified in \a __A into \a dst.
+///
+/// This intrinsic corresponds to the URDMSR instruction.
+/// \param __A
+///    An unsigned long long.
+///
+/// \code{.operation}
+/// DEST := MSR[__A]
+/// \endcode
 static __inline__ unsigned long long
     __attribute__((__always_inline__, __nodebug__, __target__("usermsr")))
     _urdmsr(unsigned long long __A) {
   return __builtin_ia32_urdmsr(__A);
 }
 
+/// Writes the contents of \a __B into the 64-bit MSR specified in \a __A.
+///
+/// This intrinsic corresponds to the UWRMSR instruction.
+///
+/// \param __A
+///    An unsigned long long.
+/// \param __B
+///    An unsigned long long.
+///
+/// \code{.operation}
+/// MSR[__A] := __B
+/// \endcode
 static __inline__ void
     __attribute__((__always_inline__, __nodebug__, __target__("usermsr")))
     _uwrmsr(unsigned long long __A, unsigned long long __B) {
diff --git a/llvm/lib/Target/X86/X86InstrSystem.td b/llvm/lib/Target/X86/X86InstrSystem.td
index efb58c6102dd1d..699e5847e63fb9 100644
--- a/llvm/lib/Target/X86/X86InstrSystem.td
+++ b/llvm/lib/Target/X86/X86InstrSystem.td
@@ -446,11 +446,11 @@ let Predicates = [HasUSERMSR], mayLoad = 1 in {
 }
 let Predicates = [HasUSERMSR], mayStore = 1 in {
   def UWRMSRrr : I<0xf8, MRMSrcReg, (outs), (ins GR64:$src1, GR64:$src2),
-                   "uwrmsr\t{$src1, $src2|$src2, $src1}",
+                   "uwrmsr\t{$src2, $src1|$src1, $src2}",
                    [(int_x86_uwrmsr GR64:$src1, GR64:$src2)]>, T8, XS;
   def UWRMSRir : Ii32<0xf8, MRM0r, (outs), (ins GR64:$src, i64i32imm:$imm),
                       "uwrmsr\t{$src, $imm|$imm, $src}",
-                      [(int_x86_uwrmsr GR64:$src, i64immSExt32_su:$imm)]>, T_MAP7, XS, VEX;
+                      [(int_x86_uwrmsr i64immSExt32_su:$imm, GR64:$src)]>, T_MAP7, XS, VEX;
 }
 let Defs = [RAX, RDX], Uses = [ECX] in
   def RDPMC : I<0x33, RawFrm, (outs), (ins), "rdpmc", []>, TB;
diff --git a/llvm/test/CodeGen/X86/usermsr-intrinsics.ll b/llvm/test/CodeGen/X86/usermsr-intrinsics.ll
index 29801a494f498f..fa569affdd9ff3 100644
--- a/llvm/test/CodeGen/X86/usermsr-intrinsics.ll
+++ b/llvm/test/CodeGen/X86/usermsr-intrinsics.ll
@@ -35,7 +35,7 @@ declare i64 @llvm.x86.urdmsr(i64 %A)
 define void @test_int_x86_uwrmsr(i64 %A, i64 %B) nounwind {
 ; X64-LABEL: test_int_x86_uwrmsr:
 ; X64:       # %bb.0:
-; X64-NEXT:    uwrmsr %rdi, %rsi # encoding: [0xf3,0x0f,0x38,0xf8,0xfe]
+; X64-NEXT:    uwrmsr %rsi, %rdi # encoding: [0xf3,0x0f,0x38,0xf8,0xfe]
 ; X64-NEXT:    retq # encoding: [0xc3]
   call void @llvm.x86.uwrmsr(i64 %A, i64 %B)
   ret void
@@ -46,7 +46,7 @@ define void @test_int_x86_uwrmsr_const(i64 %A) nounwind {
 ; X64:       # %bb.0:
 ; X64-NEXT:    uwrmsr %rdi, $123 # encoding: [0xc4,0xe7,0x7a,0xf8,0xc7,0x7b,0x00,0x00,0x00]
 ; X64-NEXT:    retq # encoding: [0xc3]
-  call void @llvm.x86.uwrmsr(i64 %A, i64 123)
+  call void @llvm.x86.uwrmsr(i64 123, i64 %A)
   ret void
 }
 
@@ -55,9 +55,9 @@ define void @test_int_x86_uwrmsr_const_i64(i64 %A) nounwind {
 ; X64:       # %bb.0:
 ; X64-NEXT:    movabsq $8589934591, %rax # encoding: [0x48,0xb8,0xff,0xff,0xff,0xff,0x01,0x00,0x00,0x00]
 ; X64-NEXT:    # imm = 0x1FFFFFFFF
-; X64-NEXT:    uwrmsr %rdi, %rax # encoding: [0xf3,0x0f,0x38,0xf8,0xf8]
+; X64-NEXT:    uwrmsr %rdi, %rax # encoding: [0xf3,0x0f,0x38,0xf8,0xc7]
 ; X64-NEXT:    retq # encoding: [0xc3]
-  call void @llvm.x86.uwrmsr(i64 %A, i64 8589934591)
+  call void @llvm.x86.uwrmsr(i64 8589934591, i64 %A)
   ret void
 }
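
Note for readers of the patch: the C-level contract documented in usermsrintrin.h is unchanged. _urdmsr(addr) returns MSR[addr], and _uwrmsr(addr, value) performs MSR[addr] := value; what the patch corrects is the operand order in the emitted UWRMSR instruction and in the TableGen patterns and tests. The following usage sketch is not part of the patch; it assumes a toolchain and CPU with USERMSR support (e.g. clang with -musermsr) and pulls the intrinsics in through <x86gprintrin.h> rather than including usermsrintrin.h directly.

#include <x86gprintrin.h>

// Usage sketch only (not from the patch). Assumes USERMSR is available on the
// target CPU and enabled at compile time (e.g. clang -musermsr); the caller
// supplies whichever user-mode-accessible MSR index applies.
static unsigned long long add_to_user_msr(unsigned long long msr,
                                          unsigned long long delta) {
  unsigned long long old = _urdmsr(msr);  // old := MSR[msr]
  _uwrmsr(msr, old + delta);              // MSR[msr] := old + delta
  return old;
}

With the corrected X86InstrSystem.td definitions above, the _uwrmsr call lowers to uwrmsr with the value register printed first and the MSR address second in AT&T syntax, which is what the updated CHECK lines in usermsr-intrinsics.ll verify.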