@@ -645,7 +645,6 @@ oaknut::XReg A64Emitter::GetContextReg() { return X27; }
 oaknut::XReg A64Emitter::GetMembaseReg() { return X28; }
 
 void A64Emitter::ReloadContext() {
-  // mov(GetContextReg(), qword[rsp + StackLayout::GUEST_CTX_HOME]);
   LDR(GetContextReg(), SP, StackLayout::GUEST_CTX_HOME);
 }
 
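For context, `ReloadContext()` now restores the guest context pointer (X27, per `GetContextReg()`) from its home slot on the stack with a plain `LDR`. Below is a minimal, self-contained sketch of emitting the same kind of load with oaknut directly, assuming its README-style `CodeBlock`/`CodeGenerator` API; the `0x20` slot offset is a made-up stand-in for `StackLayout::GUEST_CTX_HOME`, not the real value.

```cpp
#include <oaknut/code_block.hpp>
#include <oaknut/oaknut.hpp>

int main() {
  using namespace oaknut::util;

  oaknut::CodeBlock mem{4096};            // backing executable memory
  oaknut::CodeGenerator code{mem.ptr()};

  mem.unprotect();
  // X27 <- [SP + 0x20]: reload the guest context pointer from its
  // stack home, as ReloadContext() does with GUEST_CTX_HOME.
  code.LDR(X27, SP, 0x20);
  code.RET();
  mem.protect();
  mem.invalidate_all();
}
```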
@@ -667,20 +666,13 @@ bool A64Emitter::ConstantFitsIn32Reg(uint64_t v) {
 
 void A64Emitter::MovMem64(const oaknut::XRegSp& addr, intptr_t offset,
                           uint64_t v) {
-  // if ((v & ~0x7FFFFFFF) == 0) {
-  //   // Fits under 31 bits, so just load using normal mov.
-  //   mov(qword[addr], v);
-  // } else if ((v & ~0x7FFFFFFF) == ~0x7FFFFFFF) {
-  //   // Negative number that fits in 32bits.
-  //   mov(qword[addr], v);
-  // } else if (!(v >> 32)) {
-  //   // All high bits are zero. It'd be nice if we had a way to load a 32bit
-  //   // immediate without sign extending!
-  //   // TODO(benvanik): this is super common, find a better way.
-  //   mov(dword[addr], static_cast<uint32_t>(v));
-  //   mov(dword[addr + 4], 0);
-  // } else
-  {
+  if (v == 0) {
+    STR(XZR, addr, offset);
+  } else if (!(v >> 32)) {
+    // All high bits are zero; a 32-bit MOV into W0 zero-extends into X0.
+    MOV(W0, static_cast<uint32_t>(v));
+    STR(X0, addr, offset);
+  } else {
     // 64bit number that needs double movs.
     MOV(X0, v);
     STR(X0, addr, offset);
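To make the branch selection concrete, here is a small standalone sketch (with a hypothetical `ClassifyImmediate` helper that is not part of the patch) of which store strategy `MovMem64` picks for a given immediate. Note that on AArch64 a write to a W register always zeroes the upper 32 bits of the corresponding X register, which is why the 32-bit path needs no separate store for the high half, unlike the old x64 code's two `dword` moves.

```cpp
#include <cassert>
#include <cstdint>

// Hypothetical helper mirroring the three branches of MovMem64 above.
enum class StoreKind { ZeroReg, Mov32, Mov64 };

StoreKind ClassifyImmediate(uint64_t v) {
  if (v == 0) return StoreKind::ZeroReg;    // STR XZR, [addr, #offset]
  if (!(v >> 32)) return StoreKind::Mov32;  // MOV W0, #imm32; STR X0, ...
  return StoreKind::Mov64;                  // MOV X0, #imm64; STR X0, ...
}

int main() {
  assert(ClassifyImmediate(0) == StoreKind::ZeroReg);
  assert(ClassifyImmediate(1) == StoreKind::Mov32);
  assert(ClassifyImmediate(0xFFFFFFFFull) == StoreKind::Mov32);   // high bits clear
  assert(ClassifyImmediate(0x100000000ull) == StoreKind::Mov64);  // needs 64-bit MOV
  assert(ClassifyImmediate(~0ull) == StoreKind::Mov64);
  return 0;
}
```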