Add a new JIT compiler for CPU code (#693)
* Start of the ARMeilleure project
* Refactoring around the old IRAdapter, now renamed to PreAllocator
* Optimize the LowestBitSet method
* Add CLZ support and fix CLS implementation
* Add missing Equals and GetHashCode overrides on some structs, misc small tweaks
* Implement the ByteSwap IR instruction, and some refactoring on the assembler
* Implement the DivideUI IR instruction and fix 64-bit IDIV
* Correct constant operand type on CSINC
* Move division instructions implementation to InstEmitDiv
* Fix destination type for the ConditionalSelect IR instruction
* Implement UMULH and SMULH, with new IR instructions
* Fix some issues with shift instructions
* Fix constant types for BFM instructions
* Fix up new tests using the new V128 struct
* Update tests
* Move DIV tests to a separate file
* Add support for calls, and some instructions that depend on them
* Start adding support for SIMD & FP types, along with some of the related ARM instructions
* Fix some typos and the divide instruction with FP operands
* Fix wrong method call on Clz_V
* Implement ARM FP & SIMD move instructions, Saddlv_V, and misc. fixes
* Implement SIMD logical instructions and more misc. fixes
* Fix PSRAD x86 instruction encoding, TRN, UABD and UABDL implementations
* Implement float conversion instruction, merge in LDj3SNuD fixes, and some other misc. fixes
* Implement SIMD shift instruction and fix Dup_V
* Add SCVTF and UCVTF (vector, fixed-point) variants to the opcode table
* Fix check with tolerance on tester
* Implement FP & SIMD comparison instructions, and some fixes
* Update FCVT (Scalar) encoding on the table to support the Half-float variants
* Support passing V128 structs, some cleanup on the register allocator, merge LDj3SNuD fixes
* Use old memory access methods, made a start on SIMD memory insts support, some fixes
* Fix float constant passed to functions, save and restore non-volatile XMM registers, other fixes
* Fix arguments count with struct return values, other fixes
* More instructions
* Misc. fixes and integrate LDj3SNuD fixes
* Update tests
* Add a faster linear scan allocator, unwinding support on windows, and other changes
* Update Ryujinx.HLE
* Update Ryujinx.Graphics
* Fix V128 return pointer passing, RCX is clobbered
* Update Ryujinx.Tests
* Update ITimeZoneService
* Stop using GetFunctionPointer as that can't be called from native code, misc. fixes and tweaks
* Use generic GetFunctionPointerForDelegate method and other tweaks
* Some refactoring on the code generator, assert on invalid operations and use a separate enum for intrinsics
* Remove some unused code on the assembler
* Fix REX.W prefix regression on float conversion instructions, add some sort of profiler
* Add hardware capability detection
* Fix regression on Sha1h and revert Fcm** changes
* Add SSE2-only paths on vector extract and insert, some refactoring on the pre-allocator
* Fix silly mistake introduced on last commit on CpuId
* Generate inline stack probes when the stack allocation is too large
* Initial support for the System-V ABI
* Support multiple destination operands
* Fix SSE2 VectorInsert8 path, and other fixes
* Change placement of XMM callee save and restore code to match other compilers
* Rename Dest to Destination and Inst to Instruction
* Fix a regression related to calls and the V128 type
* Add an extra space on comments to match code style
* Some refactoring
* Fix vector insert FP32 SSE2 path
* Port over the ARM32 instructions
* Avoid memory protection races on JIT Cache
* Another fix on VectorInsert FP32 (thanks to LDj3SNuD)
* Float operands don't need to use the same register when VEX is supported
* Add a new register allocator, higher quality code for hot code (tier up), and other tweaks
* Some nits, small improvements on the pre-allocator
* CpuThreadState is gone
* Allow changing CPU emulators with a config entry
* Add runtime identifiers on the ARMeilleure project
* Allow switching between CPUs through a config entry (pt. 2)
* Change win10-x64 to win-x64 on projects
* Update the Ryujinx project to use ARMeilleure
* Ensure that the selected register is valid on the hybrid allocator
* Allow exiting on returns to 0 (should fix test regression)
* Remove register assignments for most used variables on the hybrid allocator
* Do not use fixed registers as spill temp
* Add missing namespace and remove unneeded using
* Address PR feedback
* Fix types, etc
* Enable AssumeStrictAbiCompliance by default
* Ensure that Spill and Fill don't load or store any more than necessary
2019-08-08 14:56:22 -04:00
using ARMeilleure.Decoders;
using ARMeilleure.IntermediateRepresentation;
using ARMeilleure.Memory;
using ARMeilleure.Translation;
using System;

using static ARMeilleure.Instructions.InstEmitHelper;
using static ARMeilleure.IntermediateRepresentation.OperandHelper;

namespace ARMeilleure.Instructions
{
    static class InstEmitMemoryHelper
    {
        private enum Extension
        {
            Zx,
            Sx32,
            Sx64
        }

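        // Shared helpers for the memory instruction emitters (LDR/STR and friends).
        // "size" is the log2 of the access width in bytes: 0 = byte, 1 = halfword,
        // 2 = word, 3 = doubleword, 4 = 128-bit vector (SIMD accesses only).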
        public static void EmitLoadZx(ArmEmitterContext context, Operand address, int rt, int size)
        {
            EmitLoad(context, address, Extension.Zx, rt, size);
        }

        public static void EmitLoadSx32(ArmEmitterContext context, Operand address, int rt, int size)
        {
            EmitLoad(context, address, Extension.Sx32, rt, size);
        }

        public static void EmitLoadSx64(ArmEmitterContext context, Operand address, int rt, int size)
        {
            EmitLoad(context, address, Extension.Sx64, rt, size);
        }

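        // Dispatches the load to the integer or vector path and, for integer loads,
        // sign extends the loaded value to 32 or 64 bits when requested.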
        private static void EmitLoad(ArmEmitterContext context, Operand address, Extension ext, int rt, int size)
        {
            bool isSimd = IsSimd(context);

            if ((uint)size > (isSimd ? 4 : 3))
            {
                throw new ArgumentOutOfRangeException(nameof(size));
            }

            if (isSimd)
            {
                EmitReadVector(context, address, context.VectorZero(), rt, 0, size);
            }
            else
            {
                EmitReadInt(context, address, rt, size);
            }

            if (!isSimd)
            {
                Operand value = GetIntOrZR(context, rt);

                if (ext == Extension.Sx32 || ext == Extension.Sx64)
                {
                    OperandType destType = ext == Extension.Sx64 ? OperandType.I64 : OperandType.I32;

                    switch (size)
                    {
                        case 0: value = context.SignExtend8 (destType, value); break;
                        case 1: value = context.SignExtend16(destType, value); break;
                        case 2: value = context.SignExtend32(destType, value); break;
                    }
                }

                SetIntOrZR(context, rt, value);
            }
        }

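        // Loads a single element into the given vector, leaving its other elements intact.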
        public static void EmitLoadSimd(
            ArmEmitterContext context,
            Operand address,
            Operand vector,
            int rt,
            int elem,
            int size)
        {
            EmitReadVector(context, address, vector, rt, elem, size);
        }

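        // Store counterpart of the load helpers above: writes an integer or vector
        // register to memory, depending on the current opcode.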
        public static void EmitStore(ArmEmitterContext context, Operand address, int rt, int size)
        {
            bool isSimd = IsSimd(context);

            if ((uint)size > (isSimd ? 4 : 3))
            {
                throw new ArgumentOutOfRangeException(nameof(size));
            }

            if (isSimd)
            {
                EmitWriteVector(context, address, rt, 0, size);
            }
            else
            {
                EmitWriteInt(context, address, rt, size);
            }
        }

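        // Stores a single vector element (or the full vector when size is 4).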
        public static void EmitStoreSimd(
            ArmEmitterContext context,
            Operand address,
            int rt,
            int elem,
            int size)
        {
            EmitWriteVector(context, address, rt, elem, size);
        }

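        // A memory access takes the SIMD path when the opcode is a SIMD one, except for
        // the structure load/store forms (OpCodeSimdMemMs/OpCodeSimdMemSs), whose emitters
        // call EmitLoadSimd/EmitStoreSimd directly for each element instead.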
        private static bool IsSimd(ArmEmitterContext context)
        {
            return context.CurrOp is IOpCodeSimd &&
                 !(context.CurrOp is OpCodeSimdMemMs ||
                   context.CurrOp is OpCodeSimdMemSs);
        }

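        // Integer load with the address translation inlined: well-formed addresses take the
        // fast path through the page table, anything else falls back to a managed helper.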
        private static void EmitReadInt(ArmEmitterContext context, Operand address, int rt, int size)
        {
            Operand isUnalignedAddr = EmitAddressCheck(context, address, size);

            Operand lblFastPath = Label();
            Operand lblSlowPath = Label();
            Operand lblEnd      = Label();

            context.BranchIfFalse(lblFastPath, isUnalignedAddr);

            context.MarkLabel(lblSlowPath);

            EmitReadIntFallback(context, address, rt, size);

            context.Branch(lblEnd);

            context.MarkLabel(lblFastPath);

            Operand physAddr = EmitPtPointerLoad(context, address, lblSlowPath);

            Operand value = null;

            switch (size)
            {
                case 0:
                    value = context.Load8(physAddr);
                    break;

                case 1:
                    value = context.Load16(physAddr);
                    break;

                case 2:
                    value = context.Load(OperandType.I32, physAddr);
                    break;

                case 3:
                    value = context.Load(OperandType.I64, physAddr);
                    break;
            }

            SetInt(context, rt, value);

            context.MarkLabel(lblEnd);
        }

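        // Vector counterpart of EmitReadInt, reading a single element (or the whole
        // 128-bit vector when size is 4) with the same fast/slow path split.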
        private static void EmitReadVector(
            ArmEmitterContext context,
            Operand address,
            Operand vector,
            int rt,
            int elem,
            int size)
        {
            Operand isUnalignedAddr = EmitAddressCheck(context, address, size);

            Operand lblFastPath = Label();
            Operand lblSlowPath = Label();
            Operand lblEnd      = Label();

            context.BranchIfFalse(lblFastPath, isUnalignedAddr);

            context.MarkLabel(lblSlowPath);

            EmitReadVectorFallback(context, address, vector, rt, elem, size);

            context.Branch(lblEnd);

            context.MarkLabel(lblFastPath);

            Operand physAddr = EmitPtPointerLoad(context, address, lblSlowPath);

            Operand value = null;

            switch (size)
            {
                case 0:
                    value = context.VectorInsert8(vector, context.Load8(physAddr), elem);
                    break;

                case 1:
                    value = context.VectorInsert16(vector, context.Load16(physAddr), elem);
                    break;

                case 2:
                    value = context.VectorInsert(vector, context.Load(OperandType.I32, physAddr), elem);
                    break;

                case 3:
                    value = context.VectorInsert(vector, context.Load(OperandType.I64, physAddr), elem);
                    break;

                case 4:
                    value = context.Load(OperandType.V128, physAddr);
                    break;
            }

            context.Copy(GetVec(rt), value);

            context.MarkLabel(lblEnd);
        }

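        // Builds a new vector with the given value in element 0 and all other elements zeroed.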
        private static Operand VectorCreate(ArmEmitterContext context, Operand value)
        {
            return context.VectorInsert(context.VectorZero(), value, 0);
        }

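        // Integer store with the address translation inlined, mirroring EmitReadInt.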
        private static void EmitWriteInt(ArmEmitterContext context, Operand address, int rt, int size)
        {
            Operand isUnalignedAddr = EmitAddressCheck(context, address, size);

            Operand lblFastPath = Label();
            Operand lblSlowPath = Label();
            Operand lblEnd      = Label();

            context.BranchIfFalse(lblFastPath, isUnalignedAddr);

            context.MarkLabel(lblSlowPath);

            EmitWriteIntFallback(context, address, rt, size);

            context.Branch(lblEnd);

            context.MarkLabel(lblFastPath);

            Operand physAddr = EmitPtPointerLoad(context, address, lblSlowPath);

            Operand value = GetInt(context, rt);

            if (size < 3 && value.Type == OperandType.I64)
            {
                value = context.ConvertI64ToI32(value);
            }

            switch (size)
            {
                case 0: context.Store8 (physAddr, value); break;
                case 1: context.Store16(physAddr, value); break;
                case 2: context.Store  (physAddr, value); break;
                case 3: context.Store  (physAddr, value); break;
            }

            context.MarkLabel(lblEnd);
        }

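        // Vector counterpart of EmitWriteInt, storing a single element (or the whole
        // vector when size is 4).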
        private static void EmitWriteVector(
            ArmEmitterContext context,
            Operand address,
            int rt,
            int elem,
            int size)
        {
            Operand isUnalignedAddr = EmitAddressCheck(context, address, size);

            Operand lblFastPath = Label();
            Operand lblSlowPath = Label();
            Operand lblEnd      = Label();

            context.BranchIfFalse(lblFastPath, isUnalignedAddr);

            context.MarkLabel(lblSlowPath);

            EmitWriteVectorFallback(context, address, rt, elem, size);

            context.Branch(lblEnd);

            context.MarkLabel(lblFastPath);

            Operand physAddr = EmitPtPointerLoad(context, address, lblSlowPath);

            Operand value = GetVec(rt);

            switch (size)
            {
                case 0:
                    context.Store8(physAddr, context.VectorExtract8(value, elem));
                    break;

                case 1:
                    context.Store16(physAddr, context.VectorExtract16(value, elem));
                    break;

                case 2:
                    context.Store(physAddr, context.VectorExtract(OperandType.FP32, value, elem));
                    break;

                case 3:
                    context.Store(physAddr, context.VectorExtract(OperandType.FP64, value, elem));
                    break;

                case 4:
                    context.Store(physAddr, value);
                    break;
            }

            context.MarkLabel(lblEnd);
        }

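        // Produces a value that is zero only when the address is inside the guest address
        // space and aligned for the access size: the mask combines the bits above the
        // address space with the low alignment bits. As an illustration (assuming a 36-bit
        // address space and size == 2, a 4-byte access): ~(AddressSpaceSize - 1) is
        // 0xFFFFFFF000000000 and (1 << 2) - 1 is 3, giving a mask of 0xFFFFFFF000000003;
        // a non-zero result of the AND sends the access down the slow path.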
        private static Operand EmitAddressCheck(ArmEmitterContext context, Operand address, int size)
        {
            long addressCheckMask = ~(context.Memory.AddressSpaceSize - 1);

            addressCheckMask |= (1u << size) - 1;

            return context.BitwiseAnd(address, Const(address.Type, addressCheckMask));
        }

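        // Inlines a walk of the software page table, translating a guest address to a host
        // address. Each level consumes PtLevelBits of the address, and the last loaded entry
        // holds the host page address plus status flags in its low bits; if any flag is set,
        // the translation cannot be used and the caller's fallback label is taken instead.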
        private static Operand EmitPtPointerLoad(ArmEmitterContext context, Operand address, Operand lblFallbackPath)
        {
            Operand pte = Const(context.Memory.PageTable.ToInt64());

            int bit = MemoryManager.PageBits;

            do
            {
                Operand addrPart = context.ShiftRightUI(address, Const(bit));

                bit += context.Memory.PtLevelBits;

                if (bit < context.Memory.AddressSpaceBits)
                {
                    addrPart = context.BitwiseAnd(addrPart, Const(addrPart.Type, context.Memory.PtLevelMask));
                }

                Operand pteOffset = context.ShiftLeft(addrPart, Const(3));

                if (pteOffset.Type == OperandType.I32)
                {
                    pteOffset = context.ZeroExtend32(OperandType.I64, pteOffset);
                }

                Operand pteAddress = context.Add(pte, pteOffset);

                pte = context.Load(OperandType.I64, pteAddress);
            }
            while (bit < context.Memory.AddressSpaceBits);

            Operand hasFlagSet = context.BitwiseAnd(pte, Const((long)MemoryManager.PteFlagsMask));

            context.BranchIfTrue(lblFallbackPath, hasFlagSet);

            Operand pageOffset = context.BitwiseAnd(address, Const(address.Type, MemoryManager.PageMask));

            if (pageOffset.Type == OperandType.I32)
            {
                pageOffset = context.ZeroExtend32(OperandType.I64, pageOffset);
            }

            Operand physAddr = context.Add(pte, pageOffset);

            return physAddr;
        }

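        // Slow path fallbacks: these call back into managed code through NativeInterface,
        // which performs the full, slower memory access.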
        private static void EmitReadIntFallback(ArmEmitterContext context, Operand address, int rt, int size)
        {
            Delegate fallbackMethodDlg = null;

            switch (size)
            {
                case 0: fallbackMethodDlg = new _U8_U64 (NativeInterface.ReadByte);   break;
                case 1: fallbackMethodDlg = new _U16_U64(NativeInterface.ReadUInt16); break;
                case 2: fallbackMethodDlg = new _U32_U64(NativeInterface.ReadUInt32); break;
                case 3: fallbackMethodDlg = new _U64_U64(NativeInterface.ReadUInt64); break;
            }

            SetInt(context, rt, context.Call(fallbackMethodDlg, address));
        }

        private static void EmitReadVectorFallback(
            ArmEmitterContext context,
            Operand address,
            Operand vector,
            int rt,
            int elem,
            int size)
        {
            Delegate fallbackMethodDlg = null;

            switch (size)
            {
                case 0: fallbackMethodDlg = new _U8_U64  (NativeInterface.ReadByte);      break;
                case 1: fallbackMethodDlg = new _U16_U64 (NativeInterface.ReadUInt16);    break;
                case 2: fallbackMethodDlg = new _U32_U64 (NativeInterface.ReadUInt32);    break;
                case 3: fallbackMethodDlg = new _U64_U64 (NativeInterface.ReadUInt64);    break;
                case 4: fallbackMethodDlg = new _V128_U64(NativeInterface.ReadVector128); break;
            }

            Operand value = context.Call(fallbackMethodDlg, address);

            switch (size)
            {
                case 0: value = context.VectorInsert8 (vector, value, elem); break;
                case 1: value = context.VectorInsert16(vector, value, elem); break;
                case 2: value = context.VectorInsert  (vector, value, elem); break;
                case 3: value = context.VectorInsert  (vector, value, elem); break;
            }

            context.Copy(GetVec(rt), value);
        }

        private static void EmitWriteIntFallback(ArmEmitterContext context, Operand address, int rt, int size)
        {
            Delegate fallbackMethodDlg = null;

            switch (size)
            {
                case 0: fallbackMethodDlg = new _Void_U64_U8 (NativeInterface.WriteByte);   break;
                case 1: fallbackMethodDlg = new _Void_U64_U16(NativeInterface.WriteUInt16); break;
                case 2: fallbackMethodDlg = new _Void_U64_U32(NativeInterface.WriteUInt32); break;
                case 3: fallbackMethodDlg = new _Void_U64_U64(NativeInterface.WriteUInt64); break;
            }

            Operand value = GetInt(context, rt);

            if (size < 3 && value.Type == OperandType.I64)
            {
                value = context.ConvertI64ToI32(value);
            }

            context.Call(fallbackMethodDlg, address, value);
        }

        private static void EmitWriteVectorFallback(
            ArmEmitterContext context,
            Operand address,
            int rt,
            int elem,
            int size)
        {
            Delegate fallbackMethodDlg = null;

            switch (size)
            {
                case 0: fallbackMethodDlg = new _Void_U64_U8  (NativeInterface.WriteByte);      break;
                case 1: fallbackMethodDlg = new _Void_U64_U16 (NativeInterface.WriteUInt16);    break;
                case 2: fallbackMethodDlg = new _Void_U64_U32 (NativeInterface.WriteUInt32);    break;
                case 3: fallbackMethodDlg = new _Void_U64_U64 (NativeInterface.WriteUInt64);    break;
                case 4: fallbackMethodDlg = new _Void_U64_V128(NativeInterface.WriteVector128); break;
            }

            Operand value = null;

            if (size < 4)
            {
                switch (size)
                {
                    case 0:
                        value = context.VectorExtract8(GetVec(rt), elem);
                        break;

                    case 1:
                        value = context.VectorExtract16(GetVec(rt), elem);
                        break;

                    case 2:
                        value = context.VectorExtract(OperandType.I32, GetVec(rt), elem);
                        break;

                    case 3:
                        value = context.VectorExtract(OperandType.I64, GetVec(rt), elem);
                        break;
                }
            }
            else
            {
                value = GetVec(rt);
            }

            context.Call(fallbackMethodDlg, address, value);
        }

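        // The AArch32 and AArch64 front ends use different register file accessors, so reads
        // and writes of the target register are routed based on the current opcode type.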
        private static Operand GetInt(ArmEmitterContext context, int rt)
        {
            return context.CurrOp is OpCode32 ? GetIntA32(context, rt) : GetIntOrZR(context, rt);
        }

        private static void SetInt(ArmEmitterContext context, int rt, Operand value)
        {
            if (context.CurrOp is OpCode32)
            {
                SetIntA32(context, rt, value);
            }
            else
            {
                SetIntOrZR(context, rt, value);
            }
        }
    }
}