Initial Apple Hypervisor based CPU emulation (#4332)

* Initial Apple Hypervisor based CPU emulation implementation

* Add UseHypervisor Setting

* Add basic macOS support to Avalonia

* Fix initialization

* Fix GTK build

* Fix/silence warnings

* Change exceptions to asserts on HvAddressSpaceRange

* Replace DllImport with LibraryImport

* Fix LibraryImport

* Remove unneeded usings

* Revert outdated change

* Set DiskCacheLoadState when using hypervisor too

* Fix HvExecutionContext PC value

* Address PR feedback

* Use existing entitlements.xml file on distribution folder

---------

Co-authored-by: riperiperi <rhy3756547@hotmail.com>
gdkchan, 2023-01-29 08:37:52 -03:00 (committed by GitHub)
parent c7f9962dde
commit a53cfdab78
33 changed files with 2939 additions and 45 deletions

@@ -0,0 +1,27 @@
namespace Ryujinx.Cpu.AppleHv.Arm
{
enum ApFlags : ulong
{
ApShift = 6,
PxnShift = 53,
UxnShift = 54,
UserExecuteKernelReadWriteExecute = (0UL << (int)ApShift),
UserReadWriteExecuteKernelReadWrite = (1UL << (int)ApShift),
UserExecuteKernelReadExecute = (2UL << (int)ApShift),
UserReadExecuteKernelReadExecute = (3UL << (int)ApShift),
UserExecuteKernelReadWrite = (1UL << (int)PxnShift) | (0UL << (int)ApShift),
UserExecuteKernelRead = (1UL << (int)PxnShift) | (2UL << (int)ApShift),
UserReadExecuteKernelRead = (1UL << (int)PxnShift) | (3UL << (int)ApShift),
UserNoneKernelReadWriteExecute = (1UL << (int)UxnShift) | (0UL << (int)ApShift),
UserReadWriteKernelReadWrite = (1UL << (int)UxnShift) | (1UL << (int)ApShift),
UserNoneKernelReadExecute = (1UL << (int)UxnShift) | (2UL << (int)ApShift),
UserReadKernelReadExecute = (1UL << (int)UxnShift) | (3UL << (int)ApShift),
UserNoneKernelReadWrite = (1UL << (int)PxnShift) | (1UL << (int)UxnShift) | (0UL << (int)ApShift),
UserNoneKernelRead = (1UL << (int)PxnShift) | (1UL << (int)UxnShift) | (2UL << (int)ApShift),
UserReadKernelRead = (1UL << (int)PxnShift) | (1UL << (int)UxnShift) | (3UL << (int)ApShift)
}
}
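
Each value above is just an access-permission (AP) field combined with the execute-never bits for EL0 ("user") and EL1 ("kernel"). A minimal sketch of the decomposition, using only the shifts defined in this enum:

```csharp
// AP occupies descriptor bits [7:6]; PXN (bit 53) forbids EL1 execution,
// UXN (bit 54) forbids EL0 execution.
ulong value = (1UL << 53)   // PXN
            | (1UL << 54)   // UXN
            | (3UL << 6);   // AP = 0b11: read-only at both EL0 and EL1
System.Diagnostics.Debug.Assert(value == (ulong)ApFlags.UserReadKernelRead);
```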

@@ -0,0 +1,47 @@
namespace Ryujinx.Cpu.AppleHv.Arm
{
enum ExceptionClass
{
Unknown = 0b000000,
TrappedWfeWfiWfetWfit = 0b000001,
TrappedMcrMrcCp15 = 0b000011,
TrappedMcrrMrrcCp15 = 0b000100,
TrappedMcrMrcCp14 = 0b000101,
TrappedLdcStc = 0b000110,
TrappedSveFpSimd = 0b000111,
TrappedVmrs = 0b001000,
TrappedPAuth = 0b001001,
TrappedLd64bSt64bSt64bvSt64bv0 = 0b001010,
TrappedMrrcCp14 = 0b001100,
IllegalExecutionState = 0b001110,
SvcAarch32 = 0b010001,
HvcAarch32 = 0b010010,
SmcAarch32 = 0b010011,
SvcAarch64 = 0b010101,
HvcAarch64 = 0b010110,
SmcAarch64 = 0b010111,
TrappedMsrMrsSystem = 0b011000,
TrappedSve = 0b011001,
TrappedEretEretaaEretab = 0b011010,
PointerAuthenticationFailure = 0b011100,
ImplementationDefinedEl3 = 0b011111,
InstructionAbortLowerEl = 0b100000,
InstructionAbortSameEl = 0b100001,
PcAlignmentFault = 0b100010,
DataAbortLowerEl = 0b100100,
DataAbortSameEl = 0b100101,
SpAlignmentFault = 0b100110,
TrappedFpExceptionAarch32 = 0b101000,
TrappedFpExceptionAarch64 = 0b101100,
SErrorInterrupt = 0b101111,
BreakpointLowerEl = 0b110000,
BreakpointSameEl = 0b110001,
SoftwareStepLowerEl = 0b110010,
SoftwareStepSameEl = 0b110011,
WatchpointLowerEl = 0b110100,
WatchpointSameEl = 0b110101,
BkptAarch32 = 0b111000,
VectorCatchAarch32 = 0b111010,
BrkAarch64 = 0b111100
}
}
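
These are the standard AArch64 exception class (EC) codes, taken from bits [31:26] of ESR_ELx. A small sketch of the decode the handlers later in this commit perform (the syndrome value is illustrative):

```csharp
ulong esr = 0x56000000UL;               // illustrative syndrome value
var ec = (ExceptionClass)(esr >> 26);   // EC field: ESR[31:26]
// ec == ExceptionClass.SvcAarch64, i.e. an SVC taken from AArch64 state.
```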

@@ -0,0 +1,17 @@
using System;
namespace Ryujinx.Cpu.AppleHv
{
public class DummyDiskCacheLoadState : IDiskCacheLoadState
{
#pragma warning disable CS0067
/// <inheritdoc/>
public event Action<LoadState, int, int> StateChanged;
#pragma warning restore CS0067
/// <inheritdoc/>
public void Cancel()
{
}
}
}

@@ -0,0 +1,129 @@
using Ryujinx.Cpu.AppleHv.Arm;
using Ryujinx.Memory;
using System;
namespace Ryujinx.Cpu.AppleHv
{
class HvAddressSpace : IDisposable
{
private const ulong KernelRegionBase = unchecked((ulong)-(1L << 39));
private const ulong KernelRegionCodeOffset = 0UL;
private const ulong KernelRegionCodeSize = 0x2000UL;
private const ulong KernelRegionTlbiEretOffset = KernelRegionCodeOffset + 0x1000UL;
private const ulong KernelRegionEretOffset = KernelRegionTlbiEretOffset + 4UL;
public const ulong KernelRegionEretAddress = KernelRegionBase + KernelRegionEretOffset;
public const ulong KernelRegionTlbiEretAddress = KernelRegionBase + KernelRegionTlbiEretOffset;
private const ulong AllocationGranule = 1UL << 14;
private readonly ulong _asBase;
private readonly ulong _asSize;
private readonly ulong _backingSize;
private readonly HvAddressSpaceRange _userRange;
private readonly HvAddressSpaceRange _kernelRange;
private MemoryBlock _kernelCodeBlock;
public HvAddressSpace(MemoryBlock backingMemory, ulong asSize)
{
(_asBase, var ipaAllocator) = HvVm.CreateAddressSpace(backingMemory);
_asSize = asSize;
_backingSize = backingMemory.Size;
_userRange = new HvAddressSpaceRange(ipaAllocator);
_kernelRange = new HvAddressSpaceRange(ipaAllocator);
_kernelCodeBlock = new MemoryBlock(AllocationGranule);
InitializeKernelCode(ipaAllocator);
}
private void InitializeKernelCode(HvIpaAllocator ipaAllocator)
{
// Write exception handlers.
for (ulong offset = 0; offset < 0x800; offset += 0x80)
{
// Offsets:
// 0x0: Synchronous
// 0x80: IRQ
// 0x100: FIQ
// 0x180: SError
_kernelCodeBlock.Write(KernelRegionCodeOffset + offset, 0xD41FFFE2u); // HVC #0xFFFF
_kernelCodeBlock.Write(KernelRegionCodeOffset + offset + 4, 0xD69F03E0u); // ERET
}
_kernelCodeBlock.Write(KernelRegionTlbiEretOffset, 0xD508831Fu); // TLBI VMALLE1IS
_kernelCodeBlock.Write(KernelRegionEretOffset, 0xD69F03E0u); // ERET
ulong kernelCodePa = ipaAllocator.Allocate(AllocationGranule);
HvApi.hv_vm_map((ulong)_kernelCodeBlock.Pointer, kernelCodePa, AllocationGranule, hv_memory_flags_t.HV_MEMORY_READ | hv_memory_flags_t.HV_MEMORY_EXEC).ThrowOnError();
_kernelRange.Map(KernelRegionCodeOffset, kernelCodePa, KernelRegionCodeSize, ApFlags.UserNoneKernelReadExecute);
}
public void InitializeMmu(ulong vcpu)
{
HvApi.hv_vcpu_set_sys_reg(vcpu, hv_sys_reg_t.HV_SYS_REG_VBAR_EL1, KernelRegionBase + KernelRegionCodeOffset);
HvApi.hv_vcpu_set_sys_reg(vcpu, hv_sys_reg_t.HV_SYS_REG_TTBR0_EL1, _userRange.GetIpaBase());
HvApi.hv_vcpu_set_sys_reg(vcpu, hv_sys_reg_t.HV_SYS_REG_TTBR1_EL1, _kernelRange.GetIpaBase());
HvApi.hv_vcpu_set_sys_reg(vcpu, hv_sys_reg_t.HV_SYS_REG_MAIR_EL1, 0xffUL);
HvApi.hv_vcpu_set_sys_reg(vcpu, hv_sys_reg_t.HV_SYS_REG_TCR_EL1, 0x00000011B5193519UL);
HvApi.hv_vcpu_set_sys_reg(vcpu, hv_sys_reg_t.HV_SYS_REG_SCTLR_EL1, 0x0000000034D5D925UL);
}
public bool GetAndClearUserTlbInvalidationPending()
{
return _userRange.GetAndClearTlbInvalidationPending();
}
public void MapUser(ulong va, ulong pa, ulong size, MemoryPermission permission)
{
pa += _asBase;
lock (_userRange)
{
_userRange.Map(va, pa, size, GetApFlags(permission));
}
}
public void UnmapUser(ulong va, ulong size)
{
lock (_userRange)
{
_userRange.Unmap(va, size);
}
}
public void ReprotectUser(ulong va, ulong size, MemoryPermission permission)
{
lock (_userRange)
{
_userRange.Reprotect(va, size, GetApFlags(permission));
}
}
private static ApFlags GetApFlags(MemoryPermission permission)
{
return permission switch
{
MemoryPermission.None => ApFlags.UserNoneKernelRead,
MemoryPermission.Execute => ApFlags.UserExecuteKernelRead,
MemoryPermission.Read => ApFlags.UserReadKernelRead,
MemoryPermission.ReadAndWrite => ApFlags.UserReadWriteKernelReadWrite,
MemoryPermission.ReadAndExecute => ApFlags.UserReadExecuteKernelRead,
MemoryPermission.ReadWriteExecute => ApFlags.UserReadWriteExecuteKernelReadWrite,
_ => throw new ArgumentException($"Permission \"{permission}\" is invalid.")
};
}
public void Dispose()
{
_userRange.Dispose();
_kernelRange.Dispose();
HvVm.DestroyAddressSpace(_asBase, _backingSize);
}
}
}
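
The address space is split across the two translation table base registers: guest ("user") mappings live under TTBR0_EL1, while the small handler stub region sits in the high TTBR1_EL1 half where guest code cannot reach it. A hedged usage sketch; `vcpuHandle` is assumed to come from hv_vcpu_create, and the sizes are illustrative:

```csharp
var backingMemory = new MemoryBlock(512UL << 20); // illustrative guest RAM size
var addressSpace = new HvAddressSpace(backingMemory, 1UL << 36);

// Map 64 KiB of guest memory read/write at VA 0x8000000.
addressSpace.MapUser(0x8000000UL, 0UL, 0x10000UL, MemoryPermission.ReadAndWrite);

// Program VBAR/TTBR0/TTBR1/MAIR/TCR/SCTLR on the vCPU.
addressSpace.InitializeMmu(vcpuHandle);
```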

@@ -0,0 +1,370 @@
using Ryujinx.Cpu.AppleHv.Arm;
using System;
using System.Diagnostics;
using System.Runtime.InteropServices;
using System.Threading;
namespace Ryujinx.Cpu.AppleHv
{
class HvAddressSpaceRange : IDisposable
{
private const ulong AllocationGranule = 1UL << 14;
private const ulong AttributesMask = (0x3ffUL << 2) | (0x3fffUL << 50);
private const ulong BaseAttributes = (1UL << 10) | (3UL << 8); // Access flag set, inner shareable.
private const int LevelBits = 9;
private const int LevelCount = 1 << LevelBits;
private const int LevelMask = LevelCount - 1;
private const int PageBits = 12;
private const int PageSize = 1 << PageBits;
private const int PageMask = PageSize - 1;
private const int AllLevelsMask = PageMask | (LevelMask << PageBits) | (LevelMask << (PageBits + LevelBits));
private class PtLevel
{
public ulong Address => Allocation.Ipa + Allocation.Offset;
public int EntriesCount;
public readonly HvMemoryBlockAllocation Allocation;
public readonly PtLevel[] Next;
public PtLevel(HvMemoryBlockAllocator blockAllocator, int count, bool hasNext)
{
ulong size = (ulong)count * sizeof(ulong);
Allocation = blockAllocator.Allocate(size, PageSize);
AsSpan().Fill(0UL);
if (hasNext)
{
Next = new PtLevel[count];
}
}
public unsafe Span<ulong> AsSpan()
{
return MemoryMarshal.Cast<byte, ulong>(Allocation.Memory.GetSpan(Allocation.Offset, (int)Allocation.Size));
}
}
private PtLevel _level0;
private int _tlbInvalidationPending;
private readonly HvIpaAllocator _ipaAllocator;
private readonly HvMemoryBlockAllocator _blockAllocator;
public HvAddressSpaceRange(HvIpaAllocator ipaAllocator)
{
_ipaAllocator = ipaAllocator;
_blockAllocator = new HvMemoryBlockAllocator(ipaAllocator, (int)AllocationGranule);
}
public ulong GetIpaBase()
{
return EnsureLevel0().Address;
}
public bool GetAndClearTlbInvalidationPending()
{
return Interlocked.Exchange(ref _tlbInvalidationPending, 0) != 0;
}
public void Map(ulong va, ulong pa, ulong size, ApFlags accessPermission)
{
MapImpl(va, pa, size, (ulong)accessPermission | BaseAttributes);
}
public void Unmap(ulong va, ulong size)
{
UnmapImpl(EnsureLevel0(), 0, va, size);
Interlocked.Exchange(ref _tlbInvalidationPending, 1);
}
public void Reprotect(ulong va, ulong size, ApFlags accessPermission)
{
UpdateAttributes(va, size, (ulong)accessPermission | BaseAttributes);
}
private void MapImpl(ulong va, ulong pa, ulong size, ulong attr)
{
PtLevel level0 = EnsureLevel0();
ulong endVa = va + size;
while (va < endVa)
{
(ulong mapSize, int depth) = GetMapSizeAndDepth(va, pa, endVa);
PtLevel currentLevel = level0;
for (int i = 0; i < depth; i++)
{
int l = (int)(va >> (PageBits + (2 - i) * LevelBits)) & LevelMask;
EnsureTable(currentLevel, l, i == 0);
currentLevel = currentLevel.Next[l];
}
(ulong blockSize, int blockShift) = GetBlockSizeAndShift(depth);
for (ulong i = 0; i < mapSize; i += blockSize)
{
if ((va >> blockShift) << blockShift != va ||
(pa >> blockShift) << blockShift != pa)
{
Debug.Fail($"Block size 0x{blockSize:X} (log2: {blockShift}) is invalid for VA 0x{va:X} or PA 0x{pa:X}.");
}
WriteBlock(currentLevel, (int)(va >> blockShift) & LevelMask, depth, pa, attr);
va += blockSize;
pa += blockSize;
}
}
}
private void UnmapImpl(PtLevel level, int depth, ulong va, ulong size)
{
ulong endVa = (va + size + PageMask) & ~((ulong)PageMask);
va &= ~((ulong)PageMask);
(ulong blockSize, int blockShift) = GetBlockSizeAndShift(depth);
while (va < endVa)
{
ulong nextEntryVa = GetNextAddress(va, blockSize);
ulong chunkSize = Math.Min(endVa - va, nextEntryVa - va);
int l = (int)(va >> (PageBits + (2 - depth) * LevelBits)) & LevelMask;
PtLevel nextTable = level.Next != null ? level.Next[l] : null;
if (nextTable != null)
{
// Entry is a table, visit it and update attributes as required.
UnmapImpl(nextTable, depth + 1, va, chunkSize);
}
else if (chunkSize != blockSize)
{
// Entry is a block but is not aligned, we need to turn it into a table.
ref ulong pte = ref level.AsSpan()[l];
nextTable = CreateTable(pte, depth + 1);
level.Next[l] = nextTable;
// Now that we have a table, we can handle it like the first case.
UnmapImpl(nextTable, depth + 1, va, chunkSize);
// Update PTE to point to the new table.
pte = (nextTable.Address & ~(ulong)PageMask) | 3UL;
}
// If entry is a block, or if entry is a table but it is empty, we can remove it.
if (nextTable == null || nextTable.EntriesCount == 0)
{
// Entry is a block and is fully aligned, so we can just set it to 0.
if (nextTable != null)
{
nextTable.Allocation.Dispose();
level.Next[l] = null;
}
level.AsSpan()[l] = 0UL;
level.EntriesCount--;
ValidateEntriesCount(level.EntriesCount);
}
va += chunkSize;
}
}
private void UpdateAttributes(ulong va, ulong size, ulong newAttr)
{
UpdateAttributes(EnsureLevel0(), 0, va, size, newAttr);
Interlocked.Exchange(ref _tlbInvalidationPending, 1);
}
private void UpdateAttributes(PtLevel level, int depth, ulong va, ulong size, ulong newAttr)
{
ulong endVa = (va + size + PageSize - 1) & ~((ulong)PageSize - 1);
va &= ~((ulong)PageSize - 1);
(ulong blockSize, int blockShift) = GetBlockSizeAndShift(depth);
while (va < endVa)
{
ulong nextEntryVa = GetNextAddress(va, blockSize);
ulong chunkSize = Math.Min(endVa - va, nextEntryVa - va);
int l = (int)(va >> (PageBits + (2 - depth) * LevelBits)) & LevelMask;
ref ulong pte = ref level.AsSpan()[l];
// First check if the region is mapped.
if ((pte & 3) != 0)
{
PtLevel nextTable = level.Next != null ? level.Next[l] : null;
if (nextTable != null)
{
// Entry is a table, visit it and update attributes as required.
UpdateAttributes(nextTable, depth + 1, va, chunkSize, newAttr);
}
else if (chunkSize != blockSize)
{
// Entry is a block but is not aligned, we need to turn it into a table.
nextTable = CreateTable(pte, depth + 1);
level.Next[l] = nextTable;
// Now that we have a table, we can handle it like the first case.
UpdateAttributes(nextTable, depth + 1, va, chunkSize, newAttr);
// Update PTE to point to the new table.
pte = (nextTable.Address & ~(ulong)PageMask) | 3UL;
}
else
{
// Entry is a block and is fully aligned, so we can just update the attributes.
// Update PTE with the new attributes.
pte = (pte & ~AttributesMask) | newAttr;
}
}
va += chunkSize;
}
}
private PtLevel CreateTable(ulong pte, int depth)
{
pte &= ~3UL;
pte |= (depth == 2 ? 3UL : 1UL);
PtLevel level = new PtLevel(_blockAllocator, LevelCount, depth < 2);
Span<ulong> currentLevel = level.AsSpan();
(ulong blockSize, int blockShift) = GetBlockSizeAndShift(depth);
// Fill in the blocks.
for (int i = 0; i < LevelCount; i++)
{
ulong offset = (ulong)i << blockShift;
currentLevel[i] = pte + offset;
}
level.EntriesCount = LevelCount;
return level;
}
private static (ulong, int) GetBlockSizeAndShift(int depth)
{
int blockShift = PageBits + (2 - depth) * LevelBits;
ulong blockSize = 1UL << blockShift;
return (blockSize, blockShift);
}
private static (ulong, int) GetMapSizeAndDepth(ulong va, ulong pa, ulong endVa)
{
// Both virtual and physical addresses must be aligned to the block size.
ulong combinedAddress = va | pa;
ulong l0Alignment = 1UL << (PageBits + LevelBits * 2);
ulong l1Alignment = 1UL << (PageBits + LevelBits);
if ((combinedAddress & (l0Alignment - 1)) == 0 && AlignDown(endVa, l0Alignment) > va)
{
return (AlignDown(endVa, l0Alignment) - va, 0);
}
else if ((combinedAddress & (l1Alignment - 1)) == 0 && AlignDown(endVa, l1Alignment) > va)
{
ulong nextOrderVa = GetNextAddress(va, l0Alignment);
if (nextOrderVa <= endVa)
{
return (nextOrderVa - va, 1);
}
else
{
return (AlignDown(endVa, l1Alignment) - va, 1);
}
}
else
{
ulong nextOrderVa = GetNextAddress(va, l1Alignment);
if (nextOrderVa <= endVa)
{
return (nextOrderVa - va, 2);
}
else
{
return (endVa - va, 2);
}
}
}
private static ulong AlignDown(ulong va, ulong alignment)
{
return va & ~(alignment - 1);
}
private static ulong GetNextAddress(ulong va, ulong alignment)
{
return (va + alignment) & ~(alignment - 1);
}
private PtLevel EnsureLevel0()
{
PtLevel level0 = _level0;
if (level0 == null)
{
level0 = new PtLevel(_blockAllocator, LevelCount, true);
_level0 = level0;
}
return level0;
}
private void EnsureTable(PtLevel level, int index, bool hasNext)
{
Span<ulong> currentTable = level.AsSpan();
if ((currentTable[index] & 1) == 0)
{
PtLevel nextLevel = new PtLevel(_blockAllocator, LevelCount, hasNext);
currentTable[index] = (nextLevel.Address & ~(ulong)PageMask) | 3UL;
level.Next[index] = nextLevel;
level.EntriesCount++;
ValidateEntriesCount(level.EntriesCount);
}
else if (level.Next[index] == null)
{
Debug.Fail($"Index {index} is block, expected a table.");
}
}
private void WriteBlock(PtLevel level, int index, int depth, ulong pa, ulong attr)
{
Span<ulong> currentTable = level.AsSpan();
currentTable[index] = (pa & ~((ulong)AllLevelsMask >> (depth * LevelBits))) | (depth == 2 ? 3UL : 1UL) | attr;
level.EntriesCount++;
ValidateEntriesCount(level.EntriesCount);
}
private static void ValidateEntriesCount(int count)
{
Debug.Assert(count >= 0 && count <= LevelCount, $"Entries count {count} is invalid.");
}
public void Dispose()
{
_blockAllocator.Dispose();
}
}
}
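
With a 4 KiB page granule and 9 bits per level, the walk above uses three levels. A worked sketch of how a virtual address splits into table indices, matching the `(va >> (PageBits + (2 - i) * LevelBits)) & LevelMask` expression in MapImpl:

```csharp
ulong va = 0x123456000UL;
int l0 = (int)(va >> 30) & 0x1FF; // PageBits + 2 * LevelBits
int l1 = (int)(va >> 21) & 0x1FF; // PageBits + LevelBits
int l2 = (int)(va >> 12) & 0x1FF; // PageBits
// Block entries cover 1 GiB at level 0 and 2 MiB at level 1; level 2 maps
// 4 KiB pages. GetMapSizeAndDepth picks the shallowest depth whose block
// alignment both the VA and PA satisfy, which keeps the tables small.
```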

@@ -0,0 +1,320 @@
using ARMeilleure.State;
using System;
using System.Runtime.InteropServices;
namespace Ryujinx.Cpu.AppleHv
{
struct hv_vcpu_exit_exception_t
{
#pragma warning disable CS0649
public ulong syndrome;
public ulong virtual_address;
public ulong physical_address;
#pragma warning restore CS0649
}
struct hv_vcpu_exit_t
{
#pragma warning disable CS0649
public uint reason;
public hv_vcpu_exit_exception_t exception;
#pragma warning restore CS0649
}
enum hv_reg_t : uint
{
HV_REG_X0,
HV_REG_X1,
HV_REG_X2,
HV_REG_X3,
HV_REG_X4,
HV_REG_X5,
HV_REG_X6,
HV_REG_X7,
HV_REG_X8,
HV_REG_X9,
HV_REG_X10,
HV_REG_X11,
HV_REG_X12,
HV_REG_X13,
HV_REG_X14,
HV_REG_X15,
HV_REG_X16,
HV_REG_X17,
HV_REG_X18,
HV_REG_X19,
HV_REG_X20,
HV_REG_X21,
HV_REG_X22,
HV_REG_X23,
HV_REG_X24,
HV_REG_X25,
HV_REG_X26,
HV_REG_X27,
HV_REG_X28,
HV_REG_X29,
HV_REG_FP = HV_REG_X29,
HV_REG_X30,
HV_REG_LR = HV_REG_X30,
HV_REG_PC,
HV_REG_FPCR,
HV_REG_FPSR,
HV_REG_CPSR,
}
enum hv_simd_fp_reg_t : uint
{
HV_SIMD_FP_REG_Q0,
HV_SIMD_FP_REG_Q1,
HV_SIMD_FP_REG_Q2,
HV_SIMD_FP_REG_Q3,
HV_SIMD_FP_REG_Q4,
HV_SIMD_FP_REG_Q5,
HV_SIMD_FP_REG_Q6,
HV_SIMD_FP_REG_Q7,
HV_SIMD_FP_REG_Q8,
HV_SIMD_FP_REG_Q9,
HV_SIMD_FP_REG_Q10,
HV_SIMD_FP_REG_Q11,
HV_SIMD_FP_REG_Q12,
HV_SIMD_FP_REG_Q13,
HV_SIMD_FP_REG_Q14,
HV_SIMD_FP_REG_Q15,
HV_SIMD_FP_REG_Q16,
HV_SIMD_FP_REG_Q17,
HV_SIMD_FP_REG_Q18,
HV_SIMD_FP_REG_Q19,
HV_SIMD_FP_REG_Q20,
HV_SIMD_FP_REG_Q21,
HV_SIMD_FP_REG_Q22,
HV_SIMD_FP_REG_Q23,
HV_SIMD_FP_REG_Q24,
HV_SIMD_FP_REG_Q25,
HV_SIMD_FP_REG_Q26,
HV_SIMD_FP_REG_Q27,
HV_SIMD_FP_REG_Q28,
HV_SIMD_FP_REG_Q29,
HV_SIMD_FP_REG_Q30,
HV_SIMD_FP_REG_Q31,
}
enum hv_sys_reg_t : ushort
{
HV_SYS_REG_DBGBVR0_EL1 = 0x8004,
HV_SYS_REG_DBGBCR0_EL1 = 0x8005,
HV_SYS_REG_DBGWVR0_EL1 = 0x8006,
HV_SYS_REG_DBGWCR0_EL1 = 0x8007,
HV_SYS_REG_DBGBVR1_EL1 = 0x800c,
HV_SYS_REG_DBGBCR1_EL1 = 0x800d,
HV_SYS_REG_DBGWVR1_EL1 = 0x800e,
HV_SYS_REG_DBGWCR1_EL1 = 0x800f,
HV_SYS_REG_MDCCINT_EL1 = 0x8010,
HV_SYS_REG_MDSCR_EL1 = 0x8012,
HV_SYS_REG_DBGBVR2_EL1 = 0x8014,
HV_SYS_REG_DBGBCR2_EL1 = 0x8015,
HV_SYS_REG_DBGWVR2_EL1 = 0x8016,
HV_SYS_REG_DBGWCR2_EL1 = 0x8017,
HV_SYS_REG_DBGBVR3_EL1 = 0x801c,
HV_SYS_REG_DBGBCR3_EL1 = 0x801d,
HV_SYS_REG_DBGWVR3_EL1 = 0x801e,
HV_SYS_REG_DBGWCR3_EL1 = 0x801f,
HV_SYS_REG_DBGBVR4_EL1 = 0x8024,
HV_SYS_REG_DBGBCR4_EL1 = 0x8025,
HV_SYS_REG_DBGWVR4_EL1 = 0x8026,
HV_SYS_REG_DBGWCR4_EL1 = 0x8027,
HV_SYS_REG_DBGBVR5_EL1 = 0x802c,
HV_SYS_REG_DBGBCR5_EL1 = 0x802d,
HV_SYS_REG_DBGWVR5_EL1 = 0x802e,
HV_SYS_REG_DBGWCR5_EL1 = 0x802f,
HV_SYS_REG_DBGBVR6_EL1 = 0x8034,
HV_SYS_REG_DBGBCR6_EL1 = 0x8035,
HV_SYS_REG_DBGWVR6_EL1 = 0x8036,
HV_SYS_REG_DBGWCR6_EL1 = 0x8037,
HV_SYS_REG_DBGBVR7_EL1 = 0x803c,
HV_SYS_REG_DBGBCR7_EL1 = 0x803d,
HV_SYS_REG_DBGWVR7_EL1 = 0x803e,
HV_SYS_REG_DBGWCR7_EL1 = 0x803f,
HV_SYS_REG_DBGBVR8_EL1 = 0x8044,
HV_SYS_REG_DBGBCR8_EL1 = 0x8045,
HV_SYS_REG_DBGWVR8_EL1 = 0x8046,
HV_SYS_REG_DBGWCR8_EL1 = 0x8047,
HV_SYS_REG_DBGBVR9_EL1 = 0x804c,
HV_SYS_REG_DBGBCR9_EL1 = 0x804d,
HV_SYS_REG_DBGWVR9_EL1 = 0x804e,
HV_SYS_REG_DBGWCR9_EL1 = 0x804f,
HV_SYS_REG_DBGBVR10_EL1 = 0x8054,
HV_SYS_REG_DBGBCR10_EL1 = 0x8055,
HV_SYS_REG_DBGWVR10_EL1 = 0x8056,
HV_SYS_REG_DBGWCR10_EL1 = 0x8057,
HV_SYS_REG_DBGBVR11_EL1 = 0x805c,
HV_SYS_REG_DBGBCR11_EL1 = 0x805d,
HV_SYS_REG_DBGWVR11_EL1 = 0x805e,
HV_SYS_REG_DBGWCR11_EL1 = 0x805f,
HV_SYS_REG_DBGBVR12_EL1 = 0x8064,
HV_SYS_REG_DBGBCR12_EL1 = 0x8065,
HV_SYS_REG_DBGWVR12_EL1 = 0x8066,
HV_SYS_REG_DBGWCR12_EL1 = 0x8067,
HV_SYS_REG_DBGBVR13_EL1 = 0x806c,
HV_SYS_REG_DBGBCR13_EL1 = 0x806d,
HV_SYS_REG_DBGWVR13_EL1 = 0x806e,
HV_SYS_REG_DBGWCR13_EL1 = 0x806f,
HV_SYS_REG_DBGBVR14_EL1 = 0x8074,
HV_SYS_REG_DBGBCR14_EL1 = 0x8075,
HV_SYS_REG_DBGWVR14_EL1 = 0x8076,
HV_SYS_REG_DBGWCR14_EL1 = 0x8077,
HV_SYS_REG_DBGBVR15_EL1 = 0x807c,
HV_SYS_REG_DBGBCR15_EL1 = 0x807d,
HV_SYS_REG_DBGWVR15_EL1 = 0x807e,
HV_SYS_REG_DBGWCR15_EL1 = 0x807f,
HV_SYS_REG_MIDR_EL1 = 0xc000,
HV_SYS_REG_MPIDR_EL1 = 0xc005,
HV_SYS_REG_ID_AA64PFR0_EL1 = 0xc020,
HV_SYS_REG_ID_AA64PFR1_EL1 = 0xc021,
HV_SYS_REG_ID_AA64DFR0_EL1 = 0xc028,
HV_SYS_REG_ID_AA64DFR1_EL1 = 0xc029,
HV_SYS_REG_ID_AA64ISAR0_EL1 = 0xc030,
HV_SYS_REG_ID_AA64ISAR1_EL1 = 0xc031,
HV_SYS_REG_ID_AA64MMFR0_EL1 = 0xc038,
HV_SYS_REG_ID_AA64MMFR1_EL1 = 0xc039,
HV_SYS_REG_ID_AA64MMFR2_EL1 = 0xc03a,
HV_SYS_REG_SCTLR_EL1 = 0xc080,
HV_SYS_REG_CPACR_EL1 = 0xc082,
HV_SYS_REG_TTBR0_EL1 = 0xc100,
HV_SYS_REG_TTBR1_EL1 = 0xc101,
HV_SYS_REG_TCR_EL1 = 0xc102,
HV_SYS_REG_APIAKEYLO_EL1 = 0xc108,
HV_SYS_REG_APIAKEYHI_EL1 = 0xc109,
HV_SYS_REG_APIBKEYLO_EL1 = 0xc10a,
HV_SYS_REG_APIBKEYHI_EL1 = 0xc10b,
HV_SYS_REG_APDAKEYLO_EL1 = 0xc110,
HV_SYS_REG_APDAKEYHI_EL1 = 0xc111,
HV_SYS_REG_APDBKEYLO_EL1 = 0xc112,
HV_SYS_REG_APDBKEYHI_EL1 = 0xc113,
HV_SYS_REG_APGAKEYLO_EL1 = 0xc118,
HV_SYS_REG_APGAKEYHI_EL1 = 0xc119,
HV_SYS_REG_SPSR_EL1 = 0xc200,
HV_SYS_REG_ELR_EL1 = 0xc201,
HV_SYS_REG_SP_EL0 = 0xc208,
HV_SYS_REG_AFSR0_EL1 = 0xc288,
HV_SYS_REG_AFSR1_EL1 = 0xc289,
HV_SYS_REG_ESR_EL1 = 0xc290,
HV_SYS_REG_FAR_EL1 = 0xc300,
HV_SYS_REG_PAR_EL1 = 0xc3a0,
HV_SYS_REG_MAIR_EL1 = 0xc510,
HV_SYS_REG_AMAIR_EL1 = 0xc518,
HV_SYS_REG_VBAR_EL1 = 0xc600,
HV_SYS_REG_CONTEXTIDR_EL1 = 0xc681,
HV_SYS_REG_TPIDR_EL1 = 0xc684,
HV_SYS_REG_CNTKCTL_EL1 = 0xc708,
HV_SYS_REG_CSSELR_EL1 = 0xd000,
HV_SYS_REG_TPIDR_EL0 = 0xde82,
HV_SYS_REG_TPIDRRO_EL0 = 0xde83,
HV_SYS_REG_CNTV_CTL_EL0 = 0xdf19,
HV_SYS_REG_CNTV_CVAL_EL0 = 0xdf1a,
HV_SYS_REG_SP_EL1 = 0xe208,
}
enum hv_memory_flags_t : ulong
{
HV_MEMORY_READ = 1UL << 0,
HV_MEMORY_WRITE = 1UL << 1,
HV_MEMORY_EXEC = 1UL << 2
}
enum hv_result_t : uint
{
HV_SUCCESS = 0,
HV_ERROR = 0xfae94001,
HV_BUSY = 0xfae94002,
HV_BAD_ARGUMENT = 0xfae94003,
HV_NO_RESOURCES = 0xfae94005,
HV_NO_DEVICE = 0xfae94006,
HV_DENIED = 0xfae94007,
HV_UNSUPPORTED = 0xfae9400f
}
enum hv_interrupt_type_t : uint
{
HV_INTERRUPT_TYPE_IRQ,
HV_INTERRUPT_TYPE_FIQ
}
struct hv_simd_fp_uchar16_t
{
public ulong Low;
public ulong High;
}
static class HvResultExtensions
{
public static void ThrowOnError(this hv_result_t result)
{
if (result != hv_result_t.HV_SUCCESS)
{
throw new Exception($"Unexpected result \"{result}\".");
}
}
}
static partial class HvApi
{
public const string LibraryName = "/System/Library/Frameworks/Hypervisor.framework/Hypervisor";
[LibraryImport(LibraryName, SetLastError = true)]
public static partial hv_result_t hv_vm_get_max_vcpu_count(out uint max_vcpu_count);
[LibraryImport(LibraryName, SetLastError = true)]
public static partial hv_result_t hv_vm_create(IntPtr config);
[LibraryImport(LibraryName, SetLastError = true)]
public static partial hv_result_t hv_vm_destroy();
[LibraryImport(LibraryName, SetLastError = true)]
public static partial hv_result_t hv_vm_map(ulong addr, ulong ipa, ulong size, hv_memory_flags_t flags);
[LibraryImport(LibraryName, SetLastError = true)]
public static partial hv_result_t hv_vm_unmap(ulong ipa, ulong size);
[LibraryImport(LibraryName, SetLastError = true)]
public static partial hv_result_t hv_vm_protect(ulong ipa, ulong size, hv_memory_flags_t flags);
[LibraryImport(LibraryName, SetLastError = true)]
public unsafe static partial hv_result_t hv_vcpu_create(out ulong vcpu, ref hv_vcpu_exit_t* exit, IntPtr config);
[LibraryImport(LibraryName, SetLastError = true)]
public unsafe static partial hv_result_t hv_vcpu_destroy(ulong vcpu);
[LibraryImport(LibraryName, SetLastError = true)]
public static partial hv_result_t hv_vcpu_run(ulong vcpu);
[LibraryImport(LibraryName, SetLastError = true)]
public static partial hv_result_t hv_vcpus_exit(ref ulong vcpus, uint vcpu_count);
[LibraryImport(LibraryName, SetLastError = true)]
public static partial hv_result_t hv_vcpu_set_vtimer_mask(ulong vcpu, [MarshalAs(UnmanagedType.Bool)] bool vtimer_is_masked);
[LibraryImport(LibraryName, SetLastError = true)]
public static partial hv_result_t hv_vcpu_get_reg(ulong vcpu, hv_reg_t reg, out ulong value);
[LibraryImport(LibraryName, SetLastError = true)]
public static partial hv_result_t hv_vcpu_set_reg(ulong vcpu, hv_reg_t reg, ulong value);
[LibraryImport(LibraryName, SetLastError = true)]
public static partial hv_result_t hv_vcpu_get_simd_fp_reg(ulong vcpu, hv_simd_fp_reg_t reg, out hv_simd_fp_uchar16_t value);
[LibraryImport(LibraryName, SetLastError = true)]
public static partial hv_result_t hv_vcpu_set_simd_fp_reg(ulong vcpu, hv_simd_fp_reg_t reg, hv_simd_fp_uchar16_t value); // DO NOT USE DIRECTLY! .NET cannot pass the vector by value; call it through the native stub in HvExecutionContextVcpu.SetV.
[LibraryImport(LibraryName, SetLastError = true)]
public static partial hv_result_t hv_vcpu_get_sys_reg(ulong vcpu, hv_sys_reg_t reg, out ulong value);
[LibraryImport(LibraryName, SetLastError = true)]
public static partial hv_result_t hv_vcpu_set_sys_reg(ulong vcpu, hv_sys_reg_t reg, ulong value);
[LibraryImport(LibraryName, SetLastError = true)]
public static partial hv_result_t hv_vcpu_get_pending_interrupt(ulong vcpu, hv_interrupt_type_t type, [MarshalAs(UnmanagedType.Bool)] out bool pending);
[LibraryImport(LibraryName, SetLastError = true)]
public static partial hv_result_t hv_vcpu_set_pending_interrupt(ulong vcpu, hv_interrupt_type_t type, [MarshalAs(UnmanagedType.Bool)] bool pending);
}
}
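
A minimal, hedged lifecycle sketch for these bindings. It assumes macOS with the `com.apple.security.hypervisor` entitlement and an illustrative guest entry point; real use goes through the HvVm and HvVcpuPool helpers elsewhere in this commit:

```csharp
unsafe
{
    hv_vcpu_exit_t* exit = null;
    ulong entry = 0x8000000UL; // illustrative guest PC

    HvApi.hv_vm_create(IntPtr.Zero).ThrowOnError();
    // Note: the Hypervisor framework requires vCPU calls to happen on the
    // thread that created the vCPU.
    HvApi.hv_vcpu_create(out ulong vcpu, ref exit, IntPtr.Zero).ThrowOnError();

    HvApi.hv_vcpu_set_reg(vcpu, hv_reg_t.HV_REG_PC, entry).ThrowOnError();
    HvApi.hv_vcpu_run(vcpu).ThrowOnError(); // blocks until the guest exits
    uint reason = exit->reason;             // see HvExecutionContext.Execute

    HvApi.hv_vcpu_destroy(vcpu).ThrowOnError();
    HvApi.hv_vm_destroy().ThrowOnError();
}
```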

@@ -0,0 +1,47 @@
using ARMeilleure.Memory;
using System;
namespace Ryujinx.Cpu.AppleHv
{
class HvCpuContext : ICpuContext
{
private readonly ITickSource _tickSource;
private readonly HvMemoryManager _memoryManager;
public HvCpuContext(ITickSource tickSource, IMemoryManager memory, bool for64Bit)
{
_tickSource = tickSource;
_memoryManager = (HvMemoryManager)memory;
}
private void UnmapHandler(ulong address, ulong size)
{
}
/// <inheritdoc/>
public IExecutionContext CreateExecutionContext(ExceptionCallbacks exceptionCallbacks)
{
return new HvExecutionContext(_tickSource, exceptionCallbacks);
}
/// <inheritdoc/>
public void Execute(IExecutionContext context, ulong address)
{
((HvExecutionContext)context).Execute(_memoryManager, address);
}
/// <inheritdoc/>
public void InvalidateCacheRegion(ulong address, ulong size)
{
}
public IDiskCacheLoadState LoadDiskCache(string titleIdText, string displayVersion, bool enabled)
{
return new DummyDiskCacheLoadState();
}
public void PrepareCodeRange(ulong address, ulong size)
{
}
}
}

@@ -0,0 +1,20 @@
using ARMeilleure.Memory;
namespace Ryujinx.Cpu.AppleHv
{
public class HvEngine : ICpuEngine
{
private readonly ITickSource _tickSource;
public HvEngine(ITickSource tickSource)
{
_tickSource = tickSource;
}
/// <inheritdoc/>
public ICpuContext CreateCpuContext(IMemoryManager memoryManager, bool for64Bit)
{
return new HvCpuContext(_tickSource, memoryManager, for64Bit);
}
}
}
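
A sketch of how the engine is wired up; `tickSource`, `backingMemory`, `exceptionCallbacks` and `guestEntryPoint` are assumed to exist. Note that HvCpuContext casts the memory manager to HvMemoryManager, so only that implementation is valid here:

```csharp
var memory = new HvMemoryManager(backingMemory, 1UL << 36);
ICpuEngine engine = new HvEngine(tickSource);
ICpuContext cpuContext = engine.CreateCpuContext(memory, for64Bit: true);
IExecutionContext context = cpuContext.CreateExecutionContext(exceptionCallbacks);
cpuContext.Execute(context, guestEntryPoint); // runs until StopRunning() is called
```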

@@ -0,0 +1,284 @@
using ARMeilleure.State;
using Ryujinx.Cpu.AppleHv.Arm;
using Ryujinx.Memory.Tracking;
using System;
namespace Ryujinx.Cpu.AppleHv
{
class HvExecutionContext : IExecutionContext
{
/// <inheritdoc/>
public ulong Pc => _impl.ElrEl1;
/// <inheritdoc/>
public long TpidrEl0
{
get => _impl.TpidrEl0;
set => _impl.TpidrEl0 = value;
}
/// <inheritdoc/>
public long TpidrroEl0
{
get => _impl.TpidrroEl0;
set => _impl.TpidrroEl0 = value;
}
/// <inheritdoc/>
public uint Pstate
{
get => _impl.Pstate;
set => _impl.Pstate = value;
}
/// <inheritdoc/>
public uint Fpcr
{
get => _impl.Fpcr;
set => _impl.Fpcr = value;
}
/// <inheritdoc/>
public uint Fpsr
{
get => _impl.Fpsr;
set => _impl.Fpsr = value;
}
/// <inheritdoc/>
public bool IsAarch32
{
get => false;
set
{
if (value)
{
throw new NotSupportedException();
}
}
}
/// <inheritdoc/>
public bool Running { get; private set; }
private readonly ICounter _counter;
private readonly IHvExecutionContext _shadowContext;
private IHvExecutionContext _impl;
private readonly ExceptionCallbacks _exceptionCallbacks;
public HvExecutionContext(ICounter counter, ExceptionCallbacks exceptionCallbacks)
{
_counter = counter;
_shadowContext = new HvExecutionContextShadow();
_impl = _shadowContext;
_exceptionCallbacks = exceptionCallbacks;
Running = true;
}
/// <inheritdoc/>
public ulong GetX(int index) => _impl.GetX(index);
/// <inheritdoc/>
public void SetX(int index, ulong value) => _impl.SetX(index, value);
/// <inheritdoc/>
public V128 GetV(int index) => _impl.GetV(index);
/// <inheritdoc/>
public void SetV(int index, V128 value) => _impl.SetV(index, value);
private void InterruptHandler()
{
_exceptionCallbacks.InterruptCallback?.Invoke(this);
}
private void BreakHandler(ulong address, int imm)
{
_exceptionCallbacks.BreakCallback?.Invoke(this, address, imm);
}
private void SupervisorCallHandler(ulong address, int imm)
{
_exceptionCallbacks.SupervisorCallback?.Invoke(this, address, imm);
}
private void UndefinedHandler(ulong address, int opCode)
{
_exceptionCallbacks.UndefinedCallback?.Invoke(this, address, opCode);
}
/// <inheritdoc/>
public void RequestInterrupt()
{
_impl.RequestInterrupt();
}
/// <inheritdoc/>
public void StopRunning()
{
Running = false;
RequestInterrupt();
}
public unsafe void Execute(HvMemoryManager memoryManager, ulong address)
{
HvVcpu vcpu = HvVcpuPool.Instance.Create(memoryManager.AddressSpace, _shadowContext, SwapContext);
HvApi.hv_vcpu_set_reg(vcpu.Handle, hv_reg_t.HV_REG_PC, address).ThrowOnError();
while (Running)
{
HvApi.hv_vcpu_run(vcpu.Handle).ThrowOnError();
uint reason = vcpu.ExitInfo->reason;
if (reason == 1) // HV_EXIT_REASON_EXCEPTION
{
uint hvEsr = (uint)vcpu.ExitInfo->exception.syndrome;
ExceptionClass hvEc = (ExceptionClass)(hvEsr >> 26);
if (hvEc != ExceptionClass.HvcAarch64)
{
throw new Exception($"Unhandled exception from guest kernel with ESR 0x{hvEsr:X} ({hvEc}).");
}
address = SynchronousException(memoryManager, ref vcpu);
HvApi.hv_vcpu_set_reg(vcpu.Handle, hv_reg_t.HV_REG_PC, address).ThrowOnError();
}
else if (reason == 0) // HV_EXIT_REASON_CANCELED, i.e. hv_vcpus_exit was called
{
if (_impl.GetAndClearInterruptRequested())
{
ReturnToPool(vcpu);
InterruptHandler();
vcpu = RentFromPool(memoryManager.AddressSpace, vcpu);
}
}
else
{
throw new Exception($"Unhandled exit reason {reason}.");
}
}
HvVcpuPool.Instance.Destroy(vcpu, SwapContext);
}
private ulong SynchronousException(HvMemoryManager memoryManager, ref HvVcpu vcpu)
{
ulong vcpuHandle = vcpu.Handle;
HvApi.hv_vcpu_get_sys_reg(vcpuHandle, hv_sys_reg_t.HV_SYS_REG_ELR_EL1, out ulong elr).ThrowOnError();
HvApi.hv_vcpu_get_sys_reg(vcpuHandle, hv_sys_reg_t.HV_SYS_REG_ESR_EL1, out ulong esr).ThrowOnError();
ExceptionClass ec = (ExceptionClass)((uint)esr >> 26);
switch (ec)
{
case ExceptionClass.DataAbortLowerEl:
DataAbort(memoryManager.Tracking, vcpuHandle, (uint)esr);
break;
case ExceptionClass.TrappedMsrMrsSystem:
InstructionTrap((uint)esr);
HvApi.hv_vcpu_set_sys_reg(vcpuHandle, hv_sys_reg_t.HV_SYS_REG_ELR_EL1, elr + 4UL).ThrowOnError();
break;
case ExceptionClass.SvcAarch64:
ReturnToPool(vcpu);
ushort id = (ushort)esr;
SupervisorCallHandler(elr - 4UL, id);
vcpu = RentFromPool(memoryManager.AddressSpace, vcpu);
break;
default:
throw new Exception($"Unhandled guest exception {ec}.");
}
// Make sure we will continue running at EL0.
if (memoryManager.AddressSpace.GetAndClearUserTlbInvalidationPending())
{
// TODO: Invalidate only the range that was modified?
return HvAddressSpace.KernelRegionTlbiEretAddress;
}
else
{
return HvAddressSpace.KernelRegionEretAddress;
}
}
private void DataAbort(MemoryTracking tracking, ulong vcpu, uint esr)
{
bool write = (esr & (1u << 6)) != 0;
bool farValid = (esr & (1u << 10)) == 0;
int accessSizeLog2 = (int)((esr >> 22) & 3);
if (farValid)
{
HvApi.hv_vcpu_get_sys_reg(vcpu, hv_sys_reg_t.HV_SYS_REG_FAR_EL1, out ulong far).ThrowOnError();
ulong size = 1UL << accessSizeLog2;
if (!tracking.VirtualMemoryEvent(far, size, write))
{
string rw = write ? "write" : "read";
throw new Exception($"Unhandled invalid memory access at VA 0x{far:X} with size 0x{size:X} ({rw}).");
}
}
else
{
throw new Exception($"Unhandled invalid memory access at unknown VA with ESR 0x{esr:X}.");
}
}
private void InstructionTrap(uint esr)
{
bool read = (esr & 1) != 0;
uint rt = (esr >> 5) & 0x1f;
if (read)
{
// Op0 Op2 Op1 CRn 00000 CRm
switch ((esr >> 1) & 0x1ffe0f)
{
case 0b11_000_011_1110_00000_0000: // CNTFRQ_EL0
WriteRt(rt, _counter.Frequency);
break;
case 0b11_001_011_1110_00000_0000: // CNTPCT_EL0
WriteRt(rt, _counter.Counter);
break;
default:
throw new Exception($"Unhandled system register read with ESR 0x{esr:X}");
}
}
else
{
throw new Exception($"Unhandled system register write with ESR 0x{esr:X}");
}
}
private void WriteRt(uint rt, ulong value)
{
if (rt < 31)
{
SetX((int)rt, value);
}
}
private void ReturnToPool(HvVcpu vcpu)
{
HvVcpuPool.Instance.Return(vcpu, SwapContext);
}
private HvVcpu RentFromPool(HvAddressSpace addressSpace, HvVcpu vcpu)
{
return HvVcpuPool.Instance.Rent(addressSpace, _shadowContext, vcpu, SwapContext);
}
private void SwapContext(IHvExecutionContext newContext)
{
_impl = newContext;
}
public void Dispose()
{
}
}
}
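
The handlers above slice ESR_EL1 by hand: the exception class sits in bits [31:26] and the instruction-specific syndrome (ISS) in the low 25 bits. An illustrative decode of an SVC syndrome:

```csharp
uint esr = 0x56000042u;                 // illustrative: SVC #0x42 from AArch64
var ec = (ExceptionClass)(esr >> 26);   // ExceptionClass.SvcAarch64
ushort svcId = (ushort)esr;             // the id SynchronousException hands to the callback
```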

@@ -0,0 +1,59 @@
using ARMeilleure.State;
namespace Ryujinx.Cpu.AppleHv
{
unsafe class HvExecutionContextShadow : IHvExecutionContext
{
public ulong Pc { get; set; }
public ulong ElrEl1 { get; set; }
public ulong EsrEl1 { get; set; }
public long TpidrEl0 { get; set; }
public long TpidrroEl0 { get; set; }
public uint Pstate { get; set; }
public uint Fpcr { get; set; }
public uint Fpsr { get; set; }
public bool IsAarch32 { get; set; }
private readonly ulong[] _x;
private readonly V128[] _v;
public HvExecutionContextShadow()
{
_x = new ulong[32];
_v = new V128[32];
}
public ulong GetX(int index)
{
return _x[index];
}
public void SetX(int index, ulong value)
{
_x[index] = value;
}
public V128 GetV(int index)
{
return _v[index];
}
public void SetV(int index, V128 value)
{
_v[index] = value;
}
public void RequestInterrupt()
{
}
public bool GetAndClearInterruptRequested()
{
return false;
}
}
}

@@ -0,0 +1,196 @@
using ARMeilleure.State;
using Ryujinx.Memory;
using System;
using System.Runtime.InteropServices;
using System.Threading;
namespace Ryujinx.Cpu.AppleHv
{
class HvExecutionContextVcpu : IHvExecutionContext
{
private static MemoryBlock _setSimdFpRegFuncMem;
private delegate hv_result_t SetSimdFpReg(ulong vcpu, hv_simd_fp_reg_t reg, in V128 value, IntPtr funcPtr);
private static SetSimdFpReg _setSimdFpReg;
private static IntPtr _setSimdFpRegNativePtr;
static HvExecutionContextVcpu()
{
// .NET does not support passing vectors by value, so we need to pass a pointer and use a native
// function to load the value into a vector register.
_setSimdFpRegFuncMem = new MemoryBlock(MemoryBlock.GetPageSize());
_setSimdFpRegFuncMem.Write(0, 0x3DC00040u); // LDR Q0, [X2]
_setSimdFpRegFuncMem.Write(4, 0xD61F0060u); // BR X3
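// With the AArch64 calling convention, the `in V128` argument arrives as a
// pointer in X2 and the extra function-pointer argument in X3, so this stub
// only has to load Q0 (the by-value vector the native export expects) and
// branch, leaving X0 (vcpu) and X1 (reg) untouched.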
_setSimdFpRegFuncMem.Reprotect(0, _setSimdFpRegFuncMem.Size, MemoryPermission.ReadAndExecute);
_setSimdFpReg = Marshal.GetDelegateForFunctionPointer<SetSimdFpReg>(_setSimdFpRegFuncMem.Pointer);
if (NativeLibrary.TryLoad(HvApi.LibraryName, out IntPtr hvLibHandle))
{
_setSimdFpRegNativePtr = NativeLibrary.GetExport(hvLibHandle, nameof(HvApi.hv_vcpu_set_simd_fp_reg));
}
}
public ulong Pc
{
get
{
HvApi.hv_vcpu_get_reg(_vcpu, hv_reg_t.HV_REG_PC, out ulong pc).ThrowOnError();
return pc;
}
set
{
HvApi.hv_vcpu_set_reg(_vcpu, hv_reg_t.HV_REG_PC, value).ThrowOnError();
}
}
public ulong ElrEl1
{
get
{
HvApi.hv_vcpu_get_sys_reg(_vcpu, hv_sys_reg_t.HV_SYS_REG_ELR_EL1, out ulong elr).ThrowOnError();
return elr;
}
set
{
HvApi.hv_vcpu_set_sys_reg(_vcpu, hv_sys_reg_t.HV_SYS_REG_ELR_EL1, value).ThrowOnError();
}
}
public ulong EsrEl1
{
get
{
HvApi.hv_vcpu_get_sys_reg(_vcpu, hv_sys_reg_t.HV_SYS_REG_ESR_EL1, out ulong esr).ThrowOnError();
return esr;
}
set
{
HvApi.hv_vcpu_set_sys_reg(_vcpu, hv_sys_reg_t.HV_SYS_REG_ESR_EL1, value).ThrowOnError();
}
}
public long TpidrEl0
{
get
{
HvApi.hv_vcpu_get_sys_reg(_vcpu, hv_sys_reg_t.HV_SYS_REG_TPIDR_EL0, out ulong tpidrEl0).ThrowOnError();
return (long)tpidrEl0;
}
set
{
HvApi.hv_vcpu_set_sys_reg(_vcpu, hv_sys_reg_t.HV_SYS_REG_TPIDR_EL0, (ulong)value).ThrowOnError();
}
}
public long TpidrroEl0
{
get
{
HvApi.hv_vcpu_get_sys_reg(_vcpu, hv_sys_reg_t.HV_SYS_REG_TPIDRRO_EL0, out ulong tpidrroEl0).ThrowOnError();
return (long)tpidrroEl0;
}
set
{
HvApi.hv_vcpu_set_sys_reg(_vcpu, hv_sys_reg_t.HV_SYS_REG_TPIDRRO_EL0, (ulong)value).ThrowOnError();
}
}
public uint Pstate
{
get
{
HvApi.hv_vcpu_get_reg(_vcpu, hv_reg_t.HV_REG_CPSR, out ulong cpsr).ThrowOnError();
return (uint)cpsr;
}
set
{
HvApi.hv_vcpu_set_reg(_vcpu, hv_reg_t.HV_REG_CPSR, (ulong)value).ThrowOnError();
}
}
public uint Fpcr
{
get
{
HvApi.hv_vcpu_get_reg(_vcpu, hv_reg_t.HV_REG_FPCR, out ulong fpcr).ThrowOnError();
return (uint)fpcr;
}
set
{
HvApi.hv_vcpu_set_reg(_vcpu, hv_reg_t.HV_REG_FPCR, (ulong)value).ThrowOnError();
}
}
public uint Fpsr
{
get
{
HvApi.hv_vcpu_get_reg(_vcpu, hv_reg_t.HV_REG_FPSR, out ulong fpsr).ThrowOnError();
return (uint)fpsr;
}
set
{
HvApi.hv_vcpu_set_reg(_vcpu, hv_reg_t.HV_REG_FPSR, (ulong)value).ThrowOnError();
}
}
private ulong _vcpu;
private int _interruptRequested;
public HvExecutionContextVcpu(ulong vcpu)
{
_vcpu = vcpu;
}
public ulong GetX(int index)
{
if (index == 31)
{
HvApi.hv_vcpu_get_sys_reg(_vcpu, hv_sys_reg_t.HV_SYS_REG_SP_EL0, out ulong value).ThrowOnError();
return value;
}
else
{
HvApi.hv_vcpu_get_reg(_vcpu, hv_reg_t.HV_REG_X0 + (uint)index, out ulong value).ThrowOnError();
return value;
}
}
public void SetX(int index, ulong value)
{
if (index == 31)
{
HvApi.hv_vcpu_set_sys_reg(_vcpu, hv_sys_reg_t.HV_SYS_REG_SP_EL0, value).ThrowOnError();
}
else
{
HvApi.hv_vcpu_set_reg(_vcpu, hv_reg_t.HV_REG_X0 + (uint)index, value).ThrowOnError();
}
}
public V128 GetV(int index)
{
HvApi.hv_vcpu_get_simd_fp_reg(_vcpu, hv_simd_fp_reg_t.HV_SIMD_FP_REG_Q0 + (uint)index, out hv_simd_fp_uchar16_t value).ThrowOnError();
return new V128(value.Low, value.High);
}
public void SetV(int index, V128 value)
{
_setSimdFpReg(_vcpu, hv_simd_fp_reg_t.HV_SIMD_FP_REG_Q0 + (uint)index, value, _setSimdFpRegNativePtr).ThrowOnError();
}
public void RequestInterrupt()
{
if (Interlocked.Exchange(ref _interruptRequested, 1) == 0)
{
ulong vcpu = _vcpu;
HvApi.hv_vcpus_exit(ref vcpu, 1);
}
}
public bool GetAndClearInterruptRequested()
{
return Interlocked.Exchange(ref _interruptRequested, 0) != 0;
}
}
}

@@ -0,0 +1,34 @@
using System;
namespace Ryujinx.Cpu.AppleHv
{
class HvIpaAllocator
{
private const ulong AllocationGranule = 1UL << 14;
private const ulong IpaRegionSize = 1UL << 35;
private readonly PrivateMemoryAllocator.Block _block;
public HvIpaAllocator()
{
_block = new PrivateMemoryAllocator.Block(null, IpaRegionSize);
}
public ulong Allocate(ulong size, ulong alignment = AllocationGranule)
{
ulong offset = _block.Allocate(size, alignment);
if (offset == PrivateMemoryAllocator.InvalidOffset)
{
throw new InvalidOperationException($"No enough free IPA memory to allocate 0x{size:X} bytes with alignment 0x{alignment:X}.");
}
return offset;
}
public void Free(ulong offset, ulong size)
{
_block.Free(offset, size);
}
}
}

@@ -0,0 +1,34 @@
using Ryujinx.Memory;
using System;
namespace Ryujinx.Cpu.AppleHv
{
struct HvMemoryBlockAllocation : IDisposable
{
private readonly HvMemoryBlockAllocator _owner;
private readonly HvMemoryBlockAllocator.Block _block;
public bool IsValid => _owner != null;
public MemoryBlock Memory => _block.Memory;
public ulong Ipa => _block.Ipa;
public ulong Offset { get; }
public ulong Size { get; }
public HvMemoryBlockAllocation(
HvMemoryBlockAllocator owner,
HvMemoryBlockAllocator.Block block,
ulong offset,
ulong size)
{
_owner = owner;
_block = block;
Offset = offset;
Size = size;
}
public void Dispose()
{
_owner.Free(_block, Offset, Size);
}
}
}

@@ -0,0 +1,59 @@
using Ryujinx.Memory;
using System.Collections.Generic;
namespace Ryujinx.Cpu.AppleHv
{
class HvMemoryBlockAllocator : PrivateMemoryAllocatorImpl<HvMemoryBlockAllocator.Block>
{
private const ulong InvalidOffset = ulong.MaxValue;
public class Block : PrivateMemoryAllocator.Block
{
private readonly HvIpaAllocator _ipaAllocator;
public ulong Ipa { get; }
public Block(HvIpaAllocator ipaAllocator, MemoryBlock memory, ulong size) : base(memory, size)
{
_ipaAllocator = ipaAllocator;
lock (ipaAllocator)
{
Ipa = ipaAllocator.Allocate(size);
}
HvApi.hv_vm_map((ulong)Memory.Pointer, Ipa, size, hv_memory_flags_t.HV_MEMORY_READ | hv_memory_flags_t.HV_MEMORY_WRITE).ThrowOnError();
}
public override void Destroy()
{
HvApi.hv_vm_unmap(Ipa, Size).ThrowOnError();
lock (_ipaAllocator)
{
_ipaAllocator.Free(Ipa, Size);
}
base.Destroy();
}
}
private readonly HvIpaAllocator _ipaAllocator;
public HvMemoryBlockAllocator(HvIpaAllocator ipaAllocator, int blockAlignment) : base(blockAlignment, MemoryAllocationFlags.None)
{
_ipaAllocator = ipaAllocator;
}
public unsafe HvMemoryBlockAllocation Allocate(ulong size, ulong alignment)
{
var allocation = Allocate(size, alignment, CreateBlock);
return new HvMemoryBlockAllocation(this, allocation.Block, allocation.Offset, allocation.Size);
}
private Block CreateBlock(MemoryBlock memory, ulong size)
{
return new Block(_ipaAllocator, memory, size);
}
}
}

@@ -0,0 +1,947 @@
using ARMeilleure.Memory;
using Ryujinx.Cpu.Tracking;
using Ryujinx.Memory;
using Ryujinx.Memory.Range;
using Ryujinx.Memory.Tracking;
using System;
using System.Collections.Generic;
using System.Linq;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using System.Threading;
namespace Ryujinx.Cpu.AppleHv
{
/// <summary>
/// Represents a CPU memory manager which maps guest virtual memory directly onto the Hypervisor page table.
/// </summary>
public class HvMemoryManager : MemoryManagerBase, IMemoryManager, IVirtualMemoryManagerTracked, IWritableBlock
{
public const int PageBits = 12;
public const int PageSize = 1 << PageBits;
public const int PageMask = PageSize - 1;
public const int PageToPteShift = 5; // 32 pages (2 bits each) in one ulong page table entry.
public const ulong BlockMappedMask = 0x5555555555555555; // First bit of each table entry set.
private enum HostMappedPtBits : ulong
{
Unmapped = 0,
Mapped,
WriteTracked,
ReadWriteTracked,
MappedReplicated = 0x5555555555555555,
WriteTrackedReplicated = 0xaaaaaaaaaaaaaaaa,
ReadWriteTrackedReplicated = ulong.MaxValue
}
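// Two bits of _pageBitmap track each page: 00 = unmapped, 01 = mapped,
// 10 = mapped + write tracked, 11 = mapped + read/write tracked. One ulong
// covers 32 pages (hence PageToPteShift = 5), and the "Replicated" values
// broadcast a state to all 32 slots so whole words can be tested at once.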
private readonly InvalidAccessHandler _invalidAccessHandler;
private readonly ulong _addressSpaceSize;
private readonly HvAddressSpace _addressSpace;
internal HvAddressSpace AddressSpace => _addressSpace;
private readonly MemoryBlock _backingMemory;
private readonly PageTable<ulong> _pageTable;
private readonly ulong[] _pageBitmap;
public bool Supports4KBPages => true;
public int AddressSpaceBits { get; }
public IntPtr PageTablePointer => IntPtr.Zero;
public MemoryManagerType Type => MemoryManagerType.SoftwarePageTable;
public MemoryTracking Tracking { get; }
public event Action<ulong, ulong> UnmapEvent;
/// <summary>
/// Creates a new instance of the Hypervisor memory manager.
/// </summary>
/// <param name="backingMemory">Physical backing memory where virtual memory will be mapped to</param>
/// <param name="addressSpaceSize">Size of the address space</param>
/// <param name="invalidAccessHandler">Optional function to handle invalid memory accesses</param>
public HvMemoryManager(MemoryBlock backingMemory, ulong addressSpaceSize, InvalidAccessHandler invalidAccessHandler = null)
{
_backingMemory = backingMemory;
_pageTable = new PageTable<ulong>();
_invalidAccessHandler = invalidAccessHandler;
_addressSpaceSize = addressSpaceSize;
ulong asSize = PageSize;
int asBits = PageBits;
while (asSize < addressSpaceSize)
{
asSize <<= 1;
asBits++;
}
_addressSpace = new HvAddressSpace(backingMemory, asSize);
AddressSpaceBits = asBits;
_pageBitmap = new ulong[1 << (AddressSpaceBits - (PageBits + PageToPteShift))];
Tracking = new MemoryTracking(this, PageSize, invalidAccessHandler);
}
/// <summary>
/// Checks if the virtual address is part of the addressable space.
/// </summary>
/// <param name="va">Virtual address</param>
/// <returns>True if the virtual address is part of the addressable space</returns>
private bool ValidateAddress(ulong va)
{
return va < _addressSpaceSize;
}
/// <summary>
/// Checks if the combination of virtual address and size is part of the addressable space.
/// </summary>
/// <param name="va">Virtual address of the range</param>
/// <param name="size">Size of the range in bytes</param>
/// <returns>True if the combination of virtual address and size is part of the addressable space</returns>
private bool ValidateAddressAndSize(ulong va, ulong size)
{
ulong endVa = va + size;
return endVa >= va && endVa >= size && endVa <= _addressSpaceSize;
}
/// <summary>
/// Ensures the combination of virtual address and size is part of the addressable space.
/// </summary>
/// <param name="va">Virtual address of the range</param>
/// <param name="size">Size of the range in bytes</param>
/// <exception cref="InvalidMemoryRegionException">Throw when the memory region specified outside the addressable space</exception>
private void AssertValidAddressAndSize(ulong va, ulong size)
{
if (!ValidateAddressAndSize(va, size))
{
throw new InvalidMemoryRegionException($"va=0x{va:X16}, size=0x{size:X16}");
}
}
/// <summary>
/// Ensures the combination of virtual address and size is part of the addressable space and fully mapped.
/// </summary>
/// <param name="va">Virtual address of the range</param>
/// <param name="size">Size of the range in bytes</param>
private void AssertMapped(ulong va, ulong size)
{
if (!ValidateAddressAndSize(va, size) || !IsRangeMappedImpl(va, size))
{
throw new InvalidMemoryRegionException($"Not mapped: va=0x{va:X16}, size=0x{size:X16}");
}
}
/// <inheritdoc/>
public void Map(ulong va, ulong pa, ulong size, MemoryMapFlags flags)
{
AssertValidAddressAndSize(va, size);
PtMap(va, pa, size);
_addressSpace.MapUser(va, pa, size, MemoryPermission.ReadWriteExecute);
AddMapping(va, size);
Tracking.Map(va, size);
}
private void PtMap(ulong va, ulong pa, ulong size)
{
while (size != 0)
{
_pageTable.Map(va, pa);
va += PageSize;
pa += PageSize;
size -= PageSize;
}
}
/// <inheritdoc/>
public void MapForeign(ulong va, nuint hostPointer, ulong size)
{
throw new NotSupportedException();
}
/// <inheritdoc/>
public void Unmap(ulong va, ulong size)
{
AssertValidAddressAndSize(va, size);
UnmapEvent?.Invoke(va, size);
Tracking.Unmap(va, size);
RemoveMapping(va, size);
_addressSpace.UnmapUser(va, size);
PtUnmap(va, size);
}
private void PtUnmap(ulong va, ulong size)
{
while (size != 0)
{
_pageTable.Unmap(va);
va += PageSize;
size -= PageSize;
}
}
/// <inheritdoc/>
public T Read<T>(ulong va) where T : unmanaged
{
return MemoryMarshal.Cast<byte, T>(GetSpan(va, Unsafe.SizeOf<T>()))[0];
}
/// <inheritdoc/>
public T ReadTracked<T>(ulong va) where T : unmanaged
{
try
{
SignalMemoryTracking(va, (ulong)Unsafe.SizeOf<T>(), false);
return Read<T>(va);
}
catch (InvalidMemoryRegionException)
{
if (_invalidAccessHandler == null || !_invalidAccessHandler(va))
{
throw;
}
return default;
}
}
/// <inheritdoc/>
public void Read(ulong va, Span<byte> data)
{
ReadImpl(va, data);
}
/// <inheritdoc/>
public void Write<T>(ulong va, T value) where T : unmanaged
{
Write(va, MemoryMarshal.Cast<T, byte>(MemoryMarshal.CreateSpan(ref value, 1)));
}
/// <inheritdoc/>
public void Write(ulong va, ReadOnlySpan<byte> data)
{
if (data.Length == 0)
{
return;
}
SignalMemoryTracking(va, (ulong)data.Length, true);
WriteImpl(va, data);
}
/// <inheritdoc/>
public void WriteUntracked(ulong va, ReadOnlySpan<byte> data)
{
if (data.Length == 0)
{
return;
}
WriteImpl(va, data);
}
/// <inheritdoc/>
public bool WriteWithRedundancyCheck(ulong va, ReadOnlySpan<byte> data)
{
if (data.Length == 0)
{
return false;
}
SignalMemoryTracking(va, (ulong)data.Length, false);
if (IsContiguousAndMapped(va, data.Length))
{
var target = _backingMemory.GetSpan(GetPhysicalAddressInternal(va), data.Length);
bool changed = !data.SequenceEqual(target);
if (changed)
{
data.CopyTo(target);
}
return changed;
}
else
{
WriteImpl(va, data);
return true;
}
}
private void WriteImpl(ulong va, ReadOnlySpan<byte> data)
{
try
{
AssertValidAddressAndSize(va, (ulong)data.Length);
if (IsContiguousAndMapped(va, data.Length))
{
data.CopyTo(_backingMemory.GetSpan(GetPhysicalAddressInternal(va), data.Length));
}
else
{
int offset = 0, size;
if ((va & PageMask) != 0)
{
ulong pa = GetPhysicalAddressChecked(va);
size = Math.Min(data.Length, PageSize - (int)(va & PageMask));
data.Slice(0, size).CopyTo(_backingMemory.GetSpan(pa, size));
offset += size;
}
for (; offset < data.Length; offset += size)
{
ulong pa = GetPhysicalAddressChecked(va + (ulong)offset);
size = Math.Min(data.Length - offset, PageSize);
data.Slice(offset, size).CopyTo(_backingMemory.GetSpan(pa, size));
}
}
}
catch (InvalidMemoryRegionException)
{
if (_invalidAccessHandler == null || !_invalidAccessHandler(va))
{
throw;
}
}
}
/// <inheritdoc/>
public ReadOnlySpan<byte> GetSpan(ulong va, int size, bool tracked = false)
{
if (size == 0)
{
return ReadOnlySpan<byte>.Empty;
}
if (tracked)
{
SignalMemoryTracking(va, (ulong)size, false);
}
if (IsContiguousAndMapped(va, size))
{
return _backingMemory.GetSpan(GetPhysicalAddressInternal(va), size);
}
else
{
Span<byte> data = new byte[size];
ReadImpl(va, data);
return data;
}
}
/// <inheritdoc/>
public WritableRegion GetWritableRegion(ulong va, int size, bool tracked = false)
{
if (size == 0)
{
return new WritableRegion(null, va, Memory<byte>.Empty);
}
if (tracked)
{
SignalMemoryTracking(va, (ulong)size, true);
}
if (IsContiguousAndMapped(va, size))
{
return new WritableRegion(null, va, _backingMemory.GetMemory(GetPhysicalAddressInternal(va), size));
}
else
{
Memory<byte> memory = new byte[size];
ReadImpl(va, memory.Span);
return new WritableRegion(this, va, memory);
}
}
/// <inheritdoc/>
public ref T GetRef<T>(ulong va) where T : unmanaged
{
if (!IsContiguous(va, Unsafe.SizeOf<T>()))
{
ThrowMemoryNotContiguous();
}
SignalMemoryTracking(va, (ulong)Unsafe.SizeOf<T>(), true);
return ref _backingMemory.GetRef<T>(GetPhysicalAddressChecked(va));
}
/// <inheritdoc/>
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public bool IsMapped(ulong va)
{
return ValidateAddress(va) && IsMappedImpl(va);
}
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private bool IsMappedImpl(ulong va)
{
ulong page = va >> PageBits;
int bit = (int)((page & 31) << 1);
int pageIndex = (int)(page >> PageToPteShift);
ref ulong pageRef = ref _pageBitmap[pageIndex];
ulong pte = Volatile.Read(ref pageRef);
return ((pte >> bit) & 3) != 0;
}
/// <inheritdoc/>
public bool IsRangeMapped(ulong va, ulong size)
{
AssertValidAddressAndSize(va, size);
return IsRangeMappedImpl(va, size);
}
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private void GetPageBlockRange(ulong pageStart, ulong pageEnd, out ulong startMask, out ulong endMask, out int pageIndex, out int pageEndIndex)
{
startMask = ulong.MaxValue << ((int)(pageStart & 31) << 1);
endMask = ulong.MaxValue >> (64 - ((int)(pageEnd & 31) << 1));
pageIndex = (int)(pageStart >> PageToPteShift);
pageEndIndex = (int)((pageEnd - 1) >> PageToPteShift);
}
private bool IsRangeMappedImpl(ulong va, ulong size)
{
int pages = GetPagesCount(va, size, out _);
if (pages == 1)
{
return IsMappedImpl(va);
}
ulong pageStart = va >> PageBits;
ulong pageEnd = pageStart + (ulong)pages;
GetPageBlockRange(pageStart, pageEnd, out ulong startMask, out ulong endMask, out int pageIndex, out int pageEndIndex);
// Check if either bit in each 2 bit page entry is set.
// OR the block with itself shifted down by 1, and check the first bit of each entry.
ulong mask = BlockMappedMask & startMask;
while (pageIndex <= pageEndIndex)
{
if (pageIndex == pageEndIndex)
{
mask &= endMask;
}
ref ulong pageRef = ref _pageBitmap[pageIndex++];
ulong pte = Volatile.Read(ref pageRef);
pte |= pte >> 1;
if ((pte & mask) != mask)
{
return false;
}
mask = BlockMappedMask;
}
return true;
}
private static void ThrowMemoryNotContiguous() => throw new MemoryNotContiguousException();
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private bool IsContiguousAndMapped(ulong va, int size) => IsContiguous(va, size) && IsMapped(va);
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private bool IsContiguous(ulong va, int size)
{
if (!ValidateAddress(va) || !ValidateAddressAndSize(va, (ulong)size))
{
return false;
}
int pages = GetPagesCount(va, (uint)size, out va);
for (int page = 0; page < pages - 1; page++)
{
if (!ValidateAddress(va + PageSize))
{
return false;
}
if (GetPhysicalAddressInternal(va) + PageSize != GetPhysicalAddressInternal(va + PageSize))
{
return false;
}
va += PageSize;
}
return true;
}
/// <inheritdoc/>
public IEnumerable<HostMemoryRange> GetHostRegions(ulong va, ulong size)
{
if (size == 0)
{
return Enumerable.Empty<HostMemoryRange>();
}
var guestRegions = GetPhysicalRegionsImpl(va, size);
if (guestRegions == null)
{
return null;
}
var regions = new HostMemoryRange[guestRegions.Count];
for (int i = 0; i < regions.Length; i++)
{
var guestRegion = guestRegions[i];
IntPtr pointer = _backingMemory.GetPointer(guestRegion.Address, guestRegion.Size);
regions[i] = new HostMemoryRange((nuint)(ulong)pointer, guestRegion.Size);
}
return regions;
}
/// <inheritdoc/>
public IEnumerable<MemoryRange> GetPhysicalRegions(ulong va, ulong size)
{
if (size == 0)
{
return Enumerable.Empty<MemoryRange>();
}
return GetPhysicalRegionsImpl(va, size);
}
private List<MemoryRange> GetPhysicalRegionsImpl(ulong va, ulong size)
{
if (!ValidateAddress(va) || !ValidateAddressAndSize(va, size))
{
return null;
}
int pages = GetPagesCount(va, (uint)size, out va);
var regions = new List<MemoryRange>();
ulong regionStart = GetPhysicalAddressInternal(va);
ulong regionSize = PageSize;
for (int page = 0; page < pages - 1; page++)
{
if (!ValidateAddress(va + PageSize))
{
return null;
}
ulong newPa = GetPhysicalAddressInternal(va + PageSize);
if (GetPhysicalAddressInternal(va) + PageSize != newPa)
{
regions.Add(new MemoryRange(regionStart, regionSize));
regionStart = newPa;
regionSize = 0;
}
va += PageSize;
regionSize += PageSize;
}
regions.Add(new MemoryRange(regionStart, regionSize));
return regions;
}
private void ReadImpl(ulong va, Span<byte> data)
{
if (data.Length == 0)
{
return;
}
try
{
AssertValidAddressAndSize(va, (ulong)data.Length);
int offset = 0, size;
if ((va & PageMask) != 0)
{
ulong pa = GetPhysicalAddressChecked(va);
size = Math.Min(data.Length, PageSize - (int)(va & PageMask));
_backingMemory.GetSpan(pa, size).CopyTo(data.Slice(0, size));
offset += size;
}
for (; offset < data.Length; offset += size)
{
ulong pa = GetPhysicalAddressChecked(va + (ulong)offset);
size = Math.Min(data.Length - offset, PageSize);
_backingMemory.GetSpan(pa, size).CopyTo(data.Slice(offset, size));
}
}
catch (InvalidMemoryRegionException)
{
if (_invalidAccessHandler == null || !_invalidAccessHandler(va))
{
throw;
}
}
}
/// <inheritdoc/>
/// <remarks>
/// This function also validates that the given range is both valid and mapped, and will throw if it is not.
/// </remarks>
public void SignalMemoryTracking(ulong va, ulong size, bool write, bool precise = false)
{
AssertValidAddressAndSize(va, size);
if (precise)
{
Tracking.VirtualMemoryEvent(va, size, write, precise: true);
return;
}
// Software table, used for managed memory tracking.
int pages = GetPagesCount(va, size, out _);
ulong pageStart = va >> PageBits;
if (pages == 1)
{
ulong tag = (ulong)(write ? HostMappedPtBits.WriteTracked : HostMappedPtBits.ReadWriteTracked);
int bit = (int)((pageStart & 31) << 1);
int pageIndex = (int)(pageStart >> PageToPteShift);
ref ulong pageRef = ref _pageBitmap[pageIndex];
ulong pte = Volatile.Read(ref pageRef);
ulong state = ((pte >> bit) & 3);
if (state >= tag)
{
Tracking.VirtualMemoryEvent(va, size, write);
return;
}
else if (state == 0)
{
ThrowInvalidMemoryRegionException($"Not mapped: va=0x{va:X16}, size=0x{size:X16}");
}
}
else
{
ulong pageEnd = pageStart + (ulong)pages;
GetPageBlockRange(pageStart, pageEnd, out ulong startMask, out ulong endMask, out int pageIndex, out int pageEndIndex);
ulong mask = startMask;
ulong anyTrackingTag = (ulong)HostMappedPtBits.WriteTrackedReplicated;
while (pageIndex <= pageEndIndex)
{
if (pageIndex == pageEndIndex)
{
mask &= endMask;
}
ref ulong pageRef = ref _pageBitmap[pageIndex++];
ulong pte = Volatile.Read(ref pageRef);
ulong mappedMask = mask & BlockMappedMask;
ulong mappedPte = pte | (pte >> 1);
if ((mappedPte & mappedMask) != mappedMask)
{
ThrowInvalidMemoryRegionException($"Not mapped: va=0x{va:X16}, size=0x{size:X16}");
}
pte &= mask;
if ((pte & anyTrackingTag) != 0) // Search for any tracking.
{
// Writes trigger any tracking.
// Only trigger tracking from reads if both bits are set on any page.
if (write || (pte & (pte >> 1) & BlockMappedMask) != 0)
{
Tracking.VirtualMemoryEvent(va, size, write);
break;
}
}
mask = ulong.MaxValue;
}
}
}
/// <summary>
/// Computes the number of pages in a virtual address range.
/// </summary>
/// <param name="va">Virtual address of the range</param>
/// <param name="size">Size of the range</param>
/// <param name="startVa">The virtual address of the beginning of the first page</param>
/// <remarks>This function does not differentiate between allocated and unallocated pages.</remarks>
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private int GetPagesCount(ulong va, ulong size, out ulong startVa)
{
// WARNING: make sure the ulong arithmetic below cannot overflow.
startVa = va & ~(ulong)PageMask;
ulong vaSpan = (va - startVa + size + PageMask) & ~(ulong)PageMask;
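// Illustrative example (not in the original source): with 4 KiB pages,
// va = 0x1080 and size = 0x2000 give startVa = 0x1000 and vaSpan = 0x3000,
// so the range spans 3 pages.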
return (int)(vaSpan / PageSize);
}
/// <inheritdoc/>
public void TrackingReprotect(ulong va, ulong size, MemoryPermission protection)
{
// Protection is inverted on software pages, since the default value is 0.
protection = (~protection) & MemoryPermission.ReadAndWrite;
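// For example, a request for ReadAndWrite (nothing to track) becomes None and
// maps to the plain Mapped tag, while a request for Read becomes Write and
// maps to WriteTracked (writes must be caught).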
int pages = GetPagesCount(va, size, out va);
ulong pageStart = va >> PageBits;
if (pages == 1)
{
ulong protTag = protection switch
{
MemoryPermission.None => (ulong)HostMappedPtBits.Mapped,
MemoryPermission.Write => (ulong)HostMappedPtBits.WriteTracked,
_ => (ulong)HostMappedPtBits.ReadWriteTracked,
};
int bit = (int)((pageStart & 31) << 1);
ulong tagMask = 3UL << bit;
ulong invTagMask = ~tagMask;
ulong tag = protTag << bit;
int pageIndex = (int)(pageStart >> PageToPteShift);
ref ulong pageRef = ref _pageBitmap[pageIndex];
ulong pte;
do
{
pte = Volatile.Read(ref pageRef);
}
while ((pte & tagMask) != 0 && Interlocked.CompareExchange(ref pageRef, (pte & invTagMask) | tag, pte) != pte);
}
else
{
ulong pageEnd = pageStart + (ulong)pages;
GetPageBlockRange(pageStart, pageEnd, out ulong startMask, out ulong endMask, out int pageIndex, out int pageEndIndex);
ulong mask = startMask;
ulong protTag = protection switch
{
MemoryPermission.None => (ulong)HostMappedPtBits.MappedReplicated,
MemoryPermission.Write => (ulong)HostMappedPtBits.WriteTrackedReplicated,
_ => (ulong)HostMappedPtBits.ReadWriteTrackedReplicated,
};
while (pageIndex <= pageEndIndex)
{
if (pageIndex == pageEndIndex)
{
mask &= endMask;
}
ref ulong pageRef = ref _pageBitmap[pageIndex++];
ulong pte;
ulong mappedMask;
// Change the protection of all 2-bit entries that are mapped.
do
{
pte = Volatile.Read(ref pageRef);
mappedMask = pte | (pte >> 1);
mappedMask |= (mappedMask & BlockMappedMask) << 1;
mappedMask &= mask; // Only update mapped pages within the given range.
}
while (Interlocked.CompareExchange(ref pageRef, (pte & (~mappedMask)) | (protTag & mappedMask), pte) != pte);
mask = ulong.MaxValue;
}
}
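// Convert the inverted software tag back into the real protection to apply
// to the host mapping.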
protection = protection switch
{
MemoryPermission.None => MemoryPermission.ReadAndWrite,
MemoryPermission.Write => MemoryPermission.Read,
_ => MemoryPermission.None
};
_addressSpace.ReprotectUser(va, size, protection);
}
/// <inheritdoc/>
public CpuRegionHandle BeginTracking(ulong address, ulong size)
{
return new CpuRegionHandle(Tracking.BeginTracking(address, size));
}
/// <inheritdoc/>
public CpuMultiRegionHandle BeginGranularTracking(ulong address, ulong size, IEnumerable<IRegionHandle> handles, ulong granularity)
{
return new CpuMultiRegionHandle(Tracking.BeginGranularTracking(address, size, handles, granularity));
}
/// <inheritdoc/>
public CpuSmartMultiRegionHandle BeginSmartGranularTracking(ulong address, ulong size, ulong granularity)
{
return new CpuSmartMultiRegionHandle(Tracking.BeginSmartGranularTracking(address, size, granularity));
}
/// <summary>
/// Adds the given address mapping to the page table.
/// </summary>
/// <param name="va">Virtual memory address</param>
/// <param name="size">Size to be mapped</param>
private void AddMapping(ulong va, ulong size)
{
int pages = GetPagesCount(va, size, out _);
ulong pageStart = va >> PageBits;
ulong pageEnd = pageStart + (ulong)pages;
GetPageBlockRange(pageStart, pageEnd, out ulong startMask, out ulong endMask, out int pageIndex, out int pageEndIndex);
ulong mask = startMask;
while (pageIndex <= pageEndIndex)
{
if (pageIndex == pageEndIndex)
{
mask &= endMask;
}
ref ulong pageRef = ref _pageBitmap[pageIndex++];
ulong pte;
ulong mappedMask;
// Map all 2-bit entries that are unmapped.
do
{
pte = Volatile.Read(ref pageRef);
mappedMask = pte | (pte >> 1);
mappedMask |= (mappedMask & BlockMappedMask) << 1;
mappedMask |= ~mask; // Treat everything outside the range as mapped, thus unchanged.
}
while (Interlocked.CompareExchange(ref pageRef, (pte & mappedMask) | (BlockMappedMask & (~mappedMask)), pte) != pte);
mask = ulong.MaxValue;
}
}
/// <summary>
/// Removes the given address mapping from the page table.
/// </summary>
/// <param name="va">Virtual memory address</param>
/// <param name="size">Size to be unmapped</param>
private void RemoveMapping(ulong va, ulong size)
{
int pages = GetPagesCount(va, size, out _);
ulong pageStart = va >> PageBits;
ulong pageEnd = pageStart + (ulong)pages;
GetPageBlockRange(pageStart, pageEnd, out ulong startMask, out ulong endMask, out int pageIndex, out int pageEndIndex);
startMask = ~startMask;
endMask = ~endMask;
ulong mask = startMask;
while (pageIndex <= pageEndIndex)
{
if (pageIndex == pageEndIndex)
{
mask |= endMask;
}
ref ulong pageRef = ref _pageBitmap[pageIndex++];
ulong pte;
do
{
pte = Volatile.Read(ref pageRef);
}
while (Interlocked.CompareExchange(ref pageRef, pte & mask, pte) != pte);
mask = 0;
}
}
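/// <summary>
/// Translates a virtual address to a physical address, throwing if the
/// address is not mapped.
/// </summary>
/// <param name="va">Virtual address to translate</param>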
private ulong GetPhysicalAddressChecked(ulong va)
{
if (!IsMapped(va))
{
ThrowInvalidMemoryRegionException($"Not mapped: va=0x{va:X16}");
}
return GetPhysicalAddressInternal(va);
}
private ulong GetPhysicalAddressInternal(ulong va)
{
return _pageTable.Read(va) + (va & PageMask);
}
/// <summary>
/// Disposes of resources used by the memory manager.
/// </summary>
protected override void Destroy()
{
_addressSpace.Dispose();
}
private static void ThrowInvalidMemoryRegionException(string message) => throw new InvalidMemoryRegionException(message);
}
}

View file

@ -0,0 +1,25 @@
namespace Ryujinx.Cpu.AppleHv
{
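/// <summary>
/// State of a single hypervisor VCPU: the native handle, the exit information
/// block, and the shadow and native execution contexts.
/// </summary>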
unsafe class HvVcpu
{
public readonly ulong Handle;
public readonly hv_vcpu_exit_t* ExitInfo;
public readonly IHvExecutionContext ShadowContext;
public readonly IHvExecutionContext NativeContext;
public readonly bool IsEphemeral;
public HvVcpu(
ulong handle,
hv_vcpu_exit_t* exitInfo,
IHvExecutionContext shadowContext,
IHvExecutionContext nativeContext,
bool isEphemeral)
{
Handle = handle;
ExitInfo = exitInfo;
ShadowContext = shadowContext;
NativeContext = nativeContext;
IsEphemeral = isEphemeral;
}
}
}

View file

@ -0,0 +1,103 @@
using System;
using System.Threading;
namespace Ryujinx.Cpu.AppleHv
{
class HvVcpuPool
{
// Since there's a limit on the number of VCPUs we can create,
// and we assign one VCPU per guest thread, we need to ensure
// there are enough VCPUs available for at least the maximum number of active guest threads.
// To do that, we always destroy and re-create VCPUs that are above a given limit.
// Those VCPUs are called "ephemeral" here because they are not kept for long.
//
// In the future, we might want a smarter approach that only makes the VCPUs
// of infrequently running threads ephemeral, but this is complicated because
// a VCPU can only be destroyed by the same thread that created it.
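// For example (illustrative numbers, not from the original source): if the
// host allows 8 VCPUs in total, the first 4 VCPUs created are persistent and
// any created beyond that are ephemeral, so a slot can always be freed for a
// thread that needs to run.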
private const int MaxActiveVcpus = 4;
public static readonly HvVcpuPool Instance = new HvVcpuPool();
private int _totalVcpus;
private int _maxVcpus;
public HvVcpuPool()
{
HvApi.hv_vm_get_max_vcpu_count(out uint maxVcpuCount).ThrowOnError();
_maxVcpus = (int)maxVcpuCount;
}
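/// <summary>
/// Creates a new VCPU, loads the shadow context state into it and makes it
/// current through <paramref name="swapContext"/>.
/// </summary>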
public HvVcpu Create(HvAddressSpace addressSpace, IHvExecutionContext shadowContext, Action<IHvExecutionContext> swapContext)
{
HvVcpu vcpu = CreateNew(addressSpace, shadowContext);
vcpu.NativeContext.Load(shadowContext);
swapContext(vcpu.NativeContext);
return vcpu;
}
public void Destroy(HvVcpu vcpu, Action<IHvExecutionContext> swapContext)
{
vcpu.ShadowContext.Load(vcpu.NativeContext);
swapContext(vcpu.ShadowContext);
DestroyVcpu(vcpu);
}
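/// <summary>
/// Returns a VCPU after guest execution, destroying it if it is ephemeral so
/// that its slot becomes available again.
/// </summary>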
public void Return(HvVcpu vcpu, Action<IHvExecutionContext> swapContext)
{
if (vcpu.IsEphemeral)
{
Destroy(vcpu, swapContext);
}
}
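/// <summary>
/// Rents a VCPU for guest execution, re-creating it if the previous one was
/// ephemeral and was therefore destroyed when it was returned.
/// </summary>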
public HvVcpu Rent(HvAddressSpace addressSpace, IHvExecutionContext shadowContext, HvVcpu vcpu, Action<IHvExecutionContext> swapContext)
{
if (vcpu.IsEphemeral)
{
return Create(addressSpace, shadowContext, swapContext);
}
else
{
return vcpu;
}
}
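// Illustrative usage from a guest thread (sketch only; "SwapContext" stands
// for whatever callback the caller uses to publish the active context):
//
//   HvVcpu vcpu = HvVcpuPool.Instance.Create(addressSpace, shadowContext, SwapContext);
//   // ... run the guest; when the thread goes idle:
//   HvVcpuPool.Instance.Return(vcpu, SwapContext);
//   // ... and when it needs to run again:
//   vcpu = HvVcpuPool.Instance.Rent(addressSpace, shadowContext, vcpu, SwapContext);
//   // On thread exit:
//   HvVcpuPool.Instance.Destroy(vcpu, SwapContext);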
private unsafe HvVcpu CreateNew(HvAddressSpace addressSpace, IHvExecutionContext shadowContext)
{
int newCount = IncrementVcpuCount();
bool isEphemeral = newCount > _maxVcpus - MaxActiveVcpus;
// Create VCPU.
hv_vcpu_exit_t* exitInfo = null;
HvApi.hv_vcpu_create(out ulong vcpuHandle, ref exitInfo, IntPtr.Zero).ThrowOnError();
// Enable FP and SIMD instructions.
HvApi.hv_vcpu_set_sys_reg(vcpuHandle, hv_sys_reg_t.HV_SYS_REG_CPACR_EL1, 0b11 << 20).ThrowOnError();
addressSpace.InitializeMmu(vcpuHandle);
HvExecutionContextVcpu nativeContext = new HvExecutionContextVcpu(vcpuHandle);
HvVcpu vcpu = new HvVcpu(vcpuHandle, exitInfo, shadowContext, nativeContext, isEphemeral);
return vcpu;
}
private void DestroyVcpu(HvVcpu vcpu)
{
HvApi.hv_vcpu_destroy(vcpu.Handle).ThrowOnError();
DecrementVcpuCount();
}
private int IncrementVcpuCount()
{
return Interlocked.Increment(ref _totalVcpus);
}
private void DecrementVcpuCount()
{
Interlocked.Decrement(ref _totalVcpus);
}
}
}

View file

@ -0,0 +1,68 @@
using Ryujinx.Memory;
using System;
namespace Ryujinx.Cpu.AppleHv
{
static class HvVm
{
// This alignment allows us to use larger blocks on the page table.
private const ulong AsIpaAlignment = 1UL << 30;
private static int _addressSpaces;
private static HvIpaAllocator _ipaAllocator;
private static object _lock = new object();
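/// <summary>
/// Creates a new address space, creating the global VM instance on first use
/// and mapping the backing memory block into the guest IPA space.
/// </summary>
/// <param name="block">Memory block that backs the address space</param>
/// <returns>The base IPA of the mapping and the shared IPA allocator</returns>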
public static (ulong, HvIpaAllocator) CreateAddressSpace(MemoryBlock block)
{
HvIpaAllocator ipaAllocator;
lock (_lock)
{
if (++_addressSpaces == 1)
{
HvApi.hv_vm_create(IntPtr.Zero).ThrowOnError();
_ipaAllocator = ipaAllocator = new HvIpaAllocator();
}
else
{
ipaAllocator = _ipaAllocator;
}
}
ulong baseAddress;
lock (ipaAllocator)
{
baseAddress = ipaAllocator.Allocate(block.Size, AsIpaAlignment);
}
var rwx = hv_memory_flags_t.HV_MEMORY_READ | hv_memory_flags_t.HV_MEMORY_WRITE | hv_memory_flags_t.HV_MEMORY_EXEC;
HvApi.hv_vm_map((ulong)block.Pointer, baseAddress, block.Size, rwx).ThrowOnError();
return (baseAddress, ipaAllocator);
}
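/// <summary>
/// Destroys an address space, unmapping it from the guest, freeing its IPA
/// range and destroying the global VM when the last address space is gone.
/// </summary>
/// <param name="address">Base IPA of the address space</param>
/// <param name="size">Size of the address space</param>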
public static void DestroyAddressSpace(ulong address, ulong size)
{
HvApi.hv_vm_unmap(address, size);
HvIpaAllocator ipaAllocator;
lock (_lock)
{
if (--_addressSpaces == 0)
{
HvApi.hv_vm_destroy().ThrowOnError();
}
ipaAllocator = _ipaAllocator;
}
lock (ipaAllocator)
{
ipaAllocator.Free(address, size);
}
}
}
}

View file

@ -0,0 +1,46 @@
using ARMeilleure.State;
namespace Ryujinx.Cpu.AppleHv
{
public interface IHvExecutionContext
{
ulong Pc { get; set; }
ulong ElrEl1 { get; set; }
ulong EsrEl1 { get; set; }
long TpidrEl0 { get; set; }
long TpidrroEl0 { get; set; }
uint Pstate { get; set; }
uint Fpcr { get; set; }
uint Fpsr { get; set; }
ulong GetX(int index);
void SetX(int index, ulong value);
V128 GetV(int index);
void SetV(int index, V128 value);
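/// <summary>
/// Copies the full register state (PC, EL1 registers, TPIDR registers, PSTATE,
/// FP control and status, and all 32 X and V registers) from another context.
/// </summary>
/// <param name="context">Source context to copy the state from</param>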
public void Load(IHvExecutionContext context)
{
Pc = context.Pc;
ElrEl1 = context.ElrEl1;
EsrEl1 = context.EsrEl1;
TpidrEl0 = context.TpidrEl0;
TpidrroEl0 = context.TpidrroEl0;
Pstate = context.Pstate;
Fpcr = context.Fpcr;
Fpsr = context.Fpsr;
for (int i = 0; i < 32; i++)
{
SetX(i, context.GetX(i));
SetV(i, context.GetV(i));
}
}
void RequestInterrupt();
bool GetAndClearInterruptRequested();
}
}