Move solution and projects to src

Authored by TSR Berry on 2023-04-08 01:22:00 +02:00; committed by Mary
parent cd124bda58
commit cee7121058
3466 changed files with 55 additions and 55 deletions


@@ -0,0 +1,544 @@
using Ryujinx.Cpu.Tracking;
using Ryujinx.Graphics.GAL;
using Ryujinx.Memory.Range;
using Ryujinx.Memory.Tracking;
using System;
using System.Collections.Generic;
using System.Linq;
namespace Ryujinx.Graphics.Gpu.Memory
{
/// <summary>
/// Buffer, used to store vertex and index data, uniform and storage buffers, and others.
/// </summary>
class Buffer : IRange, IDisposable
{
private const ulong GranularBufferThreshold = 4096;
private readonly GpuContext _context;
private readonly PhysicalMemory _physicalMemory;
/// <summary>
/// Host buffer handle.
/// </summary>
public BufferHandle Handle { get; }
/// <summary>
/// Start address of the buffer in guest memory.
/// </summary>
public ulong Address { get; }
/// <summary>
/// Size of the buffer in bytes.
/// </summary>
public ulong Size { get; }
/// <summary>
/// End address of the buffer in guest memory.
/// </summary>
public ulong EndAddress => Address + Size;
/// <summary>
/// Increments when the buffer is (partially) unmapped or disposed.
/// </summary>
public int UnmappedSequence { get; private set; }
/// <summary>
/// Ranges of the buffer that have been modified on the GPU.
/// Ranges defined here cannot be updated from the CPU until a CPU-waiting sync point is reached.
/// Then, write tracking will signal, wait for GPU sync (generated at the sync point), and flush these regions.
/// </summary>
/// <remarks>
/// This is null until at least one modification occurs.
/// </remarks>
private BufferModifiedRangeList _modifiedRanges = null;
private readonly CpuMultiRegionHandle _memoryTrackingGranular;
private readonly CpuRegionHandle _memoryTracking;
private readonly RegionSignal _externalFlushDelegate;
private readonly Action<ulong, ulong> _loadDelegate;
private readonly Action<ulong, ulong> _modifiedDelegate;
private int _sequenceNumber;
private bool _useGranular;
private bool _syncActionRegistered;
private int _referenceCount = 1;
/// <summary>
/// Creates a new instance of the buffer.
/// </summary>
/// <param name="context">GPU context that the buffer belongs to</param>
/// <param name="physicalMemory">Physical memory where the buffer is mapped</param>
/// <param name="address">Start address of the buffer</param>
/// <param name="size">Size of the buffer in bytes</param>
/// <param name="baseBuffers">Buffers which this buffer contains, and will inherit tracking handles from</param>
public Buffer(GpuContext context, PhysicalMemory physicalMemory, ulong address, ulong size, IEnumerable<Buffer> baseBuffers = null)
{
_context = context;
_physicalMemory = physicalMemory;
Address = address;
Size = size;
Handle = context.Renderer.CreateBuffer((int)size, baseBuffers?.MaxBy(x => x.Size).Handle ?? BufferHandle.Null);
_useGranular = size > GranularBufferThreshold;
IEnumerable<IRegionHandle> baseHandles = null;
if (baseBuffers != null)
{
baseHandles = baseBuffers.SelectMany(buffer =>
{
if (buffer._useGranular)
{
return buffer._memoryTrackingGranular.GetHandles();
}
else
{
return Enumerable.Repeat(buffer._memoryTracking.GetHandle(), 1);
}
});
}
if (_useGranular)
{
_memoryTrackingGranular = physicalMemory.BeginGranularTracking(address, size, ResourceKind.Buffer, baseHandles);
_memoryTrackingGranular.RegisterPreciseAction(address, size, PreciseAction);
}
else
{
_memoryTracking = physicalMemory.BeginTracking(address, size, ResourceKind.Buffer);
if (baseHandles != null)
{
_memoryTracking.Reprotect(false);
foreach (IRegionHandle handle in baseHandles)
{
if (handle.Dirty)
{
_memoryTracking.Reprotect(true);
}
handle.Dispose();
}
}
_memoryTracking.RegisterPreciseAction(PreciseAction);
}
_externalFlushDelegate = new RegionSignal(ExternalFlush);
_loadDelegate = new Action<ulong, ulong>(LoadRegion);
_modifiedDelegate = new Action<ulong, ulong>(RegionModified);
}
/// <summary>
/// Gets a sub-range from the buffer, from a start address to the end of the buffer.
/// </summary>
/// <remarks>
/// This can be used to bind and use sub-ranges of the buffer on the host API.
/// </remarks>
/// <param name="address">Start address of the sub-range, must be greater than or equal to the buffer address</param>
/// <returns>The buffer sub-range</returns>
public BufferRange GetRange(ulong address)
{
ulong offset = address - Address;
return new BufferRange(Handle, (int)offset, (int)(Size - offset));
}
/// <summary>
/// Gets a sub-range from the buffer.
/// </summary>
/// <remarks>
/// This can be used to bind and use sub-ranges of the buffer on the host API.
/// </remarks>
/// <param name="address">Start address of the sub-range, must be greater than or equal to the buffer address</param>
/// <param name="size">Size in bytes of the sub-range, must be less than or equal to the buffer size</param>
/// <returns>The buffer sub-range</returns>
public BufferRange GetRange(ulong address, ulong size)
{
int offset = (int)(address - Address);
return new BufferRange(Handle, offset, (int)size);
}
/// <summary>
/// Checks if a given range overlaps with the buffer.
/// </summary>
/// <param name="address">Start address of the range</param>
/// <param name="size">Size in bytes of the range</param>
/// <returns>True if the range overlaps, false otherwise</returns>
public bool OverlapsWith(ulong address, ulong size)
{
return Address < address + size && address < EndAddress;
}
/// <summary>
/// Checks if a given range is fully contained in the buffer.
/// </summary>
/// <param name="address">Start address of the range</param>
/// <param name="size">Size in bytes of the range</param>
/// <returns>True if the range is contained, false otherwise</returns>
public bool FullyContains(ulong address, ulong size)
{
return address >= Address && address + size <= EndAddress;
}
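// Worked example (illustrative, not part of this commit): for a buffer at
// [0x1000, 0x2000), OverlapsWith(0x1800, 0x1000) returns true because the
// half-open intervals intersect, while FullyContains(0x1800, 0x1000) returns
// false because the queried range ends at 0x2800, past EndAddress (0x2000).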
/// <summary>
/// Performs guest to host memory synchronization of the buffer data.
/// </summary>
/// <remarks>
/// This causes the buffer data to be overwritten if a write was detected from the CPU
/// since the last call to this method.
/// </remarks>
/// <param name="address">Start address of the range to synchronize</param>
/// <param name="size">Size in bytes of the range to synchronize</param>
public void SynchronizeMemory(ulong address, ulong size)
{
if (_useGranular)
{
_memoryTrackingGranular.QueryModified(address, size, _modifiedDelegate, _context.SequenceNumber);
}
else
{
if (_context.SequenceNumber != _sequenceNumber && _memoryTracking.DirtyOrVolatile())
{
_memoryTracking.Reprotect();
if (_modifiedRanges != null)
{
_modifiedRanges.ExcludeModifiedRegions(Address, Size, _loadDelegate);
}
else
{
_context.Renderer.SetBufferData(Handle, 0, _physicalMemory.GetSpan(Address, (int)Size));
}
_sequenceNumber = _context.SequenceNumber;
}
}
}
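// Note (illustrative sketch): in the non-granular path above, the sequence
// number acts as a per-frame gate. Once a dirty buffer has been handled,
// _sequenceNumber matches _context.SequenceNumber for the rest of the frame,
// so further calls short-circuit before the dirty query and upload:
//
//   SynchronizeMemory(a, s); // dirty: reprotects, uploads, stores sequence number
//   SynchronizeMemory(a, s); // same frame: sequence numbers match, no-op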
/// <summary>
/// Ensure that the modified range list exists.
/// </summary>
private void EnsureRangeList()
{
if (_modifiedRanges == null)
{
_modifiedRanges = new BufferModifiedRangeList(_context, this, Flush);
}
}
/// <summary>
/// Signal that the given region of the buffer has been modified.
/// </summary>
/// <param name="address">The start address of the modified region</param>
/// <param name="size">The size of the modified region</param>
public void SignalModified(ulong address, ulong size)
{
EnsureRangeList();
_modifiedRanges.SignalModified(address, size);
if (!_syncActionRegistered)
{
_context.RegisterSyncAction(SyncAction);
_syncActionRegistered = true;
}
}
/// <summary>
/// Indicate that modifications in a given region of this buffer have been overwritten.
/// </summary>
/// <param name="address">The start address of the region</param>
/// <param name="size">The size of the region</param>
public void ClearModified(ulong address, ulong size)
{
_modifiedRanges?.Clear(address, size);
}
/// <summary>
/// Action to be performed when a syncpoint is reached after modification.
/// This will register read/write tracking to flush the buffer from GPU when its memory is used.
/// </summary>
private void SyncAction()
{
_syncActionRegistered = false;
if (_useGranular)
{
_modifiedRanges?.GetRanges(Address, Size, (address, size) =>
{
_memoryTrackingGranular.RegisterAction(address, size, _externalFlushDelegate);
SynchronizeMemory(address, size);
});
}
else
{
_memoryTracking.RegisterAction(_externalFlushDelegate);
SynchronizeMemory(Address, Size);
}
}
/// <summary>
/// Inherit modified ranges from another buffer.
/// </summary>
/// <param name="from">The buffer to inherit from</param>
public void InheritModifiedRanges(Buffer from)
{
if (from._modifiedRanges != null && from._modifiedRanges.HasRanges)
{
if (from._syncActionRegistered && !_syncActionRegistered)
{
_context.RegisterSyncAction(SyncAction);
_syncActionRegistered = true;
}
Action<ulong, ulong> registerRangeAction = (ulong address, ulong size) =>
{
if (_useGranular)
{
_memoryTrackingGranular.RegisterAction(address, size, _externalFlushDelegate);
}
else
{
_memoryTracking.RegisterAction(_externalFlushDelegate);
}
};
EnsureRangeList();
_modifiedRanges.InheritRanges(from._modifiedRanges, registerRangeAction);
}
}
/// <summary>
/// Determine if a given region of the buffer has been modified, and must be flushed.
/// </summary>
/// <param name="address">The start address of the region</param>
/// <param name="size">The size of the region</param>
/// <returns>True if the region has been modified, false otherwise</returns>
public bool IsModified(ulong address, ulong size)
{
if (_modifiedRanges != null)
{
return _modifiedRanges.HasRange(address, size);
}
return false;
}
/// <summary>
/// Indicate that a region of the buffer was modified, and must be loaded from memory.
/// </summary>
/// <param name="mAddress">Start address of the modified region</param>
/// <param name="mSize">Size of the modified region</param>
private void RegionModified(ulong mAddress, ulong mSize)
{
if (mAddress < Address)
{
mAddress = Address;
}
ulong maxSize = Address + Size - mAddress;
if (mSize > maxSize)
{
mSize = maxSize;
}
if (_modifiedRanges != null)
{
_modifiedRanges.ExcludeModifiedRegions(mAddress, mSize, _loadDelegate);
}
else
{
LoadRegion(mAddress, mSize);
}
}
/// <summary>
/// Load a region of the buffer from memory.
/// </summary>
/// <param name="mAddress">Start address of the modified region</param>
/// <param name="mSize">Size of the modified region</param>
private void LoadRegion(ulong mAddress, ulong mSize)
{
int offset = (int)(mAddress - Address);
_context.Renderer.SetBufferData(Handle, offset, _physicalMemory.GetSpan(mAddress, (int)mSize));
}
/// <summary>
/// Force a region of the buffer to be dirty. Avoids reprotection and bypasses the sequence number check.
/// </summary>
/// <param name="mAddress">Start address of the modified region</param>
/// <param name="mSize">Size of the region to force dirty</param>
public void ForceDirty(ulong mAddress, ulong mSize)
{
_modifiedRanges?.Clear(mAddress, mSize);
if (_useGranular)
{
_memoryTrackingGranular.ForceDirty(mAddress, mSize);
}
else
{
_memoryTracking.ForceDirty();
_sequenceNumber--;
}
}
/// <summary>
/// Performs a copy of all the buffer data from one buffer to another.
/// </summary>
/// <param name="destination">The destination buffer to copy the data into</param>
/// <param name="dstOffset">The offset of the destination buffer to copy into</param>
public void CopyTo(Buffer destination, int dstOffset)
{
_context.Renderer.Pipeline.CopyBuffer(Handle, destination.Handle, 0, dstOffset, (int)Size);
}
/// <summary>
/// Flushes a range of the buffer.
/// This writes the range data back into guest memory.
/// </summary>
/// <param name="address">Start address of the range</param>
/// <param name="size">Size in bytes of the range</param>
public void Flush(ulong address, ulong size)
{
int offset = (int)(address - Address);
using PinnedSpan<byte> data = _context.Renderer.GetBufferData(Handle, offset, (int)size);
// TODO: When write tracking shaders, they will need to be aware of changes in overlapping buffers.
_physicalMemory.WriteUntracked(address, data.Get());
}
/// <summary>
/// Align a given address and size region to page boundaries.
/// </summary>
/// <param name="address">The start address of the region</param>
/// <param name="size">The size of the region</param>
/// <returns>The page aligned address and size</returns>
private static (ulong address, ulong size) PageAlign(ulong address, ulong size)
{
ulong pageMask = MemoryManager.PageMask;
ulong rA = address & ~pageMask;
ulong rS = ((address + size + pageMask) & ~pageMask) - rA;
return (rA, rS);
}
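// Worked example (illustrative), assuming 4 KiB pages (PageMask = 0xFFF):
//   PageAlign(0x1234, 0x10)
//     rA = 0x1234 & ~0xFFF                          = 0x1000
//     rS = ((0x1234 + 0x10 + 0xFFF) & ~0xFFF) - rA  = 0x2000 - 0x1000 = 0x1000
// A 16-byte flush therefore expands to its whole containing page [0x1000, 0x2000).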
/// <summary>
/// Flush modified ranges of the buffer from another thread.
/// This will flush all modifications made before the active SyncNumber was set, and may block to wait for GPU sync.
/// </summary>
/// <param name="address">Address of the memory action</param>
/// <param name="size">Size in bytes</param>
public void ExternalFlush(ulong address, ulong size)
{
_context.Renderer.BackgroundContextAction(() =>
{
var ranges = _modifiedRanges;
if (ranges != null)
{
(address, size) = PageAlign(address, size);
ranges.WaitForAndFlushRanges(address, size);
}
}, true);
}
/// <summary>
/// An action to be performed when a precise memory access occurs to this resource.
/// For buffers, this skips flush-on-write by punching holes directly into the modified range list.
/// </summary>
/// <param name="address">Address of the memory action</param>
/// <param name="size">Size in bytes</param>
/// <param name="write">True if the access was a write, false otherwise</param>
private bool PreciseAction(ulong address, ulong size, bool write)
{
if (!write)
{
// We only want to skip flush-on-write.
return false;
}
ulong maxAddress = Math.Max(address, Address);
ulong minEndAddress = Math.Min(address + size, Address + Size);
if (maxAddress >= minEndAddress)
{
// Access doesn't overlap.
return false;
}
ForceDirty(maxAddress, minEndAddress - maxAddress);
return true;
}
/// <summary>
/// Called when part of the memory for this buffer has been unmapped.
/// Calls are from non-GPU threads.
/// </summary>
/// <param name="address">Start address of the unmapped region</param>
/// <param name="size">Size of the unmapped region</param>
public void Unmapped(ulong address, ulong size)
{
BufferModifiedRangeList modifiedRanges = _modifiedRanges;
modifiedRanges?.Clear(address, size);
UnmappedSequence++;
}
/// <summary>
/// Increments the buffer reference count.
/// </summary>
public void IncrementReferenceCount()
{
_referenceCount++;
}
/// <summary>
/// Decrements the buffer reference count.
/// </summary>
public void DecrementReferenceCount()
{
if (--_referenceCount == 0)
{
DisposeData();
}
}
/// <summary>
/// Disposes the host buffer's data, not its tracking handles.
/// </summary>
public void DisposeData()
{
_modifiedRanges?.Clear();
_context.Renderer.DeleteBuffer(Handle);
UnmappedSequence++;
}
/// <summary>
/// Disposes the host buffer.
/// </summary>
public void Dispose()
{
_memoryTrackingGranular?.Dispose();
_memoryTracking?.Dispose();
DecrementReferenceCount();
}
}
}
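The reference counting above lets other components keep the buffer's host data alive beyond the cache's own lifetime; a pending BufferMigration, for example, holds a reference until its sync number is reached. A minimal standalone sketch of the pattern (illustrative names, not from this commit):

using System;

class RefCountedBuffer
{
    private int _referenceCount = 1; // the creator holds the initial reference

    public void IncrementReferenceCount() => _referenceCount++;

    public void DecrementReferenceCount()
    {
        // Free the host data once the last reference is released.
        if (--_referenceCount == 0)
        {
            Console.WriteLine("last reference released: host data freed");
        }
    }
}

class Demo
{
    static void Main()
    {
        var buffer = new RefCountedBuffer();
        buffer.IncrementReferenceCount(); // e.g. a pending migration keeps it alive
        buffer.DecrementReferenceCount(); // the cache releases its reference: still alive
        buffer.DecrementReferenceCount(); // migration completes: data freed here
    }
}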


@@ -0,0 +1,38 @@
using Ryujinx.Graphics.Shader;
namespace Ryujinx.Graphics.Gpu.Memory
{
/// <summary>
/// Memory range used for buffers.
/// </summary>
readonly struct BufferBounds
{
/// <summary>
/// Region virtual address.
/// </summary>
public ulong Address { get; }
/// <summary>
/// Region size in bytes.
/// </summary>
public ulong Size { get; }
/// <summary>
/// Buffer usage flags.
/// </summary>
public BufferUsageFlags Flags { get; }
/// <summary>
/// Creates a new buffer region.
/// </summary>
/// <param name="address">Region address</param>
/// <param name="size">Region size</param>
/// <param name="flags">Buffer usage flags</param>
public BufferBounds(ulong address, ulong size, BufferUsageFlags flags = BufferUsageFlags.None)
{
Address = address;
Size = size;
Flags = flags;
}
}
}


@@ -0,0 +1,507 @@
using Ryujinx.Graphics.GAL;
using Ryujinx.Memory.Range;
using System;
using System.Collections.Generic;
using System.Linq;
namespace Ryujinx.Graphics.Gpu.Memory
{
/// <summary>
/// Buffer cache.
/// </summary>
class BufferCache : IDisposable
{
private const int OverlapsBufferInitialCapacity = 10;
private const int OverlapsBufferMaxCapacity = 10000;
private const ulong BufferAlignmentSize = 0x1000;
private const ulong BufferAlignmentMask = BufferAlignmentSize - 1;
private const ulong MaxDynamicGrowthSize = 0x100000;
private readonly GpuContext _context;
private readonly PhysicalMemory _physicalMemory;
/// <remarks>
/// Only modified from the GPU thread. Must lock for add/remove,
/// and for any access from other threads.
/// </remarks>
private readonly RangeList<Buffer> _buffers;
private Buffer[] _bufferOverlaps;
private readonly Dictionary<ulong, BufferCacheEntry> _dirtyCache;
private readonly Dictionary<ulong, BufferCacheEntry> _modifiedCache;
private bool _pruneCaches;
public event Action NotifyBuffersModified;
/// <summary>
/// Creates a new instance of the buffer manager.
/// </summary>
/// <param name="context">The GPU context that the buffer manager belongs to</param>
/// <param name="physicalMemory">Physical memory where the cached buffers are mapped</param>
public BufferCache(GpuContext context, PhysicalMemory physicalMemory)
{
_context = context;
_physicalMemory = physicalMemory;
_buffers = new RangeList<Buffer>();
_bufferOverlaps = new Buffer[OverlapsBufferInitialCapacity];
_dirtyCache = new Dictionary<ulong, BufferCacheEntry>();
// There are a lot more entries on the modified cache, so it is separate from the one for ForceDirty.
_modifiedCache = new Dictionary<ulong, BufferCacheEntry>();
}
/// <summary>
/// Handles removal of buffers written to a memory region being unmapped.
/// </summary>
/// <param name="sender">Sender object</param>
/// <param name="e">Event arguments</param>
public void MemoryUnmappedHandler(object sender, UnmapEventArgs e)
{
Buffer[] overlaps = new Buffer[10];
int overlapCount;
ulong address = ((MemoryManager)sender).Translate(e.Address);
ulong size = e.Size;
lock (_buffers)
{
overlapCount = _buffers.FindOverlaps(address, size, ref overlaps);
}
for (int i = 0; i < overlapCount; i++)
{
overlaps[i].Unmapped(address, size);
}
}
/// <summary>
/// Performs address translation of the GPU virtual address, and creates a
/// new buffer, if needed, for the specified range.
/// </summary>
/// <param name="memoryManager">GPU memory manager where the buffer is mapped</param>
/// <param name="gpuVa">Start GPU virtual address of the buffer</param>
/// <param name="size">Size in bytes of the buffer</param>
/// <returns>CPU virtual address of the buffer, after address translation</returns>
public ulong TranslateAndCreateBuffer(MemoryManager memoryManager, ulong gpuVa, ulong size)
{
if (gpuVa == 0)
{
return 0;
}
ulong address = memoryManager.Translate(gpuVa);
if (address == MemoryManager.PteUnmapped)
{
return 0;
}
CreateBuffer(address, size);
return address;
}
/// <summary>
/// Creates a new buffer for the specified range, if it does not yet exist.
/// This can be used to ensure the existence of a buffer.
/// </summary>
/// <param name="address">Address of the buffer in memory</param>
/// <param name="size">Size of the buffer in bytes</param>
public void CreateBuffer(ulong address, ulong size)
{
ulong endAddress = address + size;
ulong alignedAddress = address & ~BufferAlignmentMask;
ulong alignedEndAddress = (endAddress + BufferAlignmentMask) & ~BufferAlignmentMask;
// The buffer must be at least one page in size.
if (alignedEndAddress == alignedAddress)
{
alignedEndAddress += BufferAlignmentSize;
}
CreateBufferAligned(alignedAddress, alignedEndAddress - alignedAddress);
}
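// Worked example (illustrative): with BufferAlignmentSize = 0x1000,
//   CreateBuffer(0x1800, 0x100) aligns to [0x1000, 0x2000), one full page;
//   CreateBuffer(0x1000, 0) would yield an empty aligned range, so the
//   one-page bump above turns it into [0x1000, 0x2000) as well.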
/// <summary>
/// Performs address translation of the GPU virtual address, and attempts to force
/// the buffer in the region as dirty.
/// The buffer lookup for this function is cached in a dictionary for quick access, which
/// accelerates common UBO updates.
/// </summary>
/// <param name="memoryManager">GPU memory manager where the buffer is mapped</param>
/// <param name="gpuVa">Start GPU virtual address of the buffer</param>
/// <param name="size">Size in bytes of the buffer</param>
public void ForceDirty(MemoryManager memoryManager, ulong gpuVa, ulong size)
{
if (_pruneCaches)
{
Prune();
}
if (!_dirtyCache.TryGetValue(gpuVa, out BufferCacheEntry result) ||
result.EndGpuAddress < gpuVa + size ||
result.UnmappedSequence != result.Buffer.UnmappedSequence)
{
ulong address = TranslateAndCreateBuffer(memoryManager, gpuVa, size);
result = new BufferCacheEntry(address, gpuVa, GetBuffer(address, size));
_dirtyCache[gpuVa] = result;
}
result.Buffer.ForceDirty(result.Address, size);
}
/// <summary>
/// Checks if the given buffer range has been modified by the GPU.
/// </summary>
/// <param name="memoryManager">GPU memory manager where the buffer is mapped</param>
/// <param name="gpuVa">Start GPU virtual address of the buffer</param>
/// <param name="size">Size in bytes of the buffer</param>
/// <param name="outAddr">CPU virtual address corresponding to the given GPU virtual address</param>
/// <returns>True if modified, false otherwise</returns>
public bool CheckModified(MemoryManager memoryManager, ulong gpuVa, ulong size, out ulong outAddr)
{
if (_pruneCaches)
{
Prune();
}
// Align the address to avoid creating too many entries on the quick lookup dictionary.
ulong mask = BufferAlignmentMask;
ulong alignedGpuVa = gpuVa & (~mask);
ulong alignedEndGpuVa = (gpuVa + size + mask) & (~mask);
size = alignedEndGpuVa - alignedGpuVa;
if (!_modifiedCache.TryGetValue(alignedGpuVa, out BufferCacheEntry result) ||
result.EndGpuAddress < alignedEndGpuVa ||
result.UnmappedSequence != result.Buffer.UnmappedSequence)
{
ulong address = TranslateAndCreateBuffer(memoryManager, alignedGpuVa, size);
result = new BufferCacheEntry(address, alignedGpuVa, GetBuffer(address, size));
_modifiedCache[alignedGpuVa] = result;
}
outAddr = result.Address | (gpuVa & mask);
return result.Buffer.IsModified(result.Address, size);
}
/// <summary>
/// Creates a new buffer for the specified range, if needed.
/// If a buffer where this range can be fully contained already exists,
/// then the creation of a new buffer is not necessary.
/// </summary>
/// <param name="address">Address of the buffer in guest memory</param>
/// <param name="size">Size in bytes of the buffer</param>
private void CreateBufferAligned(ulong address, ulong size)
{
int overlapsCount = _buffers.FindOverlapsNonOverlapping(address, size, ref _bufferOverlaps);
if (overlapsCount != 0)
{
// The buffer already exists. We can just return the existing buffer
// if the buffer we need is fully contained inside the overlapping buffer.
// Otherwise, we must delete the overlapping buffers and create a bigger buffer
// that fits all the data we need. We also need to copy the contents from the
// old buffer(s) to the new buffer.
ulong endAddress = address + size;
if (_bufferOverlaps[0].Address > address || _bufferOverlaps[0].EndAddress < endAddress)
{
// Check if the following conditions are met:
// - We have a single overlap.
// - The overlap starts at or before the requested range. That is, the overlap happens at the end.
// - The size delta between the new, merged buffer and the old one is of at most 2 pages.
// In this case, we attempt to extend the buffer further than the requested range,
// this can potentially avoid future resizes if the application keeps using overlapping
// sequential memory.
// Allowing for 2 pages (rather than just one) is necessary to catch cases where the
// range crosses a page, and after alignment, ends having a size of 2 pages.
if (overlapsCount == 1 &&
address >= _bufferOverlaps[0].Address &&
endAddress - _bufferOverlaps[0].EndAddress <= BufferAlignmentSize * 2)
{
// Try to grow the buffer by 1.5x of its current size.
// This improves performance in the cases where the buffer is resized often by small amounts.
ulong existingSize = _bufferOverlaps[0].Size;
ulong growthSize = (existingSize + Math.Min(existingSize >> 1, MaxDynamicGrowthSize)) & ~BufferAlignmentMask;
size = Math.Max(size, growthSize);
endAddress = address + size;
overlapsCount = _buffers.FindOverlapsNonOverlapping(address, size, ref _bufferOverlaps);
}
for (int index = 0; index < overlapsCount; index++)
{
Buffer buffer = _bufferOverlaps[index];
address = Math.Min(address, buffer.Address);
endAddress = Math.Max(endAddress, buffer.EndAddress);
lock (_buffers)
{
_buffers.Remove(buffer);
}
}
ulong newSize = endAddress - address;
Buffer newBuffer = new Buffer(_context, _physicalMemory, address, newSize, _bufferOverlaps.Take(overlapsCount));
lock (_buffers)
{
_buffers.Add(newBuffer);
}
for (int index = 0; index < overlapsCount; index++)
{
Buffer buffer = _bufferOverlaps[index];
int dstOffset = (int)(buffer.Address - newBuffer.Address);
buffer.CopyTo(newBuffer, dstOffset);
newBuffer.InheritModifiedRanges(buffer);
buffer.DecrementReferenceCount();
}
newBuffer.SynchronizeMemory(address, newSize);
// Existing buffers were modified, we need to rebind everything.
NotifyBuffersModified?.Invoke();
}
}
else
{
// No overlap, just create a new buffer.
Buffer buffer = new Buffer(_context, _physicalMemory, address, size);
lock (_buffers)
{
_buffers.Add(buffer);
}
}
ShrinkOverlapsBufferIfNeeded();
}
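// Worked example of the growth heuristic above (illustrative): growing a
// 64 KiB buffer (existingSize = 0x10000) gives
//   growthSize = (0x10000 + Math.Min(0x10000 >> 1, MaxDynamicGrowthSize)) & ~BufferAlignmentMask
//              = (0x10000 + 0x8000) & ~0xFFF = 0x18000 (96 KiB)
// i.e. the buffer grows by 50%, page aligned, with the growth increment
// capped at MaxDynamicGrowthSize (1 MiB) for very large buffers.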
/// <summary>
/// Resizes the temporary buffer used for range list intersection results, if it has grown too much.
/// </summary>
private void ShrinkOverlapsBufferIfNeeded()
{
if (_bufferOverlaps.Length > OverlapsBufferMaxCapacity)
{
Array.Resize(ref _bufferOverlaps, OverlapsBufferMaxCapacity);
}
}
/// <summary>
/// Copies buffer data from a given address to another.
/// </summary>
/// <remarks>
/// This does a GPU side copy.
/// </remarks>
/// <param name="memoryManager">GPU memory manager where the buffer is mapped</param>
/// <param name="srcVa">GPU virtual address of the copy source</param>
/// <param name="dstVa">GPU virtual address of the copy destination</param>
/// <param name="size">Size in bytes of the copy</param>
public void CopyBuffer(MemoryManager memoryManager, ulong srcVa, ulong dstVa, ulong size)
{
ulong srcAddress = TranslateAndCreateBuffer(memoryManager, srcVa, size);
ulong dstAddress = TranslateAndCreateBuffer(memoryManager, dstVa, size);
Buffer srcBuffer = GetBuffer(srcAddress, size);
Buffer dstBuffer = GetBuffer(dstAddress, size);
int srcOffset = (int)(srcAddress - srcBuffer.Address);
int dstOffset = (int)(dstAddress - dstBuffer.Address);
_context.Renderer.Pipeline.CopyBuffer(
srcBuffer.Handle,
dstBuffer.Handle,
srcOffset,
dstOffset,
(int)size);
if (srcBuffer.IsModified(srcAddress, size))
{
dstBuffer.SignalModified(dstAddress, size);
}
else
{
// Optimization: If the data being copied is already in memory, then copy it directly instead of flushing from GPU.
dstBuffer.ClearModified(dstAddress, size);
memoryManager.Physical.WriteUntracked(dstAddress, memoryManager.Physical.GetSpan(srcAddress, (int)size));
}
}
/// <summary>
/// Clears a buffer at a given address with the specified value.
/// </summary>
/// <remarks>
/// Both the address and size must be aligned to 4 bytes.
/// </remarks>
/// <param name="memoryManager">GPU memory manager where the buffer is mapped</param>
/// <param name="gpuVa">GPU virtual address of the region to clear</param>
/// <param name="size">Number of bytes to clear</param>
/// <param name="value">Value to be written into the buffer</param>
public void ClearBuffer(MemoryManager memoryManager, ulong gpuVa, ulong size, uint value)
{
ulong address = TranslateAndCreateBuffer(memoryManager, gpuVa, size);
Buffer buffer = GetBuffer(address, size);
int offset = (int)(address - buffer.Address);
_context.Renderer.Pipeline.ClearBuffer(buffer.Handle, offset, (int)size, value);
memoryManager.Physical.FillTrackedResource(address, size, value, ResourceKind.Buffer);
}
/// <summary>
/// Gets a buffer sub-range starting at a given memory address.
/// </summary>
/// <param name="address">Start address of the memory range</param>
/// <param name="size">Size in bytes of the memory range</param>
/// <param name="write">Whether the buffer will be written to by this use</param>
/// <returns>The buffer sub-range starting at the given memory address</returns>
public BufferRange GetBufferRangeTillEnd(ulong address, ulong size, bool write = false)
{
return GetBuffer(address, size, write).GetRange(address);
}
/// <summary>
/// Gets a buffer sub-range for a given memory range.
/// </summary>
/// <param name="address">Start address of the memory range</param>
/// <param name="size">Size in bytes of the memory range</param>
/// <param name="write">Whether the buffer will be written to by this use</param>
/// <returns>The buffer sub-range for the given range</returns>
public BufferRange GetBufferRange(ulong address, ulong size, bool write = false)
{
return GetBuffer(address, size, write).GetRange(address, size);
}
/// <summary>
/// Gets a buffer for a given memory range.
/// A buffer overlapping with the specified range is assumed to already exist in the cache.
/// </summary>
/// <param name="address">Start address of the memory range</param>
/// <param name="size">Size in bytes of the memory range</param>
/// <param name="write">Whether the buffer will be written to by this use</param>
/// <returns>The buffer where the range is fully contained</returns>
private Buffer GetBuffer(ulong address, ulong size, bool write = false)
{
Buffer buffer;
if (size != 0)
{
buffer = _buffers.FindFirstOverlap(address, size);
buffer.SynchronizeMemory(address, size);
if (write)
{
buffer.SignalModified(address, size);
}
}
else
{
buffer = _buffers.FindFirstOverlap(address, 1);
}
return buffer;
}
/// <summary>
/// Performs guest to host memory synchronization of a given memory range.
/// </summary>
/// <param name="address">Start address of the memory range</param>
/// <param name="size">Size in bytes of the memory range</param>
public void SynchronizeBufferRange(ulong address, ulong size)
{
if (size != 0)
{
Buffer buffer = _buffers.FindFirstOverlap(address, size);
buffer.SynchronizeMemory(address, size);
}
}
/// <summary>
/// Prune any invalid entries from a quick access dictionary.
/// </summary>
/// <param name="dictionary">Dictionary to prune</param>
/// <param name="toDelete">List used to track entries to delete</param>
private void Prune(Dictionary<ulong, BufferCacheEntry> dictionary, ref List<ulong> toDelete)
{
foreach (var entry in dictionary)
{
if (entry.Value.UnmappedSequence != entry.Value.Buffer.UnmappedSequence)
{
(toDelete ??= new()).Add(entry.Key);
}
}
if (toDelete != null)
{
foreach (ulong entry in toDelete)
{
dictionary.Remove(entry);
}
}
}
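// Note (illustrative): the null-coalescing assignment above,
//   (toDelete ??= new()).Add(entry.Key);
// defers allocating the list until the first stale entry is found, so the
// common case where nothing needs pruning performs no allocation at all.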
/// <summary>
/// Prune any invalid entries from the quick access dictionaries.
/// </summary>
private void Prune()
{
List<ulong> toDelete = null;
Prune(_dirtyCache, ref toDelete);
toDelete?.Clear();
Prune(_modifiedCache, ref toDelete);
_pruneCaches = false;
}
/// <summary>
/// Queues a prune of invalid entries the next time a dictionary cache is accessed.
/// </summary>
public void QueuePrune()
{
_pruneCaches = true;
}
/// <summary>
/// Disposes all buffers in the cache.
/// It's an error to use the buffer manager after disposal.
/// </summary>
public void Dispose()
{
lock (_buffers)
{
foreach (Buffer buffer in _buffers)
{
buffer.Dispose();
}
}
}
}
}


@@ -0,0 +1,43 @@
namespace Ryujinx.Graphics.Gpu.Memory
{
/// <summary>
/// A cached entry for easily locating a buffer that is used often internally.
/// </summary>
class BufferCacheEntry
{
/// <summary>
/// The CPU VA of the buffer destination.
/// </summary>
public ulong Address;
/// <summary>
/// The end GPU VA of the associated buffer, used to check if new data can fit.
/// </summary>
public ulong EndGpuAddress;
/// <summary>
/// The buffer associated with this cache entry.
/// </summary>
public Buffer Buffer;
/// <summary>
/// The UnmappedSequence of the buffer at the time of creation.
/// If this differs from the value currently in the buffer, then this cache entry is outdated.
/// </summary>
public int UnmappedSequence;
/// <summary>
/// Create a new cache entry.
/// </summary>
/// <param name="address">The CPU VA of the buffer destination</param>
/// <param name="gpuVa">The GPU VA of the buffer destination</param>
/// <param name="buffer">The buffer object containing the target buffer</param>
public BufferCacheEntry(ulong address, ulong gpuVa, Buffer buffer)
{
Address = address;
EndGpuAddress = gpuVa + (buffer.EndAddress - address);
Buffer = buffer;
UnmappedSequence = buffer.UnmappedSequence;
}
}
}
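A minimal sketch of how the quick-access caches validate one of these entries before reuse (compare ForceDirty and CheckModified in BufferCache above); the helper name is hypothetical, not part of this commit:

static class BufferCacheEntryChecks
{
    // An entry is reusable only if it still covers the requested range and its
    // buffer has not been unmapped (and recreated) since the entry was made.
    public static bool CanReuse(BufferCacheEntry entry, ulong gpuVa, ulong size)
    {
        return entry.EndGpuAddress >= gpuVa + size &&
            entry.UnmappedSequence == entry.Buffer.UnmappedSequence;
    }
}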


@@ -0,0 +1,754 @@
using Ryujinx.Common;
using Ryujinx.Graphics.GAL;
using Ryujinx.Graphics.Gpu.Image;
using Ryujinx.Graphics.Gpu.Shader;
using Ryujinx.Graphics.Shader;
using System;
using System.Collections.Generic;
using System.Runtime.CompilerServices;
namespace Ryujinx.Graphics.Gpu.Memory
{
/// <summary>
/// Buffer manager.
/// </summary>
class BufferManager
{
private readonly GpuContext _context;
private readonly GpuChannel _channel;
private int _unalignedStorageBuffers;
public bool HasUnalignedStorageBuffers => _unalignedStorageBuffers > 0;
private IndexBuffer _indexBuffer;
private readonly VertexBuffer[] _vertexBuffers;
private readonly BufferBounds[] _transformFeedbackBuffers;
private readonly List<BufferTextureBinding> _bufferTextures;
private readonly BufferAssignment[] _ranges;
/// <summary>
/// Holds shader stage buffer state and binding information.
/// </summary>
private class BuffersPerStage
{
/// <summary>
/// Shader buffer binding information.
/// </summary>
public BufferDescriptor[] Bindings { get; private set; }
/// <summary>
/// Buffer regions.
/// </summary>
public BufferBounds[] Buffers { get; }
/// <summary>
/// Flag indicating if this binding is unaligned.
/// </summary>
public bool[] Unaligned { get; }
/// <summary>
/// Total number of buffers used on the shader.
/// </summary>
public int Count { get; private set; }
/// <summary>
/// Creates a new instance of the shader stage buffer information.
/// </summary>
/// <param name="count">Maximum amount of buffers that the shader stage can use</param>
public BuffersPerStage(int count)
{
Bindings = new BufferDescriptor[count];
Buffers = new BufferBounds[count];
Unaligned = new bool[count];
}
/// <summary>
/// Sets the region of a buffer at a given slot.
/// </summary>
/// <param name="index">Buffer slot</param>
/// <param name="address">Region virtual address</param>
/// <param name="size">Region size in bytes</param>
/// <param name="flags">Buffer usage flags</param>
public void SetBounds(int index, ulong address, ulong size, BufferUsageFlags flags = BufferUsageFlags.None)
{
Buffers[index] = new BufferBounds(address, size, flags);
}
/// <summary>
/// Sets shader buffer binding information.
/// </summary>
/// <param name="descriptors">Buffer binding information</param>
public void SetBindings(BufferDescriptor[] descriptors)
{
if (descriptors == null)
{
Count = 0;
return;
}
if ((Count = descriptors.Length) != 0)
{
Bindings = descriptors;
}
}
}
private readonly BuffersPerStage _cpStorageBuffers;
private readonly BuffersPerStage _cpUniformBuffers;
private readonly BuffersPerStage[] _gpStorageBuffers;
private readonly BuffersPerStage[] _gpUniformBuffers;
private bool _gpStorageBuffersDirty;
private bool _gpUniformBuffersDirty;
private bool _indexBufferDirty;
private bool _vertexBuffersDirty;
private uint _vertexBuffersEnableMask;
private bool _transformFeedbackBuffersDirty;
private bool _rebind;
/// <summary>
/// Creates a new instance of the buffer manager.
/// </summary>
/// <param name="context">GPU context that the buffer manager belongs to</param>
/// <param name="channel">GPU channel that the buffer manager belongs to</param>
public BufferManager(GpuContext context, GpuChannel channel)
{
_context = context;
_channel = channel;
_vertexBuffers = new VertexBuffer[Constants.TotalVertexBuffers];
_transformFeedbackBuffers = new BufferBounds[Constants.TotalTransformFeedbackBuffers];
_cpStorageBuffers = new BuffersPerStage(Constants.TotalCpStorageBuffers);
_cpUniformBuffers = new BuffersPerStage(Constants.TotalCpUniformBuffers);
_gpStorageBuffers = new BuffersPerStage[Constants.ShaderStages];
_gpUniformBuffers = new BuffersPerStage[Constants.ShaderStages];
for (int index = 0; index < Constants.ShaderStages; index++)
{
_gpStorageBuffers[index] = new BuffersPerStage(Constants.TotalGpStorageBuffers);
_gpUniformBuffers[index] = new BuffersPerStage(Constants.TotalGpUniformBuffers);
}
_bufferTextures = new List<BufferTextureBinding>();
_ranges = new BufferAssignment[Constants.TotalGpUniformBuffers * Constants.ShaderStages];
}
/// <summary>
/// Sets the memory range with the index buffer data, to be used for subsequent draw calls.
/// </summary>
/// <param name="gpuVa">Start GPU virtual address of the index buffer</param>
/// <param name="size">Size, in bytes, of the index buffer</param>
/// <param name="type">Type of each index buffer element</param>
public void SetIndexBuffer(ulong gpuVa, ulong size, IndexType type)
{
ulong address = _channel.MemoryManager.Physical.BufferCache.TranslateAndCreateBuffer(_channel.MemoryManager, gpuVa, size);
_indexBuffer.Address = address;
_indexBuffer.Size = size;
_indexBuffer.Type = type;
_indexBufferDirty = true;
}
/// <summary>
/// Sets a new index buffer that overrides the one set on the call to <see cref="CommitGraphicsBindings"/>.
/// </summary>
/// <param name="buffer">Buffer to be used as index buffer</param>
/// <param name="type">Type of each index buffer element</param>
public void SetIndexBuffer(BufferRange buffer, IndexType type)
{
_context.Renderer.Pipeline.SetIndexBuffer(buffer, type);
_indexBufferDirty = true;
}
/// <summary>
/// Sets the memory range with vertex buffer data, to be used for subsequent draw calls.
/// </summary>
/// <param name="index">Index of the vertex buffer (up to 16)</param>
/// <param name="gpuVa">GPU virtual address of the buffer</param>
/// <param name="size">Size in bytes of the buffer</param>
/// <param name="stride">Stride of the buffer, defined as the number of bytes of each vertex</param>
/// <param name="divisor">Vertex divisor of the buffer, for instanced draws</param>
public void SetVertexBuffer(int index, ulong gpuVa, ulong size, int stride, int divisor)
{
ulong address = _channel.MemoryManager.Physical.BufferCache.TranslateAndCreateBuffer(_channel.MemoryManager, gpuVa, size);
_vertexBuffers[index].Address = address;
_vertexBuffers[index].Size = size;
_vertexBuffers[index].Stride = stride;
_vertexBuffers[index].Divisor = divisor;
_vertexBuffersDirty = true;
if (address != 0)
{
_vertexBuffersEnableMask |= 1u << index;
}
else
{
_vertexBuffersEnableMask &= ~(1u << index);
}
}
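// Example (illustrative): the enable mask packs one bit per vertex buffer
// slot, so binding slots 0 and 3 yields mask 0b1001. The draw-time loop
//   for (int index = 0; (vbEnableMask >> index) != 0; index++) { ... }
// then stops as soon as no higher slot is enabled, visiting only slots 0-3.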
/// <summary>
/// Sets a transform feedback buffer on the graphics pipeline.
/// The output from the vertex transformation stages is written into the feedback buffer.
/// </summary>
/// <param name="index">Index of the transform feedback buffer</param>
/// <param name="gpuVa">Start GPU virtual address of the buffer</param>
/// <param name="size">Size in bytes of the transform feedback buffer</param>
public void SetTransformFeedbackBuffer(int index, ulong gpuVa, ulong size)
{
ulong address = _channel.MemoryManager.Physical.BufferCache.TranslateAndCreateBuffer(_channel.MemoryManager, gpuVa, size);
_transformFeedbackBuffers[index] = new BufferBounds(address, size);
_transformFeedbackBuffersDirty = true;
}
/// <summary>
/// Records the alignment of a storage buffer.
/// Unaligned storage buffers disable some optimizations on the shader.
/// </summary>
/// <param name="buffers">The binding list to modify</param>
/// <param name="index">Index of the storage buffer</param>
/// <param name="gpuVa">Start GPU virtual address of the buffer</param>
private void RecordStorageAlignment(BuffersPerStage buffers, int index, ulong gpuVa)
{
bool unaligned = (gpuVa & (Constants.StorageAlignment - 1)) != 0;
if (unaligned || HasUnalignedStorageBuffers)
{
// Check if the alignment changed for this binding.
ref bool currentUnaligned = ref buffers.Unaligned[index];
if (currentUnaligned != unaligned)
{
currentUnaligned = unaligned;
_unalignedStorageBuffers += unaligned ? 1 : -1;
}
}
}
/// <summary>
/// Sets a storage buffer on the compute pipeline.
/// Storage buffers can be read and written to on shaders.
/// </summary>
/// <param name="index">Index of the storage buffer</param>
/// <param name="gpuVa">Start GPU virtual address of the buffer</param>
/// <param name="size">Size in bytes of the storage buffer</param>
/// <param name="flags">Buffer usage flags</param>
public void SetComputeStorageBuffer(int index, ulong gpuVa, ulong size, BufferUsageFlags flags)
{
size += gpuVa & ((ulong)_context.Capabilities.StorageBufferOffsetAlignment - 1);
RecordStorageAlignment(_cpStorageBuffers, index, gpuVa);
gpuVa = BitUtils.AlignDown<ulong>(gpuVa, (ulong)_context.Capabilities.StorageBufferOffsetAlignment);
ulong address = _channel.MemoryManager.Physical.BufferCache.TranslateAndCreateBuffer(_channel.MemoryManager, gpuVa, size);
_cpStorageBuffers.SetBounds(index, address, size, flags);
}
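// Worked example (illustrative), assuming StorageBufferOffsetAlignment = 0x100:
//   SetComputeStorageBuffer(index, gpuVa: 0x10140, size: 0x80)
//     size += 0x10140 & 0xFF            -> 0x80 + 0x40 = 0xC0
//     gpuVa = AlignDown(0x10140, 0x100) -> 0x10100
// The bound range [0x10100, 0x101C0) satisfies the host alignment requirement
// and still covers the originally requested [0x10140, 0x101C0).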
/// <summary>
/// Sets a storage buffer on the graphics pipeline.
/// Storage buffers can be read and written to on shaders.
/// </summary>
/// <param name="stage">Index of the shader stage</param>
/// <param name="index">Index of the storage buffer</param>
/// <param name="gpuVa">Start GPU virtual address of the buffer</param>
/// <param name="size">Size in bytes of the storage buffer</param>
/// <param name="flags">Buffer usage flags</param>
public void SetGraphicsStorageBuffer(int stage, int index, ulong gpuVa, ulong size, BufferUsageFlags flags)
{
size += gpuVa & ((ulong)_context.Capabilities.StorageBufferOffsetAlignment - 1);
BuffersPerStage buffers = _gpStorageBuffers[stage];
RecordStorageAlignment(buffers, index, gpuVa);
gpuVa = BitUtils.AlignDown<ulong>(gpuVa, (ulong)_context.Capabilities.StorageBufferOffsetAlignment);
ulong address = _channel.MemoryManager.Physical.BufferCache.TranslateAndCreateBuffer(_channel.MemoryManager, gpuVa, size);
if (buffers.Buffers[index].Address != address ||
buffers.Buffers[index].Size != size)
{
_gpStorageBuffersDirty = true;
}
buffers.SetBounds(index, address, size, flags);
}
/// <summary>
/// Sets a uniform buffer on the compute pipeline.
/// Uniform buffers are read-only from shaders, and have a small capacity.
/// </summary>
/// <param name="index">Index of the uniform buffer</param>
/// <param name="gpuVa">Start GPU virtual address of the buffer</param>
/// <param name="size">Size in bytes of the storage buffer</param>
public void SetComputeUniformBuffer(int index, ulong gpuVa, ulong size)
{
ulong address = _channel.MemoryManager.Physical.BufferCache.TranslateAndCreateBuffer(_channel.MemoryManager, gpuVa, size);
_cpUniformBuffers.SetBounds(index, address, size);
}
/// <summary>
/// Sets a uniform buffer on the graphics pipeline.
/// Uniform buffers are read-only from shaders, and have a small capacity.
/// </summary>
/// <param name="stage">Index of the shader stage</param>
/// <param name="index">Index of the uniform buffer</param>
/// <param name="gpuVa">Start GPU virtual address of the buffer</param>
/// <param name="size">Size in bytes of the storage buffer</param>
public void SetGraphicsUniformBuffer(int stage, int index, ulong gpuVa, ulong size)
{
ulong address = _channel.MemoryManager.Physical.BufferCache.TranslateAndCreateBuffer(_channel.MemoryManager, gpuVa, size);
_gpUniformBuffers[stage].SetBounds(index, address, size);
_gpUniformBuffersDirty = true;
}
/// <summary>
/// Sets the binding points for the storage buffers bound on the compute pipeline.
/// </summary>
/// <param name="bindings">Bindings for the active shader</param>
public void SetComputeBufferBindings(CachedShaderBindings bindings)
{
_cpStorageBuffers.SetBindings(bindings.StorageBufferBindings[0]);
_cpUniformBuffers.SetBindings(bindings.ConstantBufferBindings[0]);
}
/// <summary>
/// Sets the binding points for the storage buffers bound on the graphics pipeline.
/// </summary>
/// <param name="bindings">Bindings for the active shader</param>
public void SetGraphicsBufferBindings(CachedShaderBindings bindings)
{
for (int i = 0; i < Constants.ShaderStages; i++)
{
_gpStorageBuffers[i].SetBindings(bindings.StorageBufferBindings[i]);
_gpUniformBuffers[i].SetBindings(bindings.ConstantBufferBindings[i]);
}
_gpStorageBuffersDirty = true;
_gpUniformBuffersDirty = true;
}
/// <summary>
/// Gets a bit mask indicating which compute uniform buffers are currently bound.
/// </summary>
/// <returns>Mask where each bit set indicates a bound constant buffer</returns>
public uint GetComputeUniformBufferUseMask()
{
uint mask = 0;
for (int i = 0; i < _cpUniformBuffers.Buffers.Length; i++)
{
if (_cpUniformBuffers.Buffers[i].Address != 0)
{
mask |= 1u << i;
}
}
return mask;
}
/// <summary>
/// Gets a bit mask indicating which graphics uniform buffers are currently bound.
/// </summary>
/// <param name="stage">Index of the shader stage</param>
/// <returns>Mask where each bit set indicates a bound constant buffer</returns>
public uint GetGraphicsUniformBufferUseMask(int stage)
{
uint mask = 0;
for (int i = 0; i < _gpUniformBuffers[stage].Buffers.Length; i++)
{
if (_gpUniformBuffers[stage].Buffers[i].Address != 0)
{
mask |= 1u << i;
}
}
return mask;
}
/// <summary>
/// Gets the address of the compute uniform buffer currently bound at the given index.
/// </summary>
/// <param name="index">Index of the uniform buffer binding</param>
/// <returns>The uniform buffer address, or an undefined value if the buffer is not currently bound</returns>
public ulong GetComputeUniformBufferAddress(int index)
{
return _cpUniformBuffers.Buffers[index].Address;
}
/// <summary>
/// Gets the address of the graphics uniform buffer currently bound at the given index.
/// </summary>
/// <param name="stage">Index of the shader stage</param>
/// <param name="index">Index of the uniform buffer binding</param>
/// <returns>The uniform buffer address, or an undefined value if the buffer is not currently bound</returns>
public ulong GetGraphicsUniformBufferAddress(int stage, int index)
{
return _gpUniformBuffers[stage].Buffers[index].Address;
}
/// <summary>
/// Gets the bounds of the uniform buffer currently bound at the given index.
/// </summary>
/// <param name="isCompute">Indicates whenever the uniform is requested by the 3D or compute engine</param>
/// <param name="stage">Index of the shader stage, if the uniform is for the 3D engine</param>
/// <param name="index">Index of the uniform buffer binding</param>
/// <returns>The uniform buffer bounds, or an undefined value if the buffer is not currently bound</returns>
public ref BufferBounds GetUniformBufferBounds(bool isCompute, int stage, int index)
{
if (isCompute)
{
return ref _cpUniformBuffers.Buffers[index];
}
else
{
return ref _gpUniformBuffers[stage].Buffers[index];
}
}
/// <summary>
/// Ensures that the compute engine bindings are visible to the host GPU.
/// Note: this actually performs the binding using the host graphics API.
/// </summary>
public void CommitComputeBindings()
{
var bufferCache = _channel.MemoryManager.Physical.BufferCache;
BindBuffers(bufferCache, _cpStorageBuffers, isStorage: true);
BindBuffers(bufferCache, _cpUniformBuffers, isStorage: false);
CommitBufferTextureBindings();
// Force rebind after doing compute work.
Rebind();
}
/// <summary>
/// Commit any queued buffer texture bindings.
/// </summary>
private void CommitBufferTextureBindings()
{
if (_bufferTextures.Count > 0)
{
foreach (var binding in _bufferTextures)
{
var isStore = binding.BindingInfo.Flags.HasFlag(TextureUsageFlags.ImageStore);
var range = _channel.MemoryManager.Physical.BufferCache.GetBufferRange(binding.Address, binding.Size, isStore);
binding.Texture.SetStorage(range);
// The texture must be rebound to use the new storage if it was updated.
if (binding.IsImage)
{
_context.Renderer.Pipeline.SetImage(binding.BindingInfo.Binding, binding.Texture, binding.Format);
}
else
{
_context.Renderer.Pipeline.SetTextureAndSampler(binding.Stage, binding.BindingInfo.Binding, binding.Texture, null);
}
}
_bufferTextures.Clear();
}
}
/// <summary>
/// Ensures that the graphics engine bindings are visible to the host GPU.
/// Note: this actually performs the binding using the host graphics API.
/// </summary>
public void CommitGraphicsBindings()
{
var bufferCache = _channel.MemoryManager.Physical.BufferCache;
if (_indexBufferDirty || _rebind)
{
_indexBufferDirty = false;
if (_indexBuffer.Address != 0)
{
BufferRange buffer = bufferCache.GetBufferRange(_indexBuffer.Address, _indexBuffer.Size);
_context.Renderer.Pipeline.SetIndexBuffer(buffer, _indexBuffer.Type);
}
}
else if (_indexBuffer.Address != 0)
{
bufferCache.SynchronizeBufferRange(_indexBuffer.Address, _indexBuffer.Size);
}
uint vbEnableMask = _vertexBuffersEnableMask;
if (_vertexBuffersDirty || _rebind)
{
_vertexBuffersDirty = false;
Span<VertexBufferDescriptor> vertexBuffers = stackalloc VertexBufferDescriptor[Constants.TotalVertexBuffers];
for (int index = 0; (vbEnableMask >> index) != 0; index++)
{
VertexBuffer vb = _vertexBuffers[index];
if (vb.Address == 0)
{
continue;
}
BufferRange buffer = bufferCache.GetBufferRange(vb.Address, vb.Size);
vertexBuffers[index] = new VertexBufferDescriptor(buffer, vb.Stride, vb.Divisor);
}
_context.Renderer.Pipeline.SetVertexBuffers(vertexBuffers);
}
else
{
for (int index = 0; (vbEnableMask >> index) != 0; index++)
{
VertexBuffer vb = _vertexBuffers[index];
if (vb.Address == 0)
{
continue;
}
bufferCache.SynchronizeBufferRange(vb.Address, vb.Size);
}
}
if (_transformFeedbackBuffersDirty || _rebind)
{
_transformFeedbackBuffersDirty = false;
Span<BufferRange> tfbs = stackalloc BufferRange[Constants.TotalTransformFeedbackBuffers];
for (int index = 0; index < Constants.TotalTransformFeedbackBuffers; index++)
{
BufferBounds tfb = _transformFeedbackBuffers[index];
if (tfb.Address == 0)
{
tfbs[index] = BufferRange.Empty;
continue;
}
tfbs[index] = bufferCache.GetBufferRange(tfb.Address, tfb.Size, write: true);
}
_context.Renderer.Pipeline.SetTransformFeedbackBuffers(tfbs);
}
else
{
for (int index = 0; index < Constants.TotalTransformFeedbackBuffers; index++)
{
BufferBounds tfb = _transformFeedbackBuffers[index];
if (tfb.Address == 0)
{
continue;
}
bufferCache.SynchronizeBufferRange(tfb.Address, tfb.Size);
}
}
if (_gpStorageBuffersDirty || _rebind)
{
_gpStorageBuffersDirty = false;
BindBuffers(bufferCache, _gpStorageBuffers, isStorage: true);
}
else
{
UpdateBuffers(_gpStorageBuffers);
}
if (_gpUniformBuffersDirty || _rebind)
{
_gpUniformBuffersDirty = false;
BindBuffers(bufferCache, _gpUniformBuffers, isStorage: false);
}
else
{
UpdateBuffers(_gpUniformBuffers);
}
CommitBufferTextureBindings();
_rebind = false;
}
/// <summary>
/// Bind respective buffer bindings on the host API.
/// </summary>
/// <param name="bufferCache">Buffer cache holding the buffers for the specified ranges</param>
/// <param name="bindings">Buffer memory ranges to bind</param>
/// <param name="isStorage">True to bind as storage buffer, false to bind as uniform buffer</param>
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private void BindBuffers(BufferCache bufferCache, BuffersPerStage[] bindings, bool isStorage)
{
int rangesCount = 0;
Span<BufferAssignment> ranges = _ranges;
for (ShaderStage stage = ShaderStage.Vertex; stage <= ShaderStage.Fragment; stage++)
{
ref var buffers = ref bindings[(int)stage - 1];
for (int index = 0; index < buffers.Count; index++)
{
ref var bindingInfo = ref buffers.Bindings[index];
BufferBounds bounds = buffers.Buffers[bindingInfo.Slot];
if (bounds.Address != 0)
{
var isWrite = bounds.Flags.HasFlag(BufferUsageFlags.Write);
var range = isStorage
? bufferCache.GetBufferRangeTillEnd(bounds.Address, bounds.Size, isWrite)
: bufferCache.GetBufferRange(bounds.Address, bounds.Size);
ranges[rangesCount++] = new BufferAssignment(bindingInfo.Binding, range);
}
}
}
if (rangesCount != 0)
{
SetHostBuffers(ranges, rangesCount, isStorage);
}
}
/// <summary>
/// Bind respective buffer bindings on the host API.
/// </summary>
/// <param name="bufferCache">Buffer cache holding the buffers for the specified ranges</param>
/// <param name="buffers">Buffer memory ranges to bind</param>
/// <param name="isStorage">True to bind as storage buffer, false to bind as uniform buffer</param>
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private void BindBuffers(BufferCache bufferCache, BuffersPerStage buffers, bool isStorage)
{
int rangesCount = 0;
Span<BufferAssignment> ranges = _ranges;
for (int index = 0; index < buffers.Count; index++)
{
ref var bindingInfo = ref buffers.Bindings[index];
BufferBounds bounds = buffers.Buffers[bindingInfo.Slot];
if (bounds.Address != 0)
{
var isWrite = bounds.Flags.HasFlag(BufferUsageFlags.Write);
var range = isStorage
? bufferCache.GetBufferRangeTillEnd(bounds.Address, bounds.Size, isWrite)
: bufferCache.GetBufferRange(bounds.Address, bounds.Size);
ranges[rangesCount++] = new BufferAssignment(bindingInfo.Binding, range);
}
}
if (rangesCount != 0)
{
SetHostBuffers(ranges, rangesCount, isStorage);
}
}
/// <summary>
/// Bind respective buffer bindings on the host API.
/// </summary>
/// <param name="ranges">Host buffers to bind, with their offsets and sizes</param>
/// <param name="first">First binding point</param>
/// <param name="count">Number of bindings</param>
/// <param name="isStorage">Indicates if the buffers are storage or uniform buffers</param>
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private void SetHostBuffers(ReadOnlySpan<BufferAssignment> ranges, int count, bool isStorage)
{
if (isStorage)
{
_context.Renderer.Pipeline.SetStorageBuffers(ranges.Slice(0, count));
}
else
{
_context.Renderer.Pipeline.SetUniformBuffers(ranges.Slice(0, count));
}
}
/// <summary>
/// Updates data for the already bound buffer bindings.
/// </summary>
/// <param name="bindings">Bindings to update</param>
private void UpdateBuffers(BuffersPerStage[] bindings)
{
for (ShaderStage stage = ShaderStage.Vertex; stage <= ShaderStage.Fragment; stage++)
{
ref var buffers = ref bindings[(int)stage - 1];
for (int index = 0; index < buffers.Count; index++)
{
ref var binding = ref buffers.Bindings[index];
BufferBounds bounds = buffers.Buffers[binding.Slot];
if (bounds.Address == 0)
{
continue;
}
_channel.MemoryManager.Physical.BufferCache.SynchronizeBufferRange(bounds.Address, bounds.Size);
}
}
}
/// <summary>
/// Sets the buffer storage of a buffer texture. This will be bound when the buffer manager commits bindings.
/// </summary>
/// <param name="stage">Shader stage accessing the texture</param>
/// <param name="texture">Buffer texture</param>
/// <param name="address">Address of the buffer in memory</param>
/// <param name="size">Size of the buffer in bytes</param>
/// <param name="bindingInfo">Binding info for the buffer texture</param>
/// <param name="format">Format of the buffer texture</param>
/// <param name="isImage">Whether the binding is for an image or a sampler</param>
public void SetBufferTextureStorage(
ShaderStage stage,
ITexture texture,
ulong address,
ulong size,
TextureBindingInfo bindingInfo,
Format format,
bool isImage)
{
_channel.MemoryManager.Physical.BufferCache.CreateBuffer(address, size);
_bufferTextures.Add(new BufferTextureBinding(stage, texture, address, size, bindingInfo, format, isImage));
}
/// <summary>
/// Force all bound textures and images to be rebound the next time CommitBindings is called.
/// </summary>
public void Rebind()
{
_rebind = true;
}
}
}


@@ -0,0 +1,125 @@
using System;
namespace Ryujinx.Graphics.Gpu.Memory
{
/// <summary>
/// A record of when buffer data was copied from one buffer to another, along with the SyncNumber when the migration will be complete.
/// Keeps the source buffer alive for data flushes until the migration is complete.
/// </summary>
internal class BufferMigration : IDisposable
{
/// <summary>
/// The offset for the migrated region.
/// </summary>
private readonly ulong _offset;
/// <summary>
/// The size for the migrated region.
/// </summary>
private readonly ulong _size;
/// <summary>
/// The buffer that was migrated from.
/// </summary>
private readonly Buffer _buffer;
/// <summary>
/// The source range action, to be called on overlap with an unreached sync number.
/// </summary>
private readonly Action<ulong, ulong> _sourceRangeAction;
/// <summary>
/// The source range list.
/// </summary>
private readonly BufferModifiedRangeList _source;
/// <summary>
/// The destination range list. This range list must be updated when flushing the source.
/// </summary>
public readonly BufferModifiedRangeList Destination;
/// <summary>
/// The sync number that needs to be reached for this migration to be removed. This is set to the pending sync number on creation.
/// </summary>
public readonly ulong SyncNumber;
/// <summary>
/// Creates a record for a buffer migration.
/// </summary>
/// <param name="buffer">The source buffer for this migration</param>
/// <param name="sourceRangeAction">The flush action for the source buffer</param>
/// <param name="source">The modified range list for the source buffer</param>
/// <param name="dest">The modified range list for the destination buffer</param>
/// <param name="syncNumber">The sync number for when the migration is complete</param>
public BufferMigration(
Buffer buffer,
Action<ulong, ulong> sourceRangeAction,
BufferModifiedRangeList source,
BufferModifiedRangeList dest,
ulong syncNumber)
{
_offset = buffer.Address;
_size = buffer.Size;
_buffer = buffer;
_sourceRangeAction = sourceRangeAction;
_source = source;
Destination = dest;
SyncNumber = syncNumber;
}
/// <summary>
/// Determine if the given range overlaps this migration, and has not been completed yet.
/// </summary>
/// <param name="offset">Start offset</param>
/// <param name="size">Range size</param>
/// <param name="syncNumber">The sync number that was waited on</param>
/// <returns>True if overlapping and in progress, false otherwise</returns>
public bool Overlaps(ulong offset, ulong size, ulong syncNumber)
{
ulong end = offset + size;
ulong destEnd = _offset + _size;
long syncDiff = (long)(syncNumber - SyncNumber); // syncNumber is less if the copy has not completed.
return !(end <= _offset || offset >= destEnd) && syncDiff < 0;
}
/// <summary>
/// Determine if the given range matches this migration.
/// </summary>
/// <param name="offset">Start offset</param>
/// <param name="size">Range size</param>
/// <returns>True if the range exactly matches, false otherwise</returns>
public bool FullyMatches(ulong offset, ulong size)
{
return _offset == offset && _size == size;
}
/// <summary>
/// Perform the migration source's range action on the range provided, clamped to the bounds of the source buffer.
/// </summary>
/// <param name="offset">Start offset</param>
/// <param name="size">Range size</param>
/// <param name="syncNumber">Current sync number</param>
/// <param name="parent">The modified range list that originally owned this range</param>
public void RangeActionWithMigration(ulong offset, ulong size, ulong syncNumber, BufferModifiedRangeList parent)
{
ulong end = offset + size;
end = Math.Min(_offset + _size, end);
offset = Math.Max(_offset, offset);
size = end - offset;
_source.RangeActionWithMigration(offset, size, syncNumber, parent, _sourceRangeAction);
}
/// <summary>
/// Removes this reference to the range list, potentially allowing for the source buffer to be disposed.
/// </summary>
public void Dispose()
{
Destination.RemoveMigration(this);
_buffer.DecrementReferenceCount();
}
}
}

View file

@ -0,0 +1,514 @@
using Ryujinx.Common.Logging;
using Ryujinx.Common.Pools;
using Ryujinx.Memory.Range;
using System;
using System.Collections.Generic;
using System.Linq;
namespace Ryujinx.Graphics.Gpu.Memory
{
/// <summary>
/// A range within a buffer that has been modified by the GPU.
/// </summary>
class BufferModifiedRange : IRange
{
/// <summary>
/// Start address of the range in guest memory.
/// </summary>
public ulong Address { get; }
/// <summary>
/// Size of the range in bytes.
/// </summary>
public ulong Size { get; }
/// <summary>
/// End address of the range in guest memory.
/// </summary>
public ulong EndAddress => Address + Size;
/// <summary>
/// The GPU sync number at the time of the last modification.
/// </summary>
public ulong SyncNumber { get; internal set; }
/// <summary>
/// The range list that originally owned this range.
/// </summary>
public BufferModifiedRangeList Parent { get; internal set; }
/// <summary>
/// Creates a new instance of a modified range.
/// </summary>
/// <param name="address">Start address of the range</param>
/// <param name="size">Size of the range in bytes</param>
/// <param name="syncNumber">The GPU sync number at the time of creation</param>
/// <param name="parent">The range list that owns this range</param>
public BufferModifiedRange(ulong address, ulong size, ulong syncNumber, BufferModifiedRangeList parent)
{
Address = address;
Size = size;
SyncNumber = syncNumber;
Parent = parent;
}
/// <summary>
/// Checks if a given range overlaps with the modified range.
/// </summary>
/// <param name="address">Start address of the range</param>
/// <param name="size">Size in bytes of the range</param>
/// <returns>True if the range overlaps, false otherwise</returns>
public bool OverlapsWith(ulong address, ulong size)
{
return Address < address + size && address < EndAddress;
}
}
/// <summary>
/// A structure used to track GPU modified ranges within a buffer.
/// </summary>
class BufferModifiedRangeList : RangeList<BufferModifiedRange>
{
private const int BackingInitialSize = 8;
private GpuContext _context;
private Buffer _parent;
private Action<ulong, ulong> _flushAction;
private List<BufferMigration> _sources;
private BufferMigration _migrationTarget;
private object _lock = new object();
/// <summary>
/// Whether the modified range list has any entries or not.
/// </summary>
public bool HasRanges
{
get
{
lock (_lock)
{
return Count > 0;
}
}
}
/// <summary>
/// Creates a new instance of a modified range list.
/// </summary>
/// <param name="context">GPU context that the buffer range list belongs to</param>
/// <param name="parent">The parent buffer that owns this range list</param>
/// <param name="flushAction">The flush action for the parent buffer</param>
public BufferModifiedRangeList(GpuContext context, Buffer parent, Action<ulong, ulong> flushAction) : base(BackingInitialSize)
{
_context = context;
_parent = parent;
_flushAction = flushAction;
}
/// <summary>
/// Given an input range, calls the given action with sub-ranges which exclude any of the modified regions.
/// </summary>
/// <param name="address">Start address of the query range</param>
/// <param name="size">Size of the query range in bytes</param>
/// <param name="action">Action to perform for each remaining sub-range of the input range</param>
public void ExcludeModifiedRegions(ulong address, ulong size, Action<ulong, ulong> action)
{
lock (_lock)
{
// Slices a given region using the modified regions in the list. Calls the action for the new slices.
ref var overlaps = ref ThreadStaticArray<BufferModifiedRange>.Get();
int count = FindOverlapsNonOverlapping(address, size, ref overlaps);
for (int i = 0; i < count; i++)
{
BufferModifiedRange overlap = overlaps[i];
if (overlap.Address > address)
{
// The start of the remaining region is uncovered by this overlap. Call the action for it.
action(address, overlap.Address - address);
}
// Remaining region is after this overlap.
size -= overlap.EndAddress - address;
address = overlap.EndAddress;
}
if ((long)size > 0)
{
// If there is any region left after removing the overlaps, signal it.
action(address, size);
}
}
}
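// Illustrative sketch (hypothetical values, not part of the original source):
// querying [0x1000, 0x3000) while [0x1800, 0x2000) is marked as modified calls
// the action once for each unmodified slice:
//
// action(0x1000, 0x800); // the slice before the modified range
// action(0x2000, 0x1000); // the slice after the modified range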
/// <summary>
/// Signal that a region of the buffer has been modified, and add the new region to the range list.
/// Any overlapping ranges will be (partially) removed.
/// </summary>
/// <param name="address">Start address of the modified region</param>
/// <param name="size">Size of the modified region in bytes</param>
public void SignalModified(ulong address, ulong size)
{
// Must lock, as this can affect flushes from the background thread.
lock (_lock)
{
// We may overlap with some existing modified regions. They must be cut into by the new entry.
ref var overlaps = ref ThreadStaticArray<BufferModifiedRange>.Get();
int count = FindOverlapsNonOverlapping(address, size, ref overlaps);
ulong endAddress = address + size;
ulong syncNumber = _context.SyncNumber;
for (int i = 0; i < count; i++)
{
// The overlaps must be removed or split.
BufferModifiedRange overlap = overlaps[i];
if (overlap.Address == address && overlap.Size == size)
{
// Region already exists. Just update the existing sync number.
overlap.SyncNumber = syncNumber;
overlap.Parent = this;
return;
}
Remove(overlap);
if (overlap.Address < address && overlap.EndAddress > address)
{
// A split item must be created behind this overlap.
Add(new BufferModifiedRange(overlap.Address, address - overlap.Address, overlap.SyncNumber, overlap.Parent));
}
if (overlap.Address < endAddress && overlap.EndAddress > endAddress)
{
// A split item must be created after this overlap.
Add(new BufferModifiedRange(endAddress, overlap.EndAddress - endAddress, overlap.SyncNumber, overlap.Parent));
}
}
Add(new BufferModifiedRange(address, size, syncNumber, this));
}
}
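// Illustrative sketch (hypothetical values): signalling [0x2000, 0x3000) while
// [0x1000, 0x2800) is already tracked removes the old entry, re-adds the
// non-overlapping prefix, and records the new range under the current sync number:
//
// Add(new BufferModifiedRange(0x1000, 0x1000, overlap.SyncNumber, overlap.Parent)); // split prefix
// Add(new BufferModifiedRange(0x2000, 0x1000, syncNumber, this)); // the new range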
/// <summary>
/// Gets modified ranges within the specified region, and then fires the given action for each range individually.
/// </summary>
/// <param name="address">Start address to query</param>
/// <param name="size">Size to query</param>
/// <param name="rangeAction">The action to call for each modified range</param>
public void GetRanges(ulong address, ulong size, Action<ulong, ulong> rangeAction)
{
int count = 0;
ref var overlaps = ref ThreadStaticArray<BufferModifiedRange>.Get();
// Range list must be consistent for this operation.
lock (_lock)
{
count = FindOverlapsNonOverlapping(address, size, ref overlaps);
}
for (int i = 0; i < count; i++)
{
BufferModifiedRange overlap = overlaps[i];
rangeAction(overlap.Address, overlap.Size);
}
}
/// <summary>
/// Queries if a range exists within the specified region.
/// </summary>
/// <param name="address">Start address to query</param>
/// <param name="size">Size to query</param>
/// <returns>True if a range exists in the specified region, false otherwise</returns>
public bool HasRange(ulong address, ulong size)
{
// Range list must be consistent for this operation.
lock (_lock)
{
return FindOverlapsNonOverlapping(address, size, ref ThreadStaticArray<BufferModifiedRange>.Get()) > 0;
}
}
/// <summary>
/// Performs the given range action, or one from a migration that overlaps and has not synced yet.
/// </summary>
/// <param name="offset">The offset to pass to the action</param>
/// <param name="size">The size to pass to the action</param>
/// <param name="syncNumber">The sync number that has been reached</param>
/// <param name="parent">The modified range list that originally owned this range</param>
/// <param name="rangeAction">The action to perform</param>
public void RangeActionWithMigration(ulong offset, ulong size, ulong syncNumber, BufferModifiedRangeList parent, Action<ulong, ulong> rangeAction)
{
bool firstSource = true;
if (parent != this)
{
lock (_lock)
{
if (_sources != null)
{
foreach (BufferMigration source in _sources)
{
if (source.Overlaps(offset, size, syncNumber))
{
if (firstSource && !source.FullyMatches(offset, size))
{
// Perform this buffer's action first. The migrations will run after.
rangeAction(offset, size);
}
source.RangeActionWithMigration(offset, size, syncNumber, parent);
firstSource = false;
}
}
}
}
}
if (firstSource)
{
// No overlapping migrations, or they are not meant for this range; flush the data using the given action.
rangeAction(offset, size);
}
}
/// <summary>
/// Removes modified ranges that are ready (their sync number has been reached) from the list, and flushes their buffer data within a given address range.
/// </summary>
/// <param name="overlaps">Overlapping ranges to check</param>
/// <param name="rangeCount">Number of overlapping ranges</param>
/// <param name="highestDiff">The highest difference between an overlapping range's sync number and the current one</param>
/// <param name="currentSync">The current sync number</param>
/// <param name="address">The start address of the flush range</param>
/// <param name="endAddress">The end address of the flush range</param>
private void RemoveRangesAndFlush(
BufferModifiedRange[] overlaps,
int rangeCount,
long highestDiff,
ulong currentSync,
ulong address,
ulong endAddress)
{
lock (_lock)
{
if (_migrationTarget == null)
{
ulong waitSync = currentSync + (ulong)highestDiff;
for (int i = 0; i < rangeCount; i++)
{
BufferModifiedRange overlap = overlaps[i];
long diff = (long)(overlap.SyncNumber - currentSync);
if (diff <= highestDiff)
{
ulong clampAddress = Math.Max(address, overlap.Address);
ulong clampEnd = Math.Min(endAddress, overlap.EndAddress);
ClearPart(overlap, clampAddress, clampEnd);
RangeActionWithMigration(clampAddress, clampEnd - clampAddress, waitSync, overlap.Parent, _flushAction);
}
}
return;
}
}
// There is a migration target to call instead. This can't be changed once set, so accessing it outside the lock is fine.
_migrationTarget.Destination.RemoveRangesAndFlush(overlaps, rangeCount, highestDiff, currentSync, address, endAddress);
}
/// <summary>
/// Gets modified ranges within the specified region, waits on ones from a previous sync number,
/// and then fires the flush action for each range individually.
/// </summary>
/// <remarks>
/// This function assumes it is called from the background thread.
/// Modifications from the current sync number are ignored because the guest should not expect them to be available yet.
/// They will remain reserved, so that any data sync prioritizes the data in the GPU.
/// </remarks>
/// <param name="address">Start address to query</param>
/// <param name="size">Size to query</param>
public void WaitForAndFlushRanges(ulong address, ulong size)
{
ulong endAddress = address + size;
ulong currentSync = _context.SyncNumber;
int rangeCount = 0;
ref var overlaps = ref ThreadStaticArray<BufferModifiedRange>.Get();
// Range list must be consistent for this operation
lock (_lock)
{
if (_migrationTarget != null)
{
rangeCount = -1;
}
else
{
rangeCount = FindOverlapsNonOverlapping(address, size, ref overlaps);
}
}
if (rangeCount == -1)
{
_migrationTarget.Destination.WaitForAndFlushRanges(address, size);
return;
}
else if (rangeCount == 0)
{
return;
}
// First, determine which syncpoint to wait on.
// This is the latest syncpoint that is not equal to the current sync.
long highestDiff = long.MinValue;
for (int i = 0; i < rangeCount; i++)
{
BufferModifiedRange overlap = overlaps[i];
long diff = (long)(overlap.SyncNumber - currentSync);
if (diff < 0 && diff > highestDiff)
{
highestDiff = diff;
}
}
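// Illustrative example (hypothetical values): with currentSync = 10 and overlaps
// whose sync numbers are 8 and 9, the diffs are -2 and -1, so highestDiff = -1
// and the wait below targets sync number 9, the latest syncpoint older than the
// current one.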
if (highestDiff == long.MinValue)
{
return;
}
// Wait for the syncpoint.
_context.Renderer.WaitSync(currentSync + (ulong)highestDiff);
RemoveRangesAndFlush(overlaps, rangeCount, highestDiff, currentSync, address, endAddress);
}
/// <summary>
/// Inherit ranges from another modified range list.
/// </summary>
/// <param name="ranges">The range list to inherit from</param>
/// <param name="registerRangeAction">The action to call for each modified range</param>
public void InheritRanges(BufferModifiedRangeList ranges, Action<ulong, ulong> registerRangeAction)
{
BufferModifiedRange[] inheritRanges;
lock (ranges._lock)
{
BufferMigration migration = new(ranges._parent, ranges._flushAction, ranges, this, _context.SyncNumber);
ranges._parent.IncrementReferenceCount();
ranges._migrationTarget = migration;
_context.RegisterBufferMigration(migration);
inheritRanges = ranges.ToArray();
lock (_lock)
{
(_sources ??= new List<BufferMigration>()).Add(migration);
foreach (BufferModifiedRange range in inheritRanges)
{
Add(range);
}
}
}
ulong currentSync = _context.SyncNumber;
foreach (BufferModifiedRange range in inheritRanges)
{
if (range.SyncNumber != currentSync)
{
registerRangeAction(range.Address, range.Size);
}
}
}
/// <summary>
/// Removes a source buffer migration, indicating its copy has completed.
/// </summary>
/// <param name="migration">The migration to remove</param>
public void RemoveMigration(BufferMigration migration)
{
lock (_lock)
{
_sources.Remove(migration);
}
}
private void ClearPart(BufferModifiedRange overlap, ulong address, ulong endAddress)
{
Remove(overlap);
// If the overlap extends outside of the clear range, make sure those parts still exist.
if (overlap.Address < address)
{
Add(new BufferModifiedRange(overlap.Address, address - overlap.Address, overlap.SyncNumber, overlap.Parent));
}
if (overlap.EndAddress > endAddress)
{
Add(new BufferModifiedRange(endAddress, overlap.EndAddress - endAddress, overlap.SyncNumber, overlap.Parent));
}
}
/// <summary>
/// Clear modified ranges within the specified area.
/// </summary>
/// <param name="address">Start address to clear</param>
/// <param name="size">Size to clear</param>
public void Clear(ulong address, ulong size)
{
lock (_lock)
{
// This function can be called from any thread, so it cannot use the arrays for background or foreground.
BufferModifiedRange[] toClear = new BufferModifiedRange[1];
int rangeCount = FindOverlapsNonOverlapping(address, size, ref toClear);
ulong endAddress = address + size;
for (int i = 0; i < rangeCount; i++)
{
BufferModifiedRange overlap = toClear[i];
ClearPart(overlap, address, endAddress);
}
}
}
/// <summary>
/// Clear all modified ranges.
/// </summary>
public void Clear()
{
lock (_lock)
{
Count = 0;
}
}
}
}

View file

@ -0,0 +1,75 @@
using Ryujinx.Graphics.GAL;
using Ryujinx.Graphics.Gpu.Image;
using Ryujinx.Graphics.Shader;
namespace Ryujinx.Graphics.Gpu.Memory
{
/// <summary>
/// A buffer binding to apply to a buffer texture.
/// </summary>
readonly struct BufferTextureBinding
{
/// <summary>
/// Shader stage accessing the texture.
/// </summary>
public ShaderStage Stage { get; }
/// <summary>
/// The buffer texture.
/// </summary>
public ITexture Texture { get; }
/// <summary>
/// The base address of the buffer binding.
/// </summary>
public ulong Address { get; }
/// <summary>
/// The size of the buffer binding in bytes.
/// </summary>
public ulong Size { get; }
/// <summary>
/// The image or sampler binding info for the buffer texture.
/// </summary>
public TextureBindingInfo BindingInfo { get; }
/// <summary>
/// The image format for the binding.
/// </summary>
public Format Format { get; }
/// <summary>
/// Whether the binding is for an image or a sampler.
/// </summary>
public bool IsImage { get; }
/// <summary>
/// Create a new buffer texture binding.
/// </summary>
/// <param name="stage">Shader stage accessing the texture</param>
/// <param name="texture">Buffer texture</param>
/// <param name="address">Base address</param>
/// <param name="size">Size in bytes</param>
/// <param name="bindingInfo">Binding info</param>
/// <param name="format">Binding format</param>
/// <param name="isImage">Whether the binding is for an image or a sampler</param>
public BufferTextureBinding(
ShaderStage stage,
ITexture texture,
ulong address,
ulong size,
TextureBindingInfo bindingInfo,
Format format,
bool isImage)
{
Stage = stage;
Texture = texture;
Address = address;
Size = size;
BindingInfo = bindingInfo;
Format = format;
IsImage = isImage;
}
}
}

View file

@ -0,0 +1,191 @@
using Ryujinx.Graphics.GAL;
using System.Collections.Generic;
namespace Ryujinx.Graphics.Gpu.Memory
{
/// <summary>
/// Represents the GPU counter cache.
/// </summary>
class CounterCache
{
private readonly struct CounterEntry
{
public ulong Address { get; }
public ICounterEvent Event { get; }
public CounterEntry(ulong address, ICounterEvent evt)
{
Address = address;
Event = evt;
}
}
private readonly List<CounterEntry> _items;
/// <summary>
/// Creates a new instance of the GPU counter cache.
/// </summary>
public CounterCache()
{
_items = new List<CounterEntry>();
}
/// <summary>
/// Adds a new counter to the counter cache, or updates an existing one.
/// </summary>
/// <param name="gpuVa">GPU virtual address where the counter will be written in memory</param>
public void AddOrUpdate(ulong gpuVa, ICounterEvent evt)
{
int index = BinarySearch(gpuVa);
CounterEntry entry = new CounterEntry(gpuVa, evt);
if (index < 0)
{
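// BinarySearch returns the bitwise complement of the insertion point when the
// address is not present, so ~index restores the correct sorted position.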
_items.Insert(~index, entry);
}
else
{
_items[index] = entry;
}
}
/// <summary>
/// Handles removal of counters written to a memory region being unmapped.
/// </summary>
/// <param name="sender">Sender object</param>
/// <param name="e">Event arguments</param>
public void MemoryUnmappedHandler(object sender, UnmapEventArgs e) => RemoveRange(e.Address, e.Size);
private void RemoveRange(ulong gpuVa, ulong size)
{
int index = BinarySearch(gpuVa + size - 1);
if (index < 0)
{
index = ~index;
}
if (index >= _items.Count || !InRange(gpuVa, size, _items[index].Address))
{
return;
}
int count = 1;
while (index > 0 && InRange(gpuVa, size, _items[index - 1].Address))
{
index--;
count++;
}
// Notify the removed counter events that their result should no longer be written out.
for (int i = 0; i < count; i++)
{
ICounterEvent evt = _items[index + i].Event;
if (evt != null)
{
evt.Invalid = true;
}
}
_items.RemoveRange(index, count);
}
/// <summary>
/// Checks whether an address falls inside a given range.
/// </summary>
/// <param name="startVa">Range start address</param>
/// <param name="size">Range size</param>
/// <param name="gpuVa">Address being checked</param>
/// <returns>True if the address falls inside the range, false otherwise</returns>
private static bool InRange(ulong startVa, ulong size, ulong gpuVa)
{
return gpuVa >= startVa && gpuVa < startVa + size;
}
/// <summary>
/// Check if any counter value was written to the specified GPU virtual memory address.
/// </summary>
/// <param name="gpuVa">GPU virtual address</param>
/// <returns>True if any counter value was written to the specified address, false otherwise</returns>
public bool Contains(ulong gpuVa)
{
return BinarySearch(gpuVa) >= 0;
}
/// <summary>
/// Flush any counter value written to the specified GPU virtual memory address.
/// </summary>
/// <param name="gpuVa">GPU virtual address</param>
/// <returns>True if any counter value was written to the specified address, false otherwise</returns>
public bool FindAndFlush(ulong gpuVa)
{
int index = BinarySearch(gpuVa);
if (index >= 0) // index 0 is a valid match, consistent with Contains
{
_items[index].Event?.Flush();
return true;
}
else
{
return false;
}
}
/// <summary>
/// Find any counter event that would write to the specified GPU virtual memory address.
/// </summary>
/// <param name="gpuVa">GPU virtual address</param>
/// <returns>The counter event, or null if not present</returns>
public ICounterEvent FindEvent(ulong gpuVa)
{
int index = BinarySearch(gpuVa);
if (index >= 0) // index 0 is a valid match, consistent with Contains
{
return _items[index].Event;
}
else
{
return null;
}
}
/// <summary>
/// Performs binary search of an address on the list.
/// </summary>
/// <param name="address">Address to search</param>
/// <returns>Index of the item, or complement of the index of the nearest item with lower value</returns>
private int BinarySearch(ulong address)
{
int left = 0;
int right = _items.Count - 1;
while (left <= right)
{
int range = right - left;
int middle = left + (range >> 1);
CounterEntry item = _items[middle];
if (item.Address == address)
{
return middle;
}
if (address < item.Address)
{
right = middle - 1;
}
else
{
left = middle + 1;
}
}
return ~left;
}
}
}

View file

@ -0,0 +1,102 @@
using Ryujinx.Cpu.Tracking;
using Ryujinx.Memory.Tracking;
using System;
namespace Ryujinx.Graphics.Gpu.Memory
{
/// <summary>
/// A tracking handle for a region of GPU VA, represented by one or more tracking handles in CPU VA.
/// </summary>
class GpuRegionHandle : IRegionHandle
{
private readonly CpuRegionHandle[] _cpuRegionHandles;
public bool Dirty
{
get
{
foreach (var regionHandle in _cpuRegionHandles)
{
if (regionHandle.Dirty)
{
return true;
}
}
return false;
}
}
public ulong Address => throw new NotSupportedException();
public ulong Size => throw new NotSupportedException();
public ulong EndAddress => throw new NotSupportedException();
/// <summary>
/// Create a new GpuRegionHandle, made up of multiple CpuRegionHandles.
/// </summary>
/// <param name="cpuRegionHandles">The CpuRegionHandles that make up this handle</param>
public GpuRegionHandle(CpuRegionHandle[] cpuRegionHandles)
{
_cpuRegionHandles = cpuRegionHandles;
}
/// <summary>
/// Dispose the child handles.
/// </summary>
public void Dispose()
{
foreach (var regionHandle in _cpuRegionHandles)
{
regionHandle.Dispose();
}
}
/// <summary>
/// Register an action to perform when the tracked region is read or written.
/// The action is automatically removed after it runs.
/// </summary>
/// <param name="action">Action to call on read or write</param>
public void RegisterAction(RegionSignal action)
{
foreach (var regionHandle in _cpuRegionHandles)
{
regionHandle.RegisterAction(action);
}
}
/// <summary>
/// Register an action to perform when a precise access occurs (one with exact address and size).
/// If the action returns true, read/write tracking is skipped.
/// </summary>
/// <param name="action">Action to call on read or write</param>
public void RegisterPreciseAction(PreciseRegionSignal action)
{
foreach (var regionHandle in _cpuRegionHandles)
{
regionHandle.RegisterPreciseAction(action);
}
}
/// <summary>
/// Consume the dirty flag for the handles, and reprotect so it can be set on the next write.
/// </summary>
public void Reprotect(bool asDirty = false)
{
foreach (var regionHandle in _cpuRegionHandles)
{
regionHandle.Reprotect(asDirty);
}
}
/// <summary>
/// Force the handles to be dirty, without reprotecting.
/// </summary>
public void ForceDirty()
{
foreach (var regionHandle in _cpuRegionHandles)
{
regionHandle.ForceDirty();
}
}
}
}

View file

@ -0,0 +1,15 @@
using Ryujinx.Graphics.GAL;
namespace Ryujinx.Graphics.Gpu.Memory
{
/// <summary>
/// GPU Index Buffer information.
/// </summary>
struct IndexBuffer
{
public ulong Address;
public ulong Size;
public IndexType Type;
}
}

View file

@ -0,0 +1,713 @@
using Ryujinx.Memory;
using Ryujinx.Memory.Range;
using System;
using System.Collections.Generic;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
namespace Ryujinx.Graphics.Gpu.Memory
{
/// <summary>
/// GPU memory manager.
/// </summary>
public class MemoryManager : IWritableBlock
{
private const int PtLvl0Bits = 14;
private const int PtLvl1Bits = 14;
public const int PtPageBits = 12;
private const ulong PtLvl0Size = 1UL << PtLvl0Bits;
private const ulong PtLvl1Size = 1UL << PtLvl1Bits;
public const ulong PageSize = 1UL << PtPageBits;
private const ulong PtLvl0Mask = PtLvl0Size - 1;
private const ulong PtLvl1Mask = PtLvl1Size - 1;
public const ulong PageMask = PageSize - 1;
private const int PtLvl0Bit = PtPageBits + PtLvl1Bits;
private const int PtLvl1Bit = PtPageBits;
private const int AddressSpaceBits = PtPageBits + PtLvl1Bits + PtLvl0Bits;
public const ulong PteUnmapped = ulong.MaxValue;
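// Illustrative note (not part of the original source): with these constants, a
// 40-bit GPU virtual address decomposes as
// [ level 0 index : 14 bits ][ level 1 index : 14 bits ][ page offset : 12 bits ]
// so, for example, l0 = (va >> 26) & 0x3FFF, l1 = (va >> 12) & 0x3FFF and
// offset = va & 0xFFF.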
private readonly ulong[][] _pageTable;
public event EventHandler<UnmapEventArgs> MemoryUnmapped;
/// <summary>
/// Physical memory where the virtual memory is mapped into.
/// </summary>
internal PhysicalMemory Physical { get; }
/// <summary>
/// Cache of GPU counters.
/// </summary>
internal CounterCache CounterCache { get; }
/// <summary>
/// Creates a new instance of the GPU memory manager.
/// </summary>
/// <param name="physicalMemory">Physical memory that this memory manager will map into</param>
internal MemoryManager(PhysicalMemory physicalMemory)
{
Physical = physicalMemory;
CounterCache = new CounterCache();
_pageTable = new ulong[PtLvl0Size][];
MemoryUnmapped += Physical.TextureCache.MemoryUnmappedHandler;
MemoryUnmapped += Physical.BufferCache.MemoryUnmappedHandler;
MemoryUnmapped += CounterCache.MemoryUnmappedHandler;
}
/// <summary>
/// Reads data from GPU mapped memory.
/// </summary>
/// <typeparam name="T">Type of the data</typeparam>
/// <param name="va">GPU virtual address where the data is located</param>
/// <param name="tracked">True if read tracking is triggered on the memory region</param>
/// <returns>The data at the specified memory location</returns>
public T Read<T>(ulong va, bool tracked = false) where T : unmanaged
{
int size = Unsafe.SizeOf<T>();
if (IsContiguous(va, size))
{
ulong address = Translate(va);
if (tracked)
{
return Physical.ReadTracked<T>(address);
}
else
{
return Physical.Read<T>(address);
}
}
else
{
Span<byte> data = new byte[size];
ReadImpl(va, data, tracked);
return MemoryMarshal.Cast<byte, T>(data)[0];
}
}
/// <summary>
/// Gets a read-only span of data from GPU mapped memory.
/// </summary>
/// <param name="va">GPU virtual address where the data is located</param>
/// <param name="size">Size of the data</param>
/// <param name="tracked">True if read tracking is triggered on the span</param>
/// <returns>The span of the data at the specified memory location</returns>
public ReadOnlySpan<byte> GetSpan(ulong va, int size, bool tracked = false)
{
if (IsContiguous(va, size))
{
return Physical.GetSpan(Translate(va), size, tracked);
}
else
{
Span<byte> data = new byte[size];
ReadImpl(va, data, tracked);
return data;
}
}
/// <summary>
/// Gets a read-only span of data from GPU mapped memory, up to the entire range specified,
/// or the last mapped page if the range is not fully mapped.
/// </summary>
/// <param name="va">GPU virtual address where the data is located</param>
/// <param name="size">Size of the data</param>
/// <param name="tracked">True if read tracking is triggered on the span</param>
/// <returns>The span of the data at the specified memory location</returns>
public ReadOnlySpan<byte> GetSpanMapped(ulong va, int size, bool tracked = false)
{
bool isContiguous = true;
int mappedSize;
if (ValidateAddress(va) && GetPte(va) != PteUnmapped && Physical.IsMapped(Translate(va)))
{
ulong endVa = va + (ulong)size;
ulong endVaAligned = (endVa + PageMask) & ~PageMask;
ulong currentVa = va & ~PageMask;
int pages = (int)((endVaAligned - currentVa) / PageSize);
for (int page = 0; page < pages - 1; page++)
{
ulong nextVa = currentVa + PageSize;
ulong nextPa = Translate(nextVa);
if (!ValidateAddress(nextVa) || GetPte(nextVa) == PteUnmapped || !Physical.IsMapped(nextPa))
{
break;
}
if (Translate(currentVa) + PageSize != nextPa)
{
isContiguous = false;
}
currentVa += PageSize;
}
currentVa += PageSize;
if (currentVa > endVa)
{
currentVa = endVa;
}
mappedSize = (int)(currentVa - va);
}
else
{
return ReadOnlySpan<byte>.Empty;
}
if (isContiguous)
{
return Physical.GetSpan(Translate(va), mappedSize, tracked);
}
else
{
Span<byte> data = new byte[mappedSize];
ReadImpl(va, data, tracked);
return data;
}
}
/// <summary>
/// Reads data from a possibly non-contiguous region of GPU mapped memory.
/// </summary>
/// <param name="va">GPU virtual address of the data</param>
/// <param name="data">Span to write the read data into</param>
/// <param name="tracked">True to enable write tracking on read, false otherwise</param>
private void ReadImpl(ulong va, Span<byte> data, bool tracked)
{
if (data.Length == 0)
{
return;
}
int offset = 0, size;
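// Copy the unaligned head up to the next page boundary first, then walk whole pages.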
if ((va & PageMask) != 0)
{
ulong pa = Translate(va);
size = Math.Min(data.Length, (int)PageSize - (int)(va & PageMask));
Physical.GetSpan(pa, size, tracked).CopyTo(data.Slice(0, size));
offset += size;
}
for (; offset < data.Length; offset += size)
{
ulong pa = Translate(va + (ulong)offset);
size = Math.Min(data.Length - offset, (int)PageSize);
Physical.GetSpan(pa, size, tracked).CopyTo(data.Slice(offset, size));
}
}
/// <summary>
/// Gets a writable region from GPU mapped memory.
/// </summary>
/// <param name="va">Start address of the range</param>
/// <param name="size">Size in bytes to be range</param>
/// <param name="tracked">True if write tracking is triggered on the span</param>
/// <returns>A writable region with the data at the specified memory location</returns>
public WritableRegion GetWritableRegion(ulong va, int size, bool tracked = false)
{
if (IsContiguous(va, size))
{
return Physical.GetWritableRegion(Translate(va), size, tracked);
}
else
{
Memory<byte> memory = new byte[size];
GetSpan(va, size).CopyTo(memory.Span);
return new WritableRegion(this, va, memory, tracked);
}
}
/// <summary>
/// Writes data to GPU mapped memory.
/// </summary>
/// <typeparam name="T">Type of the data</typeparam>
/// <param name="va">GPU virtual address to write the value into</param>
/// <param name="value">The value to be written</param>
public void Write<T>(ulong va, T value) where T : unmanaged
{
Write(va, MemoryMarshal.Cast<T, byte>(MemoryMarshal.CreateSpan(ref value, 1)));
}
/// <summary>
/// Writes data to GPU mapped memory.
/// </summary>
/// <param name="va">GPU virtual address to write the data into</param>
/// <param name="data">The data to be written</param>
public void Write(ulong va, ReadOnlySpan<byte> data)
{
WriteImpl(va, data, Physical.Write);
}
/// <summary>
/// Writes data to GPU mapped memory, destined for a tracked resource.
/// </summary>
/// <param name="va">GPU virtual address to write the data into</param>
/// <param name="data">The data to be written</param>
public void WriteTrackedResource(ulong va, ReadOnlySpan<byte> data)
{
WriteImpl(va, data, Physical.WriteTrackedResource);
}
/// <summary>
/// Writes data to GPU mapped memory without write tracking.
/// </summary>
/// <param name="va">GPU virtual address to write the data into</param>
/// <param name="data">The data to be written</param>
public void WriteUntracked(ulong va, ReadOnlySpan<byte> data)
{
WriteImpl(va, data, Physical.WriteUntracked);
}
private delegate void WriteCallback(ulong address, ReadOnlySpan<byte> data);
/// <summary>
/// Writes data to possibly non-contiguous GPU mapped memory.
/// </summary>
/// <param name="va">GPU virtual address of the region to write into</param>
/// <param name="data">Data to be written</param>
/// <param name="writeCallback">Write callback</param>
private void WriteImpl(ulong va, ReadOnlySpan<byte> data, WriteCallback writeCallback)
{
if (IsContiguous(va, data.Length))
{
writeCallback(Translate(va), data);
}
else
{
int offset = 0, size;
if ((va & PageMask) != 0)
{
ulong pa = Translate(va);
size = Math.Min(data.Length, (int)PageSize - (int)(va & PageMask));
writeCallback(pa, data.Slice(0, size));
offset += size;
}
for (; offset < data.Length; offset += size)
{
ulong pa = Translate(va + (ulong)offset);
size = Math.Min(data.Length - offset, (int)PageSize);
writeCallback(pa, data.Slice(offset, size));
}
}
}
/// <summary>
/// Writes data to GPU mapped memory, skipping over any unmapped pages in the memory region.
/// </summary>
/// <param name="va">GPU virtual address to write the data into</param>
/// <param name="data">The data to be written</param>
public void WriteMapped(ulong va, ReadOnlySpan<byte> data)
{
if (IsContiguous(va, data.Length))
{
Physical.Write(Translate(va), data);
}
else
{
int offset = 0, size;
if ((va & PageMask) != 0)
{
ulong pa = Translate(va);
size = Math.Min(data.Length, (int)PageSize - (int)(va & PageMask));
if (pa != PteUnmapped && Physical.IsMapped(pa))
{
Physical.Write(pa, data.Slice(0, size));
}
offset += size;
}
for (; offset < data.Length; offset += size)
{
ulong pa = Translate(va + (ulong)offset);
size = Math.Min(data.Length - offset, (int)PageSize);
if (pa != PteUnmapped && Physical.IsMapped(pa))
{
Physical.Write(pa, data.Slice(offset, size));
}
}
}
}
/// <summary>
/// Maps a given range of pages to the specified CPU virtual address.
/// </summary>
/// <remarks>
/// All addresses and sizes must be page aligned.
/// </remarks>
/// <param name="pa">CPU virtual address to map into</param>
/// <param name="va">GPU virtual address to be mapped</param>
/// <param name="size">Size in bytes of the mapping</param>
/// <param name="kind">Kind of the resource located at the mapping</param>
public void Map(ulong pa, ulong va, ulong size, PteKind kind)
{
lock (_pageTable)
{
MemoryUnmapped?.Invoke(this, new UnmapEventArgs(va, size));
for (ulong offset = 0; offset < size; offset += PageSize)
{
SetPte(va + offset, PackPte(pa + offset, kind));
}
}
}
/// <summary>
/// Unmaps a given range of pages at the specified GPU virtual memory region.
/// </summary>
/// <param name="va">GPU virtual address to unmap</param>
/// <param name="size">Size in bytes of the region being unmapped</param>
public void Unmap(ulong va, ulong size)
{
lock (_pageTable)
{
// Event handlers are not expected to be thread safe.
MemoryUnmapped?.Invoke(this, new UnmapEventArgs(va, size));
for (ulong offset = 0; offset < size; offset += PageSize)
{
SetPte(va + offset, PteUnmapped);
}
}
}
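// Illustrative usage sketch (hypothetical addresses, not part of the original
// source): mapping 16 KiB of pitch-linear memory, translating into it, and
// unmapping it again.
//
// memoryManager.Map(pa: 0x8000, va: 0x10000, size: 0x4000, PteKind.Pitch);
// ulong pa = memoryManager.Translate(0x10040); // 0x8040
// memoryManager.Unmap(0x10000, 0x4000);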
/// <summary>
/// Checks if a region of GPU mapped memory is contiguous.
/// </summary>
/// <param name="va">GPU virtual address of the region</param>
/// <param name="size">Size of the region</param>
/// <returns>True if the region is contiguous, false otherwise</returns>
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private bool IsContiguous(ulong va, int size)
{
if (!ValidateAddress(va) || GetPte(va) == PteUnmapped)
{
return false;
}
ulong endVa = (va + (ulong)size + PageMask) & ~PageMask;
va &= ~PageMask;
int pages = (int)((endVa - va) / PageSize);
for (int page = 0; page < pages - 1; page++)
{
if (!ValidateAddress(va + PageSize) || GetPte(va + PageSize) == PteUnmapped)
{
return false;
}
if (Translate(va) + PageSize != Translate(va + PageSize))
{
return false;
}
va += PageSize;
}
return true;
}
/// <summary>
/// Gets the physical regions that make up the given virtual address region.
/// </summary>
/// <param name="va">Virtual address of the range</param>
/// <param name="size">Size of the range</param>
/// <returns>Multi-range with the physical regions</returns>
public MultiRange GetPhysicalRegions(ulong va, ulong size)
{
if (IsContiguous(va, (int)size))
{
return new MultiRange(Translate(va), size);
}
ulong regionStart = Translate(va);
ulong regionSize = Math.Min(size, PageSize - (va & PageMask));
ulong endVa = va + size;
ulong endVaRounded = (endVa + PageMask) & ~PageMask;
va &= ~PageMask;
int pages = (int)((endVaRounded - va) / PageSize);
var regions = new List<MemoryRange>();
for (int page = 0; page < pages - 1; page++)
{
ulong currPa = Translate(va);
ulong newPa = Translate(va + PageSize);
if ((currPa != PteUnmapped || newPa != PteUnmapped) && currPa + PageSize != newPa)
{
regions.Add(new MemoryRange(regionStart, regionSize));
regionStart = newPa;
regionSize = 0;
}
va += PageSize;
regionSize += Math.Min(endVa - va, PageSize);
}
regions.Add(new MemoryRange(regionStart, regionSize));
return new MultiRange(regions.ToArray());
}
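// Illustrative sketch (hypothetical values): for va = 0x1000 and size = 0x3000,
// with the three pages mapped to physical 0x8000, 0x9000 and 0xC000, the first
// two pages coalesce into one region and the result is:
//
// new MultiRange(new[] { new MemoryRange(0x8000, 0x2000), new MemoryRange(0xC000, 0x1000) })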
/// <summary>
/// Checks if a given GPU virtual memory range is mapped to the same physical regions
/// as the specified physical memory multi-range.
/// </summary>
/// <param name="range">Physical memory multi-range</param>
/// <param name="va">GPU virtual memory address</param>
/// <returns>True if the virtual memory region is mapped into the specified physical one, false otherwise</returns>
public bool CompareRange(MultiRange range, ulong va)
{
va &= ~PageMask;
for (int i = 0; i < range.Count; i++)
{
MemoryRange currentRange = range.GetSubRange(i);
if (currentRange.Address != PteUnmapped)
{
ulong address = currentRange.Address & ~PageMask;
ulong endAddress = (currentRange.EndAddress + PageMask) & ~PageMask;
while (address < endAddress)
{
if (Translate(va) != address)
{
return false;
}
va += PageSize;
address += PageSize;
}
}
else
{
ulong endVa = va + (((currentRange.Size) + PageMask) & ~PageMask);
while (va < endVa)
{
if (Translate(va) != PteUnmapped)
{
return false;
}
va += PageSize;
}
}
}
return true;
}
/// <summary>
/// Validates a GPU virtual address.
/// </summary>
/// <param name="va">Address to validate</param>
/// <returns>True if the address is valid, false otherwise</returns>
private static bool ValidateAddress(ulong va)
{
return va < (1UL << AddressSpaceBits);
}
/// <summary>
/// Checks if a given page is mapped.
/// </summary>
/// <param name="va">GPU virtual address of the page to check</param>
/// <returns>True if the page is mapped, false otherwise</returns>
public bool IsMapped(ulong va)
{
return Translate(va) != PteUnmapped;
}
/// <summary>
/// Translates a GPU virtual address to a CPU virtual address.
/// </summary>
/// <param name="va">GPU virtual address to be translated</param>
/// <returns>CPU virtual address, or <see cref="PteUnmapped"/> if unmapped</returns>
public ulong Translate(ulong va)
{
if (!ValidateAddress(va))
{
return PteUnmapped;
}
ulong pte = GetPte(va);
if (pte == PteUnmapped)
{
return PteUnmapped;
}
return UnpackPaFromPte(pte) + (va & PageMask);
}
/// <summary>
/// Translates a GPU virtual address to a CPU virtual address on the first mapped page of memory
/// on the specified region.
/// If no page is mapped on the specified region, <see cref="PteUnmapped"/> is returned.
/// </summary>
/// <param name="va">GPU virtual address to be translated</param>
/// <param name="size">Size of the range to be translated</param>
/// <returns>CPU virtual address, or <see cref="PteUnmapped"/> if unmapped</returns>
public ulong TranslateFirstMapped(ulong va, ulong size)
{
if (!ValidateAddress(va))
{
return PteUnmapped;
}
ulong endVa = va + size;
ulong pte = GetPte(va);
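// Walk forward, snapping to each page boundary, until a mapped PTE is found or the range ends.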
for (; va < endVa && pte == PteUnmapped; va += PageSize - (va & PageMask))
{
pte = GetPte(va);
}
if (pte == PteUnmapped)
{
return PteUnmapped;
}
return UnpackPaFromPte(pte) + (va & PageMask);
}
/// <summary>
/// Gets the kind of a given memory page.
/// This might indicate the type of resource that can be allocated on the page, and also texture tiling.
/// </summary>
/// <param name="va">GPU virtual address</param>
/// <returns>Kind of the memory page</returns>
public PteKind GetKind(ulong va)
{
if (!ValidateAddress(va))
{
return PteKind.Invalid;
}
ulong pte = GetPte(va);
if (pte == PteUnmapped)
{
return PteKind.Invalid;
}
return UnpackKindFromPte(pte);
}
/// <summary>
/// Gets the Page Table entry for a given GPU virtual address.
/// </summary>
/// <param name="va">GPU virtual address</param>
/// <returns>Page table entry (CPU virtual address)</returns>
private ulong GetPte(ulong va)
{
ulong l0 = (va >> PtLvl0Bit) & PtLvl0Mask;
ulong l1 = (va >> PtLvl1Bit) & PtLvl1Mask;
if (_pageTable[l0] == null)
{
return PteUnmapped;
}
return _pageTable[l0][l1];
}
/// <summary>
/// Sets a Page Table entry at a given GPU virtual address.
/// </summary>
/// <param name="va">GPU virtual address</param>
/// <param name="pte">Page table entry (CPU virtual address)</param>
private void SetPte(ulong va, ulong pte)
{
ulong l0 = (va >> PtLvl0Bit) & PtLvl0Mask;
ulong l1 = (va >> PtLvl1Bit) & PtLvl1Mask;
if (_pageTable[l0] == null)
{
_pageTable[l0] = new ulong[PtLvl1Size];
for (ulong index = 0; index < PtLvl1Size; index++)
{
_pageTable[l0][index] = PteUnmapped;
}
}
_pageTable[l0][l1] = pte;
}
/// <summary>
/// Creates a page table entry from a physical address and kind.
/// </summary>
/// <param name="pa">Physical address</param>
/// <param name="kind">Kind</param>
/// <returns>Page table entry</returns>
private static ulong PackPte(ulong pa, PteKind kind)
{
return pa | ((ulong)kind << 56);
}
/// <summary>
/// Unpacks kind from a page table entry.
/// </summary>
/// <param name="pte">Page table entry</param>
/// <returns>Kind</returns>
private static PteKind UnpackKindFromPte(ulong pte)
{
return (PteKind)(pte >> 56);
}
/// <summary>
/// Unpacks physical address from a page table entry.
/// </summary>
/// <param name="pte">Page table entry</param>
/// <returns>Physical address</returns>
private static ulong UnpackPaFromPte(ulong pte)
{
return pte & 0xffffffffffffffUL;
}
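// Illustrative note (hypothetical values): the kind occupies the top byte and the
// low 56 bits hold the CPU address, e.g.
//
// PackPte(0x5000, PteKind.Z16) == 0x0100000000005000
// UnpackKindFromPte(0x0100000000005000UL) == PteKind.Z16
// UnpackPaFromPte(0x0100000000005000UL) == 0x5000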
}
}

View file

@ -0,0 +1,58 @@
using Ryujinx.Memory;
using Ryujinx.Memory.Range;
using System;
namespace Ryujinx.Graphics.Gpu.Memory
{
/// <summary>
/// A writable block that targets a given MultiRange within a PhysicalMemory instance.
/// </summary>
internal class MultiRangeWritableBlock : IWritableBlock
{
private readonly MultiRange _range;
private readonly PhysicalMemory _physicalMemory;
/// <summary>
/// Creates a new MultiRangeWritableBlock.
/// </summary>
/// <param name="range">The MultiRange to write to</param>
/// <param name="physicalMemory">The PhysicalMemory the given MultiRange addresses</param>
public MultiRangeWritableBlock(MultiRange range, PhysicalMemory physicalMemory)
{
_range = range;
_physicalMemory = physicalMemory;
}
/// <summary>
/// Write data to the MultiRange.
/// </summary>
/// <param name="va">Offset address</param>
/// <param name="data">Data to write</param>
/// <exception cref="ArgumentException">Thrown when a non-zero offset is given</exception>
public void Write(ulong va, ReadOnlySpan<byte> data)
{
if (va != 0)
{
throw new ArgumentException($"{nameof(va)} cannot be non-zero for {nameof(MultiRangeWritableBlock)}.");
}
_physicalMemory.Write(_range, data);
}
/// <summary>
/// Write data to the MultiRange, without tracking.
/// </summary>
/// <param name="va">Offset address</param>
/// <param name="data">Data to write</param>
/// <exception cref="ArgumentException">Thrown when a non-zero offset is given</exception>
public void WriteUntracked(ulong va, ReadOnlySpan<byte> data)
{
if (va != 0)
{
throw new ArgumentException($"{nameof(va)} cannot be non-zero for {nameof(MultiRangeWritableBlock)}.");
}
_physicalMemory.WriteUntracked(_range, data);
}
}
}

View file

@ -0,0 +1,413 @@
using Ryujinx.Cpu;
using Ryujinx.Cpu.Tracking;
using Ryujinx.Graphics.Gpu.Image;
using Ryujinx.Graphics.Gpu.Shader;
using Ryujinx.Memory;
using Ryujinx.Memory.Range;
using Ryujinx.Memory.Tracking;
using System;
using System.Collections.Generic;
using System.Runtime.InteropServices;
using System.Threading;
namespace Ryujinx.Graphics.Gpu.Memory
{
/// <summary>
/// Represents physical memory, accessible from the GPU.
/// In practice this works with CPU virtual addresses of memory mapped by the application process.
/// </summary>
class PhysicalMemory : IDisposable
{
private readonly GpuContext _context;
private IVirtualMemoryManagerTracked _cpuMemory;
private int _referenceCount;
/// <summary>
/// Indicates whether the memory manager supports 4KB pages.
/// </summary>
public bool Supports4KBPages => _cpuMemory.Supports4KBPages;
/// <summary>
/// In-memory shader cache.
/// </summary>
public ShaderCache ShaderCache { get; }
/// <summary>
/// GPU buffer manager.
/// </summary>
public BufferCache BufferCache { get; }
/// <summary>
/// GPU texture manager.
/// </summary>
public TextureCache TextureCache { get; }
/// <summary>
/// Creates a new instance of the physical memory.
/// </summary>
/// <param name="context">GPU context that the physical memory belongs to</param>
/// <param name="cpuMemory">CPU memory manager of the application process</param>
public PhysicalMemory(GpuContext context, IVirtualMemoryManagerTracked cpuMemory)
{
_context = context;
_cpuMemory = cpuMemory;
ShaderCache = new ShaderCache(context);
BufferCache = new BufferCache(context, this);
TextureCache = new TextureCache(context, this);
if (cpuMemory is IRefCounted rc)
{
rc.IncrementReferenceCount();
}
_referenceCount = 1;
}
/// <summary>
/// Increments the memory reference count.
/// </summary>
public void IncrementReferenceCount()
{
Interlocked.Increment(ref _referenceCount);
}
/// <summary>
/// Decrements the memory reference count.
/// </summary>
public void DecrementReferenceCount()
{
if (Interlocked.Decrement(ref _referenceCount) == 0 && _cpuMemory is IRefCounted rc)
{
rc.DecrementReferenceCount();
}
}
/// <summary>
/// Gets a span of data from the application process.
/// </summary>
/// <param name="address">Start address of the range</param>
/// <param name="size">Size in bytes to be range</param>
/// <param name="tracked">True if read tracking is triggered on the span</param>
/// <returns>A read only span of the data at the specified memory location</returns>
public ReadOnlySpan<byte> GetSpan(ulong address, int size, bool tracked = false)
{
return _cpuMemory.GetSpan(address, size, tracked);
}
/// <summary>
/// Gets a span of data from the application process.
/// </summary>
/// <param name="range">Ranges of physical memory where the data is located</param>
/// <param name="tracked">True if read tracking is triggered on the span</param>
/// <returns>A read only span of the data at the specified memory location</returns>
public ReadOnlySpan<byte> GetSpan(MultiRange range, bool tracked = false)
{
if (range.Count == 1)
{
var singleRange = range.GetSubRange(0);
if (singleRange.Address != MemoryManager.PteUnmapped)
{
return _cpuMemory.GetSpan(singleRange.Address, (int)singleRange.Size, tracked);
}
}
Span<byte> data = new byte[range.GetSize()];
int offset = 0;
for (int i = 0; i < range.Count; i++)
{
var currentRange = range.GetSubRange(i);
int size = (int)currentRange.Size;
if (currentRange.Address != MemoryManager.PteUnmapped)
{
_cpuMemory.GetSpan(currentRange.Address, size, tracked).CopyTo(data.Slice(offset, size));
}
offset += size;
}
return data;
}
/// <summary>
/// Gets a writable region from the application process.
/// </summary>
/// <param name="address">Start address of the range</param>
/// <param name="size">Size in bytes to be range</param>
/// <param name="tracked">True if write tracking is triggered on the span</param>
/// <returns>A writable region with the data at the specified memory location</returns>
public WritableRegion GetWritableRegion(ulong address, int size, bool tracked = false)
{
return _cpuMemory.GetWritableRegion(address, size, tracked);
}
/// <summary>
/// Gets a writable region from GPU mapped memory.
/// </summary>
/// <param name="range">Range</param>
/// <param name="tracked">True if write tracking is triggered on the span</param>
/// <returns>A writable region with the data at the specified memory location</returns>
public WritableRegion GetWritableRegion(MultiRange range, bool tracked = false)
{
if (range.Count == 1)
{
MemoryRange subrange = range.GetSubRange(0);
return GetWritableRegion(subrange.Address, (int)subrange.Size, tracked);
}
else
{
Memory<byte> memory = new byte[range.GetSize()];
int offset = 0;
for (int i = 0; i < range.Count; i++)
{
var currentRange = range.GetSubRange(i);
int size = (int)currentRange.Size;
if (currentRange.Address != MemoryManager.PteUnmapped)
{
GetSpan(currentRange.Address, size).CopyTo(memory.Span.Slice(offset, size));
}
offset += size;
}
return new WritableRegion(new MultiRangeWritableBlock(range, this), 0, memory, tracked);
}
}
/// <summary>
/// Reads data from the application process.
/// </summary>
/// <typeparam name="T">Type of the structure</typeparam>
/// <param name="address">Address to read from</param>
/// <returns>The data at the specified memory location</returns>
public T Read<T>(ulong address) where T : unmanaged
{
return _cpuMemory.Read<T>(address);
}
/// <summary>
/// Reads data from the application process, with write tracking.
/// </summary>
/// <typeparam name="T">Type of the structure</typeparam>
/// <param name="address">Address to read from</param>
/// <returns>The data at the specified memory location</returns>
public T ReadTracked<T>(ulong address) where T : unmanaged
{
return _cpuMemory.ReadTracked<T>(address);
}
/// <summary>
/// Writes data to the application process, triggering a precise memory tracking event.
/// </summary>
/// <param name="address">Address to write into</param>
/// <param name="data">Data to be written</param>
public void WriteTrackedResource(ulong address, ReadOnlySpan<byte> data)
{
_cpuMemory.SignalMemoryTracking(address, (ulong)data.Length, true, precise: true);
_cpuMemory.WriteUntracked(address, data);
}
/// <summary>
/// Writes data to the application process.
/// </summary>
/// <param name="address">Address to write into</param>
/// <param name="data">Data to be written</param>
public void Write(ulong address, ReadOnlySpan<byte> data)
{
_cpuMemory.Write(address, data);
}
/// <summary>
/// Writes data to the application process.
/// </summary>
/// <param name="range">Ranges of physical memory where the data is located</param>
/// <param name="data">Data to be written</param>
public void Write(MultiRange range, ReadOnlySpan<byte> data)
{
WriteImpl(range, data, _cpuMemory.Write);
}
/// <summary>
/// Writes data to the application process, without any tracking.
/// </summary>
/// <param name="address">Address to write into</param>
/// <param name="data">Data to be written</param>
public void WriteUntracked(ulong address, ReadOnlySpan<byte> data)
{
_cpuMemory.WriteUntracked(address, data);
}
/// <summary>
/// Writes data to the application process, without any tracking.
/// </summary>
/// <param name="range">Ranges of physical memory where the data is located</param>
/// <param name="data">Data to be written</param>
public void WriteUntracked(MultiRange range, ReadOnlySpan<byte> data)
{
WriteImpl(range, data, _cpuMemory.WriteUntracked);
}
/// <summary>
/// Writes data to the application process, returning false if the data was not changed.
/// This triggers read memory tracking, as a redundancy check would be useless if the data is not up to date.
/// </summary>
/// <remarks>The memory manager may report that memory has changed when it has not, to avoid expensive data copies.</remarks>
/// <param name="address">Address to write into</param>
/// <param name="data">Data to be written</param>
/// <returns>True if the data was changed, false otherwise</returns>
public bool WriteWithRedundancyCheck(ulong address, ReadOnlySpan<byte> data)
{
return _cpuMemory.WriteWithRedundancyCheck(address, data);
}
private delegate void WriteCallback(ulong address, ReadOnlySpan<byte> data);
/// <summary>
/// Writes data to the application process, using the supplied callback method.
/// </summary>
/// <param name="range">Ranges of physical memory where the data is located</param>
/// <param name="data">Data to be written</param>
/// <param name="writeCallback">Callback method that will perform the write</param>
private static void WriteImpl(MultiRange range, ReadOnlySpan<byte> data, WriteCallback writeCallback)
{
if (range.Count == 1)
{
var singleRange = range.GetSubRange(0);
if (singleRange.Address != MemoryManager.PteUnmapped)
{
writeCallback(singleRange.Address, data);
}
}
else
{
int offset = 0;
for (int i = 0; i < range.Count; i++)
{
var currentRange = range.GetSubRange(i);
int size = (int)currentRange.Size;
if (currentRange.Address != MemoryManager.PteUnmapped)
{
writeCallback(currentRange.Address, data.Slice(offset, size));
}
offset += size;
}
}
}
/// <summary>
/// Fills the specified memory region with a 32-bit integer value.
/// </summary>
/// <param name="address">CPU virtual address of the region</param>
/// <param name="size">Size of the region</param>
/// <param name="value">Value to fill the region with</param>
/// <param name="kind">Kind of the resource being filled, which will not be signalled as CPU modified</param>
public void FillTrackedResource(ulong address, ulong size, uint value, ResourceKind kind)
{
_cpuMemory.SignalMemoryTracking(address, size, write: true, precise: true, (int)kind);
using WritableRegion region = _cpuMemory.GetWritableRegion(address, (int)size);
MemoryMarshal.Cast<byte, uint>(region.Memory.Span).Fill(value);
}
/// <summary>
/// Obtains a memory tracking handle for the given virtual region. This should be disposed when finished with.
/// </summary>
/// <param name="address">CPU virtual address of the region</param>
/// <param name="size">Size of the region</param>
/// <param name="kind">Kind of the resource being tracked</param>
/// <returns>The memory tracking handle</returns>
public CpuRegionHandle BeginTracking(ulong address, ulong size, ResourceKind kind)
{
return _cpuMemory.BeginTracking(address, size, (int)kind);
}
/// <summary>
/// Obtains a memory tracking handle for the given virtual region. This should be disposed when finished with.
/// </summary>
/// <param name="range">Ranges of physical memory where the data is located</param>
/// <param name="kind">Kind of the resource being tracked</param>
/// <returns>The memory tracking handle</returns>
public GpuRegionHandle BeginTracking(MultiRange range, ResourceKind kind)
{
var cpuRegionHandles = new CpuRegionHandle[range.Count];
int count = 0;
for (int i = 0; i < range.Count; i++)
{
var currentRange = range.GetSubRange(i);
if (currentRange.Address != MemoryManager.PteUnmapped)
{
cpuRegionHandles[count++] = _cpuMemory.BeginTracking(currentRange.Address, currentRange.Size, (int)kind);
}
}
if (count != range.Count)
{
Array.Resize(ref cpuRegionHandles, count);
}
return new GpuRegionHandle(cpuRegionHandles);
}
/// <summary>
/// Obtains a memory tracking handle for the given virtual region, with a specified granularity. This should be disposed when finished with.
/// </summary>
/// <param name="address">CPU virtual address of the region</param>
/// <param name="size">Size of the region</param>
/// <param name="kind">Kind of the resource being tracked</param>
/// <param name="handles">Handles to inherit state from or reuse</param>
/// <param name="granularity">Desired granularity of write tracking</param>
/// <returns>The memory tracking handle</returns>
public CpuMultiRegionHandle BeginGranularTracking(ulong address, ulong size, ResourceKind kind, IEnumerable<IRegionHandle> handles = null, ulong granularity = 4096)
{
return _cpuMemory.BeginGranularTracking(address, size, handles, granularity, (int)kind);
}
/// <summary>
/// Obtains a smart memory tracking handle for the given virtual region, with a specified granularity. This should be disposed when finished with.
/// </summary>
/// <param name="address">CPU virtual address of the region</param>
/// <param name="size">Size of the region</param>
/// <param name="kind">Kind of the resource being tracked</param>
/// <param name="granularity">Desired granularity of write tracking</param>
/// <returns>The memory tracking handle</returns>
public CpuSmartMultiRegionHandle BeginSmartGranularTracking(ulong address, ulong size, ResourceKind kind, ulong granularity = 4096)
{
return _cpuMemory.BeginSmartGranularTracking(address, size, granularity, (int)kind);
}
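// Assumed behavior: unlike BeginGranularTracking, the "smart" variant sizes
// and splits its sub-handles around observed access patterns rather than
// using one fixed granularity across the whole region.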
/// <summary>
/// Checks if a given memory page is mapped.
/// </summary>
/// <param name="address">CPU virtual address of the page</param>
/// <returns>True if mapped, false otherwise</returns>
public bool IsMapped(ulong address)
{
return _cpuMemory.IsMapped(address);
}
/// <summary>
/// Release our reference to the CPU memory manager.
/// </summary>
public void Dispose()
{
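// Host GPU resources must be released on the render thread, so defer the
// actual teardown rather than destroying the caches here.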
_context.DeferredActions.Enqueue(Destroy);
}
/// <summary>
/// Disposes the host GPU caches holding resources mapped on this physical memory.
/// This must only be called from the render thread.
/// </summary>
private void Destroy()
{
ShaderCache.Dispose();
BufferCache.Dispose();
TextureCache.Dispose();
DecrementReferenceCount();
}
}
}

View file

@@ -0,0 +1,268 @@
namespace Ryujinx.Graphics.Gpu.Memory
{
/// <summary>
/// Kind of the resource at the given memory mapping.
/// </summary>
public enum PteKind : byte
{
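// The names are flattened NVIDIA PTE kind identifiers; for example, Z16MS42C
// corresponds to Z16_MS4_2C (Z16 depth, 4x multisample, 2C compression).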
Invalid = 0xff,
Pitch = 0x00,
Z16 = 0x01,
Z162C = 0x02,
Z16MS22C = 0x03,
Z16MS42C = 0x04,
Z16MS82C = 0x05,
Z16MS162C = 0x06,
Z162Z = 0x07,
Z16MS22Z = 0x08,
Z16MS42Z = 0x09,
Z16MS82Z = 0x0a,
Z16MS162Z = 0x0b,
Z162CZ = 0x36,
Z16MS22CZ = 0x37,
Z16MS42CZ = 0x38,
Z16MS82CZ = 0x39,
Z16MS162CZ = 0x5f,
Z164CZ = 0x0c,
Z16MS24CZ = 0x0d,
Z16MS44CZ = 0x0e,
Z16MS84CZ = 0x0f,
Z16MS164CZ = 0x10,
S8Z24 = 0x11,
S8Z241Z = 0x12,
S8Z24MS21Z = 0x13,
S8Z24MS41Z = 0x14,
S8Z24MS81Z = 0x15,
S8Z24MS161Z = 0x16,
S8Z242CZ = 0x17,
S8Z24MS22CZ = 0x18,
S8Z24MS42CZ = 0x19,
S8Z24MS82CZ = 0x1a,
S8Z24MS162CZ = 0x1b,
S8Z242CS = 0x1c,
S8Z24MS22CS = 0x1d,
S8Z24MS42CS = 0x1e,
S8Z24MS82CS = 0x1f,
S8Z24MS162CS = 0x20,
S8Z244CSZV = 0x21,
S8Z24MS24CSZV = 0x22,
S8Z24MS44CSZV = 0x23,
S8Z24MS84CSZV = 0x24,
S8Z24MS164CSZV = 0x25,
V8Z24MS4VC12 = 0x26,
V8Z24MS4VC4 = 0x27,
V8Z24MS8VC8 = 0x28,
V8Z24MS8VC24 = 0x29,
V8Z24MS4VC121ZV = 0x2e,
V8Z24MS4VC41ZV = 0x2f,
V8Z24MS8VC81ZV = 0x30,
V8Z24MS8VC241ZV = 0x31,
V8Z24MS4VC122CS = 0x32,
V8Z24MS4VC42CS = 0x33,
V8Z24MS8VC82CS = 0x34,
V8Z24MS8VC242CS = 0x35,
V8Z24MS4VC122CZV = 0x3a,
V8Z24MS4VC42CZV = 0x3b,
V8Z24MS8VC82CZV = 0x3c,
V8Z24MS8VC242CZV = 0x3d,
V8Z24MS4VC122ZV = 0x3e,
V8Z24MS4VC42ZV = 0x3f,
V8Z24MS8VC82ZV = 0x40,
V8Z24MS8VC242ZV = 0x41,
V8Z24MS4VC124CSZV = 0x42,
V8Z24MS4VC44CSZV = 0x43,
V8Z24MS8VC84CSZV = 0x44,
V8Z24MS8VC244CSZV = 0x45,
Z24S8 = 0x46,
Z24S81Z = 0x47,
Z24S8MS21Z = 0x48,
Z24S8MS41Z = 0x49,
Z24S8MS81Z = 0x4a,
Z24S8MS161Z = 0x4b,
Z24S82CS = 0x4c,
Z24S8MS22CS = 0x4d,
Z24S8MS42CS = 0x4e,
Z24S8MS82CS = 0x4f,
Z24S8MS162CS = 0x50,
Z24S82CZ = 0x51,
Z24S8MS22CZ = 0x52,
Z24S8MS42CZ = 0x53,
Z24S8MS82CZ = 0x54,
Z24S8MS162CZ = 0x55,
Z24S84CSZV = 0x56,
Z24S8MS24CSZV = 0x57,
Z24S8MS44CSZV = 0x58,
Z24S8MS84CSZV = 0x59,
Z24S8MS164CSZV = 0x5a,
Z24V8MS4VC12 = 0x5b,
Z24V8MS4VC4 = 0x5c,
Z24V8MS8VC8 = 0x5d,
Z24V8MS8VC24 = 0x5e,
YUVB8C12Y = 0x60,
YUVB8C22Y = 0x61,
YUVB10C12Y = 0x62,
YUVB10C22Y = 0x6b,
YUVB12C12Y = 0x6c,
YUVB12C22Y = 0x6d,
Z24V8MS4VC121ZV = 0x63,
Z24V8MS4VC41ZV = 0x64,
Z24V8MS8VC81ZV = 0x65,
Z24V8MS8VC241ZV = 0x66,
Z24V8MS4VC122CS = 0x67,
Z24V8MS4VC42CS = 0x68,
Z24V8MS8VC82CS = 0x69,
Z24V8MS8VC242CS = 0x6a,
Z24V8MS4VC122CZV = 0x6f,
Z24V8MS4VC42CZV = 0x70,
Z24V8MS8VC82CZV = 0x71,
Z24V8MS8VC242CZV = 0x72,
Z24V8MS4VC122ZV = 0x73,
Z24V8MS4VC42ZV = 0x74,
Z24V8MS8VC82ZV = 0x75,
Z24V8MS8VC242ZV = 0x76,
Z24V8MS4VC124CSZV = 0x77,
Z24V8MS4VC44CSZV = 0x78,
Z24V8MS8VC84CSZV = 0x79,
Z24V8MS8VC244CSZV = 0x7a,
ZF32 = 0x7b,
ZF321Z = 0x7c,
ZF32MS21Z = 0x7d,
ZF32MS41Z = 0x7e,
ZF32MS81Z = 0x7f,
ZF32MS161Z = 0x80,
ZF322CS = 0x81,
ZF32MS22CS = 0x82,
ZF32MS42CS = 0x83,
ZF32MS82CS = 0x84,
ZF32MS162CS = 0x85,
ZF322CZ = 0x86,
ZF32MS22CZ = 0x87,
ZF32MS42CZ = 0x88,
ZF32MS82CZ = 0x89,
ZF32MS162CZ = 0x8a,
X8Z24X16V8S8MS4VC12 = 0x8b,
X8Z24X16V8S8MS4VC4 = 0x8c,
X8Z24X16V8S8MS8VC8 = 0x8d,
X8Z24X16V8S8MS8VC24 = 0x8e,
X8Z24X16V8S8MS4VC121CS = 0x8f,
X8Z24X16V8S8MS4VC41CS = 0x90,
X8Z24X16V8S8MS8VC81CS = 0x91,
X8Z24X16V8S8MS8VC241CS = 0x92,
X8Z24X16V8S8MS4VC121ZV = 0x97,
X8Z24X16V8S8MS4VC41ZV = 0x98,
X8Z24X16V8S8MS8VC81ZV = 0x99,
X8Z24X16V8S8MS8VC241ZV = 0x9a,
X8Z24X16V8S8MS4VC121CZV = 0x9b,
X8Z24X16V8S8MS4VC41CZV = 0x9c,
X8Z24X16V8S8MS8VC81CZV = 0x9d,
X8Z24X16V8S8MS8VC241CZV = 0x9e,
X8Z24X16V8S8MS4VC122CS = 0x9f,
X8Z24X16V8S8MS4VC42CS = 0xa0,
X8Z24X16V8S8MS8VC82CS = 0xa1,
X8Z24X16V8S8MS8VC242CS = 0xa2,
X8Z24X16V8S8MS4VC122CSZV = 0xa3,
X8Z24X16V8S8MS4VC42CSZV = 0xa4,
X8Z24X16V8S8MS8VC82CSZV = 0xa5,
X8Z24X16V8S8MS8VC242CSZV = 0xa6,
ZF32X16V8S8MS4VC12 = 0xa7,
ZF32X16V8S8MS4VC4 = 0xa8,
ZF32X16V8S8MS8VC8 = 0xa9,
ZF32X16V8S8MS8VC24 = 0xaa,
ZF32X16V8S8MS4VC121CS = 0xab,
ZF32X16V8S8MS4VC41CS = 0xac,
ZF32X16V8S8MS8VC81CS = 0xad,
ZF32X16V8S8MS8VC241CS = 0xae,
ZF32X16V8S8MS4VC121ZV = 0xb3,
ZF32X16V8S8MS4VC41ZV = 0xb4,
ZF32X16V8S8MS8VC81ZV = 0xb5,
ZF32X16V8S8MS8VC241ZV = 0xb6,
ZF32X16V8S8MS4VC121CZV = 0xb7,
ZF32X16V8S8MS4VC41CZV = 0xb8,
ZF32X16V8S8MS8VC81CZV = 0xb9,
ZF32X16V8S8MS8VC241CZV = 0xba,
ZF32X16V8S8MS4VC122CS = 0xbb,
ZF32X16V8S8MS4VC42CS = 0xbc,
ZF32X16V8S8MS8VC82CS = 0xbd,
ZF32X16V8S8MS8VC242CS = 0xbe,
ZF32X16V8S8MS4VC122CSZV = 0xbf,
ZF32X16V8S8MS4VC42CSZV = 0xc0,
ZF32X16V8S8MS8VC82CSZV = 0xc1,
ZF32X16V8S8MS8VC242CSZV = 0xc2,
ZF32X24S8 = 0xc3,
ZF32X24S81CS = 0xc4,
ZF32X24S8MS21CS = 0xc5,
ZF32X24S8MS41CS = 0xc6,
ZF32X24S8MS81CS = 0xc7,
ZF32X24S8MS161CS = 0xc8,
ZF32X24S82CSZV = 0xce,
ZF32X24S8MS22CSZV = 0xcf,
ZF32X24S8MS42CSZV = 0xd0,
ZF32X24S8MS82CSZV = 0xd1,
ZF32X24S8MS162CSZV = 0xd2,
ZF32X24S82CS = 0xd3,
ZF32X24S8MS22CS = 0xd4,
ZF32X24S8MS42CS = 0xd5,
ZF32X24S8MS82CS = 0xd6,
ZF32X24S8MS162CS = 0xd7,
S8 = 0x2a,
S82S = 0x2b,
Generic16Bx2 = 0xfe,
C322C = 0xd8,
C322CBR = 0xd9,
C322CBA = 0xda,
C322CRA = 0xdb,
C322BRA = 0xdc,
C32MS22C = 0xdd,
C32MS22CBR = 0xde,
C32MS24CBRA = 0xcc,
C32MS42C = 0xdf,
C32MS42CBR = 0xe0,
C32MS42CBA = 0xe1,
C32MS42CRA = 0xe2,
C32MS42BRA = 0xe3,
C32MS44CBRA = 0x2c,
C32MS8MS162C = 0xe4,
C32MS8MS162CRA = 0xe5,
C642C = 0xe6,
C642CBR = 0xe7,
C642CBA = 0xe8,
C642CRA = 0xe9,
C642BRA = 0xea,
C64MS22C = 0xeb,
C64MS22CBR = 0xec,
C64MS24CBRA = 0xcd,
C64MS42C = 0xed,
C64MS42CBR = 0xee,
C64MS42CBA = 0xef,
C64MS42CRA = 0xf0,
C64MS42BRA = 0xf1,
C64MS44CBRA = 0x2d,
C64MS8MS162C = 0xf2,
C64MS8MS162CRA = 0xf3,
C1282C = 0xf4,
C1282CR = 0xf5,
C128MS22C = 0xf6,
C128MS22CR = 0xf7,
C128MS42C = 0xf8,
C128MS42CR = 0xf9,
C128MS8MS162C = 0xfa,
C128MS8MS162CR = 0xfb,
X8C24 = 0xfc,
PitchNoSwizzle = 0xfd,
SmSkedMessage = 0xca,
SmHostMessage = 0xcb
}
/// <summary>
/// Extension methods for the PteKind enum.
/// </summary>
static class PteKindExtensions
{
/// <summary>
/// Checks if the kind is pitch.
/// </summary>
/// <param name="kind">Kind to check</param>
/// <returns>True if pitch, false otherwise</returns>
public static bool IsPitch(this PteKind kind)
{
return kind == PteKind.Pitch || kind == PteKind.PitchNoSwizzle;
}
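// Illustrative usage sketch: both pitch kinds address memory linearly, so
// callers can use IsPitch() to select a linear path, e.g.
//
//     bool isLinear = kind.IsPitch();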
}
}

View file

@@ -0,0 +1,13 @@
namespace Ryujinx.Graphics.Gpu.Memory
{
/// <summary>
/// Kind of a GPU resource.
/// </summary>
enum ResourceKind
{
None,
Buffer,
Texture,
Pool
}
}

View file

@@ -0,0 +1,14 @@
namespace Ryujinx.Graphics.Gpu.Memory
{
/// <summary>
/// Event arguments for a guest memory unmap, carrying the affected virtual range.
/// </summary>
public class UnmapEventArgs
{
public ulong Address { get; }
public ulong Size { get; }
public UnmapEventArgs(ulong address, ulong size)
{
Address = address;
Size = size;
}
}
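// Illustrative usage sketch: consumers would subscribe to unmap notifications
// roughly like
//
//     memoryManager.MemoryUnmapped += (sender, e) => Invalidate(e.Address, e.Size);
//
// where MemoryUnmapped and Invalidate are assumed names for the event and the
// handler's cleanup step.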
}

View file

@@ -0,0 +1,13 @@
namespace Ryujinx.Graphics.Gpu.Memory
{
/// <summary>
/// GPU Vertex Buffer information.
/// </summary>
struct VertexBuffer
{
public ulong Address;   // Start address of the vertex data
public ulong Size;      // Size of the vertex data in bytes
public int Stride;      // Byte distance between consecutive vertices
public int Divisor;     // Instance divisor (0 when data advances per vertex)
}
}