Add a Multithreading layer for the GAL, multi-thread shader compilation at runtime (#2501)
* Initial Implementation
About as fast as nvidia GL multithreading, can be improved with faster command queuing.
* Struct based command list
Speeds up a bit. Still a lot of time lost to resource copy.
* Do shader init while the render thread is active.
* Introduce circular span pool V1
Ideally should be able to use structs instead of references for storing these spans on commands. Will try that next.
* Refactor SpanRef some more
Use a struct to represent SpanRef, rather than a reference.
* Flush buffers on background thread
* Use a span for UpdateRenderScale.
Much faster than copying the array.
* Calculate command size using reflection
* WIP parallel shaders
* Some minor optimisation
* Only 2 max refs per command now.
The command with 3 refs is gone. :relieved:
* Don't cast on the GPU side
* Remove redundant casts, force sync on window present
* Fix Shader Cache
* Fix host shader save.
* Fixup to work with new renderer stuff
* Make command Run static, use array of delegates as lookup
Profile says this takes less time than the previous way.
* Bring up to date
* Add settings toggle. Fix Muiltithreading Off mode.
* Fix warning.
* Release tracking lock for flushes
* Fix Conditional Render fast path with threaded gal
* Make handle iteration safe when releasing the lock
This is mostly temporary.
* Attempt to set backend threading on driver
Only really works on nvidia before launching a game.
* Fix race condition with BufferModifiedRangeList, exceptions in tracking actions
* Update buffer set commands
* Some cleanup
* Only use stutter workaround when using opengl renderer non-threaded
* Add host-conditional reservation of counter events
There has always been the possibility that conditional rendering could use a query object just as it is disposed by the counter queue. This change makes it so that when the host decides to use host conditional rendering, the query object is reserved so that it cannot be deleted. Counter events can optionally start reserved, as the threaded implementation can reserve them before the backend creates them, and there would otherwise be a short amount of time where the counter queue could dispose the event before a call to reserve it could be made.
* Address Feedback
* Make counter flush tracked again.
Hopefully does not cause any issues this time.
* Wait for FlushTo on the main queue thread.
Currently assumes only one thread will want to FlushTo (in this case, the GPU thread)
* Add SDL2 headless integration
* Add HLE macro commands.
Co-authored-by: Mary <mary@mary.zone>
2021-08-26 18:31:29 -04:00
|
|
|
|
using Ryujinx.Common;
|
|
|
|
|
using Ryujinx.Common.Configuration;
|
|
|
|
|
using Ryujinx.Graphics.GAL.Multithreading.Commands;
|
|
|
|
|
using Ryujinx.Graphics.GAL.Multithreading.Commands.Buffer;
|
|
|
|
|
using Ryujinx.Graphics.GAL.Multithreading.Commands.Renderer;
|
|
|
|
|
using Ryujinx.Graphics.GAL.Multithreading.Model;
|
|
|
|
|
using Ryujinx.Graphics.GAL.Multithreading.Resources;
|
|
|
|
|
using Ryujinx.Graphics.GAL.Multithreading.Resources.Programs;
|
|
|
|
|
using Ryujinx.Graphics.Shader;
|
|
|
|
|
using System;
|
|
|
|
|
using System.Diagnostics;
|
|
|
|
|
using System.Runtime.CompilerServices;
|
|
|
|
|
using System.Runtime.InteropServices;
|
|
|
|
|
using System.Threading;
|
|
|
|
|
|
|
|
|
|
namespace Ryujinx.Graphics.GAL.Multithreading
|
|
|
|
|
{
|
|
|
|
|
/// <summary>
|
|
|
|
|
/// The ThreadedRenderer is a layer that can be put in front of any Renderer backend to make
|
|
|
|
|
/// its processing happen on a separate thread, rather than intertwined with the GPU emulation.
|
|
|
|
|
/// A new thread is created to handle the GPU command processing, separate from the renderer thread.
|
|
|
|
|
/// Calls to the renderer, pipeline and resources are queued to happen on the renderer thread.
|
|
|
|
|
/// </summary>
|
|
|
|
|
    public class ThreadedRenderer : IRenderer
    {
        // Size in bytes of the circular pool used to copy span data for queued commands.
        private const int SpanPoolBytes = 4 * 1024 * 1024;
        // Maximum number of object references a single command may add to the ref table.
        private const int MaxRefsPerCommand = 2;
        // Number of fixed-size slots in the circular command queue.
        private const int QueueCount = 10000;

        // Byte size of one command slot (max command size, aligned up to 4 in the constructor).
        private int _elementSize;
        // The real backend renderer all queued work is forwarded to.
        private IRenderer _baseRenderer;
        // Thread created by RunLoop to run GPU emulation (the command producer).
        private Thread _gpuThread;
        // Set by Dispose; causes RenderLoop to exit.
        private bool _disposed;
        // True while the GPU thread loop is running; cleared when it exits.
        private bool _running;

        // Signalled by SignalFrame, waited on by WaitForFrame. Starts signalled.
        private AutoResetEvent _frameComplete = new AutoResetEvent(true);

        // Signalled when there are queued commands for the render thread to consume.
        private ManualResetEventSlim _galWorkAvailable;
        // Backing field for the SpanPool property.
        private CircularSpanPool _spanPool;

        // Signalled by the consumer when an InvokeCommand slot has finished executing.
        private ManualResetEventSlim _invokeRun;

        // Tracks whether a samples-passed counter report has happened since the last reset.
        private bool _lastSampleCounterClear = true;

        // Raw command queue storage: QueueCount slots of _elementSize bytes each.
        private byte[] _commandQueue;
        // Circular table of object references used by queued struct commands.
        private object[] _refQueue;

        // Next slot the render thread will consume. Written only by the consumer.
        private int _consumerPtr;
        // Number of queued, not-yet-consumed commands (updated with Interlocked).
        private int _commandCount;

        // Next slot the GPU thread will produce into. Written only by the producer.
        private int _producerPtr;
        // Slot index of the most recently allocated command (used by InvokeCommand).
        private int _lastProducedPtr;
        // Slot whose completion should signal _invokeRun; swapped back to -1 by the consumer.
        private int _invokePtr;

        // Producer/consumer cursors into _refQueue.
        private int _refProducerPtr;
        private int _refConsumerPtr;

        public event EventHandler<ScreenCaptureImageInfo> ScreenCaptured;

        internal BufferMap Buffers { get; }
        internal SyncMap Sync { get; }
        internal CircularSpanPool SpanPool { get; }
        internal ProgramQueue Programs { get; }

        public IPipeline Pipeline { get; }
        public IWindow Window { get; }

        // The backend renderer this threaded layer wraps.
        public IRenderer BaseRenderer => _baseRenderer;

        public bool PreferThreading => _baseRenderer.PreferThreading;
|
|
|
|
|
|
|
|
|
|
        public ThreadedRenderer(IRenderer renderer)
        {
            _baseRenderer = renderer;

            // Re-raise backend screen captures with this wrapper as the sender.
            renderer.ScreenCaptured += (object sender, ScreenCaptureImageInfo info) => ScreenCaptured?.Invoke(this, info);

            // Threaded wrappers that queue their calls onto this renderer's command queue.
            Pipeline = new ThreadedPipeline(this, renderer.Pipeline);
            Window = new ThreadedWindow(this, renderer.Window);
            Buffers = new BufferMap();
            Sync = new SyncMap();
            Programs = new ProgramQueue(renderer);

            _galWorkAvailable = new ManualResetEventSlim(false);
            _invokeRun = new ManualResetEventSlim();
            _spanPool = new CircularSpanPool(this, SpanPoolBytes);
            SpanPool = _spanPool;

            // Every queue slot is sized to fit the largest command, aligned to 4 bytes.
            _elementSize = BitUtils.AlignUp(CommandHelper.GetMaxCommandSize(), 4);

            _commandQueue = new byte[_elementSize * QueueCount];
            // Sized for the worst case so AddTableRef can never overflow.
            _refQueue = new object[MaxRefsPerCommand * QueueCount];
        }
|
|
|
|
|
|
|
|
|
|
public void RunLoop(Action gpuLoop)
|
|
|
|
|
{
|
|
|
|
|
_running = true;
|
|
|
|
|
|
|
|
|
|
_gpuThread = new Thread(() => {
|
|
|
|
|
gpuLoop();
|
|
|
|
|
_running = false;
|
|
|
|
|
_galWorkAvailable.Set();
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
_gpuThread.Name = "GPU.MainThread";
|
|
|
|
|
_gpuThread.Start();
|
|
|
|
|
|
|
|
|
|
RenderLoop();
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
        /// <summary>
        /// Consumes and executes queued commands on the calling (render) thread
        /// until the GPU thread finishes or this renderer is disposed.
        /// </summary>
        public void RenderLoop()
        {
            // Power through the render queue until the Gpu thread work is done.

            while (_running && !_disposed)
            {
                _galWorkAvailable.Wait();
                _galWorkAvailable.Reset();

                // The other thread can only increase the command count.
                // We can assume that if it is above 0, it will stay there or get higher.

                while (_commandCount > 0)
                {
                    int commandPtr = _consumerPtr;

                    // View of this command's fixed-size slot in the queue.
                    Span<byte> command = new Span<byte>(_commandQueue, commandPtr * _elementSize, _elementSize);

                    // Run the command.

                    CommandHelper.RunCommand(command, this, _baseRenderer);

                    // If a caller is blocked in InvokeCommand waiting on this exact slot,
                    // swap the marker back to -1 and wake it.
                    if (Interlocked.CompareExchange(ref _invokePtr, -1, commandPtr) == commandPtr)
                    {
                        _invokeRun.Set();
                    }

                    _consumerPtr = (_consumerPtr + 1) % QueueCount;

                    // Decrement only after the slot is fully consumed, so the producer
                    // cannot reuse it early.
                    Interlocked.Decrement(ref _commandCount);
                }
            }
        }
|
|
|
|
|
|
|
|
|
|
internal SpanRef<T> CopySpan<T>(ReadOnlySpan<T> data) where T : unmanaged
|
|
|
|
|
{
|
|
|
|
|
return _spanPool.Insert(data);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
private TableRef<T> Ref<T>(T reference)
|
|
|
|
|
{
|
|
|
|
|
return new TableRef<T>(this, reference);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
        /// <summary>
        /// Allocates the next slot in the command queue, blocking while the queue is full,
        /// and returns a reference to the slot reinterpreted as the command struct type.
        /// The command's type id is written into the slot's last byte.
        /// </summary>
        internal ref T New<T>() where T : struct
        {
            while (_producerPtr == (_consumerPtr + QueueCount - 1) % QueueCount)
            {
                // If incrementing the producer pointer would overflow, we need to wait.
                // _consumerPtr can only move forward, so there's no race to worry about here.

                Thread.Sleep(1);
            }

            int taken = _producerPtr;
            _lastProducedPtr = taken;

            _producerPtr = (_producerPtr + 1) % QueueCount;

            Span<byte> memory = new Span<byte>(_commandQueue, taken * _elementSize, _elementSize);
            ref T result = ref Unsafe.As<byte, T>(ref MemoryMarshal.GetReference(memory));

            // Tag the slot with the command's type id, read back by the consumer.
            // NOTE(review): the slot contents are stale at this point, so CommandType is
            // presumably a per-type constant — confirm against IGALCommand implementations.
            memory[memory.Length - 1] = (byte)((IGALCommand)result).CommandType;

            return ref result;
        }
|
|
|
|
|
|
|
|
|
|
internal int AddTableRef(object obj)
|
|
|
|
|
{
|
|
|
|
|
// The reference table is sized so that it will never overflow, so long as the references are taken after the command is allocated.
|
|
|
|
|
|
|
|
|
|
int index = _refProducerPtr;
|
|
|
|
|
|
|
|
|
|
_refQueue[index] = obj;
|
|
|
|
|
|
|
|
|
|
_refProducerPtr = (_refProducerPtr + 1) % _refQueue.Length;
|
|
|
|
|
|
|
|
|
|
return index;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
internal object RemoveTableRef(int index)
|
|
|
|
|
{
|
|
|
|
|
Debug.Assert(index == _refConsumerPtr);
|
|
|
|
|
|
|
|
|
|
object result = _refQueue[_refConsumerPtr];
|
|
|
|
|
_refQueue[_refConsumerPtr] = null;
|
|
|
|
|
|
|
|
|
|
_refConsumerPtr = (_refConsumerPtr + 1) % _refQueue.Length;
|
|
|
|
|
|
|
|
|
|
return result;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
internal void QueueCommand()
|
|
|
|
|
{
|
|
|
|
|
int result = Interlocked.Increment(ref _commandCount);
|
|
|
|
|
|
|
|
|
|
if (result == 1)
|
|
|
|
|
{
|
|
|
|
|
_galWorkAvailable.Set();
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
        /// <summary>
        /// Queues the most recently created command and blocks until the render thread
        /// has finished executing it. Used for commands that produce a result.
        /// </summary>
        internal void InvokeCommand()
        {
            _invokeRun.Reset();
            // Mark the slot we want to be woken for; the consumer CompareExchanges this back to -1.
            _invokePtr = _lastProducedPtr;

            QueueCommand();

            // Wait for the command to complete.
            _invokeRun.Wait();
        }
|
|
|
|
|
|
|
|
|
|
internal void WaitForFrame()
|
|
|
|
|
{
|
|
|
|
|
_frameComplete.WaitOne();
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
internal void SignalFrame()
|
|
|
|
|
{
|
|
|
|
|
_frameComplete.Set();
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
internal bool IsGpuThread()
|
|
|
|
|
{
|
|
|
|
|
return Thread.CurrentThread == _gpuThread;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
public void BackgroundContextAction(Action action, bool alwaysBackground = false)
|
|
|
|
|
{
|
|
|
|
|
if (IsGpuThread() && !alwaysBackground)
|
|
|
|
|
{
|
|
|
|
|
// The action must be performed on the render thread.
|
|
|
|
|
New<ActionCommand>().Set(Ref(action));
|
|
|
|
|
InvokeCommand();
|
|
|
|
|
}
|
|
|
|
|
else
|
|
|
|
|
{
|
|
|
|
|
_baseRenderer.BackgroundContextAction(action, true);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
public BufferHandle CreateBuffer(int size)
|
|
|
|
|
{
|
|
|
|
|
BufferHandle handle = Buffers.CreateBufferHandle();
|
|
|
|
|
New<CreateBufferCommand>().Set(handle, size);
|
|
|
|
|
QueueCommand();
|
|
|
|
|
|
|
|
|
|
return handle;
|
|
|
|
|
}
|
|
|
|
|
|
New shader cache implementation (#3194)
* New shader cache implementation
* Remove some debug code
* Take transform feedback varying count into account
* Create shader cache directory if it does not exist + fragment output map related fixes
* Remove debug code
* Only check texture descriptors if the constant buffer is bound
* Also check CPU VA on GetSpanMapped
* Remove more unused code and move cache related code
* XML docs + remove more unused methods
* Better codegen for TransformFeedbackDescriptor.AsSpan
* Support migration from old cache format, remove more unused code
Shader cache rebuild now also rewrites the shared toc and data files
* Fix migration error with BRX shaders
* Add a limit to the async translation queue
Avoid async translation threads not being able to keep up and the queue growing very large
* Re-create specialization state on recompile
This might be required if a new version of the shader translator requires more or less state, or if there is a bug related to the GPU state access
* Make shader cache more error resilient
* Add some missing XML docs and move GpuAccessor docs to the interface/use inheritdoc
* Address early PR feedback
* Fix rebase
* Remove IRenderer.CompileShader and IShader interface, replace with new ShaderSource struct passed to CreateProgram directly
* Handle some missing exceptions
* Make shader cache purge delete both old and new shader caches
* Register textures on new specialization state
* Translate and compile shaders in forward order (eliminates diffs due to different binding numbers)
* Limit in-flight shader compilation to the maximum number of compilation threads
* Replace ParallelDiskCacheLoader state changed event with a callback function
* Better handling for invalid constant buffer 1 data length
* Do not create the old cache directory structure if the old cache does not exist
* Constant buffer use should be per-stage. This change will invalidate existing new caches (file format version was incremented)
* Replace rectangle texture with just coordinate normalization
* Skip incompatible shaders that are missing texture information, instead of crashing
This is required if we, for example, support new texture instruction to the shader translator, and then they allow access to textures that were not accessed before. In this scenario, the old cache entry is no longer usable
* Fix coordinates normalization on cubemap textures
* Check if title ID is null before combining shader cache path
* More robust constant buffer address validation on spec state
* More robust constant buffer address validation on spec state (2)
* Regenerate shader cache with one stream, rather than one per shader.
* Only create shader cache directory during initialization
* Logging improvements
* Proper shader program disposal
* PR feedback, and add a comment on serialized structs
* XML docs for RegisterTexture
Co-authored-by: riperiperi <rhy3756547@hotmail.com>
2022-04-10 09:49:44 -04:00
|
|
|
|
public IProgram CreateProgram(ShaderSource[] shaders, ShaderInfo info)
|
Add a Multithreading layer for the GAL, multi-thread shader compilation at runtime (#2501)
* Initial Implementation
About as fast as nvidia GL multithreading, can be improved with faster command queuing.
* Struct based command list
Speeds up a bit. Still a lot of time lost to resource copy.
* Do shader init while the render thread is active.
* Introduce circular span pool V1
Ideally should be able to use structs instead of references for storing these spans on commands. Will try that next.
* Refactor SpanRef some more
Use a struct to represent SpanRef, rather than a reference.
* Flush buffers on background thread
* Use a span for UpdateRenderScale.
Much faster than copying the array.
* Calculate command size using reflection
* WIP parallel shaders
* Some minor optimisation
* Only 2 max refs per command now.
The command with 3 refs is gone. :relieved:
* Don't cast on the GPU side
* Remove redundant casts, force sync on window present
* Fix Shader Cache
* Fix host shader save.
* Fixup to work with new renderer stuff
* Make command Run static, use array of delegates as lookup
Profile says this takes less time than the previous way.
* Bring up to date
* Add settings toggle. Fix Muiltithreading Off mode.
* Fix warning.
* Release tracking lock for flushes
* Fix Conditional Render fast path with threaded gal
* Make handle iteration safe when releasing the lock
This is mostly temporary.
* Attempt to set backend threading on driver
Only really works on nvidia before launching a game.
* Fix race condition with BufferModifiedRangeList, exceptions in tracking actions
* Update buffer set commands
* Some cleanup
* Only use stutter workaround when using opengl renderer non-threaded
* Add host-conditional reservation of counter events
There has always been the possibility that conditional rendering could use a query object just as it is disposed by the counter queue. This change makes it so that when the host decides to use host conditional rendering, the query object is reserved so that it cannot be deleted. Counter events can optionally start reserved, as the threaded implementation can reserve them before the backend creates them, and there would otherwise be a short amount of time where the counter queue could dispose the event before a call to reserve it could be made.
* Address Feedback
* Make counter flush tracked again.
Hopefully does not cause any issues this time.
* Wait for FlushTo on the main queue thread.
Currently assumes only one thread will want to FlushTo (in this case, the GPU thread)
* Add SDL2 headless integration
* Add HLE macro commands.
Co-authored-by: Mary <mary@mary.zone>
2021-08-26 18:31:29 -04:00
|
|
|
|
{
|
|
|
|
|
var program = new ThreadedProgram(this);
|
2022-02-16 17:15:39 -05:00
|
|
|
|
SourceProgramRequest request = new SourceProgramRequest(program, shaders, info);
|
Add a Multithreading layer for the GAL, multi-thread shader compilation at runtime (#2501)
* Initial Implementation
About as fast as nvidia GL multithreading, can be improved with faster command queuing.
* Struct based command list
Speeds up a bit. Still a lot of time lost to resource copy.
* Do shader init while the render thread is active.
* Introduce circular span pool V1
Ideally should be able to use structs instead of references for storing these spans on commands. Will try that next.
* Refactor SpanRef some more
Use a struct to represent SpanRef, rather than a reference.
* Flush buffers on background thread
* Use a span for UpdateRenderScale.
Much faster than copying the array.
* Calculate command size using reflection
* WIP parallel shaders
* Some minor optimisation
* Only 2 max refs per command now.
The command with 3 refs is gone. :relieved:
* Don't cast on the GPU side
* Remove redundant casts, force sync on window present
* Fix Shader Cache
* Fix host shader save.
* Fixup to work with new renderer stuff
* Make command Run static, use array of delegates as lookup
Profile says this takes less time than the previous way.
* Bring up to date
* Add settings toggle. Fix Muiltithreading Off mode.
* Fix warning.
* Release tracking lock for flushes
* Fix Conditional Render fast path with threaded gal
* Make handle iteration safe when releasing the lock
This is mostly temporary.
* Attempt to set backend threading on driver
Only really works on nvidia before launching a game.
* Fix race condition with BufferModifiedRangeList, exceptions in tracking actions
* Update buffer set commands
* Some cleanup
* Only use stutter workaround when using opengl renderer non-threaded
* Add host-conditional reservation of counter events
There has always been the possibility that conditional rendering could use a query object just as it is disposed by the counter queue. This change makes it so that when the host decides to use host conditional rendering, the query object is reserved so that it cannot be deleted. Counter events can optionally start reserved, as the threaded implementation can reserve them before the backend creates them, and there would otherwise be a short amount of time where the counter queue could dispose the event before a call to reserve it could be made.
* Address Feedback
* Make counter flush tracked again.
Hopefully does not cause any issues this time.
* Wait for FlushTo on the main queue thread.
Currently assumes only one thread will want to FlushTo (in this case, the GPU thread)
* Add SDL2 headless integration
* Add HLE macro commands.
Co-authored-by: Mary <mary@mary.zone>
2021-08-26 18:31:29 -04:00
|
|
|
|
Programs.Add(request);
|
|
|
|
|
|
|
|
|
|
New<CreateProgramCommand>().Set(Ref((IProgramRequest)request));
|
|
|
|
|
QueueCommand();
|
|
|
|
|
|
|
|
|
|
return program;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
public ISampler CreateSampler(SamplerCreateInfo info)
|
|
|
|
|
{
|
|
|
|
|
var sampler = new ThreadedSampler(this);
|
|
|
|
|
New<CreateSamplerCommand>().Set(Ref(sampler), info);
|
|
|
|
|
QueueCommand();
|
|
|
|
|
|
|
|
|
|
return sampler;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
public void CreateSync(ulong id)
|
|
|
|
|
{
|
|
|
|
|
Sync.CreateSyncHandle(id);
|
|
|
|
|
New<CreateSyncCommand>().Set(id);
|
|
|
|
|
QueueCommand();
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
public ITexture CreateTexture(TextureCreateInfo info, float scale)
|
|
|
|
|
{
|
|
|
|
|
if (IsGpuThread())
|
|
|
|
|
{
|
|
|
|
|
var texture = new ThreadedTexture(this, info, scale);
|
|
|
|
|
New<CreateTextureCommand>().Set(Ref(texture), info, scale);
|
|
|
|
|
QueueCommand();
|
|
|
|
|
|
|
|
|
|
return texture;
|
|
|
|
|
}
|
|
|
|
|
else
|
|
|
|
|
{
|
|
|
|
|
var texture = new ThreadedTexture(this, info, scale);
|
|
|
|
|
texture.Base = _baseRenderer.CreateTexture(info, scale);
|
|
|
|
|
|
|
|
|
|
return texture;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
public void DeleteBuffer(BufferHandle buffer)
|
|
|
|
|
{
|
|
|
|
|
New<BufferDisposeCommand>().Set(buffer);
|
|
|
|
|
QueueCommand();
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
public ReadOnlySpan<byte> GetBufferData(BufferHandle buffer, int offset, int size)
|
|
|
|
|
{
|
|
|
|
|
if (IsGpuThread())
|
|
|
|
|
{
|
|
|
|
|
ResultBox<PinnedSpan<byte>> box = new ResultBox<PinnedSpan<byte>>();
|
|
|
|
|
New<BufferGetDataCommand>().Set(buffer, offset, size, Ref(box));
|
|
|
|
|
InvokeCommand();
|
|
|
|
|
|
|
|
|
|
return box.Result.Get();
|
|
|
|
|
}
|
|
|
|
|
else
|
|
|
|
|
{
|
|
|
|
|
return _baseRenderer.GetBufferData(Buffers.MapBufferBlocking(buffer), offset, size);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
public Capabilities GetCapabilities()
|
|
|
|
|
{
|
|
|
|
|
ResultBox<Capabilities> box = new ResultBox<Capabilities>();
|
|
|
|
|
New<GetCapabilitiesCommand>().Set(Ref(box));
|
|
|
|
|
InvokeCommand();
|
|
|
|
|
|
|
|
|
|
return box.Result;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/// <summary>
|
|
|
|
|
/// Initialize the base renderer. Must be called on the render thread.
|
|
|
|
|
/// </summary>
|
|
|
|
|
/// <param name="logLevel">Log level to use</param>
|
|
|
|
|
public void Initialize(GraphicsDebugLevel logLevel)
|
|
|
|
|
{
|
|
|
|
|
_baseRenderer.Initialize(logLevel);
|
|
|
|
|
}
|
|
|
|
|
|
2022-02-16 17:15:39 -05:00
|
|
|
|
public IProgram LoadProgramBinary(byte[] programBinary, bool hasFragmentShader, ShaderInfo info)
|
Add a Multithreading layer for the GAL, multi-thread shader compilation at runtime (#2501)
* Initial Implementation
About as fast as nvidia GL multithreading, can be improved with faster command queuing.
* Struct based command list
Speeds up a bit. Still a lot of time lost to resource copy.
* Do shader init while the render thread is active.
* Introduce circular span pool V1
Ideally should be able to use structs instead of references for storing these spans on commands. Will try that next.
* Refactor SpanRef some more
Use a struct to represent SpanRef, rather than a reference.
* Flush buffers on background thread
* Use a span for UpdateRenderScale.
Much faster than copying the array.
* Calculate command size using reflection
* WIP parallel shaders
* Some minor optimisation
* Only 2 max refs per command now.
The command with 3 refs is gone. :relieved:
* Don't cast on the GPU side
* Remove redundant casts, force sync on window present
* Fix Shader Cache
* Fix host shader save.
* Fixup to work with new renderer stuff
* Make command Run static, use array of delegates as lookup
Profile says this takes less time than the previous way.
* Bring up to date
* Add settings toggle. Fix Muiltithreading Off mode.
* Fix warning.
* Release tracking lock for flushes
* Fix Conditional Render fast path with threaded gal
* Make handle iteration safe when releasing the lock
This is mostly temporary.
* Attempt to set backend threading on driver
Only really works on nvidia before launching a game.
* Fix race condition with BufferModifiedRangeList, exceptions in tracking actions
* Update buffer set commands
* Some cleanup
* Only use stutter workaround when using opengl renderer non-threaded
* Add host-conditional reservation of counter events
There has always been the possibility that conditional rendering could use a query object just as it is disposed by the counter queue. This change makes it so that when the host decides to use host conditional rendering, the query object is reserved so that it cannot be deleted. Counter events can optionally start reserved, as the threaded implementation can reserve them before the backend creates them, and there would otherwise be a short amount of time where the counter queue could dispose the event before a call to reserve it could be made.
* Address Feedback
* Make counter flush tracked again.
Hopefully does not cause any issues this time.
* Wait for FlushTo on the main queue thread.
Currently assumes only one thread will want to FlushTo (in this case, the GPU thread)
* Add SDL2 headless integration
* Add HLE macro commands.
Co-authored-by: Mary <mary@mary.zone>
2021-08-26 18:31:29 -04:00
|
|
|
|
{
|
|
|
|
|
var program = new ThreadedProgram(this);
|
|
|
|
|
|
2022-02-16 17:15:39 -05:00
|
|
|
|
BinaryProgramRequest request = new BinaryProgramRequest(program, programBinary, hasFragmentShader, info);
|
Add a Multithreading layer for the GAL, multi-thread shader compilation at runtime (#2501)
* Initial Implementation
About as fast as nvidia GL multithreading, can be improved with faster command queuing.
* Struct based command list
Speeds up a bit. Still a lot of time lost to resource copy.
* Do shader init while the render thread is active.
* Introduce circular span pool V1
Ideally should be able to use structs instead of references for storing these spans on commands. Will try that next.
* Refactor SpanRef some more
Use a struct to represent SpanRef, rather than a reference.
* Flush buffers on background thread
* Use a span for UpdateRenderScale.
Much faster than copying the array.
* Calculate command size using reflection
* WIP parallel shaders
* Some minor optimisation
* Only 2 max refs per command now.
The command with 3 refs is gone. :relieved:
* Don't cast on the GPU side
* Remove redundant casts, force sync on window present
* Fix Shader Cache
* Fix host shader save.
* Fixup to work with new renderer stuff
* Make command Run static, use array of delegates as lookup
Profile says this takes less time than the previous way.
* Bring up to date
* Add settings toggle. Fix Muiltithreading Off mode.
* Fix warning.
* Release tracking lock for flushes
* Fix Conditional Render fast path with threaded gal
* Make handle iteration safe when releasing the lock
This is mostly temporary.
* Attempt to set backend threading on driver
Only really works on nvidia before launching a game.
* Fix race condition with BufferModifiedRangeList, exceptions in tracking actions
* Update buffer set commands
* Some cleanup
* Only use stutter workaround when using opengl renderer non-threaded
* Add host-conditional reservation of counter events
There has always been the possibility that conditional rendering could use a query object just as it is disposed by the counter queue. This change makes it so that when the host decides to use host conditional rendering, the query object is reserved so that it cannot be deleted. Counter events can optionally start reserved, as the threaded implementation can reserve them before the backend creates them, and there would otherwise be a short amount of time where the counter queue could dispose the event before a call to reserve it could be made.
* Address Feedback
* Make counter flush tracked again.
Hopefully does not cause any issues this time.
* Wait for FlushTo on the main queue thread.
Currently assumes only one thread will want to FlushTo (in this case, the GPU thread)
* Add SDL2 headless integration
* Add HLE macro commands.
Co-authored-by: Mary <mary@mary.zone>
2021-08-26 18:31:29 -04:00
|
|
|
|
Programs.Add(request);
|
|
|
|
|
|
|
|
|
|
New<CreateProgramCommand>().Set(Ref((IProgramRequest)request));
|
|
|
|
|
QueueCommand();
|
|
|
|
|
|
|
|
|
|
return program;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
public void PreFrame()
|
|
|
|
|
{
|
|
|
|
|
New<PreFrameCommand>();
|
|
|
|
|
QueueCommand();
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
public ICounterEvent ReportCounter(CounterType type, EventHandler<ulong> resultHandler, bool hostReserved)
|
|
|
|
|
{
|
|
|
|
|
ThreadedCounterEvent evt = new ThreadedCounterEvent(this, type, _lastSampleCounterClear);
|
|
|
|
|
New<ReportCounterCommand>().Set(Ref(evt), type, Ref(resultHandler), hostReserved);
|
|
|
|
|
QueueCommand();
|
|
|
|
|
|
|
|
|
|
if (type == CounterType.SamplesPassed)
|
|
|
|
|
{
|
|
|
|
|
_lastSampleCounterClear = false;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return evt;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
public void ResetCounter(CounterType type)
|
|
|
|
|
{
|
|
|
|
|
New<ResetCounterCommand>().Set(type);
|
|
|
|
|
QueueCommand();
|
|
|
|
|
_lastSampleCounterClear = true;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
public void Screenshot()
|
|
|
|
|
{
|
|
|
|
|
_baseRenderer.Screenshot();
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
public void SetBufferData(BufferHandle buffer, int offset, ReadOnlySpan<byte> data)
|
|
|
|
|
{
|
|
|
|
|
New<BufferSetDataCommand>().Set(buffer, offset, CopySpan(data));
|
|
|
|
|
QueueCommand();
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
public void UpdateCounters()
|
|
|
|
|
{
|
|
|
|
|
New<UpdateCountersCommand>();
|
|
|
|
|
QueueCommand();
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
        /// <summary>
        /// Waits on a sync object directly on the calling thread.
        /// </summary>
        public void WaitSync(ulong id)
        {
            // Presumably blocks until the sync object with this id has been created by
            // the render thread (see CreateSync) — confirm against SyncMap.
            Sync.WaitSyncAvailability(id);

            _baseRenderer.WaitSync(id);
        }
|
|
|
|
|
|
|
|
|
|
        /// <summary>
        /// Shuts down the threaded layer and disposes the base renderer.
        /// </summary>
        public void Dispose()
        {
            // Dispose must happen from the render thread, after all commands have completed.

            // Stop the GPU thread.
            _disposed = true;
            _gpuThread.Join();

            // Dispose the renderer.
            _baseRenderer.Dispose();

            // Dispose events.
            _frameComplete.Dispose();
            _galWorkAvailable.Dispose();
            _invokeRun.Dispose();

            Sync.Dispose();
        }
|
|
|
|
|
}
|
|
|
|
|
}
|