IPC refactor part 1: Use explicit separate threads to process requests (#1447)
* Changes to allow explicit management of service threads
* Remove now unused code
* Remove ThreadCounter, it's no longer needed
* Allow and use separate server per service, also fix exit issues
* New policy change: PTC version now uses PR number
parent 5dd6f41ff4
commit 6c9565693f

18 changed files with 138 additions and 135 deletions
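At a high level, the change replaces per-request ThreadPool dispatch with one explicitly managed thread per service server: the guest thread pushes its IPC message to the service's server object and is paused, and the server's dedicated thread processes messages in order and wakes the caller. A minimal sketch of that pattern, using a hypothetical IpcServer/Request pair (illustrative names only, not the PR's actual server types):

    using System.Collections.Concurrent;
    using System.Threading;

    class IpcServer
    {
        // Pending IPC messages for this service, drained by the dedicated thread.
        private readonly BlockingCollection<Request> _requests = new BlockingCollection<Request>();
        private readonly Thread _thread;

        public IpcServer(string serviceName)
        {
            // One explicitly managed thread per service server.
            _thread = new Thread(Loop) { Name = serviceName, IsBackground = true };
            _thread.Start();
        }

        // Called from the guest thread issuing a sync request.
        public void PushMessage(Request request) => _requests.Add(request);

        private void Loop()
        {
            // Blocks until a message arrives; exits once CompleteAdding is called.
            foreach (Request request in _requests.GetConsumingEnumerable())
            {
                request.Process();
            }
        }

        // Lets the thread drain remaining messages and exit cleanly (cf. the exit fixes).
        public void Shutdown() => _requests.CompleteAdding();
    }

    class Request
    {
        public void Process() { /* decode the IPC message, call the service, reply */ }
    }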
@@ -24,8 +24,6 @@ namespace Ryujinx.HLE.HOS.Kernel
         public Syscall Syscall { get; }

         public SyscallHandler SyscallHandler { get; }

-        public CountdownEvent ThreadCounter { get; }
-
         public KResourceLimit ResourceLimit { get; }

         public KMemoryRegionManager[] MemoryRegions { get; }
@@ -57,8 +55,6 @@ namespace Ryujinx.HLE.HOS.Kernel
             SyscallHandler = new SyscallHandler(this);

-            ThreadCounter = new CountdownEvent(1);
-
             ResourceLimit = new KResourceLimit(this);

             KernelInit.InitializeResourceLimit(ResourceLimit);
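For reference, the ThreadCounter being deleted here was a CountdownEvent initialized to 1 that tracked live guest threads so shutdown could block until all of them had exited (see the KProcess hunks below). A generic illustration of that pattern, not the removed Ryujinx code:

    using System.Threading;

    // Start at 1 so the count cannot transiently reach zero while
    // threads are still being registered.
    var counter = new CountdownEvent(1);

    counter.AddCount();   // a guest thread starts
    counter.Signal();     // that guest thread exits

    counter.Signal();     // release the initial count at shutdown
    counter.Wait();       // block until every registered thread has signaled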
@@ -791,19 +791,16 @@ namespace Ryujinx.HLE.HOS.Kernel.Process
         private void InterruptHandler(object sender, EventArgs e)
         {
             KernelContext.Scheduler.ContextSwitch();
             KernelContext.Scheduler.GetCurrentThread().HandlePostSyscall();
         }

         public void IncrementThreadCount()
         {
             Interlocked.Increment(ref _threadCount);
-
-            KernelContext.ThreadCounter.AddCount();
         }

         public void DecrementThreadCountAndTerminateIfZero()
         {
-            KernelContext.ThreadCounter.Signal();
-
             if (Interlocked.Decrement(ref _threadCount) == 0)
             {
                 Terminate();
@@ -812,8 +809,6 @@ namespace Ryujinx.HLE.HOS.Kernel.Process
         public void DecrementToZeroWhileTerminatingCurrent()
         {
-            KernelContext.ThreadCounter.Signal();
-
             while (Interlocked.Decrement(ref _threadCount) != 0)
             {
                 Destroy();
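With the global counter gone, termination is driven solely by the per-process _threadCount. Interlocked.Decrement returns the decremented value atomically, so even if several threads exit at once, exactly one observes the transition to zero and runs Terminate. A self-contained illustration:

    using System.Threading;

    int threadCount = 3;

    // Each exiting thread runs this; only the last one sees 0.
    if (Interlocked.Decrement(ref threadCount) == 0)
    {
        // Termination path: runs exactly once.
    }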
@@ -1000,24 +995,29 @@ namespace Ryujinx.HLE.HOS.Kernel.Process
                 KernelContext.CriticalSection.Leave();
             }

-            KThread blockedThread = null;
-
-            lock (_threadingLock)
-            {
-                foreach (KThread thread in _threads)
-                {
-                    if (thread != currentThread && (thread.SchedFlags & ThreadSchedState.LowMask) != ThreadSchedState.TerminationPending)
-                    {
-                        thread.IncrementReferenceCount();
-
-                        blockedThread = thread;
-                        break;
-                    }
-                }
-            }
-
-            if (blockedThread != null)
-            {
-                blockedThread.Terminate();
-                blockedThread.DecrementReferenceCount();
-            }
+            while (true)
+            {
+                KThread blockedThread = null;
+
+                lock (_threadingLock)
+                {
+                    foreach (KThread thread in _threads)
+                    {
+                        if (thread != currentThread && (thread.SchedFlags & ThreadSchedState.LowMask) != ThreadSchedState.TerminationPending)
+                        {
+                            thread.IncrementReferenceCount();
+
+                            blockedThread = thread;
+                            break;
+                        }
+                    }
+                }
+
+                if (blockedThread == null)
+                {
+                    break;
+                }
+
+                blockedThread.Terminate();
+                blockedThread.DecrementReferenceCount();
+            }
@@ -2,14 +2,12 @@
 using Ryujinx.Common.Logging;
 using Ryujinx.Cpu;
 using Ryujinx.HLE.Exceptions;
-using Ryujinx.HLE.HOS.Ipc;
 using Ryujinx.HLE.HOS.Kernel.Common;
 using Ryujinx.HLE.HOS.Kernel.Ipc;
 using Ryujinx.HLE.HOS.Kernel.Memory;
 using Ryujinx.HLE.HOS.Kernel.Process;
 using Ryujinx.HLE.HOS.Kernel.Threading;
 using System;
-using System.Threading;

 namespace Ryujinx.HLE.HOS.Kernel.SupervisorCall
 {
@ -26,29 +24,6 @@ namespace Ryujinx.HLE.HOS.Kernel.SupervisorCall
|
|||
|
||||
// IPC
|
||||
|
||||
private struct HleIpcMessage
|
||||
{
|
||||
public KProcess Process { get; }
|
||||
public KThread Thread { get; }
|
||||
public KClientSession Session { get; }
|
||||
public IpcMessage Message { get; }
|
||||
public long MessagePtr { get; }
|
||||
|
||||
public HleIpcMessage(
|
||||
KProcess process,
|
||||
KThread thread,
|
||||
KClientSession session,
|
||||
IpcMessage message,
|
||||
long messagePtr)
|
||||
{
|
||||
Process = process;
|
||||
Thread = thread;
|
||||
Session = session;
|
||||
Message = message;
|
||||
MessagePtr = messagePtr;
|
||||
}
|
||||
}
|
||||
|
||||
public KernelResult ConnectToNamedPort(ulong namePtr, out int handle)
|
||||
{
|
||||
handle = 0;
|
||||
|
@@ -135,16 +110,7 @@ namespace Ryujinx.HLE.HOS.Kernel.SupervisorCall
             currentThread.Reschedule(ThreadSchedState.Paused);

-            IpcMessage message = new IpcMessage(messageData, (long)messagePtr);
-
-            ThreadPool.QueueUserWorkItem(ProcessIpcRequest, new HleIpcMessage(
-                process,
-                currentThread,
-                clientSession,
-                message,
-                (long)messagePtr));
-
-            _context.ThreadCounter.AddCount();
+            clientSession.Service.Server.PushMessage(_device, currentThread, clientSession, messagePtr, messageSize);

             _context.CriticalSection.Leave();
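The new call parks the guest thread with Reschedule(Paused), hands the message to the service's server, and relies on the server thread to publish the result and reschedule the caller, which is what the removed ProcessIpcRequest (next hunk) did from the ThreadPool. A minimal stand-in for that pause/resume contract, with an event playing the role of the scheduler:

    using System.Threading;
    using System.Threading.Tasks;

    long syncResult = -1;
    var resumed = new ManualResetEventSlim();

    // Server thread: process the request, publish the result, wake the client.
    Task.Run(() =>
    {
        Interlocked.Exchange(ref syncResult, 0);  // cf. thread.ObjSyncResult = result
        resumed.Set();                            // cf. Reschedule(ThreadSchedState.Running)
    });

    resumed.Wait();                               // cf. Reschedule(ThreadSchedState.Paused)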
@@ -158,24 +124,6 @@ namespace Ryujinx.HLE.HOS.Kernel.SupervisorCall
             }
         }

-        private void ProcessIpcRequest(object state)
-        {
-            HleIpcMessage ipcMessage = (HleIpcMessage)state;
-
-            ipcMessage.Thread.ObjSyncResult = IpcHandler.IpcCall(
-                _device,
-                ipcMessage.Process,
-                ipcMessage.Process.CpuMemory,
-                ipcMessage.Thread,
-                ipcMessage.Session,
-                ipcMessage.Message,
-                ipcMessage.MessagePtr);
-
-            _context.ThreadCounter.Signal();
-
-            ipcMessage.Thread.Reschedule(ThreadSchedState.Running);
-        }
-
         private KernelResult SendSyncRequest(int handle)
         {
             KProcess currentProcess = _context.Scheduler.GetCurrentProcess();
@@ -348,6 +348,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Threading
             if ((SchedFlags & ThreadSchedState.LowMask) == ThreadSchedState.Running)
             {
                 // TODO: GIC distributor stuffs (sgir changes ect)
+                Context.RequestInterrupt();
             }

             SignaledObj = null;
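The added Context.RequestInterrupt() covers a subtlety in terminating a thread that is in the Running state: it only notices TerminationPending when it re-enters the kernel, so its CPU context must be interrupted to force that re-entry. A generic flag-based illustration of the idea (hypothetical type, not Ryujinx's actual execution context):

    using System.Threading;

    class CpuContext
    {
        private int _interruptRequested;

        // Another core asks this one to break out of guest code.
        public void RequestInterrupt() =>
            Interlocked.Exchange(ref _interruptRequested, 1);

        // Polled at safe points in the dispatch loop; true once per request.
        public bool ConsumeInterrupt() =>
            Interlocked.Exchange(ref _interruptRequested, 0) == 1;
    }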