Reduce JIT GC allocations (#2515)

* Turn `MemoryOperand` into a struct

* Remove `IntrinsicOperation`

* Remove `PhiNode`

* Remove `Node`

* Turn `Operand` into a struct

* Turn `Operation` into a struct

* Clean up pool management methods

* Add `Arena` allocator

* Move `OperationHelper` to `Operation.Factory`

* Move `OperandHelper` to `Operand.Factory`

* Optimize `Operation` a bit

* Fix `Arena` initialization

* Rename `NativeList<T>` to `ArenaList<T>`

* Reduce `Operand` size from 88 to 56 bytes

* Reduce `Operation` size from 56 to 40 bytes

* Add optimistic interning of Register & Constant operands

* Optimize `RegisterUsage` pass a bit

* Optimize `RemoveUnusedNodes` pass a bit

Iterating in reverse order allows killing entire dependency chains in a
single pass.
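
A minimal sketch of the idea, using hypothetical `Node`/`Block` types
rather than ARMeilleure's actual IR classes:

    // Walk the block backwards; removing a dead node lowers the use
    // counts of its sources, so they are already dead by the time the
    // walk reaches them and the whole chain dies in one pass.
    for (Node node = block.Last; node != null;)
    {
        Node previous = node.Previous;

        if (node.UseCount == 0 && !node.HasSideEffects)
        {
            foreach (Node source in node.Sources)
            {
                source.UseCount--;
            }

            block.Remove(node);
        }

        node = previous;
    }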

* Fix PPTC symbols

* Optimize `BasicBlock` a bit

Reduce allocations from `_successor` & `DominanceFrontiers`

* Fix `Operation` resize

* Make `Arena` expandable

Change the arena allocator to be expandable by allocating in pages, with
some of them being pooled. Currently 32 pages are pooled. An LRU removal
mechanism should probably be added to it.

Apparently MHR can allocate bitmaps large enough to exceed the 16MB
limit for the type.

* Move `Arena` & `ArenaList` to `Common`

* Remove `ThreadStaticPool` & co

* Add `PhiOperation`

* Reduce `Operand` size from 56 to 48 bytes

* Add linear-probing to `Operand` intern table
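
A sketch of what optimistic interning with linear probing can look like;
the table size, probe length, hash and element types below are
illustrative assumptions, not the PR's actual code. "Optimistic" means a
miss simply overwrites a slot: the worst case is a duplicate value,
never a wrong result.

    using System;

    sealed class InternTable
    {
        private const int Size = 256;    // Assumed: a power of two.
        private const int MaxProbes = 4; // Assumed probe length.

        private readonly ulong[] _keys = new ulong[Size];
        private readonly object[] _values = new object[Size];

        public object Intern(ulong key, Func<ulong, object> create)
        {
            // Multiplicative hash; the top 8 bits index the 256 slots.
            int home = (int)((key * 0x9E3779B97F4A7C15ul) >> 56);

            // Linear probing: check a few consecutive slots for a hit.
            for (int i = 0; i < MaxProbes; i++)
            {
                int slot = (home + i) & (Size - 1);

                if (_values[slot] != null && _keys[slot] == key)
                {
                    return _values[slot];
                }
            }

            // Miss: optimistically overwrite the home slot.
            object value = create(key);

            _keys[home] = key;
            _values[home] = value;

            return value;
        }
    }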

* Optimize `HybridAllocator` a bit

* Add `Allocators` class

* Tune `ArenaAllocator` sizes

* Add page removal mechanism to `ArenaAllocator`

Remove pages which have not been used for more than 5s after each reset.

I am on the fence about whether this would be better done with a Gen2
callback object, like the one in System.Buffers.ArrayPool<T>, to trim
the pool. Right now, if a large translation happens, the pages are freed
only after a reset, and that reset may not happen for a while if no new
translation is hit; the arena base sizes, however, are rather small.
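
For reference, the pattern in question is the finalizer-resurrection
trick used internally by `System.Buffers.ArrayPool<T>`. The sketch below
shows its shape and is not code from this PR: an object whose finalizer
runs on every Gen2 collection and invokes a trim callback,
re-registering itself for finalization as long as the callback asks to
keep running.

    using System;

    sealed class Gen2GcCallback
    {
        private readonly Func<bool> _callback;

        private Gen2GcCallback(Func<bool> callback)
        {
            _callback = callback;
        }

        public static void Register(Func<bool> callback)
        {
            // Deliberately unrooted; it lives on through resurrection.
            _ = new Gen2GcCallback(callback);
        }

        ~Gen2GcCallback()
        {
            // Returning true means "keep trimming": re-register so the
            // finalizer runs again on the next Gen2 collection.
            if (!Environment.HasShutdownStarted && _callback())
            {
                GC.ReRegisterForFinalize(this);
            }
        }
    }

    // Hypothetical usage:
    // Gen2GcCallback.Register(() => { arena.Trim(); return true; });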

* Fix `OOM` when allocating larger than page size in `ArenaAllocator`

Tweak the resizing mechanism for Operand.Uses and Assignments.

* Optimize `Optimizer` a bit

* Optimize `Operand.Add<T>/Remove<T>` a bit

* Clean up `PreAllocator`

* Fix phi insertion order

Reduce codegen diffs.

* Fix code alignment

* Use new heuristics for degree of parallelism

* Suppress warnings

* Address gdkchan's feedback

Renamed `GetValue()` to `GetValueUnsafe()` to make it more clear that
`Operand.Value` should usually not be modified directly.

* Add fast path to `ArenaAllocator`

* Assembly for `ArenaAllocator.Allocate(ulong)`:

  .L0:
    mov rax, [rcx+0x18]
    lea r8, [rax+rdx]
    cmp r8, [rcx+0x10]
    ja short .L2
  .L1:
    mov rdx, [rcx+8]
    add rax, [rdx+8]
    mov [rcx+0x18], r8
    ret
  .L2:
    jmp ArenaAllocator.AllocateSlow(UInt64)

  A few variables/fields had to be changed to `ulong` so that RyuJIT
  avoids emitting zero-extends.
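
  As a hedged reading of the listing (register roles and field offsets
  are inferred, not taken from the source): `rcx` is `this`, `rdx` is
  `size`, `[rcx+0x18]` is `_index`, `[rcx+0x10]` is `_pageSize`, and
  `[rcx+8]` is `_page`, whose `Pointer` field sits at offset 8.

    public override void* Allocate(ulong size)
    {
        ulong index = _index;          // mov rax, [rcx+0x18]
        ulong endIndex = index + size; // lea r8, [rax+rdx]

        if (endIndex > _pageSize)      // cmp r8, [rcx+0x10]
        {
            return AllocateSlow(size); // ja .L2 -> jmp AllocateSlow
        }

        _index = endIndex;             // mov [rcx+0x18], r8

        return _page.Pointer + index;  // mov rdx, [rcx+8]
                                       // add rax, [rdx+8]
    }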

* Implement a new heuristic to free pooled pages.

  If an arena is used often, it is more likely that its pages will be
  needed, so the pages are kept for longer (e.g. during PPTC rebuild or
  bursts of compilations). If it is not used often, then it is more
  likely that its pages will not be needed (e.g. after PPTC rebuild or
  bursts of compilations).
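
  As a concrete reading of the thresholds in `ArenaAllocator.Reset`
  below: if resets arrive roughly every 16 ms (a steady stream of
  translations), a page must sit unused for 1500 consecutive resets,
  about 24 seconds, before it is freed; if the previous reset was 5 or
  more seconds ago, unused pages at the back of the list are freed
  immediately.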

* Address riperiperi's feedback

* Use `EqualityComparer<T>` in `IntrusiveList<T>`

Avoids a potential GC hole in `Equals(T, T)`.
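
A sketch of the hazard and the fix (hypothetical code; the original
comparison is not shown in this excerpt). Reinterpreting managed
references as raw pointers hides them from the GC, which is free to move
the underlying objects; `EqualityComparer<T>.Default.Equals` is
devirtualized by the JIT, so it is allocation-free as well as GC-safe.

    using System.Collections.Generic;
    using System.Runtime.CompilerServices;

    static class Comparisons
    {
        // GC hole: the nint copies are untracked addresses. If the GC
        // relocates an object between the two reads, the comparison
        // operates on a stale address.
        public static bool DangerousEquals<T>(T a, T b) where T : class
        {
            return Unsafe.As<T, nint>(ref a) == Unsafe.As<T, nint>(ref b);
        }

        // Safe and allocation-free.
        public static bool SafeEquals<T>(T a, T b)
        {
            return EqualityComparer<T>.Default.Equals(a, b);
        }
    }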

FICTURE7, 2021-08-17 22:08:34 +04:00
commit 22b2cb39af (parent cd4530f29c)
91 changed files with 2354 additions and 2142 deletions

@@ -211,7 +211,7 @@ namespace ARMeilleure.Common
         private IntPtr Allocate<T>(int length, T fill, bool leaf) where T : unmanaged
         {
             var size = sizeof(T) * length;
-            var page = Marshal.AllocHGlobal(size);
+            var page = (IntPtr)NativeAllocator.Instance.Allocate((uint)size);
             var span = new Span<T>((void*)page, length);

             span.Fill(fill);

@@ -0,0 +1,24 @@
using System;
namespace ARMeilleure.Common
{
unsafe abstract class Allocator : IDisposable
{
public T* Allocate<T>(ulong count = 1) where T : unmanaged
{
return (T*)Allocate(count * (uint)sizeof(T));
}
public abstract void* Allocate(ulong size);
public abstract void Free(void* block);
protected virtual void Dispose(bool disposing) { }
public void Dispose()
{
Dispose(true);
GC.SuppressFinalize(this);
}
}
}

@@ -0,0 +1,188 @@
using System;
using System.Collections.Generic;
using System.Runtime.CompilerServices;
using System.Threading;
namespace ARMeilleure.Common
{
unsafe sealed class ArenaAllocator : Allocator
{
private class PageInfo
{
public byte* Pointer;
public byte Unused;
public int UnusedCounter;
}
private int _lastReset;
private ulong _index;
private int _pageIndex;
private PageInfo _page;
private List<PageInfo> _pages;
private readonly ulong _pageSize;
private readonly uint _pageCount;
private readonly List<IntPtr> _extras;
public ArenaAllocator(uint pageSize, uint pageCount)
{
_lastReset = Environment.TickCount;
// Set _index to pageSize so that the first allocation goes through the slow path.
_index = pageSize;
_pageIndex = -1;
_page = null;
_pages = new List<PageInfo>();
_pageSize = pageSize;
_pageCount = pageCount;
_extras = new List<IntPtr>();
}
public Span<T> AllocateSpan<T>(ulong count) where T : unmanaged
{
return new Span<T>(Allocate<T>(count), (int)count);
}
public override void* Allocate(ulong size)
{
if (_index + size <= _pageSize)
{
byte* result = _page.Pointer + _index;
_index += size;
return result;
}
return AllocateSlow(size);
}
[MethodImpl(MethodImplOptions.NoInlining)]
private void* AllocateSlow(ulong size)
{
if (size > _pageSize)
{
void* extra = NativeAllocator.Instance.Allocate(size);
_extras.Add((IntPtr)extra);
return extra;
}
if (_index + size > _pageSize)
{
_index = 0;
_pageIndex++;
}
if (_pageIndex < _pages.Count)
{
_page = _pages[_pageIndex];
_page.Unused = 0;
}
else
{
_page = new PageInfo();
_page.Pointer = (byte*)NativeAllocator.Instance.Allocate(_pageSize);
_pages.Add(_page);
}
byte* result = _page.Pointer + _index;
_index += size;
return result;
}
public override void Free(void* block) { }
public void Reset()
{
_index = _pageSize;
_pageIndex = -1;
_page = null;
// Free excess pages that were allocated.
while (_pages.Count > _pageCount)
{
NativeAllocator.Instance.Free(_pages[_pages.Count - 1].Pointer);
_pages.RemoveAt(_pages.Count - 1);
}
// Free extra blocks that are not page-sized
foreach (IntPtr ptr in _extras)
{
NativeAllocator.Instance.Free((void*)ptr);
}
_extras.Clear();
// Free pooled pages that have not been used in a while. Remove pages at the back first, because we try to
// keep the pages at the front alive, since they're more likely to be hot and in the d-cache.
bool removing = true;
// If arena is used frequently, keep pages for longer. Otherwise keep pages for a shorter amount of time.
int now = Environment.TickCount;
int count = (now - _lastReset) switch {
>= 5000 => 0,
>= 2500 => 50,
>= 1000 => 100,
>= 10 => 1500,
_ => 5000
};
for (int i = _pages.Count - 1; i >= 0; i--)
{
PageInfo page = _pages[i];
if (page.Unused == 0)
{
page.UnusedCounter = 0;
}
page.UnusedCounter += page.Unused;
page.Unused = 1;
// If page not used after `count` resets, remove it.
if (removing && page.UnusedCounter >= count)
{
NativeAllocator.Instance.Free(page.Pointer);
_pages.RemoveAt(i);
}
else
{
removing = false;
}
}
_lastReset = now;
}
protected override void Dispose(bool disposing)
{
if (_pages != null)
{
foreach (PageInfo info in _pages)
{
NativeAllocator.Instance.Free(info.Pointer);
}
foreach (IntPtr ptr in _extras)
{
NativeAllocator.Instance.Free((void*)ptr);
}
_pages = null;
}
}
~ArenaAllocator()
{
Dispose(false);
}
}
}
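
A hedged usage sketch for the allocator above; the page size is an
assumption, while the 32 pooled pages match the commit message:

    // One arena, reused across many translations.
    var arena = new ArenaAllocator(pageSize: 64 * 1024, pageCount: 32);

    // During a translation, allocations are pointer bumps into pooled
    // pages (or one-off native blocks for oversized requests).
    Span<int> scratch = arena.AllocateSpan<int>(128);
    scratch.Fill(0);

    // Afterwards everything is released at once; pages stay pooled,
    // subject to the unused-page heuristic, for the next translation.
    arena.Reset();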

@@ -1,57 +1,27 @@
 using System;
 using System.Collections;
 using System.Collections.Generic;
 using System.Numerics;

 namespace ARMeilleure.Common
 {
-    class BitMap : IEnumerator<int>, IEnumerable<int>
+    unsafe class BitMap : IEnumerable<int>, IDisposable
     {
         private const int IntSize = 64;
         private const int IntMask = IntSize - 1;

-        private readonly List<long> _masks;
+        private int _count;
+        private long* _masks;
+        private readonly Allocator _allocator;

-        private int _enumIndex;
-        private long _enumMask;
-        private int _enumBit;
-
-        public int Current => _enumIndex * IntSize + _enumBit;
-        object IEnumerator.Current => Current;
-
-        public BitMap()
+        public BitMap(Allocator allocator)
         {
-            _masks = new List<long>(0);
+            _allocator = allocator;
         }

-        public BitMap(int initialCapacity)
+        public BitMap(Allocator allocator, int capacity) : this(allocator)
         {
-            int count = (initialCapacity + IntMask) / IntSize;
-
-            _masks = new List<long>(count);
-
-            while (count-- > 0)
-            {
-                _masks.Add(0);
-            }
-        }
-
-        public BitMap Reset(int initialCapacity)
-        {
-            int count = (initialCapacity + IntMask) / IntSize;
-
-            if (count > _masks.Capacity)
-            {
-                _masks.Capacity = count;
-            }
-
-            _masks.Clear();
-
-            while (count-- > 0)
-            {
-                _masks.Add(0);
-            }
-
-            return this;
+            EnsureCapacity(capacity);
         }

         public bool Set(int bit)
@@ -97,7 +67,7 @@ namespace ARMeilleure.Common
         public int FindFirstUnset()
         {
-            for (int index = 0; index < _masks.Count; index++)
+            for (int index = 0; index < _count; index++)
             {
                 long mask = _masks[index];
@@ -107,16 +77,16 @@
                 }
             }

-            return _masks.Count * IntSize;
+            return _count * IntSize;
         }

         public bool Set(BitMap map)
         {
-            EnsureCapacity(map._masks.Count * IntSize);
+            EnsureCapacity(map._count * IntSize);

             bool modified = false;

-            for (int index = 0; index < _masks.Count; index++)
+            for (int index = 0; index < _count; index++)
             {
                 long newValue = _masks[index] | map._masks[index];
@@ -133,11 +103,11 @@
         public bool Clear(BitMap map)
         {
-            EnsureCapacity(map._masks.Count * IntSize);
+            EnsureCapacity(map._count * IntSize);

             bool modified = false;

-            for (int index = 0; index < _masks.Count; index++)
+            for (int index = 0; index < _count; index++)
             {
                 long newValue = _masks[index] & ~map._masks[index];
@@ -152,15 +122,34 @@
             return modified;
         }

-        #region IEnumerable<long> Methods
+        // Note: The bit enumerator is embedded in this class to avoid creating garbage when enumerating.

         private void EnsureCapacity(int size)
         {
-            while (_masks.Count * IntSize < size)
+            int count = (size + IntMask) / IntSize;
+
+            if (count > _count)
             {
-                _masks.Add(0);
+                var oldMask = _masks;
+                var oldSpan = new Span<long>(_masks, _count);
+
+                _masks = _allocator.Allocate<long>((uint)count);
+                _count = count;
+
+                var newSpan = new Span<long>(_masks, _count);
+
+                oldSpan.CopyTo(newSpan);
+                newSpan.Slice(oldSpan.Length).Clear();
+
+                _allocator.Free(oldMask);
             }
         }

+        public void Dispose()
+        {
+            if (_masks != null)
+            {
+                _allocator.Free(_masks);
+
+                _masks = null;
+            }
+        }
@@ -169,39 +158,59 @@
             return GetEnumerator();
         }

-        public IEnumerator<int> GetEnumerator()
+        IEnumerator<int> IEnumerable<int>.GetEnumerator()
         {
-            Reset();
-
-            return this;
+            return GetEnumerator();
         }

-        public bool MoveNext()
+        public Enumerator GetEnumerator()
         {
-            if (_enumMask != 0)
+            return new Enumerator(this);
+        }
+
+        public struct Enumerator : IEnumerator<int>
+        {
+            private int _index;
+            private long _mask;
+            private int _bit;
+
+            private readonly BitMap _map;
+
+            public int Current => _index * IntSize + _bit;
+            object IEnumerator.Current => Current;
+
+            public Enumerator(BitMap map)
             {
-                _enumMask &= ~(1L << _enumBit);
+                _index = -1;
+                _mask = 0;
+                _bit = 0;
+                _map = map;
             }

-            while (_enumMask == 0)
+            public bool MoveNext()
             {
-                if (++_enumIndex >= _masks.Count)
+                if (_mask != 0)
                 {
-                    return false;
+                    _mask &= ~(1L << _bit);
                 }
-                _enumMask = _masks[_enumIndex];
+
+                while (_mask == 0)
+                {
+                    if (++_index >= _map._count)
+                    {
+                        return false;
+                    }
+
+                    _mask = _map._masks[_index];
+                }
+
+                _bit = BitOperations.TrailingZeroCount(_mask);
+
+                return true;
             }

-            _enumBit = BitOperations.TrailingZeroCount(_enumMask);
-
-            return true;
+            public void Reset() { }
+
+            public void Dispose() { }
         }
-
-        public void Reset()
-        {
-            _enumIndex = -1;
-            _enumMask = 0;
-            _enumBit = 0;
-        }
-
-        public void Dispose() { }
-
-        #endregion
     }
 }
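
A short note on why the nested struct enumerator matters, with a usage
sketch (this follows standard C# `foreach` binding rules and is not code
from the commit): `foreach` binds to the public `GetEnumerator()` that
returns the struct, so enumeration allocates nothing; only casting the
map to `IEnumerable<int>` would box an enumerator.

    using var map = new BitMap(NativeAllocator.Instance, capacity: 128);

    map.Set(3);

    foreach (int bit in map)
    {
        Console.WriteLine(bit); // Prints "3".
    }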

@@ -1,32 +0,0 @@
namespace ARMeilleure.Common
{
static class BitMapPool
{
public static BitMap Allocate(int initialCapacity)
{
return BitMap().Reset(initialCapacity);
}
#region "ThreadStaticPool"
public static void PrepareBitMapPool(int groupId = 0)
{
ThreadStaticPool<BitMap>.PreparePool(groupId, ChunkSizeLimit.Small);
}
private static BitMap BitMap()
{
return ThreadStaticPool<BitMap>.Instance.Allocate();
}
public static void ResetBitMapPool(int groupId = 0)
{
ThreadStaticPool<BitMap>.ResetPool(groupId);
}
public static void DisposeBitMapPools()
{
ThreadStaticPool<BitMap>.DisposePools();
}
#endregion
}
}

@@ -1,7 +1,6 @@
 using System;
 using System.Collections.Generic;
 using System.Numerics;
-using System.Runtime.InteropServices;

 namespace ARMeilleure.Common
 {
@@ -41,7 +40,7 @@ namespace ARMeilleure.Common
                 throw new ArgumentException("Size of TEntry cannot be zero.");
             }

-            _allocated = new BitMap();
+            _allocated = new BitMap(NativeAllocator.Instance);
             _pages = new Dictionary<int, IntPtr>();
             _pageLogCapacity = BitOperations.Log2((uint)(pageSize / sizeof(TEntry)));
             _pageCapacity = 1 << _pageLogCapacity;
@@ -150,7 +149,7 @@ namespace ARMeilleure.Common
             if (!_pages.TryGetValue(pageIndex, out IntPtr page))
             {
-                page = Marshal.AllocHGlobal(sizeof(TEntry) * _pageCapacity);
+                page = (IntPtr)NativeAllocator.Instance.Allocate((uint)sizeof(TEntry) * (uint)_pageCapacity);

                 _pages.Add(pageIndex, page);
             }
@@ -172,13 +171,15 @@ namespace ARMeilleure.Common
         /// instance.
         /// </summary>
         /// <param name="disposing"><see langword="true"/> to dispose managed resources also; otherwise just unmanaged resouces</param>
-        protected virtual void Dispose(bool disposing)
+        protected unsafe virtual void Dispose(bool disposing)
         {
             if (!_disposed)
             {
+                _allocated.Dispose();
+
                 foreach (var page in _pages.Values)
                 {
-                    Marshal.FreeHGlobal(page);
+                    NativeAllocator.Instance.Free((void*)page);
                 }

                 _disposed = true;

@@ -0,0 +1,27 @@
using System;
using System.Runtime.InteropServices;
namespace ARMeilleure.Common
{
unsafe sealed class NativeAllocator : Allocator
{
public static NativeAllocator Instance { get; } = new();
public override void* Allocate(ulong size)
{
void* result = (void*)Marshal.AllocHGlobal((IntPtr)size);
if (result == null)
{
throw new OutOfMemoryException();
}
return result;
}
public override void Free(void* block)
{
Marshal.FreeHGlobal((IntPtr)block);
}
}
}

@@ -1,219 +0,0 @@
using ARMeilleure.Translation.PTC;
using System;
using System.Collections.Concurrent;
using System.Collections.Generic;
namespace ARMeilleure.Common
{
class ThreadStaticPool<T> where T : class, new()
{
[ThreadStatic]
private static ThreadStaticPool<T> _instance;
public static ThreadStaticPool<T> Instance
{
get
{
if (_instance == null)
{
PreparePool(); // So that we can still use a pool when blindly initializing one.
}
return _instance;
}
}
private static readonly ConcurrentDictionary<int, Stack<ThreadStaticPool<T>>> _pools = new();
private static Stack<ThreadStaticPool<T>> GetPools(int groupId)
{
return _pools.GetOrAdd(groupId, (groupId) => new());
}
public static void PreparePool(
int groupId = 0,
ChunkSizeLimit chunkSizeLimit = ChunkSizeLimit.Large,
PoolSizeIncrement poolSizeIncrement = PoolSizeIncrement.Default)
{
if (Ptc.State == PtcState.Disabled)
{
PreparePoolDefault(groupId, (int)chunkSizeLimit, (int)poolSizeIncrement);
}
else
{
PreparePoolSlim((int)chunkSizeLimit, (int)poolSizeIncrement);
}
}
private static void PreparePoolDefault(int groupId, int chunkSizeLimit, int poolSizeIncrement)
{
// Prepare the pool for this thread, ideally using an existing one from the specified group.
if (_instance == null)
{
var pools = GetPools(groupId);
lock (pools)
{
_instance = (pools.Count != 0) ? pools.Pop() : new(chunkSizeLimit, poolSizeIncrement);
}
}
}
private static void PreparePoolSlim(int chunkSizeLimit, int poolSizeIncrement)
{
// Prepare the pool for this thread.
if (_instance == null)
{
_instance = new(chunkSizeLimit, poolSizeIncrement);
}
}
public static void ResetPool(int groupId = 0)
{
if (Ptc.State == PtcState.Disabled)
{
ResetPoolDefault(groupId);
}
else
{
ResetPoolSlim();
}
}
private static void ResetPoolDefault(int groupId)
{
// Reset, limit if necessary, and return the pool for this thread to the specified group.
if (_instance != null)
{
var pools = GetPools(groupId);
lock (pools)
{
_instance.Clear();
_instance.ChunkSizeLimiter();
pools.Push(_instance);
_instance = null;
}
}
}
private static void ResetPoolSlim()
{
// Reset, limit if necessary, the pool for this thread.
if (_instance != null)
{
_instance.Clear();
_instance.ChunkSizeLimiter();
}
}
public static void DisposePools()
{
if (Ptc.State == PtcState.Disabled)
{
DisposePoolsDefault();
}
else
{
DisposePoolSlim();
}
}
private static void DisposePoolsDefault()
{
// Resets any static references to the pools used by threads for each group, allowing them to be garbage collected.
foreach (var pools in _pools.Values)
{
foreach (var instance in pools)
{
instance.Dispose();
}
pools.Clear();
}
_pools.Clear();
}
private static void DisposePoolSlim()
{
// Dispose the pool for this thread.
if (_instance != null)
{
_instance.Dispose();
_instance = null;
}
}
private List<T[]> _pool;
private int _chunkIndex = -1;
private int _poolIndex = -1;
private int _chunkSizeLimit;
private int _poolSizeIncrement;
private ThreadStaticPool(int chunkSizeLimit, int poolSizeIncrement)
{
_chunkSizeLimit = chunkSizeLimit;
_poolSizeIncrement = poolSizeIncrement;
_pool = new(chunkSizeLimit * 2);
AddChunkIfNeeded();
}
public T Allocate()
{
if (++_poolIndex >= _poolSizeIncrement)
{
AddChunkIfNeeded();
_poolIndex = 0;
}
return _pool[_chunkIndex][_poolIndex];
}
private void AddChunkIfNeeded()
{
if (++_chunkIndex >= _pool.Count)
{
T[] pool = new T[_poolSizeIncrement];
for (int i = 0; i < _poolSizeIncrement; i++)
{
pool[i] = new T();
}
_pool.Add(pool);
}
}
public void Clear()
{
_chunkIndex = 0;
_poolIndex = -1;
}
private void ChunkSizeLimiter()
{
if (_pool.Count >= _chunkSizeLimit)
{
int newChunkSize = _chunkSizeLimit / 2;
_pool.RemoveRange(newChunkSize, _pool.Count - newChunkSize);
_pool.Capacity = _chunkSizeLimit * 2;
}
}
private void Dispose()
{
_pool = null;
}
}
}

@@ -1,14 +0,0 @@
namespace ARMeilleure.Common
{
public enum PoolSizeIncrement
{
Default = 200
}
public enum ChunkSizeLimit
{
Large = 200000 / PoolSizeIncrement.Default,
Medium = 100000 / PoolSizeIncrement.Default,
Small = 50000 / PoolSizeIncrement.Default
}
}