
Blob-based per-thread slab allocator

This permits "stateless" allocators which grab memory from a
`thread_local Alepha::Blob` instance.  Each allocation
sticks a malloc cookie of type
`std::shared_ptr< Alepha::Blob::StorageReservation >`
just before the base of the allocation.
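
Roughly, each carved region looks like this (a sketch; the cookie
sits at the base of the carve and the caller never sees it):

[ shared_ptr< StorageReservation > cookie ][ caller's storage ... ]
^ base of the carved region                ^ pointer returned by allocate()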

On deallocation, the allocator `reinterpret_cast`s the hidden
malloc cookie back into a shared pointer and runs its destructor.
This ties the Blob's underlying reference-counted allocation
to the lifetime of the allocated memory.  The intent
is to permit cheap allocation in one thread and deallocation
in another.  Each deallocation should be a single atomic
decrement operation.  Each allocation should be (usually) a
bit of pointer arithmetic and a single atomic increment operation.
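
In other words, the two hot paths reduce to roughly the following
sketch (illustrative stand-ins only; `StorageReservation` here is a
dummy type, not the real `Alepha::Blob` API):

#include <memory>
#include <new>

struct StorageReservation {};
using Cookie= std::shared_ptr< StorageReservation >;

void *allocateSketch( const Cookie &slabReservation, void *const carvedBytes )
{
	// Copying the slab's reservation into the hidden slot is the single
	// atomic increment; everything else is pointer arithmetic.
	Cookie *const cookie= new ( carvedBytes ) Cookie{ slabReservation };
	return cookie + 1; // The caller's storage begins just past the cookie.
}

void deallocateSketch( void *const p )
{
	// Destroying the hidden cookie is the single atomic decrement; the last
	// owner's decrement releases the slab's underlying storage.
	Cookie *const hidden= reinterpret_cast< Cookie * >( p ) - 1;
	hidden->~Cookie();
}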

This, hopefully, eliminates significant contention between
threads for the global allocation mechanism in an intensive
multithreaded situation where each processing thread may
independently retire data objects allocated by a single source.
2024-09-05 17:06:52 -04:00
parent d4dfe9f90f
commit 6c165b1603
6 changed files with 191 additions and 3 deletions

Memory/ThreadSlab.h
@@ -0,0 +1,129 @@
static_assert( __cplusplus > 2020'99 );
#pragma once
#include <Alepha/Alepha.h>

#include <algorithm>
#include <cstddef>
#include <iostream>
#include <new>
#include <string>
#include <utility>

#include <Alepha/Memory/Blob.h>
namespace Alepha::Hydrogen::Memory ::detail:: ThreadSlab_m
{
inline namespace exports
{
template< typename T >
class ThreadSlab;
using ThreadSlabString= std::basic_string< char, std::char_traits< char >, ThreadSlab< char > >;
}
namespace C
{
const std::size_t slabSize= 64 * 1024 * 1024;
const bool debug= false;
const bool debugLifecycle= false or C::debug;
const bool debugAllocation= false or C::debug;
const bool debugDeallocation= false or C::debug;
}
template< typename T >
class exports::ThreadSlab
{
public:
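// One Blob slab per thread; every ThreadSlab< T > instance on that thread carves from it.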
inline static thread_local Blob slab;
public:
using value_type= T;
using propagate_on_container_copy_assignment= std::true_type;
using propagate_on_container_move_assignment= std::true_type;
using propagate_on_container_swap= std::true_type;
using is_always_equal= std::true_type;
ThreadSlab select_on_container_copy_construction() const { return ThreadSlab{}; }
ThreadSlab()= default;
ThreadSlab &operator= ( const ThreadSlab &other )= default;
ThreadSlab( const ThreadSlab &other )= default;
ThreadSlab( ThreadSlab &&other ) : ThreadSlab( std::as_const( other ) ) {}
~ThreadSlab()
{
if( C::debugLifecycle )
{
std::cerr << "Reporting " << slab.reservation().use_count() << " living allocations when "
<< (void *) this << " is retired." << std::endl;
}
}
[[nodiscard]] T *
allocate( const std::size_t amt )
{
// TODO: Alignment needs to be handled.
// `amt` counts objects of type `T`, so convert to bytes before adding the cookie.
const std::size_t req= amt * sizeof( T ) + sizeof( Blob::StorageReservation );
// TODO: Larger allocations may be worth bespoke allocations, if they're rare one-off cases
if( req > C::slabSize ) throw std::bad_alloc{}; //{ "Unable to allocate larger than the slab size." };
if( slab.size() < req ) slab.reset( std::max( req, C::slabSize ) );
if( C::debugAllocation )
{
std::cerr << "Reporting " << slab.reservation().use_count() << " living allocations when "
<< (void *) this << " made an allocation." << std::endl;
}
// `req` already accounts for the hidden cookie, so carve exactly that much.
auto next= slab.carveHead( req );
const auto rv= reinterpret_cast< T * >( &next.template as< Blob::StorageReservation >() + 1 );
// FIXME: The placement new here is potentially unaligned -- this may significantly impact
// performance.
new ( &next.template as< Blob::StorageReservation >() ) Blob::StorageReservation{ std::move( next.reservation() ) };
if( C::debugAllocation )
{
std::cerr << "Reporting " << slab.reservation().use_count() << " living allocations when "
<< (void *) this << " made an allocation." << std::endl;
}
return rv;
}
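// Run the destructor of the hidden cookie through its deduced type.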
template< typename SP >
static void
destroy( SP *p )
{
p->~SP();
}
void
deallocate( T *const p, const std::size_t /* ignored */ ) noexcept
{
if( C::debugDeallocation )
{
std::cerr << "Reporting " << slab.reservation().use_count() << " living allocations when "
<< (void *) this << " made a deallocation." << std::endl;
}
auto *const hidden= reinterpret_cast< Blob::StorageReservation * >( p ) - 1;
destroy( hidden );
if( C::debugDeallocation )
{
std::cerr << "Reporting " << slab.reservation().use_count() << " living allocations when "
<< (void *) this << " made a deallocation." << std::endl;
}
}
friend constexpr bool operator == ( const ThreadSlab &, const ThreadSlab & ) noexcept { return true; }
};
}
namespace Alepha::Hydrogen::Memory::inline exports::inline ThreadSlab_m
{
using namespace detail::ThreadSlab_m::exports;
}
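
A sketch of the intended cross-thread usage, assuming this header is
on the include path (the payload string is arbitrary, merely long
enough to defeat the small-string optimization):

#include <thread>
#include <utility>
#include <vector>

#include <Alepha/Memory/ThreadSlab.h>

int main()
{
	using Alepha::Hydrogen::Memory::ThreadSlabString;

	// Producer thread: allocations carve from this thread's slab.
	std::vector< ThreadSlabString > batch;
	for( int i= 0; i < 1000; ++i )
	{
		batch.emplace_back( "a payload long enough to defeat the small-string optimization" );
	}

	// Consumer thread: retiring the strings destroys each hidden cookie;
	// one atomic decrement apiece, with no call into the global allocator.
	std::thread consumer{ [ batch= std::move( batch ) ]() mutable { batch.clear(); } };
	consumer.join();
}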