// Alepha/Memory/ThreadSlab.h
static_assert( __cplusplus > 2020'99 );
#pragma once

#include <Alepha/Alepha.h>

#include <algorithm>
#include <cstddef>
#include <iostream>
#include <new>
#include <string>
#include <utility>

#include <Alepha/Memory/Blob.h>

namespace Alepha::Hydrogen::Memory ::detail:: ThreadSlab_m
{
	inline namespace exports
	{
		template< typename T >
		class ThreadSlab;

		using ThreadSlabString= std::basic_string< char, std::char_traits< char >, ThreadSlab< char > >;
	}

	namespace C
	{
		const std::size_t slabSize= 64 * 1024 * 1024;

		const bool debug= false;
		const bool debugLifecycle= false or C::debug;
		const bool debugAllocation= false or C::debug;
		const bool debugDeallocation= false or C::debug;
	}

	template< typename T >
	class exports::ThreadSlab
	{
	public:
		inline static thread_local Blob slab;

	public:
		using value_type= T;

		using propagate_on_container_copy_assignment= std::true_type;
		using propagate_on_container_move_assignment= std::true_type;
		using propagate_on_container_swap= std::true_type;
		using is_always_equal= std::true_type;
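
		// Every instance shares the single thread_local slab, so any two instances are
		// interchangeable: `is_always_equal` holds, propagation on copy, move, and swap
		// is trivially correct, and `select_on_container_copy_construction` can simply
		// hand back a default-constructed instance.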
		ThreadSlab select_on_container_copy_construction() const { return ThreadSlab{}; }

		ThreadSlab()= default;
		ThreadSlab &operator= ( const ThreadSlab &other )= default;
		ThreadSlab( const ThreadSlab &other )= default;
		ThreadSlab( ThreadSlab &&other ) : ThreadSlab( std::as_const( other ) ) {}

		// Converting constructor, required by the allocator requirements so that
		// containers can rebind to `ThreadSlab< U >` (e.g. node-based containers).
		template< typename U >
		ThreadSlab( const ThreadSlab< U > & ) noexcept {}

		~ThreadSlab()
		{
			if( C::debugLifecycle )
			{
				std::cerr << "Reporting " << slab.reservation().use_count() << " living allocations when "
						<< (void *) this << " is retired." << std::endl;
			}
		}
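
		// Layout of each allocation: a copy of the slab's `StorageReservation` (the
		// shared-ownership handle on the backing storage) is stashed immediately before
		// the caller's objects.  Holding that copy keeps the backing block alive until
		// `deallocate` destroys it, so resetting the slab to a fresh block never
		// invalidates allocations still living in the old one.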
		[[nodiscard]] T *
		allocate( const std::size_t amt )
		{
			// TODO: Alignment needs to be handled.
			// `amt` counts objects, not bytes; `req` also covers the hidden reservation header.
			const std::size_t req= amt * sizeof( T ) + sizeof( Blob::StorageReservation );

			// TODO: Larger allocations may be worth bespoke allocations, if they're rare one-off cases.
			if( req > C::slabSize ) throw std::bad_alloc{}; // "Unable to allocate larger than the slab size."
			if( slab.size() < req ) slab.reset( std::max( req, C::slabSize ) );

			if( C::debugAllocation )
			{
				std::cerr << "Reporting " << slab.reservation().use_count() << " living allocations when "
						<< (void *) this << " made an allocation." << std::endl;
			}

			auto next= slab.carveHead( req );
			const auto rv= reinterpret_cast< T * >( &next.template as< Blob::StorageReservation >() + 1 );

			// FIXME: The placement new here is potentially unaligned -- this may significantly impact
			// performance.
			new ( &next.template as< Blob::StorageReservation >() ) Blob::StorageReservation{ std::move( next.reservation() ) };

			if( C::debugAllocation )
			{
				std::cerr << "Reporting " << slab.reservation().use_count() << " living allocations when "
						<< (void *) this << " made an allocation." << std::endl;
			}

			return rv;
		}

		// Helper to invoke a destructor through a deduced type name.
		template< typename SP >
		static void
		destroy( SP *p )
		{
			p->~SP();
		}
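
		// `deallocate` steps back one `StorageReservation` from the caller's pointer to
		// find the hidden header written by `allocate`, then destroys it, releasing this
		// allocation's share of the backing storage.  The size argument is unused: the
		// slab only reclaims space when an entire backing block dies.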
		void
		deallocate( T *const p, const std::size_t /* ignored */ ) noexcept
		{
			if( C::debugDeallocation )
			{
				std::cerr << "Reporting " << slab.reservation().use_count() << " living allocations when "
						<< (void *) this << " made a deallocation." << std::endl;
			}

			auto *const hidden= reinterpret_cast< Blob::StorageReservation * >( p ) - 1;
			destroy( hidden );

			if( C::debugDeallocation )
			{
				std::cerr << "Reporting " << slab.reservation().use_count() << " living allocations when "
						<< (void *) this << " made a deallocation." << std::endl;
			}
		}

		friend constexpr bool operator == ( const ThreadSlab &, const ThreadSlab & ) noexcept { return true; }
	};
}

namespace Alepha::Hydrogen::Memory::inline exports::inline ThreadSlab_m
{
	using namespace detail::ThreadSlab_m::exports;
}
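
/*
	Usage sketch (illustrative only; not part of this header).  `ThreadSlabString` and
	`ThreadSlab` are defined above; everything else is standard library:

	#include <Alepha/Memory/ThreadSlab.h>
	#include <vector>

	void
	example()
	{
		using namespace Alepha::Hydrogen::Memory;

		// Long strings carve their storage from this thread's slab.
		ThreadSlabString s= "A string long enough to defeat the small-string optimization.";

		// Any allocator-aware container can use the slab, since `ThreadSlab` is stateless.
		std::vector< int, ThreadSlab< int > > v;
		v.push_back( 42 );
	}
*/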