
Blob-based per-thread slab allocator

Uses a fast random generator to decide when to split Blobs.

Closes: #31
2025-01-27 13:23:18 -05:00
12 changed files with 385 additions and 21 deletions
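For context: the new split logic is a cheap probability gate. When the ALEPHA_USE_DOUBLE_BLOB_POINTERS environment toggle is set, `Blob::maybeRollover()` (below) draws `storageSplitRandomBitDepth` (12) random bits and rolls over only when all of them come up zero, i.e. roughly once per 2^12 = 4096 sharing operations. A minimal sketch of that gate, assuming only the `fastRandomBits()` API added by this commit:

#include <Alepha/fastRandom.h>

// Sketch only: the dice roll behind Blob::maybeRollover().
// fastRandomBits( n ) yields n random bits; all of them are zero -- and the
// rollover path is taken -- with probability 2^-n (1 in 4096 for n == 12).
bool
shouldRollover()
{
	const int storageSplitRandomBitDepth= 12;
	return Alepha::fastRandomBits( storageSplitRandomBitDepth ) == 0;
}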


@@ -12,6 +12,7 @@ add_library( alepha SHARED
Console.cc
ProgramOptions.cc
string_algorithms.cc
fastRandom.cc
word_wrap.cc
Thread.cc
delimited_list.cc


@@ -434,6 +434,7 @@ namespace Alepha::Hydrogen ::detail:: Exception_m
using storage_type= AllocationAmountStorage;
virtual ~AllocationAmountInterface()= default;
virtual std::size_t allocationAmount() const noexcept= 0;
virtual void setAllocationAmount( std::size_t ) noexcept= 0;
};
class AllocationAmountStorage
: virtual public AllocationAmountInterface
@@ -443,6 +444,8 @@ namespace Alepha::Hydrogen ::detail:: Exception_m
public:
std::size_t allocationAmount() const noexcept final { return amount; }
void setAllocationAmount( const std::size_t amount ) noexcept { this->amount= amount; }
};
class AllocationException
: virtual public create_exception< struct allocation_throwable, Exception >, virtual public AllocationAmountInterface {};
@@ -496,6 +499,7 @@ namespace Alepha::Hydrogen ::detail:: Exception_m
class Undergird
: virtual public Kind, virtual protected GenericExceptionBridge< std::bad_alloc >,
virtual protected MessageStorage, virtual protected AllocationAmountStorage,
virtual public AllocationAmountInterface,
virtual public std::bad_alloc
{};
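A hedged sketch of how a catch site might consume the new interface: this assumes the thrown type implements `AllocationAmountInterface` (as `AllocationException` above does) and that the throwing code populated the value via `setAllocationAmount()`:

#include <iostream>

// Sketch only: surfacing allocationAmount() at a catch site.
void
reportAllocationFailure( const AllocationAmountInterface &exc )
{
	std::cerr << "Failed to allocate " << exc.allocationAmount() << " bytes." << std::endl;
}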


@@ -5,10 +5,12 @@ static_assert( __cplusplus > 2020'99 );
#include <Alepha/Alepha.h>
#include <cassert>
#include <cstdlib>
#include <memory>
#include <Alepha/swappable.h>
#include <Alepha/fastRandom.h>
#include <Alepha/Exception.h>
#include <Alepha/error.h>
@@ -17,6 +19,8 @@ static_assert( __cplusplus > 2020'99 );
#include "Buffer.h"
// TODO: Put this into the `Alepha::Memory::` namespace.
// TODO: Consider whether a "republish" into `Alepha::` namespace is a good idea.
namespace Alepha::Hydrogen ::detail:: Blob_m
{
inline namespace exports
@@ -28,6 +32,9 @@ namespace Alepha::Hydrogen ::detail:: Blob_m
namespace C
{
const auto doubleBlobPointerOption= "ALEPHA_USE_DOUBLE_BLOB_POINTERS";
const int storageSplitRandomBitDepth= 12; // TODO: Environment tunable? Global tunable?
const bool debug= false;
const bool debugLifecycle= false or C::debug;
const bool debugCtors= false or C::debugLifecycle or C::debug;
@@ -75,25 +82,37 @@ namespace Alepha::Hydrogen ::detail:: Blob_m
Buffer< Mutable > buffer;
std::size_t viewLimit= 0; // TODO: Consider allowing for unrooted sub-buffer views?
// TODO: Take the `storage` parameter and make it not increment when this ctor is called -- only when the dice roll passes.
// Potentially roll over the internal storage during a blob-sharing operation.
void
maybeRollover()
{
if( not ::getenv( C::doubleBlobPointerOption ) ) return;
if( Alepha::fastRandomBits( C::storageSplitRandomBitDepth ) ) return;
// The inner pointer's use count gets incremented.
std::shared_ptr< Blob > inner= *storage;
// Capture the new inner into the outer.
storage= std::make_shared< std::shared_ptr< Blob > >( std::move( inner ) );
// Post rollover, there should only be one reference to start.
assert( storage.use_count() == 1 );
}
// Take over a portion of a shared block.
explicit
Blob( IndirectStorage storage, Buffer< Mutable > buffer ) noexcept
-: storage( Utility::evaluate <=[storage= std::move( storage )] () -> IndirectStorage
-{
-//if( fastRandomBits( C::storageSplitRandomBitDepth ) )
-return std::move( storage );
-//if( C::debugSplitSharing ) error() << "Observed a use count of " << storage.use_count() << " when we failed the dice roll." << std::endl;
-//auto split= std::make_shared< std::shared_ptr< Blob > >( *storage );
-//if( C::
-//return split;
-}),
-buffer( buffer ),
-viewLimit( buffer.size() )
+: storage( std::move( storage ) ), buffer( buffer ), viewLimit( buffer.size() )
{}
public:
~Blob() { reset(); }
using StorageReservation= IndirectStorage;
const StorageReservation &reservation() const { return storage; }
auto
swap_lens() noexcept
{
@@ -218,7 +237,10 @@ namespace Alepha::Hydrogen ::detail:: Blob_m
* inside a large single physical backing. This helps maintain zero-copy semantics.
*
* @param amount The amount of data to carve off.
-* @return A new `Blob` object referring to the same physical data, scoped to `amount` bytes.
+* @param alignment The size alignment that the new base should be at (the extra padding is
+* considered part of the resulting `Blob`).
+* @return A new `Blob` object referring to the same physical data, scoped to `amount` bytes (with
+* possible extra space, due to alignment).
*/
Blob
carveHead( const std::size_t amount )
@@ -234,7 +256,10 @@ namespace Alepha::Hydrogen ::detail:: Blob_m
viewLimit= (*storage)->viewLimit;
}
assert( storage );
// Now we assume that there's a two-layer scheme, so we operate based upon that.
maybeRollover();
Blob rv{ storage, Buffer< Mutable >{ buffer, amount } };
buffer= buffer + amount;
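A minimal usage sketch of `carveHead()`: the `Alepha::Memory::Blob` spelling and the `reset( size )` call are assumptions, based on their use in `ThreadSlab.cc` below.

// Sketch only: zero-copy sub-allocation from one shared physical backing.
Alepha::Memory::Blob backing;
backing.reset( 1024 );  // Assumed: acquire a fresh 1 KiB physical block.

Alepha::Memory::Blob a= backing.carveHead( 128 );  // Bytes [0, 128), co-owning the backing.
Alepha::Memory::Blob b= backing.carveHead( 128 );  // Bytes [128, 256); `backing` views the rest.

// All three Blobs share one physical buffer through the indirect reservation;
// maybeRollover() occasionally interposes a fresh outer pointer, so the outer
// use count periodically resets.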


@@ -2,7 +2,7 @@ static_assert( __cplusplus > 2020'99 );
#pragma once
-#include <Alepha.h>
+#include <Alepha/Alepha.h>
#include <cstddef>
#include <cstring>
@@ -15,15 +15,14 @@ static_assert( __cplusplus > 2020'99 );
#include <exception>
#include <stdexcept>
#include <Alepha/Constness.h>
#include <Alepha/lifetime.h>
#include <Alepha/Constness.h>
#include <Alepha/Capabilities.h>
#include <Alepha/assertion.h>
#include <Alepha/Concepts.h>
#include <Alepha/IOStreams/String.h>
#include "Concepts.h"
#include "assertion.h"
#include "Capabilities.h"
namespace Alepha::Hydrogen ::detail:: Buffer_m
{


@@ -1 +1,6 @@
add_subdirectory( Blob.test )
add_subdirectory( ThreadSlab.test )
target_sources( alepha PRIVATE
ThreadSlab.cc
)


@@ -52,7 +52,7 @@ namespace Alepha::inline Cavorite ::detail:: DataChain_m
friend DataChain;
-explicit Iterator( const ChainIter pos, cosnt std::size_t offset ) noexcept : pos( pos ), offset( offset ) {}
+explicit Iterator( const ChainIter pos, const std::size_t offset ) noexcept : pos( pos ), offset( offset ) {}
public:
auto
@@ -165,7 +165,7 @@ namespace Alepha::inline Cavorite ::detail:: DataChain_m
std::copy_n( std::prev( end(), amount ), amount, rv.byte_data() );
return rv;
}
-};
+};
}

Memory/ThreadSlab.cc (new file, 96 lines)

@@ -0,0 +1,96 @@
static_assert( __cplusplus > 2023'00 );
#include "ThreadSlab.h"
#include <Alepha/Exception.h>
namespace Alepha::Hydrogen::Memory ::detail:: ThreadSlab_m
{
namespace
{
namespace C
{
const std::size_t slabSize= 64 * 1024 * 1024;
const bool debug= false;
const bool debugLifecycle= false or C::debug;
const bool debugAllocation= false or C::debug;
const bool debugDeallocation= false or C::debug;
}
template< typename SP >
void
destroy( SP *p )
{
p->~SP();
}
namespace storage
{
thread_local Blob slab;
}
}
[[nodiscard]]
void *
shim::allocate( std::size_t amt )
{
// TODO: Alignment needs to be handled.
const std::size_t req= amt + sizeof( Blob::StorageReservation );
// TODO: Larger allocations may be worth bespoke allocations, if they're rare one-off cases
if( req > C::slabSize )
{
auto exc= build_exception< AllocationError >( "Unable to allocate larger than the slab size." );
//exc.setAllocationAmount( req );
throw exc;
}
if( slab().size() < req ) slab().reset( std::max( req, C::slabSize ) );
if( C::debugAllocation )
{
std::cerr << "Reporting " << slab().reservation().use_count() << " living allocations when "
<< (void *) &slab() << " made an allocation." << std::endl;
}
auto next= slab().carveHead( req );
void *const rv= &next.as< Blob::StorageReservation >() + 1;
// FIXME: The placement new here is potentially unaligned -- this may significantly impact
// performance. It is also non-portable.
new ( &next.template as< Blob::StorageReservation >() ) Blob::StorageReservation{ std::move( next.reservation() ) };
if( C::debugAllocation )
{
std::cerr << "Reporting " << slab().reservation().use_count() << " living allocations when "
<< (void *) &slab() << " made an allocation." << std::endl;
}
return rv;
}
void
shim::deallocate( void *p ) noexcept
{
if( C::debugDeallocation )
{
std::cerr << "Reporting " << slab().reservation().use_count() << " living allocations when "
<< (void *) &slab() << " made a deallocation." << std::endl;
}
auto *const hidden= reinterpret_cast< Blob::StorageReservation * >( p ) - 1;
destroy( hidden );
if( C::debugDeallocation )
{
std::cerr << "Reporting " << slab().reservation().use_count() << " living allocations when "
<< (void *) &slab() << " made a deallocation." << std::endl;
}
}
Blob &
shim::slab()
{
return storage::slab;
}
}
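For orientation, a sketch of the layout that `shim::allocate( amt )` produces (widths not to scale):

// Sketch only: one slab allocation, as laid out by shim::allocate( amt ).
//
//   |<- sizeof( Blob::StorageReservation ) ->|<-------- amt bytes -------->|
//   +----------------------------------------+-----------------------------+
//   | hidden StorageReservation              | user data                   |
//   +----------------------------------------+-----------------------------+
//                                            ^-- returned pointer
//
// The hidden reservation co-owns the slab's backing storage, so the physical
// block stays alive as long as any allocation carved from it is still live.
void *const p= shim::allocate( 64 );  // 64 user bytes plus one hidden reservation.
shim::deallocate( p );                // Steps back one reservation and destroys it.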

Memory/ThreadSlab.h (new file, 101 lines)

@@ -0,0 +1,101 @@
static_assert( __cplusplus > 2020'99 );
#pragma once
#include <Alepha/Alepha.h>
#include <string>
#include <vector>
#include <map>
#include <Alepha/Memory/Blob.h>
namespace Alepha::Hydrogen::Memory ::detail:: ThreadSlab_m
{
inline namespace exports {}
namespace exports::inline ThreadSlab
{
template< typename T >
class Allocator;
using String= std::basic_string< char, std::char_traits< char >, Allocator< char > >;
template< typename T >
using Vector= std::vector< T, Allocator< T > >;
template< typename K, typename V, typename Compare= std::less< K > >
using Map= std::map< K, V, Compare, Allocator< std::pair< const K, V > > >;
}
namespace shim
{
[[nodiscard]] void *allocate( std::size_t );
void deallocate( void * ) noexcept;
Blob &slab();
}
template< typename T >
struct BlockFromSlab
{
std::byte raw[ sizeof( T ) ];
[[nodiscard]] static void *
operator new [] ( const std::size_t sz )
{
return shim::allocate( sz );
}
static void
operator delete [] ( void *const p ) noexcept
{
return shim::deallocate( p );
}
};
template< typename T >
class exports::Allocator
{
public:
using value_type= T;
using propagate_on_container_copy_assignment= std::true_type;
using propagate_on_container_move_assignment= std::true_type;
using propagate_on_container_swap= std::true_type;
using is_always_equal= std::true_type;
Allocator select_on_container_copy_construction() const { return Allocator{}; }
Allocator()= default;
// Converting constructor: containers rebind the allocator to their internal node types.
template< typename U >
Allocator( const Allocator< U > & ) noexcept {}
Allocator &operator= ( const Allocator &other )= default;
Allocator( const Allocator &other )= default;
Allocator( Allocator &&other ) : Allocator( std::as_const( other ) ) {}
~Allocator()= default;
static Blob &slab() { return shim::slab(); }
[[nodiscard]] T *
allocate( const std::size_t amt )
{
return reinterpret_cast< T * >( new BlockFromSlab< T >[ amt ] );
}
void
deallocate( T *const p, const std::size_t /* ignored */ ) noexcept
{
return BlockFromSlab< T >::operator delete [] ( p );
}
friend constexpr bool operator == ( const Allocator &, const Allocator & ) noexcept { return true; }
};
}
namespace Alepha::Hydrogen::Memory::inline exports::inline ThreadSlab_m
{
using namespace detail::ThreadSlab_m::exports;
}
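A short usage sketch of the exported aliases; the `Alepha::Memory::ThreadSlab` spelling assumes the republish above:

#include <Alepha/Memory/ThreadSlab.h>

// Sketch only: the slab-backed aliases as drop-in standard containers.
void
example()
{
	namespace TS= Alepha::Memory::ThreadSlab;  // Assumed republished spelling.
	TS::String s= "hello";
	TS::Vector< int > v{ 1, 2, 3 };
	TS::Map< int, TS::String > m;
	m[ 1 ]= s;  // Every node comes from this thread's slab.
}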


@@ -0,0 +1,49 @@
static_assert( __cplusplus > 2020'99 );
#include "../ThreadSlab.h"
#include <Alepha/Testing/test.h>
#include <Alepha/Utility/evaluation_helpers.h>
static auto init= Alepha::Utility::enroll <=[]
{
using namespace Alepha::Testing::literals;
using namespace Alepha::Memory::exports::ThreadSlab_m;
using String= ThreadSlab::String;
"Check slab usage"_test <=[]
{
std::cout << "I see " << Alepha::Memory::detail::ThreadSlab_m::shim::slab().reservation().use_count() << " reservations in a separate test." <<
std::endl;
};
"Can we work with simple `ThreadSlabStrings` without errors?"_test <=[]
{
String s;
std::cerr << "s is empty" << std::endl;
String s2= "Hello World";
std::cerr << "small hello world string." << std::endl;
String s3= s2 + ": and bob";
for( int i= 0; i < 10; ++i )
{
std::cerr << "appended..." << std::endl;
s3= s3 + s3 + s2;
s2= std::move( s3 );
}
std::cout << s3 << std::endl;
};
"Check slab usage"_test <=[]
{
std::cout << "I see " << Alepha::Memory::detail::ThreadSlab_m::shim::slab().reservation().use_count() << " reservations in a separate test." <<
std::endl;
};
};


@@ -0,0 +1 @@
unit_test( 0 )

fastRandom.cc (new file, 63 lines)

@@ -0,0 +1,63 @@
static_assert( __cplusplus > 2023'00 );
#include "fastRandom.h"
#include <random>
#include <boost/random/taus88.hpp>
namespace Alepha::Hydrogen ::detail:: fastRandom_m
{
namespace
{
struct FastRandomState
{
boost::random::taus88 fastRandomState{ std::random_device{}() };
std::uint32_t pool;
int remainingBits= 0;
void
next()
{
pool= fastRandomState();
remainingBits= 32;
}
void
refresh()
{
if( remainingBits == 0 ) next();
}
std::uint32_t
getBit()
{
refresh();
const std::uint32_t rv= pool & 1;
--remainingBits;
pool>>= 1;
return rv;
}
std::uint32_t
get( int count )
{
std::uint32_t rv= 0;
while( count-- )
{
rv<<= 1;
rv|= getBit();
}
return rv;
}
};
thread_local FastRandomState fastRandom;
}
std::uint32_t
exports::fastRandomBits( const int numBits )
{
return fastRandom.get( numBits );
}
}

fastRandom.h (new file, 20 lines)

@@ -0,0 +1,20 @@
static_assert( __cplusplus > 2023'00 );
#pragma once
#include <Alepha/Alepha.h>
#include <cstdint>
namespace Alepha::Hydrogen ::detail:: fastRandom_m
{
inline namespace exports
{
std::uint32_t fastRandomBits( int numBits );
}
}
namespace Alepha::Hydrogen::inline exports::inline fastRandom_m
{
using namespace detail::fastRandom_m::exports;
}
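As a quick sanity check of the pooling arithmetic: each `taus88` draw yields 32 bits, and `get( n )` consumes exactly `n`, carrying leftovers across calls, so 12-bit requests average 12/32 of a generator invocation each. A sketch, assuming a fresh (empty) thread-local pool:

#include <Alepha/fastRandom.h>
#include <cstdint>

// Sketch only: consuming the thread-local bit pool.
void
drawSome()
{
	const std::uint32_t a= Alepha::fastRandomBits( 12 );  // Refills the 32-bit pool, takes 12; 20 remain.
	const std::uint32_t b= Alepha::fastRandomBits( 12 );  // Served from the pool; 8 remain.
	const std::uint32_t c= Alepha::fastRandomBits( 12 );  // Takes 8, refills once, takes 4 more.
	// Three 12-bit draws consumed 36 bits: just over one taus88 invocation.
	(void) a; (void) b; (void) c;
}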