From 6c165b16035db5274e10df6ed912fe8169f45a47 Mon Sep 17 00:00:00 2001 From: ADAM David Alan Martin Date: Thu, 5 Sep 2024 17:06:52 -0400 Subject: [PATCH 1/5] Blob based per-thread slab allocator This permits "stateless" allocators which grab memory from a `thread_local Alepha::Blob` instance. Each allocation sticks a malloc cookie of type `std::shared_ptr< Alepha::Blob::StorageReservation >` just before the base of the allocation. The allocator object knows that it needs to `reinterpret_cast` the malloc cookie into a shared pointer and run its destructor. This causes the Blob's underlying reference counted allocation to be tied to the lifetime of the allocated memory. The intent is to permit cheap allocation in one thread and deallocation in another. Each deallocation should be a single atomic dereference operation. Each allocation should be (usually) a bit of pointer arithmetic and a single atomic increment operation. This, hopefully, eliminates significant thread contention for the global allocation mechanism between various threads in an intensive multithreaded situation where each processing thread thread may independently retire data objects allocated by a single source. --- Memory/Blob.h | 10 +- Memory/CMakeLists.txt | 1 + Memory/DataChain.h | 4 +- Memory/ThreadSlab.h | 129 ++++++++++++++++++++++++++ Memory/ThreadSlab.test/0.cc | 49 ++++++++++ Memory/ThreadSlab.test/CMakeLists.txt | 1 + 6 files changed, 191 insertions(+), 3 deletions(-) create mode 100644 Memory/ThreadSlab.h create mode 100644 Memory/ThreadSlab.test/0.cc create mode 100644 Memory/ThreadSlab.test/CMakeLists.txt diff --git a/Memory/Blob.h b/Memory/Blob.h index c24cd6b..5123be8 100644 --- a/Memory/Blob.h +++ b/Memory/Blob.h @@ -17,6 +17,8 @@ static_assert( __cplusplus > 2020'99 ); #include "Buffer.h" +// TODO: Put this into the `Alepha::Memory::` namespace. +// TODO: Consider whether a "republish" into `Alepha::` namespace is a good idea. namespace Alepha::Hydrogen ::detail:: Blob_m { inline namespace exports @@ -94,6 +96,9 @@ namespace Alepha::Hydrogen ::detail:: Blob_m public: ~Blob() { reset(); } + using StorageReservation= IndirectStorage; + const StorageReservation &reservation() const { return storage; } + auto swap_lens() noexcept { @@ -218,7 +223,10 @@ namespace Alepha::Hydrogen ::detail:: Blob_m * inside a large single physical backing. This helps maintain zero-copy semantics. * * @param amount The amount of data to carve off. - * @return A new `Blob` object referring to the same physical data, scoped to `amount` bytes. + * @param alignment The size alignment that the new base should be at (the extra padding is + * considered part of the resulting `Blob`). + * @return A new `Blob` object referring to the same physical data, scoped to `amount` bytes (with + * possible extra space, due to alignment). 
*/ Blob carveHead( const std::size_t amount ) diff --git a/Memory/CMakeLists.txt b/Memory/CMakeLists.txt index 0964a08..cb3030b 100644 --- a/Memory/CMakeLists.txt +++ b/Memory/CMakeLists.txt @@ -1 +1,2 @@ add_subdirectory( Blob.test ) +add_subdirectory( ThreadSlab.test ) diff --git a/Memory/DataChain.h b/Memory/DataChain.h index 6f33edd..0f5853e 100644 --- a/Memory/DataChain.h +++ b/Memory/DataChain.h @@ -52,7 +52,7 @@ namespace Alepha::inline Cavorite ::detail:: DataChain_m friend DataChain; - explicit Iterator( const ChainIter pos, cosnt std::size_t offset ) noexcept : pos( pos ), offset( offset ) {} + explicit Iterator( const ChainIter pos, const std::size_t offset ) noexcept : pos( pos ), offset( offset ) {} public: auto @@ -165,7 +165,7 @@ namespace Alepha::inline Cavorite ::detail:: DataChain_m std::copy_n( std::prev( end(), amount ), amount, rv.byte_data() ); return rv; - + } }; }; } diff --git a/Memory/ThreadSlab.h b/Memory/ThreadSlab.h new file mode 100644 index 0000000..3b12023 --- /dev/null +++ b/Memory/ThreadSlab.h @@ -0,0 +1,129 @@ +static_assert( __cplusplus > 2020'99 ); + +#pragma once + +#include + +#include + +#include + +namespace Alepha::Hydrogen::Memory ::detail:: ThreadSlab_m +{ + inline namespace exports + { + template< typename T > + class ThreadSlab; + + using ThreadSlabString= std::basic_string< char, std::char_traits< char >, ThreadSlab< char > >; + } + + namespace C + { + const std::size_t slabSize= 64 * 1024 * 1024; + + const bool debug= false; + const bool debugLifecycle= false or C::debug; + const bool debugAllocation= false or C::debug; + const bool debugDeallocation= false or C::debug; + } + + template< typename T > + class exports::ThreadSlab + { + public: + inline static thread_local Blob slab; + + public: + using value_type= T; + using propagate_on_container_copy_assignment= std::true_type; + using propagate_on_container_move_assignment= std::true_type; + using propagate_on_container_swap= std::true_type; + using is_always_equal= std::true_type; + + ThreadSlab select_on_container_copy_construction() { auto rv= ThreadSlab{}; } + + ThreadSlab()= default; + + + ThreadSlab &operator= ( const ThreadSlab &other )= default; + + ThreadSlab( const ThreadSlab &other )= default; + + ThreadSlab( ThreadSlab &&other ) : ThreadSlab( std::as_const( other ) ) {} + + ~ThreadSlab() + { + if( C::debugLifecycle ) + { + std::cerr << "Reporting " << slab.reservation().use_count() << " living allocations when " + << (void *) this << " is retired." << std::endl; + } + } + + [[nodiscard]] T * + allocate( const std::size_t amt ) + { + // TODO: Alignment needs to be handled. + const std::size_t req= amt + sizeof( Blob::StorageReservation ); + + // TODO: Larger allocations may be worth bespoke allocations, if they're rare one-off cases + if( req > C::slabSize ) throw std::bad_alloc{}; //{ "Unable to allocate larger than the slab size." }; + if( slab.size() < req ) slab.reset( std::max( req, C::slabSize ) ); + + if( C::debugAllocation ) + { + std::cerr << "Reporting " << slab.reservation().use_count() << " living allocations when " + << (void *) this << " made an allocation." << std::endl; + } + + auto next= slab.carveHead( req + sizeof( Blob::StorageReservation ) ); + const auto rv= reinterpret_cast< T * >( &next.template as< Blob::StorageReservation >() + 1 ); + + // FIXME: The placement new here is potentially unaligned -- this may significantly impact + // performance. 
+ new ( &next.template as< Blob::StorageReservation >() ) Blob::StorageReservation{ std::move( next.reservation() ) }; + + if( C::debugAllocation ) + { + std::cerr << "Reporting " << slab.reservation().use_count() << " living allocations when " + << (void *) this << " made an allocation." << std::endl; + } + + return rv; + } + + template< typename SP > + static void + destroy( SP *p ) + { + p->~SP(); + } + + void + deallocate( T *const p, const std::size_t /* ignored */ ) noexcept + { + if( C::debugDeallocation ) + { + std::cerr << "Reporting " << slab.reservation().use_count() << " living allocations when " + << (void *) this << " made a deallocation." << std::endl; + } + + auto *const hidden= reinterpret_cast< Blob::StorageReservation * >( p ) - 1; + destroy( hidden ); + + if( C::debugDeallocation ) + { + std::cerr << "Reporting " << slab.reservation().use_count() << " living allocations when " + << (void *) this << " made a deallocation." << std::endl; + } + } + + friend constexpr bool operator == ( const ThreadSlab &, const ThreadSlab & ) noexcept { return true; } + }; +} + +namespace Alepha::Hydrogen::Memory::inline exports::inline ThreadSlab_m +{ + using namespace detail::ThreadSlab_m::exports; +} diff --git a/Memory/ThreadSlab.test/0.cc b/Memory/ThreadSlab.test/0.cc new file mode 100644 index 0000000..675b9f8 --- /dev/null +++ b/Memory/ThreadSlab.test/0.cc @@ -0,0 +1,49 @@ +static_assert( __cplusplus > 2020'99 ); + +#include "../ThreadSlab.h" + +#include + +#include + +static auto init= Alepha::Utility::enroll <=[] +{ + using namespace Alepha::Testing::literals; + + using namespace Alepha::Memory::exports::ThreadSlab_m; + using String= ThreadSlabString; + + + "Check slab usage"_test <=[] + { + std::cout << "I see " << Alepha::Memory::ThreadSlab< char >::slab.reservation().use_count() << " reservations in a separate test." << +std::endl; + }; + + "Can we work with simple `ThreadSlabStrings` without errors?"_test <=[] + { + String s; + std::cerr << "s is empty" << std::endl; + + String s2= "Hello World"; + std::cerr << "small hello world string." << std::endl; + + String s3= s2 + ": and bob"; + + for( int i= 0; i < 10; ++i ) + { + std::cerr << "appended..." << std::endl; + s3= s3 + s3 + s2; + + s2= std::move( s3 ); + } + + std::cout << s3 << std::endl; + }; + + "Check slab usage"_test <=[] + { + std::cout << "I see " << Alepha::Memory::ThreadSlab< char >::slab.reservation().use_count() << " reservations in a separate test." << +std::endl; + }; +}; diff --git a/Memory/ThreadSlab.test/CMakeLists.txt b/Memory/ThreadSlab.test/CMakeLists.txt new file mode 100644 index 0000000..b099603 --- /dev/null +++ b/Memory/ThreadSlab.test/CMakeLists.txt @@ -0,0 +1 @@ +unit_test( 0 ) From 3bd236b556457637a14af1456f8ae48f365c566c Mon Sep 17 00:00:00 2001 From: ADAM David Alan Martin Date: Thu, 5 Sep 2024 18:35:39 -0400 Subject: [PATCH 2/5] Add fast random facility. This is a low-memory-overhead and low-cpu-overhead (per generated bit) random number generator. The repeat cycle is around 2**88. Which means around 2**120 bits are available before a cycle. Which means that about 2**102 12-bit samples are available before repeats. This should be more than sufficient for blob rollover purposes. If 100 million blobs are split per second (absurdly high), then that's about 2**27 per second. If run for 30 years, that's 2**30 seconds. If run across 128 CPUs, that's 2**7 CPUs. Thus 2**(27+30+7) total samples are required before loop. This is 2**64 which is WAAAY less than 2**88. 
(And this is overly conservative, as these generators should be one-per-thread... so we're really much closer to 2**57, not that it matters.) For this reason, there's no reseed code. The cycle length of mt11213b is significantly longer, however, it has a significantly larger state. One goal here is to keep the amount of state for this generator to a single cache line. As such, if the cycle length is later shown to be significantly smaller than 2**48 or so, a reseed code path may need to be added. (This is on the assumption that the above described intensive run would run for more than 1 million seconds, or about two weeks.) --- CMakeLists.txt | 1 + fastRandom.cc | 63 ++++++++++++++++++++++++++++++++++++++++++++++++++ fastRandom.h | 20 ++++++++++++++++ 3 files changed, 84 insertions(+) create mode 100644 fastRandom.cc create mode 100644 fastRandom.h diff --git a/CMakeLists.txt b/CMakeLists.txt index fa07543..b042b02 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -12,6 +12,7 @@ add_library( alepha SHARED Console.cc ProgramOptions.cc string_algorithms.cc + fastRandom.cc word_wrap.cc Thread.cc delimited_list.cc diff --git a/fastRandom.cc b/fastRandom.cc new file mode 100644 index 0000000..8e01c24 --- /dev/null +++ b/fastRandom.cc @@ -0,0 +1,63 @@ +static_assert( __cplusplus > 2023'00 ); + +#include "fastRandom.h" + +#include + +#include + +namespace Alepha::Hydrogen ::detail:: fastRandom_m +{ + namespace + { + struct FastRandomState + { + boost::random::taus88 fastRandomState{ std::random_device{}() }; + std::uint32_t pool; + int remainingBits= 0; + + void + next() + { + pool= fastRandomState(); + remainingBits= 32; + } + + void + refresh() + { + if( remainingBits == 0 ) next(); + } + + std::uint32_t + getBit() + { + refresh(); + const std::uint32_t rv= pool & 1; + --remainingBits; + pool>>= 1; + return rv; + } + + std::uint32_t + get( int count ) + { + std::uint32_t rv= 0; + while( count-- ) + { + rv<<= 1; + rv|= getBit(); + } + return rv; + } + }; + + thread_local FastRandomState fastRandom; + } + + std::uint32_t + exports::fastRandomBits( const int numBits ) + { + return fastRandom.get( numBits ); + } +} diff --git a/fastRandom.h b/fastRandom.h new file mode 100644 index 0000000..3febd23 --- /dev/null +++ b/fastRandom.h @@ -0,0 +1,20 @@ +static_assert( __cplusplus > 2023'00 ); + +#pragma once + +#include + +#include + +namespace Alepha::Hydrogen ::detail:: fastRandom_m +{ + inline namespace exports + { + std::uint32_t fastRandomBits( int numBits ); + } +} + +namespace Alepha::Hydrogen::inline exports::inline fastRandom_m +{ + using namespace detail::fastRandom_m::exports; +} From 9717ae49a479e3266aed834dc4b1f12239dd83b8 Mon Sep 17 00:00:00 2001 From: ADAM David Alan Martin Date: Thu, 5 Sep 2024 18:48:07 -0400 Subject: [PATCH 3/5] Use fast random to decide when to split Blobs. --- Memory/Blob.h | 41 +++++++++++++++++++++++++++++------------ 1 file changed, 29 insertions(+), 12 deletions(-) diff --git a/Memory/Blob.h b/Memory/Blob.h index 5123be8..1481725 100644 --- a/Memory/Blob.h +++ b/Memory/Blob.h @@ -5,10 +5,12 @@ static_assert( __cplusplus > 2020'99 ); #include #include +#include #include #include +#include #include #include @@ -30,6 +32,9 @@ namespace Alepha::Hydrogen ::detail:: Blob_m namespace C { + const auto doubleBlobPointerOption= "ALEPHA_USE_DOUBLE_BLOB_POINTERS"; + const int storageSplitRandomBitDepth= 12; // TODO: Environment tunable? Global tunable? 
+ const bool debug= false; const bool debugLifecycle= false or C::debug; const bool debugCtors= false or C::debugLifecycle or C::debug; @@ -77,20 +82,29 @@ namespace Alepha::Hydrogen ::detail:: Blob_m Buffer< Mutable > buffer; std::size_t viewLimit= 0; // TODO: Consider allowing for unrooted sub-buffer views? - // TODO: Take the `storage` parameter and make it not increment when this ctor is called -- only when the dice roll passes. + // Potentially rollover the internal storage, during a blob sharing operation. + void + maybeRollover() + { + if( not ::getenv( C::doubleBlobPointerOption ) ) return; + + if( Alepha::fastRandomBits( C::storageSplitRandomBitDepth ) ) return; + + // The inner pointer gets incremented. + std::shared_ptr< Blob > inner= *storage; + + // Capture the new inner into the outer. + storage= std::make_shared< std::shared_ptr< Blob > >( std::move( inner ) ); + + // Post rollover, there should only be one reference to start. + assert( storage.use_count() == 1 ); + } + + + // Takeover a portion of a shared block explicit Blob( IndirectStorage storage, Buffer< Mutable > buffer ) noexcept - : storage( Utility::evaluate <=[storage= std::move( storage )] () -> IndirectStorage - { - //if( fastRandomBits( C::storageSplitRandomBitDepth ) ) - return std::move( storage ); - //if( C::debugSplitSharing ) error() << "Observed a use count of " << storage.use_count() << " when we failed the dice roll." << std::endl; - //auto split= std::make_shared< std::shared_ptr< Blob > >( *storage ); - //if( C:: - //return split; - }), - buffer( buffer ), - viewLimit( buffer.size() ) + : storage( std::move( storage ) ), buffer( buffer ), viewLimit( buffer.size() ) {} public: @@ -242,7 +256,10 @@ namespace Alepha::Hydrogen ::detail:: Blob_m viewLimit= (*storage)->viewLimit; } + assert( storage ); + // Now we assume that there's a two-layer scheme, so we operate based upon that. + maybeRollover(); Blob rv{ storage, Buffer< Mutable >{ buffer, amount } }; buffer= buffer + amount; From 5efc8b79f06c4ba7dd13ecebcaacecf0b3338811 Mon Sep 17 00:00:00 2001 From: ADAM David Alan Martin Date: Fri, 6 Sep 2024 17:06:08 -0400 Subject: [PATCH 4/5] Updated thread slab with overflow protection and rewritten. 
--- Exception.h | 4 ++ Memory/CMakeLists.txt | 4 ++ Memory/ThreadSlab.cc | 96 +++++++++++++++++++++++++++++ Memory/ThreadSlab.h | 120 ++++++++++++++---------------------- Memory/ThreadSlab.test/0.cc | 6 +- 5 files changed, 153 insertions(+), 77 deletions(-) create mode 100644 Memory/ThreadSlab.cc diff --git a/Exception.h b/Exception.h index 8ad7d5c..c3532db 100644 --- a/Exception.h +++ b/Exception.h @@ -434,6 +434,7 @@ namespace Alepha::Hydrogen ::detail:: Exception_m using storage_type= AllocationAmountStorage; virtual ~AllocationAmountInterface()= default; virtual std::size_t allocationAmount() const noexcept= 0; + virtual void setAllocationAmount( std::size_t ) noexcept= 0; }; class AllocationAmountStorage : virtual public AllocationAmountInterface @@ -443,6 +444,8 @@ namespace Alepha::Hydrogen ::detail:: Exception_m public: std::size_t allocationAmount() const noexcept final { return amount; } + + void setAllocationAmount( const std::size_t amount ) noexcept { this->amount= amount; } }; class AllocationException : virtual public create_exception< struct allocation_throwable, Exception >, virtual public AllocationAmountInterface {}; @@ -496,6 +499,7 @@ namespace Alepha::Hydrogen ::detail:: Exception_m class Undergird : virtual public Kind, virtual protected GenericExceptionBridge< std::bad_alloc >, virtual protected MessageStorage, virtual protected AllocationAmountStorage, + virtual public AllocationAmountInterface, virtual public std::bad_alloc {}; diff --git a/Memory/CMakeLists.txt b/Memory/CMakeLists.txt index cb3030b..c22560b 100644 --- a/Memory/CMakeLists.txt +++ b/Memory/CMakeLists.txt @@ -1,2 +1,6 @@ add_subdirectory( Blob.test ) add_subdirectory( ThreadSlab.test ) + +target_sources( alepha PRIVATE + ThreadSlab.cc +) diff --git a/Memory/ThreadSlab.cc b/Memory/ThreadSlab.cc new file mode 100644 index 0000000..8049e97 --- /dev/null +++ b/Memory/ThreadSlab.cc @@ -0,0 +1,96 @@ +static_assert( __cplusplus > 2023'00 ); + +#include "ThreadSlab.h" + +#include + +namespace Alepha::Hydrogen::Memory ::detail:: ThreadSlab_m +{ + namespace + { + namespace C + { + const std::size_t slabSize= 64 * 1024 * 1024; + + const bool debug= false; + const bool debugLifecycle= false or C::debug; + const bool debugAllocation= false or C::debug; + const bool debugDeallocation= false or C::debug; + } + + template< typename SP > + void + destroy( SP *p ) + { + p->~SP(); + } + + namespace storage + { + thread_local Blob slab; + } + } + + [[nodiscard]] + void * + shim::allocate( std::size_t amt ) + { + // TODO: Alignment needs to be handled. + const std::size_t req= amt + sizeof( Blob::StorageReservation ); + + // TODO: Larger allocations may be worth bespoke allocations, if they're rare one-off cases + if( req > C::slabSize ) + { + auto exc= build_exception< AllocationError >( "Unable to allocate larger than the slab size." ); + //exc.setAllocationAmount( req ); + throw exc; + } + if( slab().size() < req ) slab().reset( std::max( req, C::slabSize ) ); + + if( C::debugAllocation ) + { + std::cerr << "Reporting " << slab().reservation().use_count() << " living allocations when " + << (void *) &slab << " made an allocation." << std::endl; + } + + auto next= slab().carveHead( req ); + void *const rv= &next.as< Blob::StorageReservation >() + 1; + + // FIXME: The placement new here is potentially unaligned -- this may significantly impact + // performance. It is also non-portable. 
+ new ( &next.template as< Blob::StorageReservation >() ) Blob::StorageReservation{ std::move( next.reservation() ) }; + + if( C::debugAllocation ) + { + std::cerr << "Reporting " << slab().reservation().use_count() << " living allocations when " + << (void *) &slab() << " made an allocation." << std::endl; + } + + return rv; + } + + void + shim::deallocate( void *p ) noexcept + { + if( C::debugDeallocation ) + { + std::cerr << "Reporting " << slab().reservation().use_count() << " living allocations when " + << (void *) &slab() << " made a deallocation." << std::endl; + } + + auto *const hidden= reinterpret_cast< Blob::StorageReservation * >( p ) - 1; + destroy( hidden ); + + if( C::debugDeallocation ) + { + std::cerr << "Reporting " << slab().reservation().use_count() << " living allocations when " + << (void *) &slab() << " made a deallocation." << std::endl; + } + } + + Blob & + shim::slab() + { + return storage::slab; + } +} diff --git a/Memory/ThreadSlab.h b/Memory/ThreadSlab.h index 3b12023..9d7750c 100644 --- a/Memory/ThreadSlab.h +++ b/Memory/ThreadSlab.h @@ -5,35 +5,58 @@ static_assert( __cplusplus > 2020'99 ); #include #include +#include +#include #include namespace Alepha::Hydrogen::Memory ::detail:: ThreadSlab_m { - inline namespace exports + inline namespace exports {} + + namespace exports::inline ThreadSlab { template< typename T > - class ThreadSlab; + class Allocator; - using ThreadSlabString= std::basic_string< char, std::char_traits< char >, ThreadSlab< char > >; + using String= std::basic_string< char, std::char_traits< char >, Allocator< char > >; + + template< typename T > + using Vector= std::vector< T, Allocator< T > >; + + template< typename K, typename V, typename Compare= std::less< K > > + using Map= std::map< K, V, Allocator< std::pair< const K, V > > >; } - namespace C + namespace shim { - const std::size_t slabSize= 64 * 1024 * 1024; + [[nodiscard]] void *allocate( std::size_t ); + void deallocate( void * ) noexcept; - const bool debug= false; - const bool debugLifecycle= false or C::debug; - const bool debugAllocation= false or C::debug; - const bool debugDeallocation= false or C::debug; + Blob &slab(); } template< typename T > - class exports::ThreadSlab + struct BlockFromSlab { - public: - inline static thread_local Blob slab; + std::byte raw[ sizeof( T ) ]; + [[nodiscard]] static void * + operator new [] ( const std::size_t sz ) + { + return shim::allocate( sz ); + } + + static void + operator delete [] ( void *const p ) noexcept + { + return shim::deallocate( p ); + } + }; + + template< typename T > + class exports::Allocator + { public: using value_type= T; using propagate_on_container_copy_assignment= std::true_type; @@ -41,85 +64,34 @@ namespace Alepha::Hydrogen::Memory ::detail:: ThreadSlab_m using propagate_on_container_swap= std::true_type; using is_always_equal= std::true_type; - ThreadSlab select_on_container_copy_construction() { auto rv= ThreadSlab{}; } + Allocator select_on_container_copy_construction() { auto rv= Allocator{}; } - ThreadSlab()= default; + Allocator()= default; - ThreadSlab &operator= ( const ThreadSlab &other )= default; + Allocator &operator= ( const Allocator &other )= default; - ThreadSlab( const ThreadSlab &other )= default; + Allocator( const Allocator &other )= default; - ThreadSlab( ThreadSlab &&other ) : ThreadSlab( std::as_const( other ) ) {} + Allocator( Allocator &&other ) : Allocator( std::as_const( other ) ) {} - ~ThreadSlab() - { - if( C::debugLifecycle ) - { - std::cerr << "Reporting " << 
slab.reservation().use_count() << " living allocations when " - << (void *) this << " is retired." << std::endl; - } - } + ~Allocator()= default; + + static Blob &slab() { return shim::slab(); } [[nodiscard]] T * allocate( const std::size_t amt ) { - // TODO: Alignment needs to be handled. - const std::size_t req= amt + sizeof( Blob::StorageReservation ); - - // TODO: Larger allocations may be worth bespoke allocations, if they're rare one-off cases - if( req > C::slabSize ) throw std::bad_alloc{}; //{ "Unable to allocate larger than the slab size." }; - if( slab.size() < req ) slab.reset( std::max( req, C::slabSize ) ); - - if( C::debugAllocation ) - { - std::cerr << "Reporting " << slab.reservation().use_count() << " living allocations when " - << (void *) this << " made an allocation." << std::endl; - } - - auto next= slab.carveHead( req + sizeof( Blob::StorageReservation ) ); - const auto rv= reinterpret_cast< T * >( &next.template as< Blob::StorageReservation >() + 1 ); - - // FIXME: The placement new here is potentially unaligned -- this may significantly impact - // performance. - new ( &next.template as< Blob::StorageReservation >() ) Blob::StorageReservation{ std::move( next.reservation() ) }; - - if( C::debugAllocation ) - { - std::cerr << "Reporting " << slab.reservation().use_count() << " living allocations when " - << (void *) this << " made an allocation." << std::endl; - } - - return rv; - } - - template< typename SP > - static void - destroy( SP *p ) - { - p->~SP(); + return reinterpret_cast< T * >( new BlockFromSlab< T >[ amt ] ); } void deallocate( T *const p, const std::size_t /* ignored */ ) noexcept { - if( C::debugDeallocation ) - { - std::cerr << "Reporting " << slab.reservation().use_count() << " living allocations when " - << (void *) this << " made a deallocation." << std::endl; - } - - auto *const hidden= reinterpret_cast< Blob::StorageReservation * >( p ) - 1; - destroy( hidden ); - - if( C::debugDeallocation ) - { - std::cerr << "Reporting " << slab.reservation().use_count() << " living allocations when " - << (void *) this << " made a deallocation." << std::endl; - } + return BlockFromSlab< T >::operator delete [] ( p ); } - friend constexpr bool operator == ( const ThreadSlab &, const ThreadSlab & ) noexcept { return true; } + friend constexpr bool operator == ( const Allocator &, const Allocator & ) noexcept { return true; } }; } diff --git a/Memory/ThreadSlab.test/0.cc b/Memory/ThreadSlab.test/0.cc index 675b9f8..07f5d33 100644 --- a/Memory/ThreadSlab.test/0.cc +++ b/Memory/ThreadSlab.test/0.cc @@ -11,12 +11,12 @@ static auto init= Alepha::Utility::enroll <=[] using namespace Alepha::Testing::literals; using namespace Alepha::Memory::exports::ThreadSlab_m; - using String= ThreadSlabString; + using String= ThreadSlab::String; "Check slab usage"_test <=[] { - std::cout << "I see " << Alepha::Memory::ThreadSlab< char >::slab.reservation().use_count() << " reservations in a separate test." << + std::cout << "I see " << Alepha::Memory::detail::ThreadSlab_m::shim::slab().reservation().use_count() << " reservations in a separate test." << std::endl; }; @@ -43,7 +43,7 @@ std::endl; "Check slab usage"_test <=[] { - std::cout << "I see " << Alepha::Memory::ThreadSlab< char >::slab.reservation().use_count() << " reservations in a separate test." << + std::cout << "I see " << Alepha::Memory::detail::ThreadSlab_m::shim::slab().reservation().use_count() << " reservations in a separate test." 
<< std::endl; }; }; From 54edf41d9644e82a0423cb4491b361a7fc7988dc Mon Sep 17 00:00:00 2001 From: ADAM David Alan Martin Date: Fri, 6 Sep 2024 17:31:14 -0400 Subject: [PATCH 5/5] Fix building of `Memory/Buffer.h` --- Memory/Buffer.h | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/Memory/Buffer.h b/Memory/Buffer.h index 0cb2883..ed96ec0 100644 --- a/Memory/Buffer.h +++ b/Memory/Buffer.h @@ -2,7 +2,7 @@ static_assert( __cplusplus > 2020'99 ); #pragma once -#include +#include #include #include @@ -15,15 +15,14 @@ static_assert( __cplusplus > 2020'99 ); #include #include -#include #include +#include +#include +#include +#include #include -#include "Concepts.h" -#include "assertion.h" -#include "Capabilities.h" - namespace Alepha::Hydrogen ::detail:: Buffer_m {
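
Illustrative usage sketch (not part of the patch series): the snippet below shows the allocate-in-one-thread, retire-in-another pattern that PATCH 1/5 describes, spelled with the `ThreadSlab::String` alias introduced in PATCH 4/5. The include path is an assumption (the unit test in Memory/ThreadSlab.test/0.cc uses a relative `#include "../ThreadSlab.h"`); the `using` directives mirror that test.

#include "Memory/ThreadSlab.h"  // Path assumed; the unit test uses a relative include.

#include <thread>
#include <utility>
#include <vector>

int
main()
{
	using namespace Alepha::Memory::exports::ThreadSlab_m;
	using String= ThreadSlab::String;

	// Build a batch of strings on this thread's 64 MiB `thread_local` slab.  Each
	// allocation carves a region out of the slab and hides a reference-counted
	// `Blob::StorageReservation` cookie just before the returned pointer, pinning
	// the backing storage alive.
	std::vector< String > batch;
	for( int i= 0; i < 1000; ++i )
	{
		batch.emplace_back( 64, 'x' );  // Long enough to defeat SSO and actually hit the slab.
	}

	// Retire the batch on a different thread.  Destroying each string runs the hidden
	// cookie's destructor -- roughly one atomic decrement -- instead of a round trip
	// through the global allocator.
	std::thread consumer
	{
		[batch= std::move( batch )] () mutable
		{
			batch.clear();  // Cross-thread deallocation happens here.
		}
	};
	consumer.join();
}

The layout behind each allocation is `[ Blob::StorageReservation cookie ][ user data ... ]`; `deallocate()` recovers the cookie by stepping one `StorageReservation` back from the user pointer and destroying it, which is what ties the slab's lifetime to the allocations handed out from it.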
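
A second, smaller sketch for the fast random facility from PATCH 2/5 and its use in PATCH 3/5: `Blob::maybeRollover()` only splits the shared storage when a 12-bit draw from `fastRandomBits()` comes up zero, i.e. roughly once per 4096 sharing operations, and only when the `ALEPHA_USE_DOUBLE_BLOB_POINTERS` environment variable is set. The include path below is assumed; the header is added at the repository root.

#include "fastRandom.h"  // Path assumed; the file lives at the repository root per PATCH 2/5.

#include <iostream>

int
main()
{
	// Count how often the dice roll that gates Blob storage rollover would succeed.
	int rollovers= 0;
	for( int i= 0; i < 1'000'000; ++i )
	{
		if( Alepha::fastRandomBits( 12 ) == 0 ) ++rollovers;
	}

	// Expect a value near 1'000'000 / 4096, i.e. about 244.
	std::cout << "rollovers: " << rollovers << std::endl;
}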