From 7070a9364c42bce12047d9bd4f6e0314caff8bdd Mon Sep 17 00:00:00 2001 From: ADAM David Alan Martin Date: Wed, 15 May 2024 22:26:13 -0400 Subject: [PATCH] Start moving towards the alignment solution. Some of this needs to be branched out. --- Memory/Blob.h | 15 +++++++++++---- Memory/DataChain.h | 4 ++-- Memory/ThreadSlab.h | 6 +++--- 3 files changed, 16 insertions(+), 9 deletions(-) diff --git a/Memory/Blob.h b/Memory/Blob.h index 00e765e..a92f073 100644 --- a/Memory/Blob.h +++ b/Memory/Blob.h @@ -8,9 +8,10 @@ static_assert( __cplusplus > 2020'99 ); #include +#include +#include + #include "Buffer.h" -#include "swappable.h" -#include "Exception.h" //#include "threading.h" #include "error.h" @@ -221,11 +222,17 @@ namespace Alepha::Hydrogen ::detail:: Blob_m * inside a large single physical backing. This helps maintain zero-copy semantics. * * @param amount The amount of data to carve off. - * @return A new `Blob` object referring to the same physical data, scoped to `amount` bytes. + * @param alignment The size alignment that the new base should be at (the extra padding is + * considered part of the resulting `Blob`). + * @return A new `Blob` object referring to the same physical data, scoped to `amount` bytes (with + * possible extra space, due to alignment). */ Blob - carveHead( const std::size_t amount ) + carveHead( const std::size_t unalignedAmount, Alignment alignment= Alignment{ 1 } ) { + assert( std::popcount( alignment.amt ) == 1 ); + // TODO: Consider arithmetic overflow, here. 
+ const std::size_t amount= alignTo( ( reinterpret_cast< std::uintptr_t >( data() ) % alignment.amt ) + unalignedAmount, alignment ); if( amount > size() ) throw DataCarveTooLargeError( data(), amount, size() ); if( not storage ) { diff --git a/Memory/DataChain.h b/Memory/DataChain.h index 6f33edd..0f5853e 100644 --- a/Memory/DataChain.h +++ b/Memory/DataChain.h @@ -52,7 +52,7 @@ namespace Alepha::inline Cavorite ::detail:: DataChain_m friend DataChain; - explicit Iterator( const ChainIter pos, cosnt std::size_t offset ) noexcept : pos( pos ), offset( offset ) {} + explicit Iterator( const ChainIter pos, const std::size_t offset ) noexcept : pos( pos ), offset( offset ) {} public: auto @@ -165,7 +165,7 @@ namespace Alepha::inline Cavorite ::detail:: DataChain_m std::copy_n( std::prev( end(), amount ), amount, rv.byte_data() ); return rv; - + } }; }; } diff --git a/Memory/ThreadSlab.h b/Memory/ThreadSlab.h index 40d9cea..e3bd28d 100644 --- a/Memory/ThreadSlab.h +++ b/Memory/ThreadSlab.h @@ -58,15 +58,15 @@ namespace Alepha::Hydrogen::Memory ::detail:: ThreadSlab_m { // TODO: Alignment? const std::size_t req= amt + sizeof( Blob::StorageReservation ); + if( req > C::slabSize ) throw std::bad_alloc{}; //{ "Unable to allocate larger than the slab size." }; if( slab.size() < req ) slab.reset( std::max( req, C::slabSize ) ); std::cerr << "Reporting " << slab.reservation().use_count() << " living allocations when " << (void *) this << " made an allocation." 
<< std::endl; - auto next= slab.carveHead( req ); - new ( &next.template as< Blob::StorageReservation >() ) Blob::StorageReservation{ next.reservation() }; + auto next= slab.carveHead( req, Alignment{ sizeof( Blob::StorageReservation ) } ); const auto rv= reinterpret_cast< T * >( &next.template as< Blob::StorageReservation >() + 1 ); - next.reset(); + new ( &next.template as< Blob::StorageReservation >() ) Blob::StorageReservation{ std::move( next.reservation() ) }; std::cerr << "Reporting " << slab.reservation().use_count() << " living allocations when " << (void *) this << " made an allocation." << std::endl;