1
0
forked from Alepha/Alepha

Relocate Blob, Buffer, and DataChain to Memory.

This commit is contained in:
2024-05-16 13:36:47 -04:00
parent f47b942e28
commit 94b0a1561b
7 changed files with 7 additions and 6 deletions

500
Memory/Blob.h Normal file
View File

@ -0,0 +1,500 @@
static_assert( __cplusplus > 2020'99 );
#pragma once
#include <Alepha/Alepha.h>
#include <cassert>
#include <memory>
#include <Alepha/swappable.h>
#include <Alepha/Exception.h>
#include <Alepha/error.h>
#include <Alepha/IOStreams/String.h>
#include <Alepha/Utility/evaluation_helpers.h>
#include "Buffer.h"
namespace Alepha::Hydrogen ::detail:: Blob_m
{
inline namespace exports
{
class Blob;
class DataCarveTooLargeError;
class DataCarveOutOfRangeError;
}
namespace C
{
	// Compile-time debug switches.  `constexpr` (rather than plain `const`)
	// guarantees usability in constant expressions and zero storage; the
	// redundant `false or` chains are folded away.
	constexpr bool debug= false;
	constexpr bool debugLifecycle= debug;
	constexpr bool debugCtors= debugLifecycle or debug;
	constexpr bool debugAssignment= debugLifecycle or debug;
	constexpr bool debugSwap= debugLifecycle or debug;
	constexpr bool debugSplitSharing= debug;
	constexpr bool debugInteriorCarve= debug;
}
using std::begin, std::end;
using IOStreams::stringify;
/*!
 * Thrown when a carve operation requests more bytes than the `Blob` has available.
 *
 * Virtually inherits `Buffer_m::OutOfRangeError`, so this most-derived class is
 * responsible for initializing the `std::out_of_range` virtual base with the message.
 */
class exports::DataCarveTooLargeError
	: public virtual Buffer_m::OutOfRangeError
{
	public:
		explicit
		DataCarveTooLargeError( const void *const location, const std::size_t request, const std::size_t available )
			: std::out_of_range( "Tried to carve " + stringify( request ) + " bytes from `Blob` object at location "
					+ stringify( location ) + " which only has " + stringify( available ) + " bytes allocated." ),
			OutOfRangeError( location, request, available )
		{}
};
/*!
 * Thrown when a carve operation refers to a position outside the `Blob` object's bounds.
 *
 * Virtually inherits `Buffer_m::OutOfRangeError`, so this most-derived class is
 * responsible for initializing the `std::out_of_range` virtual base with the message.
 */
class exports::DataCarveOutOfRangeError
	: public virtual Buffer_m::OutOfRangeError
{
	public:
		explicit
		DataCarveOutOfRangeError( const void *const location, const std::size_t request, const std::size_t available )
			// NOTE(review): the message previously duplicated `DataCarveTooLargeError`'s
			// text verbatim; reworded to describe the out-of-range condition this type reports.
			: std::out_of_range( "Tried to carve at offset " + stringify( request ) + " which is out of range for `Blob` object at location "
					+ stringify( location ) + " which only has " + stringify( available ) + " bytes allocated." ),
			OutOfRangeError( location, request, available )
		{}
};
/*!
 * An owning, sharable block of mutable bytes.
 *
 * A `Blob` either solely owns its arena (a `new[]`-allocated, zero-initialized byte
 * array) or shares ownership of an arena with other `Blob` objects produced by
 * "carving".  Sharing uses a two-layer scheme: all sharers hold an `IndirectStorage`
 * handle referencing a hidden heap-allocated `Blob` which solely owns the arena.
 */
class exports::Blob
	: public BufferModel< Blob >, public swappable
{
	private:
		using IndirectStorage= std::shared_ptr< std::shared_ptr< Blob > >;

		IndirectStorage storage; // If this is empty, then this `Blob` object doesn't share ownership.  This references the shared pool.
		Buffer< Mutable > buffer;
		std::size_t viewLimit= 0; // TODO: Consider allowing for unrooted sub-buffer views?

		// Build a sharing `Blob` view over (part of) an already-shared arena.
		// TODO: Take the `storage` parameter and make it not increment when this ctor is called -- only when the dice roll passes.
		explicit
		Blob( IndirectStorage storage, Buffer< Mutable > buffer ) noexcept
			: storage( Utility::evaluate <=[storage= std::move( storage )] () -> IndirectStorage
			{
				//if( fastRandomBits( C::storageSplitRandomBitDepth ) )
				return std::move( storage );
				//if( C::debugSplitSharing ) error() << "Observed a use count of " << storage.use_count() << " when we failed the dice roll." << std::endl;
				//auto split= std::make_shared< std::shared_ptr< Blob > >( *storage );
				//if( C::
				//return split;
			}),
			buffer( buffer ),
			viewLimit( buffer.size() )
		{}

	public:
		~Blob() { reset(); }

		//! Lens used by the `swappable` capability to swap all state members at once.
		auto
		swap_lens() noexcept
		{
			if( C::debugSwap ) error() << "Swap lens called." << std::endl;
			return swap_magma( storage, buffer, viewLimit );
		}

		/*!
		 * Release this `Blob` object's claim on its arena and become empty.
		 *
		 * When the arena is solely owned it is deallocated immediately; when it
		 * is shared, only this object's share is released.
		 *
		 * @note: No data are copied.
		 */
		void
		reset() noexcept
		{
			if( not storage ) delete [] buffer.byte_data();
			else storage.reset();
			buffer= {};
			viewLimit= 0;
		}

		/*!
		 * Allocate a new arena of specified size and release the old arena.
		 *
		 * This function has the strong guarantee -- if the allocation fails, the
		 * old arena is still allocated.
		 *
		 * @param size The size of the new arena to allocate.
		 *
		 * @note: No data are copied.
		 */
		void
		reset( const std::size_t size )
		{
			Blob tmp{ size };
			swap( tmp, *this );
		}

		// Copy deep copies the data.  The copy never shares the source's arena.
		Blob( const Blob &copy )
			: buffer( new std::byte[ copy.size() ], copy.size() ),
			viewLimit( copy.size() )
		{
			if( C::debugCtors ) error() << "Blob copy invoked." << std::endl;
			copyData( *this, copy );
		}

		Blob( Blob &&orig ) noexcept { swap( *this, orig ); }

		//! Build a `Blob` by copying the byte range `[ first, last )`.
		template< typename ByteIterator >
		explicit
		Blob( ByteIterator first, ByteIterator last )
			: Blob( std::distance( first, last ) )
		{
			std::copy( first, last, byte_data() );
		}

		// Move assignment
		Blob &
		operator= ( Blob &&orig ) noexcept
		{
			Blob temp= std::move( orig );
			swap( *this, temp );
			return *this;
		}

		// Copy assignment -- reuses the current arena when it is large enough.
		Blob &
		operator= ( const Blob &source )
		{
			if( buffer.size() < source.size() ) reset( source.size() );
			else viewLimit= source.size();
			copyData( *this, source );
			return *this;
		}

		//! Adjust the visible size; may not exceed the allocated capacity.
		void
		setSize( const std::size_t size )
		{
			if( size > buffer.size() ) throw std::runtime_error( "Cannot size `Blob` to a larger size than its allocated buffer." );
			viewLimit= size;
		}

		//! Allocate a zero-initialized arena of `amount` bytes.
		explicit
		Blob( const std::size_t amount )
			: buffer( new std::byte[ amount ]{}, amount ), // The data must be 0'ed upon allocation.
			viewLimit( amount )
		{}

		//! Build a `Blob` by copying the contents of an arbitrary buffer.
		explicit
		Blob( const Buffer< Const > b )
			: Blob( b.size() )
		{
			copyData( buffer, b );
		}

		Blob() noexcept= default;

		// Buffer Model adaptors:
		constexpr operator Buffer< Mutable > () noexcept { return { buffer, viewLimit }; }
		constexpr operator Buffer< Const > () const noexcept { return { buffer, viewLimit }; }

		// Carving functions:

		/*!
		 * Carve the head off of a `Blob` object.
		 *
		 * "Carving" a `Blob` object splits it into two different `Blob` objects, each sharing and keeping alive
		 * the original physical memory backing the source `Blob` object.  The return value of a "carve"
		 * operation is a new `Blob` object of the requested size.  When the original `Blob` is "carved", it
		 * will shrink itself down by the requested number of bytes.
		 *
		 * Carving is very useful to maintain a large number of `Blob` objects referring to small chunks of data
		 * inside a large single physical backing.  This helps maintain zero-copy semantics.
		 *
		 * @param amount The amount of data to carve off.
		 * @return A new `Blob` object referring to the same physical data, scoped to `amount` bytes.
		 * @throws DataCarveTooLargeError when `amount` exceeds this object's size.
		 */
		Blob
		carveHead( const std::size_t amount )
		{
			if( amount > size() ) throw DataCarveTooLargeError( data(), amount, size() );
			if( not storage )
			{
				// If there's no two-layer scheme, we have to start the sharing...
				storage= std::make_shared< std::shared_ptr< Blob > >( std::make_shared< Blob >( std::move( *this ) ) );

				// Now that it's shared, we repoint ourselves at the invisible `Blob` above.
				buffer= (*storage)->buffer;
				viewLimit= (*storage)->viewLimit;
			}

			// Now we assume that there's a two-layer scheme, so we operate based upon that.
			Blob rv{ storage, Buffer< Mutable >{ buffer, amount } };
			buffer= buffer + amount;
			viewLimit-= amount;
			if( size() == 0 ) *this= Blob{};
			return rv;
		}

		/*!
		 * Carve the tail off of a `Blob` object.
		 *
		 * @see `Blob::carveHead`
		 *
		 * @param amount The amount of data to carve off.
		 * @return A new `Blob` object referring to the same physical data, scoped to `amount` bytes.
		 * @throws DataCarveTooLargeError when `amount` exceeds this object's size.
		 */
		Blob
		carveTail( const std::size_t amount )
		{
			if( amount > this->size() ) throw DataCarveTooLargeError( data(), amount, size() );
			Blob temp= carveHead( size() - amount );
			swap( *this, temp );
			return temp;
		}

		// Assorted helpers:

		template< typename T > void operator []( T ) const= delete;
		template< typename T > void operator []( T )= delete;

		//! Total bytes in the backing view, including any unused tail capacity.
		constexpr std::size_t capacity() const noexcept { return buffer.size(); }

		//! True when `other`'s bytes immediately follow this object's bytes in the same shared arena.
		bool
		isContiguousWith( const Blob &other ) const & noexcept
		{
			// Fix: also require `other.storage` to be engaged -- dereferencing an empty
			// `shared_ptr` (e.g. when `other` is empty or solely owning) is undefined behaviour.
			return
			(
				storage != nullptr
				and
				other.storage != nullptr
				and
				*storage == *other.storage
				and
				byte_data() + size() == other.byte_data()
			);
		}

		/*!
		 * This function returns a proof that two `Blob` objects are contiguous.
		 *
		 * The proof object can be checked to prove that two `Blob`s are contiguous,
		 * and the `compose` operation on the proof object can be used to
		 * actually compose them -- that will cause the `other` `Blob` to be moved
		 * from, thus leaving it empty.
		 */
		auto
		isContiguousWith( Blob &&other ) & noexcept
		{
			class ContiguousProof
			{
				private:
					bool result;
					Blob &self;
					Blob &other;

					friend Blob;

					explicit constexpr
					ContiguousProof( const bool result, Blob &self, Blob &other )
						: result( result ), self( self ), other( other ) {}

				public:
					constexpr operator bool () const noexcept { return result; }

					//! Merge `other` into `self`; only callable when the proof holds.
					void
					compose() const noexcept
					{
						assert( result );
						self= Blob{ self.storage, Buffer< Mutable >{ self.data(), self.size() + other.size() } };
						other.reset();
					}
			};

			return ContiguousProof{ std::as_const( *this ).isContiguousWith( other ), *this, other };
		}

		constexpr friend std::size_t mailboxWeight( const Blob &b ) noexcept { return b.size(); }

		/*!
		 * Determine whether some data can be appended to this `Blob` object.
		 *
		 * When this function returns `true`, a call to `concatenate` will return an empty `Buffer`.
		 *
		 * Because `Blob` objects can have unused capacity, sometimes it's possible to copy more data into that
		 * area without having to reallocate.  This function returns true when that is possible and false
		 * otherwise.
		 *
		 * @param buffer The data buffer to check if it will fit.
		 * @return `true` if the data will fit and `false` otherwise.
		 */
		bool
		couldConcatenate( const Buffer< Const > buffer ) const noexcept
		{
			return buffer.size() <= ( capacity() - size() );
		}

		/*!
		 * Determine whether some `Blob` can be appended to this `Blob` object.
		 *
		 * When this function returns `true`, a call to `concatenate` will return an empty `Blob` object.
		 *
		 * Because `Blob` objects can have unused capacity, sometimes it's possible to copy more data into that
		 * area without having to reallocate.  Additionally, `Blob` objects created by carving can reference
		 * contiguous parts of the same buffer, so concatenation can be accomplished by shifting ownership
		 * instead of copying.  This function returns true when a non-allocating append is possible and false
		 * otherwise.
		 *
		 * @param data The data buffer to check if it will fit.
		 * @return `true` if the data will fit and `false` otherwise.
		 */
		bool
		couldConcatenate( const Blob &data ) const noexcept
		{
			return isContiguousWith( data ) or couldConcatenate( Buffer< Const >{ data } );
		}

		/*!
		 * Append, without reallocation, as much data as possible from the argument.
		 *
		 * Because `Blob` objects can have unused capacity, this unused space can be used to store more data
		 * without reallocation.  This function copies as much data as will fit, but it will not allocate more
		 * storage.  The `Buffer` which is returned (as `[[nodiscard]]`) refers to the uncopied range of
		 * data from the input.  The returned `Buffer` has the same access level (`Constness`) as the
		 * parameter.
		 *
		 * @note When `couldConcatenate` is true, the returned `Buffer` is always empty.
		 *
		 * @param data The data buffer to copy from.
		 * @return A `Buffer` over the remaining uncopied data.
		 */
		template< Constness constness >
		[[nodiscard]] Buffer< constness >
		concatenate( const Buffer< constness > data ) noexcept
		{
			const auto amount= std::min( capacity() - size(), data.size() );
			const Buffer< Const > fitted{ data, amount };
			copyData( buffer + size(), fitted );
			setSize( size() + amount );
			return data + amount;
		}

		/*!
		 * Append, without reallocation, as much data as possible from the argument.
		 *
		 * Because `Blob` objects can have unused capacity, this unused space can be used to store more data
		 * without reallocation.  Further, `Blob` objects created by carving can reference contiguous parts of
		 * the same buffer, so concatenation can be accomplished by shifting ownership instead of copying.  This
		 * function attempts to compose two `Blob` objects if contiguous, otherwise it copies as much data as
		 * will fit, but it will not allocate more storage.  The `Blob` object which is returned (as
		 * `[[nodiscard]]`) refers to the uncopied range of data from the input.
		 *
		 * @note When `couldConcatenate` is true, the returned `Blob` object is always empty.
		 * @note The `Blob` to be appended must be passed by `std::move`.
		 *
		 * @param blob The `Blob` object to append.
		 * @return A `Blob` object owning the uncopied portion.
		 */
		[[nodiscard]] Blob
		concatenate( Blob &&blob ) noexcept
		{
			if( const auto proof= isContiguousWith( std::move( blob ) ) )
			{
				proof.compose();
				return Blob{};
			}
			else
			{
				const auto amount= concatenate( Buffer< Const >{ blob } ).size();
				// Fix: `rv` was `const`, which forced a deep copy on return; a non-const
				// local is moved (or elided) instead.
				auto rv= blob.carveTail( amount );
				blob.reset();
				return rv;
			}
		}

		/*!
		 * Append some data, reallocating if necessary.
		 *
		 * Copies all data from the specified `Buffer`, `data`, to the end of this `Blob` object,
		 * reallocating if necessary.  The specified `requested` is a suggested minimum allocation size.
		 * The amount allocated will be at least that much, but may be more, if more is needed.  This function
		 * does not attempt to amortize reallocation and copy across multiple calls.  When working with `Blob`
		 * objects, it is the programmer's responsibility to minimize reallocation and copy overhead.
		 *
		 * @param data The data to append.
		 * @param requested The suggested size to allocate -- the amount allocated will be at least this
		 * much.  If omitted, then allocations will be sized to fit.
		 *
		 * @note Specifying a `requested` in excess of the combined size may cause reallocation when it
		 * would otherwise not occur.
		 */
		void
		combine( const Buffer< Const > data, const std::size_t requested= 0 )
		{
			const std::size_t needed= std::max( requested, size() + data.size() );
			// Fix: the old test `needed >= requested` was tautologically true (`needed` is
			// the max of the two); compare against capacity so that a `requested` larger
			// than the current arena forces the documented reallocation.
			if( couldConcatenate( data ) and capacity() >= requested )
			{
				std::ignore= concatenate( data );
				return;
			}

			Blob tmp{ needed };
			copyData( tmp, *this );
			copyData( tmp + size(), data );
			tmp.setSize( size() + data.size() );
			using std::swap;
			swap( *this, tmp );
		}

		/*!
		 * Append some data, reallocating if necessary.
		 *
		 * Copies or composes all data from the specified `Blob` object, `blob`, to the end of this `Blob`
		 * object, reallocating if necessary.  The specified `requested` is a suggested minimum allocation
		 * size.  This `Blob` object's allocated will be at least that much, but may be more, if more is needed.
		 * This function does not attempt to amortize reallocation and copy across multiple calls.  When working
		 * with `Blob` objects, it is the programmer's responsibility to minimize reallocation and copy
		 * overhead.
		 *
		 * @note The `Blob` to be appended must be passed by `std::move`.
		 *
		 * @param blob The `Blob` object to append.
		 * @param requested The suggested size to allocate -- the amount allocated will be at least this
		 * much.  If omitted, then allocations will be sized to fit.
		 *
		 * @note Specifying a `requested` in excess of the combined size may cause reallocation when it
		 * would otherwise not occur.
		 */
		void
		combine( Blob &&blob, const std::size_t requested= 0 )
		{
			// Fix: same tautological `needed >= requested` check as the `Buffer` overload.
			if( couldConcatenate( blob ) and capacity() >= requested )
			{
				std::ignore= concatenate( std::move( blob ) );
			}
			else
			{
				combine( Buffer< Const >{ blob }, requested );
			}
			blob.reset();
		}
};
//static_assert( Capability< Blob, swappable > );
//static_assert( detail::swaps::SwapLensable< Blob > );
}
namespace Alepha::Hydrogen::inline exports::inline Blob_m
{
using namespace detail::Blob_m::exports;
}

25
Memory/Blob.test/0.cc Normal file
View File

@ -0,0 +1,25 @@
static_assert( __cplusplus > 2020'99 );
#include "../Blob.h"
#include <Alepha/Testing/test.h>
#include <Alepha/Utility/evaluation_helpers.h>
// Enroll `Blob` unit tests with the Alepha testing framework at static-initialization time.
static auto init= Alepha::Utility::enroll <=[]
{
	using namespace Alepha::Testing::literals::test_literals;

	// Carving 256 bytes off the head of a 1024-byte `Blob` must shrink the source
	// to 768 bytes and yield a 256-byte head; the carved head must remain writable.
	"Simple carve head test"_test <=[]
	{
		Alepha::Blob b{ 1024 };
		auto b2= b.carveHead( 256 );
		assert( b.size() == 768 );
		assert( b2.size() == 256 );

		std::string h= "Hello world";
		copyData( b2, Alepha::make_buffer( h ) );
	};
};

View File

@ -0,0 +1 @@
unit_test( 0 )

456
Memory/Buffer.h Normal file
View File

@ -0,0 +1,456 @@
static_assert( __cplusplus > 2020'99 );
#pragma once
#include <Alepha.h>
#include <cstddef>
#include <cstring>
#include <vector>
#include <string>
#include <array>
#include <typeinfo>
#include <typeindex>
#include <exception>
#include <stdexcept>
#include <Alepha/Constness.h>
#include <Alepha/lifetime.h>
#include <Alepha/IOStreams/String.h>
#include "Concepts.h"
#include "assertion.h"
#include "Capabilities.h"
namespace Alepha::Hydrogen ::detail:: Buffer_m
{
inline namespace exports {}
using namespace std::literals::string_literals;
using IOStreams::stringify;
namespace exports
{
/*!
 * Abstract base for buffer range-violation exceptions.
 *
 * Records the faulting base address, the requested size, and the size actually
 * available.  Inherits `std::out_of_range` virtually, so each most-derived
 * exception class supplies the human-readable message.
 */
class OutOfRangeError
	: public virtual std::out_of_range
{
	private:
		const void *const baseAddress;
		const std::size_t requestedSize;
		const std::size_t availableSize;

	protected:
		~OutOfRangeError()= 0; // Make class abstract

		// Derived (most-derived) classes must also initialize the virtual
		// `std::out_of_range` base with the message text.
		explicit
		OutOfRangeError( const void *const address, const std::size_t requestedSize, const std::size_t availableSize )
			: baseAddress( address ), requestedSize( requestedSize ), availableSize( availableSize )
		{}

	public:
		const void *getAddress() const noexcept { return baseAddress; }
		const std::size_t getRequestedSize() const noexcept { return requestedSize; }
		const std::size_t getAvailableSize() const noexcept { return availableSize; }
};
// Out-of-line default definition keeps the class abstract while still destructible.
inline OutOfRangeError::~OutOfRangeError()= default;
/*!
 * Thrown when a typed view (`Buffer::as< T >`) needs more bytes than the buffer holds.
 * Also records the `std::type_index` of the requested type.
 */
class InsufficientSizeError
	: virtual public OutOfRangeError
{
	private:
		const std::type_index typeID;

	public:
		explicit
		InsufficientSizeError( const void *const location, const std::size_t requestedSize, const std::size_t availableSize, const std::type_index &type )
			: std::out_of_range( "Tried to access an object of type "s + type.name() + " which is " + stringify( requestedSize ) + " bytes in size.  "
					+ "The request was at location " + stringify( location ) + " which only has " + stringify( availableSize )
					+ " bytes allocated" ),
			OutOfRangeError( location, requestedSize, availableSize ),
			typeID( type )
		{}
};
/*!
 * Thrown when a byte offset (e.g. from `Buffer` pointer arithmetic) lands outside
 * the buffer's extent.
 */
class OutOfRangeSizeError
	: virtual public OutOfRangeError
{
	public:
		explicit
		OutOfRangeSizeError( const void *const location, const std::ptrdiff_t requestedOffset, const std::size_t availableSpace )
			: std::out_of_range( "Tried to view a byte offset of " + stringify( requestedOffset ) + " into location " + stringify( location )
					+ " which is " + stringify( availableSpace ) + " bytes in size." ),
			OutOfRangeError( location, requestedOffset, availableSpace )
		{}
};
template< Constness constness > class Buffer;
template< typename Derived > class BufferModel;
constexpr Buffer< Mutable > copyData( Buffer< Mutable > destination, Buffer< Const > source );
constexpr void zeroData( Buffer< Mutable > buffer ) noexcept;
}
/*!
 * A non-owning view over a contiguous block of bytes.
 *
 * `Buffer< Mutable >` permits writes through the view; `Buffer< Const >` does not.
 * A `Buffer< Const >` is implicitly constructible from a `Buffer< Mutable >`, but
 * never the other way around.
 */
template< Constness constness >
class exports::Buffer
{
	public:
		using pointer_type= maybe_const_ptr_t< void, constness >;
		using const_pointer_type= const void *;
		using byte_pointer_type= maybe_const_ptr_t< std::byte, constness >;
		using const_byte_pointer_type= const std::byte *;

	private:
		byte_pointer_type ptr= nullptr;
		std::size_t bytes= 0;

	public:
		constexpr Buffer() noexcept= default;

		constexpr
		Buffer( const pointer_type ptr, const std::size_t bytes ) noexcept
			: ptr( static_cast< byte_pointer_type >( ptr ) ), bytes( bytes )
		{}

		// Implicit widening: any `Buffer` can be built from a `Buffer< Mutable >`.
		constexpr
		Buffer( const Buffer< Mutable > &copy ) noexcept
			: ptr( copy.byte_data() ), bytes( copy.size() ) {}

		// ... but a `Buffer< Mutable >` must never be built from a `Buffer< Const >`.
		template< Constness constness_= constness >
			requires( constness_ == Mutable )
		constexpr
		Buffer( const Buffer< Const > &copy ) noexcept = delete;

		constexpr byte_pointer_type byte_data() const noexcept { return ptr; }
		constexpr pointer_type data() const noexcept { return ptr; }
		constexpr const_byte_pointer_type const_byte_data() const noexcept { return ptr; }
		constexpr const_pointer_type const_data() const noexcept { return ptr; }

		constexpr std::size_t size() const noexcept { return bytes; }
		constexpr bool empty() const noexcept { return size() == 0; }

		constexpr byte_pointer_type begin() const noexcept { return byte_data(); }
		constexpr byte_pointer_type end() const noexcept { return begin() + size(); }

		constexpr const_byte_pointer_type cbegin() const noexcept { return begin(); }
		constexpr const_byte_pointer_type cend() const noexcept { return end(); }

		// Indexing is deliberately unsupported -- use `as< T >` or byte iteration.
		template< typename T > void operator[]( T ) const= delete;
		template< typename T > void operator[]( T )= delete;

		/*!
		 * Reinterpret the head of this buffer as an object of type `T`.
		 *
		 * The unchecked (`std::nothrow_t`) form only asserts the size precondition.
		 */
		template< typename T >
		constexpr std::add_lvalue_reference_t< maybe_const_t< T, constness > >
		as( std::nothrow_t ) const noexcept
		{
			assertion( sizeof( T ) <= bytes );
			// Fix: `start_lifetime_as` expects the object type itself; the old code
			// threaded an `add_lvalue_reference_t` (reference) type through it.
			return *Alepha::start_lifetime_as< maybe_const_t< T, constness > >( ptr );
		}

		/*!
		 * Reinterpret the head of this buffer as an object of type `T`.
		 *
		 * @throws InsufficientSizeError when the buffer is smaller than `sizeof( T )`.
		 */
		template< typename T >
		constexpr maybe_const_t< T &, constness >
		as() const
		{
			if( sizeof( T ) > bytes ) throw InsufficientSizeError{ ptr, sizeof( T ), bytes, typeid( T ) };
			// Fix: forward the object type, not a reference type, to the unchecked form.
			return this->as< T >( std::nothrow );
		}

		//! As `as( std::nothrow )`, but always yields a `const` view of the object.
		template< typename T >
		// Fix: return type was `add_const_t< T & >`, which collapses to non-const `T &`
		// (const applied to a reference is a no-op) and could not bind the `const T &`
		// produced below; apply `add_const_t` to `T` before forming the reference.
		constexpr std::add_lvalue_reference_t< std::add_const_t< T > >
		const_as( std::nothrow_t ) const noexcept
		{
			assertion( sizeof( T ) <= bytes );
			return *Alepha::start_lifetime_as< std::add_const_t< T > >( ptr );
		}

		//! As `as()`, but always yields a `const` view of the object.
		template< typename T >
		constexpr std::add_lvalue_reference_t< std::add_const_t< T > >
		const_as() const
		{
			if( sizeof( T ) > bytes ) throw InsufficientSizeError{ ptr, sizeof( T ), bytes, typeid( const T ) };
			return this->const_as< T >( std::nothrow );
		}

		// Implicit decay to a raw pointer, for C-API interoperability.
		constexpr operator pointer_type () const noexcept { return ptr; }

		/*!
		 * Advance the view of this `Buffer` object.
		 *
		 * Because `Buffer` objects model a pointer to a block of data which is aware of
		 * the size of that block, advancing that pointer should permit a view of the remainder
		 * of that block.
		 *
		 * A common technique in working with such blocks is to have to advance a pointer and
		 * decrease a size count.  This operator does both actions in one semantic step.
		 *
		 * @note Behaves the same as `window= window + amount`.
		 *
		 * Example use case:
		 *
		 * ```
		 * AutoRAII targetFile{[]{ return ::fopen( "output.dat", "wb" ); }, fclose };
		 * Buffer< Const > myBuf= getSomeBufferFromSomewhere();
		 * while( not myBuf.empty() )
		 * {
		 * 	const auto amtWritten= fwrite( myBuf, myBuf.size(), 1, targetFile );
		 * 	myBuf+= amtWritten;
		 * }
		 * ```
		 *
		 * In this example, the code walks through the buffer pointed to by `myBuf`.  It uses `myBuf`
		 * as a "smart pointer" which knows the end of its range.
		 *
		 * @throws OutOfRangeSizeError when `amount` exceeds the remaining size.
		 */
		Buffer &
		operator+= ( const std::size_t amount )
		{
			if( amount > bytes ) throw OutOfRangeSizeError( ptr, amount, bytes );
			ptr+= amount;
			bytes-= amount;
			return *this;
		}
};
// ADL-found `cbegin`/`cend` aspect functions for `Buffer` -- see the note about the
// deleted `std::cbegin`/`std::cend` specializations at the end of this header.
template< Constness constness >
constexpr auto
cbegin( const Buffer< constness > &buffer ) noexcept
{
	return buffer.cbegin();
}

template< Constness constness >
constexpr auto
cend( const Buffer< constness > &buffer ) noexcept
{
	return buffer.cend();
}
// Capability tag: types deriving `BufferModel` are detectable via `HasCapability`.
struct BufferModel_capability {};

// Satisfied by exactly the type (no decay applied) carrying the capability tag.
template< typename T >
concept UndecayedBufferModelable= HasCapability< T, BufferModel_capability >;

// As above, but ignores cv-qualification and reference-ness.
template< typename T >
concept BufferModelable= UndecayedBufferModelable< std::decay_t< T > >;
/*!
 * CRTP base granting buffer-like accessors to any `Derived` that provides
 * conversions to `Buffer< Mutable >` (non-const) and `Buffer< Const >` (const).
 *
 * All accessors below are implemented by first materializing the appropriate
 * `Buffer` view via those conversions.
 */
template< typename Derived >
class exports::BufferModel : public BufferModel_capability
{
	private:
		constexpr auto &crtp() noexcept { return static_cast< Derived & >( *this ); }
		constexpr const auto &crtp() const noexcept { return static_cast< const Derived & >( *this ); }

		// Const-ness of `*this` selects which `Buffer` view is produced.
		constexpr auto buffer() { return static_cast< Buffer< Mutable > >( crtp() ); }
		constexpr auto buffer() const { return static_cast< Buffer< Const > >( crtp() ); }

	public:
		constexpr auto byte_data() { return buffer().byte_data(); }
		constexpr const auto byte_data() const { return buffer().byte_data(); }

		constexpr auto data() { return buffer().data(); }
		constexpr const auto data() const { return buffer().data(); }

		constexpr decltype( auto ) cbegin() const { return buffer().cbegin(); }
		constexpr decltype( auto ) cend() const { return buffer().cend(); }

		// Note: the const overloads yield const-byte iterators via `Buffer< Const >`.
		constexpr decltype( auto ) begin() const { return buffer().begin(); }
		constexpr decltype( auto ) end() const { return buffer().end(); }

		constexpr decltype( auto ) begin() { return buffer().begin(); }
		constexpr decltype( auto ) end() { return buffer().end(); }

		constexpr std::size_t size() const { return buffer().size(); }

		// Implicit decay to raw pointers, mirroring `Buffer`'s conversion operator.
		constexpr operator void *() { return buffer(); }
		constexpr operator const void *() { return buffer(); }
		constexpr operator const void *() const { return buffer(); }

		// Indexing is deliberately unsupported -- use `as< T >` or byte iteration.
		template< typename T > void operator[]( T ) const= delete;
		template< typename T > void operator[]( T )= delete;

		template< typename T > constexpr decltype( auto ) as() const { return buffer().template as< T >(); }
		template< typename T > constexpr decltype( auto ) as() { return buffer().template as< T >(); }

		template< typename T > constexpr decltype( auto ) const_as() const { return buffer().template const_as< T >(); }
		template< typename T > constexpr decltype( auto ) const_as() { return buffer().template const_as< T >(); }
};
// Maps a buffer-like type to its `Constness`: a const-qualified buffer model is
// `Const`, and a `Buffer< constness >` reports its own template argument.  The
// primary template is declared-but-undefined, so unsupported types fail to link.
template< typename T >
extern Constness constness_of_v;

template< UndecayedBufferModelable T >
constexpr Constness constness_of_v< T >{ std::is_const_v< T > };

template< Constness constness >
constexpr Constness constness_of_v< Buffer< constness > >{ constness };
// Pointer-style arithmetic: `buffer + offset` yields a view over the remainder
// past `offset`.  Throws `OutOfRangeSizeError` when `offset` exceeds the size.
template< Constness constness >
constexpr auto
operator + ( const Buffer< constness > buffer, const std::size_t offset )
{
	if( offset > buffer.size() ) throw OutOfRangeSizeError{ buffer.data(), std::ptrdiff_t( offset ), buffer.size() };
	return Buffer< constness >{ buffer.byte_data() + offset, buffer.size() - offset };
}

// Commuted form, mirroring raw-pointer arithmetic.
template< Constness constness >
constexpr auto
operator + ( const std::size_t offset, const Buffer< constness > buffer )
{
	return buffer + offset;
}

// Compute arbitrary offsets with `BufferModel` derivatives.
template< BufferModelable Type >
constexpr auto
operator + ( Type &&item, const std::size_t offset )
{
	return static_cast< Buffer< constness_of_v< std::remove_reference_t< Type > > > >( item ) + offset;
}

constexpr auto
operator + ( const std::size_t offset, BufferModelable auto &&item )
{
	return item + offset;
}
// Build a byte view over a standard-layout aggregate; constness of the argument
// selects the constness of the resulting `Buffer`.
constexpr Buffer< Mutable >
make_buffer( Concepts::StandardLayoutAggregate auto &aggregate ) noexcept
{
	return { &aggregate, sizeof( aggregate ) };
}

constexpr Buffer< Const >
make_buffer( const Concepts::StandardLayoutAggregate auto &aggregate ) noexcept
{
	return { &aggregate, sizeof( aggregate ) };
}

// Build a byte view over a vector's contiguous element storage.
template< Concepts::StandardLayout T >
constexpr Buffer< Mutable >
make_buffer( std::vector< T > &vector ) noexcept
{
	// TODO: Do we need to consider overflow here?
	return { vector.data(), vector.size() * sizeof( T ) };
}

template< Concepts::StandardLayout T >
constexpr Buffer< Const >
make_buffer( const std::vector< T > &vector ) noexcept
{
	// TODO: Do we need to consider overflow here?
	return { vector.data(), vector.size() * sizeof( T ) };
}
// Build a byte view over a `std::array`'s storage.
template< Concepts::StandardLayout T, std::size_t size >
constexpr Buffer< Mutable >
make_buffer( std::array< T, size > &array ) noexcept
{
	// TODO: Do we need to consider overflow here?
	return { array.data(), sizeof( array ) };
}

template< Concepts::StandardLayout T, std::size_t size >
constexpr Buffer< Const >
make_buffer( const std::array< T, size > &array ) noexcept
{
	// TODO: Do we need to consider overflow here?
	return { array.data(), sizeof( array ) };
}
// Build a byte view over a C array.
template< Concepts::StandardLayout T, std::size_t size >
constexpr Buffer< Mutable >
// Fix: a `T array[ size ]` parameter decays to `T *`, so `size` was never deducible
// and `sizeof( array )` would have been the size of a pointer.  Taking the array by
// reference preserves the extent and makes the overload actually usable.
make_buffer( T ( &array )[ size ] ) noexcept
{
	// TODO: Do we need to consider overflow here?
	return { array, sizeof( array ) };
}
// Build a read-only byte view over a C array.
template< Concepts::StandardLayout T, std::size_t size >
constexpr Buffer< Const >
// Fix: a `const T array[ size ]` parameter decays to `const T *`, so `size` was never
// deducible and `sizeof( array )` would have been the size of a pointer.  Taking the
// array by reference preserves the extent and makes the overload actually usable.
make_buffer( const T ( &array )[ size ] ) noexcept
{
	// TODO: Do we need to consider overflow here?
	return { array, sizeof( array ) };
}
// Build a byte view over a string's character storage (excluding the terminator).
inline Buffer< Mutable >
make_buffer( std::string &string ) noexcept
{
	return { string.data(), string.size() };
}

inline Buffer< Const >
make_buffer( const std::string &string ) noexcept
{
	return { string.data(), string.size() };
}
/*!
 * Copy the entirety of `source` into the head of `destination`.
 *
 * @return A `Buffer` over exactly the bytes of `destination` that were written.
 * @throws InsufficientSizeError when `source` is larger than `destination`.
 */
constexpr Buffer< Mutable >
exports::copyData( const Buffer< Mutable > destination, const Buffer< Const > source )
{
	if( source.size() > destination.size() ) throw InsufficientSizeError{ destination.data(), source.size(), destination.size(), typeid( std::byte ) };
	std::memcpy( destination, source, source.size() );
	return { destination, source.size() };
}
//! Zero-fill every byte of `buffer`.
constexpr void
exports::zeroData( const Buffer< Mutable > buffer ) noexcept
{
	// Fix: use the std-qualified name -- `<cstring>` only guarantees the names in
	// namespace `std` (and this matches `copyData`'s use of `std::memcpy`).
	std::memset( buffer, 0, buffer.size() );
}
namespace exports
{
using detail::Buffer_m::make_buffer;
}
}
namespace Alepha::Hydrogen::inline exports::inline Buffer_m
{
using namespace detail::Buffer_m::exports;
}
/*
* It is not possible to explicitly specialize `std::cbegin` and `std::cend` with differing results than what they
* normally return (`decltype( std::as_const( range ).begin() )`), therefore the best we can do is just delete
* them, in the interest of preserving as much correctness as we can.
*
* This really isn't a problem, anyway, as `cbegin` and `cend` are meant to be ADL-found aspect-functions, not
* explictly called from `std::`, just like `swap`.
*
* Correct:
* ```
* using std::cbegin, std::cend;
* std::sort( cbegin( list ), cend( list ) );
* ```
*
* Incorrect:
* ```
* std::sort( std::cbegin( list ), std::cend( list ) );
* ```
*
* Because of the below deletion and the above correct/incorrect examples, it really is not a problem that they're
* deleted. In fact, it's a good thing. It will help catch incorrect usage in your code.
*/
// NOTE(review): explicitly specializing `std::` function templates for program-defined
// types is deprecated in C++20 -- confirm this deletion technique remains well-formed
// on all supported toolchains.
template<>
constexpr auto
std::cbegin( const ::Alepha::Hydrogen::Buffer< Alepha::Hydrogen::Mutable > &range ) -> decltype( range.begin() )= delete;

template<>
constexpr auto
std::cend( const ::Alepha::Hydrogen::Buffer< Alepha::Hydrogen::Mutable > &range ) -> decltype( range.end() )= delete;

1
Memory/CMakeLists.txt Normal file
View File

@ -0,0 +1 @@
# Build and register the `Blob` unit tests.
add_subdirectory( Blob.test )

171
Memory/DataChain.h Normal file
View File

@ -0,0 +1,171 @@
static_assert( __cplusplus > 2020'99 );
#pragma once
#include <deque>
#include <utility>
#include <algorithm>
#include <iterator>
#include <numeric>
#include "comparisons.h"
#include "Buffer.h"
#include "Blob.h"
namespace Alepha::inline Cavorite ::detail:: DataChain_m
{
inline namespace exports
{
class DataChain;
}
using std::begin, std::end;
/*!
 * A sequence of `Blob` objects presented as one logical stream of bytes.
 *
 * Appending contiguous carved `Blob`s re-stitches them in place (zero-copy);
 * otherwise blocks are queued.  Byte-wise iteration walks every blob in order.
 */
class exports::DataChain
{
	private:
		using Chain= std::deque< Blob >;
		Chain chain;

		template< Constness constness >
		class Iterator : comparable
		{
			public:
				using iterator_category= std::forward_iterator_tag;
				using value_type= std::byte;
				using difference_type= std::ptrdiff_t;
				// Fix: const iterators must expose const bytes; these aliases were
				// unconditionally mutable.
				using pointer= maybe_const_ptr_t< std::byte, constness >;
				using reference= maybe_const_t< std::byte &, constness >;

			private:
				using ChainIter= decltype( std::declval< maybe_const_t< Chain, constness > >().begin() );

				// Fix: this member was declared `position` while every use said `pos`.
				ChainIter pos;
				std::size_t offset;

				// Step one byte forward, hopping to the next blob at each boundary.
				// (Assumes no empty `Blob` is stored in the chain.)
				void
				advance() noexcept
				{
					if( ++offset < pos->size() ) return;
					++pos;
					offset= 0;
				}

				friend DataChain;

				// Fix: `cosnt` typo in the original parameter declaration.
				explicit Iterator( const ChainIter pos, const std::size_t offset ) noexcept : pos( pos ), offset( offset ) {}

			public:
				//! Lens over the iterator's state, used for comparison by `comparable`.
				auto
				value_lens() const noexcept( noexcept( std::tie( pos, offset ) ) )
				{
					return std::tie( pos, offset );
				}

				Iterator &operator ++() noexcept { advance(); return *this; }

				Iterator
				operator++ ( int )
					noexcept
					(
						noexcept( Iterator{ std::declval< Iterator >() } )
						and
						noexcept( advance() )
					)
				{
					// Fix: was `Iterator rv{ *this; }` -- a syntax error.
					Iterator rv{ *this };
					advance();
					return rv;
				}

				maybe_const_t< std::byte &, constness >
				operator *() const noexcept( noexcept( pos->byte_data()[ offset ] ) )
				{
					return pos->byte_data()[ offset ];
				}
		}; // Fix: the closing `};` of `Iterator` was missing before the public section.

	public:
		// Indexing is deliberately unsupported -- iterate the bytes instead.
		template< typename T > void operator []( T ) const= delete;
		template< typename T > void operator []( T )= delete;

		using iterator= Iterator< Mutable >;
		using const_iterator= Iterator< Const >;

		auto begin() noexcept { using std::begin; return iterator{ begin( chain ), 0 }; }
		auto end() noexcept { using std::end; return iterator{ end( chain ), 0 }; }

		auto begin() const noexcept { using std::begin; return const_iterator{ begin( chain ), 0 }; }
		auto end() const noexcept { using std::end; return const_iterator{ end( chain ), 0 }; }

		auto cbegin() const noexcept { return begin(); }
		auto cend() const noexcept { return end(); }

		// Please note that this non-const view form provides direct access to the chain.
		// This class doesn't store any additional state, so modification of this chain is
		// likely safe, for now.  But in the future, this could change.  Manual modification
		// of this chain is strongly discouraged.
		Chain &chain_view() noexcept { return chain; }
		const Chain &chain_view() const noexcept { return chain; }

		//! Total number of bytes across every `Blob` in the chain.  O( chain_length() ).
		std::size_t
		size() const
		{
			using std::begin, std::end;
			return std::accumulate( begin( chain_view() ), end( chain_view() ), std::size_t{},
					[] ( const std::size_t lhs, const auto &rhs ) { return lhs + rhs.size(); } );
		}

		std::size_t chain_length() const noexcept { return chain.size(); }

		// Fix: returned `std::size_t` for a boolean query.
		bool chain_empty() const noexcept { return chain.empty(); }

		void clear() noexcept { chain.clear(); }

		//! Append a `Blob`, re-stitching it in place when it is contiguous with the tail.
		//! @note The argument is moved-from (left empty) even though it binds by lvalue reference.
		void
		append( Blob &block )
		{
			// Base case is fast:
			if( chain.empty() ) return chain.push_back( std::move( block ) );

			// If we're getting a `Blob` which is contiguous we try to re-stitch them:
			if( const auto contiguous= chain.back().isContiguousWith( std::move( block ) ) ) contiguous.compose();

			// As a fallback, we just have to put it at the back of our list:
			else chain.push_back( std::move( block ) );
		}

		//! Append a copy of the bytes in `buffer`; empty buffers are ignored.
		void append( const Buffer< Const > &buffer ) { if( buffer.size() ) chain.emplace_back( buffer ); }

		//! Copy (without consuming) the first `amount` bytes into a fresh `Blob`.
		Blob
		peekHead( const std::size_t amount )
		{
			if( amount == 0 ) return Blob{};
			if( chain.empty() or size() < amount )
			{
				// TODO: Build a more specific exception for this case?
				// Fix: exception type name was misspelled (`DataCarveToLargeError`).
				throw DataCarveTooLargeError( nullptr, amount, size() );
			}

			// TODO: This should be in a common helper with part of `carveHead`'s internals:
			Blob rv{ amount };
			std::copy_n( begin(), amount, rv.byte_data() );
			return rv;
		}

		//! Copy (without consuming) the last `amount` bytes into a fresh `Blob`.
		Blob
		peekTail( const std::size_t amount )
		{
			// Fix: returned the nonexistent type `Data{}`.
			if( amount == 0 ) return Blob{};
			if( chain.empty() or size() < amount )
			{
				// TODO: Build a more specific exception for this case?
				// Fix: exception type name was misspelled (`DataCarveToLargeError`).
				throw DataCarveTooLargeError( nullptr, amount, size() );
			}

			// TODO: This should be in a common helper with part of `carveTail`'s internals:
			Blob rv{ amount };
			// Fix: `std::prev( end(), amount )` requires a bidirectional iterator, but
			// `Iterator` is forward-only; advance from `begin()` instead.
			std::copy_n( std::next( begin(), size() - amount ), amount, rv.byte_data() );
			return rv;
		}
};
}