Commit a71a60be authored by Pietro Saccardi's avatar Pietro Saccardi

Doxygen and rename methods in allocation_pool.

parent 5a470412
......@@ -11,7 +11,7 @@ namespace mem {
allocation_table::pos_in_fragment pos = find_suitable_fragment(bytes, alignment);
if (not pos) {
// Try to allocate a new one of size bytes + alignment - 1 (to satisfy alignment)
if (allocate_new_chunk(bytes + alignment - 1)) {
if (reserve_new_chunk(bytes + alignment - 1)) {
// Try again to find the chunk, now it must be successful
pos = find_suitable_fragment(bytes, alignment);
if (not pos) {
......@@ -28,10 +28,10 @@ namespace mem {
mark_free(c);
}
bool allocation_pool::allocate_new_chunk(std::size_t bytes) {
bool allocation_pool::reserve_new_chunk(std::size_t bytes) {
// Use the end address of the last chunk as the location at which to reserve (to keep at least part of the
// memory contiguous)
if (mapped<chunk> mchunk = do_allocate_chunk(bytes, allocation_end_address()); mchunk) {
if (mapped<chunk> mchunk = do_reserve_chunk(bytes, reservation_end_address()); mchunk) {
_tot_pool_size += mchunk.size();
mark_free(mchunk.chunk());
_chunks.emplace_back(std::move(mchunk));
......
......@@ -10,20 +10,47 @@
namespace mem {
/** @brief Abstract class that bridges between a mapped memory allocator and a memory resource.
* Users of this class will ask to allocate a certain amount of bytes under some alignment constraint. This class
* keeps track of which contiguous portions of memory are reserved for allocation, and if necessary asks the
* subclasses to reserve a chunk of memory of a certain size. It will then use that to serve the allocation request.
* The subclasses may allocate chunks of memory anywhere and with any alignment or size, as long as they are big
* enough.
* @note By implementation, currently the memory chunks are stored as @ref mem::mapped objects. That means that
* as per the destructor of @ref mem::mapped, the memory will be unmapped upon destruction or freeing. It is
*      possible to extend this class to remove this constraint by e.g. templating or assigning the deallocator.
* @see allocation_table
* @see do_reserve_chunk
*/
class allocation_pool : private allocation_table {
std::vector<mapped<chunk>> _chunks;
std::size_t _tot_pool_size;
bool allocate_new_chunk(std::size_t bytes);
/** @brief Tries to reserve a new chunk of size at least @p bytes.
* @return True if the reservation succeeded.
* @see do_reserve_chunk
*/
bool reserve_new_chunk(std::size_t bytes);
[[nodiscard]] inline void *allocation_end_address() const;
/** @brief Returns the first free address that is beyond any allocated chunks.
*  This is the best position at which to reserve a new chunk contiguously with the existing ones.
*/
[[nodiscard]] inline void *reservation_end_address() const;
protected:
virtual mapped<chunk> do_allocate_chunk(std::size_t bytes, void *hint) = 0;
/** @brief Reserves a raw chunk of memory of size at least @p bytes.
* Subclasses may reserve memory of any size, as long as it's at least @p bytes, with any alignment. They may
* try to reserve memory exactly at @p hint. If it is not possible to reserve, return an empty mapped object.
* @param bytes Size in bytes to reserve (minimum)
* @param hint Preferred position for reservation
*/
virtual mapped<chunk> do_reserve_chunk(std::size_t bytes, void *hint) = 0;
public:
/** @brief Constructs an empty allocation pool.
*/
inline allocation_pool() : allocation_table{}, _tot_pool_size{0} {}
allocation_pool(allocation_pool const &) = delete;
......@@ -33,12 +60,31 @@ namespace mem {
using allocation_table::num_fragments;
using allocation_table::largest_fragment_size;
/** @brief Tries to release chunks of memory that are reserved but unused.
* Loops through all the chunks that were ever reserved by @ref do_reserve_chunk and if they are currently
* unused, releases them for other usages.
* @return The total number of bytes from the allocation pool that were freed for other applications.
*/
std::size_t try_reduce_pool_size();
/** @brief Returns the total size of the pool in bytes.
*  This is the total amount of memory reserved by @ref do_reserve_chunk upon request from @ref allocate.
*/
[[nodiscard]] inline std::size_t pool_size() const { return _tot_pool_size; }
/** @brief Returns the amount of memory that is currently in use (i.e. reserved and allocated).
*/
[[nodiscard]] inline std::size_t used_memory() const { return pool_size() - available_memory(); }
/** @brief Allocate a chunk of memory of size @p bytes bytes and with address aligned to @p alignment.
* This function may fail and return an empty chunk.
*/
chunk allocate(std::size_t bytes, std::size_t alignment = 1);
/** @brief Returns a chunk of memory to the pool for further allocations.
* @note No check is performed on @p c. The caller is responsible for making sure that all chunks are consistently
* returned.
*/
void deallocate(chunk const &c);
inline bool operator==(allocation_pool const &other) const { return _chunks == other._chunks; }
......@@ -47,7 +93,7 @@ namespace mem {
void *allocation_pool::allocation_end_address() const {
void *allocation_pool::reservation_end_address() const {
if (_chunks.empty()) {
return nullptr;
}
......
......@@ -29,7 +29,7 @@ namespace mem {
allocation_pool::deallocate({p, bytes});
}
mapped<chunk> resident_memory_resource::do_allocate_chunk(std::size_t bytes, void *hint) {
mapped<chunk> resident_memory_resource::do_reserve_chunk(std::size_t bytes, void *hint) {
static const std::size_t page_size = get_page_size();
// Round up to the next multiple of a page number
bytes = page_size * ((bytes + page_size - 1) / page_size);
......
......@@ -16,7 +16,7 @@ namespace mem {
void *do_allocate(std::size_t bytes, std::size_t alignment) override;
void do_deallocate(void *p, std::size_t bytes, std::size_t) override;
[[nodiscard]] bool do_is_equal(std::experimental::pmr::memory_resource const &other) const noexcept override;
mapped<chunk> do_allocate_chunk(std::size_t bytes, void *hint) override;
mapped<chunk> do_reserve_chunk(std::size_t bytes, void *hint) override;
public:
using allocation_pool::pool_size;
using allocation_pool::used_memory;
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment