Commit 85de0d07 authored by Pietro Saccardi

Implement memory pool and memory resource. Break down into several files.
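For illustration only (not part of the commit itself): the new resident_memory_resource is a std::experimental::pmr::memory_resource, so it can back any Library Fundamentals TS pmr container through a polymorphic_allocator. A minimal usage sketch, assuming libstdc++'s <experimental/vector> aliases are available:

#include <mem/resident_allocator.h>
#include <experimental/vector>

int main() {
    // The default resident memory resource introduced by this commit
    std::experimental::pmr::memory_resource *res = mem::default_resident_memory_resource();
    // Any pmr container can draw its storage from the resident pool via a polymorphic_allocator
    std::experimental::pmr::vector<int> samples{std::experimental::pmr::polymorphic_allocator<int>{res}};
    samples.resize(1024);  // storage comes from the pool instead of the default heap
    return 0;
}
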

parent feb1391e
Pipeline #555 passed with stage in 35 seconds
......@@ -11,4 +11,4 @@ include_directories(${PROJECT_SOURCE_DIR}/src)
add_compile_options(-Wpedantic -Wall -Werror)
add_executable(pwmpi src/main.cpp src/dma/cb.h src/bcm/base.h src/dma/ti.h src/mem/bmap.h src/dma/cs.h src/dma/dbg.h src/dma/int_enable.h src/dma/regset.h src/bcm/bcm2835.h src/mem/mman.h src/mem/pages.h src/mem/pages.cpp src/mem/mman.cpp src/mem/bus.h src/mem/mappers.h src/mem/mappers.cpp src/dma/engine.cpp src/dma/engine.h src/pwm/ctl.h src/pwm/sta.h src/pwm/dmac.h src/pwm/regset.h src/pwm/chn_fifo.h src/pwm/chn_fifo.cpp src/pwm/controller.h src/pwm/controller.cpp src/mem/bus.cpp src/mem/traits.h src/clk/ctl.h src/clk/div.h src/clk/clock.h src/clk/clock.cpp src/clk/manager.h src/clk/manager.cpp src/mem/allocator.h src/mem/allocator.cpp)
\ No newline at end of file
add_executable(pwmpi src/main.cpp src/dma/cb.h src/bcm/base.h src/dma/ti.h src/mem/bmap.h src/dma/cs.h src/dma/dbg.h src/dma/int_enable.h src/dma/regset.h src/bcm/bcm2835.h src/mem/mman.h src/mem/pages.h src/mem/pages.cpp src/mem/mman.cpp src/mem/bus.h src/mem/mappers.h src/mem/mappers.cpp src/dma/engine.cpp src/dma/engine.h src/pwm/ctl.h src/pwm/sta.h src/pwm/dmac.h src/pwm/regset.h src/pwm/chn_fifo.h src/pwm/chn_fifo.cpp src/pwm/controller.h src/pwm/controller.cpp src/mem/bus.cpp src/mem/traits.h src/clk/ctl.h src/clk/div.h src/clk/clock.h src/clk/clock.cpp src/clk/manager.h src/clk/manager.cpp src/mem/resident_allocator.h src/mem/resident_allocator.cpp src/mem/allocation_table.cpp src/mem/allocation_table.h src/mem/allocation_pool.cpp src/mem/allocation_pool.h)
\ No newline at end of file
......@@ -4,6 +4,7 @@
#include <pwm/controller.h>
#include <clk/ctl.h>
#include <clk/manager.h>
#include <mem/resident_allocator.h>
int main() {
if (not mem::default_memory_accessor(mem::access::rw)) {
......
//
// Created by Pietro Saccardi on 2019-07-13.
//
#include "allocation_pool.h"
namespace mem {
chunk allocation_pool::allocate(std::size_t bytes, std::size_t alignment) {
pfrag fragment{};
std::size_t offset_in_fragment = std::numeric_limits<std::size_t>::max();
std::tie(fragment, offset_in_fragment) = find_suitable_fragment(bytes, alignment);
if (not is_valid(fragment)) {
// Try to allocate a new one of size bytes + alignment - 1 (to satisfy alignment)
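// (rationale: the gap from any address to the next multiple of `alignment` is at most
// alignment - 1, so a fragment of bytes + alignment - 1 bytes always contains an aligned
// sub-range of at least `bytes` bytes)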
if (allocate_new_chunk(bytes + alignment - 1)) {
// Search again for a suitable fragment; this time it must succeed
std::tie(fragment, offset_in_fragment) = find_suitable_fragment(bytes, alignment);
if (not is_valid(fragment)) {
throw std::logic_error("New chunk allocated, but still no suitable fragment!");
}
} else {
return {};
}
}
return mark_used(fragment, bytes, alignment);
}
void allocation_pool::deallocate(const mem::chunk &c) {
mark_free(c);
}
bool allocation_pool::allocate_new_chunk(std::size_t bytes) {
// Use the end address of the last chunk as an allocation hint (to keep at least part of the
// memory contiguous)
if (mapped<chunk> mchunk = do_allocate_chunk(bytes, allocation_end_address()); mchunk) {
_tot_pool_size += mchunk.size();
mark_free(mchunk.chunk());
_chunks.emplace_back(std::move(mchunk));
return true;
}
return false;
}
std::size_t allocation_pool::try_reduce_pool_size() {
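// A chunk can be handed back to its mapper only when it lies entirely inside a single free
// fragment, i.e. when no live allocation overlaps it.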
std::size_t freed_memory = 0;
for (mapped<chunk> &mchunk : _chunks) {
const auto containing_pf = get_containing_fragment(mchunk.chunk());
// A fragment identifies space available for allocation
if (is_valid(containing_pf)) {
const auto offset = reinterpret_cast<std::uintptr_t>(mchunk.virt()) - containing_pf->first;
assert(offset < containing_pf->second.size());
// Mark as used, not as free, because it is used (i.e. not available) from the perspective of the table
mark_used(containing_pf, mchunk.size(), offset);
freed_memory += mchunk.size();
_tot_pool_size -= mchunk.size();
mchunk.reset();
}
}
// Remove the entries whose mapping was released above (i.e. the now-empty ones)
auto chunk_is_empty = [](mapped<chunk> const &mc) -> bool { return not bool(mc); };
const auto new_end = std::remove_if(std::begin(_chunks), std::end(_chunks), chunk_is_empty);
_chunks.erase(new_end, std::end(_chunks));
return freed_memory;
}
}
\ No newline at end of file
//
// Created by Pietro Saccardi on 2019-07-13.
//
#ifndef PWMPI_MEM_ALLOCATION_POOL_H
#define PWMPI_MEM_ALLOCATION_POOL_H
#include <mem/allocation_table.h>
namespace mem {
class allocation_pool : private allocation_table {
std::vector<mapped<chunk>> _chunks;
std::size_t _tot_pool_size;
bool allocate_new_chunk(std::size_t bytes);
inline void *allocation_end_address() const;
protected:
virtual mapped<chunk> do_allocate_chunk(std::size_t bytes, void *hint) = 0;
public:
inline allocation_pool() : allocation_table{}, _tot_pool_size{0} {}
allocation_pool(allocation_pool const &) = delete;
allocation_pool &operator=(allocation_pool const &) = delete;
using allocation_table::available_memory;
using allocation_table::num_fragments;
using allocation_table::largest_fragment_size;
std::size_t try_reduce_pool_size();
inline std::size_t pool_size() const { return _tot_pool_size; }
inline std::size_t used_memory() const { return pool_size() - available_memory(); }
chunk allocate(std::size_t bytes, std::size_t alignment = 1);
void deallocate(chunk const &c);
inline bool operator==(allocation_pool const &other) const { return _chunks == other._chunks; }
inline bool operator!=(allocation_pool const &other) const { return _chunks != other._chunks; }
};
void *allocation_pool::allocation_end_address() const {
if (_chunks.empty()) {
return nullptr;
}
return reinterpret_cast<void *>(
reinterpret_cast<std::uintptr_t>(_chunks.back().virt()) + _chunks.back().size());
}
}
#endif //PWMPI_MEM_ALLOCATION_POOL_H
//
// Created by Pietro Saccardi on 2019-07-07.
// Created by Pietro Saccardi on 2019-07-13.
//
#include "allocator.h"
#include <cassert>
#include "allocation_table.h"
namespace mem {
namespace {
resident_mapper &default_resident_mapper() {
static resident_mapper _mapper;
return _mapper;
}
}
chunk allocation_pool::get_tail_free_memory() {
if (not _chunks.empty()) {
// Try to see if a fragment contains the last allocated byte
const pfrag containing_pf = get_containing_fragment({
reinterpret_cast<void *>(reinterpret_cast<std::uintptr_t>(allocation_end_address()) - 1), 1});
if (is_valid(containing_pf)) {
return {reinterpret_cast<void *>(containing_pf->first), containing_pf->second.size()};
}
}
return {allocation_end_address(), 0};
}
bool allocation_pool::allocate_new_chunk(std::size_t req_mem) {
static const std::size_t page_size = get_page_size();
// Round up to the next multiple of the page size
req_mem = page_size * ((req_mem + page_size - 1) / page_size);
// Use the end address of the last chunk as an allocation hint (to keep at least part of the
// memory contiguous)
mapped<chunk> mchunk = default_resident_mapper().map(req_mem, allocation_end_address());
if (mchunk) {
_tot_pool_size += mchunk.size();
mark_free(mchunk.chunk());
_chunks.emplace_back(std::move(mchunk));
return true;
}
return false;
}
std::size_t allocation_pool::try_reduce_pool_size() {
std::size_t freed_memory = 0;
for (mapped<chunk> &mchunk : _chunks) {
const auto containing_pf = get_containing_fragment(mchunk.chunk());
// A fragment identifies space available for allocation
if (is_valid(containing_pf)) {
const auto offset = reinterpret_cast<std::uintptr_t>(mchunk.virt()) - containing_pf->first;
assert(offset < containing_pf->second.size());
// Mark as used, not as free, because it is used (i.e. not available) from the perspective of the table
mark_used(containing_pf, mchunk.size(), offset);
freed_memory += mchunk.size();
_tot_pool_size -= mchunk.size();
mchunk.reset();
}
}
// Remove the entries whose mapping was released above (i.e. the now-empty ones)
auto chunk_is_empty = [](mapped<chunk> const &mc) -> bool { return not bool(mc); };
const auto new_end = std::remove_if(std::begin(_chunks), std::end(_chunks), chunk_is_empty);
_chunks.erase(new_end, std::end(_chunks));
return freed_memory;
}
chunk allocation_table::mark_used(pfrag pf, std::size_t amount, std::size_t offset) {
if (offset + amount > pf->second.size()) {
throw std::invalid_argument("Cannot allocate beyond the end of the chunk");
......@@ -153,10 +95,10 @@ namespace mem {
}
std::pair<allocation_table::pfrag, std::size_t> allocation_table::find_suitable_fragment(std::size_t amount,
std::size_t alignment) const
{
std::pair<allocation_table::pfrag, std::size_t> retval{{}, std::numeric_limits<std::size_t>::max()};
if (amount <= largest_available_fragment()) {
if (amount <= largest_fragment_size()) {
auto const candidate_frags_begin = _frag_by_sz.lower_bound(amount);
// Try to satisfy the alignments by simply allocating at the end first. If no fragment has a correctly
// aligned end to allocate @p amount bytes, we then fall back on the first interval large enough to
......@@ -181,6 +123,4 @@ namespace mem {
return retval;
}
}
\ No newline at end of file
//
// Created by Pietro Saccardi on 2019-07-07.
// Created by Pietro Saccardi on 2019-07-13.
//
#ifndef PWMPI_MEM_ALLOCATOR_H
#define PWMPI_MEM_ALLOCATOR_H
#ifndef PWMPI_MEM_ALLOCATION_TABLE_H
#define PWMPI_MEM_ALLOCATION_TABLE_H
#include <mem/mappers.h>
#include <map>
#include <set>
#include <vector>
#include <mem/mman.h>
namespace mem {
class allocation_table {
class fragment;
public:
......@@ -58,44 +56,13 @@ namespace mem {
inline std::size_t num_fragments() const { return _fragments.size(); }
inline std::size_t largest_available_fragment() const;
inline std::size_t largest_fragment_size() const;
inline bool is_valid(pfrag pf) const { return pf != std::end(_fragments); }
inline bool is_valid(pfrag pf) const { return pf != std::end(_fragments) and pf != pfrag{}; }
std::pair<pfrag, std::size_t> find_suitable_fragment(std::size_t amount, std::size_t alignment = 1) const;
};
class allocation_pool : private allocation_table {
std::vector<mapped<chunk>> _chunks;
std::size_t _tot_pool_size;
bool allocate_new_chunk(std::size_t req_mem);
chunk get_tail_free_memory();
inline void *allocation_end_address() const {
if (_chunks.empty()) {
return nullptr;
}
return reinterpret_cast<void *>(
reinterpret_cast<std::uintptr_t>(_chunks.back().virt()) + _chunks.back().size());
}
public:
inline allocation_pool() : allocation_table{}, _tot_pool_size{0} {}
allocation_pool(allocation_pool const &) = delete;
allocation_pool &operator=(allocation_pool const &) = delete;
using allocation_table::available_memory;
using allocation_table::num_fragments;
using allocation_table::largest_available_fragment;
std::size_t try_reduce_pool_size();
inline std::size_t pool_size() const { return _tot_pool_size; }
inline std::size_t used_memory() const { return pool_size() - available_memory(); }
};
bool allocation_table::frag_smaller::operator()(pfrag const &l, pfrag const &r) const {
......@@ -106,10 +73,9 @@ namespace mem {
return l->second.size() < r;
}
inline std::size_t allocation_table::largest_available_fragment() const {
inline std::size_t allocation_table::largest_fragment_size() const {
return _frag_by_sz.empty() ? 0 : (*std::prev(std::end(_frag_by_sz)))->second.size();
}
}
#endif //PWMPI_MEM_ALLOCATOR_H
#endif //PWMPI_MEM_ALLOCATION_TABLE_H
//
// Created by Pietro Saccardi on 2019-07-07.
//
#include "resident_allocator.h"
#include <mem/mappers.h>
#include <new>
namespace mem {
namespace {
resident_mapper &default_resident_mapper() {
static resident_mapper _mapper;
return _mapper;
}
}
bool resident_memory_resource::do_is_equal(const std::experimental::pmr::memory_resource &other) const noexcept {
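// Per the memory_resource contract, equal resources must be able to free each other's
// allocations; two resident pools can only do that if they track the very same chunks.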
if (auto const *other_res = dynamic_cast<resident_memory_resource const *>(&other); other_res != nullptr) {
return allocation_pool::operator==(*other_res);
}
return false;
}
void *resident_memory_resource::do_allocate(std::size_t bytes, std::size_t alignment) {
void *p = allocation_pool::allocate(bytes, alignment).virt();
if (p == nullptr) {
// allocation_pool::allocate returns an empty chunk on failure; the memory_resource contract
// requires reporting that by throwing rather than by returning nullptr
throw std::bad_alloc{};
}
return p;
}
void resident_memory_resource::do_deallocate(void *p, std::size_t bytes, std::size_t) {
allocation_pool::deallocate({p, bytes});
}
mapped<chunk> resident_memory_resource::do_allocate_chunk(std::size_t bytes, void *hint) {
static const std::size_t page_size = get_page_size();
// Round up to the next multiple of the page size
bytes = page_size * ((bytes + page_size - 1) / page_size);
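// e.g. with 4096-byte pages, a request of 5000 bytes becomes 8192, while 4096 stays 4096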
return default_resident_mapper().map(bytes, hint);
}
std::experimental::pmr::memory_resource *default_resident_memory_resource() {
static resident_memory_resource _resource{};
return &_resource;
}
}
\ No newline at end of file
//
// Created by Pietro Saccardi on 2019-07-07.
//
#ifndef PWMPI_MEM_RESIDENT_ALLOCATOR_H
#define PWMPI_MEM_RESIDENT_ALLOCATOR_H
#include <mem/allocation_pool.h>
#include <experimental/memory_resource>
namespace mem {
class resident_memory_resource final : private allocation_pool, public std::experimental::pmr::memory_resource {
protected:
void *do_allocate(std::size_t bytes, std::size_t alignment) override;
void do_deallocate(void *p, std::size_t bytes, std::size_t) override;
bool do_is_equal(std::experimental::pmr::memory_resource const &other) const noexcept override;
mapped<chunk> do_allocate_chunk(std::size_t bytes, void *hint) override;
public:
using allocation_pool::pool_size;
using allocation_pool::used_memory;
using allocation_pool::available_memory;
using allocation_pool::largest_fragment_size;
using allocation_pool::try_reduce_pool_size;
using allocation_pool::num_fragments;
using memory_resource::is_equal;
using memory_resource::allocate;
using memory_resource::deallocate;
};
std::experimental::pmr::memory_resource *default_resident_memory_resource();
}
#endif //PWMPI_MEM_RESIDENT_ALLOCATOR_H