Commit 85de0d07 authored by Pietro Saccardi's avatar Pietro Saccardi
Browse files

Implement memory pool and memory resource. Break down into several files.

parent feb1391e
Loading
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -11,4 +11,4 @@ include_directories(${PROJECT_SOURCE_DIR}/src)

add_compile_options(-Wpedantic -Wall -Werror)

add_executable(pwmpi src/main.cpp src/dma/cb.h src/bcm/base.h src/dma/ti.h src/mem/bmap.h src/dma/cs.h src/dma/dbg.h src/dma/int_enable.h src/dma/regset.h src/bcm/bcm2835.h src/mem/mman.h src/mem/pages.h src/mem/pages.cpp src/mem/mman.cpp src/mem/bus.h src/mem/mappers.h src/mem/mappers.cpp src/dma/engine.cpp src/dma/engine.h src/pwm/ctl.h src/pwm/sta.h src/pwm/dmac.h src/pwm/regset.h src/pwm/chn_fifo.h src/pwm/chn_fifo.cpp src/pwm/controller.h src/pwm/controller.cpp src/mem/bus.cpp src/mem/traits.h src/clk/ctl.h src/clk/div.h src/clk/clock.h src/clk/clock.cpp src/clk/manager.h src/clk/manager.cpp src/mem/allocator.h src/mem/allocator.cpp)
 No newline at end of file
add_executable(pwmpi src/main.cpp src/dma/cb.h src/bcm/base.h src/dma/ti.h src/mem/bmap.h src/dma/cs.h src/dma/dbg.h src/dma/int_enable.h src/dma/regset.h src/bcm/bcm2835.h src/mem/mman.h src/mem/pages.h src/mem/pages.cpp src/mem/mman.cpp src/mem/bus.h src/mem/mappers.h src/mem/mappers.cpp src/dma/engine.cpp src/dma/engine.h src/pwm/ctl.h src/pwm/sta.h src/pwm/dmac.h src/pwm/regset.h src/pwm/chn_fifo.h src/pwm/chn_fifo.cpp src/pwm/controller.h src/pwm/controller.cpp src/mem/bus.cpp src/mem/traits.h src/clk/ctl.h src/clk/div.h src/clk/clock.h src/clk/clock.cpp src/clk/manager.h src/clk/manager.cpp src/mem/resident_allocator.h src/mem/resident_allocator.cpp src/mem/allocation_table.cpp src/mem/allocation_table.h src/mem/allocation_pool.cpp src/mem/allocation_pool.h)
 No newline at end of file
+1 −0
Original line number Diff line number Diff line
@@ -4,6 +4,7 @@
#include <pwm/controller.h>
#include <clk/ctl.h>
#include <clk/manager.h>
#include <mem/resident_allocator.h>

int main() {
    if (not mem::default_memory_accessor(mem::access::rw)) {
+66 −0
Original line number Diff line number Diff line
//
// Created by Pietro Saccardi on 2019-07-13.
//

#include "allocation_pool.h"

namespace mem {

    /// Returns a chunk of @p bytes bytes aligned to @p alignment, or an empty chunk if
    /// the pool could not be grown. Grows the pool by mapping a new chunk on demand.
    chunk allocation_pool::allocate(std::size_t bytes, std::size_t alignment) {
        pfrag fragment{};
        std::size_t offset_in_fragment = std::numeric_limits<std::size_t>::max();
        std::tie(fragment, offset_in_fragment) = find_suitable_fragment(bytes, alignment);
        if (not is_valid(fragment)) {
            // Try to allocate a new one of size bytes + alignment - 1 (to satisfy alignment)
            if (allocate_new_chunk(bytes + alignment - 1)) {
                // Try again to find the chunk, now it must be successful
                std::tie(fragment, offset_in_fragment) = find_suitable_fragment(bytes, alignment);
                if (not is_valid(fragment)) {
                    throw std::logic_error("New chunk allocated, but still no suitable fragment!");
                }
            } else {
                // Out of memory: signal failure with an empty chunk.
                return {};
            }
        }
        // Fix: mark_used takes (fragment, amount, offset) — see allocation_table::mark_used.
        // The original passed `alignment` here and discarded the offset computed by
        // find_suitable_fragment, allocating at the wrong position within the fragment.
        return mark_used(fragment, bytes, offset_in_fragment);
    }

    void allocation_pool::deallocate(const mem::chunk &c) {
        mark_free(c);
    }

    bool allocation_pool::allocate_new_chunk(std::size_t bytes) {
        // Use the end address of the last chunk as a place where to allocate (try to keep at least part of the mem
        // contiguous)
        if (mapped<chunk> mchunk = do_allocate_chunk(bytes, allocation_end_address()); mchunk) {
            _tot_pool_size += mchunk.size();
            mark_free(mchunk.chunk());
            _chunks.emplace_back(std::move(mchunk));
            return true;
        }
        return false;
    }

    std::size_t allocation_pool::try_reduce_pool_size() {
        std::size_t freed_memory = 0;
        for (mapped<chunk> &mchunk : _chunks) {
            const auto containing_pf = get_containing_fragment(mchunk.chunk());
            // A fragmnet identifies space available for allocation
            if (is_valid(containing_pf)) {
                const auto offset = reinterpret_cast<std::uintptr_t>(mchunk.virt()) - containing_pf->first;
                assert(offset < containing_pf->second.size());
                // Mark as used, not as free, because it is used (i.e. not available) from the perspective of the table
                mark_used(containing_pf, mchunk.size(), offset);
                freed_memory += mchunk.size();
                _tot_pool_size -= mchunk.size();
                mchunk.reset();
            }
        }
        // Remove all the elements that were deallocated
        auto chunk_is_empty = [](mapped<chunk> const &mc) -> bool { return bool(mc); };
        const auto new_end = std::remove_if(std::begin(_chunks), std::end(_chunks), chunk_is_empty);
        _chunks.resize(new_end - std::begin(_chunks));
        return freed_memory;
    }

}
 No newline at end of file
+59 −0
Original line number Diff line number Diff line
//
// Created by Pietro Saccardi on 2019-07-13.
//

#ifndef PWMPI_MEM_ALLOCATION_POOL_H
#define PWMPI_MEM_ALLOCATION_POOL_H

#include <mem/allocation_table.h>

namespace mem {


    /// Pool of mapped memory chunks, carved into allocations via an allocation_table.
    ///
    /// Subclasses provide the actual mapping strategy through @ref do_allocate_chunk;
    /// the pool keeps every mapped chunk in @ref _chunks and tracks which byte ranges
    /// are free using the (privately inherited) allocation_table.
    class allocation_pool : private allocation_table {
        std::vector<mapped<chunk>> _chunks;  // All the chunks currently mapped.
        std::size_t _tot_pool_size;          // Sum of the sizes of all mapped chunks.

        /// Maps a new chunk of at least @p bytes bytes and registers it as free.
        /// @return True if the mapping succeeded.
        bool allocate_new_chunk(std::size_t bytes);

        /// Address one past the end of the most recently mapped chunk (nullptr if none);
        /// used as a mapping hint to keep the pool contiguous.
        inline void *allocation_end_address() const;

    protected:

        /// Maps @p bytes bytes of memory, preferably at @p hint. Implemented by subclasses.
        virtual mapped<chunk> do_allocate_chunk(std::size_t bytes, void *hint) = 0;

    public:

        inline allocation_pool() : allocation_table{}, _tot_pool_size{0} {}

        // Fix: this is an abstract base (pure virtual member), so deleting a subclass
        // through an allocation_pool pointer needs a virtual destructor to be defined.
        virtual ~allocation_pool() = default;

        allocation_pool(allocation_pool const &) = delete;
        allocation_pool &operator=(allocation_pool const &) = delete;

        using allocation_table::available_memory;
        using allocation_table::num_fragments;
        using allocation_table::largest_fragment_size;

        /// Unmaps every chunk that is entirely unused; returns the number of bytes freed.
        std::size_t try_reduce_pool_size();

        /// Total number of bytes currently mapped by the pool.
        inline std::size_t pool_size() const { return _tot_pool_size; }
        /// Bytes currently in use, i.e. mapped minus available.
        inline std::size_t used_memory() const { return pool_size() - available_memory(); }

        /// Returns a chunk of @p bytes bytes aligned to @p alignment, or an empty chunk
        /// on failure. May grow the pool by mapping a new chunk.
        chunk allocate(std::size_t bytes, std::size_t alignment = 1);
        /// Marks @p c free again; the mapping is reclaimed by @ref try_reduce_pool_size.
        void deallocate(chunk const &c);

        inline bool operator==(allocation_pool const &other) const { return _chunks == other._chunks; }
        inline bool operator!=(allocation_pool const &other) const { return _chunks != other._chunks; }
    };



    void *allocation_pool::allocation_end_address() const {
        if (_chunks.empty()) {
            return nullptr;
        }
        return reinterpret_cast<void *>(
                reinterpret_cast<std::uintptr_t>(_chunks.back().virt()) + _chunks.back().size());
    }
}

#endif //PWMPI_MEM_ALLOCATION_POOL_H
+4 −64
Original line number Diff line number Diff line
//
// Created by Pietro Saccardi on 2019-07-07.
// Created by Pietro Saccardi on 2019-07-13.
//

#include "allocator.h"
#include <cassert>
#include "allocation_table.h"

namespace mem {

    namespace {
        // Process-wide mapper, lazily constructed on first use (function-local static).
        resident_mapper &default_resident_mapper() {
            static resident_mapper instance;
            return instance;
        }
    }

    // Returns the free fragment sitting at the very end of the pool, if any: when a free
    // fragment covers the last allocated byte, that whole fragment is the tail free memory.
    // Otherwise returns an empty chunk positioned at the pool's end address.
    chunk allocation_pool::get_tail_free_memory() {
        if (not _chunks.empty()) {
            const auto end_addr = reinterpret_cast<std::uintptr_t>(allocation_end_address());
            void *const last_byte = reinterpret_cast<void *>(end_addr - 1);
            const pfrag tail_pf = get_containing_fragment({last_byte, 1});
            if (is_valid(tail_pf)) {
                return {reinterpret_cast<void *>(tail_pf->first), tail_pf->second.size()};
            }
        }
        return {allocation_end_address(), 0};
    }

    bool allocation_pool::allocate_new_chunk(std::size_t req_mem) {
        static const std::size_t page_size = get_page_size();
        // Round up to the next multiple of a page number
        req_mem = page_size * ((req_mem + page_size - 1) / page_size);
        // Use the end address of the last chunk as a place where to allocate (try to keep at least part of the mem
        // contiguous)
        mapped<chunk> mchunk = default_resident_mapper().map(req_mem, allocation_end_address());
        if (mchunk) {
            _tot_pool_size += mchunk.size();
            mark_free(mchunk.chunk());
            _chunks.emplace_back(std::move(mchunk));
            return true;
        }
        return false;
    }

    std::size_t allocation_pool::try_reduce_pool_size() {
        std::size_t freed_memory = 0;
        for (mapped<chunk> &mchunk : _chunks) {
            const auto containing_pf = get_containing_fragment(mchunk.chunk());
            // A fragmnet identifies space available for allocation
            if (is_valid(containing_pf)) {
                const auto offset = reinterpret_cast<std::uintptr_t>(mchunk.virt()) - containing_pf->first;
                assert(offset < containing_pf->second.size());
                // Mark as used, not as free, because it is used (i.e. not available) from the perspective of the table
                mark_used(containing_pf, mchunk.size(), offset);
                freed_memory += mchunk.size();
                _tot_pool_size -= mchunk.size();
                mchunk.reset();
            }
        }
        // Remove all the elements that were deallocated
        auto chunk_is_empty = [](mapped<chunk> const &mc) -> bool { return bool(mc); };
        const auto new_end = std::remove_if(std::begin(_chunks), std::end(_chunks), chunk_is_empty);
        _chunks.resize(new_end - std::begin(_chunks));
        return freed_memory;
    }

    chunk allocation_table::mark_used(pfrag pf, std::size_t amount, std::size_t offset) {
        if (offset + amount > pf->second.size()) {
            throw std::invalid_argument("Cannot allocate beyond the end of the chunk");
@@ -156,7 +98,7 @@ namespace mem {
                                                                                             std::size_t alignment) const
    {
        std::pair<allocation_table::pfrag, std::size_t> retval{{}, std::numeric_limits<std::size_t>::max()};
        if (amount <= largest_available_fragment()) {
        if (amount <= largest_fragment_size()) {
            auto const candidate_frags_begin = _frag_by_sz.lower_bound(amount);
            // Try to satisfy the alignments by simply allocating at the end first. If no fragment has a correctly
            // aligned end to allocate @p amount bytes, we then fall back on the first interval large enough to
@@ -181,6 +123,4 @@ namespace mem {
        return retval;
    }



}
 No newline at end of file
Loading