// Copyright 2020 The Pigweed Authors
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not
// use this file except in compliance with the License. You may obtain a copy of
// the License at
//
//     https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations under
// the License.

#include "pw_allocator/freelist_heap.h"

#include <cstring>

namespace pw::allocator {

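// Initializes the heap to manage `region`: the whole region becomes a single
// free block, which is added to `freelist`.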
FreeListHeap::FreeListHeap(span<std::byte> region, FreeList& freelist)
    : freelist_(freelist) {
  Block* block;
  Block::Init(region, &block);

  freelist_.AddChunk(BlockToSpan(block));

  region_ = region;
}

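// Allocates at least `size` bytes and returns a pointer to the usable space,
// or nullptr if no suitable chunk is available.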
void* FreeListHeap::Allocate(size_t size) {
  // Find a chunk in the freelist. Split it if needed, then return.
  auto chunk = freelist_.FindChunk(size);

  if (chunk.data() == nullptr) {
    return nullptr;
  }
  freelist_.RemoveChunk(chunk);

  Block* chunk_block = Block::FromUsableSpace(chunk.data());

  // Split that chunk. If there's a leftover chunk, add it to the freelist.
  Block* leftover;
  auto status = chunk_block->Split(size, &leftover);
  if (status == PW_STATUS_OK) {
    freelist_.AddChunk(BlockToSpan(leftover));
  }

  chunk_block->MarkUsed();

  return chunk_block->UsableSpace();
}

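// Returns the block containing `ptr` to the freelist. Pointers outside the
// managed region and blocks that are not in use are ignored; neighboring free
// blocks are merged before the block is added back.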
void FreeListHeap::Free(void* ptr) {
  std::byte* bytes = static_cast<std::byte*>(ptr);

  if (bytes < region_.data() || bytes >= region_.data() + region_.size()) {
    return;
  }

  Block* chunk_block = Block::FromUsableSpace(bytes);
  // Ensure that the block is in use.
  if (!chunk_block->Used()) {
    return;
  }
  chunk_block->MarkFree();
  // Can we combine with the left or right blocks?
  Block* prev = chunk_block->PrevBlock();
  Block* next = nullptr;

  if (!chunk_block->Last()) {
    next = chunk_block->NextBlock();
  }

  if (prev != nullptr && !prev->Used()) {
    // Remove from the freelist and merge.
    freelist_.RemoveChunk(BlockToSpan(prev));
    chunk_block->MergePrev();

    // chunk_block is now invalid; prev now encompasses it.
    chunk_block = prev;
  }

  if (next != nullptr && !next->Used()) {
    freelist_.RemoveChunk(BlockToSpan(next));
    chunk_block->MergeNext();
  }
  // Add back to the freelist.
  freelist_.AddChunk(BlockToSpan(chunk_block));
}

// Follows the contract of the C standard realloc() function.
// If size is zero, ptr is freed and nullptr is returned.
void* FreeListHeap::Realloc(void* ptr, size_t size) {
  if (size == 0) {
    Free(ptr);
    return nullptr;
  }

  // If the pointer is nullptr, allocate new memory.
  if (ptr == nullptr) {
    return Allocate(size);
  }

  std::byte* bytes = static_cast<std::byte*>(ptr);

  // TODO(chenghanzh): Enhance with debug information for out-of-range and more.
  if (bytes < region_.data() || bytes >= region_.data() + region_.size()) {
    return nullptr;
  }

  Block* chunk_block = Block::FromUsableSpace(bytes);
  if (!chunk_block->Used()) {
    return nullptr;
  }
  size_t old_size = chunk_block->InnerSize();

  // Do nothing and return ptr if the requested size is not larger than the
  // current size.
  // TODO: Shrinking a memory chunk in place is not yet supported.
  if (old_size >= size) {
    return ptr;
  }

  void* new_ptr = Allocate(size);
  // Don't invalidate ptr if Allocate(size) fails to provide new memory.
  if (new_ptr == nullptr) {
    return nullptr;
  }
  memcpy(new_ptr, ptr, old_size);

  Free(ptr);
  return new_ptr;
}

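// Allocates zeroed storage for an array of `num` elements of `size` bytes
// each; returns nullptr if the allocation fails. Note that the `num * size`
// multiplication is not checked for overflow.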
void* FreeListHeap::Calloc(size_t num, size_t size) {
  void* ptr = Allocate(num * size);
  if (ptr != nullptr) {
    memset(ptr, 0, num * size);
  }
  return ptr;
}

}  // namespace pw::allocator