pw_allocator: Allocator utility implementations.

This adds the basic implementation of a couple of allocator utilities
for Pigweed. Currently, this is just a Block class that supports
splitting and merging (mostly stolen from Keir :) ), and a super-basic
freelist implementation.

This should provide enough building blocks to build a small allocator on
top.

Change-Id: I328488e89ad734be004108483401cd7ccbaf2a51
diff --git a/pw_allocator/BUILD b/pw_allocator/BUILD
new file mode 100644
index 0000000..1107506
--- /dev/null
+++ b/pw_allocator/BUILD
@@ -0,0 +1,74 @@
+# Copyright 2020 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+load(
+    "//pw_build:pigweed.bzl",
+    "pw_cc_library",
+    "pw_cc_test",
+)
+
+package(default_visibility = ["//visibility:public"])
+
+licenses(["notice"])  # Apache License 2.0
+
+pw_cc_library(
+    name = "block",
+    srcs = [
+        "block.cc",
+    ],
+    hdrs = [
+        "public/pw_allocator/block.h",
+    ],
+    includes = ["public"],
+    deps = [
+        "//pw_span",
+        "//pw_status",
+    ],
+)
+
+pw_cc_library(
+    name = "freelist",
+    srcs = [
+        "freelist.cc",
+    ],
+    hdrs = [
+        "public/pw_allocator/freelist.h",
+    ],
+    includes = ["public"],
+    deps = [
+        "//pw_containers",
+        "//pw_span",
+        "//pw_status",
+    ],
+)
+
+pw_cc_test(
+    name = "block_test",
+    srcs = [
+        "block_test.cc",
+    ],
+    deps = [
+        ":block",
+    ],
+)
+
+pw_cc_test(
+    name = "freelist_test",
+    srcs = [
+        "freelist_test.cc",
+    ],
+    deps = [
+        ":freelist",
+    ],
+)
diff --git a/pw_allocator/BUILD.gn b/pw_allocator/BUILD.gn
new file mode 100644
index 0000000..3c8515a
--- /dev/null
+++ b/pw_allocator/BUILD.gn
@@ -0,0 +1,71 @@
+# Copyright 2020 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("$dir_pw_docgen/docs.gni")
+import("$dir_pw_unit_test/test.gni")
+
+config("default_config") {
+  include_dirs = [ "public" ]
+}
+
+group("pw_allocator") {
+  deps = [
+    ":block",
+    ":freelist",
+  ]
+}
+
+source_set("block") {
+  public_configs = [ ":default_config" ]
+  public = [ "public/pw_allocator/block.h" ]
+  public_deps = [
+    "$dir_pw_span",
+    "$dir_pw_status",
+  ]
+  sources = [ "block.cc" ]
+  sources += public
+}
+
+source_set("freelist") {
+  public_configs = [ ":default_config" ]
+  public = [ "public/pw_allocator/freelist.h" ]
+  public_deps = [
+    "$dir_pw_containers",
+    "$dir_pw_span",
+    "$dir_pw_status",
+  ]
+  sources = [ "freelist.cc" ]
+  sources += public
+}
+
+pw_test_group("tests") {
+  tests = [
+    ":block_test",
+    ":freelist_test",
+  ]
+}
+
+pw_test("block_test") {
+  deps = [ ":block" ]
+  sources = [ "block_test.cc" ]
+}
+
+pw_test("freelist_test") {
+  deps = [ ":freelist" ]
+  sources = [ "freelist_test.cc" ]
+}
+
+pw_doc_group("docs") {
+  sources = [ "docs.rst" ]
+}
diff --git a/pw_allocator/block.cc b/pw_allocator/block.cc
new file mode 100644
index 0000000..8f93ff4
--- /dev/null
+++ b/pw_allocator/block.cc
@@ -0,0 +1,145 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include "pw_allocator/block.h"
+
+namespace pw::allocator {
+
+Status Block::Init(const span<std::byte> region, Block** block) {
+  // Ensure the region we're given is aligned and sized accordingly
+  if (reinterpret_cast<uintptr_t>(region.data()) % alignof(Block) != 0) {
+    return Status::INVALID_ARGUMENT;
+  }
+
+  if (region.size() < sizeof(Block)) {
+    return Status::INVALID_ARGUMENT;
+  }
+
+  union {
+    Block* block;
+    std::byte* bytes;
+  } aliased;
+  aliased.bytes = region.data();
+
+  // Make "next" point just past the end of this block; forming a linked list
+  // with the following storage. Since the space between this block and the
+  // next are implicitly part of the raw data, size can be computed by
+  // subtracting the pointers.
+  aliased.block->next = reinterpret_cast<Block*>(region.end());
+  aliased.block->MarkLast();
+
+  aliased.block->prev = nullptr;
+  *block = aliased.block;
+  return Status::OK;
+}
+
+Status Block::Split(size_t head_block_inner_size, Block** new_block) {
+  if (new_block == nullptr) {
+    return Status::INVALID_ARGUMENT;
+  }
+
+  // Don't split used blocks.
+  // TODO: Relax this restriction? Flag to enable/disable this check?
+  if (Used()) {
+    return Status::FAILED_PRECONDITION;
+  }
+
+  // First round the head_block_inner_size up to an alignof(Block) boundary.
+  // This ensures that the next block header is aligned accordingly.
+  // Alignment must be a power of two, so alignof(Block) - 1 can be used as
+  // a bit mask to extract the remainder.
+  auto align_bit_mask = alignof(Block) - 1;
+  size_t aligned_head_block_inner_size = head_block_inner_size;
+  if ((head_block_inner_size & align_bit_mask) != 0) {
+    aligned_head_block_inner_size =
+        (head_block_inner_size & ~align_bit_mask) + alignof(Block);
+  }
+
+  // (1) Are we trying to allocate a head block larger than the current head
+  // block? This may happen because of the alignment above.
+  if (aligned_head_block_inner_size > InnerSize()) {
+    return Status::OUT_OF_RANGE;
+  }
+
+  // (2) Does the resulting block have enough space to store the header?
+  // TODO: What to do if the returned section is empty (i.e. remaining
+  // size == sizeof(Block))?
+  if (InnerSize() - aligned_head_block_inner_size < sizeof(Block)) {
+    return Status::RESOURCE_EXHAUSTED;
+  }
+
+  // Create the new block inside the current one.
+  Block* new_next = reinterpret_cast<Block*>(
+      // From the current position...
+      reinterpret_cast<intptr_t>(this) +
+      // skip past the current header...
+      sizeof(*this) +
+      // into the usable bytes by the new inner size.
+      aligned_head_block_inner_size);
+
+  // If we're inserting in the middle, we need to update the current next
+  // block to point to what we're inserting
+  if (!Last()) {
+    NextBlock()->prev = new_next;
+  }
+
+  // Copy next verbatim so the next block also gets the "last"-ness
+  new_next->next = next;
+  new_next->prev = this;
+
+  // Update the current block to point to the new head.
+  next = new_next;
+
+  *new_block = next;
+  return Status::OK;
+}
+
+Status Block::MergeNext() {
+  // Anything to merge with?
+  if (Last()) {
+    return Status::OUT_OF_RANGE;
+  }
+
+  // Is this or the next block in use?
+  if (Used() || NextBlock()->Used()) {
+    return Status::FAILED_PRECONDITION;
+  }
+
+  // Simply enough, this block's next pointer becomes the next block's
+  // next pointer. We then need to re-wire the "next next" block's prev
+  // pointer to point back to us though.
+  next = NextBlock()->next;
+
+  // Copying the pointer also copies the "last" status, so this is safe.
+  if (!Last()) {
+    NextBlock()->prev = this;
+  }
+
+  return Status::OK;
+}
+
+Status Block::MergePrev() {
+  // We can't merge if we have no previous. After that though, merging with
+  // the previous block is just MergeNext from the previous block.
+  if (prev == nullptr) {
+    return Status::OUT_OF_RANGE;
+  }
+
+  // WARNING: This class instance will still exist, but technically be invalid
+  // after this has been invoked. Be careful when doing anything with `this`
+  // after doing the below.
+  return prev->MergeNext();
+}
+
+}  // namespace pw::allocator
diff --git a/pw_allocator/block_test.cc b/pw_allocator/block_test.cc
new file mode 100644
index 0000000..7979bdc
--- /dev/null
+++ b/pw_allocator/block_test.cc
@@ -0,0 +1,307 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include "pw_allocator/block.h"
+
+#include "gtest/gtest.h"
+#include "pw_span/span.h"
+
+using std::byte;
+
+namespace pw::allocator {
+
+TEST(Block, CanCreateSingleBlock) {
+  constexpr size_t kN = 200;
+  alignas(Block*) byte bytes[kN];
+
+  Block* block = nullptr;
+  auto status = Block::Init(span(bytes, kN), &block);
+
+  ASSERT_EQ(status, Status::OK);
+  EXPECT_EQ(block->OuterSize(), kN);
+  EXPECT_EQ(block->InnerSize(), kN - sizeof(Block));
+  EXPECT_EQ(block->PrevBlock(), nullptr);
+  EXPECT_EQ(block->NextBlock(), (Block*)((uintptr_t)block + kN));
+  EXPECT_EQ(block->Used(), false);
+  EXPECT_EQ(block->Last(), true);
+}
+
+TEST(Block, CannotCreateUnalignedSingleBlock) {
+  constexpr size_t kN = 1024;
+
+  // Force alignment, so we can un-force it below
+  alignas(Block*) byte bytes[kN];
+  byte* byte_ptr = bytes;
+
+  Block* block = nullptr;
+  auto status = Block::Init(span(byte_ptr + 1, kN - 1), &block);
+
+  EXPECT_EQ(status, Status::INVALID_ARGUMENT);
+}
+
+TEST(Block, CannotCreateTooSmallBlock) {
+  constexpr size_t kN = 2;
+  alignas(Block*) byte bytes[kN];
+  Block* block = nullptr;
+  auto status = Block::Init(span(bytes, kN), &block);
+
+  EXPECT_EQ(status, Status::INVALID_ARGUMENT);
+}
+
+TEST(Block, CanSplitBlock) {
+  constexpr size_t kN = 1024;
+  constexpr size_t kSplitN = 512;
+  alignas(Block*) byte bytes[kN];
+
+  Block* block = nullptr;
+  Block::Init(span(bytes, kN), &block);
+
+  Block* next_block = nullptr;
+  auto status = block->Split(kSplitN, &next_block);
+
+  ASSERT_EQ(status, Status::OK);
+  EXPECT_EQ(block->InnerSize(), kSplitN);
+  EXPECT_EQ(block->OuterSize(), kSplitN + sizeof(Block));
+  EXPECT_EQ(block->Last(), false);
+
+  EXPECT_EQ(next_block->OuterSize(), kN - kSplitN - sizeof(Block));
+  EXPECT_EQ(next_block->Used(), false);
+  EXPECT_EQ(next_block->Last(), true);
+
+  EXPECT_EQ(block->NextBlock(), next_block);
+  EXPECT_EQ(next_block->PrevBlock(), block);
+}
+
+TEST(Block, CanSplitBlockUnaligned) {
+  constexpr size_t kN = 1024;
+  constexpr size_t kSplitN = 513;
+
+  alignas(Block*) byte bytes[kN];
+
+  // We should split at sizeof(Block) + kSplitN bytes. Then
+  // we need to round that up to an alignof(Block*) boundary.
+  uintptr_t split_addr = ((uintptr_t)&bytes) + kSplitN;
+  split_addr += alignof(Block*) - (split_addr % alignof(Block*));
+  uintptr_t split_len = split_addr - (uintptr_t)&bytes;
+
+  Block* block = nullptr;
+  Block::Init(span(bytes, kN), &block);
+
+  Block* next_block = nullptr;
+  auto status = block->Split(kSplitN, &next_block);
+
+  ASSERT_EQ(status, Status::OK);
+  EXPECT_EQ(block->InnerSize(), split_len);
+  EXPECT_EQ(block->OuterSize(), split_len + sizeof(Block));
+  EXPECT_EQ(next_block->OuterSize(), kN - split_len - sizeof(Block));
+  EXPECT_EQ(next_block->Used(), false);
+  EXPECT_EQ(block->NextBlock(), next_block);
+  EXPECT_EQ(next_block->PrevBlock(), block);
+}
+
+TEST(Block, CanSplitMidBlock) {
+  // Split once, then split the original block again to ensure that the
+  // pointers get rewired properly.
+  // I.e.
+  // [[             BLOCK 1            ]]
+  // block1->Split()
+  // [[       BLOCK1       ]][[ BLOCK2 ]]
+  // block1->Split()
+  // [[ BLOCK1 ]][[ BLOCK3 ]][[ BLOCK2 ]]
+
+  constexpr size_t kN = 1024;
+  constexpr size_t kSplit1 = 512;
+  constexpr size_t kSplit2 = 256;
+  byte bytes[kN];
+
+  Block* block = nullptr;
+  Block::Init(span(bytes, kN), &block);
+
+  Block* block2 = nullptr;
+  block->Split(kSplit1, &block2);
+
+  Block* block3 = nullptr;
+  block->Split(kSplit2, &block3);
+
+  EXPECT_EQ(block->NextBlock(), block3);
+  EXPECT_EQ(block3->NextBlock(), block2);
+  EXPECT_EQ(block2->PrevBlock(), block3);
+  EXPECT_EQ(block3->PrevBlock(), block);
+}
+
+TEST(Block, CannotSplitBlockWithoutHeaderSpace) {
+  constexpr size_t kN = 1024;
+  constexpr size_t kSplitN = kN - sizeof(Block) - 1;
+  byte bytes[kN];
+
+  Block* block = nullptr;
+  Block::Init(span(bytes, kN), &block);
+
+  Block* next_block = nullptr;
+  auto status = block->Split(kSplitN, &next_block);
+
+  EXPECT_EQ(status, Status::RESOURCE_EXHAUSTED);
+  EXPECT_EQ(next_block, nullptr);
+}
+
+TEST(Block, MustProvideNextBlockPointer) {
+  constexpr size_t kN = 1024;
+  constexpr size_t kSplitN = kN - sizeof(Block) - 1;
+  byte bytes[kN];
+
+  Block* block = nullptr;
+  Block::Init(span(bytes, kN), &block);
+
+  auto status = block->Split(kSplitN, nullptr);
+  EXPECT_EQ(status, Status::INVALID_ARGUMENT);
+}
+
+TEST(Block, CannotMakeBlockLargerInSplit) {
+  // Ensure that we can't ask for more space than the block actually has...
+  constexpr size_t kN = 1024;
+  byte bytes[kN];
+
+  Block* block = nullptr;
+  Block::Init(span(bytes, kN), &block);
+
+  Block* next_block = nullptr;
+  auto status = block->Split(block->InnerSize() + 1, &next_block);
+
+  EXPECT_EQ(status, Status::OUT_OF_RANGE);
+}
+
+TEST(Block, CanMakeZeroSizeFirstBlock) {
+  // This block does support splitting with zero payload size.
+  constexpr size_t kN = 1024;
+  byte bytes[kN];
+
+  Block* block = nullptr;
+  Block::Init(span(bytes, kN), &block);
+
+  Block* next_block = nullptr;
+  auto status = block->Split(0, &next_block);
+
+  ASSERT_EQ(status, Status::OK);
+  EXPECT_EQ(block->InnerSize(), static_cast<size_t>(0));
+}
+
+TEST(Block, CanMakeZeroSizeSecondBlock) {
+  // Likewise, the split block can be zero-width.
+  constexpr size_t kN = 1024;
+  byte bytes[kN];
+
+  Block* block = nullptr;
+  Block::Init(span(bytes, kN), &block);
+
+  Block* next_block = nullptr;
+  auto status = block->Split(block->InnerSize() - sizeof(Block), &next_block);
+
+  ASSERT_EQ(status, Status::OK);
+  EXPECT_EQ(next_block->InnerSize(), static_cast<size_t>(0));
+}
+
+TEST(Block, CanMarkBlockUsed) {
+  constexpr size_t kN = 1024;
+  alignas(Block*) byte bytes[kN];
+
+  Block* block = nullptr;
+  Block::Init(span(bytes, kN), &block);
+
+  block->MarkUsed();
+  EXPECT_EQ(block->Used(), true);
+
+  // Mark used packs that data into the next pointer. Check that it's still
+  // valid
+  EXPECT_EQ(block->NextBlock(), (Block*)((uintptr_t)block + kN));
+
+  block->MarkFree();
+  EXPECT_EQ(block->Used(), false);
+}
+
+TEST(Block, CannotSplitUsedBlock) {
+  constexpr size_t kN = 1024;
+  alignas(Block*) byte bytes[kN];
+
+  Block* block = nullptr;
+  Block::Init(span(bytes, kN), &block);
+
+  block->MarkUsed();
+
+  Block* next_block = nullptr;
+  auto status = block->Split(512, &next_block);
+  EXPECT_EQ(status, Status::FAILED_PRECONDITION);
+}
+
+TEST(Block, CanMergeWithNextBlock) {
+  // Do the three way merge from "CanSplitMidBlock", and let's
+  // merge block 3 and 2
+  constexpr size_t kN = 1024;
+  constexpr size_t kSplit1 = 512;
+  constexpr size_t kSplit2 = 256;
+  byte bytes[kN];
+
+  Block* block = nullptr;
+  Block::Init(pw::span(bytes, kN), &block);
+
+  Block* block2 = nullptr;
+  block->Split(kSplit1, &block2);
+
+  Block* block3 = nullptr;
+  block->Split(kSplit2, &block3);
+
+  EXPECT_EQ(block3->MergeNext(), Status::OK);
+
+  EXPECT_EQ(block->NextBlock(), block3);
+  EXPECT_EQ(block3->PrevBlock(), block);
+  EXPECT_EQ(block->InnerSize(), kSplit2);
+
+  // The resulting "right hand" block should have an outer size of 1024 - 256 -
+  // sizeof(Block), which accounts for the first block.
+  EXPECT_EQ(block3->OuterSize(), kN - kSplit2 - sizeof(Block));
+}
+
+TEST(Block, CannotMergeWithFirstOrLastBlock) {
+  constexpr size_t kN = 1024;
+  byte bytes[kN];
+
+  // Do a split, just to sanity check that the checks on Next/Prev are
+  // different...
+  Block* block = nullptr;
+  Block::Init(span(bytes, kN), &block);
+
+  Block* next_block = nullptr;
+  block->Split(512, &next_block);
+
+  EXPECT_EQ(next_block->MergeNext(), Status::OUT_OF_RANGE);
+  EXPECT_EQ(block->MergePrev(), Status::OUT_OF_RANGE);
+}
+
+TEST(Block, CannotMergeUsedBlock) {
+  constexpr size_t kN = 1024;
+  byte bytes[kN];
+
+  // Do a split, just to sanity check that the checks on Next/Prev are
+  // different...
+  Block* block = nullptr;
+  Block::Init(span(bytes, kN), &block);
+
+  Block* next_block = nullptr;
+  block->Split(512, &next_block);
+
+  block->MarkUsed();
+  EXPECT_EQ(block->MergeNext(), Status::FAILED_PRECONDITION);
+  EXPECT_EQ(next_block->MergePrev(), Status::FAILED_PRECONDITION);
+}
+
+}  // namespace pw::allocator
diff --git a/pw_allocator/docs.rst b/pw_allocator/docs.rst
new file mode 100644
index 0000000..ae31c55
--- /dev/null
+++ b/pw_allocator/docs.rst
@@ -0,0 +1,19 @@
+.. _chapter-pw-allocator:
+
+.. default-domain:: cpp
+
+------------
+pw_allocator
+------------
+
+This module provides various building blocks
+for a dynamic allocator. This is composed of the following parts:
+
+- ``block`` - An implementation of a linked list of memory blocks, supporting
+  splitting and merging of blocks.
+- ``freelist`` - A freelist, suitable for fast lookups of available memory
+  chunks (i.e. ``block`` objects)
+
+Note: this module and its documentation are currently incomplete and
+experimental.
+
diff --git a/pw_allocator/freelist.cc b/pw_allocator/freelist.cc
new file mode 100644
index 0000000..34bcbb7
--- /dev/null
+++ b/pw_allocator/freelist.cc
@@ -0,0 +1,127 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include "pw_allocator/freelist.h"
+
+namespace pw::allocator {
+
+Status FreeList::AddChunk(span<std::byte> chunk) {
+  // Check that the size is enough to actually store what we need
+  if (chunk.size() < sizeof(FreeListNode)) {
+    return Status::OUT_OF_RANGE;
+  }
+
+  union {
+    FreeListNode* node;
+    std::byte* bytes;
+  } aliased;
+
+  aliased.bytes = chunk.data();
+
+  size_t chunk_ptr = FindChunkPtrForSize(chunk.size(), false);
+
+  // Add it to the correct list.
+  aliased.node->size = chunk.size();
+  aliased.node->next = chunks_[chunk_ptr];
+  chunks_[chunk_ptr] = aliased.node;
+
+  return Status::OK;
+}
+
+span<std::byte> FreeList::FindChunk(size_t size) const {
+  if (size == 0) {
+    return span<std::byte>();
+  }
+
+  size_t chunk_ptr = FindChunkPtrForSize(size, true);
+
+  // Check that there's data. This catches the case where we run off the
+  // end of the array
+  if (chunks_[chunk_ptr] == nullptr) {
+    return span<std::byte>();
+  }
+
+  // Now iterate up the buckets, walking each list to find a good candidate
+  for (size_t i = chunk_ptr; i < chunks_.size(); i++) {
+    union {
+      FreeListNode* node;
+      std::byte* data;
+    } aliased;
+    aliased.node = chunks_[i];
+
+    while (aliased.node != nullptr) {
+      if (aliased.node->size >= size) {
+        return span<std::byte>(aliased.data, aliased.node->size);
+      }
+
+      aliased.node = aliased.node->next;
+    }
+  }
+
+  // If we get here, we've checked every block in every bucket. There's
+  // nothing that can support this allocation.
+  return span<std::byte>();
+}
+
+Status FreeList::RemoveChunk(span<std::byte> chunk) {
+  size_t chunk_ptr = FindChunkPtrForSize(chunk.size(), true);
+
+  // Walk that list, finding the chunk.
+  union {
+    FreeListNode* node;
+    std::byte* data;
+  } aliased, aliased_next;
+
+  // Check head first.
+  if (chunks_[chunk_ptr] == nullptr) {
+    return Status::NOT_FOUND;
+  }
+
+  aliased.node = chunks_[chunk_ptr];
+  if (aliased.data == chunk.data()) {
+    chunks_[chunk_ptr] = aliased.node->next;
+
+    return Status::OK;
+  }
+
+  // No? Walk the nodes.
+  aliased.node = chunks_[chunk_ptr];
+
+  while (aliased.node->next != nullptr) {
+    aliased_next.node = aliased.node->next;
+    if (aliased_next.data == chunk.data()) {
+      // Found it, remove this node out of the chain
+      aliased.node->next = aliased_next.node->next;
+      return Status::OK;
+    }
+
+    aliased.node = aliased.node->next;
+  }
+
+  return Status::NOT_FOUND;
+}
+
+size_t FreeList::FindChunkPtrForSize(size_t size, bool non_null) const {
+  size_t chunk_ptr = 0;
+  for (chunk_ptr = 0; chunk_ptr < sizes_.size(); chunk_ptr++) {
+    if (sizes_[chunk_ptr] >= size &&
+        (!non_null || chunks_[chunk_ptr] != nullptr)) {
+      break;
+    }
+  }
+
+  return chunk_ptr;
+}
+
+}  // namespace pw::allocator
diff --git a/pw_allocator/freelist_test.cc b/pw_allocator/freelist_test.cc
new file mode 100644
index 0000000..7697155
--- /dev/null
+++ b/pw_allocator/freelist_test.cc
@@ -0,0 +1,173 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include "pw_allocator/freelist.h"
+
+#include "gtest/gtest.h"
+#include "pw_span/span.h"
+#include "pw_status/status.h"
+
+using std::byte;
+
+namespace pw::allocator {
+
+static const std::array<size_t, 8> example_sizes = {
+    64, 128, 256, 512, 1024, 2048, 4096, 8192};
+
+TEST(FreeList, EmptyListHasNoMembers) {
+  FreeListBuffer list(example_sizes);
+
+  auto item = list.FindChunk(4);
+  EXPECT_EQ(item.size(), static_cast<size_t>(0));
+  item = list.FindChunk(128);
+  EXPECT_EQ(item.size(), static_cast<size_t>(0));
+}
+
+TEST(FreeList, CanRetrieveAddedMember) {
+  FreeListBuffer list(example_sizes);
+  constexpr size_t kN = 512;
+
+  byte data[kN] = {std::byte(0)};
+
+  auto status = list.AddChunk(pw::span(data, kN));
+  EXPECT_EQ(status, Status::OK);
+
+  auto item = list.FindChunk(kN);
+  EXPECT_EQ(item.size(), kN);
+  EXPECT_EQ(item.data(), data);
+}
+
+TEST(FreeList, CanRetrieveAddedMemberForSmallerSize) {
+  FreeListBuffer list(example_sizes);
+  constexpr size_t kN = 512;
+
+  byte data[kN] = {std::byte(0)};
+
+  list.AddChunk(pw::span(data, kN));
+  auto item = list.FindChunk(kN / 2);
+  EXPECT_EQ(item.size(), kN);
+  EXPECT_EQ(item.data(), data);
+}
+
+TEST(FreeList, CanRemoveItem) {
+  FreeListBuffer list(example_sizes);
+  constexpr size_t kN = 512;
+
+  byte data[kN] = {std::byte(0)};
+
+  list.AddChunk(pw::span(data, kN));
+  auto status = list.RemoveChunk(pw::span(data, kN));
+  EXPECT_EQ(status, Status::OK);
+
+  auto item = list.FindChunk(kN);
+  EXPECT_EQ(item.size(), static_cast<size_t>(0));
+}
+
+TEST(FreeList, FindReturnsSmallestChunk) {
+  FreeListBuffer list(example_sizes);
+  constexpr size_t kN1 = 512;
+  constexpr size_t kN2 = 1024;
+
+  byte data1[kN1] = {std::byte(0)};
+  byte data2[kN2] = {std::byte(0)};
+
+  list.AddChunk(pw::span(data1, kN1));
+  list.AddChunk(pw::span(data2, kN2));
+
+  auto chunk = list.FindChunk(kN1 / 2);
+  EXPECT_EQ(chunk.size(), kN1);
+  EXPECT_EQ(chunk.data(), data1);
+
+  chunk = list.FindChunk(kN1);
+  EXPECT_EQ(chunk.size(), kN1);
+  EXPECT_EQ(chunk.data(), data1);
+
+  chunk = list.FindChunk(kN1 + 1);
+  EXPECT_EQ(chunk.size(), kN2);
+  EXPECT_EQ(chunk.data(), data2);
+}
+
+TEST(FreeList, FindReturnsCorrectChunkInSameBucket) {
+  // If we have two values in the same bucket, ensure that the allocation will
+  // pick an appropriately sized one.
+  FreeListBuffer list(example_sizes);
+  constexpr size_t kN1 = 512;
+  constexpr size_t kN2 = 257;
+
+  byte data1[kN1] = {std::byte(0)};
+  byte data2[kN2] = {std::byte(0)};
+
+  // List should now be 257 -> 512 -> NULL
+  list.AddChunk(pw::span(data1, kN1));
+  list.AddChunk(pw::span(data2, kN2));
+
+  auto chunk = list.FindChunk(kN2 + 1);
+  EXPECT_EQ(chunk.size(), kN1);
+}
+
+TEST(FreeList, FindCanMoveUpThroughBuckets) {
+  // Ensure that finding a chunk will move up through buckets if no appropriate
+  // chunks were found in a given bucket
+  FreeListBuffer list(example_sizes);
+  constexpr size_t kN1 = 257;
+  constexpr size_t kN2 = 513;
+
+  byte data1[kN1] = {std::byte(0)};
+  byte data2[kN2] = {std::byte(0)};
+
+  // List should now be:
+  // bkt[3] (257 bytes up to 512 bytes) -> 257 -> NULL
+  // bkt[4] (513 bytes up to 1024 bytes) -> 513 -> NULL
+  list.AddChunk(pw::span(data1, kN1));
+  list.AddChunk(pw::span(data2, kN2));
+
+  // Request a 300 byte chunk. This should return the 513 byte one
+  auto chunk = list.FindChunk(kN1 + 1);
+  EXPECT_EQ(chunk.size(), kN2);
+}
+
+TEST(FreeList, RemoveUnknownChunkReturnsNotFound) {
+  FreeListBuffer list(example_sizes);
+  constexpr size_t kN = 512;
+
+  byte data[kN] = {std::byte(0)};
+  byte data2[kN] = {std::byte(0)};
+
+  list.AddChunk(pw::span(data, kN));
+  auto status = list.RemoveChunk(pw::span(data2, kN));
+  EXPECT_EQ(status, Status::NOT_FOUND);
+}
+
+TEST(FreeList, CanStoreMultipleChunksPerBucket) {
+  FreeListBuffer list(example_sizes);
+  constexpr size_t kN = 512;
+
+  byte data1[kN] = {std::byte(0)};
+  byte data2[kN] = {std::byte(0)};
+
+  list.AddChunk(pw::span(data1, kN));
+  list.AddChunk(pw::span(data2, kN));
+
+  auto chunk1 = list.FindChunk(kN);
+  list.RemoveChunk(chunk1);
+  auto chunk2 = list.FindChunk(kN);
+  list.RemoveChunk(chunk2);
+
+  // Ordering of the chunks doesn't matter
+  EXPECT_TRUE(chunk1.data() != chunk2.data());
+  EXPECT_TRUE(chunk1.data() == data1 || chunk1.data() == data2);
+  EXPECT_TRUE(chunk2.data() == data1 || chunk2.data() == data2);
+}
+
+}  // namespace pw::allocator
diff --git a/pw_allocator/public/pw_allocator/block.h b/pw_allocator/public/pw_allocator/block.h
new file mode 100644
index 0000000..adefe30
--- /dev/null
+++ b/pw_allocator/public/pw_allocator/block.h
@@ -0,0 +1,195 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+// WARNING: This code is an experimental WIP & exploration only, and is far
+// from usable.
+#pragma once
+
+#include "pw_span/span.h"
+#include "pw_status/status.h"
+
+namespace pw::allocator {
+
+// The "Block" type is intended to be a building block component for
+// allocators. In this design, there is an explicit pointer to next and
+// prev from the block header; the size is not encoded. The below diagram shows
+// what this would look like for two blocks.
+//
+//   .------+---------------------------------.-----------------------------
+//   |            Block A (first)             |       Block B (second)
+//
+//   +------+------+--------------------------+------+------+---------------
+//   | Next | Prev |   usable space           | Next | Prev | usable space..
+//   +------+------+--------------------------+------+--+---+---------------
+//   ^  |                                     ^         |
+//   |  '-------------------------------------'         |
+//   |                                                  |
+//   '----------- Block B's prev points to Block A -----'
+//
+// One use for these blocks is to use them as allocations, where each block
+// represents an allocation handed out by malloc(). These blocks could also be
+// used as part of a slab or buddy allocator.
+//
+// Each block also contains flags for whether it is the last block (i.e. whether
+// the "next" pointer points to a valid block, or just denotes the end of this
+// block), and whether the block is in use. These are encoded into the last two
+// bits of the "next" pointer, as follows:
+//
+//  .-----------------------------------------------------------------------.
+//  |                            Block                                      |
+//  +-----------------------------------------------------------------------+
+//  |              Next            | Prev |         usable space            |
+//  +----------------+------+------+      +                                 |
+//  |   Ptr[N..2]    | Last | Used |      |                                 |
+//  +----------------+------+------+------+---------------------------------+
+//  ^
+//  |
+//  '----------- NextBlock() = Next & ~0x3 --------------------------------->
+//
+// The first block in a chain is denoted by a nullptr "prev" field, and the last
+// block is denoted by the "Last" bit being set.
+//
+// Note: This block class requires that the given block is aligned to an
+// alignof(Block*) boundary. Because of this alignment requirement, each
+// returned block will also be aligned to an alignof(Block*) boundary, and the
+// size will always be rounded up to a multiple of alignof(Block*).
+//
+// This class must be constructed using the static Init call.
+class Block final {
+ public:
+  // No copy/move.
+  Block(const Block& other) = delete;
+  Block& operator=(const Block& other) = delete;
+  Block(Block&& other) = delete;
+  Block& operator=(Block&& other) = delete;
+
+  // Create the first block for a given memory region.
+  // Note that the start of the given memory region must be aligned to an
+  // alignof(Block) boundary.
+  // Returns:
+  //   INVALID_ARGUMENT if the given region is unaligned or too small, or
+  //   OK otherwise.
+  static Status Init(const span<std::byte> region, Block** block);
+
+  // Size including the header, derived from the distance to the next block.
+  size_t OuterSize() const {
+    return reinterpret_cast<intptr_t>(NextBlock()) -
+           reinterpret_cast<intptr_t>(this);
+  }
+
+  // Usable bytes inside the block.
+  size_t InnerSize() const { return OuterSize() - sizeof(*this); }
+
+  // Split this block, such that this block has an inner size of
+  // `head_block_inner_size`, and return a new block in the remainder of the
+  // space in `new_block`.
+  //
+  // The "remainder" block will be aligned to an alignof(Block*) boundary (and
+  // `head_block_inner_size` will be rounded up). If the remaining space is not
+  // large enough to store a new `Block` after rounding, no splitting will
+  // occur.
+  //
+  // This may return the following:
+  //   OK: The split completed successfully.
+  //   INVALID_ARGUMENT: new_block is null
+  //   FAILED_PRECONDITION: This block is in use and cannot be split.
+  //   OUT_OF_RANGE: The requested size for "this" block is greater than the
+  //                 current inner_size.
+  //   RESOURCE_EXHAUSTED: The split cannot occur because the "remainder" block
+  //                       would not be large enough to store a block header.
+  Status Split(size_t head_block_inner_size, Block** new_block);
+
+  // Merge this block with the one that comes after it.
+  // This function will not merge blocks if either are in use.
+  //
+  // This may return the following:
+  //   OK: Merge was successful.
+  //   OUT_OF_RANGE: Attempting to merge the "last" block.
+  //   FAILED_PRECONDITION: The blocks could not be merged because one of them
+  //                        was in use.
+  Status MergeNext();
+
+  // Merge this block with the one that comes before it.
+  // This function will not merge blocks if either are in use.
+  //
+  // Warning: merging with a previous block will invalidate this block instance.
+  // Do not perform any operations on this instance after merging.
+  //
+  // This may return the following:
+  //   OK: Merge was successful.
+  //   OUT_OF_RANGE: Attempting to merge the "first" block.
+  //   FAILED_PRECONDITION: The blocks could not be merged because one of them
+  //                        was in use.
+  Status MergePrev();
+
+  // Returns whether this block is in-use or not.
+  bool Used() const { return (NextAsUIntPtr() & kInUseFlag) == kInUseFlag; }
+
+  // Returns whether this block is the last block or
+  // not (i.e. whether NextBlock points to a valid block or not).
+  // This is needed because NextBlock points to the end of this block,
+  // whether there is a valid block there or not.
+  bool Last() const { return (NextAsUIntPtr() & kLastFlag) == kLastFlag; }
+
+  // Mark this block as in-use by setting the tag bit in `next`.
+  void MarkUsed() {
+    next = reinterpret_cast<Block*>((NextAsUIntPtr() | kInUseFlag));
+  }
+
+  // Mark this block as free by clearing the tag bit in `next`.
+  void MarkFree() {
+    next = reinterpret_cast<Block*>((NextAsUIntPtr() & ~kInUseFlag));
+  }
+
+  // Mark this block as the last one in the chain.
+  void MarkLast() {
+    next = reinterpret_cast<Block*>((NextAsUIntPtr() | kLastFlag));
+  }
+
+  // Clear the "last" bit from this block.
+  void ClearLast() {
+    next = reinterpret_cast<Block*>((NextAsUIntPtr() & ~kLastFlag));
+  }
+
+  // Fetch the block immediately after this one (flag bits masked off).
+  // Note: you should also check Last(); this function may return a valid
+  // block, even if one does not exist.
+  Block* NextBlock() const {
+    return reinterpret_cast<Block*>(
+        (NextAsUIntPtr() & ~(kInUseFlag | kLastFlag)));
+  }
+
+  // Return the block immediately before this one. This will return nullptr
+  // if this is the "first" block.
+  Block* PrevBlock() const { return prev; }
+
+ private:
+  static constexpr uintptr_t kInUseFlag = 0x1;
+  static constexpr uintptr_t kLastFlag = 0x2;
+
+  Block() = default;  // Blocks may only be created through Init().
+
+  // Helper to reduce some of the casting nesting in the block management
+  // functions.
+  uintptr_t NextAsUIntPtr() const { return reinterpret_cast<uintptr_t>(next); }
+
+  // Note: Consider instead making these next/prev offsets from the current
+  // block, with templated type for the offset size. There are some interesting
+  // tradeoffs here; perhaps a pool of small allocations could use 1-byte
+  // next/prev offsets to reduce size further.
+  Block* next;  // Tagged pointer: low two bits are kInUseFlag / kLastFlag.
+  Block* prev;  // nullptr for the first block in a chain.
+};
+
+}  // namespace pw::allocator
diff --git a/pw_allocator/public/pw_allocator/freelist.h b/pw_allocator/public/pw_allocator/freelist.h
new file mode 100644
index 0000000..ac4444a
--- /dev/null
+++ b/pw_allocator/public/pw_allocator/freelist.h
@@ -0,0 +1,121 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+#pragma once
+
+#include <array>
+
+#include "pw_containers/vector.h"
+#include "pw_span/span.h"
+#include "pw_status/status.h"
+
+namespace pw::allocator {
+
+template <size_t num_buckets>
+class FreeListBuffer;
+
+// Basic freelist implementation for an allocator.
+// This implementation buckets by chunk size, with a list of user-provided
+// buckets. Each bucket is a linked list of storage chunks. Because this
+// freelist uses the added chunks themselves as list nodes, there is a lower
+// bound of sizeof(FreeList.FreeListNode) bytes for chunks which can be added
+// to this freelist. There is also an implicit bucket for "everything else",
+// for chunks which do not fit into a bucket.
+//
+// Each added chunk will be added to the smallest bucket under which it fits. If
+// it does not fit into any user-provided bucket, it will be added to the
+// default bucket.
+//
+// As an example, assume that the FreeList is configured with buckets of sizes
+// {64, 128, 256 and 512} bytes. The internal state may look like the following.
+//
+// bucket[0] (64B) --> chunk[12B] --> chunk[42B] --> chunk[64B] --> NULL
+// bucket[1] (128B) --> chunk[65B] --> chunk[72B] --> NULL
+// bucket[2] (256B) --> NULL
+// bucket[3] (512B) --> chunk[312B] --> chunk[512B] --> chunk[416B] --> NULL
+// bucket[4] (implicit) --> chunk[1024B] --> chunk[513B] --> NULL
+//
+// Note that added chunks should be aligned to a 4-byte boundary.
+//
+// This class is split into two parts; FreeList implements all of the
+// logic, and takes in pointers for two pw::Vector instances for its storage.
+// This prevents us from having to specialise this class for every kMaxSize
+// parameter for the vector. FreeListBuffer then provides the storage for these
+// two pw::Vector instances and instantiates FreeList.
+class FreeList {
+ public:
+  // Remove copy/move ctors.
+  FreeList(const FreeList& other) = delete;
+  FreeList(FreeList&& other) = delete;
+  FreeList& operator=(const FreeList& other) = delete;
+  FreeList& operator=(FreeList&& other) = delete;
+
+  // Adds a chunk to this freelist. Returns:
+  //   OK: The chunk was added successfully
+  //   OUT_OF_RANGE: The chunk could not be added for size reasons (e.g. if
+  //                 the chunk is too small to store the FreeListNode).
+  Status AddChunk(span<std::byte> chunk);
+
+  // Finds an eligible chunk for an allocation of size `size`. Note that this
+  // will return the first allocation possible within a given bucket; it does
+  // not currently optimize for finding the smallest chunk. Returns a pw::span
+  // representing the chunk. This will be "valid" on success, and will have size
+  // = 0 on failure (if there were no chunks available for that allocation).
+  span<std::byte> FindChunk(size_t size) const;
+
+  // Remove a chunk from this freelist. Returns:
+  //   OK: The chunk was removed successfully
+  //   NOT_FOUND: The chunk could not be found in this freelist.
+  Status RemoveChunk(span<std::byte> chunk);
+
+ private:
+  // For a given size, find which index into chunks_ the node should be written
+  // to. (non_null presumably skips empty buckets -- confirm in freelist.cc.)
+  size_t FindChunkPtrForSize(size_t size, bool non_null) const;
+
+ private:
+  template <size_t num_buckets>
+  friend class FreeListBuffer;
+
+  struct FreeListNode {
+    // TODO: Double-link this? It'll make removal easier/quicker.
+    FreeListNode* next;
+    size_t size;
+  };
+
+  constexpr FreeList(Vector<FreeListNode*>& chunks, Vector<size_t>& sizes)
+      : chunks_(chunks), sizes_(sizes) {}
+
+  Vector<FreeListNode*>& chunks_;  // List head per bucket; last is implicit.
+  Vector<size_t>& sizes_;  // Upper size bound of each explicit bucket.
+};
+
+// Holder for FreeList's storage.
+template <size_t num_buckets>
+class FreeListBuffer : public FreeList {
+ public:
+  // These constructors are a little hacky because of the initialization order.
+  // Because FreeList has a trivial constructor, this is safe, however.
+  explicit FreeListBuffer(std::initializer_list<size_t> sizes)
+      : FreeList(chunks_, sizes_), sizes_(sizes), chunks_(num_buckets + 1, 0) {}
+  explicit FreeListBuffer(std::array<size_t, num_buckets> sizes)
+      : FreeList(chunks_, sizes_),
+        sizes_(sizes.begin(), sizes.end()),
+        chunks_(num_buckets + 1, 0) {}
+
+ private:
+  Vector<size_t, num_buckets> sizes_;
+  Vector<FreeList::FreeListNode*, num_buckets + 1> chunks_;
+};
+
+}  // namespace pw::allocator