Release snapshot
diff --git a/Kbuild b/Kbuild
new file mode 100644
index 0000000..0279788
--- /dev/null
+++ b/Kbuild
@@ -0,0 +1,14 @@
+#
+# Copyright 2014, NICTA
+#
+# This software may be distributed and modified according to the terms of
+# the BSD 2-Clause license. Note that NO WARRANTY is provided.
+# See "LICENSE_BSD2.txt" for details.
+#
+# @TAG(NICTA_BSD)
+#
+
+libs-$(CONFIG_LIB_SEL4_UTILS) += libsel4utils
+libsel4vspace-$(CONFIG_LIB_SEL4_VSPACE) := libsel4vspace
+libsel4vka-$(CONFIG_LIB_SEL4_VKA) := libsel4vka
+libsel4utils: $(libsel4vspace-y) $(libsel4vka-y) libutils libelf libcpio libsel4 common $(libc)
diff --git a/Kconfig b/Kconfig
new file mode 100644
index 0000000..d2fcc6b
--- /dev/null
+++ b/Kconfig
@@ -0,0 +1,28 @@
+#
+# Copyright 2014, NICTA
+#
+# This software may be distributed and modified according to the terms of
+# the BSD 2-Clause license. Note that NO WARRANTY is provided.
+# See "LICENSE_BSD2.txt" for details.
+#
+# @TAG(NICTA_BSD)
+#
+
+menuconfig LIB_SEL4_UTILS
+    bool "Build sel4utils"
+    depends on LIB_SEL4 && HAVE_LIBC && LIB_ELF && LIB_CPIO && LIB_UTILS
+    select HAVE_SEL4_LIBS
+    default y
+    help
+        A library of utils for building an OS on seL4, including virtual memory and process and thread creation.
+
+if LIB_SEL4_UTILS
+    config SEL4UTILS_STACK_SIZE
+    int "Size of stacks in bytes to allocate if using vspace interface in this library"
+    default 65536
+
+    config SEL4UTILS_CSPACE_SIZE_BITS
+    int "Size of default cspace to spawn processes with"
+    range 2 27
+    default 12
+endif
diff --git a/LICENSE_BSD2.txt b/LICENSE_BSD2.txt
new file mode 100644
index 0000000..85cda3e
--- /dev/null
+++ b/LICENSE_BSD2.txt
@@ -0,0 +1,31 @@
+Files described as being under the "BSD 2-Clause" license fall under the
+following license.
+
+-----------------------------------------------------------------------
+
+Copyright (c) 2014 National ICT Australia and other contributors.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+1. Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright
+   notice, this list of conditions and the following disclaimer in the
+   documentation and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGE.
+
diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000..ab9fd59
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,23 @@
+#
+# Copyright 2014, NICTA
+#
+# This software may be distributed and modified according to the terms of
+# the BSD 2-Clause license. Note that NO WARRANTY is provided.
+# See "LICENSE_BSD2.txt" for details.
+#
+# @TAG(NICTA_BSD)
+#
+
+# Targets
+TARGETS := libsel4utils.a
+
+# Source files required to build the target
+CFILES := $(patsubst $(SOURCE_DIR)/%,%,$(wildcard $(SOURCE_DIR)/src/*.c))
+CFILES += $(patsubst $(SOURCE_DIR)/%,%,$(wildcard $(SOURCE_DIR)/src/vspace/*.c))
+
+# Header files/directories this library provides
+HDRFILES := $(wildcard $(SOURCE_DIR)/include/*)
+HDRFILES += $(wildcard $(SOURCE_DIR)/arch_include/$(ARCH)/*)
+
+include $(SEL4_COMMON)/common.mk
+
diff --git a/arch_include/arm/sel4utils/arch/util.h b/arch_include/arm/sel4utils/arch/util.h
new file mode 100644
index 0000000..d8dc118
--- /dev/null
+++ b/arch_include/arm/sel4utils/arch/util.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2014, NICTA
+ *
+ * This software may be distributed and modified according to the terms of
+ * the BSD 2-Clause license. Note that NO WARRANTY is provided.
+ * See "LICENSE_BSD2.txt" for details.
+ *
+ * @TAG(NICTA_BSD)
+ */
+#ifndef _SEL4UTILS_ARCH_UTIL_H
+#define _SEL4UTILS_ARCH_UTIL_H
+
+#include <sel4/sel4.h>
+#include <sel4/arch/pfIPC.h>
+#include <sel4/arch/exIPC.h>
+
+#define EXCEPT_IPC_SYS_MR_IP EXCEPT_IPC_SYS_MR_PC
+
+static inline int
+sel4utils_is_read_fault(void) 
+{
+    seL4_Word fsr = seL4_GetMR(SEL4_PFIPC_FSR);
+#if defined(ARM_HYP)
+    return (fsr & (1 << 6)) == 0;
+#else
+    return (fsr & (1 << 11)) == 0;
+#endif
+}
+
+
+static inline void 
+sel4utils_set_instruction_pointer(seL4_UserContext *regs, seL4_Word value) 
+{
+    regs->pc = value;
+}
+
+static inline seL4_Word
+sel4utils_get_instruction_pointer(seL4_UserContext regs) 
+{
+    return regs.pc;
+}
+
+static inline void
+sel4utils_set_stack_pointer(seL4_UserContext *regs, seL4_Word value)
+{
+    regs->sp = value;
+}
+
+static inline void
+sel4utils_set_arg0(seL4_UserContext *regs, seL4_Word value)
+{
+    regs->r0 = value;
+}
+#endif /* _SEL4UTILS_ARCH_UTIL_H */
+
+
diff --git a/arch_include/ia32/sel4utils/arch/util.h b/arch_include/ia32/sel4utils/arch/util.h
new file mode 100644
index 0000000..21faac1
--- /dev/null
+++ b/arch_include/ia32/sel4utils/arch/util.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2014, NICTA
+ *
+ * This software may be distributed and modified according to the terms of
+ * the BSD 2-Clause license. Note that NO WARRANTY is provided.
+ * See "LICENSE_BSD2.txt" for details.
+ *
+ * @TAG(NICTA_BSD)
+ */
+#ifndef _SEL4UTILS_ARCH_UTIL_H
+#define _SEL4UTILS_ARCH_UTIL_H
+
+#include <sel4/sel4.h>
+#include <sel4/arch/pfIPC.h>
+#include <sel4/arch/exIPC.h>
+
+#define EXCEPT_IPC_SYS_MR_IP EXCEPT_IPC_SYS_MR_EIP
+
+
+static inline int
+sel4utils_is_read_fault(void)
+{
+    seL4_Word fsr = seL4_GetMR(SEL4_PFIPC_FSR);
+    return (fsr & (1 << 1)) == 0;
+}
+
+
+static inline void 
+sel4utils_set_instruction_pointer(seL4_UserContext *regs, seL4_Word value) 
+{
+    regs->eip = value;
+}
+
+static inline seL4_Word
+sel4utils_get_instruction_pointer(seL4_UserContext regs) 
+{
+    return regs.eip;
+}
+
+static inline void
+sel4utils_set_stack_pointer(seL4_UserContext *regs, seL4_Word value)
+{
+    regs->esp = value;
+}
+
+static inline void
+sel4utils_set_arg0(seL4_UserContext *regs, seL4_Word value)
+{
+    regs->eax = value; /* FIXME(review): was regs->eip, which clobbered the instruction pointer; verify intended arg0 register */
+}
+#endif /* _SEL4UTILS_ARCH_UTIL_H */
+
+
diff --git a/arch_include/ia64/sel4utils/arch/util.h b/arch_include/ia64/sel4utils/arch/util.h
new file mode 100644
index 0000000..90714bc
--- /dev/null
+++ b/arch_include/ia64/sel4utils/arch/util.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2014, NICTA
+ *
+ * This software may be distributed and modified according to the terms of
+ * the BSD 2-Clause license. Note that NO WARRANTY is provided.
+ * See "LICENSE_BSD2.txt" for details.
+ *
+ * @TAG(NICTA_BSD)
+ */
+#ifndef _SEL4UTILS_ARCH_UTIL_H
+#define _SEL4UTILS_ARCH_UTIL_H
+
+#include <sel4/sel4.h>
+#include <sel4/arch/pfIPC.h>
+#include <sel4/arch/exIPC.h>
+
+#define EXCEPT_IPC_SYS_MR_IP EXCEPT_IPC_RIP
+
+static inline int
+sel4utils_is_read_fault(void)
+{
+    seL4_Word fsr = seL4_GetMR(SEL4_PFIPC_FSR);
+    return (fsr & (1 << 1)) == 0;
+}
+
+
+static inline void 
+sel4utils_set_instruction_pointer(seL4_UserContext *regs, seL4_Word value) 
+{
+    regs->rip = value;
+}
+
+static inline seL4_Word
+sel4utils_get_instruction_pointer(seL4_UserContext regs) 
+{
+    return regs.rip;
+}
+
+static inline void
+sel4utils_set_stack_pointer(seL4_UserContext *regs, seL4_Word value)
+{
+    regs->rsp = value;
+}
+
+static inline void
+sel4utils_set_arg0(seL4_UserContext *regs, seL4_Word value)
+{
+    regs->rdi = value;
+}
+#endif /* _SEL4UTILS_ARCH_UTIL_H */
+
+
diff --git a/include/sel4utils/client_server_vspace.h b/include/sel4utils/client_server_vspace.h
new file mode 100644
index 0000000..de1561d
--- /dev/null
+++ b/include/sel4utils/client_server_vspace.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright 2014, NICTA
+ *
+ * This software may be distributed and modified according to the terms of
+ * the BSD 2-Clause license. Note that NO WARRANTY is provided.
+ * See "LICENSE_BSD2.txt" for details.
+ *
+ * @TAG(NICTA_BSD)
+ */
+
+/*
+ * This implements a virtual address space that proxies calls two different
+ * vspaces to ensure that pages in one are mapped into the other. This allows
+ * a server to efficiently read/write memory from a client. No attempt is made
+ * to construct contiguous regions in the server when contiguous regions are
+ * created in the client.
+ * Currently cannot correctly handle stacks (since their size is unknown to us)
+ * and cacheability attributes (currently assume all cached mappings)
+ */
+#ifndef _UTILS_CLIENT_SERVER_VSPACE_H
+#define _UTILS_CLIENT_SERVER_VSPACE_H
+
+#include <autoconf.h>
+
+#if defined CONFIG_LIB_SEL4_VSPACE && defined CONFIG_LIB_SEL4_VKA
+
+#include <vspace/vspace.h>
+#include <vka/vka.h>
+
+/**
+ * Initialize a new client server vspace
+ *
+ * @param vspace vspace struct to initialize
+ * @param vka vka for future allocations
+ * @param server vspace to duplicate all mappings into
+ * @param client vspace to directly proxy calls to
+ *
+ * @return 0 on success
+ */
+int sel4utils_get_cs_vspace(vspace_t *vspace, vka_t *vka, vspace_t *server, vspace_t *client);
+
+/**
+ * Translate virtual address in client to one in the server.
+ * This is only for that address! Pages are not contiguous
+ *
+ * @param vspace vspace to do translation with
+ * @param addr addr to lookup in the client
+ *
+ * @return Address in the server vspace, or NULL if not found
+ */
+void *sel4utils_cs_vspace_translate(vspace_t *vspace, void *addr);
+
+/**
+ * Perform a callback for every contiguous range of bytes in the server vspace
+ * for the range provided in the client vspace
+ *
+ * @param vspace vspace to do translation in
+ * @param addr Address in the client to start translation from
+ * @param len length of the range in the client
+ * @param proc Callback function that will be called back with ranges of address in the server after
+ *  page boundary splits. If non zero is returned the loop is stopped and the error returned
+ * @param cookie Cookie to be passed to the callback function
+ *
+ * @return Result of the callback if a non-zero value occurred, internal error code if mapping not found, zero otherwise
+ */
+int sel4utils_cs_vspace_for_each(vspace_t *vspace, void *addr, uint32_t len,
+        int (*proc)(void* addr, uint32_t len, void *cookie),
+        void *cookie);
+
+#endif /* CONFIG_LIB_SEL4_VSPACE && CONFIG_LIB_SEL4_VKA */
+#endif /* _UTILS_CLIENT_SERVER_VSPACE_H */
diff --git a/include/sel4utils/elf.h b/include/sel4utils/elf.h
new file mode 100644
index 0000000..2348bc1
--- /dev/null
+++ b/include/sel4utils/elf.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright 2014, NICTA
+ *
+ * This software may be distributed and modified according to the terms of
+ * the BSD 2-Clause license. Note that NO WARRANTY is provided.
+ * See "LICENSE_BSD2.txt" for details.
+ *
+ * @TAG(NICTA_BSD)
+ */
+#ifndef SEL4UTILS_ELF_H
+#define SEL4UTILS_ELF_H
+
+#include <autoconf.h>
+
+#if (defined CONFIG_LIB_SEL4_VKA && defined CONFIG_LIB_SEL4_VSPACE)
+
+#include <vka/vka.h>
+
+#include <vspace/vspace.h>
+
+typedef struct sel4utils_elf_region {
+    seL4_CapRights rights;
+    /* These two vstarts may differ if the elf was not mapped 1to1. Such an elf is not
+     * runnable, but allows it to be loaded into a vspace where it is not intended to be run */
+    void *elf_vstart;
+    void *reservation_vstart;
+    uint32_t size;
+    reservation_t *reservation;
+    int cacheable;
+} sel4utils_elf_region_t;
+
+/**
+ * Load an elf file into a vspace.
+ *
+ * The loader vspace and vka allocation will be preserved (no extra cslots or objects or vaddrs
+ * will leak from this function), even in the case of an error.
+ *
+ * The loadee vspace and vka will alter: cslots will be allocated for each frame to be
+ * mapped into the address space and frames will be allocated. In case of failure the entire
+ * virtual address space is left in the state where it failed.
+ *
+ * @param loadee the vspace to load the elf file into
+ * @param loader the vspace we are loading from
+ * @param loadee_vka allocator to use for allocation in the loadee vspace
+ * @param loader_vka allocator to use for loader vspace. Can be the same as loadee_vka.
+ * @param image_name name of the image in the cpio archive to load.
+ * @param regions Optional array for list of regions to be placed. Assumed to be the correct
+                  size as reported by a call to sel4utils_elf_num_regions
+ * @param mapanywhere If true allows the elf to be loaded anywhere in the vspace. Regions will
+                      still be contiguous
+ *
+ * @return The entry point of the new process, NULL on error
+ */
+void *
+sel4utils_elf_load_record_regions(vspace_t *loadee, vspace_t *loader, vka_t *loadee_vka,
+        vka_t *loader_vka, char *image_name, sel4utils_elf_region_t *regions, int mapanywhere);
+
+/**
+ * Wrapper for sel4utils_elf_load_record_regions. Does not record/perform reservations and
+ * maps into the correct virtual addresses
+ *
+ * @param loadee the vspace to load the elf file into
+ * @param loader the vspace we are loading from
+ * @param loadee_vka allocator to use for allocation in the loadee vspace
+ * @param loader_vka allocator to use for loader vspace. Can be the same as loadee_vka.
+ * @param image_name name of the image in the cpio archive to load.
+ *
+ * @return The entry point of the new process, NULL on error
+ */
+void *
+sel4utils_elf_load(vspace_t *loadee, vspace_t *loader, vka_t *loadee_vka,
+                   vka_t *loader_vka, char *image_name);
+
+/**
+ * Parses an elf file but does not actually load it. Merely reserves the regions in the vspace
+ * for where the elf segments would go. This is used for lazy loading / copy on write
+
+ * @param loadee the vspace to reserve the elf regions in
+ * @param image_name name of the image in the cpio archive to parse.
+ * @param regions Array for list of regions to be placed. Assumed to be the correct
+                  size as reported by a call to sel4utils_elf_num_regions
+ *
+ * @return The entry point of the elf, NULL on error
+ */
+void *
+sel4utils_elf_reserve(vspace_t *loadee, char *image_name, sel4utils_elf_region_t *regions);
+
+/**
+ * Parses an elf file and returns the number of loadable regions. The result of this
+ * is used to calculate the number of regions to pass to sel4utils_elf_reserve and
+ * sel4utils_elf_load_record_regions
+ *
+ * @param image_name name of the image in the cpio archive to inspect
+ * @return Number of loadable regions in the elf
+ */
+int
+sel4utils_elf_num_regions(char *image_name);
+
+#endif /* (defined CONFIG_LIB_SEL4_VKA && defined CONFIG_LIB_SEL4_VSPACE) */
+#endif /* SEL4UTILS_ELF_H */
diff --git a/include/sel4utils/mapping.h b/include/sel4utils/mapping.h
new file mode 100644
index 0000000..f0b967b
--- /dev/null
+++ b/include/sel4utils/mapping.h
@@ -0,0 +1,124 @@
+/*
+ * Copyright 2014, NICTA
+ *
+ * This software may be distributed and modified according to the terms of
+ * the BSD 2-Clause license. Note that NO WARRANTY is provided.
+ * See "LICENSE_BSD2.txt" for details.
+ *
+ * @TAG(NICTA_BSD)
+ */
+
+#ifndef _UTILS_MAPPING_H_
+#define _UTILS_MAPPING_H_
+
+#include <utils/util.h>
+#include <sel4/sel4.h>
+#include <autoconf.h>
+
+
+#if defined(CONFIG_ARCH_ARM)
+
+/* sizes (in bits) of pages supported by sel4 in ascending order */
+#if defined(ARM_HYP)
+static int UNUSED sel4_supported_page_sizes[] = {12, 16, 21, 25};
+#else
+static int UNUSED sel4_supported_page_sizes[] = {12, 16, 20, 24};
+#endif
+
+#define seL4_ARCH_Uncached_VMAttributes 0
+
+#define seL4_ARCH_Page_Map             seL4_ARM_Page_Map
+#define seL4_ARCH_Page_Unmap           seL4_ARM_Page_Unmap
+#define seL4_ARCH_PageTable_Map        seL4_ARM_PageTable_Map
+#define seL4_ARCH_ASIDPool_Assign      seL4_ARM_ASIDPool_Assign
+#define seL4_ARCH_PageTableObject      seL4_ARM_PageTableObject
+#define seL4_ARCH_PageDirectoryObject  seL4_ARM_PageDirectoryObject
+#define seL4_ARCH_Default_VMAttributes seL4_ARM_Default_VMAttributes
+#define seL4_ARCH_VMAttributes         seL4_ARM_VMAttributes
+#define seL4_ARCH_4KPage               seL4_ARM_SmallPageObject
+
+#elif defined(CONFIG_X86_64)
+
+/* sizes (in bits) of pages supported by sel4 in ascending order */
+static int UNUSED sel4_supported_page_sizes[] = {12, 21};
+
+#define seL4_ARCH_Page_Unmap                    seL4_X64_Page_Unmap
+#define seL4_ARCH_Page_Map                      seL4_X64_Page_Map
+#define seL4_ARCH_PageTable_Map                 seL4_X64_PageTable_Map
+#define seL4_ARCH_PageDirectory_Map             seL4_X64_PageDirectory_Map
+#define seL4_ARCH_PageDirectoryPointerTable_Map seL4_X64_PageDirectoryPointerTable_Map
+#define seL4_CapInitThreadPD                    seL4_CapInitThreadPML4
+#define seL4_ARCH_VMAttributes                  seL4_IA32_VMAttributes
+#define seL4_ARCH_4KPage                        seL4_X64_4K
+#define seL4_ARCH_Default_VMAttributes          seL4_IA32_Default_VMAttributes
+
+#elif defined(CONFIG_ARCH_IA32) /* CONFIG_ARCH_ARM */
+
+/* sizes (in bits) of pages supported by sel4 in ascending order */
+static int UNUSED sel4_supported_page_sizes[] = {12, 22};
+
+#define seL4_ARCH_Page_Map             seL4_IA32_Page_Map
+#define seL4_ARCH_Page_Unmap           seL4_IA32_Page_Unmap
+#define seL4_ARCH_PageTable_Map        seL4_IA32_PageTable_Map
+#define seL4_ARCH_ASIDPool_Assign      seL4_IA32_ASIDPool_Assign
+#define seL4_ARCH_PageTableObject      seL4_IA32_PageTableObject
+#define seL4_ARCH_PageDirectoryObject  seL4_IA32_PageDirectoryObject
+#define seL4_ARCH_Default_VMAttributes seL4_IA32_Default_VMAttributes
+#define seL4_ARCH_VMAttributes         seL4_IA32_VMAttributes
+#define seL4_ARCH_4KPage               seL4_IA32_4K
+#define seL4_ARCH_Uncached_VMAttributes 0
+
+#endif /* CONFIG_ARCH_IA32 */
+
+#define NUM_SEL4_PAGE_SIZES ((int) ARRAY_SIZE(sel4_supported_page_sizes))
+
+#ifdef CONFIG_LIB_SEL4_VKA
+
+#include <vka/vka.h>
+#include <vka/object.h>
+
+/* Map a page to a virtual address, allocating a page table if neccessary.
+*
+*
+* @param vka a vka compliant allocator
+* @param pd page directory to map the page into
+* @param page capability to the page to map in
+* @param vaddr unmapped virtual address to map the page into
+* @param rights permissions to map the page with
+* @param cacheable 1 if the page should be cached (0 if it is for DMA)
+* @param pagetable empty vka_object_t structure to be populated with page table
+*                  info if one is allocated
+*
+* @return error sel4 error code or -1 if allocation failed.
+*/
+int sel4utils_map_page(vka_t *vka, seL4_CPtr pd, seL4_CPtr frame, void *vaddr,
+        seL4_CapRights rights, int cacheable, vka_object_t *pagetable);
+
+/** Convenient wrapper for when you don't want to track allocated page tables */
+static inline int sel4utils_map_page_leaky(vka_t *vka, seL4_CPtr pd, seL4_CPtr frame, void *vaddr,
+        seL4_CapRights rights, int cacheable)
+{
+    vka_object_t pagetable;
+    return sel4utils_map_page(vka, pd, frame, vaddr, rights, cacheable, &pagetable);
+}
+
+#endif /* CONFIG_LIB_SEL4_VKA */
+
+#ifdef CONFIG_ARCH_IA32
+
+#ifdef CONFIG_IOMMU
+int sel4utils_map_iospace_page(vka_t *vka, seL4_CPtr iospace, seL4_CPtr frame, seL4_Word vaddr,
+                         seL4_CapRights rights, int cacheable, seL4_Word size_bits,
+                         vka_object_t *pts, int *num_pts);
+#endif
+
+#ifdef CONFIG_VTX
+int sel4utils_map_ept_page(vka_t *vka, seL4_CPtr pd, seL4_CPtr frame, seL4_Word vaddr,
+                   seL4_CapRights rights, int cacheable, seL4_Word size_bits, vka_object_t *pagetable, vka_object_t *pagedir);
+
+#endif 
+#endif 
+
+
+
+#endif /* _UTILS_MAPPING_H_ */
diff --git a/include/sel4utils/process.h b/include/sel4utils/process.h
new file mode 100644
index 0000000..4e0d909
--- /dev/null
+++ b/include/sel4utils/process.h
@@ -0,0 +1,242 @@
+/*
+ * Copyright 2014, NICTA
+ *
+ * This software may be distributed and modified according to the terms of
+ * the BSD 2-Clause license. Note that NO WARRANTY is provided.
+ * See "LICENSE_BSD2.txt" for details.
+ *
+ * @TAG(NICTA_BSD)
+ */
+#ifndef SEL4UTILS_PROCESS_H
+#define SEL4UTILS_PROCESS_H
+
+#include <autoconf.h>
+
+#if (defined CONFIG_LIB_SEL4_VSPACE && defined CONFIG_LIB_SEL4_VKA)
+
+#include <vka/vka.h>
+#include <vspace/vspace.h>
+
+#include <sel4utils/thread.h>
+#include <sel4utils/vspace.h>
+#include <sel4utils/elf.h>
+
+typedef struct object_node object_node_t;
+
+struct object_node {
+    vka_object_t object;
+    object_node_t *next;
+};
+
+typedef struct {
+    vka_object_t pd;
+    vspace_t vspace;
+    sel4utils_alloc_data_t data;
+    vka_object_t cspace;
+    uint32_t cspace_size;
+    uint32_t cspace_next_free;
+    sel4utils_thread_t thread;
+    vka_object_t fault_endpoint;
+    void *entry_point;
+    object_node_t *allocated_object_list_head;
+    /* if the elf wasn't loaded into the address space, this describes the regions.
+     * this permits lazy loading / copy on write / page sharing / whatever crazy thing
+     * you want to implement */
+    int num_elf_regions;
+    sel4utils_elf_region_t *elf_regions;
+} sel4utils_process_t;
+
+/* sel4utils processes start with some caps in their cspace.
+ * These are the caps
+ */
+enum sel4utils_cspace_layout {
+    /*
+     * The root cnode (with appropriate guard)
+     */
+    SEL4UTILS_CNODE_SLOT = 1,
+    /* The slot on the cspace that fault_endpoint is put if
+     * sel4utils_configure_process is used.
+     */
+    SEL4UTILS_ENDPOINT_SLOT = 2,
+};
+
+typedef struct {
+    /* should we handle elf logic at all? */
+    bool is_elf;
+    /* if so what is the image name? */
+    char *image_name;
+    /* Do you want the elf image preloaded? */
+    bool do_elf_load;
+    
+    /* otherwise what is the entry point? */
+    void *entry_point;
+
+    /* should we create a default single level cspace? */
+    bool create_cspace;
+    /* if so how big ? */
+    int one_level_cspace_size_bits;
+    
+    /* otherwise what is the root cnode ?*/
+    /* Note if you use a custom cspace then 
+     * sel4utils_copy_cap_to_process etc will not work */
+    vka_object_t cnode;
+
+    /* do you want us to create a vspace for you? */
+    bool create_vspace;
+    /* if not what is the page dir, and what is the vspace */
+    vspace_t *vspace;
+    vka_object_t page_dir;
+
+    /* if so, is there a regions you want left clear?*/
+    sel4utils_elf_region_t *reservations;
+    int num_reservations;
+
+    /* do you want a fault endpoint created? */ 
+    bool create_fault_endpoint;
+    /* otherwise what is it */
+    vka_object_t fault_endpoint;
+
+    int priority;
+#ifndef CONFIG_KERNEL_STABLE
+    seL4_CPtr asid_pool;
+#endif
+} sel4utils_process_config_t;
+
+/**
+ * Copy data from one vspace to another. Data is in POSIX standard argc, argv style.
+ *
+ * This is intended for use when loading applications of the format:
+ *
+ * int main(int argc, char **argv) { };
+ *
+ *
+ * @param current the vspace we are loading the arguments from
+ * @param target  the vspace we are loading the arguments to
+ * @param vka     the vka allocation interface to use when allocating pages.
+ *                the pages will first be mapped into the current vspace, then unmapped and
+ *                mapped into the target vspace, and left that way.
+ * @param argc    the number of strings
+ * @param argv    an array of c strings.
+ *
+ * @return NULL on failure, otherwise a pointer in the target address space where argv has
+ *              been written.
+ */
+void *sel4utils_copy_args(vspace_t *current, vspace_t *target, vka_t *vka,
+        int argc, char *argv[]);
+
+/**
+ * Start a process, and copy arguments into the processes address space.
+ *
+ * This is intended for use when loading applications of the format:
+ *
+ * int main(int argc, char **argv) { };
+ *
+ * The third argument (in r2 for arm, on stack for ia32) will be the address of the ipc buffer.
+ * This is intended to be loaded by the crt into the data word of the ipc buffer,
+ * or you can just add it to your function prototype.
+ *
+ * Add the following to your crt to use it this way:
+ *
+ * arm: str r2, [r2, #484]
+ * ia32: popl %ebp
+ *       movl %ebp, 484(%ebp)
+ *
+ * @param process initialised sel4utils process struct.
+ * @param vka     vka interface to use for allocation of frames.
+ * @param vspace  the current vspace.
+ * @param argc    the number of arguments.
+ * @param argv    a pointer to an array of strings in the current vspace.
+ * @param resume  1 to start the process, 0 to leave suspended.
+ *
+ * @return -1 on error, 0 on success.
+ *
+ */
+int sel4utils_spawn_process(sel4utils_process_t *process, vka_t *vka, vspace_t *vspace,
+        int argc, char *argv[], int resume);
+
+/**
+ * This is the function to use if you just want to set up a process as fast as possible.
+ * It creates a simple cspace and vspace for you, allocates a fault endpoint and puts
+ * it into the new cspace.
+ *
+ * It loads the elf file into the new vspace, parses the elf file and stores the entry point
+ * into process->entry_point.
+ *
+ * It uses the same vka for both allocations - the new process will not have an allocator.
+ *
+ * The process will have just one thread.
+ *
+ * Otherwise, use it as a model for loading processes and use the elf_load.
+ *
+ * WARNING: when launching processes on ia32 please ensure your calling convention
+ * matches that documented in sel4utils_start_thread. Otherwise your arguments
+ * won't come through correctly. 
+ *
+ * @param process      uninitialised process struct.
+ * @param vka          allocator to use to allocate objects.
+ * @param vspace vspace allocator for the current vspace.
+ * @param priority     priority to configure the process to run as.
+ * @param image_name   name of the elf image to load from the cpio archive.
+ *
+ * @return 0 on success, -1 on error.
+ */
+int sel4utils_configure_process(sel4utils_process_t *process, vka_t *vka, vspace_t *vspace,
+        uint8_t priority, char *image_name);
+
+/**
+ * Configure a process with more customisations (Create your own vspace, customise cspace size).
+ *
+ * @param process               uninitialised process struct
+ * @param vka                   allocator to use to allocate objects.
+ * @param spawner_vspace        vspace to use to allocate virtual memory in the current address space.
+ * @param config process config. 
+ *
+ * @return 0 on success, -1 on error.
+ */
+int sel4utils_configure_process_custom(sel4utils_process_t *process, vka_t *target_vka,
+        vspace_t *spawner_vspace, sel4utils_process_config_t config);
+
+/**
+ * Copy a cap into a process' cspace.
+ *
+ * This will only work if you configured the process using one of the above functions, or
+ * have mimicked their functionality.
+ *
+ * @param process process to copy the cap to
+ * @param src     path in the current cspace to copy the cap from
+ *
+ * @return 0 on failure, otherwise the slot in the processes cspace.
+ */
+seL4_CPtr sel4utils_copy_cap_to_process(sel4utils_process_t *process, cspacepath_t src);
+
+/**
+ *
+ * Mint a cap into a process' cspace.
+ *
+ * As above, except mint the cap with rights and data.
+ *
+ * @return 0 on failure, otherwise the slot in the cspace where the new cap is.
+ */
+seL4_CPtr sel4utils_mint_cap_to_process(sel4utils_process_t *process, cspacepath_t src, seL4_CapRights rights, seL4_CapData_t data);
+
+/**
+ * Destroy a process.
+ *
+ * This will free everything possible associated with a process and teardown the vspace.
+ *
+ * @param process process to destroy
+ * @param vka allocator used to allocate objects for this process
+ */
+void sel4utils_destroy_process(sel4utils_process_t *process, vka_t *vka);
+
+
+/* 
+ * sel4utils default allocated object function for vspaces.
+ * 
+ * Stores a list of allocated objects in the process struct and frees them
+ * when sel4utils_destroy_process is called.
+ */
+void sel4utils_allocated_object(void *cookie, vka_object_t object);
+
+#endif /* (defined CONFIG_LIB_SEL4_VSPACE && defined CONFIG_LIB_SEL4_VKA) */
+#endif /* SEL4UTILS_PROCESS_H */
diff --git a/include/sel4utils/profile.h b/include/sel4utils/profile.h
new file mode 100644
index 0000000..07ad529
--- /dev/null
+++ b/include/sel4utils/profile.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright 2014, NICTA
+ *
+ * This software may be distributed and modified according to the terms of
+ * the BSD 2-Clause license. Note that NO WARRANTY is provided.
+ * See "LICENSE_BSD2.txt" for details.
+ *
+ * @TAG(NICTA_BSD)
+ */
+
+#ifndef _UTILS_PROFILE_H_
+#define _UTILS_PROFILE_H_
+
+#include <sel4utils/util.h>
+#include <stdint.h>
+
+#define PROFILE_VAR_TYPE_INT32 1
+#define PROFILE_VAR_TYPE_INT64 2
+
+typedef struct profile_var {
+    int type;
+    void *var;
+    const char *varname;
+    const char *description;
+} profile_var_t;
+
+#define __WATCH_VAR(_type, _var, _description, _unique) \
+    static profile_var_t profile_##_unique \
+        __attribute__((used)) __attribute__((section("_profile_var"))) \
+        = {.type = _type, .var = &_var, .varname = #_var, .description = _description}
+
+#define _WATCH_VAR32(var, description, unique) \
+    compile_time_assert(profile_size_##unique, sizeof(var) == 4); \
+    __WATCH_VAR(PROFILE_VAR_TYPE_INT32, var, description, unique)
+
+#define WATCH_VAR32(var, description) \
+    _WATCH_VAR32(var, description, __LINE__)
+
+#define _WATCH_VAR64(var, description, unique) \
+    compile_time_assert(profile_size_##unique, sizeof(var) == 8); \
+    __WATCH_VAR(PROFILE_VAR_TYPE_INT64, var, description, unique)
+
+#define WATCH_VAR64(var, description) \
+    _WATCH_VAR64(var, description, __LINE__)
+
+typedef void (*profile_callback32)(uint32_t value, const char *varname, const char *description, void *cookie);
+typedef void (*profile_callback64)(uint64_t value, const char *varname, const char *description, void *cookie);
+
+/* Sample functions that just output the vars using printf. cookie is ignored */
+void profile_print32(uint32_t value, const char *varname, const char *description, void *cookie);
+void profile_print64(uint64_t value, const char *varname, const char *description, void *cookie);
+
+/* Iterates over all profile variables and calls back the specified function(s)
+ * with the current value. */
+void profile_scrape(profile_callback32 callback32, profile_callback64 callback64, void *cookie);
+
+#define _PSTART_TIME(x) uint32_t __begin_time##x = read_ccnt()
+#define _PEND_TIME(x) uint32_t __end_time##x = read_ccnt()
+#define _PTIME_DIFF(x) (__end_time##x - __begin_time##x)
+#define PINC(name) PADD(name, 1)
+#define PADDTIMEDIFF(name) PADD(name, PTIMEDIFF(name))
+#define PADDTIME(name) PEND_TIME(name); PADDTIMEDIFF(name)
+#define PTIMEDIFF(x) _PTIME_DIFF(x)
+
+#ifdef PROFILE
+#define PVARUINT32(name, desc) static uint32_t name = 0; WATCH_VAR32(name, desc);
+#define PVARUINT64(name, desc) static uint64_t name = 0; WATCH_VAR64(name, desc);
+#define PADD(name, x) do {name += (x); } while(0)
+#define PSTART_TIME(x) _PSTART_TIME(x)
+#define PEND_TIME(x) _PEND_TIME(x)
+#else
+#define PVARUINT32(name, desc)
+#define PVARUINT64(name, desc)
+#define PADD(name, x) do { } while(0)
+#define PSTART_TIME(x) do { } while(0)
+#define PEND_TIME(x) do { } while(0)
+#endif
+
+#endif
diff --git a/include/sel4utils/sel4_debug.h b/include/sel4utils/sel4_debug.h
new file mode 100644
index 0000000..e1358c4
--- /dev/null
+++ b/include/sel4utils/sel4_debug.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2014, NICTA
+ *
+ * This software may be distributed and modified according to the terms of
+ * the BSD 2-Clause license. Note that NO WARRANTY is provided.
+ * See "LICENSE_BSD2.txt" for details.
+ *
+ * @TAG(NICTA_BSD)
+ */
+
+/*
+ Authors: Cristan Szmajda, Ben Leslie
+ Description:
+  Program diagnostics as per 7.2
+ Status: Complete.
+*/
+
+/*
+ * <assert.h> may safely be included multiple times with or without NDEBUG
+ */
+#undef sel4_error
+
+#define	sel4_error(e, str)	((e == seL4_NoError) ? (void)0 : __sel4_error(e, __FILE__, __func__, __LINE__, str))
+
+/* Implemented in src/assert.c */
+#ifndef _SEL4_DEBUG_H_
+#define _SEL4_DEBUG_H_
+
+void __sel4_error(int, const char *, const char *, int, char *);
+
+#endif /* _SEL4_DEBUG_H_ */
diff --git a/include/sel4utils/stack.h b/include/sel4utils/stack.h
new file mode 100644
index 0000000..02e639e
--- /dev/null
+++ b/include/sel4utils/stack.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2014, NICTA
+ *
+ * This software may be distributed and modified according to the terms of
+ * the BSD 2-Clause license. Note that NO WARRANTY is provided.
+ * See "LICENSE_BSD2.txt" for details.
+ *
+ * @TAG(NICTA_BSD)
+ */
+#ifndef __SEL4UTILS_STACK_H
+#define __SEL4UTILS_STACK_H
+
+#include <vspace/vspace.h>
+
+/**
+ * Allocate a new stack and start running func on it.
+ * If func returns, you will be back on the old stack.
+ *
+ * @param vspace interface to allocate stack with
+ * @param func to jump to with the new stack.
+ * @return the return value of func. -1 if stack allocation fails.
+ *        Note: it would be unwise to write func such that it returns -1.
+ *
+ */
+int sel4utils_run_on_stack(vspace_t *vspace, int (*func)(void));
+
+#endif /* __SEL4UTILS_STACK_H */
diff --git a/include/sel4utils/thread.h b/include/sel4utils/thread.h
new file mode 100644
index 0000000..0f665a8
--- /dev/null
+++ b/include/sel4utils/thread.h
@@ -0,0 +1,154 @@
+/*
+ * Copyright 2014, NICTA
+ *
+ * This software may be distributed and modified according to the terms of
+ * the BSD 2-Clause license. Note that NO WARRANTY is provided.
+ * See "LICENSE_BSD2.txt" for details.
+ *
+ * @TAG(NICTA_BSD)
+ */
+
+/**
+ *
+ * Provides basic thread configuration/starting/cleanup functions.
+ *
+ * Any other operations (start, stop, resume) should use the seL4 API directly on
+ * sel4utils_thread_t->tcb.cptr.
+ *
+ */
+#ifndef _SEL4UTILS_THREAD_H
+#define _SEL4UTILS_THREAD_H
+
+#include <autoconf.h>
+
+#ifdef CONFIG_LIB_SEL4_VSPACE
+
+#include <sel4/sel4.h>
+
+#include <vka/vka.h>
+
+#include <vspace/vspace.h>
+
+typedef struct sel4utils_thread {
+    vka_object_t tcb;
+    void *stack_top;
+    seL4_CPtr ipc_buffer;
+    seL4_Word ipc_buffer_addr;
+} sel4utils_thread_t;
+
+/**
+ * Configure a thread, allocating any resources required.
+ *
+ * @param vka initialised vka to allocate objects with
+ * @param alloc initialised vspace structure to allocate virtual memory with
+ * @param fault_endpoint endpoint to set as the thread's fault endpoint. Can be 0.
+ * @param priority seL4 priority for the thread to be scheduled with.
+ * @param cspace the root of the cspace to start the thread in
+ * @param cspace_root_data data for cspace access
+ * @param res an uninitialised sel4utils_thread_t data structure that will be initialised
+ *            after this operation.
+ *
+ * @return 0 on success, -1 on failure. Use CONFIG_DEBUG to see error messages.
+ */
+int sel4utils_configure_thread(vka_t *vka, vspace_t *alloc, seL4_CPtr fault_endpoint,
+        uint8_t priority, seL4_CNode cspace, seL4_CapData_t cspace_root_data,
+        sel4utils_thread_t *res);
+
+/**
+ * Start a thread, allocating any resources required.
+ * The third argument to the thread (in r2 for arm, on stack for ia32) will be the
+ * address of the ipc buffer.
+ *
+ * @param thread      thread data structure that has been initialised with sel4utils_configure_thread
+ * @param entry_point the address that the thread will start at
+ *                     
+ *                    NOTE: In order for the on-stack argument passing to work for ia32, 
+ *                    entry points must be functions. 
+ *
+ *                    ie. jumping to this start symbol will work:
+ *
+ *                    void _start(int argc, char **argv) {
+ *                        int ret = main(argc, argv);
+ *                        exit(ret);
+ *                    }
+ * 
+ *
+ *                    However, jumping to a start symbol like this:
+ *
+ *                    _start:
+ *                         call main
+ *
+ *                    will NOT work, as call pushes an extra value (the return value)
+ *                    onto the stack. If you really require an assembler stub, it should
+ *                    decrement the stack value to account for this.
+ *
+ *                    ie.
+ *
+ *                    _start:
+ *                         popl %eax
+ *                         call main
+ *
+ *                    This does not apply for arm, as arguments are passed in registers.
+ *
+ *
+ * @param arg0        a pointer to the arguments for this thread. User decides the protocol.
+ * @param arg1        another pointer. User decides the protocol. Note that there are two args here
+ *                    to easily support C standard: int main(int argc, char **argv).
+ * @param resume      1 to start the thread immediately, 0 otherwise.
+ *
+ * @return 0 on success, -1 on failure.
+ */
+int sel4utils_start_thread(sel4utils_thread_t *thread, void *entry_point, void *arg0, void *arg1,
+        int resume);
+
+/**
+ * Release any resources used by this thread. The thread data structure will not be usable
+ * until sel4utils_configure_thread is called again.
+ *
+ * @param vka the vka interface that this thread was initialised with
+ * @param alloc the allocation interface that this thread was initialised with
+ * @param thread the thread structure that was returned when the thread started
+ */
+void sel4utils_clean_up_thread(vka_t *vka, vspace_t *alloc, sel4utils_thread_t *thread);
+
+/**
+ * Start a fault handling thread that will print the name of the thread that faulted
+ * as well as debugging information.
+ *
+ * @param fault_endpoint the fault_endpoint to wait on
+ * @param vka allocator
+ * @param vspace vspace (this library must be mapped into that vspace).
+ * @param prio the priority to run the thread at (recommend highest possible)
+ * @param cspace the cspace that the fault_endpoint is in
+ * @param data the cspace_data for that cspace (with correct guard)
+ * @param name the name of the thread to print if it faults
+ * @param res the thread data structure to populate
+ *
+ * @return 0 on success.
+ */
+int sel4utils_start_fault_handler(seL4_CPtr fault_endpoint, vka_t *vka, vspace_t *vspace, 
+        uint8_t prio, seL4_CPtr cspace, seL4_CapData_t data, char *name, sel4utils_thread_t *res);
+ 
+
+/**
+ * Pretty print a fault message.
+ *
+ * @param tag the message info tag delivered by the fault.
+ * @param name thread name 
+ */
+void sel4utils_print_fault_message(seL4_MessageInfo_t tag, char *name);
+
+static inline seL4_TCB
+sel4utils_get_tcb(sel4utils_thread_t *thread)
+{
+    return thread->tcb.cptr;
+}
+
+static inline int
+sel4utils_suspend_thread(sel4utils_thread_t *thread)
+{
+    return seL4_TCB_Suspend(thread->tcb.cptr);
+}
+
+#endif /* CONFIG_LIB_SEL4_VSPACE */
+#endif /* _SEL4UTILS_THREAD_H */
diff --git a/include/sel4utils/util.h b/include/sel4utils/util.h
new file mode 100644
index 0000000..a6a59d4
--- /dev/null
+++ b/include/sel4utils/util.h
@@ -0,0 +1,20 @@
+/*
+ * Copyright 2014, NICTA
+ *
+ * This software may be distributed and modified according to the terms of
+ * the BSD 2-Clause license. Note that NO WARRANTY is provided.
+ * See "LICENSE_BSD2.txt" for details.
+ *
+ * @TAG(NICTA_BSD)
+ */
+
+#ifndef _SEL4_UTILS_UTIL_H_
+#define _SEL4_UTILS_UTIL_H_
+
+#include <autoconf.h>
+#include <assert.h>
+#include <stdint.h>
+
+#include <utils/util.h>
+
+#endif
diff --git a/include/sel4utils/vspace.h b/include/sel4utils/vspace.h
new file mode 100644
index 0000000..252e635
--- /dev/null
+++ b/include/sel4utils/vspace.h
@@ -0,0 +1,279 @@
+/*
+ * Copyright 2014, NICTA
+ *
+ * This software may be distributed and modified according to the terms of
+ * the BSD 2-Clause license. Note that NO WARRANTY is provided.
+ * See "LICENSE_BSD2.txt" for details.
+ *
+ * @TAG(NICTA_BSD)
+ */
+
+/*
+ * An implementation of the vspace virtual memory allocation interface, using a two
+ * level page table.
+ *
+ * This implementation expects malloc to work, although it only uses it for small amounts of
+ * memory.
+ *
+ * Stack size is constant and can be configured in menuconfig.
+ *
+ * Stacks are allocated with 1 guard page between them, but they are contiguous in the vmem
+ * region.
+ *
+ * Allocation starts at 0x00001000.
+ *
+ * This library will allow you to map over anything that it doesn't know about, so
+ * make sure you tell it if you do any external mapping.
+ *
+ */
+#ifndef _UTILS_VSPACE_H
+#define _UTILS_VSPACE_H
+
+#include <autoconf.h>
+
+#if defined CONFIG_LIB_SEL4_VSPACE && defined CONFIG_LIB_SEL4_VKA
+
+#include <vspace/vspace.h>
+#include <vka/vka.h>
+#include <sel4utils/util.h>
+
+/* These definitions are only here so that you can take the size of them.
+ * TOUCHING THESE DATA STRUCTURES IN ANY WAY WILL BREAK THE WORLD
+ * */
+#ifdef CONFIG_X86_64
+#define VSPACE_LEVEL_BITS 9
+#else
+#define VSPACE_LEVEL_BITS 10
+#endif
+
+#define VSPACE_LEVEL_SIZE BIT(VSPACE_LEVEL_BITS)
+
+typedef struct bottom_level {
+    seL4_CPtr bottom_level[VSPACE_LEVEL_SIZE];
+} bottom_level_t;
+
+typedef int(*sel4utils_map_page_fn)(vspace_t *vspace, seL4_CPtr cap, void *vaddr, seL4_CapRights rights, int cacheable, size_t size_bits);
+
+typedef struct sel4utils_alloc_data {
+    vka_t *vka;
+    bottom_level_t **top_level;
+    void *next_bottom_level_vaddr;
+    void *last_allocated;
+    vspace_t *bootstrap;
+    sel4utils_map_page_fn map_page;
+} sel4utils_alloc_data_t;
+
+struct reservation {
+    void *start;
+    void *end;
+    seL4_CapRights rights;
+    int cacheable;
+};
+
+/**
+ * This is a mostly internal function for constructing a vspace. Allows a vspace to be created
+ * with an arbitrary function to invoke for the mapping of pages. This is useful if you want
+ * a vspace manager, but you do not want to use seL4 page directories
+ *
+ * @param loader                  vspace of the current process, used to allocate
+ *                                virtual memory book keeping.
+ * @param new_vspace              uninitialised vspace struct to populate.
+ * @param data                    uninitialised vspace data struct to populate.
+ * @param vka                     initialised vka that this virtual memory allocator will use to
+ *                                allocate pages and pagetables. This allocator will never invoke free.
+ * @param page_directory          page directory for the new vspace.
+ * @param allocated_object_fn     function to call when objects are allocated. Can be null.
+ * @param allocated_object_cookie cookie passed when the above function is called. Can be null.
+ * @param map_page                Function that will be called to map seL4 pages
+ *
+ * @return 0 on success.
+ */
+int
+sel4utils_get_vspace_with_map(vspace_t *loader, vspace_t *new_vspace, sel4utils_alloc_data_t *data,
+        vka_t *vka, seL4_CPtr page_directory,
+        vspace_allocated_object_fn allocated_object_fn, void *allocated_object_cookie, sel4utils_map_page_fn map_page);
+
+/**
+ * Initialise a vspace allocator for a new address space (not the current one).
+ *
+ * @param loader                  vspace of the current process, used to allocate
+ *                                virtual memory book keeping.
+ * @param new_vspace              uninitialised vspace struct to populate.
+ * @param data                    uninitialised vspace data struct to populate.
+ * @param vka                     initialised vka that this virtual memory allocator will use to
+ *                                allocate pages and pagetables. This allocator will never invoke free.
+ * @param page_directory          page directory for the new vspace.
+ * @param allocated_object_fn     function to call when objects are allocated. Can be null.
+ * @param allocated_object_cookie cookie passed when the above function is called. Can be null.
+ *
+ * @return 0 on success.
+ */
+int sel4utils_get_vspace(vspace_t *loader, vspace_t *new_vspace, sel4utils_alloc_data_t *data,
+        vka_t *vka, seL4_CPtr page_directory, vspace_allocated_object_fn allocated_object_fn,
+        void *allocated_object_cookie);
+
+#ifdef CONFIG_VTX
+/**
+ * Initialise a vspace allocator for an EPT address space
+ *
+ * @param loader                  vspace of the current process, used to allocate
+ *                                virtual memory book keeping.
+ * @param new_vspace              uninitialised vspace struct to populate.
+ * @param vka                     initialised vka that this virtual memory allocator will use to
+ *                                allocate pages and pagetables. This allocator will never invoke free.
+ * @param ept                     EPT page directory for the new vspace.
+ * @param allocated_object_fn     function to call when objects are allocated. Can be null.
+ * @param allocated_object_cookie cookie passed when the above function is called. Can be null.
+ *
+ * @return 0 on success.
+ */
+int sel4utils_get_vspace_ept(vspace_t *loader, vspace_t *new_vspace, vka_t *vka,
+        seL4_CPtr ept, vspace_allocated_object_fn allocated_object_fn, void *allocated_object_cookie);
+#endif /* CONFIG_VTX */
+
+/**
+ * Initialise a vspace allocator for the current address space (this is intended
+ * for use by a task that is not the root task but has no vspace, ie one loaded by the capDL loader).
+ *
+ * @param vspace                  uninitialised vspace struct to populate.
+ * @param data                    uninitialised vspace data struct to populate.
+ * @param vka                     initialised vka that this virtual memory allocator will use to
+ *                                allocate pages and pagetables. This allocator will never invoke free.
+ * @param page_directory          page directory for the new vspace.
+ * @param allocated_object_fn     function to call when objects are allocated. Can be null.
+ * @param allocated_object_cookie cookie passed when the above function is called. Can be null.
+ * @param existing_frames         a NULL terminated list of virtual addresses for 4K frames that are
+ *                                already allocated. For larger frames, just pass in the virtual
+ *                                address range in 4K addresses. This will prevent the allocator
+ *                                from overriding these frames.
+ *
+ * @return 0 on success.
+ *
+ */
+int sel4utils_bootstrap_vspace(vspace_t *vspace, sel4utils_alloc_data_t *data,
+        seL4_CPtr page_directory, vka_t *vka,
+        vspace_allocated_object_fn allocated_object_fn, void *allocated_object_cookie,
+        void *existing_frames[]);
+
+/**
+ * Initialise a vspace allocator for the current address space (this is intended
+ * for use by the root task). Take details of existing frames from bootinfo.
+ *
+ * @param vspace                  uninitialised vspace struct to populate.
+ * @param data                    uninitialised vspace data struct to populate.
+ * @param vka                     initialised vka that this virtual memory allocator will use to
+ *                                allocate pages and pagetables. This allocator will never invoke free.
+ * @param info                    seL4 boot info
+ * @param page_directory          page directory for the new vspace.
+ * @param allocated_object_fn     function to call when objects are allocated. Can be null.
+ * @param allocated_object_cookie cookie passed when the above function is called. Can be null.
+ *
+ * @return 0 on success.
+ *
+ */
+int sel4utils_bootstrap_vspace_with_bootinfo(vspace_t *vspace, sel4utils_alloc_data_t *data,
+        seL4_CPtr page_directory,
+        vka_t *vka, seL4_BootInfo *info, vspace_allocated_object_fn allocated_object_fn,
+        void *allocated_object_cookie);
+
+/* Wrapper function that configures a vspace allocator such that all allocated objects are not
+ * tracked.
+ */
+static inline int
+sel4utils_get_vspace_leaky(vspace_t *loader, vspace_t *new_vspace, sel4utils_alloc_data_t *data,
+        vka_t *vka, seL4_CPtr page_directory)
+{
+    return sel4utils_get_vspace(loader, new_vspace, data, vka, page_directory,
+            (vspace_allocated_object_fn) NULL, NULL);
+}
+
+#ifdef CONFIG_VTX
+static inline int
+sel4utils_get_vspace_ept_leaky(vspace_t *loader, vspace_t *new_vspace,
+        vka_t *vka, seL4_CPtr page_directory)
+{
+    return sel4utils_get_vspace_ept(loader, new_vspace, vka, page_directory,
+            (vspace_allocated_object_fn) NULL, NULL);
+}
+#endif /* CONFIG_VTX */
+
+static inline int
+sel4utils_bootstrap_vspace_with_bootinfo_leaky(vspace_t *vspace, sel4utils_alloc_data_t *data, seL4_CPtr page_directory,
+        vka_t *vka, seL4_BootInfo *info)
+{
+    return sel4utils_bootstrap_vspace_with_bootinfo(vspace, data, page_directory, vka, info, NULL, NULL);
+}
+
+static inline int
+sel4utils_bootstrap_vspace_leaky(vspace_t *vspace, sel4utils_alloc_data_t *data, seL4_CPtr page_directory, vka_t *vka,
+        void *existing_frames[])
+{
+    return sel4utils_bootstrap_vspace(vspace, data, page_directory, vka, NULL, NULL, existing_frames);
+}
+
+/**
+ * Attempts to create a new vspace reservation. Function behaves similarly to vspace_reserve_range
+ * except a reservation struct is passed in, instead of being malloc'ed. This is intended to be
+ * used during bootstrapping where malloc has not yet been setup.
+ * Reservations created with this function should *only* be freed with sel4utils_free_reservation_no_alloc
+ *
+ * @param vspace the virtual memory allocator to use.
+ * @param reservation Allocated reservation struct to fill out
+ * @param size the size in bytes to map.
+ * @param rights the rights to map the pages in with in this reservation
+ * @param cacheable 1 if the pages should be mapped with cacheable attributes. 0 for DMA.
+ * @param vaddr the virtual address of the reserved range will be returned here.
+ *
+ * @return 0 on success
+ */
+int sel4utils_reserve_range_no_alloc(vspace_t *vspace, reservation_t *reservation, size_t size,
+        seL4_CapRights rights, int cacheable, void **result);
+
+/**
+ * Attempts to create a new vspace reservation. Function behaves similarly to vspace_reserve_range_at
+ * except a reservation struct is passed in, instead of being malloc'ed. This is intended to be
+ * used during bootstrapping where malloc has not yet been setup.
+ * Reservations created with this function should *only* be freed with sel4utils_free_reservation_no_alloc
+ *
+ * @param vspace the virtual memory allocator to use.
+ * @param reservation Allocated reservation struct to fill out
+ * @param vaddr the virtual address to start the range at.
+ * @param size the size in bytes to map.
+ * @param rights the rights to map the pages in with in this reservation
+ * @param cacheable 1 if the pages should be mapped with cacheable attributes. 0 for DMA.
+ *
+ * @return 0 on success
+ */
+int sel4utils_reserve_range_at_no_alloc(vspace_t *vspace, reservation_t *reservation, void *vaddr,
+        size_t size, seL4_CapRights rights, int cacheable);
+
+/**
+ * Frees a reservation. Function behaves similarly to vspace_free_reservation, except
+ * it should only be called on reservations that were created with sel4utils_reserve_range_no_alloc
+ * and sel4utils_reserve_range_at_no_alloc
+ * @param vspace the virtual memory allocator to use.
+ * @param reservation the reservation to free.
+ */
+void sel4utils_free_reservation_no_alloc(vspace_t *vspace, reservation_t *reservation);
+
+/*
+ * Copy the code and data segment (the image effectively) from current vspace
+ * into clone vspace. The clone vspace should be initialised.
+ * 
+ * @param current the vspace to copy from.
+ * @param clone the vspace to copy to.
+ * @param reservation the previously established reservation in clone to copy.
+ * @return 0 on success.
+ */
+int sel4utils_bootstrap_clone_into_vspace(vspace_t *current, vspace_t *clone, reservation_t *reserve);
+
+/**
+ * Get the bounds of _executable_start and _end.
+ *
+ * @param va_start return va_start.
+ * @param va_end return va_end.
+ */
+void sel4utils_get_image_region(seL4_Word *va_start, seL4_Word *va_end);
+
+#endif /* CONFIG_LIB_SEL4_VSPACE && CONFIG_LIB_SEL4_VKA */
+#endif /* _UTILS_VSPACE_H */
diff --git a/include/sel4utils/vspace_internal.h b/include/sel4utils/vspace_internal.h
new file mode 100644
index 0000000..f1b78b2
--- /dev/null
+++ b/include/sel4utils/vspace_internal.h
@@ -0,0 +1,217 @@
+/*
+ * Copyright 2014, NICTA
+ *
+ * This software may be distributed and modified according to the terms of
+ * the BSD 2-Clause license. Note that NO WARRANTY is provided.
+ * See "LICENSE_BSD2.txt" for details.
+ *
+ * @TAG(NICTA_BSD)
+ */
+
+#ifndef __SEL4UILS_VSPACE_PRIVATE_H
+#define __SEL4UILS_VSPACE_PRIVATE_H
+
+#include <vka/vka.h>
+#include <vspace/vspace.h>
+
+#include <sel4utils/util.h>
+#include <sel4utils/mapping.h>
+#include <sel4utils/vspace.h>
+
+#define KERNEL_RESERVED_START 0xE0000000
+#define KERNEL_RESERVED_END   0xFFFFF000
+
+#define RESERVED 0xFFFFFFFF
+#define EMPTY    0
+
+/* Bits to remove to get to the start of the second level translation bits. There are
+ * many implicit assumptions in the code that this is actually 12 bits and hence
+ * translation addresses can be stepped using the PAGE_SIZE_4K constant. */
+#ifdef CONFIG_X86_64
+#define BOTTOM_LEVEL_BITS_OFFSET (30 - 2 * VSPACE_LEVEL_BITS)
+#else
+#define BOTTOM_LEVEL_BITS_OFFSET (seL4_WordBits - 2 * VSPACE_LEVEL_BITS)
+#endif
+
+/* Bits to remove to get to the start of the first level translation bits */
+#ifdef CONFIG_X86_64
+#define TOP_LEVEL_BITS_OFFSET (30 - VSPACE_LEVEL_BITS)
+#else
+#define TOP_LEVEL_BITS_OFFSET (seL4_WordBits - VSPACE_LEVEL_BITS)
+#endif
+
+#define LEVEL_MASK MASK_UNSAFE(VSPACE_LEVEL_BITS)
+
+#define BOTTOM_LEVEL_INDEX(x) ((((uint32_t) ((seL4_Word)(x))) >> BOTTOM_LEVEL_BITS_OFFSET) & LEVEL_MASK)
+#define TOP_LEVEL_INDEX(x) ((((uint32_t) ((seL4_Word)(x))) >> TOP_LEVEL_BITS_OFFSET)  & LEVEL_MASK)
+
+int assure_level(vspace_t *vspace, void *vaddr);
+int bootstrap_create_level(vspace_t *vspace, void *vaddr);
+
+static inline sel4utils_alloc_data_t *
+get_alloc_data(vspace_t *vspace)
+{
+    return (sel4utils_alloc_data_t *) vspace->data;
+}
+
+static inline int
+update_entry(vspace_t *vspace, void *vaddr, seL4_CPtr page)
+{
+    sel4utils_alloc_data_t *data = get_alloc_data(vspace);
+
+    if (assure_level(vspace, vaddr) != seL4_NoError) {
+        return -1;
+    }
+
+    assert(data->top_level[TOP_LEVEL_INDEX(vaddr)] != (void *) RESERVED);
+    assert(data->top_level[TOP_LEVEL_INDEX(vaddr)] != NULL);
+    data->top_level[TOP_LEVEL_INDEX(vaddr)]->bottom_level[BOTTOM_LEVEL_INDEX(vaddr)] = page;
+
+    return seL4_NoError;
+}
+
+static inline int
+update(vspace_t *vspace, void *vaddr, seL4_CPtr page)
+{
+   
+#if 0
+    if (page == RESERVED) {
+        LOG_ERROR("Cap %x cannot be used!", RESERVED);
+        return -1;
+    }
+#endif 
+    assert(page != 0);
+    assert(get_alloc_data(vspace)->top_level[TOP_LEVEL_INDEX(vaddr)] != (void *) RESERVED);
+    return update_entry(vspace, vaddr, page);
+}
+
+static inline int
+reserve(vspace_t *vspace, void *vaddr)
+{
+    assert(get_alloc_data(vspace)->top_level[TOP_LEVEL_INDEX(vaddr)] != (void *) RESERVED);
+    return update_entry(vspace, vaddr, RESERVED);
+}
+
+static inline void
+clear(vspace_t *vspace, void *vaddr)
+{
+    /* if we are clearing an entry it should have been allocated before */
+    sel4utils_alloc_data_t *data = get_alloc_data(vspace);
+    assert(data->top_level[TOP_LEVEL_INDEX(vaddr)] != NULL);
+    assert(data->top_level[TOP_LEVEL_INDEX(vaddr)] != (void *) RESERVED);
+
+    update_entry(vspace, vaddr, 0);
+    if (vaddr < data->last_allocated) {
+        data->last_allocated = vaddr;
+    }
+}
+
+static inline seL4_CPtr
+get_entry(bottom_level_t *top[], void *vaddr)
+{
+    if (top[TOP_LEVEL_INDEX(vaddr)] == NULL) {
+        return 0;
+    }
+
+    return top[TOP_LEVEL_INDEX(vaddr)]->bottom_level[BOTTOM_LEVEL_INDEX(vaddr)];
+}
+
+/* update entry in page table and handle large pages */
+static inline int
+update_entries(vspace_t *vspace, void *vaddr, seL4_CPtr cap, size_t size_bits)
+{
+    int error = seL4_NoError;
+    for (int i = 0; i < (1 << size_bits) && error == seL4_NoError; i += PAGE_SIZE_4K) {
+        error = update(vspace, vaddr + i, cap);
+    }
+
+    return error;
+}
+
+/* reserve entry in page table and handle large pages */
+static inline int
+reserve_entries(vspace_t *vspace, void *vaddr, size_t size_bits)
+{
+    int error = seL4_NoError;
+    for (int i = 0; i < (1 << size_bits) && error == seL4_NoError; i += PAGE_SIZE_4K) {
+        error = reserve(vspace, vaddr + i);
+    }
+
+    return error;
+}
+
+/* clear an entry in the page table and handle large pages */
+static inline void
+clear_entries(vspace_t *vspace, void *vaddr, size_t size_bits)
+{
+    assert( ((uintptr_t)vaddr & ((1 << size_bits) - 1)) == 0);
+    for (int i = 0; i < (1 << size_bits); i += PAGE_SIZE_4K) {
+        clear(vspace, vaddr + i);
+    }
+}
+
+static inline int
+is_available(bottom_level_t *top_level[VSPACE_LEVEL_SIZE], void *vaddr)
+{
+    /* if there is no bottom level page table then this entry is available */
+    if (top_level[TOP_LEVEL_INDEX(vaddr)] == NULL) {
+        return 1;
+        /* otherwise check the entry explicitly */
+    } else if (top_level[TOP_LEVEL_INDEX(vaddr)] == (void *) RESERVED) {
+        return 0;
+    }
+
+    return top_level[TOP_LEVEL_INDEX(vaddr)]->bottom_level[BOTTOM_LEVEL_INDEX(vaddr)] == 0;
+}
+
+static inline int
+is_reserved(bottom_level_t *top_level[VSPACE_LEVEL_SIZE], void *vaddr)
+{
+    if (top_level[TOP_LEVEL_INDEX(vaddr)] == (void *)  RESERVED) {
+        return 1;
+    }
+
+    if (top_level[TOP_LEVEL_INDEX(vaddr)] == NULL) {
+        return 0;
+    }
+#if 0
+    printf("is reserved vaddr 0x%x 0x%x \n",vaddr, top_level[TOP_LEVEL_INDEX(vaddr)]->bottom_level[BOTTOM_LEVEL_INDEX(vaddr)]);
+#endif
+    return top_level[TOP_LEVEL_INDEX(vaddr)]->bottom_level[BOTTOM_LEVEL_INDEX(vaddr)] == RESERVED;
+}
+
+/* Internal interface functions */
+int sel4utils_map_page_pd(vspace_t *vspace, seL4_CPtr cap, void *vaddr, seL4_CapRights rights, int cacheable, size_t size_bits);
+#ifdef CONFIG_VTX
+int sel4utils_map_page_ept(vspace_t *vspace, seL4_CPtr cap, void *vaddr, seL4_CapRights rights, int cacheable, size_t size_bits);
+#endif /* CONFIG_VTX */
+
+/* interface functions */
+int sel4utils_map_pages_at_vaddr(vspace_t *vspace, seL4_CPtr caps[], void *vaddr,
+        size_t num_pages, size_t size_bits, reservation_t *reservation);
+seL4_CPtr sel4utils_get_cap(vspace_t *vspace, void *vaddr);
+void *sel4utils_map_pages(vspace_t *vspace, seL4_CPtr caps[], seL4_CapRights rights,
+        size_t num_pages, size_t size_bits, int cacheable);
+void sel4utils_unmap_pages(vspace_t *vspace, void *vaddr, size_t num_pages, size_t size_bits);
+
+int sel4utils_unmap_reserved_pages(vspace_t *vspace, void *vaddr, size_t num_pages, size_t size_bits, reservation_t *reservation);
+
+int sel4utils_new_pages_at_vaddr(vspace_t *vspace, void *vaddr, size_t num_pages,
+        size_t size_bits, reservation_t *reservation);
+void *sel4utils_new_pages(vspace_t *vspace, seL4_CapRights rights, size_t num_pages,
+        size_t size_bits);
+void sel4utils_free_pages(vspace_t *vspace, void *vaddr, size_t num_pages, size_t size_bits);
+
+void *sel4utils_new_stack(vspace_t *vspace);
+void sel4utils_free_stack(vspace_t *vspace, void *stack_top);
+
+void * sel4utils_new_ipc_buffer(vspace_t *vspace, seL4_CPtr *page);
+void sel4utils_free_ipc_buffer(vspace_t *vspace, void *vaddr);
+
+reservation_t *sel4utils_reserve_range(vspace_t *vspace, size_t size, seL4_CapRights rights,
+        int cacheable, void **vaddr);
+reservation_t *sel4utils_reserve_range_at(vspace_t *vspace, void *vaddr, size_t size,
+        seL4_CapRights rights, int cacheable);
+void sel4utils_free_reservation(vspace_t *vspace, reservation_t *reservation);
+
+#endif /* __SEL4UILS_VSPACE_PRIVATE_H */
diff --git a/src/elf.c b/src/elf.c
new file mode 100644
index 0000000..734439f
--- /dev/null
+++ b/src/elf.c
@@ -0,0 +1,303 @@
+/*
+ * Copyright 2014, NICTA
+ *
+ * This software may be distributed and modified according to the terms of
+ * the BSD 2-Clause license. Note that NO WARRANTY is provided.
+ * See "LICENSE_BSD2.txt" for details.
+ *
+ * @TAG(NICTA_BSD)
+ */
+#include <autoconf.h>
+
+#if (defined CONFIG_LIB_SEL4_VKA && defined CONFIG_LIB_SEL4_VSPACE)
+
+#include <string.h>
+#include <sel4/sel4.h>
+#include <elf/elf.h>
+#include <cpio/cpio.h>
+#include <vka/capops.h>
+#include <sel4utils/thread.h>
+#include <sel4utils/util.h>
+#include <sel4utils/mapping.h>
+#include <sel4utils/elf.h>
+
+#ifdef CONFIG_X86_64
+#define SZFMT   "%ld"
+#else
+#define SZFMT   "%u"
+#endif
+
+/* This library works with our cpio set up in the build system */
+extern char _cpio_archive[];
+
+/*
+ * Convert ELF permissions into seL4 permissions.
+ *
+ * @param permissions elf permissions
+ * @return seL4 permissions
+ */
+/*
+ * Translate ELF program-header permission bits into seL4 capability rights.
+ *
+ * Executable segments are also granted read rights, since seL4 has no
+ * separate execute right here.
+ *
+ * @param permissions elf permission flags (PF_R / PF_W / PF_X)
+ * @return the equivalent seL4 rights mask
+ */
+static inline seL4_CapRights
+rights_from_elf(unsigned long permissions)
+{
+    seL4_CapRights rights = 0;
+
+    if (permissions & (PF_R | PF_X)) {
+        rights |= seL4_CanRead;
+    }
+    if (permissions & PF_W) {
+        rights |= seL4_CanWrite;
+    }
+
+    return rights;
+}
+
+/*
+ * Copy one ELF segment into the loadee's address space.
+ *
+ * New frames are allocated in the loadee vspace, temporarily double-mapped
+ * into the loader vspace so their contents can be written, then unmapped
+ * from the loader again. Works one 4K page at a time.
+ *
+ * @param loadee_vspace vspace being populated
+ * @param loader_vspace vspace of the caller, used for the temporary mapping
+ * @param loadee_vka    allocator owning the loadee's frames and cslots
+ * @param loader_vka    allocator for the loader's scratch cslot
+ * @param src           segment data in the loader's address space
+ * @param segment_size  in-memory size of the segment (>= file_size)
+ * @param file_size     number of bytes to copy from src
+ * @param dst           destination virtual address in the loadee
+ * @param reservation   reservation covering the destination range
+ * @return seL4_NoError on success
+ */
+static int
+load_segment(vspace_t *loadee_vspace, vspace_t *loader_vspace,
+             vka_t *loadee_vka, vka_t *loader_vka,
+             char *src, size_t segment_size, size_t file_size, uint32_t dst,
+             reservation_t *reservation)
+{
+    int error = seL4_NoError;
+
+    if (file_size > segment_size) {
+        LOG_ERROR("Error, file_size "SZFMT" > segment_size "SZFMT, file_size, segment_size);
+        return seL4_InvalidArgument;
+    }
+
+    /* create a slot to map a page into the loader address space */
+    seL4_CPtr loader_slot;
+    cspacepath_t loader_frame_cap;
+
+    error = vka_cspace_alloc(loader_vka, &loader_slot);
+    if (error) {
+        LOG_ERROR("Failed to allocate cslot by loader vka: %d", error);
+        return error;
+    }
+    vka_cspace_make_path(loader_vka, loader_slot, &loader_frame_cap);
+
+    /* We work a page at a time */
+    unsigned int pos = 0;
+    while (pos < segment_size && error == seL4_NoError) {
+        void *loader_vaddr = 0;
+        void *loadee_vaddr = (void *) ((seL4_Word)ROUND_DOWN(dst, PAGE_SIZE_4K));
+
+        /* create and map the frame in the loadee address space */
+        error = vspace_new_pages_at_vaddr(loadee_vspace, loadee_vaddr, 1, seL4_PageBits, reservation);
+        if (error != seL4_NoError) {
+            /* 'continue' re-tests the loop condition, which is now false —
+             * this behaves as a break while keeping 'error' for the caller */
+            LOG_ERROR("ERROR: failed to allocate frame by loadee vka: %d", error);
+            continue;
+        }
+
+        /* copy the frame cap to map into the loader address space */
+        cspacepath_t loadee_frame_cap;
+
+        vka_cspace_make_path(loadee_vka, vspace_get_cap(loadee_vspace, loadee_vaddr),
+                &loadee_frame_cap);
+        error = vka_cnode_copy(&loader_frame_cap, &loadee_frame_cap, seL4_AllRights);
+        if (error != seL4_NoError) {
+            LOG_ERROR("ERROR: failed to copy frame cap into loader cspace: %d", error);
+            continue;
+        }
+
+        /* map the frame into the loader address space */
+        loader_vaddr = vspace_map_pages(loader_vspace, &loader_frame_cap.capPtr, seL4_AllRights,
+                       1, seL4_PageBits, 1);
+        if (loader_vaddr == NULL) {
+            LOG_ERROR("failed to map frame into loader vspace.");
+            error = -1;
+            continue;
+        }
+
+        /* finally copy the data */
+        /* nbytes is the room left in this page from dst's offset onward;
+         * only the first page of a segment can start mid-page */
+        int nbytes = PAGE_SIZE_4K - (dst & PAGE_MASK_4K);
+        if (pos < file_size) {
+            memcpy(loader_vaddr + (dst % PAGE_SIZE_4K), (void*)src, MIN(nbytes, file_size - pos));
+        }
+        /* Note that we don't need to explicitly zero frames as seL4 gives us zero'd frames */
+
+#ifdef CONFIG_ARCH_ARM
+        /* Flush the caches */
+        seL4_ARM_Page_Unify_Instruction(loader_frame_cap.capPtr,0, PAGE_SIZE_4K);
+        seL4_ARM_Page_Unify_Instruction(loadee_frame_cap.capPtr,0, PAGE_SIZE_4K);
+#endif /* CONFIG_ARCH_ARM */
+
+        /* now unmap the page in the loader address space */
+        vspace_unmap_pages(loader_vspace, (void *) loader_vaddr, 1, seL4_PageBits);
+        vka_cnode_delete(&loader_frame_cap);
+
+        pos += nbytes;
+        dst += nbytes;
+        src += nbytes;
+    }
+
+    /* clear the cslot */
+    vka_cspace_free(loader_vka, loader_frame_cap.capPtr);
+
+    return error;
+}
+
+/*
+ * Count the loadable (PT_LOAD) program headers of an ELF file in the
+ * linked-in cpio archive.
+ *
+ * @param image_name name of the ELF file in the cpio archive
+ * @return the number of loadable segments, or 0 if the file is missing
+ */
+int
+sel4utils_elf_num_regions(char *image_name)
+{
+    int i;
+    unsigned long elf_size;
+    char *elf_file;
+    assert(image_name);
+    elf_file = cpio_get_file(_cpio_archive, image_name, &elf_size);
+    if (elf_file == NULL) {
+        LOG_ERROR("ERROR: failed to load elf file %s", image_name);
+        return 0;
+    }
+
+    int num_headers = elf_getNumProgramHeaders(elf_file);
+    int loadable_headers = 0;
+
+    for (i = 0; i < num_headers; i++) {
+        /* Skip non-loadable segments (such as debugging data). */
+        if (elf_getProgramHeaderType(elf_file, i) == PT_LOAD) {
+            loadable_headers++;
+        }
+    }
+    return loadable_headers;
+}
+
+/*
+ * Fill in a sel4utils_elf_region_t describing one loadable segment and,
+ * if a loadee vspace is given, reserve the corresponding virtual range.
+ *
+ * @param loadee       target vspace, or NULL to fill in the region only
+ * @param flags        ELF permission flags for the segment
+ * @param segment_size in-memory size of the segment in bytes
+ * @param vaddr        segment start address from the ELF program header
+ * @param region       region descriptor to fill in
+ * @param anywhere     if non-zero, reserve at any free address instead of
+ *                     the ELF-specified address
+ * @return 0 on success, non-zero if the reservation failed
+ */
+static int 
+make_region(vspace_t *loadee, unsigned long flags, unsigned long segment_size,
+        unsigned long vaddr, sel4utils_elf_region_t *region, int anywhere)
+{
+    region->cacheable = 1;
+    region->rights = rights_from_elf(flags);
+    /* round the start down and the end up to 4K page boundaries */
+    region->elf_vstart = (void*)PAGE_ALIGN_4K(vaddr);
+    region->size = PAGE_ALIGN_4K(vaddr + segment_size - 1) + PAGE_SIZE_4K - (uint32_t)((seL4_Word)region->elf_vstart);
+    
+    if (loadee) {
+        if (anywhere) {
+            region->reservation = vspace_reserve_range(loadee, region->size, region->rights, 1, (void**)&region->reservation_vstart);
+        } else {
+            region->reservation_vstart = region->elf_vstart;
+            region->reservation = vspace_reserve_range_at(loadee,
+                region->elf_vstart,
+                region->size,
+                region->rights,
+                1);
+        }
+        /* non-zero (failure) iff the reservation came back NULL */
+        return !region->reservation;
+    }
+
+    return 0;
+}
+
+/*
+ * Reserve virtual address ranges for every loadable segment of an ELF
+ * image from the cpio archive, without loading any segment data.
+ *
+ * @param loadee     vspace to reserve in, or NULL to only fill in regions
+ * @param image_name name of the ELF file in the cpio archive
+ * @param regions    array (size it with sel4utils_elf_num_regions) filled
+ *                   in with one entry per loadable segment
+ * @return the image entry point, or NULL on failure
+ */
+void *
+sel4utils_elf_reserve(vspace_t *loadee, char *image_name, sel4utils_elf_region_t *regions)
+{
+    unsigned long elf_size;
+    char *elf_file = cpio_get_file(_cpio_archive, image_name, &elf_size);
+    if (elf_file == NULL) {
+        LOG_ERROR("ERROR: failed to load elf file %s", image_name);
+        return NULL;
+    }
+
+    int num_headers = elf_getNumProgramHeaders(elf_file);
+    int region = 0;
+
+    for (int i = 0; i < num_headers; i++) {
+        unsigned long flags, segment_size, vaddr;
+
+        /* Skip non-loadable segments (such as debugging data). */
+        if (elf_getProgramHeaderType(elf_file, i) == PT_LOAD) {
+            /* Fetch information about this segment. */
+            segment_size = elf_getProgramHeaderMemorySize(elf_file, i);
+            vaddr = elf_getProgramHeaderVaddr(elf_file, i);
+            flags = elf_getProgramHeaderFlags(elf_file, i);
+            if (make_region(loadee, flags, segment_size, vaddr, &regions[region], 0)) {
+                /* roll back the reservations made so far */
+                for (region--; region >= 0; region--) {
+                    vspace_free_reservation(loadee, regions[region].reservation);
+                    regions[region].reservation = NULL;
+                }
+                LOG_ERROR("Failed to create reservation");
+                return NULL;
+            }
+            region++;
+        }
+    }
+
+    uint64_t entry_point = elf_getEntryPoint(elf_file);
+    if ((uint32_t) (entry_point >> 32) != 0) {
+        LOG_ERROR("ERROR: this code hasn't been tested for 64bit!");
+        /* bug fix: previously returned without releasing the reservations
+         * made above, leaking them on this error path */
+        if (loadee) {
+            for (region--; region >= 0; region--) {
+                vspace_free_reservation(loadee, regions[region].reservation);
+                regions[region].reservation = NULL;
+            }
+        }
+        return NULL;
+    }
+    assert(entry_point != 0);
+    return (void*)(seL4_Word)entry_point;
+}
+
+/*
+ * Load an ELF image from the cpio archive into the loadee vspace,
+ * optionally recording the mapped regions.
+ *
+ * @param loadee      vspace to load into
+ * @param loader      the caller's vspace, used for temporary mappings
+ * @param loadee_vka  allocator for the loadee's frames/cslots
+ * @param loader_vka  allocator for the loader's scratch cslot
+ * @param image_name  name of the ELF file in the cpio archive
+ * @param regions     if non-NULL, receives one entry per loaded segment
+ *                    (reservations are kept alive); if NULL, reservations
+ *                    are freed after each segment is loaded
+ * @param mapanywhere if non-zero, place segments at any free address
+ *                    rather than at their ELF-specified address
+ * @return the entry point on success, NULL on failure
+ *
+ * NOTE(review): if a segment fails partway through, regions/reservations
+ * created for earlier segments are not rolled back here — confirm callers
+ * tolerate this before relying on the error path.
+ */
+void *
+sel4utils_elf_load_record_regions(vspace_t *loadee, vspace_t *loader, vka_t *loadee_vka, vka_t *loader_vka, char *image_name, sel4utils_elf_region_t *regions, int mapanywhere)
+{
+    unsigned long elf_size;
+    char *elf_file = cpio_get_file(_cpio_archive, image_name, &elf_size);
+    if (elf_file == NULL) {
+        LOG_ERROR("ERROR: failed to load elf file %s", image_name);
+        return NULL;
+    }
+
+    int num_headers = elf_getNumProgramHeaders(elf_file);
+    int error = seL4_NoError;
+    int region_count = 0;
+
+    uint64_t entry_point = elf_getEntryPoint(elf_file);
+    if ((uint32_t) (entry_point >> 32) != 0) {
+        LOG_ERROR("ERROR: this code hasn't been tested for 64bit!");
+        return NULL;
+    }
+    assert(entry_point != 0);
+
+    for (int i = 0; i < num_headers; i++) {
+        char *source_addr;
+        unsigned long flags, file_size, segment_size, vaddr;
+
+        /* Skip non-loadable segments (such as debugging data). */
+        if (elf_getProgramHeaderType(elf_file, i) == PT_LOAD) {
+            /* Fetch information about this segment. */
+            source_addr = elf_file + elf_getProgramHeaderOffset(elf_file, i);
+            file_size = elf_getProgramHeaderFileSize(elf_file, i);
+            segment_size = elf_getProgramHeaderMemorySize(elf_file, i);
+            vaddr = elf_getProgramHeaderVaddr(elf_file, i);
+            flags = elf_getProgramHeaderFlags(elf_file, i);
+            /* make reservation */
+            sel4utils_elf_region_t region;
+            error = make_region(loadee, flags, segment_size, vaddr, &region, mapanywhere);
+            if (error) {
+                LOG_ERROR("Failed to reserve region");
+                break;
+            }
+            /* offset of the segment start within its first page */
+            unsigned long offset = vaddr - PAGE_ALIGN_4K(vaddr);
+            /* Copy it across to the vspace */
+            LOG_INFO(" * Loading segment %08x-->%08x", (int)vaddr, (int)(vaddr + segment_size));
+            error = load_segment(loadee, loader, loadee_vka, loader_vka, source_addr,
+                    segment_size, file_size, offset + (uint32_t)((seL4_Word)region.reservation_vstart), region.reservation);
+            if (error) {
+                LOG_ERROR("Failed to load segment");
+                break;
+            }
+            /* record the region if requested */
+            if (regions) {
+                regions[region_count] = region;
+            } else {
+                vspace_free_reservation(loadee, region.reservation);
+            }
+            /* increment the count of the number of loaded regions */
+            region_count++;
+        }
+    }
+
+    return error == seL4_NoError ? (void*)(seL4_Word)entry_point : NULL;
+}
+
+/*
+ * Load an ELF image from the cpio archive into the loadee vspace,
+ * discarding the per-region reservations once each segment is loaded.
+ * See sel4utils_elf_load_record_regions for parameter details.
+ */
+void *
+sel4utils_elf_load(vspace_t *loadee, vspace_t *loader, vka_t *loadee_vka, vka_t *loader_vka, char *image_name)
+{
+    return sel4utils_elf_load_record_regions(loadee, loader, loadee_vka, loader_vka, image_name, NULL, 0);
+}
+
+#endif /* (defined CONFIG_LIB_SEL4_VKA && defined CONFIG_LIB_SEL4_VSPACE) */
diff --git a/src/helpers.h b/src/helpers.h
new file mode 100644
index 0000000..d9d02da
--- /dev/null
+++ b/src/helpers.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright 2014, NICTA
+ *
+ * This software may be distributed and modified according to the terms of
+ * the BSD 2-Clause license. Note that NO WARRANTY is provided.
+ * See "LICENSE_BSD2.txt" for details.
+ *
+ * @TAG(NICTA_BSD)
+ */
+
+#ifndef SEL4UTILS_HELPERS_H
+#define SEL4UTILS_HELPERS_H
+/**
+ * Start a thread.
+ *
+ * @param stack_top address of the stack in the current vspace.
+ * Other parameters see sel4utils_start_thread.
+ */
+int sel4utils_internal_start_thread(sel4utils_thread_t *thread, void *entry_point, void *arg0,
+        void *arg1, int resume, void *stack_top);
+
+
+#endif /* SEL4UTILS_HELPERS_H */
diff --git a/src/mapping.c b/src/mapping.c
new file mode 100644
index 0000000..fe97abf
--- /dev/null
+++ b/src/mapping.c
@@ -0,0 +1,263 @@
+/*
+ * Copyright 2014, NICTA
+ *
+ * This software may be distributed and modified according to the terms of
+ * the BSD 2-Clause license. Note that NO WARRANTY is provided.
+ * See "LICENSE_BSD2.txt" for details.
+ *
+ * @TAG(NICTA_BSD)
+ */
+
+#include <autoconf.h>
+
+#ifdef CONFIG_LIB_SEL4_VKA
+
+#include <sel4/sel4.h>
+#include <vka/object.h>
+#include <sel4utils/mapping.h>
+#include <sel4utils/util.h>
+
+#ifndef DFMT
+#ifdef CONFIG_X86_64
+#define DFMT    "%lld"
+#else
+#define DFMT    "%d"
+#endif
+#endif
+
+/*
+ * Map a frame into a page directory, allocating and mapping any missing
+ * intermediate paging structure on demand (PDPT/PD on x86_64, a page
+ * table elsewhere).
+ *
+ * @param vka       allocator used for intermediate paging structures
+ * @param pd        top-level paging structure to map into
+ * @param frame     cap to the frame to map
+ * @param vaddr     virtual address to map at
+ * @param rights    rights for the mapping
+ * @param cacheable non-zero for a cached mapping
+ * @param pagetable out parameter receiving the paging structure allocated
+ *                  here, if any.
+ *                  NOTE(review): on x86_64 more than one structure may be
+ *                  allocated across retries, overwriting this single out
+ *                  parameter — confirm callers cannot leak the first one.
+ * @return seL4_NoError on success
+ */
+int
+sel4utils_map_page(vka_t *vka, seL4_CPtr pd, seL4_CPtr frame, void *vaddr,
+                   seL4_CapRights rights, int cacheable, vka_object_t *pagetable)
+{
+    assert(vka != NULL);
+    assert(pd != 0);
+    assert(frame != 0);
+    assert(vaddr != 0);
+    assert(rights != 0);
+    assert(pagetable != NULL);
+
+    seL4_ARCH_VMAttributes attr = 0;
+
+#ifdef CONFIG_ARCH_IA32
+    if (!cacheable) {
+        attr = seL4_IA32_CacheDisabled;
+    }
+#elif CONFIG_ARCH_ARM /* CONFIG_ARCH_IA32 */
+    if (cacheable) {
+        attr = seL4_ARM_PageCacheable;
+    }
+#endif /* CONFIG_ARCH_ARM */
+    /* optimistically try the mapping first; failure tells us which
+     * intermediate structure is missing */
+    int error = seL4_ARCH_Page_Map(frame, pd, (seL4_Word) vaddr, rights, attr);
+
+#ifdef CONFIG_X86_64
+
+page_map_retry:
+    
+    if (error == seL4_FailedLookupPDPT) {
+        error = vka_alloc_page_directory_pointer_table(vka, pagetable);
+        if (!error) {
+            error = seL4_ARCH_PageDirectoryPointerTable_Map(pagetable->cptr, pd, (seL4_Word)vaddr,
+                    seL4_ARCH_Default_VMAttributes);
+        } else {
+            LOG_ERROR("Page directory pointer table allocation failed %d", error);
+        }
+
+        if (!error) {
+            error = seL4_ARCH_Page_Map(frame, pd, (seL4_Word)vaddr, rights, attr);
+            if (error != seL4_NoError) {
+                /* the next level down may also be missing; retry resolves it */
+                goto page_map_retry;
+            }
+        } else {
+            LOG_ERROR("Page directory pointer table mapping failed %d\n", error);
+        }
+    }
+    
+    if (error == seL4_FailedLookupPD) {
+        error = vka_alloc_page_directory(vka, pagetable);
+        if (!error) {
+            error = seL4_ARCH_PageDirectory_Map(pagetable->cptr, pd, (seL4_Word)vaddr, 
+                    seL4_ARCH_Default_VMAttributes);
+        } else {
+            /* bug fix: typo in error message ("direcotry") */
+            LOG_ERROR("Page directory allocation failed %d\n", error);
+        }
+
+        if (!error) {
+            error = seL4_ARCH_Page_Map(frame, pd, (seL4_Word)vaddr, rights, attr);
+            if (error != seL4_NoError) {
+                goto page_map_retry;
+            }
+        
+        } else {
+            LOG_ERROR("Page directory mapping failed %d\n", error);
+        }
+    }
+
+#endif
+
+    if (error == seL4_FailedLookup) {
+        /* need a page table, allocate one */
+        error = vka_alloc_page_table(vka, pagetable);
+
+        /* map in the page table */
+        if (!error) {
+            error = seL4_ARCH_PageTable_Map(pagetable->cptr, pd, (seL4_Word) vaddr,
+                    seL4_ARCH_Default_VMAttributes);
+        } else {
+            LOG_ERROR("Page table allocation failed, %d", error);
+        }
+
+        /* now map in the frame again, if pagetable allocation was successful */
+        if (!error) {
+            error = seL4_ARCH_Page_Map(frame, pd, (seL4_Word) vaddr, rights, attr);
+        } else {
+            LOG_ERROR("Page table mapping failed, %d", error);
+        }
+    }
+
+    if (error != seL4_NoError) {
+        LOG_ERROR("Failed to map page at address %p with cap "DFMT", error: %d", vaddr, frame, error);
+    }
+
+    return error;
+}
+
+#ifdef CONFIG_ARCH_IA32
+
+#ifdef CONFIG_IOMMU
+/*
+ * Map a 4K frame into an IO space (IOMMU), allocating IO page tables on
+ * demand until the frame mapping succeeds.
+ *
+ * @param vka       allocator for IO page tables
+ * @param iospace   IO space cap to map into
+ * @param frame     cap to the frame to map
+ * @param vaddr     IO virtual address to map at
+ * @param rights    rights for the mapping
+ * @param cacheable must be non-zero (uncached IO mappings unsupported here)
+ * @param size_bits must be seL4_PageBits
+ * @param pts       optional array receiving the allocated IO page tables
+ * @param num_pts   out parameter for the number of entries written to pts
+ *                  (required when pts is non-NULL)
+ * @return seL4_NoError on success
+ */
+int
+sel4utils_map_iospace_page(vka_t *vka, seL4_CPtr iospace, seL4_CPtr frame, seL4_Word vaddr,
+                           seL4_CapRights rights, int cacheable, seL4_Word size_bits,
+                           vka_object_t *pts, int *num_pts)
+{
+    int error;
+    int num;
+    assert(vka);
+    assert(iospace);
+    assert(frame);
+    assert(cacheable);
+    assert(size_bits == seL4_PageBits);
+    /* Keep adding IO page tables until the frame mapping succeeds.
+     * fix: the old loop condition also tested 'error != seL4_NoError',
+     * which is always true when error == seL4_FailedLookup — redundant. */
+    num = 0;
+    while ((error = seL4_IA32_Page_MapIO(frame, iospace, rights, vaddr)) == seL4_FailedLookup) {
+        /* Need a page table? */
+        vka_object_t pt;
+        error = vka_alloc_io_page_table(vka, &pt);
+        if (error) {
+            return error;
+        }
+        /* record the allocation so the caller can free it later */
+        if (pts) {
+            pts[num] = pt;
+            num++;
+            assert(num_pts);
+            *num_pts = num;
+        }
+        error = seL4_IA32_IOPageTable_Map(pt.cptr, iospace, vaddr);
+        if (error != seL4_NoError) {
+            return error;
+        }
+    }
+    return error;
+}
+
+#endif
+
+#ifdef CONFIG_VTX
+
+/*
+ * Map a frame into a guest OS's physical address space via EPT.
+ *
+ * Missing intermediate EPT structures are allocated and mapped on demand:
+ * a 4M frame needs an EPT page directory, a 4K frame needs an EPT page
+ * table (and possibly its page directory as well).
+ *
+ * @param vka       allocator for EPT paging structures
+ * @param pd        EPT page directory (guest top level) to map into
+ * @param frame     cap to the frame to map
+ * @param vaddr     guest-physical address to map at
+ * @param rights    rights for the mapping
+ * @param cacheable non-zero for a cached mapping
+ * @param size_bits frame size: seL4_4MBits or 4K
+ * @param pagetable out parameter for an allocated EPT page table, if any
+ * @param pagedir   out parameter for an allocated EPT page directory, if any
+ * @return seL4_NoError on success
+ */
+int
+sel4utils_map_ept_page(vka_t *vka, seL4_CPtr pd, seL4_CPtr frame, seL4_Word vaddr,
+                       seL4_CapRights rights, int cacheable, seL4_Word size_bits,
+                       vka_object_t *pagetable, vka_object_t *pagedir)
+{
+    int ret;
+
+    assert(vka);
+    assert(pd);
+    assert(frame);
+    assert(rights);
+    assert(size_bits);
+    assert(pagetable);
+    assert(pagedir);
+
+    seL4_ARCH_VMAttributes attr = 0;
+
+    if (!cacheable) {
+        attr = seL4_IA32_CacheDisabled;
+    }
+
+    /* Try map into EPT directory first. */
+    ret = seL4_IA32_Page_Map(frame, pd, vaddr, rights, attr);
+    if (ret == seL4_NoError) {
+        /* Successful! */
+        return ret;
+    }
+    if (ret != seL4_FailedLookup) {
+        LOG_ERROR("Failed to map page, error: %d", ret);
+        return ret;
+    }
+
+    /* seL4_FailedLookup: an intermediate structure is missing */
+    if (size_bits == seL4_4MBits) {
+        /* Allocate a directory table for a 4M entry. */
+        ret = vka_alloc_ept_page_directory(vka, pagedir);
+        if (ret != seL4_NoError) {
+            LOG_ERROR("Allocation of EPT page directory failed, error: %d", ret);
+            return ret;
+        }
+
+        /* Map in the page directory. */
+        ret = seL4_IA32_EPTPageDirectory_Map(pagedir->cptr, pd, vaddr, attr);
+        if (ret != seL4_NoError) {
+            LOG_ERROR("Failed to map EPT PD, error: %d", ret);
+            return ret;
+        }
+    } else {
+        /* Allocate an EPT page table for a 4K entry. */
+        ret = vka_alloc_ept_page_table(vka, pagetable);
+        if (ret != seL4_NoError) {
+            LOG_ERROR("allocating ept page table failed\n");
+            return ret;
+        }
+
+        /* Map in the EPT page table. */
+        ret = seL4_IA32_EPTPageTable_Map(pagetable->cptr, pd, vaddr, attr);
+
+        /* Mapping could fail due to missing PD. In this case, we map the PD and try again. */
+        if (ret == seL4_FailedLookup) {
+            /* Allocate an EPT page directory for the page table. */
+            ret = vka_alloc_ept_page_directory(vka, pagedir);
+            if (ret != seL4_NoError) {
+                LOG_ERROR("Allocating EPT page directory failed\n");
+                return ret;
+            }
+
+            /* Map the EPT page directory in. */
+            ret = seL4_IA32_EPTPageDirectory_Map(pagedir->cptr, pd, vaddr, attr);
+            if (ret != seL4_NoError) {
+                LOG_ERROR("Failed to map EPT PD, error: %d", ret);
+                return ret;
+            }
+
+            /* Try to map the page table again. */
+            ret = seL4_IA32_EPTPageTable_Map(pagetable->cptr, pd, vaddr, attr);
+            if (ret != seL4_NoError) {
+                LOG_ERROR("Second attempt at mapping EPT PT failed, error: %d", ret);
+                return ret;
+            }
+        } else if (ret) {
+            LOG_ERROR("EPT PT failed to map, error: %d", ret);
+            return ret;
+        }
+    }
+
+    /* defensive re-check; all error paths above have already returned */
+    if (ret != seL4_NoError) {
+        return ret;
+    }
+
+    /* Try to map the frame again. */
+    return seL4_IA32_Page_Map(frame, pd, vaddr, rights, attr);
+}
+
+
+#endif
+#endif
+#endif /* CONFIG_LIB_SEL4_VKA */
diff --git a/src/process.c b/src/process.c
new file mode 100644
index 0000000..2d4ce98
--- /dev/null
+++ b/src/process.c
@@ -0,0 +1,565 @@
+/*
+ * Copyright 2014, NICTA
+ *
+ * This software may be distributed and modified according to the terms of
+ * the BSD 2-Clause license. Note that NO WARRANTY is provided.
+ * See "LICENSE_BSD2.txt" for details.
+ *
+ * @TAG(NICTA_BSD)
+ */
+#include <autoconf.h>
+
+#if (defined CONFIG_LIB_SEL4_VKA && defined CONFIG_LIB_SEL4_VSPACE)
+
+#include <stdlib.h>
+#include <string.h>
+#include <stdio.h>
+#include <sel4/sel4.h>
+#include <vka/object.h>
+#include <vka/capops.h>
+#include <sel4utils/vspace.h>
+#include <sel4utils/process.h>
+#include <sel4utils/util.h>
+#include <sel4utils/elf.h>
+#include <sel4utils/mapping.h>
+#include "helpers.h"
+
+/*
+ * Total space needed to store all argument strings back to back,
+ * including one NUL terminator per string.
+ *
+ * @param argc number of arguments
+ * @param argv argument strings; must all be non-NULL
+ * @return the combined length in bytes
+ */
+static size_t
+compute_length(int argc, char **argv)
+{
+    size_t total = 0;
+
+    assert(argv != NULL);
+    for (int i = 0; i < argc; i++) {
+        assert(argv[i] != NULL);
+        total += strlen(argv[i]) + 1;
+    }
+
+    return total;
+}
+
+/* guard against this callback re-entering itself: malloc below could, in
+ * principle, trigger further vspace allocations.
+ * NOTE(review): a plain int is not thread-safe — confirm single-threaded use */
+static int recurse = 0;
+
+/*
+ * vspace allocation callback: record a kernel object allocated on behalf
+ * of a process so clear_objects can return it to the vka later.
+ *
+ * @param cookie the owning sel4utils_process_t (registered at configure time)
+ * @param object the object that was just allocated
+ */
+void
+sel4utils_allocated_object(void *cookie, vka_object_t object)
+{
+    if (recurse == 1) {
+        assert(!"VSPACE RECURSION ON MALLOC, YOU ARE DEAD\n");
+    }
+    recurse = 1;
+
+    sel4utils_process_t *process = (sel4utils_process_t *) cookie;
+
+    object_node_t *node = (object_node_t *) malloc(sizeof(object_node_t));
+    assert(node != NULL);
+    node->object = object;
+
+    /* push onto the head of the process' allocated-object list */
+    node->next = process->allocated_object_list_head;
+    process->allocated_object_list_head = node;
+
+    recurse = 0;
+}
+
+/*
+ * Return every object recorded against this process to the vka allocator
+ * and release the list nodes.
+ *
+ * @param process process whose allocated-object list is drained
+ * @param vka     allocator the objects were allocated from
+ */
+static void
+clear_objects(sel4utils_process_t *process, vka_t *vka)
+{
+    assert(process != NULL);
+    assert(vka != NULL);
+
+    while (process->allocated_object_list_head != NULL) {
+        object_node_t *node = process->allocated_object_list_head;
+        process->allocated_object_list_head = node->next;
+
+        vka_free_object(vka, &node->object);
+        free(node);
+    }
+}
+
+/*
+ * Compute the path of the next free slot in the process' single-level
+ * cspace. Does not claim the slot — call allocate_next_slot once the
+ * slot is actually used.
+ *
+ * @param process process whose cspace is consulted
+ * @param dest    filled in with the path of the free slot
+ * @return 0 on success, -1 if the cspace is full
+ */
+static int
+next_free_slot(sel4utils_process_t *process, cspacepath_t *dest)
+{
+    if (process->cspace_next_free >= (1 << process->cspace_size)) {
+        LOG_ERROR("Can't allocate slot, cspace is full.\n");
+        return -1;
+    }
+
+    dest->root = process->cspace.cptr;
+    dest->capPtr = process->cspace_next_free;
+    dest->capDepth = process->cspace_size;
+
+    return 0;
+}
+
+/* Claim the slot previously returned by next_free_slot by bumping the
+ * process' free-slot watermark. */
+static void 
+allocate_next_slot(sel4utils_process_t *process)
+{
+    assert(process->cspace_next_free < (1 << process->cspace_size));
+    process->cspace_next_free++;
+}
+
+
+/*
+ * Mint a cap into the next free slot of the process' cspace.
+ *
+ * @param process target process
+ * @param src     path to the cap to mint from
+ * @param rights  rights for the minted cap
+ * @param data    cap data (e.g. guard or badge) for the new cap
+ * @return the destination slot in the process cspace, or 0 on failure
+ */
+seL4_CPtr
+sel4utils_mint_cap_to_process(sel4utils_process_t *process, cspacepath_t src, seL4_CapRights rights, seL4_CapData_t data)
+{
+    cspacepath_t dest;
+    if (next_free_slot(process, &dest) == -1) {
+        return 0;
+    }
+
+    int error = vka_cnode_mint(&dest, &src, rights, data);
+    if (error != seL4_NoError) {
+        LOG_ERROR("Failed to mint cap\n");
+        return 0;
+    }
+
+    /* success */
+    allocate_next_slot(process);
+    return dest.capPtr;
+}
+
+/*
+ * Copy a cap, with all rights, into the next free slot of the process'
+ * cspace.
+ *
+ * @param process target process
+ * @param src     path to the cap to copy
+ * @return the destination slot in the process cspace, or 0 on failure
+ */
+seL4_CPtr
+sel4utils_copy_cap_to_process(sel4utils_process_t *process, cspacepath_t src)
+{
+    cspacepath_t dest;
+
+    if (next_free_slot(process, &dest) == -1) {
+        return 0;
+    }
+
+    if (vka_cnode_copy(&dest, &src, seL4_AllRights) != seL4_NoError) {
+        LOG_ERROR("Failed to copy cap\n");
+        return 0;
+    }
+
+    /* success */
+    allocate_next_slot(process);
+    return dest.capPtr;
+}
+
+/*
+ * Copy an argv array into a target vspace.
+ *
+ * A fresh frame is double-mapped: writable in the current vspace (to fill
+ * it in) and read-only in the target vspace (where it stays mapped). The
+ * argv pointers written into the page are target-vspace addresses.
+ *
+ * @param current_vspace the caller's vspace
+ * @param target_vspace  vspace the arguments are being copied into
+ * @param vka            allocator for the frame and scratch cslot
+ * @param argc           number of arguments (currently limited to one page)
+ * @param argv           argument strings to copy
+ * @return the address of the argv array in the target vspace, NULL on error
+ */
+void *
+sel4utils_copy_args(vspace_t *current_vspace, vspace_t *target_vspace,
+                    vka_t *vka, int argc, char *argv[])
+{
+    assert(current_vspace != target_vspace);
+
+    size_t length = compute_length(argc, argv);
+    size_t npages UNUSED = BYTES_TO_4K_PAGES(length);
+    //TODO handle multiple pages of args. Implement this if you need it.
+    assert(npages == 1);
+
+    /* allocate pages in the target address space */
+    vka_object_t target_page = {0};
+    cspacepath_t target_cap = {0};
+    cspacepath_t current_cap = {0};
+    void *target_vaddr = NULL;
+    void *current_vaddr = NULL;
+    seL4_CPtr slot = 0;
+
+    int error = vka_alloc_frame(vka, seL4_PageBits, &target_page);
+    vka_cspace_make_path(vka, target_page.cptr, &target_cap);
+    if (error) {
+        target_page.cptr = 0;
+        LOG_ERROR("Failed to allocate frame\n");
+        goto error;
+    }
+
+    error = vka_cspace_alloc(vka, &slot);
+    if (error) {
+        slot = 0;
+        LOG_ERROR("Failed to allocate cslot\n");
+        goto error;
+    }
+
+    /* a second cap to the same frame, so it can be mapped twice */
+    vka_cspace_make_path(vka, slot, &current_cap);
+    error = vka_cnode_copy(&current_cap, &target_cap, seL4_AllRights);
+    if (error) {
+        LOG_ERROR("Failed to make a copy of frame cap\n");
+        current_cap.capPtr = 0;
+        goto error;
+    }
+
+    /* map the pages */
+    current_vaddr = vspace_map_pages(current_vspace, &current_cap.capPtr,
+                    seL4_AllRights, 1, seL4_PageBits, 1);
+    if (current_vaddr == NULL) {
+        LOG_ERROR("Failed to map page into current address space\n");
+        goto error;
+    }
+
+    target_vaddr = vspace_map_pages(target_vspace, &target_cap.capPtr, seL4_CanRead,
+                   1, seL4_PageBits, 1);
+    if (target_vaddr == NULL) {
+        LOG_ERROR("Failed to map page into target address space\n");
+        goto error;
+    }
+
+    /* we aren't bailing now - inform caller we allocated a frame */
+    vspace_maybe_call_allocated_object(current_vspace, target_page);
+
+    /* do the copy: argv pointer array first, then the strings packed
+     * after it; the stored pointers are rebased to the target vspace */
+    char **current_argv = current_vaddr;
+    char *strings = current_vaddr + (argc * sizeof(char*));
+
+    for (int i = 0; i < argc; i++) {
+        current_argv[i] = target_vaddr + ((uint32_t) ((seL4_Word)strings) % PAGE_SIZE_4K);
+        strncpy(strings, argv[i], strlen(argv[i]) + 1);
+        strings += strlen(argv[i]) + 1;
+    }
+
+    /* flush & unmap */
+#ifdef CONFIG_ARCH_ARM
+    seL4_ARM_Page_Unify_Instruction(current_cap.capPtr, 0, PAGE_SIZE_4K);
+    seL4_ARM_Page_Unify_Instruction(target_cap.capPtr, 0, PAGE_SIZE_4K);
+#endif /* CONFIG_ARCH_ARM */
+
+    /* drop the temporary writable mapping; the target mapping remains */
+    vspace_unmap_pages(current_vspace, current_vaddr, 1, seL4_PageBits);
+
+    vka_cnode_delete(&current_cap);
+    vka_cspace_free(vka, slot);
+
+    return target_vaddr;
+
+error:
+    /* unwind whatever was set up before the failure */
+    if (current_vaddr != NULL) {
+        vspace_unmap_pages(current_vspace, current_vaddr, 1, seL4_PageBits);
+    }
+
+    if (current_cap.capPtr != 0) {
+        vka_cnode_delete(&current_cap);
+    }
+
+    if (slot != 0) {
+        vka_cspace_free(vka, slot);
+    }
+
+    if (target_page.cptr != 0) {
+        vka_free_object(vka, &target_page);
+    }
+
+    return NULL;
+}
+
+/*
+ * Start a configured process running, passing argc/argv to its entry point.
+ *
+ * @param process process previously set up by sel4utils_configure_process*
+ * @param vka     allocator for temporary slots in the spawner's cspace
+ * @param vspace  the spawner's vspace
+ * @param argc    number of arguments (0 for none)
+ * @param argv    argument strings, copied into the new process' vspace
+ * @param resume  non-zero to start the thread running immediately
+ * @return 0 on success
+ */
+int
+sel4utils_spawn_process(sel4utils_process_t *process, vka_t *vka, vspace_t *vspace, int argc,
+        char *argv[], int resume)
+{
+    char **new_process_argv = NULL;
+    if (argc > 0) {
+        new_process_argv = sel4utils_copy_args(vspace, &process->vspace, vka, argc, argv);
+        if (new_process_argv == NULL) {
+            return -1;
+        }
+    }
+
+    /* On arm we write the arguments to registers */
+#ifdef CONFIG_ARCH_IA32
+    /* map in the first page of the stack for this thread
+     * (presumably so sel4utils_internal_start_thread can place the
+     * arguments on the stack — confirm in its implementation) */
+    seL4_CPtr page_cap = vspace_get_cap(&process->vspace, process->thread.stack_top - PAGE_SIZE_4K);
+
+    /* make a copy of the cap */
+    seL4_CPtr page_cap_copy;
+    int error = vka_cspace_alloc(vka, &page_cap_copy);
+    if (error) {
+        LOG_ERROR("Failed to allocate cslot\n");
+        return error;
+    }
+
+    cspacepath_t src, dest;
+
+    vka_cspace_make_path(vka, page_cap, &src);
+    vka_cspace_make_path(vka, page_cap_copy, &dest);
+
+    error = vka_cnode_copy(&dest, &src, seL4_AllRights);
+    if (error) {
+        LOG_ERROR("Failed to copy cap of stack page, error: %d\n", error);
+        vka_cspace_free(vka, page_cap_copy);
+        return error;
+    }
+
+    /* map the page in */
+    void *stack = vspace_map_pages(vspace, &page_cap_copy, seL4_AllRights, 1, seL4_PageBits, 1);
+
+    if (stack == NULL) {
+        LOG_ERROR("Failed to map stack page into process loading vspace\n");
+        vka_cspace_free(vka, page_cap_copy);
+        return -1;
+    }
+
+    error = sel4utils_internal_start_thread(&process->thread, process->entry_point,
+            (void *) ((seL4_Word)argc), (void *) new_process_argv, resume, stack + PAGE_SIZE_4K);
+
+    /* unmap it
+     * NOTE(review): this uses vspace_free_pages on a page that was only
+     * mapped here (via vspace_map_pages) — verify free vs unmap semantics */
+    vspace_free_pages(vspace, stack, 1, seL4_PageBits);
+    vka_cnode_delete(&dest);
+    vka_cspace_free(vka, page_cap_copy);
+#elif CONFIG_ARCH_ARM
+    int error = sel4utils_internal_start_thread(&process->thread, process->entry_point,
+                (void *) argc, (void *) new_process_argv, resume, process->thread.stack_top);
+#endif /* CONFIG_ARCH_IA32 */
+
+    return error;
+}
+
+/*
+ * Configure a process with default settings: load the named ELF image,
+ * create a fresh vspace and single-level cspace (of
+ * CONFIG_SEL4UTILS_CSPACE_SIZE_BITS), and allocate a fault endpoint.
+ *
+ * @param process    process struct to initialise
+ * @param vka        allocator for kernel objects
+ * @param vspace     the spawner's vspace
+ * @param priority   priority for the process' initial thread
+ * @param image_name ELF image in the cpio archive to load
+ * @return 0 on success
+ */
+int
+sel4utils_configure_process(sel4utils_process_t *process, vka_t *vka,
+        vspace_t *vspace, uint8_t priority, char *image_name)
+{
+    sel4utils_process_config_t config = {
+        .is_elf = true,
+        .image_name = image_name,
+        .do_elf_load = true,
+        .create_cspace = true,
+        .one_level_cspace_size_bits = CONFIG_SEL4UTILS_CSPACE_SIZE_BITS,
+        .create_vspace = true,
+        .create_fault_endpoint = true,
+        .priority = priority,
+#ifndef CONFIG_KERNEL_STABLE
+        .asid_pool = seL4_CapInitThreadASIDPool,
+#endif 
+    };
+
+    return sel4utils_configure_process_custom(process, vka, vspace, config);
+}
+
+/*
+ * Reserve the virtual address ranges for a set of ELF regions in a
+ * freshly created vspace.
+ *
+ * @param vspace  vspace to make the reservations in
+ * @param num     number of entries in regions
+ * @param regions region descriptors whose reservation fields are filled in
+ * @return 0 on success, -1 if any reservation fails (reservations made
+ *         earlier in the loop are released before returning)
+ */
+static int 
+create_reservations(vspace_t *vspace, int num, sel4utils_elf_region_t regions[])
+{
+    for (int i = 0; i < num; i++) {
+        sel4utils_elf_region_t *region = &regions[i];
+        region->reservation = vspace_reserve_range_at(vspace, region->elf_vstart, 
+            region->size, region->rights, region->cacheable);
+        /* bug fix: test the reservation, not the (never-NULL) region pointer */
+        if (region->reservation == NULL) {
+            LOG_ERROR("Failed to create region\n");
+            /* bug fix: the old cleanup freed regions[i] repeatedly (wrong
+             * index) and with free() instead of the vspace interface */
+            for (int j = i - 1; j >= 0; j--) {
+                vspace_free_reservation(vspace, regions[j].reservation);
+                regions[j].reservation = NULL;
+            }
+            return -1;
+        }
+    }
+
+    return 0;
+}
+
+#ifndef CONFIG_KERNEL_STABLE
+/*
+ * Assign a page directory to an ASID pool, defaulting to the init
+ * thread's pool when none is supplied.
+ *
+ * @param asid_pool cap to an ASID pool, or 0 for seL4_CapInitThreadASIDPool
+ * @param pd        page directory to assign
+ * @return seL4_NoError on success
+ */
+static int 
+assign_asid_pool(seL4_CPtr asid_pool, seL4_CPtr pd) {
+
+    int error;
+
+    if (asid_pool == 0) {
+        /* seL4_CapInitThreadASIDPool is only valid in the root task's cspace */
+        LOG_INFO("This method will fail if run in a thread that is not in the root server cspace\n");
+        asid_pool = seL4_CapInitThreadASIDPool;
+    }
+
+    error = seL4_ARCH_ASIDPool_Assign(asid_pool, pd);    
+    if (error) {
+        LOG_ERROR("Failed to assign asid pool\n");
+    }
+
+    return error;
+}
+
+/*
+ * Create a single-level cspace for a process and mint its own cnode cap
+ * into SEL4UTILS_CNODE_SLOT.
+ *
+ * @param vka              allocator for the cnode
+ * @param size_bits        size of the cspace in bits
+ * @param process          process to attach the cspace to
+ * @param cspace_root_data guard data for the minted root cnode cap
+ * @return 0 on success
+ */
+static int
+create_cspace(vka_t *vka, int size_bits, sel4utils_process_t *process, 
+        seL4_CapData_t cspace_root_data) {
+    int error;
+    
+    /* create a cspace */
+    error = vka_alloc_cnode_object(vka, size_bits, &process->cspace);
+    if (error) {
+        LOG_ERROR("Failed to create cspace: %d\n", error);
+        return error;
+    }
+    
+    process->cspace_size = size_bits;
+    /* first slot is always 1, never allocate 0 as a cslot */
+    process->cspace_next_free = 1;
+
+    /*  mint the cnode cap into the process cspace */
+    cspacepath_t src;
+    vka_cspace_make_path(vka, process->cspace.cptr, &src);
+    /* bug fix: the mint returns a slot number (0 on failure), which was
+     * stored in a variable named 'error' and only checked by an assert —
+     * a no-op under NDEBUG, silently returning success on failure */
+    seL4_CPtr slot = sel4utils_mint_cap_to_process(process, src, seL4_AllRights, cspace_root_data);
+    if (slot == 0) {
+        LOG_ERROR("Failed to mint cnode cap into new cspace\n");
+        return -1;
+    }
+    assert(slot == SEL4UTILS_CNODE_SLOT);
+
+    return 0;
+}
+
+/*
+ * Allocate a fault endpoint for a process and copy it into the process
+ * cspace at SEL4UTILS_ENDPOINT_SLOT.
+ *
+ * @param vka     allocator for the endpoint
+ * @param process process to attach the endpoint to
+ * @return 0 on success, non-zero on failure
+ */
+static int
+create_fault_endpoint(vka_t *vka, sel4utils_process_t *process) 
+{
+    /* create a fault endpoint and put it into the cspace */
+    int error = vka_alloc_endpoint(vka, &process->fault_endpoint);
+    if (error) {
+        LOG_ERROR("Failed to allocate fault endpoint: %d\n", error);
+        return error;
+    }
+   
+    cspacepath_t src;
+    vka_cspace_make_path(vka, process->fault_endpoint.cptr, &src);
+    /* bug fix: sel4utils_copy_cap_to_process returns a slot number (0 on
+     * failure), not an error code — the old code returned the 0 'error'
+     * (i.e. success) when the copy failed */
+    seL4_CPtr slot = sel4utils_copy_cap_to_process(process, src);
+    if (slot == 0) {
+        LOG_ERROR("Failed to move allocated endpoint\n");
+        return -1;
+    }
+    assert(slot == SEL4UTILS_ENDPOINT_SLOT);
+
+    return 0;
+}
+
+
+/* Configure (but do not start) a new process according to `config`:
+ * optionally create its page directory/vspace, cspace and fault endpoint
+ * (or adopt caller-supplied ones), load or reserve its ELF image, and
+ * finally create its initial thread.
+ *
+ * Returns 0 on success, -1 on failure; partially-created resources are
+ * freed on the error path at the bottom. */
+int sel4utils_configure_process_custom(sel4utils_process_t *process, vka_t *vka,
+        vspace_t *spawner_vspace, sel4utils_process_config_t config)
+{
+    int error;
+    /* NOTE(review): 'data' is never assigned anywhere in this function, so
+     * the free() at the error label is currently dead code — confirm. */
+    sel4utils_alloc_data_t * data = NULL;
+    memset(process, 0, sizeof(sel4utils_process_t));
+    /* guard consumes the cptr bits not translated by the one-level cnode */
+    seL4_CapData_t cspace_root_data = seL4_CapData_Guard_new(0, 
+            seL4_WordBits - config.one_level_cspace_size_bits);
+
+    /* create a page directory */
+    if (config.create_vspace) {
+        error = vka_alloc_page_directory(vka, &process->pd);
+        if (error) {
+            LOG_ERROR("Failed to allocate page directory for new process: %d\n", error);
+            goto error;
+        }
+
+#ifndef CONFIG_KERNEL_STABLE
+#ifndef CONFIG_X86_64
+        /* assign an asid pool */
+        if (assign_asid_pool(config.asid_pool, process->pd.cptr) != seL4_NoError) {
+            goto error;
+        }
+#endif /* end of !CONFIG_X86_64 */
+#endif /* CONFIG_KERNEL_STABLE */
+    } else {
+        process->pd = config.page_dir;
+    }
+
+    if (config.create_cspace) {
+        if (create_cspace(vka, config.one_level_cspace_size_bits, process, cspace_root_data) != 0) {
+            goto error;
+        }
+    } else {
+        process->cspace = config.cnode;
+    }
+
+    if (config.create_fault_endpoint) {
+        if (create_fault_endpoint(vka, process) != 0) {
+            goto error;
+        }
+    } else {
+        process->fault_endpoint = config.fault_endpoint;
+    }
+
+    /* create a vspace */
+    if (config.create_vspace) {
+        sel4utils_get_vspace(spawner_vspace, &process->vspace, &process->data, vka, process->pd.cptr,
+                sel4utils_allocated_object, (void *) process);
+      
+        if (config.num_reservations > 0) {
+            if (create_reservations(&process->vspace, config.num_reservations, 
+                        config.reservations)) {
+                goto error;
+            }
+        }
+    } else {
+        memcpy(&process->vspace, config.vspace, sizeof(process->vspace));
+    }
+
+    /* finally elf load */
+    if (config.is_elf) {
+        if (config.do_elf_load) {
+            /* map the image into the new vspace now */
+            process->entry_point = sel4utils_elf_load(&process->vspace, spawner_vspace, vka, vka, config.image_name);
+        } else {
+            /* only reserve the image regions; the caller loads it later */
+            process->num_elf_regions = sel4utils_elf_num_regions(config.image_name);
+            process->elf_regions = calloc(process->num_elf_regions, sizeof(*process->elf_regions));
+            if (!process->elf_regions) {
+                LOG_ERROR("Failed to allocate memory for elf region information");
+                goto error;
+            }
+            process->entry_point = sel4utils_elf_reserve(&process->vspace, config.image_name, process->elf_regions);
+        }
+    
+        if (process->entry_point == NULL) {
+            LOG_ERROR("Failed to load elf file\n");
+            goto error;
+        }
+    } else {
+        process->entry_point = config.entry_point;
+    }
+
+    /* create the thread, do this *after* elf-loading so that we don't clobber
+     * the required virtual memory*/
+    error = sel4utils_configure_thread(vka, &process->vspace, SEL4UTILS_ENDPOINT_SLOT,
+            config.priority, process->cspace.cptr, cspace_root_data, &process->thread);
+
+    if (error) {
+        LOG_ERROR("ERROR: failed to configure thread for new process %d\n", error);
+        goto error;
+    }
+
+    return 0;
+
+error:
+    /* try to clean up */
+    if (config.create_fault_endpoint && process->fault_endpoint.cptr != 0) {
+        vka_free_object(vka, &process->fault_endpoint);
+    }
+
+    if (config.create_cspace && process->cspace.cptr != 0) {
+        vka_free_object(vka, &process->cspace);
+    }
+
+    if (config.create_vspace && process->pd.cptr != 0) {
+        vka_free_object(vka, &process->pd);
+        /* objects the vspace allocated on our behalf are not torn down here */
+        if (process->vspace.page_directory != 0) {
+            LOG_ERROR("Could not clean up vspace\n");
+        }
+    }
+
+    if (process->elf_regions) {
+        free(process->elf_regions);
+    }
+
+    if (data != NULL) {
+        free(data);
+    }
+
+    return -1;
+}
+
+/* Tear down a process created by sel4utils_configure_process_custom:
+ * delete every cap handed out from its cspace, destroy its initial thread,
+ * then free the cnode, vspace-allocated objects, fault endpoint and page
+ * directory. Order matters: caps must be deleted before the cnode holding
+ * them is freed. */
+void
+sel4utils_destroy_process(sel4utils_process_t *process, vka_t *vka)
+{
+    /* delete all of the caps in the cspace */
+    for (int i = 1; i < process->cspace_next_free; i++) {
+        cspacepath_t path;
+        path.root = process->cspace.cptr;
+        path.capPtr = i;
+        /* cptrs in the one-level cspace resolve at cspace_size bits */
+        path.capDepth = process->cspace_size;
+        vka_cnode_delete(&path);
+    }
+
+    /* destroy the thread */
+    sel4utils_clean_up_thread(vka, &process->vspace, &process->thread);
+
+    /* destroy the cnode */
+    vka_free_object(vka, &process->cspace);
+    
+    /* free any objects created by the vspace */
+    clear_objects(process, vka);
+
+    /* destroy the endpoint */
+    if (process->fault_endpoint.cptr != 0) {
+        vka_free_object(vka, &process->fault_endpoint);
+    }
+
+    /* destroy the page directory */
+    vka_free_object(vka, &process->pd);
+}
+
+#endif /*(defined CONFIG_LIB_SEL4_VKA && defined CONFIG_LIB_SEL4_VSPACE)*/
diff --git a/src/profile.c b/src/profile.c
new file mode 100644
index 0000000..fb2dac8
--- /dev/null
+++ b/src/profile.c
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2014, NICTA
+ *
+ * This software may be distributed and modified according to the terms of
+ * the BSD 2-Clause license. Note that NO WARRANTY is provided.
+ * See "LICENSE_BSD2.txt" for details.
+ *
+ * @TAG(NICTA_BSD)
+ */
+
+#include <sel4utils/profile.h>
+
+#include <inttypes.h>
+#include <stdio.h>
+
+/*
+ *  __start_SECTION_NAME and __stop_SECTION_NAME are magic symbols inserted by the gcc
+ *  linker
+ */
+extern profile_var_t __start__profile_var[];
+extern profile_var_t __stop__profile_var[];
+
+/* Default profiling callbacks: print "<name>: <value> <description>" to
+ * stdout. The cookie argument is unused. PRIu32/PRIu64 are used so the
+ * format specifiers match the fixed-width types on every platform
+ * (uint64_t is not necessarily unsigned long long). */
+void profile_print32(uint32_t value, const char *varname, const char *description, void *cookie)
+{
+    printf("%s: %" PRIu32 " %s\n", varname, value, description);
+}
+void profile_print64(uint64_t value, const char *varname, const char *description, void *cookie)
+{
+    printf("%s: %" PRIu64 " %s\n", varname, value, description);
+}
+
+/* Walk every profiling variable registered in the linker-collected
+ * _profile_var section (bounded by __start__profile_var/__stop__profile_var)
+ * and pass its current value to the width-appropriate callback. */
+void profile_scrape(profile_callback32 callback32, profile_callback64 callback64, void *cookie)
+{
+    profile_var_t *i;
+    for (i = __start__profile_var; i < __stop__profile_var; i++) {
+        switch (i->type) {
+        case PROFILE_VAR_TYPE_INT32:
+            callback32(*(uint32_t*)i->var, i->varname, i->description, cookie);
+            break;
+        case PROFILE_VAR_TYPE_INT64:
+            callback64(*(uint64_t*)i->var, i->varname, i->description, cookie);
+            break;
+        default:
+            /* entries are only created by the PROFILE_VAR macros, so an
+             * unknown type indicates corruption or a linking problem */
+            LOG_ERROR("Unknown profile var. Probable memory corruption or linker failure!");
+            break;
+        }
+    }
+}
diff --git a/src/sel4_debug.c b/src/sel4_debug.c
new file mode 100644
index 0000000..da8ac35
--- /dev/null
+++ b/src/sel4_debug.c
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2014, NICTA
+ *
+ * This software may be distributed and modified according to the terms of
+ * the BSD 2-Clause license. Note that NO WARRANTY is provided.
+ * See "LICENSE_BSD2.txt" for details.
+ *
+ * @TAG(NICTA_BSD)
+ */
+
+/*
+  Authors: Kevin Elphinstone
+*/
+
+#include <stdio.h> /* For fprintf() */
+#include <stdlib.h> /* For abort() */
+#include <sel4utils/sel4_debug.h>
+
+/* Human-readable names for seL4 error codes, indexed by error value.
+ * NOTE(review): must stay in sync with the seL4_Error enumeration in the
+ * kernel API; extend this table when new error codes are added. */
+char *sel4_errlist[] = {
+    "seL4_NoError",
+    "seL4_InvalidArgument",
+    "seL4_InvalidCapability",
+    "seL4_IllegalOperation",
+    "seL4_RangeError",
+    "seL4_AlignmentError",
+    "seL4_FailedLookup",
+    "seL4_TruncatedMessage",
+    "seL4_DeleteFirst",
+    "seL4_RevokeFirst",
+    "seL4_NotEnoughMemory"
+};
+
+void
+__sel4_error(int sel4_error, const char *file,
+             const char *function, int line, char * str)
+{
+    fprintf(stderr, "seL4 Error: %s, function %s, file %s, line %d: %s\n",
+            sel4_errlist[sel4_error],
+            function, file, line, str);
+    abort();
+}
diff --git a/src/stack.c b/src/stack.c
new file mode 100644
index 0000000..7fca7a7
--- /dev/null
+++ b/src/stack.c
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2014, NICTA
+ *
+ * This software may be distributed and modified according to the terms of
+ * the BSD 2-Clause license. Note that NO WARRANTY is provided.
+ * See "LICENSE_BSD2.txt" for details.
+ *
+ * @TAG(NICTA_BSD)
+ */
+
+#include <errno.h>
+#include <vspace/vspace.h>
+#include <sel4utils/stack.h>
+#include <sel4utils/util.h>
+#include <utils/stack.h>
+
+/*
+ * Allocate a fresh stack from the given vspace and invoke func on it.
+ *
+ * Returns -1 if no stack could be allocated, otherwise func's return value.
+ */
+int
+sel4utils_run_on_stack(vspace_t *vspace, int (*func)(void)) 
+{
+    void *new_stack = vspace_new_stack(vspace);
+
+    if (new_stack == NULL) {
+        LOG_ERROR("Failed to allocate new stack\n");
+        return -1;
+    }
+
+    return utils_run_on_stack(new_stack, func);
+}
diff --git a/src/thread.c b/src/thread.c
new file mode 100644
index 0000000..c0f998f
--- /dev/null
+++ b/src/thread.c
@@ -0,0 +1,223 @@
+/*
+ * Copyright 2014, NICTA
+ *
+ * This software may be distributed and modified according to the terms of
+ * the BSD 2-Clause license. Note that NO WARRANTY is provided.
+ * See "LICENSE_BSD2.txt" for details.
+ *
+ * @TAG(NICTA_BSD)
+ */
+
+#include <autoconf.h>
+
+#ifdef CONFIG_LIB_SEL4_VSPACE
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <sel4/sel4.h>
+#include <vka/vka.h>
+#include <vka/object.h>
+#include <vspace/vspace.h>
+#include <sel4utils/thread.h>
+#include <sel4utils/util.h>
+#include <sel4utils/arch/util.h>
+
+#include "helpers.h"
+
+/* Allocate and configure (but do not start) a thread: TCB, IPC buffer and
+ * stack in `alloc`, bound to the given cspace and fault endpoint at
+ * `priority`. On any failure all partially-allocated resources are
+ * released via sel4utils_clean_up_thread (which tolerates a partially
+ * initialised struct). Returns 0 on success, -1 on failure. */
+int
+sel4utils_configure_thread(vka_t *vka, vspace_t *alloc, seL4_CPtr fault_endpoint,
+        uint8_t priority, seL4_CNode cspace, seL4_CapData_t cspace_root_data,
+        sel4utils_thread_t *res)
+{
+    memset(res, 0, sizeof(sel4utils_thread_t));
+
+    int error = vka_alloc_tcb(vka, &res->tcb);
+    if (error == -1) {
+        LOG_ERROR("vka_alloc tcb failed");
+        sel4utils_clean_up_thread(vka, alloc, res);
+        return -1;
+    }
+
+    res->ipc_buffer_addr = (seL4_Word) vspace_new_ipc_buffer(alloc, &res->ipc_buffer);
+
+    if (res->ipc_buffer_addr == 0) {
+        LOG_ERROR("ipc buffer allocation failed");
+        /* release the TCB allocated above instead of leaking it, matching
+         * the other failure paths in this function */
+        sel4utils_clean_up_thread(vka, alloc, res);
+        return -1;
+    }
+
+    seL4_CapData_t null_cap_data = {{0}};
+    error = seL4_TCB_Configure(res->tcb.cptr, fault_endpoint, priority, cspace, cspace_root_data,
+            vspace_get_root(alloc), null_cap_data,
+            res->ipc_buffer_addr, res->ipc_buffer);
+
+    if (error != seL4_NoError) {
+        LOG_ERROR("TCB configure failed with seL4 error code %d", error);
+        sel4utils_clean_up_thread(vka, alloc, res);
+        return -1;
+    }
+
+    res->stack_top = vspace_new_stack(alloc);
+
+    if (res->stack_top == NULL) {
+        LOG_ERROR("Stack allocation failed!");
+        sel4utils_clean_up_thread(vka, alloc, res);
+        return -1;
+    }
+
+    return 0;
+}
+
+/* Load the initial register context of `thread` (PC = entry_point; arg0,
+ * arg1 and the IPC buffer address in the architecture's argument
+ * positions) and write it with seL4_TCB_WriteRegisters, resuming the
+ * thread if `resume` is non-zero.
+ *
+ * NOTE(review): on 32-bit ia32 the arguments are stored relative to the
+ * `stack_top` parameter, but esp is computed from thread->stack_top; the
+ * two only agree when callers pass thread->stack_top — confirm before
+ * calling with a different stack. */
+int
+sel4utils_internal_start_thread(sel4utils_thread_t *thread, void *entry_point, void *arg0,
+        void *arg1, int resume, void *stack_top)
+{
+    seL4_UserContext context = {0};
+    size_t context_size = sizeof(seL4_UserContext) / sizeof(seL4_Word);
+    /* NOTE(review): only assigned inside the arch-specific blocks below;
+     * read uninitialized if neither ARCH_IA32 nor ARCH_ARM is defined. */
+    int error;
+
+#ifdef ARCH_IA32
+
+#ifdef CONFIG_X86_64
+    /* arguments in rdi/rsi, IPC buffer address in rdx */
+    context.rdi = (seL4_Word)arg0;
+    context.rsi = (seL4_Word)arg1;
+    context.rdx = (seL4_Word)thread->ipc_buffer_addr;
+    context.rsp = (seL4_Word) thread->stack_top;
+    context.gs = IPCBUF_GDT_SELECTOR;
+    context.rip = (seL4_Word)entry_point;
+#else
+    /* 32-bit: pass arguments on the stack (word slots -5..-3 below the
+     * top); esp is dropped 24 bytes (6 words) to cover them */
+    seL4_Word *stack_ptr = (seL4_Word *) stack_top;
+    stack_ptr[-5] = (seL4_Word) arg0;
+    stack_ptr[-4] = (seL4_Word) arg1;
+    stack_ptr[-3] = (seL4_Word) thread->ipc_buffer_addr;
+    context.esp = (seL4_Word) (thread->stack_top - 24);
+    context.gs = IPCBUF_GDT_SELECTOR;
+    context.eip = (seL4_Word)entry_point;
+#endif
+
+    /* check that stack is aligned correctly */
+    assert((seL4_Word) thread->stack_top % (sizeof(seL4_Word) * 2) == 0);
+    error = seL4_TCB_WriteRegisters(thread->tcb.cptr, resume, 0, context_size, &context);
+#elif ARCH_ARM /* ARCH_IA32 */
+    /* AAPCS: arguments in r0/r1, IPC buffer address in r2 */
+    context.pc = (seL4_Word) entry_point;
+    context.sp = (seL4_Word) thread->stack_top;
+    context.r0 = (seL4_Word) arg0;
+    context.r1 = (seL4_Word) arg1;
+    context.r2 = (seL4_Word) thread->ipc_buffer_addr;
+
+    /* check that stack is aligned correctly */
+    assert((uint32_t) thread->stack_top % (sizeof(seL4_Word) * 2) == 0);
+    error = seL4_TCB_WriteRegisters(thread->tcb.cptr, resume, 0, context_size, &context);
+#endif /* ARCH_ARM */
+
+    if (error) {
+        LOG_ERROR("seL4_TCB_WriteRegisters failed with error %d", error);
+        return -1;
+    }
+
+    return 0;
+}
+
+
+/* Start a thread on its own allocated stack; see
+ * sel4utils_internal_start_thread for the register set-up details. */
+int
+sel4utils_start_thread(sel4utils_thread_t *thread, void *entry_point, void *arg0, void *arg1,
+        int resume)
+{
+    void *stack = thread->stack_top;
+
+    return sel4utils_internal_start_thread(thread, entry_point, arg0, arg1,
+            resume, stack);
+}
+
+/* Release all resources held by a thread created with
+ * sel4utils_configure_thread. Safe to call on a partially-initialised
+ * thread: each resource is freed only if it was allocated (non-zero).
+ * The struct is zeroed afterwards so a repeated call is harmless. */
+void
+sel4utils_clean_up_thread(vka_t *vka, vspace_t *alloc, sel4utils_thread_t *thread)
+{
+    if (thread->tcb.cptr != 0) {
+        vka_free_object(vka, &thread->tcb);
+    }
+
+    if (thread->ipc_buffer_addr != 0) {
+        vspace_free_ipc_buffer(alloc, (seL4_Word *) thread->ipc_buffer_addr);
+    }
+
+    if (thread->stack_top != 0) {
+        vspace_free_stack(alloc, thread->stack_top);
+    }
+
+    memset(thread, 0, sizeof(sel4utils_thread_t));
+}
+
+/* Decode a fault IPC message received from a faulting thread and print a
+ * human-readable description. `tag` is the MessageInfo returned by waiting
+ * on the thread's fault endpoint; `thread_name` labels the output. */
+void
+sel4utils_print_fault_message(seL4_MessageInfo_t tag, char *thread_name) 
+{
+    switch (seL4_MessageInfo_get_label(tag)) {
+        case SEL4_PFIPC_LABEL:
+            /* page fault: MRs carry the IP, fault address and
+             * read/write + prefetch flags */
+            assert(seL4_MessageInfo_get_length(tag) == SEL4_PFIPC_LENGTH);
+            printf("%sPagefault from [%s]: %s %s at PC: 0x"XFMT" vaddr: 0x"XFMT"%s\n",
+                   COLOR_ERROR,
+                   thread_name,
+                   sel4utils_is_read_fault() ? "read" : "write",
+                   seL4_GetMR(SEL4_PFIPC_PREFETCH_FAULT) ? "prefetch fault" : "fault",
+                   seL4_GetMR(SEL4_PFIPC_FAULT_IP),
+                   seL4_GetMR(SEL4_PFIPC_FAULT_ADDR),
+                   COLOR_NORMAL);
+            break;
+
+        case SEL4_EXCEPT_IPC_LABEL:
+            /* unknown-syscall exception: syscall number and faulting IP */
+            assert(seL4_MessageInfo_get_length(tag) == SEL4_EXCEPT_IPC_LENGTH);
+            printf("%sBad syscall from [%s]: scno "DFMT" at PC: 0x"XFMT"%s\n",
+                   COLOR_ERROR,
+                   thread_name,
+                   seL4_GetMR(EXCEPT_IPC_SYS_MR_SYSCALL),
+                   seL4_GetMR(EXCEPT_IPC_SYS_MR_IP),
+                   COLOR_NORMAL
+                  );
+
+            break;
+
+        case SEL4_USER_EXCEPTION_LABEL:
+            /* user exception (e.g. undefined instruction); MR 0 is the PC */
+            assert(seL4_MessageInfo_get_length(tag) == SEL4_USER_EXCEPTION_LENGTH);
+            printf("%sInvalid instruction from [%s] at PC: 0x"XFMT"%s\n",
+                   COLOR_ERROR,
+                   thread_name,
+                   seL4_GetMR(0),
+                   COLOR_NORMAL);
+            break;
+
+        default:
+            /* What? Why are we here? What just happened? */
+            printf("Unknown fault from [%s]: "DFMT" (length = "DFMT")\n", thread_name, seL4_MessageInfo_get_label(tag), seL4_MessageInfo_get_length(tag));
+            break;
+        }
+}
+
+
+/* Minimal fault-handler thread body: report the first fault that arrives
+ * on the endpoint, then block again so the rest of the system can run. */
+static int 
+fault_handler(char *name, seL4_CPtr endpoint) {
+    seL4_Word badge;
+    seL4_MessageInfo_t info;
+
+    info = seL4_Wait(endpoint, &badge);
+    sel4utils_print_fault_message(info, name);
+
+    /* block again; the faulting thread stays suspended */
+    seL4_Wait(endpoint, &badge);
+
+    return 0;
+}
+
+/* Spawn a thread that prints any fault received on fault_endpoint and then
+ * blocks. NOTE(review): the handler thread itself is configured with a
+ * null (0) fault endpoint, so a fault *in the handler* is unrecoverable —
+ * confirm intended. The function-pointer-to-void* conversion below is not
+ * strictly portable C but matches usage elsewhere in this library. */
+int
+sel4utils_start_fault_handler(seL4_CPtr fault_endpoint, vka_t *vka, vspace_t *vspace, 
+        uint8_t prio, seL4_CPtr cspace, seL4_CapData_t cap_data, char *name, 
+        sel4utils_thread_t *res)
+{
+    int error = sel4utils_configure_thread(vka, vspace, 0, prio, cspace, 
+            cap_data, res);
+
+    if (error) {
+        LOG_ERROR("Failed to configure fault handling thread\n");
+        return -1;
+    }
+
+    return sel4utils_start_thread(res, fault_handler, (void *) name,
+            (void *) fault_endpoint, 1);
+}
+
+#endif /* CONFIG_LIB_SEL4_VSPACE */
diff --git a/src/vspace/bootstrap.c b/src/vspace/bootstrap.c
new file mode 100644
index 0000000..243b82b
--- /dev/null
+++ b/src/vspace/bootstrap.c
@@ -0,0 +1,451 @@
+/*
+ * Copyright 2014, NICTA
+ *
+ * This software may be distributed and modified according to the terms of
+ * the BSD 2-Clause license. Note that NO WARRANTY is provided.
+ * See "LICENSE_BSD2.txt" for details.
+ *
+ * @TAG(NICTA_BSD)
+ */
+
+#include <autoconf.h>
+
+#if defined CONFIG_LIB_SEL4_VSPACE && defined CONFIG_LIB_SEL4_VKA
+
+#include <stdlib.h>
+#include <string.h>
+#include <vka/vka.h>
+#include <vka/capops.h>
+#include <sel4utils/vspace.h>
+#include <sel4utils/vspace_internal.h>
+
+#ifdef CONFIG_KERNEL_STABLE
+#include <sel4/arch/bootinfo.h>
+#endif
+
+/* For the initial vspace, we must always guarantee we have virtual memory available
+ * for each bottom level page table. Future vspaces can then use the initial vspace
+ * to allocate bottom level page tables until memory runs out.
+ *
+ * The initial vspace then looks like this:
+ * 0xffffffff ↑
+ *            | kernel code
+ * 0xe0000000 ↓
+ * 0xdfffffff ↑
+ *            | bottom level page tables
+ * 0xdfc00000 ↓
+ * 0xdfbfffff ↑
+ *            | top level page table
+ * 0xdfbff000 ↓
+ * 0xdfbfefff ↑
+ *            | available address space
+ * 0x00001000 ↓
+ * 0x00000fff ↑
+ *            | reserved (null page)
+ * 0x00000000 ↓
+ *
+ * The following constants come from the above layout.
+ */
+
+/* reserve enough virtual memory just below the kernel window such that there is enough
+ * space to create all possible bottom level structures. This amount of memory is therefore
+ * size of second level structure * number of second level structures
+ * where number of second level structures is equivalent to number of bits in the top level translation */
+#define FIRST_BOTTOM_LEVEL_PAGE_TABLE_VADDR (KERNEL_RESERVED_START - sizeof(bottom_level_t) * VSPACE_LEVEL_SIZE)
+/* we also need some memory for top level structure. place that just below all our bottom
+   level page tables. no good way of specifying the size, will assume one page */
+#define TOP_LEVEL_PAGE_TABLE_VADDR (FIRST_BOTTOM_LEVEL_PAGE_TABLE_VADDR - PAGE_SIZE_4K)
+/* reserve the first page to catch null pointer dereferences */
+#define FIRST_VADDR (0 + PAGE_SIZE_4K)
+
+/* sanity checks - check our constants above are what we say they are */
+#ifndef CONFIG_X86_64
+compile_time_assert(sel4utils_vspace_1, TOP_LEVEL_INDEX(TOP_LEVEL_PAGE_TABLE_VADDR) == 894);
+compile_time_assert(sel4utils_vspace_2, BOTTOM_LEVEL_INDEX(TOP_LEVEL_PAGE_TABLE_VADDR) == 1023);
+compile_time_assert(sel4utils_vspace_3, TOP_LEVEL_INDEX(FIRST_BOTTOM_LEVEL_PAGE_TABLE_VADDR) == 895);
+compile_time_assert(sel4utils_vspace_4, BOTTOM_LEVEL_INDEX(FIRST_BOTTOM_LEVEL_PAGE_TABLE_VADDR) == 0);
+#endif
+
+/* check our data structures are 4K page sized like the rest of the code assumes they are */
+compile_time_assert(bottom_level_4k, sizeof(bottom_level_t) == PAGE_SIZE_4K);
+compile_time_assert(top_level_4k, sizeof(bottom_level_t *) * VSPACE_LEVEL_SIZE == PAGE_SIZE_4K);
+
+/* Initialisation shared by every vspace variant: record the allocator,
+ * the page directory cap and the allocated-object callback. */
+static int
+common_init(vspace_t *vspace, vka_t *vka, seL4_CPtr page_directory,
+            vspace_allocated_object_fn allocated_object_fn, void *cookie)
+{
+    sel4utils_alloc_data_t *alloc_data = get_alloc_data(vspace);
+
+    alloc_data->vka = vka;
+    /* heuristic starting point for allocation searches */
+    alloc_data->last_allocated = (void *) 0x10000000;
+
+    vspace->allocated_object = allocated_object_fn;
+    vspace->allocated_object_cookie = cookie;
+    vspace->page_directory = page_directory;
+
+    return 0;
+}
+
+/* Finish vspace construction once the shadow page tables exist: mark the
+ * kernel window reserved and install the public interface functions. */
+static void
+common_init_post_bootstrap(vspace_t *vspace, sel4utils_map_page_fn map_page)
+{
+    sel4utils_alloc_data_t *data = get_alloc_data(vspace);
+    /* reserve the kernel region, we do this by marking the
+     * top level entry as RESERVED */
+    for (int i = TOP_LEVEL_INDEX(KERNEL_RESERVED_START);
+            i <= TOP_LEVEL_INDEX(KERNEL_RESERVED_END); i++) {
+        data->top_level[i] = (bottom_level_t *) RESERVED;
+    }
+
+    /* mapping primitive for this vspace flavour (page directory or EPT) */
+    data->map_page = map_page;
+
+    /* initialise the rest of the functions now that they are useable */
+    vspace->new_stack = sel4utils_new_stack;
+    vspace->free_stack = sel4utils_free_stack;
+
+    vspace->new_ipc_buffer = sel4utils_new_ipc_buffer;
+    vspace->free_ipc_buffer = sel4utils_free_ipc_buffer;
+
+    vspace->new_pages = sel4utils_new_pages;
+    vspace->new_pages_at_vaddr = sel4utils_new_pages_at_vaddr;
+    vspace->free_pages = sel4utils_free_pages;
+
+    vspace->map_pages = sel4utils_map_pages;
+    vspace->map_pages_at_vaddr = sel4utils_map_pages_at_vaddr;
+    vspace->unmap_pages = sel4utils_unmap_pages;
+    vspace->unmap_reserved_pages = sel4utils_unmap_reserved_pages;
+
+    vspace->reserve_range = sel4utils_reserve_range;
+    vspace->reserve_range_at = sel4utils_reserve_range_at;
+    vspace->free_reservation = sel4utils_free_reservation;
+
+    vspace->get_cap = sel4utils_get_cap;
+}
+
+/* Mark every 4K page in [start, end) as reserved in the vspace's shadow
+ * tables; start is rounded down and end rounded up to page boundaries.
+ * Returns 0 on success, the first non-zero error otherwise.
+ * NOTE(review): the uint32_t casts assume 32-bit virtual addresses —
+ * confirm for 64-bit targets. */
+static int
+reserve_range(vspace_t *vspace, void *start, void *end)
+{
+    int pages = BYTES_TO_4K_PAGES(ROUND_UP((uint32_t) ((seL4_Word)end), PAGE_SIZE_4K) -
+                ROUND_DOWN((uint32_t) ((seL4_Word)start), PAGE_SIZE_4K));
+
+    int error = 0;
+    for (int i = 0; i < pages && error == 0; i++) {
+        error = reserve_entries(vspace, start + (i * PAGE_SIZE_4K) , seL4_PageBits);
+    }
+
+    if (error) {
+        LOG_ERROR("Error reserving range between %p and %p", start, end);
+    }
+
+    return error;
+}
+
+/**
+ * Symbols in this function need to be provided by your
+ * crt0.S or your linker script, such that we can figure out
+ * what virtual addresses are taken up by the current task
+ */
+/* Report the virtual address range [*va_start, *va_end) occupied by the
+ * current task's image via the linker-provided __executable_start/_end
+ * symbols; *va_end is rounded up to the next 4K page boundary. */
+void
+sel4utils_get_image_region(seL4_Word *va_start, seL4_Word *va_end) {
+    extern char __executable_start[];
+    extern char _end[];
+
+    *va_start = (seL4_Word) __executable_start;
+    *va_end = (seL4_Word) _end;
+    /* round the *value* up to a page boundary; rounding the va_end pointer
+     * itself (as the previous expression did) clobbers the result */
+    *va_end = (seL4_Word) ROUND_UP((uint32_t) *va_end, (uint32_t) PAGE_SIZE_4K);
+}
+
+
+/* Reserve the regions the running task already occupies: the image
+ * (code/data) and any caller-provided frames (boot info, IPC buffer).
+ * existing_frames is a NULL-terminated array of page-aligned addresses,
+ * each assumed to cover a single 4K page. */
+static int
+reserve_initial_task_regions(vspace_t *vspace, void *existing_frames[])
+{
+    
+    /* mark the code and data segments as used */
+    seL4_Word va_start, va_end;
+
+    sel4utils_get_image_region(&va_start, &va_end);
+
+    /* this is the scope of the virtual memory used by the image, including
+     * data, text and stack */
+    if (reserve_range(vspace, (void *) va_start, (void *) va_end)) {
+        LOG_ERROR("Error reserving code/data segment");
+        return -1;
+    }
+
+    /* mark boot info as used */
+    if (existing_frames != NULL) {
+        for (int i = 0; existing_frames[i] != NULL; i++) {
+            if (reserve_range(vspace, existing_frames[i], existing_frames[i] + PAGE_SIZE_4K)) {
+                LOG_ERROR("Error reserving frame at %p", existing_frames[i]);
+                return -1;
+            }
+        }
+    }
+
+    return 0;
+}
+
+/* Allocate a 4K frame and map it, zeroed, at vaddr in the given vspace.
+ * Used only during bootstrap, before the normal mapping paths exist.
+ * Any page table created as a side effect of the mapping is reported via
+ * the vspace's allocated-object callback. Returns seL4_NoError on success. */
+static int
+alloc_and_map_bootstrap_frame(vspace_t *vspace, vka_object_t *frame, void *vaddr)
+{
+    sel4utils_alloc_data_t *data = get_alloc_data(vspace);
+
+    int error = vka_alloc_frame(data->vka, seL4_PageBits, frame);
+    if (error) {
+        LOG_ERROR("Failed to allocate bootstrap frame, error: %d", error);
+        return error;
+    }
+
+    vka_object_t pagetable = {0};
+
+    error = sel4utils_map_page(data->vka, vspace->page_directory, frame->cptr, vaddr,
+            seL4_AllRights, 1, &pagetable);
+
+    if (error) {
+        LOG_ERROR("Failed to map bootstrap frame at %p, error: %d", vaddr, error);
+        /* don't leak the frame allocated above */
+        vka_free_object(data->vka, frame);
+        return error;
+    }
+
+    /* Zero the memory */
+    memset(vaddr, 0, PAGE_SIZE_4K);
+
+    if (pagetable.cptr != 0) {
+        vspace_maybe_call_allocated_object(vspace, pagetable);
+    }
+
+    return seL4_NoError;
+}
+
+/* Create and map a new bottom-level shadow table covering vaddr. The
+ * backing frame is mapped at the next free address in the pre-reserved
+ * bottom-level window (data->next_bottom_level_vaddr). Returns 0 on
+ * success, -1 on failure. */
+int
+bootstrap_create_level(vspace_t *vspace, void *vaddr)
+{
+    sel4utils_alloc_data_t *data = get_alloc_data(vspace);
+
+    vka_object_t bottom_level = {0};
+    if (alloc_and_map_bootstrap_frame(vspace, &bottom_level,
+            data->next_bottom_level_vaddr) != seL4_NoError) {
+        return -1;
+    }
+
+    /* record the new table's frame cap; the entry was reserved during
+     * bootstrap, so the update cannot fail */
+    int error __attribute__((unused)) = update(vspace, data->next_bottom_level_vaddr, bottom_level.cptr);
+    /* this cannot fail */
+    assert(error == seL4_NoError);
+
+    data->top_level[TOP_LEVEL_INDEX(vaddr)] = (bottom_level_t *) data->next_bottom_level_vaddr;
+
+    data->next_bottom_level_vaddr += PAGE_SIZE_4K;
+
+    return 0;
+}
+
+/* Bootstrap the shadow page-table bookkeeping *inside* the vspace being
+ * created. Three frames are needed: the top-level table, plus two
+ * bottom-level tables (one covering the top-level table's own address,
+ * one covering the bottom-level window itself). Remaining window slots
+ * are reserved so bottom-level tables can be created lazily later. */
+static int
+bootstrap_page_table(vspace_t *vspace)
+{
+    sel4utils_alloc_data_t *data = get_alloc_data(vspace);
+
+    /* First we allocate a page for the top level page table to live in */
+    vka_object_t top_level = {0};
+    if (alloc_and_map_bootstrap_frame(vspace, &top_level, (void *) TOP_LEVEL_PAGE_TABLE_VADDR)) {
+        return -1;
+    }
+
+    /* The top level page table vaddr is the last entry in the second last bottom level
+     * page table - create that level and map it in*/
+    vka_object_t first_bottom_level = {0};
+    if (alloc_and_map_bootstrap_frame(vspace, &first_bottom_level,
+            (void *) FIRST_BOTTOM_LEVEL_PAGE_TABLE_VADDR)) {
+        return -1;
+    }
+
+    /* The level we just created is the first entry in the last bottom level page table -
+     * create that level and map it in */
+    vka_object_t second_bottom_level = {0};
+    if (alloc_and_map_bootstrap_frame(vspace, &second_bottom_level,
+            (void *) FIRST_BOTTOM_LEVEL_PAGE_TABLE_VADDR + PAGE_SIZE_4K)) {
+        return -1;
+    }
+
+    /* set up the pointers to our new page tables */
+    data->top_level = (bottom_level_t **) TOP_LEVEL_PAGE_TABLE_VADDR;
+    data->top_level[TOP_LEVEL_INDEX(TOP_LEVEL_PAGE_TABLE_VADDR)] =
+        (bottom_level_t *) FIRST_BOTTOM_LEVEL_PAGE_TABLE_VADDR;
+    data->top_level[TOP_LEVEL_INDEX(FIRST_BOTTOM_LEVEL_PAGE_TABLE_VADDR)] =
+        (bottom_level_t *) (FIRST_BOTTOM_LEVEL_PAGE_TABLE_VADDR + PAGE_SIZE_4K);
+
+    /* now update those entries in our new page tables */
+    /* these cannot fail - failure is only caused by lack of a bottom level page table,
+     * and we just allocated them. */
+    int error = update(vspace, (void *) TOP_LEVEL_PAGE_TABLE_VADDR, top_level.cptr);
+    (void)error;
+    assert(error == seL4_NoError);
+    error = update(vspace, (void *) FIRST_BOTTOM_LEVEL_PAGE_TABLE_VADDR, first_bottom_level.cptr);
+    assert(error == seL4_NoError);
+    error = update(vspace, (void *) FIRST_BOTTOM_LEVEL_PAGE_TABLE_VADDR + PAGE_SIZE_4K,
+                   second_bottom_level.cptr);
+    assert(error == seL4_NoError);
+
+    /* Finally reserve the rest of the entries for the rest of the bottom level page tables */
+    void *vaddr = (void *) FIRST_BOTTOM_LEVEL_PAGE_TABLE_VADDR;
+    data->next_bottom_level_vaddr = vaddr + (2 * PAGE_SIZE_4K);
+    for (int i = 2; i < VSPACE_LEVEL_SIZE; i++) {
+        error = reserve(vspace, vaddr + (i * PAGE_SIZE_4K));
+        assert(error == seL4_NoError);
+    }
+
+    return 0;
+}
+
+/* Interface functions */
+
+/* Build a vspace for a *different* address space (page_directory), using
+ * `loader` — an already-working vspace — to allocate the shadow page-table
+ * storage. `data` supplies the bookkeeping memory; `map_page` selects the
+ * mapping primitive (page directory vs EPT). Returns 0 on success, -1 on
+ * failure. */
+int
+sel4utils_get_vspace_with_map(vspace_t *loader, vspace_t *new_vspace, sel4utils_alloc_data_t *data,
+        vka_t *vka, seL4_CPtr page_directory,
+        vspace_allocated_object_fn allocated_object_fn, void *allocated_object_cookie, sel4utils_map_page_fn map_page)
+{
+    new_vspace->data = (void *) data;
+
+    if (common_init(new_vspace, vka, page_directory, allocated_object_fn, allocated_object_cookie)) {
+        return -1;
+    }
+
+    data->bootstrap = loader;
+
+    /* create the top level page table from the loading vspace */
+    data->top_level = (bottom_level_t **) vspace_new_pages(loader, seL4_AllRights, 1, seL4_PageBits);
+    if (data->top_level == NULL) {
+        return -1;
+    }
+
+    common_init_post_bootstrap(new_vspace, map_page);
+
+    return 0;
+}
+
+/* Create a new vspace managed from `loader`, using the standard page
+ * directory mapping function; see sel4utils_get_vspace_with_map. */
+int
+sel4utils_get_vspace(vspace_t *loader, vspace_t *new_vspace, sel4utils_alloc_data_t *data,
+        vka_t *vka, seL4_CPtr page_directory,
+        vspace_allocated_object_fn allocated_object_fn, void *allocated_object_cookie)
+{
+    return sel4utils_get_vspace_with_map(loader, new_vspace, data, vka, page_directory,
+            allocated_object_fn, allocated_object_cookie,
+            sel4utils_map_page_pd);
+}
+
+#ifdef CONFIG_VTX
+/* As sel4utils_get_vspace, but for a guest (EPT) address space. The
+ * bookkeeping structure is heap-allocated here rather than supplied by the
+ * caller; it is owned by the vspace on success. */
+int sel4utils_get_vspace_ept(vspace_t *loader, vspace_t *new_vspace, vka_t *vka,
+        seL4_CPtr ept, vspace_allocated_object_fn allocated_object_fn, void *allocated_object_cookie)
+{
+    sel4utils_alloc_data_t *data = malloc(sizeof(*data));
+    if (!data) {
+        return -1;
+    }
+
+    int error = sel4utils_get_vspace_with_map(loader, new_vspace, data, vka, ept,
+            allocated_object_fn, allocated_object_cookie, sel4utils_map_page_ept);
+    if (error) {
+        /* don't leak the bookkeeping allocation on failure */
+        free(data);
+    }
+
+    return error;
+}
+#endif /* CONFIG_VTX */
+
+/* Bootstrap a vspace for the current task: build the shadow page tables
+ * inside the vspace itself, then reserve everything already in use (the
+ * image, the caller-supplied existing frames and the kernel window).
+ * Returns 0 on success, -1 on failure. */
+int
+sel4utils_bootstrap_vspace(vspace_t *vspace, sel4utils_alloc_data_t *data,
+        seL4_CPtr page_directory, vka_t *vka,
+        vspace_allocated_object_fn allocated_object_fn, void *cookie, void *existing_frames[])
+{
+    vspace->data = (void *) data;
+
+    if (common_init(vspace, vka, page_directory, allocated_object_fn, cookie)) {
+        return -1;
+    }
+
+    data->bootstrap = NULL;
+
+    if (bootstrap_page_table(vspace)) {
+        return -1;
+    }
+
+    /* propagate reservation failures instead of silently ignoring them */
+    if (reserve_initial_task_regions(vspace, existing_frames)) {
+        return -1;
+    }
+
+    common_init_post_bootstrap(vspace, sel4utils_map_page_pd);
+
+    return 0;
+}
+
+
+/* Convenience wrapper around sel4utils_bootstrap_vspace that derives the
+ * already-used frames (the boot info frame, the IPC buffer, and the IA32
+ * arch boot info on the stable kernel) from seL4_BootInfo. */
+int
+sel4utils_bootstrap_vspace_with_bootinfo(vspace_t *vspace, sel4utils_alloc_data_t *data,
+        seL4_CPtr page_directory,
+        vka_t *vka, seL4_BootInfo *info, vspace_allocated_object_fn allocated_object_fn,
+        void *allocated_object_cookie)
+{
+    void *existing_frames[] = {
+        (void *) info,
+#if defined(CONFIG_ARCH_IA32) && defined(CONFIG_KERNEL_STABLE)
+        (void *) seL4_IA32_GetBootInfo(),
+#endif
+        /* We assume the IPC buffer is less than a page and fits into one page */
+        (void *) (seL4_Word)ROUND_DOWN((uint32_t) ((seL4_Word)(info->ipcBuffer)), PAGE_SIZE_4K),
+        NULL
+    };
+
+    return sel4utils_bootstrap_vspace(vspace, data, page_directory, vka, allocated_object_fn,
+            allocated_object_cookie, existing_frames);
+}
+
+/* Copy the region described by `image` from `current` into `clone`:
+ * allocate fresh pages in the clone, temporarily map each one into the
+ * current vspace via a scratch cslot, and memcpy the data across.
+ *
+ * NOTE(review): failures part-way through leave both vspaces inconsistent,
+ * and the early return below leaks the scratch cslot — confirm this is
+ * acceptable for its bootstrap-only use.
+ * NOTE(review): the `image->end - 1` bound skips the last page only if
+ * `end` is exclusive — verify against the reservation semantics. */
+int 
+sel4utils_bootstrap_clone_into_vspace(vspace_t *current, vspace_t *clone, reservation_t *image)
+{  
+    seL4_CPtr slot;
+    /* scratch slot used to duplicate each page cap for local mapping */
+    int error = vka_cspace_alloc(get_alloc_data(current)->vka, &slot);
+
+    if (error) {
+        return -1;
+    }
+
+    cspacepath_t dest;
+    vka_cspace_make_path(get_alloc_data(current)->vka, slot, &dest);
+   
+    for (void *page = image->start; page < image->end - 1; page += PAGE_SIZE_4K) {
+       /* we don't know if the current vspace has caps to its mappings - 
+        * it probably doesn't.
+        *
+        * So we map the page in and copy the data across instead :( */
+        
+        /* create the page in the clone vspace */
+        int error = vspace_new_pages_at_vaddr(clone, page, 1, seL4_PageBits, image);
+        if (error) {
+            /* vspace will be left inconsistent */
+            LOG_ERROR("Error %d while trying to map page at %p\n", error, page);
+        }
+
+        seL4_CPtr cap = vspace_get_cap(clone, page);
+        /* copy the cap */
+        cspacepath_t src;
+
+
+        vka_cspace_make_path(get_alloc_data(clone)->vka, cap, &src);
+        error = vka_cnode_copy(&dest, &src, seL4_AllRights);
+        assert(error == 0);
+
+        /* map a copy of it the current vspace */
+        void *dest_addr = vspace_map_pages(current, &dest.capPtr, seL4_AllRights, 1, seL4_PageBits, 1);
+        if (dest_addr == NULL) {
+            /* vspace will be left inconsistent */
+            LOG_ERROR("Error! Vspace mapping failed, bailing\n");
+            return -1;
+        }
+
+        /* copy the data */
+        memcpy(dest_addr, page, PAGE_SIZE_4K);
+
+#ifdef CONFIG_ARCH_ARM
+        /* keep instruction caches coherent with the freshly-written pages */
+        seL4_ARM_Page_Unify_Instruction(dest.capPtr, 0, PAGE_SIZE_4K);
+        seL4_ARM_Page_Unify_Instruction(cap, 0, PAGE_SIZE_4K);
+#endif /* CONFIG_ARCH_ARM */
+
+        /* unmap our copy */
+        vspace_unmap_pages(current, dest_addr, 1, seL4_PageBits);
+        vka_cnode_delete(&dest);
+    }
+
+    /* TODO swap out fault handler temporarily to ignore faults here */
+    vka_cspace_free(get_alloc_data(current)->vka, slot);
+    return 0;
+}
+
+
+#endif /* CONFIG_LIB_SEL4_VSPACE && CONFIG_LIB_SEL4_VKA */
diff --git a/src/vspace/client_server_vspace.c b/src/vspace/client_server_vspace.c
new file mode 100644
index 0000000..689acc8
--- /dev/null
+++ b/src/vspace/client_server_vspace.c
@@ -0,0 +1,393 @@
+/*
+ * Copyright 2014, NICTA
+ *
+ * This software may be distributed and modified according to the terms of
+ * the BSD 2-Clause license. Note that NO WARRANTY is provided.
+ * See "LICENSE_BSD2.txt" for details.
+ *
+ * @TAG(NICTA_BSD)
+ */
+
+#include <autoconf.h>
+
+#if defined CONFIG_LIB_SEL4_VKA && defined CONFIG_LIB_SEL4_VSPACE
+
+#include <sel4utils/client_server_vspace.h>
+#include <vka/capops.h>
+#include <sel4utils/mapping.h>
+#include <sel4utils/util.h>
+#include <stdlib.h>
+#include <sel4utils/vspace.h>
+#include <sel4utils/vspace_internal.h>
+
+#define OFFSET(x) ( ((uint32_t)((seL4_Word)(x))) & MASK(BOTTOM_LEVEL_BITS_OFFSET) )
+#define ADDR(start, page) ( (start) + (page) * PAGE_SIZE_4K )
+
+/* Bookkeeping for a client-server proxy vspace: every frame mapped into the
+ * client vspace is mirrored into the server vspace so the server can access
+ * the client's memory directly. */
+typedef struct client_server_vspace {
+    /* vspace of the server. we also use this to request memory for metadata */
+    vspace_t *server;
+    /* vspace of the client we are proxying calls for */
+    vspace_t *client;
+    /* allocator used for cslots and for the translation bookkeeping */
+    vka_t *vka;
+    /* a fake sel4utils vspace that acts as a translation layer */
+    vspace_t translation;
+    /* backing data for the translation vspace */
+    sel4utils_alloc_data_t translation_data;
+} client_server_vspace_t;
+
+/* Tear down the server-side shadow of the client page at caddr: look up the
+ * server address it was mirrored to, unmap it from the server vspace,
+ * destroy and free the duplicated frame cap, and clear the translation. */
+static void unmap(client_server_vspace_t *cs_vspace, void *caddr, size_t size_bits)
+{
+    /* determine the virtual address of the start */
+    void *saddr = (void*)get_entry(cs_vspace->translation_data.top_level, caddr);
+    assert(saddr);
+    /* get the cap from the server vspace */
+    seL4_CPtr cap = vspace_get_cap(cs_vspace->server, saddr);
+    assert(cap);
+    /* unmap */
+    vspace_unmap_pages(cs_vspace->server, saddr, 1, size_bits);
+    /* delete and free the cap */
+    cspacepath_t cap_path;
+    vka_cspace_make_path(cs_vspace->vka, cap, &cap_path);
+    vka_cnode_delete(&cap_path);
+    vka_cspace_free(cs_vspace->vka, cap);
+    /* remove our translation */
+    clear_entries(&cs_vspace->translation, caddr, size_bits);
+}
+
+/* unmap() applied to num_pages consecutive frames of size 2^size_bits
+ * starting at client address caddr */
+static void unmap_many(client_server_vspace_t *cs_vspace, void *caddr, size_t num_pages, size_t size_bits)
+{
+    int i;
+    for (i = 0; i < num_pages; i++) {
+        unmap(cs_vspace, caddr + BIT(size_bits) * i, size_bits);
+    }
+}
+
+/* Duplicate the frame cap `cap` (which backs client address caddr) and map
+ * the copy into the server vspace, recording a client->server address
+ * translation for each 4K page the frame covers.
+ *
+ * Returns 0 on success. On failure all partial state (cslot, server mapping,
+ * translation entries) is rolled back and a non-zero error is returned. */
+static int dup_and_map(client_server_vspace_t *cs_vspace, seL4_CPtr cap, void *caddr, size_t size_bits)
+{
+    /* duplicate the cap */
+    seL4_CPtr server_cap;
+    cspacepath_t cap_path, server_cap_path;
+    int error = vka_cspace_alloc(cs_vspace->vka, &server_cap);
+    if (error) {
+        LOG_ERROR("Failed to allocate cslot");
+        return error;
+    }
+    vka_cspace_make_path(cs_vspace->vka, cap, &cap_path);
+    vka_cspace_make_path(cs_vspace->vka, server_cap, &server_cap_path);
+    error = vka_cnode_copy(&server_cap_path, &cap_path, seL4_AllRights);
+    if (error != seL4_NoError) {
+        LOG_ERROR("Error copying frame cap");
+        vka_cspace_free(cs_vspace->vka, server_cap);
+        return -1;
+    }
+    /* map it into the server */
+    void *saddr = vspace_map_pages(cs_vspace->server, &server_cap_path.capPtr, seL4_AllRights, 1, size_bits, 1);
+    if (!saddr) {
+        LOG_ERROR("Error mapping frame into server vspace");
+        vka_cnode_delete(&server_cap_path);
+        vka_cspace_free(cs_vspace->vka, server_cap);
+        return -1;
+    }
+    /* update our translation */
+    int i;
+    uint32_t pages = BYTES_TO_4K_PAGES(BIT(size_bits));
+    for (i = 0; i < pages; i++) {
+        if (update_entry(&cs_vspace->translation, ADDR(caddr, i), (seL4_CPtr)ADDR(saddr, i))) {
+            /* remove the entries we already put in. we can't call
+             * clear_entries because the portion to remove is not
+             * a power of two size */
+            for (i--; i >= 0; i--) {
+                clear(&cs_vspace->translation, ADDR(caddr, i));
+            }
+            /* unmap the frame from the server before destroying the
+             * duplicate cap — the original code deleted the cap while the
+             * frame was still mapped, leaving a dangling server mapping */
+            vspace_unmap_pages(cs_vspace->server, saddr, 1, size_bits);
+            vka_cnode_delete(&server_cap_path);
+            vka_cspace_free(cs_vspace->vka, server_cap);
+            return -1;
+        }
+    }
+    return 0;
+}
+
+/* Duplicate and map num_pages frames (caps[0..num_pages-1]) into the server,
+ * shadowing the client mappings starting at caddr. On failure the frames
+ * already mirrored are unmapped again and the error is returned. */
+static int dup_and_map_many(client_server_vspace_t *cs_vspace, seL4_CPtr *caps, void *caddr, size_t num_pages, size_t size_bits)
+{
+    int i;
+    for (i = 0; i < num_pages; i++) {
+        int error = dup_and_map(cs_vspace, caps[i], caddr + BIT(size_bits) * i, size_bits);
+        if (error) {
+            /* exactly i frames (indices 0..i-1) succeeded. The original
+             * passed i - 1 here, which leaked one mapping and — when the
+             * first dup failed (i == 0) — underflowed the size_t count
+             * to SIZE_MAX in unmap_many */
+            unmap_many(cs_vspace, caddr, i, size_bits);
+            return error;
+        }
+    }
+    return 0;
+}
+
+/* Mirror num_pages frames that are already mapped in the client at caddr
+ * into the server: fetch each frame cap from the client vspace, then
+ * duplicate and map them with dup_and_map_many. */
+static int find_dup_and_map_many(client_server_vspace_t *cs_vspace, void *caddr, size_t num_pages, size_t size_bits)
+{
+    int i;
+    seL4_CPtr caps[num_pages];
+    for (i = 0; i < num_pages; i++) {
+        caps[i] = vspace_get_cap(cs_vspace->client, caddr + BIT(size_bits) * i);
+        assert(caps[i]);
+    }
+    return dup_and_map_many(cs_vspace, caps, caddr, num_pages, size_bits);
+}
+
+/* Create a stack in the client vspace only; the pages are not mirrored
+ * into the server. */
+static void *cs_new_stack(vspace_t *vspace)
+{
+    client_server_vspace_t *cs_vspace = (client_server_vspace_t*)vspace->data;
+    /* keep the client's page directory in sync with ours (see comment in
+     * sel4utils_get_cs_vspace) */
+    cs_vspace->client->page_directory = vspace->page_directory;
+    /* we do not know how much stack will get mapped. so just proxy the call
+     * and hope we do not want to inspect their stack */
+    return vspace_new_stack(cs_vspace->client);
+}
+
+/* Free a stack previously created with cs_new_stack (client vspace only). */
+static void cs_free_stack(vspace_t *vspace, void *stack_top)
+{
+    client_server_vspace_t *cs_vspace = (client_server_vspace_t*)vspace->data;
+    cs_vspace->client->page_directory = vspace->page_directory;
+    vspace_free_stack(cs_vspace->client, stack_top);
+}
+
+/* Create an IPC buffer in the client vspace only (not mirrored). */
+static void *cs_new_ipc_buffer(vspace_t *vspace, seL4_CPtr *page)
+{
+    client_server_vspace_t *cs_vspace = (client_server_vspace_t*)vspace->data;
+    cs_vspace->client->page_directory = vspace->page_directory;
+    /* it makes no sense to want to inspect their IPC buffer. so this call can
+     * just be proxied */
+    return vspace_new_ipc_buffer(cs_vspace->client, page);
+}
+
+/* Free an IPC buffer previously created with cs_new_ipc_buffer. */
+static void cs_free_ipc_buffer(vspace_t *vspace, void *vaddr)
+{
+    client_server_vspace_t *cs_vspace = (client_server_vspace_t*)vspace->data;
+    cs_vspace->client->page_directory = vspace->page_directory;
+    vspace_free_ipc_buffer(cs_vspace->client, vaddr);
+}
+
+/* Reserve a range in the client vspace. Only cacheable reservations are
+ * supported by this proxy (asserted below). */
+static reservation_t *cs_reserve_range(vspace_t *vspace, size_t size, seL4_CapRights rights,
+        int cacheable, void **result)
+{
+    client_server_vspace_t *cs_vspace = (client_server_vspace_t*)vspace->data;
+    assert(cacheable);
+    cs_vspace->client->page_directory = vspace->page_directory;
+    /* we are not interested in client reservations, just proxy */
+    return vspace_reserve_range(cs_vspace->client, size, rights, cacheable, result);
+}
+
+/* Reserve a range at a fixed address in the client vspace (cacheable only). */
+static reservation_t *cs_reserve_range_at(vspace_t *vspace, void *vaddr, size_t size, seL4_CapRights
+        rights, int cacheable)
+{
+    client_server_vspace_t *cs_vspace = (client_server_vspace_t*)vspace->data;
+    assert(cacheable);
+    cs_vspace->client->page_directory = vspace->page_directory;
+    /* we are not interested in client reservations, just proxy */
+    return vspace_reserve_range_at(cs_vspace->client, vaddr, size, rights, cacheable);
+}
+
+/* Release a reservation made through cs_reserve_range(_at). */
+static void cs_free_reservation(vspace_t *vspace, reservation_t *reservation)
+{
+    client_server_vspace_t *cs_vspace = (client_server_vspace_t*)vspace->data;
+    cs_vspace->client->page_directory = vspace->page_directory;
+    vspace_free_reservation(cs_vspace->client, reservation);
+}
+
+/* Map caller-supplied frame caps into the client at vaddr, then mirror them
+ * into the server. On a server-side failure the client mappings are undone
+ * so client and server stay consistent. */
+static int cs_map_pages_at_vaddr(vspace_t *vspace, seL4_CPtr caps[], void *vaddr,
+        size_t num_pages, size_t size_bits, reservation_t *reservation)
+{
+    client_server_vspace_t *cs_vspace = (client_server_vspace_t*)vspace->data;
+    cs_vspace->client->page_directory = vspace->page_directory;
+    /* only frames of at least 4K are supported */
+    assert(size_bits >= 12);
+    /* first map all the pages into the client */
+    int result = vspace_map_pages_at_vaddr(cs_vspace->client, caps, vaddr, num_pages, size_bits, reservation);
+    if (result) {
+        LOG_ERROR("Error mapping pages into client vspace");
+        /* some error, propogate up */
+        return result;
+    }
+    /* now map them all into the server */
+    result = dup_and_map_many(cs_vspace, caps, vaddr, num_pages, size_bits);
+    if (result) {
+        /* unmap */
+        LOG_ERROR("Error mapping pages into server vspace");
+        vspace_unmap_pages(cs_vspace->client, vaddr, num_pages, size_bits);
+        return result;
+    }
+    return 0;
+}
+
+/* Look up the frame cap backing vaddr. The client's mapping is the
+ * authoritative one, so the query is simply forwarded to the client. */
+static seL4_CPtr cs_get_cap(vspace_t *vspace, void *vaddr)
+{
+    client_server_vspace_t *data = (client_server_vspace_t*)vspace->data;
+    /* keep the client's page directory in sync with ours (see comment in
+     * sel4utils_get_cs_vspace) */
+    data->client->page_directory = vspace->page_directory;
+    return vspace_get_cap(data->client, vaddr);
+}
+
+/* Map caller-supplied frame caps at a free address chosen by the client
+ * vspace, then mirror the mappings into the server. Returns the client
+ * address, or NULL on failure (with the client mappings undone). */
+static void *cs_map_pages(vspace_t *vspace, seL4_CPtr caps[], seL4_CapRights rights,
+        size_t num_pages, size_t size_bits, int cacheable)
+{
+    client_server_vspace_t *cs_vspace = (client_server_vspace_t*)vspace->data;
+    cs_vspace->client->page_directory = vspace->page_directory;
+    /* only cacheable frames of at least 4K are supported */
+    assert(size_bits >= 12);
+    assert(cacheable);
+    /* first map all the pages into the client */
+    void *result = vspace_map_pages(cs_vspace->client, caps, rights, num_pages, size_bits, cacheable);
+    if (!result) {
+        LOG_ERROR("Error mapping pages into client vspace");
+        return result;
+    }
+    /* now map them all into the server */
+    int error = dup_and_map_many(cs_vspace, caps, result, num_pages, size_bits);
+    if (error) {
+        /* unmap */
+        LOG_ERROR("Error mapping pages into server vspace");
+        vspace_unmap_pages(cs_vspace->client, result, num_pages, size_bits);
+        return NULL;
+    }
+    return result;
+}
+
+/* Unmap pages from both sides: first remove the server-side shadow
+ * mappings, then unmap from the client. */
+static void cs_unmap_pages(vspace_t *vspace, void *vaddr, size_t num_pages, size_t size_bits)
+{
+    client_server_vspace_t *cs_vspace = (client_server_vspace_t*)vspace->data;
+    cs_vspace->client->page_directory = vspace->page_directory;
+    /* remove our mappings */
+    unmap_many(cs_vspace, vaddr, num_pages, size_bits);
+    /* unmap from the client */
+    vspace_unmap_pages(cs_vspace->client, vaddr, num_pages, size_bits);
+}
+
+/* Allocate fresh pages at vaddr in the client, then mirror them into the
+ * server. A server-side failure cannot currently be rolled back (the pages
+ * created in the client are not deleted), hence the assert below. */
+static int cs_new_pages_at_vaddr(vspace_t *vspace, void *vaddr, size_t num_pages,
+        size_t size_bits, reservation_t *reservation)
+{
+    client_server_vspace_t *cs_vspace = (client_server_vspace_t*)vspace->data;
+    cs_vspace->client->page_directory = vspace->page_directory;
+    /* only frames of at least 4K are supported */
+    assert(size_bits >= 12);
+    /* create new pages in the client first */
+    int result = vspace_new_pages_at_vaddr(cs_vspace->client, vaddr, num_pages, size_bits, reservation);
+    if (result) {
+        LOG_ERROR("Error creating pages in client vspace");
+        return result;
+    }
+    result = find_dup_and_map_many(cs_vspace, vaddr, num_pages, size_bits);
+    if (result) {
+        LOG_ERROR("Error mapping new page in server vspace");
+        assert(!"Cannot delete pages we created in client vspace");
+        return -1;
+    }
+    return 0;
+}
+
+/* Allocate fresh pages at a client-chosen address and mirror them into the
+ * server. As with cs_new_pages_at_vaddr, a server-side failure cannot be
+ * rolled back. Returns the client address, or NULL on failure. */
+static void *cs_new_pages(vspace_t *vspace, seL4_CapRights rights, size_t num_pages,
+        size_t size_bits)
+{
+    client_server_vspace_t *cs_vspace = (client_server_vspace_t*)vspace->data;
+    cs_vspace->client->page_directory = vspace->page_directory;
+    /* only frames of at least 4K are supported */
+    assert(size_bits >= 12);
+    void *vaddr = vspace_new_pages(cs_vspace->client, rights, num_pages, size_bits);
+    if (!vaddr) {
+        LOG_ERROR("Error creating new pages in client vspace");
+        return NULL;
+    }
+    int result = find_dup_and_map_many(cs_vspace, vaddr, num_pages, size_bits);
+    if (result) {
+        LOG_ERROR("Error mapping new page in server vspace");
+        assert(!"Cannot delete pages we created in client vspace");
+        return NULL;
+    }
+    return vaddr;
+}
+
+/* Free pages from both sides: drop the server-side shadow first, then free
+ * the pages in the client. */
+static void cs_free_pages(vspace_t *vspace, void *vaddr, size_t num_pages,
+        size_t size_bits)
+{
+    client_server_vspace_t *cs_vspace = (client_server_vspace_t*)vspace->data;
+    cs_vspace->client->page_directory = vspace->page_directory;
+    /* remove our mappings */
+    unmap_many(cs_vspace, vaddr, num_pages, size_bits);
+    /* free in client */
+    vspace_free_pages(cs_vspace->client, vaddr, num_pages, size_bits);
+}
+
+/* Initialise *vspace as a client-server proxy: calls on it behave as if
+ * made on `client`, but every frame mapped into the client is also
+ * duplicated and mapped into `server` (see sel4utils_cs_vspace_translate).
+ *
+ * Returns 0 on success, non-zero on allocation/bootstrap failure. */
+int sel4utils_get_cs_vspace(vspace_t *vspace, vka_t *vka, vspace_t *server, vspace_t *client)
+{
+    int error;
+    client_server_vspace_t *cs_vspace = malloc(sizeof(*cs_vspace));
+    /* the original code dereferenced the malloc result unconditionally */
+    if (cs_vspace == NULL) {
+        LOG_ERROR("Failed to allocate client server vspace data");
+        return -1;
+    }
+    cs_vspace->server = server;
+    cs_vspace->client = client;
+    cs_vspace->vka = vka;
+
+    error = sel4utils_get_vspace(server, &cs_vspace->translation, &cs_vspace->translation_data,
+            vka, 0, NULL, NULL);
+    if (error) {
+        LOG_ERROR("Failed to create translation vspace");
+        free(cs_vspace);
+        return error;
+    }
+
+    vspace->data = cs_vspace;
+
+    vspace->new_stack = cs_new_stack;
+    vspace->free_stack = cs_free_stack;
+
+    vspace->new_ipc_buffer = cs_new_ipc_buffer;
+    vspace->free_ipc_buffer = cs_free_ipc_buffer;
+
+    vspace->new_pages = cs_new_pages;
+    vspace->new_pages_at_vaddr = cs_new_pages_at_vaddr;
+    vspace->free_pages = cs_free_pages;
+
+    vspace->map_pages = cs_map_pages;
+    vspace->map_pages_at_vaddr = cs_map_pages_at_vaddr;
+    vspace->unmap_pages = cs_unmap_pages;
+
+    vspace->reserve_range = cs_reserve_range;
+    vspace->reserve_range_at = cs_reserve_range_at;
+    vspace->free_reservation = cs_free_reservation;
+
+    vspace->get_cap = cs_get_cap;
+
+    vspace->allocated_object = NULL;
+    vspace->allocated_object_cookie = NULL;
+
+    /* we always pretend to be the client. due to api limitations
+     * we need to constatly reset the clients page directory to ours
+     * in case ours was modified as a result of process loading */
+    vspace->page_directory = client->page_directory;
+
+    return 0;
+}
+
+/* Translate a client virtual address to the server virtual address where the
+ * same frame is mirrored. Returns NULL if the address is not mirrored. */
+void *sel4utils_cs_vspace_translate(vspace_t *vspace, void *addr)
+{
+    client_server_vspace_t *cs_vspace = (client_server_vspace_t*)vspace->data;
+
+    seL4_CPtr translation_result = get_entry(cs_vspace->translation_data.top_level, addr);
+
+    /* the translation will throw away any non aligned bits, if we got a successful
+     * translation then add the offset back onto the result */
+    return translation_result == 0 ? NULL : ((void*)translation_result) + OFFSET(addr);
+}
+
+/* Invoke proc(server_addr, chunk_len, cookie) for each piece of the client
+ * range [addr, addr + len), split on 4K page boundaries. Returns -1 if any
+ * page in the range is not mirrored, proc's result if it returns non-zero,
+ * and 0 otherwise. */
+int sel4utils_cs_vspace_for_each(vspace_t *vspace, void *addr, uint32_t len,
+                                 int (*proc)(void* addr, uint32_t len, void *cookie),
+                                 void *cookie)
+{
+    uint32_t current_addr;
+    uint32_t next_addr;
+    uint32_t end_addr = (uint32_t)(addr + len);
+    for (current_addr = (uint32_t)addr; current_addr < end_addr; current_addr = next_addr) {
+        uint32_t current_aligned = PAGE_ALIGN_4K(current_addr);
+        uint32_t next_page_start = current_aligned + PAGE_SIZE_4K;
+        /* a chunk never crosses a page boundary and never passes end_addr */
+        next_addr = MIN(end_addr, next_page_start);
+        void *saddr = sel4utils_cs_vspace_translate(vspace, (void*)current_aligned);
+        if (!saddr) {
+            return -1;
+        }
+        /* add the in-page offset back onto the translated base */
+        int result = proc((void*)(saddr + (current_addr - current_aligned)), next_addr - current_addr, cookie);
+        if (result) {
+            return result;
+        }
+    }
+    return 0;
+}
+
+#endif
diff --git a/src/vspace/vspace.c b/src/vspace/vspace.c
new file mode 100644
index 0000000..c8b8a16
--- /dev/null
+++ b/src/vspace/vspace.c
@@ -0,0 +1,566 @@
+/*
+ * Copyright 2014, NICTA
+ *
+ * This software may be distributed and modified according to the terms of
+ * the BSD 2-Clause license. Note that NO WARRANTY is provided.
+ * See "LICENSE_BSD2.txt" for details.
+ *
+ * @TAG(NICTA_BSD)
+ */
+
+/* see sel4utils/vspace.h for details */
+#include <autoconf.h>
+
+#if defined CONFIG_LIB_SEL4_VKA && defined CONFIG_LIB_SEL4_VSPACE
+
+#include <stdlib.h>
+#include <string.h>
+
+#include <sel4utils/vspace.h>
+
+#include <sel4utils/vspace_internal.h>
+
+#include <utils/util.h>
+
+/* see comment in vspace_internal for why these checks exist */
+compile_time_assert(pages_are_4k1, BOTTOM_LEVEL_BITS_OFFSET == seL4_PageBits);
+compile_time_assert(pages_are_4k2, BIT(BOTTOM_LEVEL_BITS_OFFSET) == PAGE_SIZE_4K);
+
+/* Allocate and install a zeroed bottom-level bookkeeping table covering
+ * vaddr. Returns 0 on success, -1 if the backing page could not be
+ * allocated. */
+static int
+create_level(vspace_t *vspace, void* vaddr)
+{
+    sel4utils_alloc_data_t *data = get_alloc_data(vspace);
+
+    /* We need a level in the bootstrapper vspace */
+    if (data->bootstrap == NULL) {
+        return bootstrap_create_level(vspace, vaddr);
+    }
+
+    /* Otherwise we allocate our level out of the bootstrapper vspace -
+     * which is where bookkeeping is mapped */
+    bottom_level_t *bottom_level = vspace_new_pages(data->bootstrap, seL4_AllRights, 1, seL4_PageBits);
+    if (bottom_level == NULL) {
+        return -1;
+    }
+    memset(bottom_level, 0, BIT(seL4_PageBits));
+
+    data->top_level[TOP_LEVEL_INDEX(vaddr)] = bottom_level;
+
+    return 0;
+}
+
+/* Ensure a bottom-level bookkeeping table exists for vaddr, creating one on
+ * demand. Returns 0 on success, -1 if creation failed. */
+int
+assure_level(vspace_t *vspace, void *vaddr)
+{
+    sel4utils_alloc_data_t *data = get_alloc_data(vspace);
+
+    assert(data->top_level != NULL);
+    assert(data->top_level != (void *) RESERVED);
+
+    if (data->top_level[TOP_LEVEL_INDEX(vaddr)] == NULL) {
+        int error = create_level(vspace, vaddr);
+        if (error) {
+            return -1;
+        }
+    }
+
+    return 0;
+}
+
+/* Return 1 iff every 4K page in the range is neither mapped nor reserved. */
+static int
+check_empty_range(bottom_level_t *top_level[], void *vaddr, size_t num_pages, size_t size_bits)
+{
+    int result = 1;
+    int num_4k_pages = BYTES_TO_4K_PAGES(num_pages * (1 << size_bits));
+    for (int i = 0; i < num_4k_pages && result; i++) {
+        /* a range is empty if it does not have a mapped cap and it isn't reserved */
+        result = is_available(top_level, vaddr + (i * PAGE_SIZE_4K))
+                 && !is_reserved(top_level, vaddr + (i * PAGE_SIZE_4K));
+    }
+
+    return result;
+}
+
+/* Return 1 iff every 4K page in the range is marked reserved. */
+static int
+check_reserved_range(bottom_level_t *top_level[], void *vaddr, size_t num_pages, size_t size_bits)
+{
+    int result = 1;
+    int num_4k_pages = BYTES_TO_4K_PAGES(num_pages * (1 << size_bits));
+    for (int i = 0; i < num_4k_pages && result; i++) {
+        /* the old comment here described the *empty* check, a copy-paste
+         * from check_empty_range; this loop tests reservation only */
+        result = is_reserved(top_level, vaddr + (i * PAGE_SIZE_4K));
+    }
+
+    return result;
+}
+
+/* check that [vaddr, vaddr + num_pages * 2^size_bits) lies entirely inside
+ * the given reservation */
+static int
+check_reservation_bounds(reservation_t *reservation, void *vaddr, size_t num_pages, size_t size_bits)
+{
+    void *range_end = vaddr + (num_pages * (1 << size_bits));
+    return vaddr >= reservation->start && range_end <= reservation->end;
+}
+
+/* A mapping request is valid when it lies inside the reservation AND the
+ * whole range is marked reserved in the bookkeeping. */
+static int
+check_reservation(bottom_level_t *top_level[], reservation_t *reservation, void *vaddr,
+                  size_t num_pages, size_t size_bits)
+{
+    /* logical && instead of the original bitwise &: both operands are 0/1
+     * so the value is unchanged, but && short-circuits and skips the page
+     * walk when the bounds check has already failed */
+    return check_reservation_bounds(reservation, vaddr, num_pages, size_bits) &&
+           check_reserved_range(top_level, vaddr, num_pages, size_bits);
+}
+
+/* Mark every 4K page of [vaddr, vaddr + bytes), rounded out to page
+ * boundaries, as reserved, recording rights and cacheability for later
+ * mapping. If reserve() fails partway, reservation->end records how far the
+ * reservation actually extends. */
+static void
+perform_reservation(vspace_t *vspace, reservation_t *reservation, void *vaddr, size_t bytes,
+                    seL4_CapRights rights, int cacheable)
+{
+    reservation->start = (void *) (seL4_Word)ROUND_DOWN((uint32_t) ((seL4_Word)vaddr), PAGE_SIZE_4K);
+    reservation->end = (void *) (seL4_Word)ROUND_UP((uint32_t) ((seL4_Word)vaddr) + bytes, PAGE_SIZE_4K);
+
+    reservation->rights = rights;
+    reservation->cacheable = cacheable;
+
+    int error = seL4_NoError;
+    void *v = reservation->start;
+
+    for (v = reservation->start; v < reservation->end &&
+            error == seL4_NoError; v += PAGE_SIZE_4K) {
+        error = reserve(vspace, v);
+    }
+
+    /* return the amount we sucessfully reserved */
+    reservation->end = v;
+}
+
+/* Map a single frame cap into this vspace's page directory, allocating an
+ * intermediate page table if required. Returns seL4_NoError on success, -1
+ * on failure (no cleanup is attempted).
+ * NOTE(review): size_bits is unused here — confirm large-page handling
+ * happens inside sel4utils_map_page. */
+int
+sel4utils_map_page_pd(vspace_t *vspace, seL4_CPtr cap, void *vaddr, seL4_CapRights rights,
+         int cacheable, size_t size_bits)
+{
+    vka_object_t pagetable = {0};
+    sel4utils_alloc_data_t *data = get_alloc_data(vspace);
+
+    int error = sel4utils_map_page(data->vka, vspace->page_directory, cap, vaddr,
+                rights, cacheable, &pagetable);
+    if (error) {
+        /* everything has gone to hell. Do no clean up. */
+        LOG_ERROR("Error mapping pages, bailing: %d", error);
+        return -1;
+    }
+
+    /* report any page table we allocated to the owner of this vspace */
+    if (pagetable.cptr != 0) {
+        vspace_maybe_call_allocated_object(vspace, pagetable);
+    }
+    pagetable.cptr = 0;
+
+    return seL4_NoError;
+}
+
+#ifdef CONFIG_VTX
+/* Map a frame into a guest (EPT) address space, allocating intermediate
+ * paging structures as needed and reporting them to the vspace owner. */
+int
+sel4utils_map_page_ept(vspace_t *vspace, seL4_CPtr cap, void *vaddr, seL4_CapRights rights,
+        int cacheable, size_t size_bits)
+{
+    struct sel4utils_alloc_data *data = get_alloc_data(vspace);
+    vka_object_t pagetable = {0};
+    vka_object_t pagedir = {0};
+
+    int error = sel4utils_map_ept_page(data->vka, vspace->page_directory, cap,
+                (seL4_Word) vaddr, rights, cacheable, size_bits, &pagetable, &pagedir);
+    if (error) {
+        LOG_ERROR("Error mapping pages, bailing\n");
+        return -1;
+    }
+
+    if (pagetable.cptr != 0) {
+        vspace_maybe_call_allocated_object(vspace, pagetable);
+        pagetable.cptr = 0;
+    }
+
+    if (pagedir.cptr != 0) {
+        vspace_maybe_call_allocated_object(vspace, pagedir);
+        pagedir.cptr = 0;
+    }
+
+    return seL4_NoError;
+}
+#endif /* CONFIG_VTX */
+
+/* Dispatch a page mapping through the mapper function installed in this
+ * vspace's allocation data (page-directory or EPT variant). */
+static int
+map_page(vspace_t *vspace, seL4_CPtr cap, void *vaddr, seL4_CapRights rights,
+        int cacheable, size_t size_bits)
+{
+    return get_alloc_data(vspace)->map_page(vspace, cap, vaddr, rights, cacheable, size_bits);
+}
+
+/* First-fit search for num_pages * 2^size_bits bytes of unused virtual
+ * address space, starting from the last allocation point. Returns the start
+ * address, or NULL if the search reaches KERNEL_RESERVED_START.
+ * NOTE(review): the alignment test is applied to `start` while availability
+ * is tested on `current` — confirm this yields size_bits-aligned results
+ * for size_bits > seL4_PageBits. */
+static void *
+find_range(sel4utils_alloc_data_t *data, size_t num_pages, size_t size_bits)
+{
+    /* look for a contiguous range that is free.
+     * We use first-fit with the optimisation that we store
+     * a pointer to the last thing we freed/allocated */
+    int num_4k_pages = BYTES_TO_4K_PAGES(num_pages * (1 << size_bits));
+    void *current = data->last_allocated;
+    size_t contiguous = 0;
+    void *start = data->last_allocated;
+    while (contiguous < num_4k_pages) {
+
+        if (is_available(data->top_level, current) && IS_ALIGNED((uint32_t)((seL4_Word)start), size_bits)) {
+            contiguous++;
+        } else {
+            start = current + PAGE_SIZE_4K;
+            contiguous = 0;
+        }
+
+        current += PAGE_SIZE_4K;
+
+        if (current >= (void *) KERNEL_RESERVED_START) {
+            LOG_ERROR("Out of virtual memory");
+            return NULL;
+        }
+
+    }
+
+    /* remember where we got to so the next search starts here */
+    data->last_allocated = current;
+
+    return start;
+}
+
+/* Allocate num_pages frames of size 2^size_bits from the vka, storing their
+ * caps in pages[]. Returns 0 on success, -1 on allocation failure.
+ * NOTE(review): frames allocated in earlier iterations are not released on
+ * failure — confirm callers treat this as fatal. */
+static int
+alloc_pages(vspace_t *vspace, seL4_CPtr pages[], size_t num_pages, size_t size_bits)
+{
+    sel4utils_alloc_data_t *data = get_alloc_data(vspace);
+    for (int i = 0; i < num_pages; i++) {
+        vka_object_t object;
+        if (vka_alloc_frame(data->vka, size_bits, &object) != 0) {
+            /* abort! */
+            LOG_ERROR("Failed to allocate page");
+            return -1;
+        }
+
+        /* let the vspace owner track the new frame object */
+        vspace_maybe_call_allocated_object(vspace, object);
+        pages[i] = object.cptr;
+    }
+
+    return 0;
+}
+
+/* Map each cap at successive 2^size_bits intervals from vaddr, recording
+ * the caps in the bookkeeping. Stops at the first error and returns it
+ * (already-mapped pages are not unmapped). */
+static int
+map_pages_at_vaddr(vspace_t *vspace, seL4_CPtr caps[], void *vaddr, size_t num_pages,
+                   size_t size_bits, seL4_CapRights rights, int cacheable)
+{
+    int error = seL4_NoError;
+    for (int i = 0; i < num_pages && error == seL4_NoError; i++) {
+        assert(caps[i] != 0);
+        error = map_page(vspace, caps[i], vaddr, rights, cacheable, size_bits);
+
+        if (error == seL4_NoError) {
+            error = update_entries(vspace, vaddr, caps[i], size_bits);
+            vaddr += (1 << size_bits);
+        }
+    }
+    return error;
+}
+
+/* Allocate num_pages fresh frames and map them at vaddr with the given
+ * rights. Returns seL4_NoError on success or the first error encountered. */
+static int
+new_pages_at_vaddr(vspace_t *vspace, void *vaddr, size_t num_pages, size_t size_bits,
+                   seL4_CapRights rights, int cacheable)
+{
+    seL4_CPtr pages[num_pages];
+
+    int error = alloc_pages(vspace, pages, num_pages, size_bits);
+    if (error == 0) {
+        error = map_pages_at_vaddr(vspace, pages, vaddr, num_pages, size_bits, rights, cacheable);
+    }
+
+    return error;
+}
+
+/* VSPACE INTERFACE FUNCTIONS */
+
+/* Map caller-supplied caps at vaddr. The range must lie inside the given
+ * reservation, whose rights and cacheability are applied. Returns 0 on
+ * success, -1 if the reservation check fails. */
+int
+sel4utils_map_pages_at_vaddr(vspace_t *vspace, seL4_CPtr caps[], void *vaddr,
+        size_t num_pages, size_t size_bits, reservation_t *reservation)
+{
+    sel4utils_alloc_data_t *data = get_alloc_data(vspace);
+
+    if (!check_reservation(data->top_level, reservation, vaddr, num_pages, size_bits)) {
+        return -1;
+    }
+
+    return map_pages_at_vaddr(vspace, caps, vaddr, num_pages, size_bits,
+            reservation->rights, reservation->cacheable);
+}
+
+/* Return the frame cap mapped at vaddr, or 0 if there is no mapping. A
+ * RESERVED bookkeeping marker is not a cap, so it is reported as 0. */
+seL4_CPtr
+sel4utils_get_cap(vspace_t *vspace, void *vaddr)
+{
+    seL4_CPtr entry = get_entry(get_alloc_data(vspace)->top_level, vaddr);
+    return entry == RESERVED ? 0 : entry;
+}
+
+/* Map caller-supplied caps at an address chosen by first-fit search.
+ * Returns the chosen address, or NULL on failure. */
+void *
+sel4utils_map_pages(vspace_t *vspace, seL4_CPtr caps[], seL4_CapRights rights,
+                    size_t num_pages, size_t size_bits, int cacheable)
+{
+    struct sel4utils_alloc_data *data = get_alloc_data(vspace);
+    void *vaddr = find_range(data, num_pages, size_bits);
+
+    if (vaddr == NULL) {
+        return NULL;
+    }
+
+    int error = map_pages_at_vaddr(vspace, caps, vaddr, num_pages, size_bits, rights,
+                cacheable);
+    if (error == seL4_NoError) {
+        return vaddr;
+    } else {
+        return NULL;
+    }
+}
+
+
+/* Unmap num_pages pages starting at vaddr while keeping the range reserved:
+ * the bookkeeping entries are reset to RESERVED so the region can be
+ * re-mapped later. The range must lie inside the given reservation.
+ * Returns 0 on success, -1 if the bounds check fails. */
+int sel4utils_unmap_reserved_pages(vspace_t *vspace, void *vaddr, size_t num_pages, size_t size_bits, reservation_t *reservation) {
+    if (!check_reservation_bounds(reservation, vaddr, num_pages, size_bits)) {
+        /* report via LOG_ERROR like every other failure path in this file
+         * (the original left a stray debug printf here) */
+        LOG_ERROR("Reservation check failed for vaddr %p", vaddr);
+        return -1;
+    }
+    for (int i = 0; i < num_pages; i++) {
+        seL4_CPtr cap = sel4utils_get_cap(vspace, vaddr);
+
+        if (cap != 0) {
+            /* reuse the cap we just looked up instead of repeating the
+             * get_entry traversal; cap != 0 means the entry is a real cap */
+            int error = seL4_ARCH_Page_Unmap(cap);
+            if (error != seL4_NoError) {
+                LOG_ERROR("Failed to unmap page at vaddr %p", vaddr);
+            }
+
+            /* mark the pages as reserved again */
+            update_entries(vspace, vaddr, RESERVED, size_bits);
+        }
+
+        vaddr += (1 << size_bits);
+    }
+
+    return 0;
+}
+
+/* Unmap num_pages pages starting at vaddr. Bookkeeping entries are left
+ * untouched; callers such as sel4utils_free_pages clear them separately. */
+void
+sel4utils_unmap_pages(vspace_t *vspace, void *vaddr, size_t num_pages, size_t size_bits)
+{
+    for (int i = 0; i < num_pages; i++) {
+        seL4_CPtr cap = sel4utils_get_cap(vspace, vaddr);
+
+        if (cap != 0) {
+            /* reuse the cap we just looked up instead of the original's
+             * second get_entry traversal; cap != 0 means the entry is a
+             * real frame cap (not RESERVED), so the values are identical */
+            int error = seL4_ARCH_Page_Unmap(cap);
+            if (error != seL4_NoError) {
+                LOG_ERROR("Failed to unmap page at vaddr %p", vaddr);
+            }
+        }
+
+        vaddr += (1 << size_bits);
+    }
+}
+
+/* Allocate and map fresh frames at vaddr. The range must lie inside the
+ * given reservation. Returns 0 on success, -1 on failure. */
+int
+sel4utils_new_pages_at_vaddr(vspace_t *vspace, void *vaddr, size_t num_pages,
+        size_t size_bits, reservation_t *reservation)
+{
+    struct sel4utils_alloc_data *data = get_alloc_data(vspace);
+
+    if (!check_reservation(data->top_level, reservation, vaddr, num_pages, size_bits)) {
+        LOG_ERROR("Range for vaddr %p with "DFMT" 4k pages not reserved!", vaddr, num_pages);
+        return -1;
+    }
+
+    return new_pages_at_vaddr(vspace, vaddr, num_pages, size_bits,
+            reservation->rights, reservation->cacheable);
+}
+
+/* Allocate and map fresh frames at an address chosen by first-fit search,
+ * always cacheable. Returns the chosen address, or NULL on failure. */
+void *
+sel4utils_new_pages(vspace_t *vspace, seL4_CapRights rights, size_t num_pages,
+                    size_t size_bits)
+{
+    struct sel4utils_alloc_data *data = get_alloc_data(vspace);
+
+    void *vaddr = find_range(data, num_pages, size_bits);
+    if (vaddr == NULL) {
+        return NULL;
+    }
+
+    int error = new_pages_at_vaddr(vspace, vaddr, num_pages, size_bits, rights, 1);
+    if (error == seL4_NoError) {
+        return vaddr;
+    } else {
+        return NULL;
+    }
+}
+
+/* Unmap num_pages pages starting at vaddr and clear their bookkeeping
+ * entries so the address range becomes available again.
+ * NOTE(review): the loop advances by PAGE_SIZE_4K per iteration while
+ * clearing 2^size_bits bytes of entries — confirm behaviour for
+ * size_bits > seL4_PageBits. */
+void
+sel4utils_free_pages(vspace_t *vspace, void *vaddr, size_t num_pages,
+        size_t size_bits)
+{
+    /* first unmap the pages */
+    sel4utils_unmap_pages(vspace, vaddr, num_pages, size_bits);
+
+    /* now mark them all as free */
+    for (int i = 0; i < num_pages; i++) {
+        clear_entries(vspace, vaddr, size_bits);
+        vaddr += PAGE_SIZE_4K;
+    }
+}
+
+/* Allocate a CONFIG_SEL4UTILS_STACK_SIZE byte stack out of 4K pages with a
+ * reserved (never mapped) guard page below it. Returns a pointer to the TOP
+ * of the stack, or NULL on failure. */
+void *
+sel4utils_new_stack(vspace_t *vspace)
+{
+    /* this implementation allocates stacks with small pages. */
+    int num_pages = BYTES_TO_4K_PAGES(CONFIG_SEL4UTILS_STACK_SIZE);
+    struct sel4utils_alloc_data *data = get_alloc_data(vspace);
+
+    void *vaddr = find_range(data, num_pages + 1, seL4_PageBits);
+    /* find_range returns NULL when out of virtual memory; the original
+     * code passed NULL straight into reserve() below */
+    if (vaddr == NULL) {
+        return NULL;
+    }
+
+    /* reserve the first page as the guard */
+    int error = reserve(vspace, vaddr);
+    if (error) {
+        return NULL;
+    }
+
+    void *stack_bottom = vaddr + PAGE_SIZE_4K;
+    error = new_pages_at_vaddr(vspace, stack_bottom, num_pages, seL4_PageBits,
+            seL4_AllRights, 1);
+
+    /* abort */
+    if (error != seL4_NoError) {
+        clear(vspace, vaddr);
+        return NULL;
+    }
+
+    /* return a pointer to the TOP of the stack */
+    return stack_bottom + CONFIG_SEL4UTILS_STACK_SIZE;
+}
+
+/* Free a stack created by sel4utils_new_stack: release the stack pages and
+ * unreserve the guard page below them. stack_top is the value returned by
+ * sel4utils_new_stack. */
+void
+sel4utils_free_stack(vspace_t *vspace, void *stack_top)
+{
+    int num_pages = BYTES_TO_4K_PAGES(CONFIG_SEL4UTILS_STACK_SIZE);
+
+    sel4utils_free_pages(vspace, stack_top - CONFIG_SEL4UTILS_STACK_SIZE,
+            num_pages, seL4_PageBits);
+
+    /* unreserve the guard page */
+    clear(vspace, stack_top - CONFIG_SEL4UTILS_STACK_SIZE - PAGE_SIZE_4K);
+}
+
+/* Allocate a single 4K page to serve as an IPC buffer. On success returns
+ * its address and writes the backing frame cap to *page; returns NULL on
+ * failure. */
+void *
+sel4utils_new_ipc_buffer(vspace_t *vspace, seL4_CPtr *page)
+{
+    void *vaddr = sel4utils_new_pages(vspace, seL4_AllRights, 1, seL4_PageBits);
+    if (vaddr == NULL) {
+        LOG_ERROR("Failed to create ipc buffer");
+        return NULL;
+    }
+
+    *page = sel4utils_get_cap(vspace, vaddr);
+
+    return vaddr;
+}
+
+/* Free an IPC buffer created by sel4utils_new_ipc_buffer (one 4K page). */
+void
+sel4utils_free_ipc_buffer(vspace_t *vspace, void *vaddr)
+{
+    sel4utils_free_pages(vspace, vaddr, 1, seL4_PageBits);
+}
+
+
+/* Reserve `size` bytes at a free address chosen by first-fit search, filling
+ * in the caller-provided reservation struct (no heap allocation). The chosen
+ * address is written to *result. Returns 0 on success, -1 on failure. */
+int sel4utils_reserve_range_no_alloc(vspace_t *vspace, reservation_t *reservation, size_t size,
+        seL4_CapRights rights, int cacheable, void **result)
+{
+    sel4utils_alloc_data_t *data = get_alloc_data(vspace);
+    void *vaddr = find_range(data, BYTES_TO_4K_PAGES(size), seL4_PageBits);
+
+    if (vaddr == NULL) {
+        return -1;
+    }
+
+    *result = vaddr;
+    perform_reservation(vspace, reservation, vaddr, size, rights, cacheable);
+    return 0;
+}
+
+/* Heap-allocating wrapper around sel4utils_reserve_range_no_alloc. Returns
+ * the new reservation, or NULL on failure; the caller releases it with
+ * sel4utils_free_reservation. */
+reservation_t *
+sel4utils_reserve_range(vspace_t *vspace, size_t size, seL4_CapRights rights,
+        int cacheable, void **result)
+{
+    reservation_t *reservation = malloc(sizeof(struct reservation));
+    if (reservation == NULL) {
+        LOG_ERROR("Malloc failed");
+        return NULL;
+    }
+
+    int error = sel4utils_reserve_range_no_alloc(vspace, reservation, size, rights, cacheable, result);
+    if (error) {
+        free(reservation);
+        return NULL;
+    }
+
+    return reservation;
+}
+
+/* Reserve `size` bytes at a fixed address, filling in the caller-provided
+ * reservation struct (no heap allocation). Fails if any page in the range is
+ * already mapped or reserved. Returns 0 on success, -1 on failure. */
+int sel4utils_reserve_range_at_no_alloc(vspace_t *vspace, reservation_t *reservation, void *vaddr,
+        size_t size, seL4_CapRights rights, int cacheable)
+{
+    sel4utils_alloc_data_t *data = get_alloc_data(vspace);
+    if (!check_empty_range(data->top_level, vaddr, BYTES_TO_4K_PAGES(size),
+            seL4_PageBits)) {
+        LOG_ERROR("Range not available at %p, size 0x"XFMT"", vaddr, size);
+        return -1;
+    }
+    perform_reservation(vspace, reservation, vaddr, size, rights, cacheable);
+    return 0;
+}
+
+/* Heap-allocating wrapper around sel4utils_reserve_range_at_no_alloc.
+ * Returns the new reservation, or NULL on failure. */
+reservation_t *
+sel4utils_reserve_range_at(vspace_t *vspace, void *vaddr, size_t size, seL4_CapRights
+        rights, int cacheable)
+{
+    reservation_t *reservation = malloc(sizeof(struct reservation));
+    if (reservation == NULL) {
+        LOG_ERROR("Malloc failed");
+        return NULL;
+    }
+
+    int error = sel4utils_reserve_range_at_no_alloc(vspace, reservation, vaddr, size, rights, cacheable);
+    if (error) {
+        free(reservation);
+        return NULL;
+    }
+    return reservation;
+}
+
+/* Clear the RESERVED markers covered by a reservation without freeing the
+ * reservation struct itself. Pages that were mapped over the reservation
+ * are left alone. */
+void sel4utils_free_reservation_no_alloc(vspace_t *vspace, reservation_t *reservation)
+{
+    sel4utils_alloc_data_t *data = get_alloc_data(vspace);
+    for (void *current = reservation->start; current < reservation->end; current += PAGE_SIZE_4K) {
+        if (get_entry(data->top_level, current) == RESERVED) {
+            clear(vspace, current);
+        }
+    }
+}
+
+/* Release a heap-allocated reservation: clear its markers and free the
+ * struct. */
+void
+sel4utils_free_reservation(vspace_t *vspace, reservation_t *reservation)
+{
+    sel4utils_free_reservation_no_alloc(vspace, reservation);
+    free(reservation);
+}
+
+#endif /* CONFIG_LIB_SEL4_VKA && CONFIG_LIB_SEL4_VSPACE */