Release snapshot
diff --git a/include/sel4utils/client_server_vspace.h b/include/sel4utils/client_server_vspace.h
new file mode 100644
index 0000000..de1561d
--- /dev/null
+++ b/include/sel4utils/client_server_vspace.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright 2014, NICTA
+ *
+ * This software may be distributed and modified according to the terms of
+ * the BSD 2-Clause license. Note that NO WARRANTY is provided.
+ * See "LICENSE_BSD2.txt" for details.
+ *
+ * @TAG(NICTA_BSD)
+ */
+
+/*
+ * This implements a virtual address space that proxies calls to two different
+ * vspaces to ensure that pages mapped in one (the client) are also mapped into
+ * the other (the server). This allows a server to efficiently read and write
+ * memory belonging to a client. No attempt is made to construct contiguous
+ * regions in the server when contiguous regions are created in the client.
+ * It currently cannot correctly handle stacks (since their size is unknown to
+ * us) or cacheability attributes (all mappings are currently assumed cached).
+ */
+#ifndef _UTILS_CLIENT_SERVER_VSPACE_H
+#define _UTILS_CLIENT_SERVER_VSPACE_H
+
+#include <autoconf.h>
+
+#if defined CONFIG_LIB_SEL4_VSPACE && defined CONFIG_LIB_SEL4_VKA
+
+#include <vspace/vspace.h>
+#include <vka/vka.h>
+
+/**
+ * Initialize a new client server vspace
+ *
+ * @param vspace vspace struct to initialize
+ * @param vka vka for future allocations
+ * @param server vspace to duplicate all mappings into
+ * @param client vspace to directly proxy calls to
+ *
+ * @return 0 on success
+ */
+int sel4utils_get_cs_vspace(vspace_t *vspace, vka_t *vka, vspace_t *server, vspace_t *client);
+
+/**
+ * Translate a virtual address in the client vspace to the corresponding address
+ * in the server vspace. The translation holds only for that address: pages are
+ * not contiguous in the server.
+ *
+ * @param vspace vspace to do translation with
+ * @param addr addr to lookup in the client
+ *
+ * @return Address in the server vspace, or NULL if not found
+ */
+void *sel4utils_cs_vspace_translate(vspace_t *vspace, void *addr);
+
+/**
+ * Perform a callback for every contiguous range of bytes in the server vspace
+ * for the range provided in the client vspace
+ *
+ * @param vspace vspace to do translation in
+ * @param addr Address in the client to start translation from
+ * @param len length of the range in the client
+ * @param proc Callback invoked with ranges of addresses in the server vspace, split at
+ *             page boundaries. If it returns non-zero the loop stops and that value is returned
+ * @param cookie Cookie to be passed to the callback function
+ *
+ * @return The callback's result if it returned non-zero, an internal error code if a mapping was not found, zero otherwise
+ */
+int sel4utils_cs_vspace_for_each(vspace_t *vspace, void *addr, uint32_t len,
+ int (*proc)(void* addr, uint32_t len, void *cookie),
+ void *cookie);
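+
+/*
+ * Usage sketch (editor's illustration, not part of the original API): copy a
+ * buffer out of the client by walking the server-side mappings. The callback
+ * and variable names below are hypothetical.
+ *
+ *     static int copy_chunk(void *addr, uint32_t len, void *cookie)
+ *     {
+ *         char **dest = cookie;
+ *         memcpy(*dest, addr, len);
+ *         *dest += len;
+ *         return 0;
+ *     }
+ *
+ *     char buf[256];
+ *     char *out = buf;
+ *     int error = sel4utils_cs_vspace_for_each(&cs_vspace, client_addr,
+ *                                              sizeof(buf), copy_chunk, &out);
+ */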
+
+#endif /* CONFIG_LIB_SEL4_VSPACE && CONFIG_LIB_SEL4_VKA */
+#endif /* _UTILS_CLIENT_SERVER_VSPACE_H */
diff --git a/include/sel4utils/elf.h b/include/sel4utils/elf.h
new file mode 100644
index 0000000..2348bc1
--- /dev/null
+++ b/include/sel4utils/elf.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright 2014, NICTA
+ *
+ * This software may be distributed and modified according to the terms of
+ * the BSD 2-Clause license. Note that NO WARRANTY is provided.
+ * See "LICENSE_BSD2.txt" for details.
+ *
+ * @TAG(NICTA_BSD)
+ */
+#ifndef SEL4UTILS_ELF_H
+#define SEL4UTILS_ELF_H
+
+#include <autoconf.h>
+
+#if (defined CONFIG_LIB_SEL4_VKA && defined CONFIG_LIB_SEL4_VSPACE)
+
+#include <vka/vka.h>
+
+#include <vspace/vspace.h>
+
+typedef struct sel4utils_elf_region {
+ seL4_CapRights rights;
+ /* These two vstarts may differ if the elf was not mapped one-to-one. Such an elf is not
+ * runnable, but this allows it to be loaded into a vspace where it is not intended to be run */
+ void *elf_vstart;
+ void *reservation_vstart;
+ uint32_t size;
+ reservation_t *reservation;
+ int cacheable;
+} sel4utils_elf_region_t;
+
+/**
+ * Load an elf file into a vspace.
+ *
+ * The loader vspace and vka allocation will be preserved (no extra cslots or objects or vaddrs
+ * will leak from this function), even in the case of an error.
+ *
+ * The loadee vspace and vka will be altered: cslots will be allocated for each frame to be
+ * mapped into the address space, and frames will be allocated. In case of failure the
+ * virtual address space is left in the state it was in when the failure occurred.
+ *
+ * @param loadee the vspace to load the elf file into
+ * @param loader the vspace we are loading from
+ * @param loadee_vka allocator to use for allocation in the loadee vspace
+ * @param loader_vka allocator to use for loader vspace. Can be the same as loadee_vka.
+ * @param image_name name of the image in the cpio archive to load.
+ * @param regions Optional array for list of regions to be placed. Assumed to be the correct
+ size as reported by a call to sel4utils_elf_num_regions
+ * @param mapanywhere If true allows the elf to be loaded anywhere in the vspace. Regions will
+ still be contiguous
+ *
+ * @return The entry point of the new process, NULL on error
+ */
+void *
+sel4utils_elf_load_record_regions(vspace_t *loadee, vspace_t *loader, vka_t *loadee_vka,
+ vka_t *loader_vka, char *image_name, sel4utils_elf_region_t *regions, int mapanywhere);
+
+/**
+ * Wrapper for sel4utils_elf_load_record_regions. Does not record or perform reservations, and
+ * maps the elf at its intended virtual addresses
+ *
+ * @param loadee the vspace to load the elf file into
+ * @param loader the vspace we are loading from
+ * @param loadee_vka allocator to use for allocation in the loadee vspace
+ * @param loader_vka allocator to use for loader vspace. Can be the same as loadee_vka.
+ * @param image_name name of the image in the cpio archive to load.
+ *
+ * @return The entry point of the new process, NULL on error
+ */
+void *
+sel4utils_elf_load(vspace_t *loadee, vspace_t *loader, vka_t *loadee_vka,
+ vka_t *loader_vka, char *image_name);
+
+/**
+ * Parses an elf file but does not actually load it. Merely reserves the regions in the vspace
+ * for where the elf segments would go. This is used for lazy loading / copy on write
+ *
+ * @param loadee the vspace to reserve the elf regions in
+ * @param image_name name of the image in the cpio archive to parse.
+ * @param regions Array for list of regions to be placed. Assumed to be the correct
+ size as reported by a call to sel4utils_elf_num_regions
+ *
+ * @return The entry point of the elf, NULL on error
+ */
+void *
+sel4utils_elf_reserve(vspace_t *loadee, char *image_name, sel4utils_elf_region_t *regions);
+
+/**
+ * Parses an elf file and returns the number of loadable regions. The result of this
+ * is used to calculate the number of regions to pass to sel4utils_elf_reserve and
+ * sel4utils_elf_load_record_regions
+ *
+ * @param image_name name of the image in the cpio archive to inspect
+ * @return Number of loadable regions in the elf
+ */
+int
+sel4utils_elf_num_regions(char *image_name);
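+
+/*
+ * Usage sketch (editor's illustration): query the region count first and size
+ * the regions array to match before loading. All variable names and the image
+ * name are hypothetical; error handling is elided.
+ *
+ *     int num_regions = sel4utils_elf_num_regions("client_image");
+ *     sel4utils_elf_region_t regions[num_regions];
+ *     void *entry = sel4utils_elf_load_record_regions(&loadee_vspace, &loader_vspace,
+ *                                                     &loadee_vka, &loader_vka,
+ *                                                     "client_image", regions, 0);
+ */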
+
+#endif /* (defined CONFIG_LIB_SEL4_VKA && defined CONFIG_LIB_SEL4_VSPACE) */
+#endif /* SEL4UTILS_ELF_H */
diff --git a/include/sel4utils/mapping.h b/include/sel4utils/mapping.h
new file mode 100644
index 0000000..f0b967b
--- /dev/null
+++ b/include/sel4utils/mapping.h
@@ -0,0 +1,124 @@
+/*
+ * Copyright 2014, NICTA
+ *
+ * This software may be distributed and modified according to the terms of
+ * the BSD 2-Clause license. Note that NO WARRANTY is provided.
+ * See "LICENSE_BSD2.txt" for details.
+ *
+ * @TAG(NICTA_BSD)
+ */
+
+#ifndef _UTILS_MAPPING_H_
+#define _UTILS_MAPPING_H_
+
+#include <utils/util.h>
+#include <sel4/sel4.h>
+#include <autoconf.h>
+
+
+#if defined(CONFIG_ARCH_ARM)
+
+/* sizes (in bits) of pages supported by sel4 in ascending order */
+#if defined(ARM_HYP)
+static int UNUSED sel4_supported_page_sizes[] = {12, 16, 21, 25};
+#else
+static int UNUSED sel4_supported_page_sizes[] = {12, 16, 20, 24};
+#endif
+
+#define seL4_ARCH_Uncached_VMAttributes 0
+
+#define seL4_ARCH_Page_Map seL4_ARM_Page_Map
+#define seL4_ARCH_Page_Unmap seL4_ARM_Page_Unmap
+#define seL4_ARCH_PageTable_Map seL4_ARM_PageTable_Map
+#define seL4_ARCH_ASIDPool_Assign seL4_ARM_ASIDPool_Assign
+#define seL4_ARCH_PageTableObject seL4_ARM_PageTableObject
+#define seL4_ARCH_PageDirectoryObject seL4_ARM_PageDirectoryObject
+#define seL4_ARCH_Default_VMAttributes seL4_ARM_Default_VMAttributes
+#define seL4_ARCH_VMAttributes seL4_ARM_VMAttributes
+#define seL4_ARCH_4KPage seL4_ARM_SmallPageObject
+
+#elif defined(CONFIG_X86_64)
+
+/* sizes (in bits) of pages supported by sel4 in ascending order */
+static int UNUSED sel4_supported_page_sizes[] = {12, 21};
+
+#define seL4_ARCH_Page_Unmap seL4_X64_Page_Unmap
+#define seL4_ARCH_Page_Map seL4_X64_Page_Map
+#define seL4_ARCH_PageTable_Map seL4_X64_PageTable_Map
+#define seL4_ARCH_PageDirectory_Map seL4_X64_PageDirectory_Map
+#define seL4_ARCH_PageDirectoryPointerTable_Map seL4_X64_PageDirectoryPointerTable_Map
+#define seL4_CapInitThreadPD seL4_CapInitThreadPML4
+#define seL4_ARCH_VMAttributes seL4_IA32_VMAttributes
+#define seL4_ARCH_4KPage seL4_X64_4K
+#define seL4_ARCH_Default_VMAttributes seL4_IA32_Default_VMAttributes
+
+#elif defined(CONFIG_ARCH_IA32) /* CONFIG_ARCH_ARM */
+
+/* sizes (in bits) of pages supported by sel4 in ascending order */
+static int UNUSED sel4_supported_page_sizes[] = {12, 22};
+
+#define seL4_ARCH_Page_Map seL4_IA32_Page_Map
+#define seL4_ARCH_Page_Unmap seL4_IA32_Page_Unmap
+#define seL4_ARCH_PageTable_Map seL4_IA32_PageTable_Map
+#define seL4_ARCH_ASIDPool_Assign seL4_IA32_ASIDPool_Assign
+#define seL4_ARCH_PageTableObject seL4_IA32_PageTableObject
+#define seL4_ARCH_PageDirectoryObject seL4_IA32_PageDirectoryObject
+#define seL4_ARCH_Default_VMAttributes seL4_IA32_Default_VMAttributes
+#define seL4_ARCH_VMAttributes seL4_IA32_VMAttributes
+#define seL4_ARCH_4KPage seL4_IA32_4K
+#define seL4_ARCH_Uncached_VMAttributes 0
+
+#endif /* CONFIG_ARCH_IA32 */
+
+#define NUM_SEL4_PAGE_SIZES ((int) ARRAY_SIZE(sel4_supported_page_sizes))
+
+#ifdef CONFIG_LIB_SEL4_VKA
+
+#include <vka/vka.h>
+#include <vka/object.h>
+
+/* Map a page to a virtual address, allocating a page table if necessary.
+*
+* @param vka a vka compliant allocator
+* @param pd page directory to map the page into
+* @param frame capability to the frame to map in
+* @param vaddr unmapped virtual address to map the page into
+* @param rights permissions to map the page with
+* @param cacheable 1 if the page should be cached (0 if it is for DMA)
+* @param pagetable empty vka_object_t structure to be populated with page table
+* info if one is allocated
+*
+* @return seL4 error code, or -1 if allocation failed.
+*/
+int sel4utils_map_page(vka_t *vka, seL4_CPtr pd, seL4_CPtr frame, void *vaddr,
+ seL4_CapRights rights, int cacheable, vka_object_t *pagetable);
+
+/** Convenient wrapper around sel4utils_map_page for when you don't want to track the allocated page table (it is leaked) */
+static inline int sel4utils_map_page_leaky(vka_t *vka, seL4_CPtr pd, seL4_CPtr frame, void *vaddr,
+ seL4_CapRights rights, int cacheable)
+{
+ vka_object_t pagetable;
+ return sel4utils_map_page(vka, pd, frame, vaddr, rights, cacheable, &pagetable);
+}
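+
+/*
+ * Usage sketch (editor's illustration): allocate a 4K frame with the vka and map
+ * it into a page directory, keeping hold of any page table that gets allocated.
+ * Assumes the standard libsel4vka helper vka_alloc_frame; other names are
+ * hypothetical.
+ *
+ *     vka_object_t frame, pt;
+ *     int error = vka_alloc_frame(&vka, seL4_PageBits, &frame);
+ *     if (error == 0) {
+ *         error = sel4utils_map_page(&vka, pd_cap, frame.cptr, (void *) 0x10000000,
+ *                                    seL4_AllRights, 1, &pt);
+ *     }
+ */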
+
+#endif /* CONFIG_LIB_SEL4_VKA */
+
+#ifdef CONFIG_ARCH_IA32
+
+#ifdef CONFIG_IOMMU
+int sel4utils_map_iospace_page(vka_t *vka, seL4_CPtr iospace, seL4_CPtr frame, seL4_Word vaddr,
+ seL4_CapRights rights, int cacheable, seL4_Word size_bits,
+ vka_object_t *pts, int *num_pts);
+#endif
+
+#ifdef CONFIG_VTX
+int sel4utils_map_ept_page(vka_t *vka, seL4_CPtr pd, seL4_CPtr frame, seL4_Word vaddr,
+ seL4_CapRights rights, int cacheable, seL4_Word size_bits, vka_object_t *pagetable, vka_object_t *pagedir);
+
+#endif
+#endif
+
+
+
+#endif /* _UTILS_MAPPING_H_ */
diff --git a/include/sel4utils/process.h b/include/sel4utils/process.h
new file mode 100644
index 0000000..4e0d909
--- /dev/null
+++ b/include/sel4utils/process.h
@@ -0,0 +1,242 @@
+/*
+ * Copyright 2014, NICTA
+ *
+ * This software may be distributed and modified according to the terms of
+ * the BSD 2-Clause license. Note that NO WARRANTY is provided.
+ * See "LICENSE_BSD2.txt" for details.
+ *
+ * @TAG(NICTA_BSD)
+ */
+#ifndef SEL4UTILS_PROCESS_H
+#define SEL4UTILS_PROCESS_H
+
+#include <autoconf.h>
+
+#if (defined CONFIG_LIB_SEL4_VSPACE && defined CONFIG_LIB_SEL4_VKA)
+
+#include <vka/vka.h>
+#include <vspace/vspace.h>
+
+#include <sel4utils/thread.h>
+#include <sel4utils/vspace.h>
+#include <sel4utils/elf.h>
+
+typedef struct object_node object_node_t;
+
+struct object_node {
+ vka_object_t object;
+ object_node_t *next;
+};
+
+typedef struct {
+ vka_object_t pd;
+ vspace_t vspace;
+ sel4utils_alloc_data_t data;
+ vka_object_t cspace;
+ uint32_t cspace_size;
+ uint32_t cspace_next_free;
+ sel4utils_thread_t thread;
+ vka_object_t fault_endpoint;
+ void *entry_point;
+ object_node_t *allocated_object_list_head;
+ /* if the elf wasn't loaded into the address space, this describes the regions.
+ * this permits lazy loading / copy on write / page sharing / whatever crazy thing
+ * you want to implement */
+ int num_elf_regions;
+ sel4utils_elf_region_t *elf_regions;
+} sel4utils_process_t;
+
+/* sel4utils processes start with some caps in their cspace.
+ * These caps are placed in the following slots:
+ */
+enum sel4utils_cspace_layout {
+ /*
+ * The root cnode (with appropriate guard)
+ */
+ SEL4UTILS_CNODE_SLOT = 1,
+ /* The slot in the cspace that the fault endpoint is placed in if
+ * sel4utils_configure_process is used.
+ */
+ SEL4UTILS_ENDPOINT_SLOT = 2,
+};
+
+typedef struct {
+ /* should we handle elf logic at all? */
+ bool is_elf;
+ /* if so what is the image name? */
+ char *image_name;
+ /* Do you want the elf image preloaded? */
+ bool do_elf_load;
+
+ /* otherwise what is the entry point? */
+ void *entry_point;
+
+ /* should we create a default single level cspace? */
+ bool create_cspace;
+ /* if so how big ? */
+ int one_level_cspace_size_bits;
+
+ /* otherwise what is the root cnode ?*/
+ /* Note if you use a custom cspace then
+ * sel4utils_copy_cap_to_process etc will not work */
+ vka_object_t cnode;
+
+ /* do you want us to create a vspace for you? */
+ bool create_vspace;
+ /* if not what is the page dir, and what is the vspace */
+ vspace_t *vspace;
+ vka_object_t page_dir;
+
+ /* if so, are there any regions you want left clear? */
+ sel4utils_elf_region_t *reservations;
+ int num_reservations;
+
+ /* do you want a fault endpoint created? */
+ bool create_fault_endpoint;
+ /* otherwise what is it */
+ vka_object_t fault_endpoint;
+
+ int priority;
+#ifndef CONFIG_KERNEL_STABLE
+ seL4_CPtr asid_pool;
+#endif
+} sel4utils_process_config_t;
+
+/**
+ * Copy data from one vspace to another. Data is in POSIX standard argc, argv style.
+ *
+ * This is intended for use when loading applications of the format:
+ *
+ * int main(int argc, char **argv) { };
+ *
+ *
+ * @param current the vspace we are loading the arguments from
+ * @param target the vspace we are loading the arguments to
+ * @param vka the vka allocation interface to use when allocating pages.
+ * the pages will first be mapped into the current vspace, then unmapped and
+ * mapped into the target vspace, and left that way.
+ * @param argc the number of strings
+ * @param argv an array of c strings.
+ *
+ * @return NULL on failure, otherwise a pointer in the target address space where argv has
+ * been written.
+ */
+void *sel4utils_copy_args(vspace_t *current, vspace_t *target, vka_t *vka,
+ int argc, char *argv[]);
+
+/**
+ * Start a process, and copy arguments into the processes address space.
+ *
+ * This is intended for use when loading applications of the format:
+ *
+ * int main(int argc, char **argv) { };
+ *
+ * The third argument (in r2 for arm, on stack for ia32) will be the address of the ipc buffer.
+ * This is intended to be loaded by the crt into the data word of the ipc buffer,
+ * or you can just add it to your function prototype.
+ *
+ * Add the following to your crt to use it this way:
+ *
+ * arm: str r2, [r2, #484]
+ * ia32: popl %ebp
+ * movl %ebp, 484(%ebp)
+ *
+ * @param process initialised sel4utils process struct.
+ * @param vka vka interface to use for allocation of frames.
+ * @param vspace the current vspace.
+ * @param argc the number of arguments.
+ * @param argv a pointer to an array of strings in the current vspace.
+ * @param resume 1 to start the process, 0 to leave suspended.
+ *
+ * @return -1 on error, 0 on success.
+ *
+ */
+int sel4utils_spawn_process(sel4utils_process_t *process, vka_t *vka, vspace_t *vspace,
+ int argc, char *argv[], int resume);
+
+/**
+ * This is the function to use if you just want to set up a process as fast as possible.
+ * It creates a simple cspace and vspace for you, allocates a fault endpoint and puts
+ * it into the new cspace.
+ *
+ * It loads the elf file into the new vspace, parses the elf file and stores the entry point
+ * into process->entry_point.
+ *
+ * It uses the same vka for both allocations - the new process will not have an allocator.
+ *
+ * The process will have just one thread.
+ *
+ * Otherwise, use it as a model for loading processes yourself with the elf_load functions.
+ *
+ * WARNING: when launching processes on ia32 please ensure your calling convention
+ * matches that documented in sel4utils_start_thread. Otherwise your arguments
+ * won't come through correctly.
+ *
+ * @param process uninitialised process struct.
+ * @param vka allocator to use to allocate objects.
+ * @param vspace vspace allocator for the current vspace.
+ * @param priority priority to configure the process to run as.
+ * @param image_name name of the elf image to load from the cpio archive.
+ *
+ * @return 0 on success, -1 on error.
+ */
+int sel4utils_configure_process(sel4utils_process_t *process, vka_t *vka, vspace_t *vspace,
+ uint8_t priority, char *image_name);
+
+/**
+ * Configure a process with more customisations (Create your own vspace, customise cspace size).
+ *
+ * @param process uninitialised process struct
+ * @param target_vka allocator to use to allocate objects.
+ * @param spawner_vspace vspace to use to allocate virtual memory in the current address space.
+ * @param config process config.
+ *
+ * @return 0 on success, -1 on error.
+ */
+int sel4utils_configure_process_custom(sel4utils_process_t *process, vka_t *target_vka,
+ vspace_t *spawner_vspace, sel4utils_process_config_t config);
+
+/**
+ * Copy a cap into a process' cspace.
+ *
+ * This will only work if you configured the process using one of the above functions, or
+ * have mimicked their functionality.
+ *
+ * @param process process to copy the cap to
+ * @param src path in the current cspace to copy the cap from
+ *
+ * @return 0 on failure, otherwise the slot in the process's cspace.
+ */
+seL4_CPtr sel4utils_copy_cap_to_process(sel4utils_process_t *process, cspacepath_t src);
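+
+/*
+ * Usage sketch (editor's illustration) of the simple process lifecycle:
+ * configure, copy a cap across, then spawn with the cap's slot as an argument.
+ * Assumes the libsel4vka helper vka_cspace_make_path; other names are
+ * hypothetical and error handling is elided.
+ *
+ *     sel4utils_process_t process;
+ *     int error = sel4utils_configure_process(&process, &vka, &vspace, 100, "client_image");
+ *
+ *     cspacepath_t ep_path;
+ *     vka_cspace_make_path(&vka, endpoint_cap, &ep_path);
+ *     seL4_CPtr child_ep = sel4utils_copy_cap_to_process(&process, ep_path);
+ *
+ *     char ep_str[16];
+ *     snprintf(ep_str, sizeof(ep_str), "%d", (int) child_ep);
+ *     char *argv[] = { ep_str };
+ *     error = sel4utils_spawn_process(&process, &vka, &vspace, 1, argv, 1);
+ */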
+
+/**
+ *
+ * Mint a cap into a process' cspace.
+ *
+ * As above, except mint the cap with rights and data.
+ *
+ * @return 0 on failure, otherwise the slot in the cspace where the new cap is.
+ */
+seL4_CPtr sel4utils_mint_cap_to_process(sel4utils_process_t *process, cspacepath_t src, seL4_CapRights rights, seL4_CapData_t data);
+
+/**
+ * Destroy a process.
+ *
+ * This will free everything possible associated with a process and tear down the vspace.
+ *
+ * @param process process to destroy
+ * @param vka allocator used to allocate objects for this process
+ */
+void sel4utils_destroy_process(sel4utils_process_t *process, vka_t *vka);
+
+
+/*
+ * sel4utils default allocated object function for vspaces.
+ *
+ * Stores a list of allocated objects in the process struct and frees them
+ * when sel4utils_destroy_process is called.
+ */
+void sel4utils_allocated_object(void *cookie, vka_object_t object);
+
+#endif /* (defined CONFIG_LIB_SEL4_VSPACE && defined CONFIG_LIB_SEL4_VKA) */
+#endif /* SEL4UTILS_PROCESS_H */
diff --git a/include/sel4utils/profile.h b/include/sel4utils/profile.h
new file mode 100644
index 0000000..07ad529
--- /dev/null
+++ b/include/sel4utils/profile.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright 2014, NICTA
+ *
+ * This software may be distributed and modified according to the terms of
+ * the BSD 2-Clause license. Note that NO WARRANTY is provided.
+ * See "LICENSE_BSD2.txt" for details.
+ *
+ * @TAG(NICTA_BSD)
+ */
+
+#ifndef _UTILS_PROFILE_H_
+#define _UTILS_PROFILE_H_
+
+#include <sel4utils/util.h>
+#include <stdint.h>
+
+#define PROFILE_VAR_TYPE_INT32 1
+#define PROFILE_VAR_TYPE_INT64 2
+
+typedef struct profile_var {
+ int type;
+ void *var;
+ const char *varname;
+ const char *description;
+} profile_var_t;
+
+#define __WATCH_VAR(_type, _var, _description, _unique) \
+ static profile_var_t profile_##_unique \
+ __attribute__((used)) __attribute__((section("_profile_var"))) \
+ = {.type = _type, .var = &_var, .varname = #_var, .description = _description}
+
+#define _WATCH_VAR32(var, description, unique) \
+ compile_time_assert(profile_size_##unique, sizeof(var) == 4); \
+ __WATCH_VAR(PROFILE_VAR_TYPE_INT32, var, description, unique)
+
+#define WATCH_VAR32(var, description) \
+ _WATCH_VAR32(var, description, __LINE__)
+
+#define _WATCH_VAR64(var, description, unique) \
+ compile_time_assert(profile_size_##unique, sizeof(var) == 8); \
+ __WATCH_VAR(PROFILE_VAR_TYPE_INT64, var, description, unique)
+
+#define WATCH_VAR64(var, description) \
+ _WATCH_VAR64(var, description, __LINE__)
+
+typedef void (*profile_callback32)(uint32_t value, const char *varname, const char *description, void *cookie);
+typedef void (*profile_callback64)(uint64_t value, const char *varname, const char *description, void *cookie);
+
+/* Sample functions that just output the vars using printf. cookie is ignored */
+void profile_print32(uint32_t value, const char *varname, const char *description, void *cookie);
+void profile_print64(uint64_t value, const char *varname, const char *description, void *cookie);
+
+/* Iterates over all profile variables and calls back the specified function(s)
+ * with the current value. */
+void profile_scrape(profile_callback32 callback32, profile_callback64 callback64, void *cookie);
+
+#define _PSTART_TIME(x) uint32_t __begin_time##x = read_ccnt()
+#define _PEND_TIME(x) uint32_t __end_time##x = read_ccnt()
+#define _PTIME_DIFF(x) (__end_time##x - __begin_time##x)
+#define PINC(name) PADD(name, 1)
+#define PADDTIMEDIFF(name) PADD(name, PTIMEDIFF(name))
+#define PADDTIME(name) PEND_TIME(name); PADDTIMEDIFF(name)
+#define PTIMEDIFF(x) _PTIME_DIFF(x)
+
+#ifdef PROFILE
+#define PVARUINT32(name, desc) static uint32_t name = 0; WATCH_VAR32(name, desc);
+#define PVARUINT64(name, desc) static uint64_t name = 0; WATCH_VAR64(name, desc);
+#define PADD(name, x) do {name += (x); } while(0)
+#define PSTART_TIME(x) _PSTART_TIME(x)
+#define PEND_TIME(x) _PEND_TIME(x)
+#else
+#define PVARUINT32(name, desc)
+#define PVARUINT64(name, desc)
+#define PADD(name, x) do { } while(0)
+#define PSTART_TIME(x) do { } while(0)
+#define PEND_TIME(x) do { } while(0)
+#endif
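+
+/*
+ * Usage sketch (editor's illustration): count invocations of a hot path and
+ * accumulate the cycles it takes, then dump all watched variables. Only active
+ * when PROFILE is defined, and relies on the read_ccnt() cycle counter used by
+ * the timing macros above; do_work and the variable names are hypothetical.
+ *
+ *     PVARUINT32(foo_calls, "number of calls to foo");
+ *     PVARUINT64(foo_cycles, "cycles spent in foo");
+ *
+ *     void foo(void)
+ *     {
+ *         PINC(foo_calls);
+ *         PSTART_TIME(foo_cycles);
+ *         do_work();
+ *         PADDTIME(foo_cycles);
+ *     }
+ *
+ *     profile_scrape(profile_print32, profile_print64, NULL);
+ */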
+
+#endif
diff --git a/include/sel4utils/sel4_debug.h b/include/sel4utils/sel4_debug.h
new file mode 100644
index 0000000..e1358c4
--- /dev/null
+++ b/include/sel4utils/sel4_debug.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2014, NICTA
+ *
+ * This software may be distributed and modified according to the terms of
+ * the BSD 2-Clause license. Note that NO WARRANTY is provided.
+ * See "LICENSE_BSD2.txt" for details.
+ *
+ * @TAG(NICTA_BSD)
+ */
+
+/*
+ Authors: Cristan Szmajda, Ben Leslie
+ Description:
+ Program diagnostics as per 7.2
+ Status: Complete.
+*/
+
+/*
+ * Like <assert.h>, this header may safely be included multiple times with or without NDEBUG
+ */
+#undef sel4_error
+
+#define sel4_error(e, str) ((e == seL4_NoError) ? (void)0 : __sel4_error(e, __FILE__, __func__, __LINE__, str))
+
+/* Implemented in src/assert.c */
+#ifndef _SEL4_DEBUG_H_
+#define _SEL4_DEBUG_H_
+
+void __sel4_error(int, const char *, const char *, int, char *);
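+
+/*
+ * Usage sketch (editor's illustration): wrap a kernel invocation so that any
+ * non-zero seL4 error aborts with file, function and line information.
+ *
+ *     int error = seL4_TCB_Suspend(tcb_cap);
+ *     sel4_error(error, "failed to suspend thread");
+ */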
+
+#endif /* _SEL4_DEBUG_H_ */
diff --git a/include/sel4utils/stack.h b/include/sel4utils/stack.h
new file mode 100644
index 0000000..02e639e
--- /dev/null
+++ b/include/sel4utils/stack.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2014, NICTA
+ *
+ * This software may be distributed and modified according to the terms of
+ * the BSD 2-Clause license. Note that NO WARRANTY is provided.
+ * See "LICENSE_BSD2.txt" for details.
+ *
+ * @TAG(NICTA_BSD)
+ */
+#ifndef __SEL4UTILS_STACK_H
+#define __SEL4UTILS_STACK_H
+
+#include <vspace/vspace.h>
+
+/**
+ * Allocate a new stack and start running func on it.
+ * If func returns, you will be back on the old stack.
+ *
+ * @param vspace interface to allocate stack with
+ * @param func to jump to with the new stack.
+ * @return the return value of func, or -1 if stack allocation fails.
+ * Note: it would be unwise to write func such that it returns -1.
+ *
+ */
+int sel4utils_run_on_stack(vspace_t *vspace, int (*func)(void));
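+
+/*
+ * Usage sketch (editor's illustration): move the rest of main onto a freshly
+ * allocated stack. The function and variable names are hypothetical.
+ *
+ *     static int run_main(void)
+ *     {
+ *         return real_main();
+ *     }
+ *
+ *     int result = sel4utils_run_on_stack(&vspace, run_main);
+ */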
+
+#endif /* __SEL4UTILS_STACK_H */
diff --git a/include/sel4utils/thread.h b/include/sel4utils/thread.h
new file mode 100644
index 0000000..0f665a8
--- /dev/null
+++ b/include/sel4utils/thread.h
@@ -0,0 +1,154 @@
+/*
+ * Copyright 2014, NICTA
+ *
+ * This software may be distributed and modified according to the terms of
+ * the BSD 2-Clause license. Note that NO WARRANTY is provided.
+ * See "LICENSE_BSD2.txt" for details.
+ *
+ * @TAG(NICTA_BSD)
+ */
+
+/**
+ *
+ * Provides basic thread configuration/starting/cleanup functions.
+ *
+ * Any other operations (start, stop, resume) should use the seL4 API directly on
+ * sel4utils_thread_t->tcb.cptr.
+ *
+ */
+#ifndef _SEL4UTILS_THREAD_H
+#define _SEL4UTILS_THREAD_H
+
+#include <autoconf.h>
+
+#ifdef CONFIG_LIB_SEL4_VSPACE
+
+#include <sel4/sel4.h>
+
+#include <vka/vka.h>
+
+#include <vspace/vspace.h>
+
+typedef struct sel4utils_thread {
+ vka_object_t tcb;
+ void *stack_top;
+ seL4_CPtr ipc_buffer;
+ seL4_Word ipc_buffer_addr;
+} sel4utils_thread_t;
+
+/**
+ * Configure a thread, allocating any resources required.
+ *
+ * @param vka initialised vka to allocate objects with
+ * @param alloc initialised vspace structure to allocate virtual memory with
+ * @param fault_endpoint endpoint to set as the thread's fault endpoint. Can be 0.
+ * @param priority seL4 priority for the thread to be scheduled with.
+ * @param cspace the root of the cspace to start the thread in
+ * @param cspace_root_data data for cspace access
+ * @param res an uninitialised sel4utils_thread_t data structure that will be initialised
+ * after this operation.
+ *
+ * @return 0 on success, -1 on failure. Use CONFIG_DEBUG to see error messages.
+ */
+int sel4utils_configure_thread(vka_t *vka, vspace_t *alloc, seL4_CPtr fault_endpoint,
+ uint8_t priority, seL4_CNode cspace, seL4_CapData_t cspace_root_data,
+ sel4utils_thread_t *res);
+
+/**
+ * Start a thread, allocating any resources required.
+ * The third argument to the thread (in r2 for arm, on stack for ia32) will be the
+ * address of the ipc buffer.
+ *
+ * @param thread thread data structure that has been initialised with sel4utils_configure_thread
+ * @param entry_point the address that the thread will start at
+ *
+ * NOTE: In order for the on-stack argument passing to work for ia32,
+ * entry points must be functions.
+ *
+ * ie. jumping to this start symbol will work:
+ *
+ * void _start(int argc, char **argv) {
+ * int ret = main(argc, argv);
+ * exit(ret);
+ * }
+ *
+ *
+ * However, jumping to a start symbol like this:
+ *
+ * _start:
+ * call main
+ *
+ * will NOT work, as call pushes an extra value (the return address)
+ * onto the stack. If you really require an assembler stub, it should
+ * pop that extra value off the stack to account for this.
+ *
+ * ie.
+ *
+ * _start:
+ * popl %eax
+ * call main
+ *
+ * This does not apply for arm, as arguments are passed in registers.
+ *
+ *
+ * @param arg0 a pointer to the arguments for this thread. User decides the protocol.
+ * @param arg1 another pointer. User decides the protocol. Note that there are two args here
+ * to easily support C standard: int main(int argc, char **argv).
+ * @param resume 1 to start the thread immediately, 0 otherwise.
+ *
+ * @return 0 on success, -1 on failure.
+ */
+int sel4utils_start_thread(sel4utils_thread_t *thread, void *entry_point, void *arg0, void *arg1,
+ int resume);
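+
+/*
+ * Usage sketch (editor's illustration): create and start a thread that shares
+ * the current cspace and vspace. seL4_CapInitThreadCNode and seL4_NilData are
+ * the usual choices for a simple root-task setup; other names are hypothetical.
+ *
+ *     sel4utils_thread_t thread;
+ *     int error = sel4utils_configure_thread(&vka, &vspace, fault_ep, 254,
+ *                                            seL4_CapInitThreadCNode, seL4_NilData,
+ *                                            &thread);
+ *     if (error == 0) {
+ *         error = sel4utils_start_thread(&thread, thread_entry, arg0, arg1, 1);
+ *     }
+ */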
+
+/**
+ * Release any resources used by this thread. The thread data structure will not be usable
+ * until sel4utils_configure_thread is called again.
+ *
+ * @param vka the vka interface that this thread was initialised with
+ * @param alloc the allocation interface that this thread was initialised with
+ * @param thread the thread structure that was filled in when the thread was configured
+ */
+void sel4utils_clean_up_thread(vka_t *vka, vspace_t *alloc, sel4utils_thread_t *thread);
+
+/**
+ * Start a fault handling thread that will print the name of the thread that faulted
+ * as well as debugging information.
+ *
+ * @param fault_endpoint the fault_endpoint to wait on
+ * @param vka allocator
+ * @param vspace vspace (this library must be mapped into that vspace).
+ * @param prio the priority to run the thread at (recommend highest possible)
+ * @param cspace the cspace that the fault_endpoint is in
+ * @param data the cspace_data for that cspace (with correct guard)
+ * @param name the name of the thread to print if it faults
+ * @param res the thread data structure to populate
+ *
+ * @return 0 on success.
+ */
+int sel4utils_start_fault_handler(seL4_CPtr fault_endpoint, vka_t *vka, vspace_t *vspace,
+ uint8_t prio, seL4_CPtr cspace, seL4_CapData_t data, char *name, sel4utils_thread_t *res);
+
+
+/**
+ * Pretty print a fault message.
+ *
+ * @param tag the message info tag delivered by the fault.
+ * @param name thread name
+ */
+void sel4utils_print_fault_message(seL4_MessageInfo_t tag, char *name);
+
+static inline seL4_TCB
+sel4utils_get_tcb(sel4utils_thread_t *thread)
+{
+ return thread->tcb.cptr;
+}
+
+static inline int
+sel4utils_suspend_thread(sel4utils_thread_t *thread)
+{
+ return seL4_TCB_Suspend(thread->tcb.cptr);
+}
+
+#endif /* CONFIG_LIB_SEL4_VSPACE */
+#endif /* _SEL4UTILS_THREAD_H */
diff --git a/include/sel4utils/util.h b/include/sel4utils/util.h
new file mode 100644
index 0000000..a6a59d4
--- /dev/null
+++ b/include/sel4utils/util.h
@@ -0,0 +1,20 @@
+/*
+ * Copyright 2014, NICTA
+ *
+ * This software may be distributed and modified according to the terms of
+ * the BSD 2-Clause license. Note that NO WARRANTY is provided.
+ * See "LICENSE_BSD2.txt" for details.
+ *
+ * @TAG(NICTA_BSD)
+ */
+
+#ifndef _SEL4_UTILS_UTIL_H_
+#define _SEL4_UTILS_UTIL_H_
+
+#include <autoconf.h>
+#include <assert.h>
+#include <stdint.h>
+
+#include <utils/util.h>
+
+#endif
diff --git a/include/sel4utils/vspace.h b/include/sel4utils/vspace.h
new file mode 100644
index 0000000..252e635
--- /dev/null
+++ b/include/sel4utils/vspace.h
@@ -0,0 +1,279 @@
+/*
+ * Copyright 2014, NICTA
+ *
+ * This software may be distributed and modified according to the terms of
+ * the BSD 2-Clause license. Note that NO WARRANTY is provided.
+ * See "LICENSE_BSD2.txt" for details.
+ *
+ * @TAG(NICTA_BSD)
+ */
+
+/*
+ * An implementation of the vspace virtual memory allocation interface, using a two
+ * level page table.
+ *
+ * This implementation expects malloc to work, although it only uses it for small amounts of
+ * memory.
+ *
+ * Stack size is constant and can be configured in menuconfig.
+ *
+ * Stacks are allocated with 1 guard page between them, but they are contiguous in the vmem
+ * region.
+ *
+ * Allocation starts at 0x00001000.
+ *
+ * This library will allow you to map over anything that it doesn't know about, so
+ * make sure you tell it if you do any external mapping.
+ *
+ */
+#ifndef _UTILS_VSPACE_H
+#define _UTILS_VSPACE_H
+
+#include <autoconf.h>
+
+#if defined CONFIG_LIB_SEL4_VSPACE && defined CONFIG_LIB_SEL4_VKA
+
+#include <vspace/vspace.h>
+#include <vka/vka.h>
+#include <sel4utils/util.h>
+
+/* These definitions are only here so that you can take the size of them.
+ * TOUCHING THESE DATA STRUCTURES IN ANY WAY WILL BREAK THE WORLD
+ * */
+#ifdef CONFIG_X86_64
+#define VSPACE_LEVEL_BITS 9
+#else
+#define VSPACE_LEVEL_BITS 10
+#endif
+
+#define VSPACE_LEVEL_SIZE BIT(VSPACE_LEVEL_BITS)
+
+typedef struct bottom_level {
+ seL4_CPtr bottom_level[VSPACE_LEVEL_SIZE];
+} bottom_level_t;
+
+typedef int(*sel4utils_map_page_fn)(vspace_t *vspace, seL4_CPtr cap, void *vaddr, seL4_CapRights rights, int cacheable, size_t size_bits);
+
+typedef struct sel4utils_alloc_data {
+ vka_t *vka;
+ bottom_level_t **top_level;
+ void *next_bottom_level_vaddr;
+ void *last_allocated;
+ vspace_t *bootstrap;
+ sel4utils_map_page_fn map_page;
+} sel4utils_alloc_data_t;
+
+struct reservation {
+ void *start;
+ void *end;
+ seL4_CapRights rights;
+ int cacheable;
+};
+
+/**
+ * This is a mostly internal function for constructing a vspace. Allows a vspace to be created
+ * with an arbitrary function to invoke for the mapping of pages. This is useful if you want
+ * a vspace manager, but you do not want to use seL4 page directories
+ *
+ * @param loader vspace of the current process, used to allocate
+ * virtual memory book keeping.
+ * @param new_vspace uninitialised vspace struct to populate.
+ * @param data uninitialised vspace data struct to populate.
+ * @param vka initialised vka that this virtual memory allocator will use to
+ * allocate pages and pagetables. This allocator will never invoke free.
+ * @param page_directory page directory for the new vspace.
+ * @param allocated_object_fn function to call when objects are allocated. Can be null.
+ * @param allocated_object_cookie cookie passed when the above function is called. Can be null.
+ * @param map_page Function that will be called to map seL4 pages
+ *
+ * @return 0 on success.
+ */
+int
+sel4utils_get_vspace_with_map(vspace_t *loader, vspace_t *new_vspace, sel4utils_alloc_data_t *data,
+ vka_t *vka, seL4_CPtr page_directory,
+ vspace_allocated_object_fn allocated_object_fn, void *allocated_object_cookie, sel4utils_map_page_fn map_page);
+
+/**
+ * Initialise a vspace allocator for a new address space (not the current one).
+ *
+ * @param loader vspace of the current process, used to allocate
+ * virtual memory book keeping.
+ * @param new_vspace uninitialised vspace struct to populate.
+ * @param data uninitialised vspace data struct to populate.
+ * @param vka initialised vka that this virtual memory allocator will use to
+ * allocate pages and pagetables. This allocator will never invoke free.
+ * @param page_directory page directory for the new vspace.
+ * @param allocated_object_fn function to call when objects are allocated. Can be null.
+ * @param allocated_object_cookie cookie passed when the above function is called. Can be null.
+ *
+ * @return 0 on success.
+ */
+int sel4utils_get_vspace(vspace_t *loader, vspace_t *new_vspace, sel4utils_alloc_data_t *data,
+ vka_t *vka, seL4_CPtr page_directory, vspace_allocated_object_fn allocated_object_fn,
+ void *allocated_object_cookie);
+
+#ifdef CONFIG_VTX
+/**
+ * Initialise a vspace allocator for an EPT address space
+ *
+ * @param loader vspace of the current process, used to allocate
+ * virtual memory book keeping.
+ * @param new_vspace uninitialised vspace struct to populate.
+ * @param vka initialised vka that this virtual memory allocator will use to
+ * allocate pages and pagetables. This allocator will never invoke free.
+ * @param ept EPT page directory for the new vspace.
+ * @param allocated_object_fn function to call when objects are allocated. Can be null.
+ * @param allocated_object_cookie cookie passed when the above function is called. Can be null.
+ *
+ * @return 0 on success.
+ */
+int sel4utils_get_vspace_ept(vspace_t *loader, vspace_t *new_vspace, vka_t *vka,
+ seL4_CPtr ept, vspace_allocated_object_fn allocated_object_fn, void *allocated_object_cookie);
+#endif /* CONFIG_VTX */
+
+/**
+ * Initialise a vspace allocator for the current address space (this is intended
+ * for use by a task that is not the root task but has no vspace, i.e. one loaded by the capDL loader).
+ *
+ * @param vspace uninitialised vspace struct to populate.
+ * @param data uninitialised vspace data struct to populate.
+ * @param vka initialised vka that this virtual memory allocator will use to
+ * allocate pages and pagetables. This allocator will never invoke free.
+ * @param page_directory page directory for the new vspace.
+ * @param allocated_object_fn function to call when objects are allocated. Can be null.
+ * @param allocated_object_cookie cookie passed when the above function is called. Can be null.
+ * @param existing_frames a NULL terminated list of virtual addresses for 4K frames that are
+ * already allocated. For larger frames, just pass in the virtual
+ * address range in 4K addresses. This will prevent the allocator
+ * from allocating over these frames.
+ *
+ * @return 0 on success.
+ *
+ */
+int sel4utils_bootstrap_vspace(vspace_t *vspace, sel4utils_alloc_data_t *data,
+ seL4_CPtr page_directory, vka_t *vka,
+ vspace_allocated_object_fn allocated_object_fn, void *allocated_object_cookie,
+ void *existing_frames[]);
+
+/**
+ * Initialise a vspace allocator for the current address space (this is intended
+ * for use by the root task). Take details of existing frames from bootinfo.
+ *
+ * @param vspace uninitialised vspace struct to populate.
+ * @param data uninitialised vspace data struct to populate.
+ * @param vka initialised vka that this virtual memory allocator will use to
+ * allocate pages and pagetables. This allocator will never invoke free.
+ * @param info seL4 boot info
+ * @param page_directory page directory for the new vspace.
+ * @param allocated_object_fn function to call when objects are allocated. Can be null.
+ * @param allocated_object_cookie cookie passed when the above function is called. Can be null.
+ *
+ * @return 0 on success.
+ *
+ */
+int sel4utils_bootstrap_vspace_with_bootinfo(vspace_t *vspace, sel4utils_alloc_data_t *data,
+ seL4_CPtr page_directory,
+ vka_t *vka, seL4_BootInfo *info, vspace_allocated_object_fn allocated_object_fn,
+ void *allocated_object_cookie);
+
+/* Wrapper function that configures a vspace allocator such that allocated objects are not
+ * tracked.
+ */
+static inline int
+sel4utils_get_vspace_leaky(vspace_t *loader, vspace_t *new_vspace, sel4utils_alloc_data_t *data,
+ vka_t *vka, seL4_CPtr page_directory)
+{
+ return sel4utils_get_vspace(loader, new_vspace, data, vka, page_directory,
+ (vspace_allocated_object_fn) NULL, NULL);
+}
+
+#ifdef CONFIG_VTX
+static inline int
+sel4utils_get_vspace_ept_leaky(vspace_t *loader, vspace_t *new_vspace,
+ vka_t *vka, seL4_CPtr page_directory)
+{
+ return sel4utils_get_vspace_ept(loader, new_vspace, vka, page_directory,
+ (vspace_allocated_object_fn) NULL, NULL);
+}
+#endif /* CONFIG_VTX */
+
+static inline int
+sel4utils_bootstrap_vspace_with_bootinfo_leaky(vspace_t *vspace, sel4utils_alloc_data_t *data, seL4_CPtr page_directory,
+ vka_t *vka, seL4_BootInfo *info)
+{
+ return sel4utils_bootstrap_vspace_with_bootinfo(vspace, data, page_directory, vka, info, NULL, NULL);
+}
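+
+/*
+ * Usage sketch (editor's illustration): bootstrap a vspace for the root task
+ * from bootinfo, without tracking allocated objects. Variable names are
+ * hypothetical.
+ *
+ *     vspace_t vspace;
+ *     sel4utils_alloc_data_t alloc_data;
+ *     int error = sel4utils_bootstrap_vspace_with_bootinfo_leaky(&vspace, &alloc_data,
+ *                                                                seL4_CapInitThreadPD,
+ *                                                                &vka, bootinfo);
+ */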
+
+static inline int
+sel4utils_bootstrap_vspace_leaky(vspace_t *vspace, sel4utils_alloc_data_t *data, seL4_CPtr page_directory, vka_t *vka,
+ void *existing_frames[])
+{
+ return sel4utils_bootstrap_vspace(vspace, data, page_directory, vka, NULL, NULL, existing_frames);
+}
+
+/**
+ * Attempts to create a new vspace reservation. Function behaves similarly to vspace_reserve_range
+ * except a reservation struct is passed in, instead of being malloc'ed. This is intended to be
+ * used during bootstrapping where malloc has not yet been setup.
+ * Reservations created with this function should *only* be freed with sel4utils_free_reservation_no_alloc
+ *
+ * @param vspace the virtual memory allocator to use.
+ * @param reservation Allocated reservation struct to fill out
+ * @param size the size in bytes to map.
+ * @param rights the rights to map the pages in with in this reservation
+ * @param cacheable 1 if the pages should be mapped with cacheable attributes. 0 for DMA.
+ * @param result the virtual address of the reserved range will be returned here.
+ *
+ * @return 0 on success
+ */
+int sel4utils_reserve_range_no_alloc(vspace_t *vspace, reservation_t *reservation, size_t size,
+ seL4_CapRights rights, int cacheable, void **result);
+
+/**
+ * Attempts to create a new vspace reservation. Function behaves similarly to vspace_reserve_range_at
+ * except a reservation struct is passed in, instead of being malloc'ed. This is intended to be
+ * used during bootstrapping where malloc has not yet been setup.
+ * Reservations created with this function should *only* be freed with sel4utils_free_reservation_no_alloc
+ *
+ * @param vspace the virtual memory allocator to use.
+ * @param reservation Allocated reservation struct to fill out
+ * @param vaddr the virtual address to start the range at.
+ * @param size the size in bytes to map.
+ * @param rights the rights to map the pages in with in this reservation
+ * @param cacheable 1 if the pages should be mapped with cacheable attributes. 0 for DMA.
+ *
+ * @return 0 on success
+ */
+int sel4utils_reserve_range_at_no_alloc(vspace_t *vspace, reservation_t *reservation, void *vaddr,
+ size_t size, seL4_CapRights rights, int cacheable);
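+
+/*
+ * Usage sketch (editor's illustration): reserve a fixed range during early
+ * bootstrap, before malloc is available, using a statically allocated
+ * reservation struct. The address and names are hypothetical.
+ *
+ *     static reservation_t early_reservation;
+ *     int error = sel4utils_reserve_range_at_no_alloc(&vspace, &early_reservation,
+ *                                                     (void *) 0x30000000,
+ *                                                     4 * PAGE_SIZE_4K,
+ *                                                     seL4_AllRights, 1);
+ */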
+
+/**
+ * Frees a reservation. Function behaves similarly to vspace_free_reservation, except
+ * it should only be called on reservations that were created with sel4utils_reserve_range_no_alloc
+ * and sel4utils_reserve_range_at_no_alloc
+ * @param vspace the virtual memory allocator to use.
+ * @param reservation the reservation to free.
+ */
+void sel4utils_free_reservation_no_alloc(vspace_t *vspace, reservation_t *reservation);
+
+/*
+ * Copy the code and data segment (the image effectively) from current vspace
+ * into clone vspace. The clone vspace should be initialised.
+ *
+ * @param current the vspace to copy from.
+ * @param clone the vspace to copy to.
+ * @param reserve the previously established reservation in clone to copy.
+ * @return 0 on success.
+ */
+int sel4utils_bootstrap_clone_into_vspace(vspace_t *current, vspace_t *clone, reservation_t *reserve);
+
+/**
+ * Get the bounds of _executable_start and _end.
+ *
+ * @param va_start return va_start.
+ * @param va_end return va_end.
+ */
+void sel4utils_get_image_region(seL4_Word *va_start, seL4_Word *va_end);
+
+#endif /* CONFIG_LIB_SEL4_VSPACE && CONFIG_LIB_SEL4_VKA */
+#endif /* _UTILS_VSPACE_H */
diff --git a/include/sel4utils/vspace_internal.h b/include/sel4utils/vspace_internal.h
new file mode 100644
index 0000000..f1b78b2
--- /dev/null
+++ b/include/sel4utils/vspace_internal.h
@@ -0,0 +1,217 @@
+/*
+ * Copyright 2014, NICTA
+ *
+ * This software may be distributed and modified according to the terms of
+ * the BSD 2-Clause license. Note that NO WARRANTY is provided.
+ * See "LICENSE_BSD2.txt" for details.
+ *
+ * @TAG(NICTA_BSD)
+ */
+
+#ifndef __SEL4UILS_VSPACE_PRIVATE_H
+#define __SEL4UILS_VSPACE_PRIVATE_H
+
+#include <vka/vka.h>
+#include <vspace/vspace.h>
+
+#include <sel4utils/util.h>
+#include <sel4utils/mapping.h>
+#include <sel4utils/vspace.h>
+
+#define KERNEL_RESERVED_START 0xE0000000
+#define KERNEL_RESERVED_END 0xFFFFF000
+
+#define RESERVED 0xFFFFFFFF
+#define EMPTY 0
+
+/* Bits to remove to get to the start of the second level translation bits. There are
+ * many implicit assumptions in the code that this is actually 12 bits and hence
+ * translation addresses can be stepped using the PAGE_SIZE_4K constant. */
+#ifdef CONFIG_X86_64
+#define BOTTOM_LEVEL_BITS_OFFSET (30 - 2 * VSPACE_LEVEL_BITS)
+#else
+#define BOTTOM_LEVEL_BITS_OFFSET (seL4_WordBits - 2 * VSPACE_LEVEL_BITS)
+#endif
+
+/* Bits to remove to get to the start of the first level translation bits */
+#ifdef CONFIG_X86_64
+#define TOP_LEVEL_BITS_OFFSET (30 - VSPACE_LEVEL_BITS)
+#else
+#define TOP_LEVEL_BITS_OFFSET (seL4_WordBits - VSPACE_LEVEL_BITS)
+#endif
+
+#define LEVEL_MASK MASK_UNSAFE(VSPACE_LEVEL_BITS)
+
+#define BOTTOM_LEVEL_INDEX(x) ((((uint32_t) ((seL4_Word)(x))) >> BOTTOM_LEVEL_BITS_OFFSET) & LEVEL_MASK)
+#define TOP_LEVEL_INDEX(x) ((((uint32_t) ((seL4_Word)(x))) >> TOP_LEVEL_BITS_OFFSET) & LEVEL_MASK)
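+
+/* Worked example (editor's note), assuming 32-bit words and the 10-bit levels
+ * configured above: for vaddr 0x12345678, TOP_LEVEL_INDEX(x) is
+ * 0x12345678 >> 22 = 0x48 and BOTTOM_LEVEL_INDEX(x) is (0x12345678 >> 12) & 0x3ff = 0x345,
+ * so the cap for that page lives at top_level[0x48]->bottom_level[0x345]. */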
+
+int assure_level(vspace_t *vspace, void *vaddr);
+int bootstrap_create_level(vspace_t *vspace, void *vaddr);
+
+static inline sel4utils_alloc_data_t *
+get_alloc_data(vspace_t *vspace)
+{
+ return (sel4utils_alloc_data_t *) vspace->data;
+}
+
+static inline int
+update_entry(vspace_t *vspace, void *vaddr, seL4_CPtr page)
+{
+ sel4utils_alloc_data_t *data = get_alloc_data(vspace);
+
+ if (assure_level(vspace, vaddr) != seL4_NoError) {
+ return -1;
+ }
+
+ assert(data->top_level[TOP_LEVEL_INDEX(vaddr)] != (void *) RESERVED);
+ assert(data->top_level[TOP_LEVEL_INDEX(vaddr)] != NULL);
+ data->top_level[TOP_LEVEL_INDEX(vaddr)]->bottom_level[BOTTOM_LEVEL_INDEX(vaddr)] = page;
+
+ return seL4_NoError;
+}
+
+static inline int
+update(vspace_t *vspace, void *vaddr, seL4_CPtr page)
+{
+
+#if 0
+ if (page == RESERVED) {
+ LOG_ERROR("Cap %x cannot be used!", RESERVED);
+ return -1;
+ }
+#endif
+ assert(page != 0);
+ assert(get_alloc_data(vspace)->top_level[TOP_LEVEL_INDEX(vaddr)] != (void *) RESERVED);
+ return update_entry(vspace, vaddr, page);
+}
+
+static inline int
+reserve(vspace_t *vspace, void *vaddr)
+{
+ assert(get_alloc_data(vspace)->top_level[TOP_LEVEL_INDEX(vaddr)] != (void *) RESERVED);
+ return update_entry(vspace, vaddr, RESERVED);
+}
+
+static inline void
+clear(vspace_t *vspace, void *vaddr)
+{
+ /* if we are clearing an entry it should have been allocated before */
+ sel4utils_alloc_data_t *data = get_alloc_data(vspace);
+ assert(data->top_level[TOP_LEVEL_INDEX(vaddr)] != NULL);
+ assert(data->top_level[TOP_LEVEL_INDEX(vaddr)] != (void *) RESERVED);
+
+ update_entry(vspace, vaddr, 0);
+ if (vaddr < data->last_allocated) {
+ data->last_allocated = vaddr;
+ }
+}
+
+static inline seL4_CPtr
+get_entry(bottom_level_t *top[], void *vaddr)
+{
+ if (top[TOP_LEVEL_INDEX(vaddr)] == NULL) {
+ return 0;
+ }
+
+ return top[TOP_LEVEL_INDEX(vaddr)]->bottom_level[BOTTOM_LEVEL_INDEX(vaddr)];
+}
+
+/* update entry in page table and handle large pages */
+static inline int
+update_entries(vspace_t *vspace, void *vaddr, seL4_CPtr cap, size_t size_bits)
+{
+ int error = seL4_NoError;
+ for (int i = 0; i < (1 << size_bits) && error == seL4_NoError; i += PAGE_SIZE_4K) {
+ error = update(vspace, vaddr + i, cap);
+ }
+
+ return error;
+}
+
+/* reserve entry in page table and handle large pages */
+static inline int
+reserve_entries(vspace_t *vspace, void *vaddr, size_t size_bits)
+{
+ int error = seL4_NoError;
+ for (int i = 0; i < (1 << size_bits) && error == seL4_NoError; i += PAGE_SIZE_4K) {
+ error = reserve(vspace, vaddr + i);
+ }
+
+ return error;
+}
+
+/* clear an entry in the page table and handle large pages */
+static inline void
+clear_entries(vspace_t *vspace, void *vaddr, size_t size_bits)
+{
+ assert( ((uintptr_t)vaddr & ((1 << size_bits) - 1)) == 0);
+ for (int i = 0; i < (1 << size_bits); i += PAGE_SIZE_4K) {
+ clear(vspace, vaddr + i);
+ }
+}
+
+static inline int
+is_available(bottom_level_t *top_level[VSPACE_LEVEL_SIZE], void *vaddr)
+{
+ /* if there is no bottom level page table then this entry is available */
+ if (top_level[TOP_LEVEL_INDEX(vaddr)] == NULL) {
+ return 1;
+ /* otherwise check the entry explicitly */
+ } else if (top_level[TOP_LEVEL_INDEX(vaddr)] == (void *) RESERVED) {
+ return 0;
+ }
+
+ return top_level[TOP_LEVEL_INDEX(vaddr)]->bottom_level[BOTTOM_LEVEL_INDEX(vaddr)] == 0;
+}
+
+static inline int
+is_reserved(bottom_level_t *top_level[VSPACE_LEVEL_SIZE], void *vaddr)
+{
+ if (top_level[TOP_LEVEL_INDEX(vaddr)] == (void *) RESERVED) {
+ return 1;
+ }
+
+ if (top_level[TOP_LEVEL_INDEX(vaddr)] == NULL) {
+ return 0;
+ }
+#if 0
+ printf("is reserved vaddr 0x%x 0x%x \n",vaddr, top_level[TOP_LEVEL_INDEX(vaddr)]->bottom_level[BOTTOM_LEVEL_INDEX(vaddr)]);
+#endif
+ return top_level[TOP_LEVEL_INDEX(vaddr)]->bottom_level[BOTTOM_LEVEL_INDEX(vaddr)] == RESERVED;
+}
+
+/* Internal interface functions */
+int sel4utils_map_page_pd(vspace_t *vspace, seL4_CPtr cap, void *vaddr, seL4_CapRights rights, int cacheable, size_t size_bits);
+#ifdef CONFIG_VTX
+int sel4utils_map_page_ept(vspace_t *vspace, seL4_CPtr cap, void *vaddr, seL4_CapRights rights, int cacheable, size_t size_bits);
+#endif /* CONFIG_VTX */
+
+/* interface functions */
+int sel4utils_map_pages_at_vaddr(vspace_t *vspace, seL4_CPtr caps[], void *vaddr,
+ size_t num_pages, size_t size_bits, reservation_t *reservation);
+seL4_CPtr sel4utils_get_cap(vspace_t *vspace, void *vaddr);
+void *sel4utils_map_pages(vspace_t *vspace, seL4_CPtr caps[], seL4_CapRights rights,
+ size_t num_pages, size_t size_bits, int cacheable);
+void sel4utils_unmap_pages(vspace_t *vspace, void *vaddr, size_t num_pages, size_t size_bits);
+
+int sel4utils_unmap_reserved_pages(vspace_t *vspace, void *vaddr, size_t num_pages, size_t size_bits, reservation_t *reservation);
+
+int sel4utils_new_pages_at_vaddr(vspace_t *vspace, void *vaddr, size_t num_pages,
+ size_t size_bits, reservation_t *reservation);
+void *sel4utils_new_pages(vspace_t *vspace, seL4_CapRights rights, size_t num_pages,
+ size_t size_bits);
+void sel4utils_free_pages(vspace_t *vspace, void *vaddr, size_t num_pages, size_t size_bits);
+
+void *sel4utils_new_stack(vspace_t *vspace);
+void sel4utils_free_stack(vspace_t *vspace, void *stack_top);
+
+void * sel4utils_new_ipc_buffer(vspace_t *vspace, seL4_CPtr *page);
+void sel4utils_free_ipc_buffer(vspace_t *vspace, void *vaddr);
+
+reservation_t *sel4utils_reserve_range(vspace_t *vspace, size_t size, seL4_CapRights rights,
+ int cacheable, void **vaddr);
+reservation_t *sel4utils_reserve_range_at(vspace_t *vspace, void *vaddr, size_t size,
+ seL4_CapRights rights, int cacheable);
+void sel4utils_free_reservation(vspace_t *vspace, reservation_t *reservation);
+
+#endif /* __SEL4UILS_VSPACE_PRIVATE_H */