trivial: fix style

Reformat to the seL4 code style: opening braces of function definitions on
their own line, four-space indentation instead of tabs, braces around
single-statement bodies, one statement per line, and spaces around binary
operators. No functional change intended.
diff --git a/include/sel4_arch/aarch64/sel4runtime/thread_arch.h b/include/sel4_arch/aarch64/sel4runtime/thread_arch.h
index 615f466..56426df 100644
--- a/include/sel4_arch/aarch64/sel4runtime/thread_arch.h
+++ b/include/sel4_arch/aarch64/sel4runtime/thread_arch.h
@@ -14,33 +14,38 @@
 /*
  * Obtain the value of the TLS base for the current thread.
  */
-static inline sel4runtime_uintptr_t sel4runtime_read_tpidr_el0(void) {
+static inline sel4runtime_uintptr_t sel4runtime_read_tpidr_el0(void)
+{
     sel4runtime_uintptr_t reg;
-    __asm__ __volatile__ ("mrs %0,tpidr_el0" : "=r"(reg));
+    __asm__ __volatile__("mrs %0,tpidr_el0" : "=r"(reg));
     return reg;
 }
 
-static inline void sel4runtime_write_tpidr_el0(sel4runtime_uintptr_t reg) {
-    __asm__ __volatile__ ("msr tpidr_el0,%0" :: "r"(reg));
+static inline void sel4runtime_write_tpidr_el0(sel4runtime_uintptr_t reg)
+{
+    __asm__ __volatile__("msr tpidr_el0,%0" :: "r"(reg));
 }
 
-static inline sel4runtime_uintptr_t sel4runtime_read_tpidrro_el0(void) {
+static inline sel4runtime_uintptr_t sel4runtime_read_tpidrro_el0(void)
+{
     sel4runtime_uintptr_t reg;
-    __asm__ __volatile__ ("mrs %0,tpidrro_el0" : "=r"(reg));
+    __asm__ __volatile__("mrs %0,tpidrro_el0" : "=r"(reg));
     return reg;
 }
 
 /*
  * Obtain the value of the TLS base for the current thread.
  */
-static inline sel4runtime_uintptr_t sel4runtime_get_tls_base(void) {
+static inline sel4runtime_uintptr_t sel4runtime_get_tls_base(void)
+{
     return sel4runtime_read_tpidr_el0();
 }
 
 /*
  * Set the value of the TLS base for the current thread.
  */
-static inline void sel4runtime_set_tls_base(sel4runtime_uintptr_t tls_base) {
+static inline void sel4runtime_set_tls_base(sel4runtime_uintptr_t tls_base)
+{
     sel4runtime_write_tpidr_el0(tls_base);
 }
 
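For context, not part of the patch itself: the helpers above are thin wrappers
around the AArch64 thread-pointer registers, so getting or setting the TLS base
is a single MRS or MSR on TPIDR_EL0. A minimal usage sketch follows; the
<sel4runtime/thread_arch.h> include path and the tls_area buffer are
assumptions for illustration, not something this patch defines.

/* Hypothetical sketch: install a caller-provided TLS area for the current
 * thread and read the base back. Header path and tls_area are assumptions. */
#include <sel4runtime/thread_arch.h>

static char tls_area[128] __attribute__((aligned(16)));

void example_install_tls(void)
{
    sel4runtime_set_tls_base((sel4runtime_uintptr_t)tls_area);

    /* sel4runtime_get_tls_base() reads TPIDR_EL0 on this architecture,
     * so it returns the value written above. */
    sel4runtime_uintptr_t base = sel4runtime_get_tls_base();
    (void)base;
}
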
diff --git a/src/init.c b/src/init.c
index d4b2636..7dde43f 100644
--- a/src/init.c
+++ b/src/init.c
@@ -31,26 +31,28 @@
 extern routine __fini_array_start[];
 extern routine __fini_array_end[];
 
-void __sel4runtime_run_constructors(void) {
+void __sel4runtime_run_constructors(void)
+{
     int preinit_array_len
         = &__preinit_array_end[0]
-        - &__preinit_array_start[0];
+          - &__preinit_array_start[0];
     for (int f = 0; f < preinit_array_len; f++) {
         __preinit_array_start[f]();
     }
     _init();
     int init_array_len
         = &__init_array_end[0]
-        - &__init_array_start[0];
+          - &__init_array_start[0];
     for (int f = 0; f < init_array_len; f++) {
         __init_array_start[f]();
     }
 }
 
-void __sel4runtime_run_destructors(void) {
+void __sel4runtime_run_destructors(void)
+{
     int fini_array_len
         = &__fini_array_end[0]
-        - &__fini_array_start[0];
+          - &__fini_array_start[0];
     for (int f = fini_array_len - 1; f >= 0; f--) {
         __fini_array_start[f]();
     }
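For reference, not part of the patch: the loops above walk the linker-provided
preinit/init/fini arrays, forwards for constructors and backwards for
destructors. A minimal sketch of how entries typically land in those arrays
with a GCC/Clang toolchain; the function names are illustrative only.

/* Illustrative only: the constructor/destructor attributes make the compiler
 * emit pointers into .init_array/.fini_array, which the linker script exposes
 * as __init_array_start/_end and __fini_array_start/_end. */
__attribute__((constructor))
static void example_ctor(void)
{
    /* called from __sel4runtime_run_constructors(), in array order */
}

__attribute__((destructor))
static void example_dtor(void)
{
    /* called from __sel4runtime_run_destructors(), in reverse array order */
}
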
diff --git a/src/memcpy.c b/src/memcpy.c
index ffa7efe..5a8b333 100644
--- a/src/memcpy.c
+++ b/src/memcpy.c
@@ -28,8 +28,8 @@
 
 void *__sel4runtime_memcpy(void *restrict dest, const void *restrict src, sel4runtime_size_t n)
 {
-	unsigned char *d = dest;
-	const unsigned char *s = src;
+    unsigned char *d = dest;
+    const unsigned char *s = src;
 
 #ifdef __GNUC__
 
@@ -41,108 +41,137 @@
 #define RS >>
 #endif
 
-	typedef sel4runtime_uint32_t __attribute__((__may_alias__)) u32;
-	sel4runtime_uint32_t w, x;
+    typedef sel4runtime_uint32_t __attribute__((__may_alias__)) u32;
+    sel4runtime_uint32_t w, x;
 
-	for (; (sel4runtime_uintptr_t)s % 4 && n; n--) *d++ = *s++;
+    for (; (sel4runtime_uintptr_t)s % 4 && n; n--) {
+        *d++ = *s++;
+    }
 
-	if ((sel4runtime_uintptr_t)d % 4 == 0) {
-		for (; n>=16; s+=16, d+=16, n-=16) {
-			*(u32 *)(d+0) = *(u32 *)(s+0);
-			*(u32 *)(d+4) = *(u32 *)(s+4);
-			*(u32 *)(d+8) = *(u32 *)(s+8);
-			*(u32 *)(d+12) = *(u32 *)(s+12);
-		}
-		if (n&8) {
-			*(u32 *)(d+0) = *(u32 *)(s+0);
-			*(u32 *)(d+4) = *(u32 *)(s+4);
-			d += 8; s += 8;
-		}
-		if (n&4) {
-			*(u32 *)(d+0) = *(u32 *)(s+0);
-			d += 4; s += 4;
-		}
-		if (n&2) {
-			*d++ = *s++; *d++ = *s++;
-		}
-		if (n&1) {
-			*d = *s;
-		}
-		return dest;
-	}
+    if ((sel4runtime_uintptr_t)d % 4 == 0) {
+        for (; n >= 16; s += 16, d += 16, n -= 16) {
+            *(u32 *)(d + 0) = *(u32 *)(s + 0);
+            *(u32 *)(d + 4) = *(u32 *)(s + 4);
+            *(u32 *)(d + 8) = *(u32 *)(s + 8);
+            *(u32 *)(d + 12) = *(u32 *)(s + 12);
+        }
+        if (n & 8) {
+            *(u32 *)(d + 0) = *(u32 *)(s + 0);
+            *(u32 *)(d + 4) = *(u32 *)(s + 4);
+            d += 8;
+            s += 8;
+        }
+        if (n & 4) {
+            *(u32 *)(d + 0) = *(u32 *)(s + 0);
+            d += 4;
+            s += 4;
+        }
+        if (n & 2) {
+            *d++ = *s++;
+            *d++ = *s++;
+        }
+        if (n & 1) {
+            *d = *s;
+        }
+        return dest;
+    }
 
-	if (n >= 32) switch ((sel4runtime_uintptr_t)d % 4) {
-	case 1:
-		w = *(u32 *)s;
-		*d++ = *s++;
-		*d++ = *s++;
-		*d++ = *s++;
-		n -= 3;
-		for (; n>=17; s+=16, d+=16, n-=16) {
-			x = *(u32 *)(s+1);
-			*(u32 *)(d+0) = (w LS 24) | (x RS 8);
-			w = *(u32 *)(s+5);
-			*(u32 *)(d+4) = (x LS 24) | (w RS 8);
-			x = *(u32 *)(s+9);
-			*(u32 *)(d+8) = (w LS 24) | (x RS 8);
-			w = *(u32 *)(s+13);
-			*(u32 *)(d+12) = (x LS 24) | (w RS 8);
-		}
-		break;
-	case 2:
-		w = *(u32 *)s;
-		*d++ = *s++;
-		*d++ = *s++;
-		n -= 2;
-		for (; n>=18; s+=16, d+=16, n-=16) {
-			x = *(u32 *)(s+2);
-			*(u32 *)(d+0) = (w LS 16) | (x RS 16);
-			w = *(u32 *)(s+6);
-			*(u32 *)(d+4) = (x LS 16) | (w RS 16);
-			x = *(u32 *)(s+10);
-			*(u32 *)(d+8) = (w LS 16) | (x RS 16);
-			w = *(u32 *)(s+14);
-			*(u32 *)(d+12) = (x LS 16) | (w RS 16);
-		}
-		break;
-	case 3:
-		w = *(u32 *)s;
-		*d++ = *s++;
-		n -= 1;
-		for (; n>=19; s+=16, d+=16, n-=16) {
-			x = *(u32 *)(s+3);
-			*(u32 *)(d+0) = (w LS 8) | (x RS 24);
-			w = *(u32 *)(s+7);
-			*(u32 *)(d+4) = (x LS 8) | (w RS 24);
-			x = *(u32 *)(s+11);
-			*(u32 *)(d+8) = (w LS 8) | (x RS 24);
-			w = *(u32 *)(s+15);
-			*(u32 *)(d+12) = (x LS 8) | (w RS 24);
-		}
-		break;
-	}
-	if (n&16) {
-		*d++ = *s++; *d++ = *s++; *d++ = *s++; *d++ = *s++;
-		*d++ = *s++; *d++ = *s++; *d++ = *s++; *d++ = *s++;
-		*d++ = *s++; *d++ = *s++; *d++ = *s++; *d++ = *s++;
-		*d++ = *s++; *d++ = *s++; *d++ = *s++; *d++ = *s++;
-	}
-	if (n&8) {
-		*d++ = *s++; *d++ = *s++; *d++ = *s++; *d++ = *s++;
-		*d++ = *s++; *d++ = *s++; *d++ = *s++; *d++ = *s++;
-	}
-	if (n&4) {
-		*d++ = *s++; *d++ = *s++; *d++ = *s++; *d++ = *s++;
-	}
-	if (n&2) {
-		*d++ = *s++; *d++ = *s++;
-	}
-	if (n&1) {
-		*d = *s;
-	}
-	return dest;
+    if (n >= 32) switch ((sel4runtime_uintptr_t)d % 4) {
+        case 1:
+            w = *(u32 *)s;
+            *d++ = *s++;
+            *d++ = *s++;
+            *d++ = *s++;
+            n -= 3;
+            for (; n >= 17; s += 16, d += 16, n -= 16) {
+                x = *(u32 *)(s + 1);
+                *(u32 *)(d + 0) = (w LS 24) | (x RS 8);
+                w = *(u32 *)(s + 5);
+                *(u32 *)(d + 4) = (x LS 24) | (w RS 8);
+                x = *(u32 *)(s + 9);
+                *(u32 *)(d + 8) = (w LS 24) | (x RS 8);
+                w = *(u32 *)(s + 13);
+                *(u32 *)(d + 12) = (x LS 24) | (w RS 8);
+            }
+            break;
+        case 2:
+            w = *(u32 *)s;
+            *d++ = *s++;
+            *d++ = *s++;
+            n -= 2;
+            for (; n >= 18; s += 16, d += 16, n -= 16) {
+                x = *(u32 *)(s + 2);
+                *(u32 *)(d + 0) = (w LS 16) | (x RS 16);
+                w = *(u32 *)(s + 6);
+                *(u32 *)(d + 4) = (x LS 16) | (w RS 16);
+                x = *(u32 *)(s + 10);
+                *(u32 *)(d + 8) = (w LS 16) | (x RS 16);
+                w = *(u32 *)(s + 14);
+                *(u32 *)(d + 12) = (x LS 16) | (w RS 16);
+            }
+            break;
+        case 3:
+            w = *(u32 *)s;
+            *d++ = *s++;
+            n -= 1;
+            for (; n >= 19; s += 16, d += 16, n -= 16) {
+                x = *(u32 *)(s + 3);
+                *(u32 *)(d + 0) = (w LS 8) | (x RS 24);
+                w = *(u32 *)(s + 7);
+                *(u32 *)(d + 4) = (x LS 8) | (w RS 24);
+                x = *(u32 *)(s + 11);
+                *(u32 *)(d + 8) = (w LS 8) | (x RS 24);
+                w = *(u32 *)(s + 15);
+                *(u32 *)(d + 12) = (x LS 8) | (w RS 24);
+            }
+            break;
+        }
+    if (n & 16) {
+        *d++ = *s++;
+        *d++ = *s++;
+        *d++ = *s++;
+        *d++ = *s++;
+        *d++ = *s++;
+        *d++ = *s++;
+        *d++ = *s++;
+        *d++ = *s++;
+        *d++ = *s++;
+        *d++ = *s++;
+        *d++ = *s++;
+        *d++ = *s++;
+        *d++ = *s++;
+        *d++ = *s++;
+        *d++ = *s++;
+        *d++ = *s++;
+    }
+    if (n & 8) {
+        *d++ = *s++;
+        *d++ = *s++;
+        *d++ = *s++;
+        *d++ = *s++;
+        *d++ = *s++;
+        *d++ = *s++;
+        *d++ = *s++;
+        *d++ = *s++;
+    }
+    if (n & 4) {
+        *d++ = *s++;
+        *d++ = *s++;
+        *d++ = *s++;
+        *d++ = *s++;
+    }
+    if (n & 2) {
+        *d++ = *s++;
+        *d++ = *s++;
+    }
+    if (n & 1) {
+        *d = *s;
+    }
+    return dest;
 #endif
 
-	for (; n; n--) *d++ = *s++;
-	return dest;
+    for (; n; n--) {
+        *d++ = *s++;
+    }
+    return dest;
 }
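For reference, not part of the patch: the switch above handles destinations
that sit 1, 2, or 3 bytes past a word boundary by combining two aligned source
loads with a pair of shifts, the directions being chosen by the LS/RS
endianness macros. A standalone sketch of the idea, assuming a little-endian
target and using plain stdint/string.h rather than the file's macros:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Little-endian illustration: an unaligned 32-bit value (bytes 1..4) is
 * assembled from two aligned loads and two shifts instead of four byte
 * loads, which is what the case 1/2/3 loops above do in bulk. */
int main(void)
{
    unsigned char buf[8] = { 0x10, 0x21, 0x32, 0x43, 0x54, 0x65, 0x76, 0x87 };
    uint32_t w0, w1, combined, reference;

    memcpy(&w0, &buf[0], 4);        /* aligned load of bytes 0..3 */
    memcpy(&w1, &buf[4], 4);        /* aligned load of bytes 4..7 */
    memcpy(&reference, &buf[1], 4); /* what an unaligned read would yield */

    combined = (w0 >> 8) | (w1 << 24); /* bytes 1..4 on little-endian */
    printf("%s\n", combined == reference ? "match" : "mismatch");
    return 0;
}
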
diff --git a/src/memset.c b/src/memset.c
index 0ed0784..badc07b 100644
--- a/src/memset.c
+++ b/src/memset.c
@@ -28,84 +28,98 @@
 
 void *__sel4runtime_memset(void *dest, int c, sel4runtime_size_t n)
 {
-	unsigned char *s = dest;
-	sel4runtime_size_t k;
+    unsigned char *s = dest;
+    sel4runtime_size_t k;
 
-	/* Fill head and tail with minimal branching. Each
-	 * conditional ensures that all the subsequently used
-	 * offsets are well-defined and in the dest region. */
+    /* Fill head and tail with minimal branching. Each
+     * conditional ensures that all the subsequently used
+     * offsets are well-defined and in the dest region. */
 
-	if (!n) return dest;
-	s[0] = s[n-1] = c;
-	if (n <= 2) return dest;
-	s[1] = s[n-2] = c;
-	s[2] = s[n-3] = c;
-	if (n <= 6) return dest;
-	s[3] = s[n-4] = c;
-	if (n <= 8) return dest;
+    if (!n) {
+        return dest;
+    }
+    s[0] = s[n - 1] = c;
+    if (n <= 2) {
+        return dest;
+    }
+    s[1] = s[n - 2] = c;
+    s[2] = s[n - 3] = c;
+    if (n <= 6) {
+        return dest;
+    }
+    s[3] = s[n - 4] = c;
+    if (n <= 8) {
+        return dest;
+    }
 
-	/* Advance pointer to align it at a 4-byte boundary,
-	 * and truncate n to a multiple of 4. The previous code
-	 * already took care of any head/tail that get cut off
-	 * by the alignment. */
+    /* Advance pointer to align it at a 4-byte boundary,
+     * and truncate n to a multiple of 4. The previous code
+     * already took care of any head/tail that get cut off
+     * by the alignment. */
 
-	k = -(sel4runtime_uintptr_t)s & 3;
-	s += k;
-	n -= k;
-	n &= -4;
+    k = -(sel4runtime_uintptr_t)s & 3;
+    s += k;
+    n -= k;
+    n &= -4;
 
 #ifdef __GNUC__
-	typedef sel4runtime_uint32_t __attribute__((__may_alias__)) u32;
-	typedef sel4runtime_uint64_t __attribute__((__may_alias__)) u64;
+    typedef sel4runtime_uint32_t __attribute__((__may_alias__)) u32;
+    typedef sel4runtime_uint64_t __attribute__((__may_alias__)) u64;
 
-	u32 c32 = ((u32)-1)/255 * (unsigned char)c;
+    u32 c32 = ((u32)-1) / 255 * (unsigned char)c;
 
-	/* In preparation to copy 32 bytes at a time, aligned on
-	 * an 8-byte bounary, fill head/tail up to 28 bytes each.
-	 * As in the initial byte-based head/tail fill, each
-	 * conditional below ensures that the subsequent offsets
-	 * are valid (e.g. !(n<=24) implies n>=28). */
+    /* In preparation to write 32 bytes at a time, aligned on
+     * an 8-byte boundary, fill head/tail up to 28 bytes each.
+     * As in the initial byte-based head/tail fill, each
+     * conditional below ensures that the subsequent offsets
+     * are valid (e.g. !(n<=24) implies n>=28). */
 
-	*(u32 *)(s+0) = c32;
-	*(u32 *)(s+n-4) = c32;
-	if (n <= 8) return dest;
-	*(u32 *)(s+4) = c32;
-	*(u32 *)(s+8) = c32;
-	*(u32 *)(s+n-12) = c32;
-	*(u32 *)(s+n-8) = c32;
-	if (n <= 24) return dest;
-	*(u32 *)(s+12) = c32;
-	*(u32 *)(s+16) = c32;
-	*(u32 *)(s+20) = c32;
-	*(u32 *)(s+24) = c32;
-	*(u32 *)(s+n-28) = c32;
-	*(u32 *)(s+n-24) = c32;
-	*(u32 *)(s+n-20) = c32;
-	*(u32 *)(s+n-16) = c32;
+    *(u32 *)(s + 0) = c32;
+    *(u32 *)(s + n - 4) = c32;
+    if (n <= 8) {
+        return dest;
+    }
+    *(u32 *)(s + 4) = c32;
+    *(u32 *)(s + 8) = c32;
+    *(u32 *)(s + n - 12) = c32;
+    *(u32 *)(s + n - 8) = c32;
+    if (n <= 24) {
+        return dest;
+    }
+    *(u32 *)(s + 12) = c32;
+    *(u32 *)(s + 16) = c32;
+    *(u32 *)(s + 20) = c32;
+    *(u32 *)(s + 24) = c32;
+    *(u32 *)(s + n - 28) = c32;
+    *(u32 *)(s + n - 24) = c32;
+    *(u32 *)(s + n - 20) = c32;
+    *(u32 *)(s + n - 16) = c32;
 
-	/* Align to a multiple of 8 so we can fill 64 bits at a time,
-	 * and avoid writing the same bytes twice as much as is
-	 * practical without introducing additional branching. */
+    /* Align to a multiple of 8 so we can fill 64 bits at a time,
+     * and avoid writing the same bytes twice as much as is
+     * practical without introducing additional branching. */
 
-	k = 24 + ((sel4runtime_uintptr_t)s & 4);
-	s += k;
-	n -= k;
+    k = 24 + ((sel4runtime_uintptr_t)s & 4);
+    s += k;
+    n -= k;
 
-	/* If this loop is reached, 28 tail bytes have already been
-	 * filled, so any remainder when n drops below 32 can be
-	 * safely ignored. */
+    /* If this loop is reached, 28 tail bytes have already been
+     * filled, so any remainder when n drops below 32 can be
+     * safely ignored. */
 
-	u64 c64 = c32 | ((u64)c32 << 32);
-	for (; n >= 32; n-=32, s+=32) {
-		*(u64 *)(s+0) = c64;
-		*(u64 *)(s+8) = c64;
-		*(u64 *)(s+16) = c64;
-		*(u64 *)(s+24) = c64;
-	}
+    u64 c64 = c32 | ((u64)c32 << 32);
+    for (; n >= 32; n -= 32, s += 32) {
+        *(u64 *)(s + 0) = c64;
+        *(u64 *)(s + 8) = c64;
+        *(u64 *)(s + 16) = c64;
+        *(u64 *)(s + 24) = c64;
+    }
 #else
-	/* Pure C fallback with no aliasing violations. */
-	for (; n; n--, s++) *s = c;
+    /* Pure C fallback with no aliasing violations. */
+    for (; n; n--, s++) {
+        *s = c;
+    }
 #endif
 
-	return dest;
+    return dest;
 }
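For reference, not part of the patch: c32 above is the fill byte broadcast into
every byte lane, because ((u32)-1)/255 is exactly 0x01010101, and c64 is two
copies of c32. A standalone check of that arithmetic, illustrative only:

#include <inttypes.h>
#include <stdio.h>

/* 0xFFFFFFFF / 255 == 0x01010101, so multiplying by the fill byte
 * replicates it into all four byte lanes; the 64-bit pattern is just
 * two copies of the 32-bit one. */
int main(void)
{
    int c = 0xAB;
    uint32_t c32 = ((uint32_t)-1) / 255 * (unsigned char)c;
    uint64_t c64 = c32 | ((uint64_t)c32 << 32);

    printf("c32 = 0x%08" PRIx32 "\n", c32);  /* c32 = 0xabababab */
    printf("c64 = 0x%016" PRIx64 "\n", c64); /* c64 = 0xabababababababab */
    return 0;
}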