blob: 106797646675b8625d8e6fa8e2e57b96a7683e8d [file] [log] [blame]
TrustworthySystemscedc0562014-07-22 14:11:16 +10001/*
2 * Copyright 2014, NICTA
3 *
4 * This software may be distributed and modified according to the terms of
5 * the BSD 2-Clause license. Note that NO WARRANTY is provided.
6 * See "LICENSE_BSD2.txt" for details.
7 *
8 * @TAG(NICTA_BSD)
9 */
10
11#include <autoconf.h>
12
13#include <simple/simple.h>
14#include <sel4platsupport/io.h>
Alexander Kroh637e0d42015-06-26 13:44:02 +100015#ifdef ARCH_ARM
16#include <platsupport/clock.h>
17#include <platsupport/mux.h>
18#endif
TrustworthySystemscedc0562014-07-22 14:11:16 +100019#include <utils/util.h>
Anna Lyons3a9da542016-02-29 14:07:08 +110020#include <vspace/page.h>
TrustworthySystemscedc0562014-07-22 14:11:16 +100021
22#include <vspace/vspace.h>
23#include <vka/capops.h>
24
Matthew Fernandez92702162016-06-10 15:35:43 +100025#include <stdint.h>
TrustworthySystemscedc0562014-07-22 14:11:16 +100026#include <stdlib.h>
27
/* Book-keeping record for one device-memory mapping handed out to a user of
 * the io mapper. Nodes live in a doubly-linked list rooted in the mapper
 * cookie so the mapping can be found again at unmap time. */
typedef struct io_mapping {
    /* 1 if we mapped this into the vspace ourselves, or 0
     * if simple just gave us the vaddr */
    int was_mapped;
    /* address we returned to the user */
    void *returned_addr;
    /* base address of the mapping with respect to the vspace */
    void *mapped_addr;
    /* number of frames backing the mapping */
    int num_pages;
    /* size of each backing frame, in bits (log2 of the byte size) */
    int page_size;
    /* array of num_pages frame caps backing the mapping; NULL when the
     * vaddr came directly from simple (was_mapped == 0) */
    seL4_CPtr *caps;
    /* intrusive doubly-linked list links */
    struct io_mapping *next, *prev;
} io_mapping_t;
41
/* Private state for one io mapper instance: the interfaces used to obtain,
 * map and track frames, plus the head of the list of live mappings. */
typedef struct sel4platsupport_io_mapper_cookie {
    vspace_t vspace;    /* vspace used to map frames in */
    simple_t simple;    /* source of device frame caps / vaddrs */
    vka_t vka;          /* allocator for cslots */
    io_mapping_t *head; /* list of mappings handed out so far */
} sel4platsupport_io_mapper_cookie_t;
48
Alexander Kroh637e0d42015-06-26 13:44:02 +100049static io_mapping_t *new_unmapped_node(void *addr)
50{
TrustworthySystemscedc0562014-07-22 14:11:16 +100051 io_mapping_t *ret = (io_mapping_t*)malloc(sizeof(*ret));
52 if (!ret) {
53 return NULL;
54 }
55 *ret = (io_mapping_t) {
56 .was_mapped = 0,
Alexander Kroh637e0d42015-06-26 13:44:02 +100057 .returned_addr = addr,
58 .next = NULL,
59 .prev = NULL,
60 .caps = NULL,
61 .mapped_addr = NULL
TrustworthySystemscedc0562014-07-22 14:11:16 +100062 };
63 return ret;
64}
65
66static void
67_insert_node(sel4platsupport_io_mapper_cookie_t *io_mapper, io_mapping_t *node)
68{
69 node->prev = NULL;
70 node->next = io_mapper->head;
71 if (io_mapper->head) {
72 io_mapper->head->prev = node;
73 }
74 io_mapper->head = node;
75}
76
77static io_mapping_t *
78_find_node(sel4platsupport_io_mapper_cookie_t *io_mapper, void *returned_addr)
79{
80 io_mapping_t *current;
81 for (current = io_mapper->head; current; current = current->next) {
82 if (current->returned_addr == returned_addr) {
83 return current;
84 }
85 }
86 return NULL;
87}
88
89static void
90_remove_node(sel4platsupport_io_mapper_cookie_t *io_mapper, io_mapping_t *node)
91{
92 if (node->prev) {
93 node->prev->next = node->next;
94 } else {
95 assert(io_mapper->head == node);
96 io_mapper->head = node->next;
97 }
98 if (node->next) {
99 node->next->prev = node->prev;
100 }
101}
102
103static void
104_free_node(io_mapping_t *node)
105{
106 if (node->caps) {
107 free(node->caps);
108 }
109 free(node);
110}
111
/* Unlink a node from the mapper's list and free it (and its caps array). */
static void
_remove_free_node(sel4platsupport_io_mapper_cookie_t *io_mapper, io_mapping_t *node)
{
    _remove_node(io_mapper, node);
    _free_node(node);
}
118
119static void *
120sel4platsupport_map_paddr_with_page_size(sel4platsupport_io_mapper_cookie_t *io_mapper, uintptr_t paddr, size_t size, int page_size_bits, int cached)
121{
122
123 vka_t *vka = &io_mapper->vka;
124 vspace_t *vspace = &io_mapper->vspace;
125 simple_t *simple = &io_mapper->simple;
126
127 /* search at start of page */
128 int page_size = BIT(page_size_bits);
129 uintptr_t start = ROUND_DOWN(paddr, page_size);
Adrian Danis5f4e7d12016-03-03 15:27:15 +1100130 uintptr_t offset = paddr - start;
TrustworthySystemscedc0562014-07-22 14:11:16 +1000131 size += offset;
132
133 /* calculate number of pages */
Adrian Danis5f4e7d12016-03-03 15:27:15 +1100134 unsigned int num_pages = ROUND_UP(size, page_size) >> page_size_bits;
TrustworthySystemscedc0562014-07-22 14:11:16 +1000135 assert(num_pages << page_size_bits >= size);
136
137 seL4_CPtr *frames = (seL4_CPtr*)malloc(sizeof(*frames) * num_pages);
138 if (!frames) {
Anna Lyons67c869c2016-03-30 11:16:41 +1100139 ZF_LOGE("Failed to allocate array of size %zu", sizeof(*frames) * num_pages);
TrustworthySystemscedc0562014-07-22 14:11:16 +1000140 return NULL;
141 }
142 io_mapping_t *node = (io_mapping_t*)malloc(sizeof(*node));
143 if (!node) {
Anna Lyons67c869c2016-03-30 11:16:41 +1100144 ZF_LOGE("Failed to malloc of size %zu", sizeof(*node));
TrustworthySystemscedc0562014-07-22 14:11:16 +1000145 free(frames);
146 return NULL;
147 }
148
149 /* get all of the physical frame caps */
Adrian Danis5f4e7d12016-03-03 15:27:15 +1100150 for (unsigned int i = 0; i < num_pages; i++) {
TrustworthySystemscedc0562014-07-22 14:11:16 +1000151 /* allocate a cslot */
152 int error = vka_cspace_alloc(vka, &frames[i]);
153 if (error) {
Anna Lyons67c869c2016-03-30 11:16:41 +1100154 ZF_LOGE("cspace alloc failed");
TrustworthySystemscedc0562014-07-22 14:11:16 +1000155 assert(error == 0);
156 /* we don't clean up as everything has gone to hell */
157 return NULL;
158 }
159
160 /* create a path */
161 cspacepath_t path;
162 vka_cspace_make_path(vka, frames[i], &path);
163
164 error = simple_get_frame_cap(simple, (void*)start + (i * page_size), page_size_bits, &path);
165
166 if (error) {
167 /* free this slot, and then do general cleanup of the rest of the slots.
168 * this avoids a needless seL4_CNode_Delete of this slot, as there is no
169 * cap in it */
170 vka_cspace_free(vka, frames[i]);
171 num_pages = i;
172 goto error;
173 }
174
175 }
176
177 /* Now map the frames in */
Anna Lyonsbd70dd32014-09-10 17:10:46 +1000178 void *vaddr = vspace_map_pages(vspace, frames, NULL, seL4_AllRights, num_pages, page_size_bits, cached);
TrustworthySystemscedc0562014-07-22 14:11:16 +1000179 if (vaddr) {
180 /* fill out and insert the node */
181 *node = (io_mapping_t) {
182 .mapped_addr = vaddr,
Alexander Kroh637e0d42015-06-26 13:44:02 +1000183 .returned_addr = vaddr + offset,
184 .num_pages = num_pages,
185 .page_size = page_size_bits,
186 .caps = frames
TrustworthySystemscedc0562014-07-22 14:11:16 +1000187 };
188 _insert_node(io_mapper, node);
189 return vaddr + offset;
190 }
191error:
Adrian Danis5f4e7d12016-03-03 15:27:15 +1100192 for (unsigned int i = 0; i < num_pages; i++) {
TrustworthySystemscedc0562014-07-22 14:11:16 +1000193 cspacepath_t path;
194 vka_cspace_make_path(vka, frames[i], &path);
195 vka_cnode_delete(&path);
196 vka_cspace_free(vka, frames[i]);
197 }
198 free(frames);
199 free(node);
200 return NULL;
201}
202
203static void *
204sel4platsupport_get_vaddr_with_page_size(sel4platsupport_io_mapper_cookie_t *io_mapper, uintptr_t paddr, size_t size, int page_size_bits)
205{
206 simple_t *simple = &io_mapper->simple;
207
208 /* search at start of page */
209 int page_size = BIT(page_size_bits);
210 uintptr_t start = ROUND_DOWN(paddr, page_size);
Adrian Danis5f4e7d12016-03-03 15:27:15 +1100211 uintptr_t offset = paddr - start;
TrustworthySystemscedc0562014-07-22 14:11:16 +1000212 size += offset;
213
214 /* calculate number of pages */
Adrian Danis5f4e7d12016-03-03 15:27:15 +1100215 unsigned int num_pages = ROUND_UP(size, page_size) >> page_size_bits;
TrustworthySystemscedc0562014-07-22 14:11:16 +1000216 assert(num_pages << page_size_bits >= size);
217
218 void *first_vaddr = simple_get_frame_vaddr(simple, (void*)start, page_size_bits);
219 if (!first_vaddr) {
220 return NULL;
221 }
Adrian Danis5f4e7d12016-03-03 15:27:15 +1100222 for (unsigned int i = 1; i < num_pages; i++) {
TrustworthySystemscedc0562014-07-22 14:11:16 +1000223 void *vaddr = simple_get_frame_vaddr(simple, (void*)start + (i * page_size), page_size_bits);
224 if (first_vaddr + (i * page_size) != vaddr) {
225 return NULL;
226 }
227 }
228 io_mapping_t *node = new_unmapped_node(first_vaddr + offset);
229 if (!node) {
Anna Lyons67c869c2016-03-30 11:16:41 +1100230 ZF_LOGE("Failed to allocate node to track mapping");
TrustworthySystemscedc0562014-07-22 14:11:16 +1000231 return NULL;
232 }
233 _insert_node(io_mapper, node);
234 return first_vaddr + offset;
235}
236
237static void *
238sel4platsupport_map_paddr(void *cookie, uintptr_t paddr, size_t size, int cached, ps_mem_flags_t flags)
239{
240 (void)flags; // we don't support these
241 /* The simple interface supports two ways of getting physical addresses.
242 * Unfortunately it tends that precisely one of them will actually be
243 * implemented.
244 * One gives us the cap and we have to map it in, the other gives us the
245 * mapped address. We try the getting the cap technique first as that gives
246 * us better control since we can ensure the correct caching policy.
247 * If that fails then we attempt to get the mappings and ensure that they
248 * are in contiguous virtual address if there is more than one.
249 * In both cases we will try and use the largest frame size possible */
250 sel4platsupport_io_mapper_cookie_t* io_mapper = (sel4platsupport_io_mapper_cookie_t*)cookie;
Alexander Kroh637e0d42015-06-26 13:44:02 +1000251
TrustworthySystemscedc0562014-07-22 14:11:16 +1000252
253 int frame_size_index = 0;
254 /* find the largest reasonable frame size */
Anna Lyons3a9da542016-02-29 14:07:08 +1100255 while (frame_size_index + 1 < SEL4_NUM_PAGE_SIZES) {
256 if (size >> sel4_page_sizes[frame_size_index + 1] == 0) {
TrustworthySystemscedc0562014-07-22 14:11:16 +1000257 break;
258 }
259 frame_size_index++;
260 }
Alexander Kroh637e0d42015-06-26 13:44:02 +1000261
TrustworthySystemscedc0562014-07-22 14:11:16 +1000262 /* try mapping in this and all smaller frame sizes until something works */
263 for (int i = frame_size_index; i >= 0; i--) {
Anna Lyons3a9da542016-02-29 14:07:08 +1100264 void *result = sel4platsupport_map_paddr_with_page_size(io_mapper, paddr, size, sel4_page_sizes[i], cached);
TrustworthySystemscedc0562014-07-22 14:11:16 +1000265 if (result) {
266 return result;
267 }
268 }
269
270 /* try the get_frame_vaddr technique */
271 for (int i = frame_size_index; i >= 0; i--) {
Anna Lyons3a9da542016-02-29 14:07:08 +1100272 void *result = sel4platsupport_get_vaddr_with_page_size(io_mapper, paddr, size, sel4_page_sizes[i]);
TrustworthySystemscedc0562014-07-22 14:11:16 +1000273 if (result) {
274 return result;
275 }
276 }
277
278 /* shit out of luck */
Anna Lyons67c869c2016-03-30 11:16:41 +1100279 ZF_LOGE("Failed to find a way to map address %p", (void *)paddr);
TrustworthySystemscedc0562014-07-22 14:11:16 +1000280 return NULL;
281}
282
283static void
284sel4platsupport_unmap_vaddr(void *cookie, void *vaddr, size_t size)
285{
286 (void)size;
287 sel4platsupport_io_mapper_cookie_t* io_mapper = (sel4platsupport_io_mapper_cookie_t*)cookie;
288
289 io_mapping_t *mapping;
290
291 mapping = _find_node(io_mapper, vaddr);
292 if (!mapping) {
Anna Lyons67c869c2016-03-30 11:16:41 +1100293 ZF_LOGF("Tried to unmap vaddr %p, which was never mapped in", vaddr);
TrustworthySystemscedc0562014-07-22 14:11:16 +1000294 return;
295 }
296 if (!mapping->was_mapped) {
297 /* this vaddr was given directly from simple, so nothing to unmap */
298 _remove_free_node(io_mapper, mapping);
299 return;
300 }
301
302 vspace_t *vspace = &io_mapper->vspace;
303 vka_t *vka = &io_mapper->vka;
304
Alexander Kroh637e0d42015-06-26 13:44:02 +1000305 vspace_unmap_pages(vspace, mapping->mapped_addr, mapping->num_pages, mapping->page_size,
306 VSPACE_PRESERVE);
TrustworthySystemscedc0562014-07-22 14:11:16 +1000307
308 for (int i = 0; i < mapping->num_pages; i++) {
309 cspacepath_t path;
310 vka_cspace_make_path(vka, mapping->caps[i], &path);
311 vka_cnode_delete(&path);
312 vka_cspace_free(vka, mapping->caps[i]);
313 }
314
315 _remove_free_node(io_mapper, mapping);
316}
317
318int
319sel4platsupport_new_io_mapper(simple_t simple, vspace_t vspace, vka_t vka, ps_io_mapper_t *io_mapper)
320{
321 sel4platsupport_io_mapper_cookie_t *cookie;
322 cookie = (sel4platsupport_io_mapper_cookie_t*)malloc(sizeof(*cookie));
323 if (!cookie) {
Anna Lyons67c869c2016-03-30 11:16:41 +1100324 ZF_LOGE("Failed to allocate %zu bytes", sizeof(*cookie));
TrustworthySystemscedc0562014-07-22 14:11:16 +1000325 return -1;
326 }
327 *cookie = (sel4platsupport_io_mapper_cookie_t) {
328 .vspace = vspace,
Alexander Kroh637e0d42015-06-26 13:44:02 +1000329 .simple = simple,
330 .vka = vka
331 };
TrustworthySystemscedc0562014-07-22 14:11:16 +1000332 *io_mapper = (ps_io_mapper_t) {
333 .cookie = cookie,
Alexander Kroh637e0d42015-06-26 13:44:02 +1000334 .io_map_fn = sel4platsupport_map_paddr,
335 .io_unmap_fn = sel4platsupport_unmap_vaddr
336 };
TrustworthySystemscedc0562014-07-22 14:11:16 +1000337 return 0;
338}
Alexander Kroh637e0d42015-06-26 13:44:02 +1000339
340int
341sel4platsupport_new_io_ops(simple_t simple, vspace_t vspace, vka_t vka, ps_io_ops_t *io_ops)
342{
343 int err;
344 err = sel4platsupport_new_io_mapper(simple, vspace, vka, &io_ops->io_mapper);
345 if (err) {
346 return err;
347 }
348#ifdef ARCH_ARM
349 /* We don't consider these as failures */
350 err = clock_sys_init(io_ops, &io_ops->clock_sys);
351 (void)err;
352 err = mux_sys_init(io_ops, &io_ops->mux_sys);
353 (void)err;
354#endif
355 return 0;
356}
357
358