#include <aos/aos.h>
#include <spawn/spawn.h>

#include <elf/elf.h>
#include <aos/dispatcher_arch.h>
#include <barrelfish_kpi/paging_arm_v7.h>
#include <barrelfish_kpi/domain_params.h>
#include <spawn/multiboot.h>

extern struct bootinfo *bi;

/// Initialize the cspace for a given module.
static errval_t init_cspace(struct spawninfo *si)
{
    // Create an L1 CNode according to the process's spawn-info.
    struct cnoderef l1_cnode;
    CHECK(cnode_create_l1(&si->l1_cnode, &l1_cnode));

    // Go over all root-CNode slots and create foreign L2 CNodes in them
    // (using the L1 CNode created above). Note: `<` and not `<=`, since
    // l2_cnode_list has exactly ROOTCN_SLOTS_USER entries.
    for (int i = 0; i < ROOTCN_SLOTS_USER; i++) {
        CHECK(cnode_create_foreign_l2(si->l1_cnode, i, &si->l2_cnode_list[i]));
    }

    DBG(DETAILED, "1. Map TASKCN\n");
    // TASKCN contains information about the process. Set its SLOT_ROOTCN
    // (which holds a capability to the process's root L1 CNode) to point
    // to our L1 CNode.
    struct capref taskcn_slot_rootcn = {
        .cnode = si->l2_cnode_list[ROOTCN_SLOT_TASKCN],
        .slot = TASKCN_SLOT_ROOTCN
    };
    CHECK(cap_copy(taskcn_slot_rootcn, si->l1_cnode));

    DBG(DETAILED, "2. Create RAM caps for SLOT_BASE_PAGE_CN\n");
    // Give SLOT_BASE_PAGE_CN some memory by iterating over all L2 slots.
    struct capref rootcn_slot_base_page_cn = {
        .cnode = si->l2_cnode_list[ROOTCN_SLOT_BASE_PAGE_CN]
    };
    for (rootcn_slot_base_page_cn.slot = 0;
         rootcn_slot_base_page_cn.slot < L2_CNODE_SLOTS;
         rootcn_slot_base_page_cn.slot++) {
        struct capref memory;

        // Allocate the memory.
        CHECK(ram_alloc(&memory, BASE_PAGE_SIZE));

        // Copy the RAM capability into our SLOT_BASE_PAGE_CN slot.
        CHECK(cap_copy(rootcn_slot_base_page_cn, memory));

        // Cleanup: destroy the original capability again.
        CHECK(cap_destroy(memory));
    }

    return SYS_ERR_OK;
}

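/*
 * A note on the pattern used throughout this file: a `struct capref` names
 * a capability by (CNode, slot) rather than by pointer, so handing a
 * capability to the child is always a cap_copy() to a capref whose cnode
 * field refers to one of the child's L2 CNodes. A minimal sketch (slot
 * constants illustrative):
 *
 *     struct capref dst = {
 *         .cnode = si->l2_cnode_list[ROOTCN_SLOT_TASKCN], // child L2 CNode
 *         .slot  = TASKCN_SLOT_ROOTCN,                    // slot within it
 *     };
 *     cap_copy(dst, src); // child and parent now share the capability
 */
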
static errval_t slot_callback(struct spawninfo *si, struct capref cap)
{
    DBG(DETAILED, "Copy slot to child\n");
    struct capref child = {
        .cnode = si->l2_cnode_list[ROOTCN_SLOT_PAGECN],
        .slot = si->next_slot++
    };
    return cap_copy(child, cap);
}

/// Initialize the vspace for a given module.
static errval_t init_vspace(struct spawninfo *si)
{
    // Create an L1 pagetable in the current VSpace.
    struct capref l1_pt;
    CHECK(slot_alloc(&l1_pt));
    CHECK(vnode_create(l1_pt, ObjType_VNode_ARM_l1));

    // Set up the new process's capability.
    si->process_l1_pt.cnode = si->l2_cnode_list[ROOTCN_SLOT_PAGECN];
    si->process_l1_pt.slot = PAGECN_SLOT_VROOT;

    // Copy the page table to the new process.
    CHECK(cap_copy(si->process_l1_pt, l1_pt));

    // Set the spawned process's paging state.
    // XXX: (1 << 25) = 32 MiB is still a magic constant at this point,
    // presumably leaving the range below free for the statically mapped
    // ELF segments. TODO: Find a valid explanation, document it here, and
    // turn it into a named define shared with paging_init().
    CHECK(paging_init_state(&si->paging_state, (1 << 25),
                            l1_pt, get_default_slot_allocator()));

    // Add the callback function for slot allocation.
    si->slot_callback = slot_callback;
    si->paging_state.spawninfo = si;
    si->next_slot = PAGECN_SLOT_VROOT + 1;

    return SYS_ERR_OK;
}

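/*
 * After init_vspace() the child's address space can be manipulated from
 * the parent just like our own, only through si->paging_state. A hedged
 * usage sketch (this is exactly what init_dispatcher() does below):
 *
 *     void *buf;
 *     struct capref frame;
 *     size_t retsize;
 *     frame_alloc(&frame, BASE_PAGE_SIZE, &retsize);
 *     // Reserves VA space in the child and installs the mapping through
 *     // the child's L1/L2 tables; slot_callback mirrors the new mapping
 *     // caps into the child's PAGECN.
 *     paging_map_frame(&si->paging_state, &buf, retsize, frame, NULL, NULL);
 */
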
/// Initialize the dispatcher for a given module.
static errval_t init_dispatcher(struct spawninfo *si)
{
    DBG(DETAILED, " Allocate a capability for the dispatcher.\n");
    CHECK(slot_alloc(&si->dispatcher));

    DBG(DETAILED, " Create the dispatcher.\n");
    CHECK(dispatcher_create(si->dispatcher));

    DBG(DETAILED, " Set an endpoint for the dispatcher.\n");
    struct capref dispatcher_end;
    CHECK(slot_alloc(&dispatcher_end));
    CHECK(cap_retype(dispatcher_end, si->dispatcher, 0, ObjType_EndPoint,
                     0, 1));

    DBG(DETAILED, " Create a memory frame for the dispatcher.\n");
    size_t retsize;
    struct capref dispatcher_memframe;
    CHECK(frame_alloc(&dispatcher_memframe, DISPATCHER_SIZE, &retsize));

    assert(retsize == DISPATCHER_SIZE);

    DBG(DETAILED, " Copy the dispatcher into the spawned process's CSpace.\n");
    struct capref spawned_dispatcher = {
        .cnode = si->l2_cnode_list[ROOTCN_SLOT_TASKCN],
        .slot = TASKCN_SLOT_DISPATCHER
    };
    CHECK(cap_copy(spawned_dispatcher, si->dispatcher));

    DBG(DETAILED, " Copy the endpoint into the spawned process's CSpace.\n");
    struct capref spawned_endpoint = {
        .cnode = si->l2_cnode_list[ROOTCN_SLOT_TASKCN],
        .slot = TASKCN_SLOT_SELFEP
    };
    CHECK(cap_copy(spawned_endpoint, dispatcher_end));

    DBG(DETAILED, " Copy the dispatcher's mem frame into the new "
                  "process's CSpace.\n");
    si->spawned_disp_memframe.cnode = si->l2_cnode_list[ROOTCN_SLOT_TASKCN];
    si->spawned_disp_memframe.slot = TASKCN_SLOT_DISPFRAME;

    CHECK(cap_copy(si->spawned_disp_memframe, dispatcher_memframe));

    DBG(DETAILED, " Map the dispatcher's memory frame into the "
                  "current VSpace.\n");
    void *disp_current_vaddr;
    CHECK(paging_map_frame(get_current_paging_state(), &disp_current_vaddr,
                           DISPATCHER_SIZE, dispatcher_memframe, NULL, NULL));

    DBG(DETAILED, " Map the dispatcher's memory frame into the "
                  "spawned VSpace.\n");
    void *disp_spawn_vaddr;
    CHECK(paging_map_frame(&si->paging_state, &disp_spawn_vaddr,
                           DISPATCHER_SIZE, dispatcher_memframe, NULL, NULL));

    DBG(DETAILED, " Finalize the dispatcher (cf. book section 4.15).\n");
    // Get a reference to the dispatcher, name it, mark it disabled at
    // first, record the dispatcher frame's address as the new process will
    // see it, and make it trap on FPU instructions.
    struct dispatcher_shared_generic *disp =
        get_dispatcher_shared_generic((dispatcher_handle_t)disp_current_vaddr);
    disp->udisp = (lvaddr_t)disp_spawn_vaddr;
    disp->disabled = 1;
    disp->fpu_trap = 1;
    strncpy(disp->name, si->binary_name, DISP_NAME_LEN);

    DBG(DETAILED, " Set the core ID of the process and zero the exception "
                  "handler frame (and its size) and header.\n");
    struct dispatcher_generic *disp_gen =
        get_dispatcher_generic((dispatcher_handle_t)disp_current_vaddr);
    disp_gen->core_id = 0;
    disp_gen->eh_frame = 0;
    disp_gen->eh_frame_size = 0;
    disp_gen->eh_frame_hdr = 0;
    disp_gen->eh_frame_hdr_size = 0;

    DBG(DETAILED, " Set the base address of the GOT in the new VSpace.\n");
    struct dispatcher_shared_arm *disp_arm =
        get_dispatcher_shared_arm((dispatcher_handle_t)disp_current_vaddr);
    disp_arm->got_base = si->u_got;

    // Point the PIC register of both register save areas at the GOT, set
    // the entry point, and start the domain in user mode with FIQs masked.
    arch_registers_state_t *enabled_area =
        dispatcher_get_enabled_save_area(
            (dispatcher_handle_t)disp_current_vaddr);
    arch_registers_state_t *disabled_area =
        dispatcher_get_disabled_save_area(
            (dispatcher_handle_t)disp_current_vaddr);
    enabled_area->regs[REG_OFFSET(PIC_REGISTER)] = si->u_got;
    disabled_area->regs[REG_OFFSET(PIC_REGISTER)] = si->u_got;
    disabled_area->named.pc = si->entry_addr;
    enabled_area->named.cpsr = CPSR_F_MASK | ARM_MODE_USR;
    disabled_area->named.cpsr = CPSR_F_MASK | ARM_MODE_USR;

    DBG(DETAILED, " Store the enabled area in the spawn info; we need it to "
                  "init the environment.\n");
    si->enabled_area = enabled_area;

    return SYS_ERR_OK;
}

/// Initialize the environment for a given module.
static errval_t init_env(struct spawninfo *si, struct mem_region *module)
{
    DBG(DETAILED, " Retrieve arguments from the module and allocate memory "
                  "for them.\n");
    const char *args = multiboot_module_opts(module);
    DBG(DETAILED, " Found the following command line arguments: %s\n", args);
    size_t region_size = ROUND_UP(sizeof(struct spawn_domain_params) +
                                  strlen(args) + 1, BASE_PAGE_SIZE);
    struct capref mem_frame;
    size_t retsize;
    CHECK(frame_alloc(&mem_frame, region_size, &retsize));

    assert(retsize == region_size);

    DBG(DETAILED, " Map the arguments into the current VSpace.\n");
    void *args_addr;
    CHECK(paging_map_frame(get_current_paging_state(), &args_addr,
                           retsize, mem_frame, NULL, NULL));

    DBG(DETAILED, " Copy the arguments frame into the spawned process's "
                  "CSpace.\n");
    struct capref spawn_args = {
        .cnode = si->l2_cnode_list[ROOTCN_SLOT_TASKCN],
        .slot = TASKCN_SLOT_ARGSPAGE
    };
    CHECK(cap_copy(spawn_args, mem_frame));

    DBG(DETAILED, " Map the arguments into the spawned process's VSpace.\n");
    void *spawn_args_addr;
    CHECK(paging_map_frame(&si->paging_state, &spawn_args_addr, retsize,
                           mem_frame, NULL, NULL));

    DBG(DETAILED, " Complete spawn_domain_params.\n");
    struct spawn_domain_params *parameters =
        (struct spawn_domain_params *)args_addr;
    memset(&parameters->argv[0], 0, sizeof(parameters->argv));
    memset(&parameters->envp[0], 0, sizeof(parameters->envp));

    DBG(DETAILED, " Copy the argument string behind the parameter struct.\n");
    char *param_base =
        (char *)parameters + sizeof(struct spawn_domain_params);
    char *param_last = param_base;
    lvaddr_t spawn_param_base =
        (lvaddr_t)spawn_args_addr + sizeof(struct spawn_domain_params);
    strcpy(param_base, args);

    DBG(DETAILED, " Split the argument string and set up argv.\n");
    // Walk the copied string, replacing each space by a NUL terminator and
    // recording the start of every argument. The argv entries must be
    // addresses in the child's VSpace, hence the spawn_param_base
    // arithmetic. (Consecutive spaces are not collapsed.)
    char *current_param = param_base;
    size_t n_args = 0;
    while (*current_param != 0) {
        if (*current_param == ' ') {
            parameters->argv[n_args] =
                (void *)spawn_param_base + (param_last - param_base);
            *current_param = 0;
            n_args += 1;
            current_param += 1;
            param_last = current_param;
        }
        current_param += 1;
    }
    parameters->argv[n_args] =
        (void *)spawn_param_base + (param_last - param_base);
    n_args += 1;
    parameters->argc = n_args;
    // The child finds its spawn_domain_params pointer in r0.
    si->enabled_area->named.r0 = (uint32_t)spawn_args_addr;

    DBG(DETAILED, " Leave the remaining parameters unset.\n");
    parameters->vspace_buf = NULL;
    parameters->vspace_buf_len = 0;
    parameters->tls_init_base = NULL;
    parameters->tls_init_len = 0;
    parameters->tls_total_len = 0;
    parameters->pagesize = 0;

    return SYS_ERR_OK;
}

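/*
 * Worked example for the argument setup above (layout illustrative): for
 * the command line "hello foo bar" the args page contains
 *
 *     [struct spawn_domain_params][h e l l o \0 f o o \0 b a r \0]
 *
 * and the child-side argv becomes
 *
 *     argv[0] -> "hello", argv[1] -> "foo", argv[2] -> "bar", argc = 3.
 */
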
/// Callback for elf_load: allocate and map memory for one ELF section.
static errval_t elf_alloc_sect_func(void *state, genvaddr_t base, size_t size,
                                    uint32_t flags, void **ret)
{
    DBG(DETAILED, "start elf_alloc_sect_func at %"PRIxGENVADDR"\n", base);
    size_t alignment_offset = BASE_PAGE_OFFSET(base);
    // Align the base address and size to page boundaries.
    genvaddr_t base_aligned = base - alignment_offset;
    size_t size_aligned = ROUND_UP(size + alignment_offset, BASE_PAGE_SIZE);

    // Allocate a memory frame for this ELF section.
    struct capref frame;
    size_t retsize;
    CHECK(frame_alloc(&frame, size_aligned, &retsize));

    assert(retsize == size_aligned);

    // Map the frame into the spawned process's VSpace at the address the
    // section expects.
    CHECK(paging_map_fixed_attr(&((struct spawninfo *)state)->paging_state,
                                base_aligned, frame, retsize, flags));

    // Map it into the current VSpace as well, so elf_load can write the
    // section contents.
    CHECK(paging_map_frame(get_current_paging_state(), ret, retsize,
                           frame, NULL, NULL));

    // Correct the returned pointer for the alignment adjustment.
    *ret += alignment_offset;
    DBG(DETAILED, "end elf_alloc_sect_func; returning buffer at "
                  "address %p\n", *ret);
    return SYS_ERR_OK;
}
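
/*
 * Alignment example for the section allocator above (numbers illustrative):
 * a section at base = 0x10234 with size = 0x2000 gives
 * alignment_offset = 0x234, base_aligned = 0x10000 and
 * size_aligned = ROUND_UP(0x2234, 4 KiB) = 0x3000, i.e. three pages;
 * *ret is then bumped by 0x234 so the caller writes to the section's true
 * start inside the first page.
 */
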
errval_t map_paging_state_to_child(struct paging_state *st);
errval_t map_paging_state_to_child(struct paging_state *st)
{
    // Count the heap-allocated nodes of the free-vspace list (the head
    // node is embedded in struct paging_state and is copied with it).
    size_t nodes = 0;
    struct paging_frame_node *x = st->free_vspace.next;
    while (x != NULL) {
        nodes++;
        x = x->next;
    }

    struct capref frame;
    size_t ret;
    CHECK(frame_alloc(&frame, sizeof(struct paging_state) +
                      nodes * sizeof(struct paging_frame_node), &ret));
    DBG(DETAILED, "map_paging_state_to_child: %u extra nodes\n",
        (unsigned)nodes);

    // Map the snapshot at a fixed, well-known address in the child...
    CHECK(paging_map_fixed(st, 0x1000, frame, ret));
    // ...and map the same frame into our own VSpace so we can fill it in.
    void *our_side;
    CHECK(paging_map_frame(get_current_paging_state(), &our_side, ret,
                           frame, NULL, NULL));

    struct paging_state *mapped_st = (struct paging_state *)our_side;
    *mapped_st = *st;
    // TODO: pointer members of the copied state (slot_alloc, the slab
    // allocator's internals, spawninfo) still refer to parent-side objects
    // and must be re-initialized by the child before use.

    // The list nodes live right behind the paging_state in the shared
    // frame. Their next pointers must be valid in the *child's* mapping
    // (base 0x1000), not in ours, so compute them relative to that base.
    struct paging_frame_node *mem =
        (struct paging_frame_node *)&mapped_st[1];
    struct paging_frame_node *child_mem = (struct paging_frame_node *)
        (0x1000 + sizeof(struct paging_state));
    mapped_st->free_vspace.next = (nodes > 0) ? child_mem : NULL;
    x = st->free_vspace.next;
    for (size_t i = 0; i < nodes; i++) {
        mem[i] = *x;
        mem[i].next = (x->next != NULL) ? &child_mem[i + 1] : NULL;
        x = x->next;
    }

    return SYS_ERR_OK;
}

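/*
 * Design note on the handover above: the child receives a byte-for-byte
 * snapshot of its paging_state at the fixed address 0x1000. The value
 * members (the embedded free_vspace head, the l2_page_tables init flags)
 * are usable as-is; everything that refers to parent-side state likely has
 * to be translated or rebuilt on the child's side before the state can be
 * used for further mappings.
 */
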
// TODO(M4): Build and pass a messaging channel to your child process
errval_t spawn_load_by_name(void *binary_name, struct spawninfo *si)
{
    DBG(VERBOSE, "spawn start_child: starting: %s\n", binary_name);

    // Init spawninfo.
    memset(si, 0, sizeof(*si));
    si->binary_name = binary_name;

    DBG(DETAILED, "I: Getting the binary from the multiboot image.\n");
    struct mem_region *module = multiboot_find_module(bi, binary_name);
    if (module == NULL) {
        DBG(ERR, "multiboot: Could not find module %s\n", binary_name);
        return SPAWN_ERR_FIND_MODULE;
    }

    DBG(DETAILED, "II: Mapping the multiboot module into our address "
                  "space.\n");
    struct capref child_frame = {
        .cnode = cnode_module,
        .slot = module->mrmod_slot
    };

    struct frame_identity frame_id;
    CHECK(frame_identify(child_frame, &frame_id));

    lvaddr_t elf_addr;
    CHECK(paging_map_frame(get_current_paging_state(), (void **)&elf_addr,
                           frame_id.bytes, child_frame, NULL, NULL));

    DBG(VERBOSE, "ELF magic number: %i %c%c%c\n",
        *(char *)elf_addr,
        *((char *)elf_addr + 1),
        *((char *)elf_addr + 2),
        *((char *)elf_addr + 3));

    DBG(DETAILED, "III: Set up the child's cspace.\n");
    CHECK(init_cspace(si));

    DBG(DETAILED, "IV: Set up the child's vspace.\n");
    CHECK(init_vspace(si));

    DBG(DETAILED, "V: Load the ELF binary.\n");
    CHECK(elf_load(EM_ARM, elf_alloc_sect_func, (void *)si, elf_addr,
                   frame_id.bytes, &si->entry_addr));

    struct Elf32_Shdr *global_offset_table =
        elf32_find_section_header_name(elf_addr, frame_id.bytes, ".got");
    if (global_offset_table == NULL) {
        DBG(ERR, "libspawn: Unable to load ELF for binary %s\n",
            binary_name);
        return SPAWN_ERR_LOAD;
    }
    // Store the user-space Global Offset Table base.
    si->u_got = global_offset_table->sh_addr;

    DBG(VERBOSE, "ELF magic number again: %i %c%c%c\n",
        *(char *)elf_addr,
        *((char *)elf_addr + 1),
        *((char *)elf_addr + 2),
        *((char *)elf_addr + 3));

    DBG(DETAILED, "VI: Initialize the dispatcher.\n");
    CHECK(init_dispatcher(si));

    DBG(DETAILED, "VII: Initialize the environment.\n");
    CHECK(init_env(si, module));

    CHECK(map_paging_state_to_child(&si->paging_state));

    DBG(DETAILED, "Entry Address: 0x%"PRIxGENVADDR"\n", si->entry_addr);
    DBG(DETAILED, "VIII: Make the dispatcher runnable.\n");
    CHECK(invoke_dispatcher(si->dispatcher, cap_dispatcher, si->l1_cnode,
                            si->process_l1_pt, si->spawned_disp_memframe,
                            true));

    return SYS_ERR_OK;
}
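
/*
 * Usage sketch (caller side, binary name illustrative); the spawninfo is
 * caller-owned and filled in by spawn_load_by_name:
 *
 *     struct spawninfo si;
 *     errval_t err = spawn_load_by_name("hello", &si);
 *     if (err_is_fail(err)) {
 *         DEBUG_ERR(err, "spawning hello failed");
 *     }
 */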
/**
 * \file
 * \brief AOS paging helpers.
 */

/*
 * Copyright (c) 2012, 2013, 2016, ETH Zurich.
 * All rights reserved.
 *
 * This file is distributed under the terms in the attached LICENSE file.
 * If you do not find this file, copies can be found by writing to:
 * ETH Zurich D-INFK, Universitaetstr. 6, CH-8092 Zurich. Attn: Systems Group.
 */

#include <aos/aos.h>
#include <aos/paging.h>
#include <aos/except.h>
#include <aos/slab.h>
#include "threads_priv.h"

#include <spawn/spawn.h>

#include <stdio.h>
#include <string.h>

//#define no_page_align_in_frame_alloc
#undef DEBUG_LEVEL
#define DEBUG_LEVEL 0

static struct paging_state current;

/**
 * \brief Helper function that allocates a slot and
 *        creates an ARM L2 page table capability.
 */
__attribute__((unused))
static errval_t arml2_alloc(struct paging_state *st, struct capref *ret)
{
    CHECK(st->slot_alloc->alloc(st->slot_alloc, ret));
    CHECK(vnode_create(*ret, ObjType_VNode_ARM_l2));
    return SYS_ERR_OK;
}

// For debugging only: keeps track of the number of created paging states.
static size_t ps_index = 1;

errval_t paging_init_state(struct paging_state *st, lvaddr_t start_vaddr,
                           struct capref pdir, struct slot_allocator *ca)
{
    DBG(VERBOSE, "paging_init_state\n");

    // For debugging only: number this paging state.
    st->debug_paging_state_index = ps_index++;
    DBG(DETAILED, "Setting up paging_state #%u\n",
        st->debug_paging_state_index);

    // Slot allocator.
    st->slot_alloc = ca;

    // Set the L1 page table.
    st->l1_page_table = pdir;

    // TODO: this is probably unnecessary if the state is always zeroed.
    // Initialize the L2 page table tracking array.
    size_t i;
    for (i = 0; i < ARM_L1_MAX_ENTRIES; ++i) {
        st->l2_page_tables[i].init = false;
    }

    // Set the start of the free virtual address space.
    st->free_vspace.base_addr = start_vaddr;
    DBG(DETAILED, "At time of init our base addr is at: %p\n",
        (void *)st->free_vspace.base_addr);
    st->free_vspace.region_size = 0xFFFFFFFF - start_vaddr;
    st->free_vspace.next = NULL;
    st->spawninfo = NULL;

    // TODO: This is an ugly hack so we don't need individual slab
    // allocators: size the slabs for the largest of the three node types.
    // Probably improve at some point.
    size_t minbytes =
        sizeof(struct paging_frame_node) > sizeof(struct paging_map_node)
            ? sizeof(struct paging_frame_node)
            : sizeof(struct paging_map_node);
    if (sizeof(struct paging_used_node) > minbytes) {
        minbytes = sizeof(struct paging_used_node);
    }
    slab_init(&st->slab_alloc, minbytes, slab_default_refill);

    // TODO (M4): Implement a page fault handler that installs frames when
    // a page fault occurs and keeps track of the virtual address space.

    return SYS_ERR_OK;
}

/**
 * \brief This function initializes the paging for this domain.
 *        It is called once before main.
 */
errval_t paging_init(void)
{
    DBG(VERBOSE, "paging_init\n");

    // TODO (M4): initialize self-paging handler
    // TIP: use thread_set_exception_handler() to setup a page fault handler
    // TIP: Think about the fact that later on, you'll have to make sure
    //      that you can handle page faults in any thread of a domain.

    set_current_paging_state(&current);

    // According to the book, the L1 page table is at the following address.
    struct capref l1_pagetable = {
        .cnode = cnode_page,
        .slot = 0,
    };

    // XXX: (1 << 25) is the same magic constant (32 MiB) that appears in
    // the spawn code. Ditto there; it should probably become a named
    // define in a shared header.
    paging_init_state(&current, (1 << 25), l1_pagetable,
                      get_default_slot_allocator());

    return SYS_ERR_OK;
}

/**
 * \brief Initialize per-thread paging state.
 */
void paging_init_onthread(struct thread *t)
{
    // TODO (M4): setup exception handler for thread `t'.
}

/**
 * \brief Initialize a paging region `pr` of `size` bytes, backed by
 *        freshly allocated virtual address space from `st`.
 */
errval_t paging_region_init(struct paging_state *st, struct paging_region *pr,
                            size_t size)
{
    void *base = NULL;
    CHECK(paging_alloc(st, &base, size));
    pr->base_addr = (lvaddr_t)base;
    pr->current_addr = pr->base_addr;
    pr->region_size = size;
    pr->slab_alloc = &st->slab_alloc;
    // TODO: maybe track paging regions in the paging state?
    return SYS_ERR_OK;
}

/**
 * \brief Return a pointer to a bit of the paging region `pr`.
 *        This function gets used in some of the code that is responsible
 *        for allocating Frame (and other) capabilities.
 */
errval_t paging_region_map(struct paging_region *pr, size_t req_size,
                           void **retbuf, size_t *ret_size)
{
    // TODO: allow mapping to reuse the holes left by paging_region_unmap.
    lvaddr_t end_addr = pr->base_addr + pr->region_size;
    ssize_t rem = end_addr - pr->current_addr;
    if (rem >= (ssize_t)req_size) {
        // Enough space: hand out exactly what was requested.
        *retbuf = (void *)pr->current_addr;
        *ret_size = req_size;
        pr->current_addr += req_size;
    } else if (rem > 0) {
        // Not enough space: hand out whatever is left.
        *retbuf = (void *)pr->current_addr;
        *ret_size = rem;
        pr->current_addr += rem;
        DBG(WARN, "exhausted paging region, "
                  "expect badness on next allocation\n");
    } else {
        return LIB_ERR_VSPACE_MMU_AWARE_NO_SPACE;
    }
    return SYS_ERR_OK;
}

/**
 * \brief Free a bit of the paging region `pr`.
 *        This function gets used in some of the code that is responsible
 *        for allocating Frame (and other) capabilities.
 *        NOTE: Implementing this function is optional.
 */
errval_t paging_region_unmap(struct paging_region *pr, lvaddr_t base,
                             size_t bytes)
{
    DBG(DETAILED, "paging_region_unmap with %p and %u\n",
        (void *)base, (unsigned)bytes);
    // Free up space in the paging region; holes are tracked in a sorted
    // singly linked list so that they can be coalesced below.
    assert(base >= pr->base_addr);
    assert(pr->base_addr + pr->region_size >= base + bytes);
    assert(pr->current_addr >= base + bytes);
    if (pr->current_addr - bytes == base) {
        // The freed range sits at the end: just roll back the bump pointer.
        pr->current_addr = base;
    } else {
        // TODO: catch node overlaps via asserts
        if (pr->holes == NULL) {
            struct paging_frame_node *new_node =
                (struct paging_frame_node *)slab_alloc(pr->slab_alloc);
            new_node->base_addr = base;
            new_node->region_size = bytes;
            new_node->next = NULL;
            pr->holes = new_node;
        } else {
            struct paging_frame_node *node = pr->holes;
            bool done = false;
            while (node->next != NULL) {
                if (node->base_addr < base) {
                    if (node->base_addr + node->region_size == base) {
                        // Directly adjacent to an existing hole: extend it.
                        node->region_size += bytes;
                        done = true;
                        break;
                    } else {
                        node = node->next;
                    }
                } else {
                    // Insert before `node` by swapping its contents into a
                    // new successor node.
                    struct paging_frame_node *new_node =
                        (struct paging_frame_node *)slab_alloc(pr->slab_alloc);
                    new_node->base_addr = node->base_addr;
                    new_node->region_size = node->region_size;
                    new_node->next = node->next;
                    node->base_addr = base;
                    node->region_size = bytes;
                    node->next = new_node;
                    done = true;
                    break;
                }
            }
            if (!done) {
                // Append at the end of the hole list.
                struct paging_frame_node *new_node =
                    (struct paging_frame_node *)slab_alloc(pr->slab_alloc);
                new_node->base_addr = base;
                new_node->region_size = bytes;
                new_node->next = NULL;
                node->next = new_node;
            }
        }
    }
    // Coalesce adjacent holes, and roll back current_addr if the last hole
    // reaches up to it.
    if (pr->holes) {
        struct paging_frame_node *node = pr->holes;
        struct paging_frame_node *prev = NULL;
        while (node->next != NULL) {
            struct paging_frame_node *temp = node->next;
            if (temp->base_addr == node->base_addr + node->region_size) {
                node->region_size += temp->region_size;
                node->next = temp->next;
                slab_free(pr->slab_alloc, temp);
            } else {
                prev = node;
                node = node->next;
            }
        }
        if (node->base_addr + node->region_size == pr->current_addr) {
            pr->current_addr -= node->region_size;
            slab_free(pr->slab_alloc, node);
            if (prev != NULL) {
                prev->next = NULL;
            } else {
                pr->holes = NULL;
            }
        }
    }

    return SYS_ERR_OK;
}

/**
 * TODO(M2): Implement this function.
 * \brief Find a bit of free virtual address space that is large enough to
 *        accommodate a buffer of size `bytes`.
 */

// TODO: consider whether guaranteeing page alignment is a good idea.
// In particular, if it is, find a way to implement this so that it doesn't
// lose memory through adjusting to page alignment.
// TODO: It probably should not page-align at all, but our current
// paging_map_fixed_attr implementation doesn't like addresses that are not
// page aligned. So that one needs fixing first.
errval_t paging_alloc(struct paging_state *st, void **buf, size_t bytes)
{
    // First fit over the free-vspace list.
    struct paging_frame_node *node = &st->free_vspace;
    while (node != NULL) {
        if (node->region_size >= bytes) {
            DBG(DETAILED, "ps %u paging_alloc base: %p size: %u req: %u\n",
                st->debug_paging_state_index, (void *)node->base_addr,
                node->region_size, bytes);
            *buf = (void *)node->base_addr;
            node->base_addr += bytes;

#ifndef no_page_align_in_frame_alloc
            // Round the node's new base up to a page boundary so the next
            // allocation is page aligned; the skipped bytes are lost.
            lvaddr_t temp = ROUND_UP(node->base_addr, BASE_PAGE_SIZE);
            DBG(DETAILED, "ps %u alignment change does: %u\n",
                st->debug_paging_state_index,
                (size_t)(temp - node->base_addr));
            node->region_size -= bytes + (temp - node->base_addr);
            node->base_addr = temp;
#else
            node->region_size -= bytes;
#endif

/*          struct paging_frame_node *new_node = (struct paging_frame_node *)
                slab_alloc(&st->slab_alloc);
            new_node->base_addr = (lvaddr_t)*buf;
            new_node->region_size = node->base_addr - new_node->base_addr;
            new_node->next = st->used_vspace;
            st->used_vspace = new_node; */

            DBG(DETAILED, "ps %u paging_alloc new base: %p new size: %u\n",
                st->debug_paging_state_index, (void *)node->base_addr,
                node->region_size);

            if (node->region_size == 0) {
                // TODO: remove this node by moving the content of the next
                // node into it and deleting the next node. This means we
                // will eventually trail a size-0 node at the maximum
                // address, but that is still cheaper than a doubly linked
                // list, and less of a performance issue than walking from
                // the start to unlink the node properly.
                // Random side note: it might be worth having pools if
                // acquiring capabilities is so expensive; on the other hand
                // that sounds like a great way to get security issues later
                // down the line.
            }
            return SYS_ERR_OK;
        }

        DBG(DETAILED, "ps %u we requested %u bytes but only had %u available "
                      "on region %p\n",
            st->debug_paging_state_index, bytes, node->region_size,
            (void *)node->base_addr);
        node = node->next;
    }
    return LIB_ERR_OUT_OF_VIRTUAL_ADDR;
}

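/*
 * Example of the first-fit behaviour above (numbers illustrative): with
 * the free list [{base 0x02000000, size 4 KiB} -> {base 0x10000000, size
 * 1 MiB}], a request for 8 KiB skips the first node and returns
 * 0x10000000, leaving [{0x02000000, 4 KiB} -> {0x10002000, 1 MiB - 8 KiB}].
 * With page alignment enabled (the default), an unaligned request
 * additionally loses the bytes up to the next page boundary.
 */
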
/**
 * \brief Map a user provided frame, and return the VA of the mapped
 *        frame in `buf`.
 */
errval_t paging_map_frame_attr(struct paging_state *st, void **buf,
                               size_t bytes, struct capref frame,
                               int flags, void *arg1, void *arg2)
{
    CHECK_MSG(paging_alloc(st, buf, bytes), "to addr %p of size %i\n",
              *buf, bytes);
    return paging_map_fixed_attr(st, (lvaddr_t)(*buf), frame, bytes, flags);
}

errval_t
slab_refill_no_pagefault(struct slab_allocator *slabs, struct capref frame,
                         size_t minbytes)
{
    DBG(DETAILED, "slab_refill_no_pagefault wants to alloc bytes: %u\n",
        minbytes);
    // Refill the slab allocator without causing a page fault.
    // Note: the passed-in `frame` slot is not used; frame_alloc below
    // allocates a fresh slot and frame.
    void *buf = NULL;
    struct paging_state *st = get_current_paging_state();
    size_t frame_size;
    DBG(DETAILED, "allocing at least: %u\n", minbytes);
    CHECK(frame_alloc(&frame, minbytes, &frame_size));
    DBG(DETAILED, "allocing in reality: %u\n", frame_size);

    // This replicates paging_map_fixed_attr's mapping loop minus the
    // mapping bookkeeping: that bookkeeping would slab_alloc and could
    // recurse into this very function.
    size_t mapped_bytes = 0;
    size_t bytes = frame_size;
    int flags = VREGION_FLAGS_READ_WRITE;
    CHECK_MSG(paging_alloc(st, &buf, bytes), "to addr %p of size %i\n",
              buf, bytes);
    DBG(DETAILED, "now pagefault goes into replication of the fixed mapping\n");
    lvaddr_t vaddr = (lvaddr_t)buf;
    while (bytes > 0) {
        // Get the index of the L2 table in the L1 table.
        lvaddr_t l1_index = ARM_L1_OFFSET(vaddr);

        // Check if the table already exists.
        struct capref l2_pagetable;
        if (st->l2_page_tables[l1_index].init) {
            // Table exists.
            DBG(DETAILED, "found l2 page table\n");
            l2_pagetable = st->l2_page_tables[l1_index].cap;
        } else {
            // Create a new L2 page table.
            DBG(DETAILED, "making l2 page table\n");
            CHECK(arml2_alloc(st, &l2_pagetable));

            DBG(DETAILED, "creating l1 l2 mapping capability\n");
            // Write the L1 table entry.
            struct capref l2_l1_mapping;
            CHECK(st->slot_alloc->alloc(st->slot_alloc, &l2_l1_mapping));

            DBG(DETAILED, "mapping l2 page table\n");
            CHECK(vnode_map(st->l1_page_table, l2_pagetable, l1_index,
                            VREGION_FLAGS_READ_WRITE, 0, 1, l2_l1_mapping));

            // Add the cap to the tracking array.
            st->l2_page_tables[l1_index].cap = l2_pagetable;
            st->l2_page_tables[l1_index].init = true;

            // Mirror the new caps into the child process if necessary.
            if (st->spawninfo != NULL) {
                DBG(DETAILED, "I should add the new slot to the child\n");
                ((struct spawninfo *)st->spawninfo)->slot_callback(
                    (struct spawninfo *)st->spawninfo, l2_pagetable);
                ((struct spawninfo *)st->spawninfo)->slot_callback(
                    (struct spawninfo *)st->spawninfo, l2_l1_mapping);
            }
        }
        DBG(DETAILED, "now allocing slot for frame mapping\n");

        // Get the slot within the L2 table.
        lvaddr_t l2_index = ARM_L2_OFFSET(vaddr);

        // How many bytes can we map into this L2 table?
        size_t mapping_size = MIN(
            bytes, (ARM_L2_MAX_ENTRIES - l2_index) * BASE_PAGE_SIZE);

        // Finally, do the mapping.
        struct capref l2_frame;
        CHECK(st->slot_alloc->alloc(st->slot_alloc, &l2_frame));
        CHECK(vnode_map(l2_pagetable, frame, l2_index, flags, mapped_bytes,
                        mapping_size / BASE_PAGE_SIZE, l2_frame));
        if (st->spawninfo != NULL) {
            ((struct spawninfo *)st->spawninfo)->slot_callback(
                (struct spawninfo *)st->spawninfo, l2_frame);
        }

        // Do some housekeeping for the next round.
        mapped_bytes += mapping_size;
        bytes -= mapping_size;
        vaddr += mapping_size;
        DBG(DETAILED, "Still need to map: %"PRIuGENSIZE" bytes starting "
                      "from %p\n", (gensize_t)bytes, (void *)vaddr);
    }
    assert(bytes == 0);
    DBG(DETAILED, "mapped %"PRIuGENSIZE" bytes\n", (gensize_t)mapped_bytes);

    slab_grow(slabs, buf, frame_size);

    return SYS_ERR_OK;
}

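/*
 * Why "no pagefault": the frame is mapped eagerly, page table by page
 * table, before slab_grow() hands the memory to the slab allocator, so
 * touching the new slab backing store can never fault. The only services
 * this relies on are frame_alloc(), the slot allocator and vnode_map().
 */
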
/**
 * \brief Map a user provided frame at a user provided VA.
 */
errval_t paging_map_fixed_attr(struct paging_state *st, lvaddr_t vaddr,
                               struct capref frame, size_t bytes, int flags)
{
    DBG(DETAILED, "fixed alloc: vaddr: %p, bytes: %u\n", (void *)vaddr,
        bytes);

    // Iterate over the L2 page tables and map the memory.
    size_t mapped_bytes = 0;
    if (slab_freecount(&st->slab_alloc) == 0) {
        // Out of slabs: refill without recursing into a page fault.
        DBG(DETAILED, "triggering special slab refill\n");
        struct capref slabframe;
        st->slot_alloc->alloc(st->slot_alloc, &slabframe);
        DBG(DETAILED, "first hurdle made\n");
        slab_refill_no_pagefault(&st->slab_alloc, slabframe, BASE_PAGE_SIZE);
        DBG(DETAILED, "second hurdle made\n");
    }
    // Record this mapping so paging_unmap can undo it later.
    struct paging_used_node *mappings =
        (struct paging_used_node *)slab_alloc(&st->slab_alloc);
    DBG(DETAILED, "we got past the slab_alloc bit\n");
    mappings->start_addr = vaddr;
    mappings->size = bytes;
    mappings->next = st->mappings;
    st->mappings = mappings;
    mappings->map_list = NULL;
    while (bytes > 0) {
        // Get the index of the L2 table in the L1 table.
        lvaddr_t l1_index = ARM_L1_OFFSET(vaddr);

        // Check if the table already exists.
        struct capref l2_pagetable;
        if (st->l2_page_tables[l1_index].init) {
            // Table exists.
            DBG(DETAILED, "found l2 page table\n");
            l2_pagetable = st->l2_page_tables[l1_index].cap;
        } else {
            // Create a new L2 page table.
            DBG(DETAILED, "making l2 page table\n");
            CHECK(arml2_alloc(st, &l2_pagetable));

            DBG(DETAILED, "creating l1 l2 mapping capability\n");
            // Write the L1 table entry.
            struct capref l2_l1_mapping;
            CHECK(st->slot_alloc->alloc(st->slot_alloc, &l2_l1_mapping));

            DBG(DETAILED, "mapping l2 page table\n");
            CHECK(vnode_map(st->l1_page_table, l2_pagetable, l1_index,
                            VREGION_FLAGS_READ_WRITE, 0, 1, l2_l1_mapping));

            // Add the cap to the tracking array.
            st->l2_page_tables[l1_index].cap = l2_pagetable;
            st->l2_page_tables[l1_index].init = true;

            // Mirror the new caps into the child process if necessary.
            if (st->spawninfo != NULL) {
                DBG(DETAILED, "I should add the new slot to the child\n");
                ((struct spawninfo *)st->spawninfo)->slot_callback(
                    (struct spawninfo *)st->spawninfo, l2_pagetable);
                ((struct spawninfo *)st->spawninfo)->slot_callback(
                    (struct spawninfo *)st->spawninfo, l2_l1_mapping);
            }
        }
        DBG(DETAILED, "now allocing slot for frame mapping\n");

        // Get the slot within the L2 table.
        lvaddr_t l2_index = ARM_L2_OFFSET(vaddr);

        // How many bytes can we map into this L2 table?
        size_t mapping_size = MIN(
            bytes, (ARM_L2_MAX_ENTRIES - l2_index) * BASE_PAGE_SIZE);

        // Finally, do the mapping.
        struct capref l2_frame;
        CHECK(st->slot_alloc->alloc(st->slot_alloc, &l2_frame));
        CHECK(vnode_map(l2_pagetable, frame, l2_index, flags, mapped_bytes,
                        mapping_size / BASE_PAGE_SIZE, l2_frame));
        if (st->spawninfo != NULL) {
            ((struct spawninfo *)st->spawninfo)->slot_callback(
                (struct spawninfo *)st->spawninfo, l2_frame);
        }

        // TODO: also store l2_l1_mapping. Storing is really just needed
        // for unmapping; however, we might want to leverage it later for
        // finding an empty frame range. If we do, rewrite frame_alloc and
        // remove the frame_alloc-specific stuff from st.
        DBG(DETAILED, "now doing the storing thing\n");
        struct paging_map_node *mapentry =
            (struct paging_map_node *)slab_alloc(&st->slab_alloc);
        mapentry->table = l2_pagetable;
        mapentry->mapping = l2_frame;
        mapentry->next = mappings->map_list;
        mappings->map_list = mapentry;

        // Do some housekeeping for the next round.
        mapped_bytes += mapping_size;
        bytes -= mapping_size;
        vaddr += mapping_size;
        DBG(DETAILED, "Still need to map: %"PRIuGENSIZE" bytes starting "
                      "from %p\n", (gensize_t)bytes, (void *)vaddr);
    }
    assert(bytes == 0);
    DBG(DETAILED, "mapped %"PRIuGENSIZE" bytes\n", (gensize_t)mapped_bytes);
    return SYS_ERR_OK;
}

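/*
 * Mapping walk example (sizes illustrative): mapping 2 MiB at a vaddr that
 * starts 8 pages before the end of an L2 table proceeds in two rounds:
 * round one maps those 8 pages (mapping_size caps at the L2 boundary),
 * round two moves on to the next L1 entry, creating its L2 table on
 * demand, and maps the rest. Each round records one paging_map_node so
 * paging_unmap can later vnode_unmap() every piece.
 */
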
void paging_add_space(struct paging_state *st, lvaddr_t base, size_t size);
void paging_add_space(struct paging_state *st, lvaddr_t base, size_t size)
{
    // TODO: coalesce nodes
    // TODO: keep the free list sorted
    struct paging_frame_node *node =
        (struct paging_frame_node *)slab_alloc(&st->slab_alloc);
    node->base_addr = base;
    node->region_size = size;
    node->next = st->free_vspace.next;
    st->free_vspace.next = node;
}

/**
 * \brief Unmap the region starting at address `region`.
 *        NOTE: Implementing this function is optional.
 */
errval_t paging_unmap(struct paging_state *st, const void *region)
{
    DBG(DETAILED, "unmapping %p\n", region);
    // Find the tracking node for this region.
    struct paging_used_node *node = st->mappings;
    struct paging_used_node *prev = NULL;
    while (true) {
        assert(node != NULL); // TODO: return an error instead of asserting.
        if (node->start_addr == (lvaddr_t)region) {
            break;
        }
        prev = node;
        node = node->next;
    }
    // Unlink it from the mappings list.
    if (prev != NULL) {
        prev->next = node->next;
    } else {
        st->mappings = node->next;
    }
    // Undo every piece of the mapping and free the bookkeeping entries.
    struct paging_map_node *mapnode = node->map_list;
    while (mapnode != NULL) {
        struct paging_map_node *temp = mapnode;
        mapnode = mapnode->next;
        vnode_unmap(temp->table, temp->mapping);
        slab_free(&st->slab_alloc, temp);
    }
    // Return the virtual address range to the free list.
    paging_add_space(st, node->start_addr, node->size);
    slab_free(&st->slab_alloc, node);
    return SYS_ERR_OK;
}
/**
 * \file
 * \brief create child process library
 */

/*
 * Copyright (c) 2016, ETH Zurich.
 * All rights reserved.
 *
 * This file is distributed under the terms in the attached LICENSE file.
 * If you do not find this file, copies can be found by writing to:
 * ETH Zurich D-INFK, Universitaetsstrasse 6, CH-8092 Zurich. Attn: Systems Group.
 */

#ifndef _INIT_SPAWN_H_
#define _INIT_SPAWN_H_

#include "aos/slot_alloc.h"
#include "aos/paging.h"

/// Information about the binary.
struct spawninfo {
    char *binary_name;                  ///< Name of the binary
    struct capref l1_cnode;             ///< Process's L1 CNode
    struct cnoderef l2_cnode_list[ROOTCN_SLOTS_USER]; ///< Foreign L2 CNodes
    struct capref dispatcher;           ///< The dispatcher
    struct capref process_l1_pt;        ///< The page table of the new process
    struct capref spawned_disp_memframe;///< Dispatcher memory in spawned proc
    struct paging_state paging_state;   ///< New process's paging state
    genvaddr_t u_got;                   ///< Uspace address of the GOT
    genvaddr_t entry_addr;              ///< Program entry point
    arch_registers_state_t *enabled_area;   ///< Enabled register save area
    int next_slot;                      ///< Next free slot in the PAGECN
    /// Mirrors newly created paging capabilities into the child's CSpace.
    errval_t (*slot_callback)(struct spawninfo *si, struct capref cap);
};

/// Start a child process by binary name. This fills in the spawninfo.
errval_t spawn_load_by_name(void *binary_name, struct spawninfo *si);

#endif /* _INIT_SPAWN_H_ */
/**
 * \file
 * \brief Barrelfish paging helpers.
 */

/*
 * Copyright (c) 2012, ETH Zurich.
 * All rights reserved.
 *
 * This file is distributed under the terms in the attached LICENSE file.
 * If you do not find this file, copies can be found by writing to:
 * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
 */


#ifndef LIBBARRELFISH_PAGING_H
#define LIBBARRELFISH_PAGING_H

#include <errors/errno.h>
#include <aos/capabilities.h>
#include <aos/slab.h>
#include <barrelfish_kpi/paging_arm_v7.h>

typedef int paging_flags_t;

#define VADDR_OFFSET ((lvaddr_t)1UL*1024*1024*1024) // 1 GB

#define PAGING_SLAB_BUFSIZE 12

#define VREGION_FLAGS_READ    0x01 // Reading allowed
#define VREGION_FLAGS_WRITE   0x02 // Writing allowed
#define VREGION_FLAGS_EXECUTE 0x04 // Execute allowed
#define VREGION_FLAGS_NOCACHE 0x08 // Caching disabled
#define VREGION_FLAGS_MPB     0x10 // Message passing buffer
#define VREGION_FLAGS_GUARD   0x20 // Guard page
#define VREGION_FLAGS_MASK    0x2f // Mask of all individual VREGION_FLAGS

#define VREGION_FLAGS_READ_WRITE \
    (VREGION_FLAGS_READ | VREGION_FLAGS_WRITE)
#define VREGION_FLAGS_READ_EXECUTE \
    (VREGION_FLAGS_READ | VREGION_FLAGS_EXECUTE)
#define VREGION_FLAGS_READ_WRITE_NOCACHE \
    (VREGION_FLAGS_READ | VREGION_FLAGS_WRITE | VREGION_FLAGS_NOCACHE)
#define VREGION_FLAGS_READ_WRITE_MPB \
    (VREGION_FLAGS_READ | VREGION_FLAGS_WRITE | VREGION_FLAGS_MPB)

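/*
 * Typical flag choices (illustrative): ELF text segments get
 * VREGION_FLAGS_READ_EXECUTE, data/bss and heap frames get
 * VREGION_FLAGS_READ_WRITE, and device frames would combine
 * VREGION_FLAGS_READ_WRITE with VREGION_FLAGS_NOCACHE.
 */
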
/* If necessary, use a tree:
struct paging_free_frames_tree {
    union {
        struct paging_region *leaf;
        struct paging_free_frames_tree_node *node;
    } elm;
    bool leaf;
};

struct paging_free_frames_tree_node {
    lvaddr_t split_addr; // TODO: replace/rethink. This is probably useless
                         // and should instead be a depth or cut.
    struct paging_free_frames_tree left;
    struct paging_free_frames_tree right;
};
*/
// A tree is not necessary for the simple implementation; a trusty linked
// list it is!

/// A node of the free virtual address space list.
struct paging_frame_node {
    lvaddr_t base_addr;
    size_t region_size;
    struct paging_frame_node *next;
};

/// One piece of a mapping: an L2 table and the mapping cap within it.
struct paging_map_node {
    struct capref table;
    struct capref mapping;
    struct paging_map_node *next;
};

/// One tracked mapping: its VA range plus the list of its pieces.
struct paging_used_node {
    lvaddr_t start_addr;
    size_t size;
    struct paging_used_node *next;
    struct paging_map_node *map_list;
};

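/*
 * How the bookkeeping lists fit together (sketch):
 *
 *   paging_state
 *     free_vspace -> paging_frame_node -> ...  (free VA ranges)
 *     mappings    -> paging_used_node -> ...   (one per fixed mapping)
 *                      map_list -> paging_map_node -> ...
 *                                  (one per L2 table the mapping touches)
 */
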
/// Struct to store the paging status of a process.
struct paging_state {
    size_t debug_paging_state_index;
    struct slot_allocator *slot_alloc;
    struct slab_allocator slab_alloc;
    // Free virtual address space and tracked mappings.
    struct paging_frame_node free_vspace;
    struct paging_used_node *mappings;
    // L1 page table plus a tracking array for the L2 page tables.
    struct capref l1_page_table;
    struct l2_page_table {
        struct capref cap;
        bool init;
    } l2_page_tables[ARM_L1_MAX_ENTRIES];
    // Set when this state describes a child's VSpace (see struct spawninfo).
    void *spawninfo;
};


struct thread;
/// Initialize a paging_state struct.
errval_t paging_init_state(struct paging_state *st, lvaddr_t start_vaddr,
                           struct capref pdir, struct slot_allocator *ca);
/// Initialize the self-paging module.
errval_t paging_init(void);
/// Set up paging on a new thread (used for user-level threads).
void paging_init_onthread(struct thread *t);


struct paging_region {
    lvaddr_t base_addr;
    lvaddr_t current_addr;
    size_t region_size;
    struct slab_allocator *slab_alloc;
    // Holes punched into the region by paging_region_unmap.
    struct paging_frame_node *holes;
};

errval_t paging_region_init(struct paging_state *st,
                            struct paging_region *pr, size_t size);

/**
 * \brief Return a pointer to a bit of the paging region `pr`.
 *        This function gets used in some of the code that is responsible
 *        for allocating Frame (and other) capabilities.
 */
errval_t paging_region_map(struct paging_region *pr, size_t req_size,
                           void **retbuf, size_t *ret_size);
/**
 * \brief Free a bit of the paging region `pr`.
 *        This function gets used in some of the code that is responsible
 *        for allocating Frame (and other) capabilities.
 */
errval_t paging_region_unmap(struct paging_region *pr, lvaddr_t base,
                             size_t bytes);

/**
 * \brief Find a bit of free virtual address space that is large enough to
 *        accommodate a buffer of size `bytes`.
 */
errval_t paging_alloc(struct paging_state *st, void **buf, size_t bytes);

/**
 * Functions to map a user provided frame.
 */
/// Map a user provided frame with given flags while allocating VA space for it.
errval_t paging_map_frame_attr(struct paging_state *st, void **buf,
                               size_t bytes, struct capref frame,
                               int flags, void *arg1, void *arg2);
/// Map a user provided frame at a user provided VA with given flags.
errval_t paging_map_fixed_attr(struct paging_state *st, lvaddr_t vaddr,
                               struct capref frame, size_t bytes, int flags);

/**
 * Refill a slab allocator without causing a page fault.
 */
errval_t slab_refill_no_pagefault(struct slab_allocator *slabs,
                                  struct capref frame, size_t minbytes);

/**
 * \brief Unmap the region starting at address `region`.
 */
errval_t paging_unmap(struct paging_state *st, const void *region);


/// Map a user provided frame while allocating VA space for it.
static inline errval_t paging_map_frame(struct paging_state *st, void **buf,
                                        size_t bytes, struct capref frame,
                                        void *arg1, void *arg2)
{
    return paging_map_frame_attr(st, buf, bytes, frame,
                                 VREGION_FLAGS_READ_WRITE, arg1, arg2);
}

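/*
 * Note: arg1 and arg2 are unused by this implementation of
 * paging_map_frame_attr; they are presumably kept so the signature matches
 * the original libbarrelfish interface.
 */
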
/// Map a user provided frame at a user provided VA.
static inline errval_t paging_map_fixed(struct paging_state *st, lvaddr_t vaddr,
                                        struct capref frame, size_t bytes)
{
    return paging_map_fixed_attr(st, vaddr, frame, bytes,
                                 VREGION_FLAGS_READ_WRITE);
}

static inline lvaddr_t paging_genvaddr_to_lvaddr(genvaddr_t genvaddr)
{
    return (lvaddr_t)genvaddr;
}

#endif // LIBBARRELFISH_PAGING_H