1/* See COPYRIGHT for copyright information. */
2
3#include <inc/x86.h>
4#include <inc/mmu.h>
5#include <inc/error.h>
6#include <inc/string.h>
7#include <inc/assert.h>
8
9#include <kern/pmap.h>
10#include <kern/kclock.h>
11
12#define MEM_ENTRY_ADDR 0x7000
13
14// These variables are set by i386_detect_memory()
15size_t npages; // Amount of physical memory (in pages)
16static size_t npages_basemem; // Amount of base memory (in pages)
17
18// These variables are set in mem_init()
19pde_t *kern_pgdir; // Kernel's initial page directory
20struct PageInfo *pages; // Physical page state array
21static struct PageInfo *page_free_list; // Free list of physical pages
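// (Each free physical page is tracked by its struct PageInfo in 'pages';
// the free list is threaded through the pp_link fields, so no additional
// storage is needed to remember which pages are free.)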
22
23
24// --------------------------------------------------------------
25// Detect machine's physical memory setup.
26// --------------------------------------------------------------
27
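// Read a 16-bit value from CMOS/NVRAM: mc146818_read() returns a single
// byte, so two consecutive registers are combined little-endian.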
28static int
29nvram_read(int r)
30{
31 return mc146818_read(r) | (mc146818_read(r + 1) << 8);
32}
33
34static void
35i386_detect_memory(void)
36{
37 size_t basemem, extmem, ext16mem, totalmem;
38
39 // Use CMOS calls to measure available base & extended memory.
40 // (CMOS calls return results in kilobytes.)
41 basemem = nvram_read(NVRAM_BASELO);
42 extmem = nvram_read(NVRAM_EXTLO);
43 ext16mem = nvram_read(NVRAM_EXT16LO) * 64;
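	// (NVRAM_EXT16LO reports memory above 16MB in 64KB blocks, so the
	// multiplication by 64 converts it to kilobytes like the other two.)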
44
45 // Calculate the number of physical pages available in both base
46 // and extended memory.
47 if (ext16mem)
48 totalmem = 16 * 1024 + ext16mem;
49 else if (extmem)
50 totalmem = 1 * 1024 + extmem;
51 else
52 totalmem = basemem;
53
54 npages = totalmem / (PGSIZE / 1024);
55 npages_basemem = basemem / (PGSIZE / 1024);
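	// Example: basemem = 640KB and extmem = 15MB (15360KB) with nothing
	// above 16MB gives totalmem = 1024 + 15360 = 16384KB, so
	// npages = 16384/4 = 4096 and npages_basemem = 640/4 = 160.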
56
57 cprintf("Physical memory: %uK available, base = %uK, extended = %uK\n",
58 totalmem, basemem, totalmem - basemem);
59}
60
61
62// --------------------------------------------------------------
63// Set up memory mappings above UTOP.
64// --------------------------------------------------------------
65
66static void boot_map_region(pde_t *pgdir, uintptr_t va, size_t size, physaddr_t pa, int perm);
67static void check_page_free_list(bool only_low_memory);
68static void check_page_alloc(void);
69static void check_kern_pgdir(void);
70static physaddr_t check_va2pa(pde_t *pgdir, uintptr_t va);
71static void check_page(void);
72static void check_page_installed_pgdir(void);
73
74// This simple physical memory allocator is used only while JOS is setting
75// up its virtual memory system. page_alloc() is the real allocator.
76//
77// If n>0, allocates enough pages of contiguous physical memory to hold 'n'
78// bytes. Doesn't initialize the memory. Returns a kernel virtual address.
79//
80// If n==0, returns the address of the next free page without allocating
81// anything.
82//
83// If we're out of memory, boot_alloc should panic.
84// This function may ONLY be used during initialization,
85// before the page_free_list list has been set up.
86static void *
87boot_alloc(uint32_t n)
88{
89 static char *nextfree; // virtual address of next byte of free memory
90 char *result;
91
92 // Initialize nextfree if this is the first time.
93 // 'end' is a magic symbol automatically generated by the linker,
94 // which points to the end of the kernel's bss segment:
95 // the first virtual address that the linker did *not* assign
96 // to any kernel code or global variables.
97 if (!nextfree) {
98 extern char end[];
99 nextfree = ROUNDUP((char *) end, PGSIZE);
100 }
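	// (e.g. if 'end' were 0xf0117123, nextfree would start at 0xf0118000,
	// the next page boundary.)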
101
102 // Allocate a chunk large enough to hold 'n' bytes, then update
103 // nextfree. Make sure nextfree is kept aligned
104 // to a multiple of PGSIZE.
105 //
106 // LAB 2: Your code here.
107
	result = nextfree;
	nextfree += ROUNDUP(n, PGSIZE);
	// Panic if the allocation would run past the end of physical memory
	// (the highest kernel virtual address that maps a real page).
	if ((uint32_t) nextfree > KERNBASE + npages * PGSIZE)
		panic("boot_alloc: out of memory");
114
115 return result;
116}
117
118// Set up a two-level page table:
119// kern_pgdir is its linear (virtual) address of the root
120//
121// This function only sets up the kernel part of the address space
122// (ie. addresses >= UTOP). The user part of the address space
123// will be set up later.
124//
125// From UTOP to ULIM, the user is allowed to read but not write.
126// Above ULIM the user cannot read or write.
127void
128mem_init(void)
129{
130 uint32_t cr0;
131 size_t n;
132
133 // Find out how much memory the machine has (npages & npages_basemem).
134 i386_detect_memory();
135
138
139 //////////////////////////////////////////////////////////////////////
140 // create initial page directory.
141 kern_pgdir = (pde_t *) boot_alloc(PGSIZE);
142 memset(kern_pgdir, 0, PGSIZE);
143
144 //////////////////////////////////////////////////////////////////////
145 // Recursively insert PD in itself as a page table, to form
146 // a virtual page table at virtual address UVPT.
	// (For now, you don't have to understand the greater purpose of the
148 // following line.)
149
150 // Permissions: kernel R, user R
151 kern_pgdir[PDX(UVPT)] = PADDR(kern_pgdir) | PTE_U | PTE_P;
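	// Because kern_pgdir is installed as its own page table, the 4MB
	// region starting at UVPT exposes every page table read-only: the PTE
	// for a virtual address va can later be read at UVPT[PGNUM(va)].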
152
153 //////////////////////////////////////////////////////////////////////
154 // Allocate an array of npages 'struct PageInfo's and store it in 'pages'.
155 // The kernel uses this array to keep track of physical pages: for
156 // each physical page, there is a corresponding struct PageInfo in this
157 // array. 'npages' is the number of physical pages in memory. Use memset
158 // to initialize all fields of each struct PageInfo to 0.
159 // Your code goes here:
160
161 uint32_t mysize = npages * sizeof(struct PageInfo);
162 pages = (struct PageInfo *) boot_alloc(mysize);
163 memset(pages, 0, mysize);
164
165 //////////////////////////////////////////////////////////////////////
166 // Now that we've allocated the initial kernel data structures, we set
167 // up the list of free physical pages. Once we've done so, all further
168 // memory management will go through the page_* functions. In
169 // particular, we can now map memory using boot_map_region
170 // or page_insert
171 page_init();
172
173 check_page_free_list(1);
174 check_page_alloc();
175 check_page();
176
177 //////////////////////////////////////////////////////////////////////
178 // Now we set up virtual memory
179
180 //////////////////////////////////////////////////////////////////////
181 // Map 'pages' read-only by the user at linear address UPAGES
182 // Permissions:
183 // - the new image at UPAGES -- kernel R, user R
184 // (ie. perm = PTE_U | PTE_P)
185 // - pages itself -- kernel RW, user NONE
186 // Your code goes here:
187
	// Map the 'pages' array read-only for the user at UPAGES using
	// boot_map_region(). PTSIZE covers the whole array and keeps the
	// mapping size a multiple of PGSIZE.
	boot_map_region(kern_pgdir, UPAGES, PTSIZE, PADDR(pages), PTE_U | PTE_P);
197
198 //////////////////////////////////////////////////////////////////////
199 // Use the physical memory that 'bootstack' refers to as the kernel
200 // stack. The kernel stack grows down from virtual address KSTACKTOP.
201 // We consider the entire range from [KSTACKTOP-PTSIZE, KSTACKTOP)
202 // to be the kernel stack, but break this into two pieces:
203 // * [KSTACKTOP-KSTKSIZE, KSTACKTOP) -- backed by physical memory
204 // * [KSTACKTOP-PTSIZE, KSTACKTOP-KSTKSIZE) -- not backed; so if
205 // the kernel overflows its stack, it will fault rather than
206 // overwrite memory. Known as a "guard page".
207 // Permissions: kernel RW, user NONE
208 // Your code goes here:
	// Map [KSTACKTOP-KSTKSIZE, KSTACKTOP) to the physical memory backing
	// 'bootstack'; the rest of [KSTACKTOP-PTSIZE, KSTACKTOP) stays
	// unmapped as the guard region.
	boot_map_region(kern_pgdir, KSTACKTOP - KSTKSIZE, KSTKSIZE, PADDR(bootstack), PTE_W | PTE_P);
214
215 //////////////////////////////////////////////////////////////////////
216 // Map all of physical memory at KERNBASE.
217 // Ie. the VA range [KERNBASE, 2^32) should map to
218 // the PA range [0, 2^32 - KERNBASE)
219 // We might not have 2^32 - KERNBASE bytes of physical memory, but
220 // we just set up the mapping anyway.
221 // Permissions: kernel RW, user NONE
222 // Your code goes here:
223
	// Map [KERNBASE, 2^32) to physical [0, 2^32 - KERNBASE). The size
	// 2^32 - KERNBASE equals 0xFFFFFFFF - KERNBASE + 1, which still fits
	// in a 32-bit size_t.
	boot_map_region(kern_pgdir, KERNBASE, 0xFFFFFFFF - KERNBASE + 1, 0, PTE_W | PTE_P);
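	// From here on, KADDR(pa) == pa + KERNBASE and PADDR(kva) == kva - KERNBASE
	// are valid translations for every physical address below npages * PGSIZE.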
230
231
232 // Check that the initial page directory has been set up correctly.
233 check_kern_pgdir();
234
235 // Switch from the minimal entry page directory to the full kern_pgdir
236 // page table we just created. Our instruction pointer should be
237 // somewhere between KERNBASE and KERNBASE+4MB right now, which is
238 // mapped the same way by both page tables.
239 //
240 // If the machine reboots at this point, you've probably set up your
241 // kern_pgdir wrong.
242 lcr3(PADDR(kern_pgdir));
243
244 check_page_free_list(0);
245
246 // entry.S set the really important flags in cr0 (including enabling
247 // paging). Here we configure the rest of the flags that we care about.
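	// (CR0_WP makes the kernel honor read-only PTEs, CR0_AM enables
	// alignment checking, and clearing CR0_TS/CR0_EM keeps FPU
	// instructions from faulting.)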
248 cr0 = rcr0();
249 cr0 |= CR0_PE|CR0_PG|CR0_AM|CR0_WP|CR0_NE|CR0_MP;
250 cr0 &= ~(CR0_TS|CR0_EM);
251 lcr0(cr0);
252
253 // Some more checks, only possible after kern_pgdir is installed.
254 check_page_installed_pgdir();
255}
256
257// --------------------------------------------------------------
258// Tracking of physical pages.
259// The 'pages' array has one 'struct PageInfo' entry per physical page.
260// Pages are reference counted, and free pages are kept on a linked list.
261// --------------------------------------------------------------
262
263//
264// Initialize page structure and memory free list.
265// After this is done, NEVER use boot_alloc again. ONLY use the page
266// allocator functions below to allocate and deallocate physical
267// memory via the page_free_list.
268//
269void
270page_init(void)
271{
272 // The example code here marks all physical pages as free.
273 // However this is not truly the case. What memory is free?
274 // 1) Mark physical page 0 as in use.
275 // This way we preserve the real-mode IDT and BIOS structures
276 // in case we ever need them. (Currently we don't, but...)
277 // 2) The rest of base memory, [PGSIZE, npages_basemem * PGSIZE)
278 // is free.
279 // 3) Then comes the IO hole [IOPHYSMEM, EXTPHYSMEM), which must
280 // never be allocated.
281 // 4) Then extended memory [EXTPHYSMEM, ...).
282 // Some of it is in use, some is free. Where is the kernel
283 // in physical memory? Which pages are already in use for
284 // page tables and other data structures?
285 //
286 // Change the code to reflect this.
287 // NB: DO NOT actually touch the physical memory corresponding to
288 // free pages!
289 size_t i;
	// See inc/memlayout.h for the physical memory layout. Everything from
	// the start of the IO hole up through the kernel image, kern_pgdir,
	// and the pages[] array is already in use; boot_alloc(0) returns the
	// first kernel virtual address past those boot-time allocations.
	size_t first_free = PADDR(boot_alloc(0)) / PGSIZE;

	for (i = 0; i < npages; i++) {
		pages[i].pp_link = NULL;
		if (i == 0 || (i >= npages_basemem && i < first_free)) {
			// Reserved: page 0 (real-mode IDT and BIOS structures),
			// the IO hole, and the kernel plus its boot-time
			// allocations.
			pages[i].pp_ref = 1;
		} else {
			pages[i].pp_ref = 0;
			pages[i].pp_link = page_free_list;
			page_free_list = &pages[i];
		}
	}
313}
314
315
316
//
// Allocates a physical page. If (alloc_flags & ALLOC_ZERO), fills the entire
// returned physical page with '\0' bytes. Does NOT increment the reference
// count of the page - the caller must do this if necessary (either explicitly
// or via page_insert).
//
// Be sure to set the pp_link field of the allocated page to NULL so
// page_free can check for double-free bugs.
//
// Returns NULL if out of free memory.
//
// Hint: use page2kva and memset
//
struct PageInfo *
page_alloc(int alloc_flags)
{
	struct PageInfo *pp = page_free_list;
334
	if (!pp)
		return NULL;

	// A page on the free list must have no outstanding references.
	assert(pp->pp_ref == 0);

	// Pop the page off the free list and clear pp_link so page_free can
	// detect double frees.
	page_free_list = pp->pp_link;
	pp->pp_link = NULL;

	if (alloc_flags & ALLOC_ZERO)
		memset(page2kva(pp), 0, PGSIZE);

	return pp;
353}
354
355//
356// Return a page to the free list.
357// (This function should only be called when pp->pp_ref reaches 0.)
358//
359void
360page_free(struct PageInfo *pp)
361{
	// Hint: You may want to panic if pp->pp_ref is nonzero or
	// pp->pp_link is not NULL.

	// assert() panics if the page is still referenced or is already on
	// the free list (a double free).
	assert(pp->pp_link == NULL);
	assert(pp->pp_ref == 0);
369
370 pp->pp_link = page_free_list;
371 page_free_list = pp;
372
373}
374
375//
376// Decrement the reference count on a page,
377// freeing it if there are no more refs.
378//
379void
380page_decref(struct PageInfo* pp)
381{
382 if (--pp->pp_ref == 0)
383 page_free(pp);
384}
385
386// Given 'pgdir', a pointer to a page directory, pgdir_walk returns
387// a pointer to the page table entry (PTE) for linear address 'va'.
388// This requires walking the two-level page table structure.
389//
390// The relevant page table page might not exist yet.
391// If this is true, and create == false, then pgdir_walk returns NULL.
392// Otherwise, pgdir_walk allocates a new page table page with page_alloc.
393// - If the allocation fails, pgdir_walk returns NULL.
394// - Otherwise, the new page's reference count is incremented,
395// the page is cleared,
396// and pgdir_walk returns a pointer into the new page table page.
397//
398// Hint 1: you can turn a PageInfo * into the physical address of the
399// page it refers to with page2pa() from kern/pmap.h.
400//
401// Hint 2: the x86 MMU checks permission bits in both the page directory
402// and the page table, so it's safe to leave permissions in the page
403// directory more permissive than strictly necessary.
404//
405// Hint 3: look at inc/mmu.h for useful macros that manipulate page
406// table and page directory entries.
407//
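// For example, with va = 0x00801234: PDX(va) = 2 selects pgdir[2], whose
// PTE_ADDR gives the physical address of a page table; PTX(va) = 1 selects
// the entry within that table, and PGOFF(va) = 0x234 is the byte offset
// within the mapped page.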
408pte_t *
409pgdir_walk(pde_t *pgdir, const void *va, int create)
410{
	// Index into the page directory with the PDX part of 'va'.
	pde_t *entry_addr = pgdir + PDX(va);
	struct PageInfo *pp;

	if (!(*entry_addr & PTE_P)) {
		// The page table page does not exist yet.
		if (!create)
			return NULL;
		pp = page_alloc(ALLOC_ZERO);	// allocate and zero a new page table page
		if (!pp)
			return NULL;
		pp->pp_ref++;
		// Install it in the directory. Per hint 2, the directory entry
		// may be more permissive than the individual PTEs.
		*entry_addr = page2pa(pp) | PTE_P | PTE_W | PTE_U;
	}

	// *entry_addr holds the physical address of the page table page;
	// convert it to a kernel virtual address and index it with PTX(va).
	pte_t *result = KADDR(PTE_ADDR(*entry_addr));
	return result + PTX(va);
474}
475
//
// Map [va, va+size) of virtual address space to physical [pa, pa+size)
// in the page table rooted at pgdir. Size is a multiple of PGSIZE, and
// va and pa are both page-aligned.
// Use permission bits perm|PTE_P for the entries.
//
// This function is only intended to set up the ``static'' mappings
// above UTOP. As such, it should *not* change the pp_ref field on the
// mapped pages.
//
// Hint: the TA solution uses pgdir_walk
//
static void
boot_map_region(pde_t *pgdir, uintptr_t va, size_t size, physaddr_t pa, int perm)
{
	pte_t *pte;
	size_t i;

	for (i = 0; i < size; i += PGSIZE) {
		// Find (allocating if necessary) the PTE for va + i and point
		// it at pa + i with the requested permissions.
		pte = pgdir_walk(pgdir, (void *) (va + i), 1);
		if (!pte)
			panic("boot_map_region: out of memory allocating page table");
		*pte = (pa + i) | perm | PTE_P;
	}
}
505
506
//
// Map the physical page 'pp' at virtual address 'va'.
// The permissions (the low 12 bits) of the page table entry
// should be set to 'perm|PTE_P'.
//
// Requirements
//   - If there is already a page mapped at 'va', it should be page_remove()d.
//   - If necessary, on demand, a page table should be allocated and inserted
//     into 'pgdir'.
//   - pp->pp_ref should be incremented if the insertion succeeds.
//   - The TLB must be invalidated if a page was formerly present at 'va'.
//
// Corner-case hint: Make sure to consider what happens when the same
// pp is re-inserted at the same virtual address in the same pgdir.
// However, try not to distinguish this case in your code, as this
// frequently leads to subtle bugs; there's an elegant way to handle
// everything in one code path.
//
// RETURNS:
//   0 on success
//   -E_NO_MEM, if page table couldn't be allocated
//
// Hint: The TA solution is implemented using pgdir_walk, page_remove,
// and page2pa.
//
int
page_insert(pde_t *pgdir, struct PageInfo *pp, void *va, int perm)
{
	// Walk the page table, allocating a page table page if necessary.
	pte_t *pte = pgdir_walk(pgdir, va, 1);
	if (!pte)
		return -E_NO_MEM;

	// Increment the refcount before the possible page_remove() below, so
	// re-inserting the same page at the same va does not free it.
	pp->pp_ref++;

	// If a page is already mapped here, unmap it (this also invalidates
	// the TLB entry for 'va').
	if (*pte & PTE_P)
		page_remove(pgdir, va);

	// Point the PTE at pp with the requested permissions, and make the
	// page directory entry at least as permissive.
	*pte = page2pa(pp) | perm | PTE_P;
	pgdir[PDX(va)] |= perm;
	return 0;
}
565
566
//
// Return the page mapped at virtual address 'va'.
// If pte_store is not zero, then we store in it the address
// of the pte for this page. This is used by page_remove and
// can be used to verify page permissions for syscall arguments,
// but should not be used by most callers.
//
// Return NULL if there is no page mapped at va.
//
// Hint: the TA solution uses pgdir_walk and pa2page.
//
struct PageInfo *
page_lookup(pde_t *pgdir, void *va, pte_t **pte_store)
{
	// Look up the PTE without creating a missing page table page.
	pte_t *pte = pgdir_walk(pgdir, va, 0);

	if (!pte || !(*pte & PTE_P))
		return NULL;
	if (pte_store)
		*pte_store = pte;
	// PTE_ADDR strips the permission bits, leaving the physical address.
	return pa2page(PTE_ADDR(*pte));
}
596
//
// Unmaps the physical page at virtual address 'va'.
// If there is no physical page at that address, silently does nothing.
//
// Details:
//   - The ref count on the physical page should decrement.
//   - The physical page should be freed if the refcount reaches 0.
//   - The pg table entry corresponding to 'va' should be set to 0.
//     (if such a PTE exists)
//   - The TLB must be invalidated if you remove an entry from
//     the page table.
//
// Hint: The TA solution is implemented using page_lookup,
// 	tlb_invalidate, and page_decref.
//
void
page_remove(pde_t *pgdir, void *va)
{
	pte_t *pte;
	struct PageInfo *pp = page_lookup(pgdir, va, &pte);

	if (pp) {
		// Clear the mapping, drop the reference, and flush the stale
		// TLB entry for 'va'.
		*pte = 0;
		page_decref(pp);
		tlb_invalidate(pgdir, va);
	}
}
635
636//
637// Invalidate a TLB entry, but only if the page tables being
638// edited are the ones currently in use by the processor.
639//
640void
641tlb_invalidate(pde_t *pgdir, void *va)
642{
643 // Flush the entry only if we're modifying the current address space.
644 // For now, there is only one address space, so always invalidate.
645 invlpg(va);
646}
647
648
649// --------------------------------------------------------------
650// Checking functions.
651// --------------------------------------------------------------
652
653//
654// Check that the pages on the page_free_list are reasonable.
655//
656static void
657check_page_free_list(bool only_low_memory)
658{
659 struct PageInfo *pp;
660 unsigned pdx_limit = only_low_memory ? 1 : NPDENTRIES;
661 int nfree_basemem = 0, nfree_extmem = 0;
662 char *first_free_page;
663
664 if (!page_free_list)
665 panic("'page_free_list' is a null pointer!");
666
667 if (only_low_memory) {
668 // Move pages with lower addresses first in the free
669 // list, since entry_pgdir does not map all pages.
670 struct PageInfo *pp1, *pp2;
671 struct PageInfo **tp[2] = { &pp1, &pp2 };
672 for (pp = page_free_list; pp; pp = pp->pp_link) {
673 int pagetype = PDX(page2pa(pp)) >= pdx_limit;
674 *tp[pagetype] = pp;
675 tp[pagetype] = &pp->pp_link;
676 }
677 *tp[1] = 0;
678 *tp[0] = pp2;
679 page_free_list = pp1;
	}
683 // if there's a page that shouldn't be on the free list,
684 // try to make sure it eventually causes trouble.
685 for (pp = page_free_list; pp; pp = pp->pp_link)
686 if (PDX(page2pa(pp)) < pdx_limit)
687 memset(page2kva(pp), 0x97, 128);
688
689 first_free_page = (char *) boot_alloc(0);
690 for (pp = page_free_list; pp; pp = pp->pp_link) {
692 // check that we didn't corrupt the free list itself
693 assert(pp >= pages);
694 assert(pp < pages + npages);
695 assert(((char *) pp - (char *) pages) % sizeof(*pp) == 0);
696
697 // check a few pages that shouldn't be on the free list
698 assert(page2pa(pp) != 0);
699 assert(page2pa(pp) != IOPHYSMEM);
700 assert(page2pa(pp) != EXTPHYSMEM - PGSIZE);
701 assert(page2pa(pp) != EXTPHYSMEM);
702 assert(page2pa(pp) < EXTPHYSMEM || (char *) page2kva(pp) >= first_free_page);
703
705 if (page2pa(pp) < EXTPHYSMEM)
706 ++nfree_basemem;
707 else
708 ++nfree_extmem;
709 }
711 assert(nfree_basemem > 0);
712 assert(nfree_extmem > 0);
713
714
715 cprintf("check_page_free_list() succeeded!\n");
716}
717
718//
719// Check the physical page allocator (page_alloc(), page_free(),
720// and page_init()).
721//
722static void
723check_page_alloc(void)
724{
725 struct PageInfo *pp, *pp0, *pp1, *pp2;
726 int nfree;
727 struct PageInfo *fl;
728 char *c;
729 int i;
730
731 if (!pages)
732 panic("'pages' is a null pointer!");
733
734 // check number of free pages
735 for (pp = page_free_list, nfree = 0; pp; pp = pp->pp_link)
736 ++nfree;
737
738 // should be able to allocate three pages
739 pp0 = pp1 = pp2 = 0;
740 assert((pp0 = page_alloc(0)));
741 assert((pp1 = page_alloc(0)));
742 assert((pp2 = page_alloc(0)));
743
744 assert(pp0);
745 assert(pp1 && pp1 != pp0);
746 assert(pp2 && pp2 != pp1 && pp2 != pp0);
747 assert(page2pa(pp0) < npages*PGSIZE);
748 assert(page2pa(pp1) < npages*PGSIZE);
749 assert(page2pa(pp2) < npages*PGSIZE);
750
751 // temporarily steal the rest of the free pages
752 fl = page_free_list;
753 page_free_list = 0;
754
755 // should be no free memory
756 assert(!page_alloc(0));
757
758 // free and re-allocate?
759 page_free(pp0);
760 page_free(pp1);
761 page_free(pp2);
762 pp0 = pp1 = pp2 = 0;
763 assert((pp0 = page_alloc(0)));
764 assert((pp1 = page_alloc(0)));
765 assert((pp2 = page_alloc(0)));
766 assert(pp0);
767 assert(pp1 && pp1 != pp0);
768 assert(pp2 && pp2 != pp1 && pp2 != pp0);
769 assert(!page_alloc(0));
770
771 // test flags
772 memset(page2kva(pp0), 1, PGSIZE);
773 page_free(pp0);
774 assert((pp = page_alloc(ALLOC_ZERO)));
775 assert(pp && pp0 == pp);
776 c = page2kva(pp);
777 for (i = 0; i < PGSIZE; i++)
778 assert(c[i] == 0);
779
780 // give free list back
781 page_free_list = fl;
782
783 // free the pages we took
784 page_free(pp0);
785 page_free(pp1);
786 page_free(pp2);
787
788 // number of free pages should be the same
789 for (pp = page_free_list; pp; pp = pp->pp_link)
790 --nfree;
791 assert(nfree == 0);
792
793 cprintf("check_page_alloc() succeeded!\n");
794}
795
796//
797// Checks that the kernel part of virtual address space
798// has been set up roughly correctly (by mem_init()).
799//
800// This function doesn't test every corner case,
801// but it is a pretty good sanity check.
802//
803
804static void
805check_kern_pgdir(void)
806{
807 uint32_t i, n;
808 pde_t *pgdir;
809
810 pgdir = kern_pgdir;
811
812 // check pages array
813 n = ROUNDUP(npages*sizeof(struct PageInfo), PGSIZE);
815 for (i = 0; i < n; i += PGSIZE)
816 assert(check_va2pa(pgdir, UPAGES + i) == PADDR(pages) + i);
817
818
819 // check phys mem
820 for (i = 0; i < npages * PGSIZE; i += PGSIZE)
821 assert(check_va2pa(pgdir, KERNBASE + i) == i);
822
823 // check kernel stack
824 for (i = 0; i < KSTKSIZE; i += PGSIZE)
825 assert(check_va2pa(pgdir, KSTACKTOP - KSTKSIZE + i) == PADDR(bootstack) + i);
826 assert(check_va2pa(pgdir, KSTACKTOP - PTSIZE) == ~0);
827
828 // check PDE permissions
829 for (i = 0; i < NPDENTRIES; i++) {
830 switch (i) {
831 case PDX(UVPT):
832 case PDX(KSTACKTOP-1):
833 case PDX(UPAGES):
834 assert(pgdir[i] & PTE_P);
835 break;
836 default:
837 if (i >= PDX(KERNBASE)) {
838 assert(pgdir[i] & PTE_P);
839 assert(pgdir[i] & PTE_W);
840 } else
841 assert(pgdir[i] == 0);
842 break;
843 }
844 }
845 cprintf("check_kern_pgdir() succeeded!\n");
846}
847
848// This function returns the physical address of the page containing 'va',
849// defined by the page directory 'pgdir'. The hardware normally performs
850// this functionality for us! We define our own version to help check
851// the check_kern_pgdir() function; it shouldn't be used elsewhere.
852
853static physaddr_t
854check_va2pa(pde_t *pgdir, uintptr_t va)
855{
856 pte_t *p;
857
858 pgdir = &pgdir[PDX(va)];
859 if (!(*pgdir & PTE_P))
860 return ~0;
861 p = (pte_t*) KADDR(PTE_ADDR(*pgdir));
862 if (!(p[PTX(va)] & PTE_P))
863 return ~0;
864 return PTE_ADDR(p[PTX(va)]);
865}
866
867
868// check page_insert, page_remove, &c
869static void
870check_page(void)
871{
872 struct PageInfo *pp, *pp0, *pp1, *pp2;
873 struct PageInfo *fl;
874 pte_t *ptep, *ptep1;
875 void *va;
876 int i;
877 extern pde_t entry_pgdir[];
878
879 // should be able to allocate three pages
880 pp0 = pp1 = pp2 = 0;
881 assert((pp0 = page_alloc(0)));
882 assert((pp1 = page_alloc(0)));
883 assert((pp2 = page_alloc(0)));
884
885 assert(pp0);
886 assert(pp1 && pp1 != pp0);
887 assert(pp2 && pp2 != pp1 && pp2 != pp0);
888
889 // temporarily steal the rest of the free pages
890 fl = page_free_list;
891 page_free_list = 0;
892
893 // should be no free memory
894 assert(!page_alloc(0));
895
896 // there is no page allocated at address 0
897 assert(page_lookup(kern_pgdir, (void *) 0x0, &ptep) == NULL);
898
899 // there is no free memory, so we can't allocate a page table
900 assert(page_insert(kern_pgdir, pp1, 0x0, PTE_W) < 0);
901
902 // free pp0 and try again: pp0 should be used for page table
903 page_free(pp0);
904 assert(page_insert(kern_pgdir, pp1, 0x0, PTE_W) == 0);
905 assert(PTE_ADDR(kern_pgdir[0]) == page2pa(pp0));
906 assert(check_va2pa(kern_pgdir, 0x0) == page2pa(pp1));
907 assert(pp1->pp_ref == 1);
908 assert(pp0->pp_ref == 1);
909
910 // should be able to map pp2 at PGSIZE because pp0 is already allocated for page table
911 assert(page_insert(kern_pgdir, pp2, (void*) PGSIZE, PTE_W) == 0);
912 assert(check_va2pa(kern_pgdir, PGSIZE) == page2pa(pp2));
913 assert(pp2->pp_ref == 1);
914
915 // should be no free memory
916 assert(!page_alloc(0));
917
918 // should be able to map pp2 at PGSIZE because it's already there
919 assert(page_insert(kern_pgdir, pp2, (void*) PGSIZE, PTE_W) == 0);
920 assert(check_va2pa(kern_pgdir, PGSIZE) == page2pa(pp2));
921 assert(pp2->pp_ref == 1);
922
923 // pp2 should NOT be on the free list
924 // could happen in ref counts are handled sloppily in page_insert
925 assert(!page_alloc(0));
926
927 // check that pgdir_walk returns a pointer to the pte
928 ptep = (pte_t *) KADDR(PTE_ADDR(kern_pgdir[PDX(PGSIZE)]));
929 assert(pgdir_walk(kern_pgdir, (void*)PGSIZE, 0) == ptep+PTX(PGSIZE));
930
931 // should be able to change permissions too.
932 assert(page_insert(kern_pgdir, pp2, (void*) PGSIZE, PTE_W|PTE_U) == 0);
933 assert(check_va2pa(kern_pgdir, PGSIZE) == page2pa(pp2));
934 assert(pp2->pp_ref == 1);
935 assert(*pgdir_walk(kern_pgdir, (void*) PGSIZE, 0) & PTE_U);
936 assert(kern_pgdir[0] & PTE_U);
937
938 // should be able to remap with fewer permissions
939 assert(page_insert(kern_pgdir, pp2, (void*) PGSIZE, PTE_W) == 0);
940 assert(*pgdir_walk(kern_pgdir, (void*) PGSIZE, 0) & PTE_W);
941 assert(!(*pgdir_walk(kern_pgdir, (void*) PGSIZE, 0) & PTE_U));
942
943 // should not be able to map at PTSIZE because need free page for page table
944 assert(page_insert(kern_pgdir, pp0, (void*) PTSIZE, PTE_W) < 0);
945
946 // insert pp1 at PGSIZE (replacing pp2)
947 assert(page_insert(kern_pgdir, pp1, (void*) PGSIZE, PTE_W) == 0);
948 assert(!(*pgdir_walk(kern_pgdir, (void*) PGSIZE, 0) & PTE_U));
949
950 // should have pp1 at both 0 and PGSIZE, pp2 nowhere, ...
951 assert(check_va2pa(kern_pgdir, 0) == page2pa(pp1));
952 assert(check_va2pa(kern_pgdir, PGSIZE) == page2pa(pp1));
953 // ... and ref counts should reflect this
954 assert(pp1->pp_ref == 2);
955 assert(pp2->pp_ref == 0);
956
957 // pp2 should be returned by page_alloc
958 assert((pp = page_alloc(0)) && pp == pp2);
959 // unmapping pp1 at 0 should keep pp1 at PGSIZE
962 page_remove(kern_pgdir, 0x0);
964 assert(check_va2pa(kern_pgdir, 0x0) == ~0);
965 assert(check_va2pa(kern_pgdir, PGSIZE) == page2pa(pp1));
966 assert(pp1->pp_ref == 1);
967 assert(pp2->pp_ref == 0);
968
969 // test re-inserting pp1 at PGSIZE
970 assert(page_insert(kern_pgdir, pp1, (void*) PGSIZE, 0) == 0);
971 assert(pp1->pp_ref);
972 assert(pp1->pp_link == NULL);
973
974 // unmapping pp1 at PGSIZE should free it
977 page_remove(kern_pgdir, (void*) PGSIZE);
978 assert(check_va2pa(kern_pgdir, 0x0) == ~0);
979 assert(check_va2pa(kern_pgdir, PGSIZE) == ~0);
980 assert(pp1->pp_ref == 0);
981 assert(pp2->pp_ref == 0);
982
983 // so it should be returned by page_alloc
984 assert((pp = page_alloc(0)) && pp == pp1);
985
986 // should be no free memory
987 assert(!page_alloc(0));
988
989 // forcibly take pp0 back
990 assert(PTE_ADDR(kern_pgdir[0]) == page2pa(pp0));
991 kern_pgdir[0] = 0;
992 assert(pp0->pp_ref == 1);
993 pp0->pp_ref = 0;
994
995 // check pointer arithmetic in pgdir_walk
996 page_free(pp0);
997 va = (void*)(PGSIZE * NPDENTRIES + PGSIZE);
998 ptep = pgdir_walk(kern_pgdir, va, 1);
999 ptep1 = (pte_t *) KADDR(PTE_ADDR(kern_pgdir[PDX(va)]));
1000 assert(ptep == ptep1 + PTX(va));
1001 kern_pgdir[PDX(va)] = 0;
1002 pp0->pp_ref = 0;
1003
1004 // check that new page tables get cleared
1005 memset(page2kva(pp0), 0xFF, PGSIZE);
1006 page_free(pp0);
1007 pgdir_walk(kern_pgdir, 0x0, 1);
1008 ptep = (pte_t *) page2kva(pp0);
1009 for(i=0; i<NPTENTRIES; i++)
1010 assert((ptep[i] & PTE_P) == 0);
1011 kern_pgdir[0] = 0;
1012 pp0->pp_ref = 0;
1013
1014 // give free list back
1015 page_free_list = fl;
1016
1017 // free the pages we took
1018 page_free(pp0);
1019 page_free(pp1);
1020 page_free(pp2);
1021
1022 cprintf("check_page() succeeded!\n");
1023}
1024
1025// check page_insert, page_remove, &c, with an installed kern_pgdir
1026static void
1027check_page_installed_pgdir(void)
1028{
1029 struct PageInfo *pp, *pp0, *pp1, *pp2;
1030 struct PageInfo *fl;
1031 pte_t *ptep, *ptep1;
1032 uintptr_t va;
1033 int i;
1034
1035 // check that we can read and write installed pages
1036 pp1 = pp2 = 0;
1037 assert((pp0 = page_alloc(0)));
1038 assert((pp1 = page_alloc(0)));
1039 assert((pp2 = page_alloc(0)));
1040 page_free(pp0);
1041 memset(page2kva(pp1), 1, PGSIZE);
1042 memset(page2kva(pp2), 2, PGSIZE);
1043 page_insert(kern_pgdir, pp1, (void*) PGSIZE, PTE_W);
1044 assert(pp1->pp_ref == 1);
1045 assert(*(uint32_t *)PGSIZE == 0x01010101U);
1046 page_insert(kern_pgdir, pp2, (void*) PGSIZE, PTE_W);
1047 assert(*(uint32_t *)PGSIZE == 0x02020202U);
1048 assert(pp2->pp_ref == 1);
1049 assert(pp1->pp_ref == 0);
1050 *(uint32_t *)PGSIZE = 0x03030303U;
1051 assert(*(uint32_t *)page2kva(pp2) == 0x03030303U);
1052 page_remove(kern_pgdir, (void*) PGSIZE);
1053 assert(pp2->pp_ref == 0);
1054
1055 // forcibly take pp0 back
1056 assert(PTE_ADDR(kern_pgdir[0]) == page2pa(pp0));
1057 kern_pgdir[0] = 0;
1058 assert(pp0->pp_ref == 1);
1059 pp0->pp_ref = 0;
1060
1061 // free the pages we took
1062 page_free(pp0);
1063
1064 cprintf("check_page_installed_pgdir() succeeded!\n");
1065}