/* See COPYRIGHT for copyright information. */

#include <inc/x86.h>
#include <inc/mmu.h>
#include <inc/error.h>
#include <inc/string.h>
#include <inc/assert.h>

#include <kern/pmap.h>
#include <kern/kclock.h>

// These variables are set by i386_detect_memory()
size_t npages;				// Amount of physical memory (in pages)
static size_t npages_basemem;		// Amount of base memory (in pages)

// These variables are set in mem_init()
pde_t *kern_pgdir;			// Kernel's initial page directory
struct PageInfo *pages;			// Physical page state array
static struct PageInfo *page_free_list;	// Free list of physical pages


// --------------------------------------------------------------
// Detect machine's physical memory setup.
// --------------------------------------------------------------

static int
nvram_read(int r)
{
	return mc146818_read(r) | (mc146818_read(r + 1) << 8);
}
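
// Illustrative note: each CMOS register holds one byte, so nvram_read()
// combines register r (low byte) and r + 1 (high byte) into a 16-bit value.
// For example (assuming the usual 640K of base memory), reading NVRAM_BASELO
// this way would yield 640, i.e. the size of base memory in kilobytes.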

static void
i386_detect_memory(void)
{
	size_t npages_extmem;

	// Use CMOS calls to measure available base & extended memory.
	// (CMOS calls return results in kilobytes.)
	npages_basemem = (nvram_read(NVRAM_BASELO) * 1024) / PGSIZE;
	npages_extmem = (nvram_read(NVRAM_EXTLO) * 1024) / PGSIZE;

	// Calculate the number of physical pages available in both base
	// and extended memory.
	if (npages_extmem)
		npages = (EXTPHYSMEM / PGSIZE) + npages_extmem;
	else
		npages = npages_basemem;

	cprintf("Physical memory: %uK available, base = %uK, extended = %uK\n",
		npages * PGSIZE / 1024,
		npages_basemem * PGSIZE / 1024,
		npages_extmem * PGSIZE / 1024);
}
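
// Worked example (illustrative, assuming PGSIZE = 4096 and EXTPHYSMEM = 1MB):
// with 640K of base memory and 32768K of extended memory, the CMOS reads give
// npages_basemem = 640K/4K = 160 and npages_extmem = 32768K/4K = 8192, so
// npages = 1MB/4K + 8192 = 256 + 8192 = 8448 pages (33MB).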


// --------------------------------------------------------------
// Set up memory mappings above UTOP.
// --------------------------------------------------------------

static void boot_map_region(pde_t *pgdir, uintptr_t va, size_t size, physaddr_t pa, int perm);
static void check_page_free_list(bool only_low_memory);
static void check_page_alloc(void);
static void check_kern_pgdir(void);
static physaddr_t check_va2pa(pde_t *pgdir, uintptr_t va);
static void check_page(void);
static void check_page_installed_pgdir(void);

// This simple physical memory allocator is used only while JOS is setting
// up its virtual memory system.  page_alloc() is the real allocator.
//
// If n>0, allocates enough pages of contiguous physical memory to hold 'n'
// bytes.  Doesn't initialize the memory.  Returns a kernel virtual address.
//
// If n==0, returns the address of the next free page without allocating
// anything.
//
// If we're out of memory, boot_alloc should panic.
// This function may ONLY be used during initialization,
// before the page_free_list has been set up.
static void *
boot_alloc(uint32_t n)
{
	static char *nextfree;	// virtual address of next byte of free memory
	char *result;

	// Initialize nextfree if this is the first time.
	// 'end' is a magic symbol automatically generated by the linker,
	// which points to the end of the kernel's bss segment:
	// the first virtual address that the linker did *not* assign
	// to any kernel code or global variables.
	if (!nextfree) {
		extern char end[];
		nextfree = ROUNDUP((char *) end, PGSIZE);
	}

	if (n == 0)
		return nextfree;

	// Allocate whole pages, and check the limit against the rounded-up
	// size so an allocation cannot silently spill past it.
	uint32_t rounded_n = ROUNDUP(n, PGSIZE);
	if (nextfree + rounded_n > (char *) (KERNBASE + PTSIZE))
		panic("boot_alloc: out of memory\n");

	result = nextfree;
	nextfree += rounded_n;
	return result;
}
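
// Illustrative usage (taken from mem_init() below): both calls return
// page-aligned kernel virtual addresses above KERNBASE.
//	kern_pgdir = (pde_t *) boot_alloc(PGSIZE);
//	pages = boot_alloc(sizeof(struct PageInfo) * npages);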

// Set up a two-level page table:
//    kern_pgdir is the linear (virtual) address of the root
//
// This function only sets up the kernel part of the address space
// (ie. addresses >= UTOP).  The user part of the address space
// will be set up later.
//
// From UTOP to ULIM, the user is allowed to read but not write.
// Above ULIM the user cannot read or write.
void
mem_init(void)
{
	uint32_t cr0;
	size_t n;

	// Find out how much memory the machine has (npages & npages_basemem).
	i386_detect_memory();

	// Remove this line when you're ready to test this function.
	// panic("mem_init: This function is not finished\n");

	//////////////////////////////////////////////////////////////////////
	// create initial page directory.
	kern_pgdir = (pde_t *) boot_alloc(PGSIZE);
	memset(kern_pgdir, 0, PGSIZE);

	//////////////////////////////////////////////////////////////////////
	// Recursively insert PD in itself as a page table, to form
	// a virtual page table at virtual address UVPT.
	// (For now, you don't have to understand the greater purpose of the
	// following line.)

	// Permissions: kernel R, user R
	kern_pgdir[PDX(UVPT)] = PADDR(kern_pgdir) | PTE_U | PTE_P;

	//////////////////////////////////////////////////////////////////////
	// Allocate an array of npages 'struct PageInfo's and store it in 'pages'.
	// The kernel uses this array to keep track of physical pages: for
	// each physical page, there is a corresponding struct PageInfo in this
	// array.  'npages' is the number of physical pages in memory.
	// Your code goes here:
	pages = boot_alloc(sizeof(struct PageInfo) * npages);

	//////////////////////////////////////////////////////////////////////
	// Now that we've allocated the initial kernel data structures, we set
	// up the list of free physical pages.  Once we've done so, all further
	// memory management will go through the page_* functions.  In
	// particular, we can now map memory using boot_map_region
	// or page_insert.
	page_init();

	check_page_free_list(1);
	check_page_alloc();
	check_page();

	//////////////////////////////////////////////////////////////////////
	// Now we set up virtual memory.

	//////////////////////////////////////////////////////////////////////
	// Map 'pages' read-only by the user at linear address UPAGES.
	// Permissions:
	//    - the new image at UPAGES -- kernel R, user R
	//      (ie. perm = PTE_U | PTE_P)
	//    - pages itself -- kernel RW, user NONE
	// Your code goes here:
	boot_map_region(kern_pgdir, UPAGES, PTSIZE, PADDR(pages), PTE_U);

	//////////////////////////////////////////////////////////////////////
	// Use the physical memory that 'bootstack' refers to as the kernel
	// stack.  The kernel stack grows down from virtual address KSTACKTOP.
	// We consider the entire range from [KSTACKTOP-PTSIZE, KSTACKTOP)
	// to be the kernel stack, but break this into two pieces:
	//     * [KSTACKTOP-KSTKSIZE, KSTACKTOP) -- backed by physical memory
	//     * [KSTACKTOP-PTSIZE, KSTACKTOP-KSTKSIZE) -- not backed; so if
	//       the kernel overflows its stack, it will fault rather than
	//       overwrite memory.  Known as a "guard page".
	// Permissions: kernel RW, user NONE
	// Your code goes here:
	boot_map_region(kern_pgdir, KSTACKTOP - KSTKSIZE, KSTKSIZE, PADDR(bootstack), PTE_W);

	//////////////////////////////////////////////////////////////////////
	// Map all of physical memory at KERNBASE.
	// Ie. the VA range [KERNBASE, 2^32) should map to
	// the PA range [0, 2^32 - KERNBASE).
	// We might not have 2^32 - KERNBASE bytes of physical memory, but
	// we just set up the mapping anyway.
	// Permissions: kernel RW, user NONE
	// Your code goes here:
	boot_map_region(kern_pgdir, KERNBASE, (uint32_t) (-KERNBASE), 0, PTE_W);

	// Check that the initial page directory has been set up correctly.
	check_kern_pgdir();

	// Switch from the minimal entry page directory to the full kern_pgdir
	// page table we just created.  Our instruction pointer should be
	// somewhere between KERNBASE and KERNBASE+4MB right now, which is
	// mapped the same way by both page tables.
	//
	// If the machine reboots at this point, you've probably set up your
	// kern_pgdir wrong.
	lcr3(PADDR(kern_pgdir));

	check_page_free_list(0);

	// entry.S set the really important flags in cr0 (including enabling
	// paging).  Here we configure the rest of the flags that we care about.
	cr0 = rcr0();
	cr0 |= CR0_PE|CR0_PG|CR0_AM|CR0_WP|CR0_NE|CR0_MP;
	cr0 &= ~(CR0_TS|CR0_EM);
	lcr0(cr0);

	// Some more checks, only possible after kern_pgdir is installed.
	check_page_installed_pgdir();
}
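
// Resulting kernel mappings (a summary of the boot_map_region calls above):
//	[UPAGES, UPAGES+PTSIZE)          -> PADDR(pages)         kernel R,  user R
//	[KSTACKTOP-KSTKSIZE, KSTACKTOP)  -> PADDR(bootstack)     kernel RW, user NONE
//	[KERNBASE, 2^32)                 -> [0, 2^32 - KERNBASE) kernel RW, user NONE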

// --------------------------------------------------------------
// Tracking of physical pages.
// The 'pages' array has one 'struct PageInfo' entry per physical page.
// Pages are reference counted, and free pages are kept on a linked list.
// --------------------------------------------------------------

//
// Initialize page structure and memory free list.
// After this is done, NEVER use boot_alloc again.  ONLY use the page
// allocator functions below to allocate and deallocate physical
// memory via the page_free_list.
//
void
page_init(void)
{
	// The example code here marks all physical pages as free.
	// However this is not truly the case.  What memory is free?
	//  1) Mark physical page 0 as in use.
	//     This way we preserve the real-mode IDT and BIOS structures
	//     in case we ever need them.  (Currently we don't, but...)
	//  2) The rest of base memory, [PGSIZE, npages_basemem * PGSIZE)
	//     is free.
	//  3) Then comes the IO hole [IOPHYSMEM, EXTPHYSMEM), which must
	//     never be allocated.
	//  4) Then extended memory [EXTPHYSMEM, ...).
	//     Some of it is in use, some is free.  Where is the kernel
	//     in physical memory?  Which pages are already in use for
	//     page tables and other data structures?
	//
	// Change the code to reflect this.
	// NB: DO NOT actually touch the physical memory corresponding to
	// free pages!

	// Mark physical page 0 as in use.
	pages[0].pp_ref = 1;
	pages[0].pp_link = NULL;

	// First physical page above everything boot_alloc has handed out
	// (the kernel image, kern_pgdir, and the pages array itself).
	size_t first_free = (uintptr_t) PADDR(boot_alloc(0)) / PGSIZE;

	for (size_t i = 1; i < npages; i++) {
		if (npages_basemem <= i && i < first_free) {
			// IO hole and kernel data structures: in use.
			pages[i].pp_ref = 1;
			pages[i].pp_link = NULL;
		} else {
			pages[i].pp_ref = 0;
			pages[i].pp_link = page_free_list;
			page_free_list = &pages[i];
		}
	}
}
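
// Resulting physical page state (assuming the usual layout where
// npages_basemem * PGSIZE == IOPHYSMEM):
//	page 0                                   in use (real-mode IDT, BIOS structures)
//	[PGSIZE, npages_basemem * PGSIZE)        free
//	[IOPHYSMEM, first boot_alloc-free page)  in use (IO hole, kernel, kern_pgdir, pages[])
//	everything above                         free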

//
// Allocates a physical page.  If (alloc_flags & ALLOC_ZERO), fills the entire
// returned physical page with '\0' bytes.  Does NOT increment the reference
// count of the page - the caller must do this if necessary (either explicitly
// or via page_insert).
//
// Returns NULL if out of free memory.
//
// Hint: use page2kva and memset
struct PageInfo *
page_alloc(int alloc_flags)
{
	if (page_free_list == NULL)
		return NULL;

	struct PageInfo *free_page = page_free_list;
	page_free_list = free_page->pp_link;
	// Disconnect the page from the free list so page_free() can detect
	// double frees.
	free_page->pp_link = NULL;

	if (alloc_flags & ALLOC_ZERO)
		memset(page2kva(free_page), '\0', PGSIZE);
	return free_page;
}
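
// Illustrative usage: allocate a zeroed page and get its kernel virtual
// address (the caller owns the reference-count bookkeeping):
//	struct PageInfo *pp = page_alloc(ALLOC_ZERO);
//	if (!pp)
//		return -E_NO_MEM;
//	void *kva = page2kva(pp);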

//
// Return a page to the free list.
// (This function should only be called when pp->pp_ref reaches 0.)
//
void
page_free(struct PageInfo *pp)
{
	if (pp == NULL || pp->pp_ref != 0 || pp->pp_link != NULL)
		panic("page_free: page is still in use or already free");
	pp->pp_link = page_free_list;
	page_free_list = pp;
}

//
// Decrement the reference count on a page,
// freeing it if there are no more refs.
//
void
page_decref(struct PageInfo* pp)
{
	if (--pp->pp_ref == 0)
		page_free(pp);
}

// Given 'pgdir', a pointer to a page directory, pgdir_walk returns
// a pointer to the page table entry (PTE) for linear address 'va'.
// This requires walking the two-level page table structure.
//
// The relevant page table page might not exist yet.
// If this is true, and create == false, then pgdir_walk returns NULL.
// Otherwise, pgdir_walk allocates a new page table page with page_alloc.
//    - If the allocation fails, pgdir_walk returns NULL.
//    - Otherwise, the new page's reference count is incremented,
//      the page is cleared,
//      and pgdir_walk returns a pointer into the new page table page.
//
// Hint 1: you can turn a Page * into the physical address of the
// page it refers to with page2pa() from kern/pmap.h.
//
// Hint 2: the x86 MMU checks permission bits in both the page directory
// and the page table, so it's safe to leave permissions in the page
// directory more permissive than strictly necessary.
//
// Hint 3: look at inc/mmu.h for useful macros that manipulate page
// table and page directory entries.
//
pte_t *
pgdir_walk(pde_t *pgdir, const void *va, int create)
{
	uintptr_t dirindex = PDX(va);
	uintptr_t tableindex = PTX(va);

	if (!(pgdir[dirindex] & PTE_P)) {
		if (!create)
			return NULL;

		// Allocate and clear a new page table page.
		struct PageInfo *page = page_alloc(ALLOC_ZERO);
		if (!page)
			return NULL;

		page->pp_ref++;
		// Leave the PDE permissive; the PTE enforces the real permissions.
		pgdir[dirindex] = page2pa(page) | PTE_P | PTE_W | PTE_U;
	}
	pte_t *pagetable = KADDR(PTE_ADDR(pgdir[dirindex]));
	return &pagetable[tableindex];
}
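
// Illustrative usage (the pattern used by boot_map_region and page_insert
// below): obtain the PTE slot for 'va' and install a mapping into it.
//	pte_t *pte = pgdir_walk(pgdir, va, 1);
//	if (pte)
//		*pte = page2pa(pp) | perm | PTE_P;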

//
// Map [va, va+size) of virtual address space to physical [pa, pa+size)
// in the page table rooted at pgdir.  Size is a multiple of PGSIZE.
// Use permission bits perm|PTE_P for the entries.
//
// This function is only intended to set up the ``static'' mappings
// above UTOP. As such, it should *not* change the pp_ref field on the
// mapped pages.
//
// Hint: the TA solution uses pgdir_walk
static void
boot_map_region(pde_t *pgdir, uintptr_t va, size_t size, physaddr_t pa, int perm)
{
	size_t page_num = size / PGSIZE;
	for (size_t i = 0; i < page_num; i++) {
		pte_t *pte = pgdir_walk(pgdir, (void *) (va + i * PGSIZE), 1);
		if (!pte)
			panic("boot_map_region: out of memory allocating page table");
		*pte = (pa + i * PGSIZE) | perm | PTE_P;
	}
}
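
// Illustrative usage (from mem_init() above): map the kernel stack just
// below KSTACKTOP, writable by the kernel only.
//	boot_map_region(kern_pgdir, KSTACKTOP - KSTKSIZE, KSTKSIZE,
//			PADDR(bootstack), PTE_W);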

//
// Map the physical page 'pp' at virtual address 'va'.
// The permissions (the low 12 bits) of the page table entry
// should be set to 'perm|PTE_P'.
//
// Requirements
//   - If there is already a page mapped at 'va', it should be page_remove()d.
//   - If necessary, on demand, a page table should be allocated and inserted
//     into 'pgdir'.
//   - pp->pp_ref should be incremented if the insertion succeeds.
//   - The TLB must be invalidated if a page was formerly present at 'va'.
//
// Corner-case hint: Make sure to consider what happens when the same
// pp is re-inserted at the same virtual address in the same pgdir.
// However, try not to distinguish this case in your code, as this
// frequently leads to subtle bugs; there's an elegant way to handle
// everything in one code path.
//
// RETURNS:
//   0 on success
//   -E_NO_MEM, if page table couldn't be allocated
//
// Hint: The TA solution is implemented using pgdir_walk, page_remove,
// and page2pa.
//
int
page_insert(pde_t *pgdir, struct PageInfo *pp, void *va, int perm)
{
	pte_t *pte = pgdir_walk(pgdir, va, 1);
	if (pte == NULL)
		return -E_NO_MEM;

	pp->pp_ref++;
	if (*pte & PTE_P)
		page_remove(pgdir, va);

	*pte = page2pa(pp) | PTE_P | perm;
	return 0;
}
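
// Note on the corner case above: because pp->pp_ref is incremented *before*
// the old mapping is removed, re-inserting the same pp at the same va never
// drops its refcount to 0, so page_remove() cannot free the page out from
// under us.  check_page() below exercises exactly this case.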

//
// Return the page mapped at virtual address 'va'.
// If pte_store is not zero, then we store in it the address
// of the pte for this page.  This is used by page_remove and
// can be used to verify page permissions for syscall arguments,
// but should not be used by most callers.
//
// Return NULL if there is no page mapped at va.
//
// Hint: the TA solution uses pgdir_walk and pa2page.
//
struct PageInfo *
page_lookup(pde_t *pgdir, void *va, pte_t **pte_store)
{
	pte_t *pte = pgdir_walk(pgdir, va, 0);

	if (pte && (*pte & PTE_P)) {
		if (pte_store)
			*pte_store = pte;

		return pa2page(PTE_ADDR(*pte));
	}
	return NULL;
}
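
// Illustrative usage (the pattern page_remove() uses below): look up a
// mapping and keep a pointer to its PTE for later modification.
//	pte_t *pte;
//	struct PageInfo *pp = page_lookup(pgdir, va, &pte);
//	if (pp)
//		... ;	// *pte is the entry that maps va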

//
// Unmaps the physical page at virtual address 'va'.
// If there is no physical page at that address, silently does nothing.
//
// Details:
//   - The ref count on the physical page should decrement.
//   - The physical page should be freed if the refcount reaches 0.
//   - The page table entry corresponding to 'va' should be set to 0.
//     (if such a PTE exists)
//   - The TLB must be invalidated if you remove an entry from
//     the page table.
//
// Hint: The TA solution is implemented using page_lookup,
// 	tlb_invalidate, and page_decref.
//
void
page_remove(pde_t *pgdir, void *va)
{
	pte_t *pte;
	struct PageInfo *page = page_lookup(pgdir, va, &pte);

	if (page == NULL)
		return;

	page_decref(page);
	*pte = 0;
	tlb_invalidate(pgdir, va);
}
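
// Note: page_lookup() only sets 'pte' when a mapping is present, so the early
// return above guarantees *pte is valid before it is cleared and the TLB
// entry for 'va' is invalidated.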

//
// Invalidate a TLB entry, but only if the page tables being
// edited are the ones currently in use by the processor.
//
void
tlb_invalidate(pde_t *pgdir, void *va)
{
	// Flush the entry only if we're modifying the current address space.
	// For now, there is only one address space, so always invalidate.
	invlpg(va);
}


// --------------------------------------------------------------
// Checking functions.
// --------------------------------------------------------------

//
// Check that the pages on the page_free_list are reasonable.
//
static void
check_page_free_list(bool only_low_memory)
{
	struct PageInfo *pp;
	unsigned pdx_limit = only_low_memory ? 1 : NPDENTRIES;
	int nfree_basemem = 0, nfree_extmem = 0;
	char *first_free_page;

	if (!page_free_list)
		panic("'page_free_list' is a null pointer!");

	if (only_low_memory) {
		// Move pages with lower addresses first in the free
		// list, since entry_pgdir does not map all pages.
		struct PageInfo *pp1, *pp2;
		struct PageInfo **tp[2] = { &pp1, &pp2 };
		for (pp = page_free_list; pp; pp = pp->pp_link) {
			int pagetype = PDX(page2pa(pp)) >= pdx_limit;
			*tp[pagetype] = pp;
			tp[pagetype] = &pp->pp_link;
		}
		*tp[1] = 0;
		*tp[0] = pp2;
		page_free_list = pp1;
	}

	// if there's a page that shouldn't be on the free list,
	// try to make sure it eventually causes trouble.
	for (pp = page_free_list; pp; pp = pp->pp_link)
		if (PDX(page2pa(pp)) < pdx_limit)
			memset(page2kva(pp), 0x97, 128);

	first_free_page = (char *) boot_alloc(0);
	for (pp = page_free_list; pp; pp = pp->pp_link) {
		// check that we didn't corrupt the free list itself
		assert(pp >= pages);
		assert(pp < pages + npages);
		assert(((char *) pp - (char *) pages) % sizeof(*pp) == 0);

		// check a few pages that shouldn't be on the free list
		assert(page2pa(pp) != 0);
		assert(page2pa(pp) != IOPHYSMEM);
		assert(page2pa(pp) != EXTPHYSMEM - PGSIZE);
		assert(page2pa(pp) != EXTPHYSMEM);
		assert(page2pa(pp) < EXTPHYSMEM || (char *) page2kva(pp) >= first_free_page);

		if (page2pa(pp) < EXTPHYSMEM)
			++nfree_basemem;
		else
			++nfree_extmem;
	}

	assert(nfree_basemem > 0);
	assert(nfree_extmem > 0);
}

//
// Check the physical page allocator (page_alloc(), page_free(),
// and page_init()).
//
static void
check_page_alloc(void)
{
	struct PageInfo *pp, *pp0, *pp1, *pp2;
	int nfree;
	struct PageInfo *fl;
	char *c;
	int i;

	if (!pages)
		panic("'pages' is a null pointer!");

	// check number of free pages
	for (pp = page_free_list, nfree = 0; pp; pp = pp->pp_link)
		++nfree;

	// should be able to allocate three pages
	pp0 = pp1 = pp2 = 0;
	assert((pp0 = page_alloc(0)));
	assert((pp1 = page_alloc(0)));
	assert((pp2 = page_alloc(0)));

	assert(pp0);
	assert(pp1 && pp1 != pp0);
	assert(pp2 && pp2 != pp1 && pp2 != pp0);
	assert(page2pa(pp0) < npages*PGSIZE);
	assert(page2pa(pp1) < npages*PGSIZE);
	assert(page2pa(pp2) < npages*PGSIZE);

	// temporarily steal the rest of the free pages
	fl = page_free_list;
	page_free_list = 0;

	// should be no free memory
	assert(!page_alloc(0));

	// free and re-allocate?
	page_free(pp0);
	page_free(pp1);
	page_free(pp2);
	pp0 = pp1 = pp2 = 0;
	assert((pp0 = page_alloc(0)));
	assert((pp1 = page_alloc(0)));
	assert((pp2 = page_alloc(0)));
	assert(pp0);
	assert(pp1 && pp1 != pp0);
	assert(pp2 && pp2 != pp1 && pp2 != pp0);
	assert(!page_alloc(0));

	// test flags
	memset(page2kva(pp0), 1, PGSIZE);
	page_free(pp0);
	assert((pp = page_alloc(ALLOC_ZERO)));
	assert(pp && pp0 == pp);
	c = page2kva(pp);
	for (i = 0; i < PGSIZE; i++)
		assert(c[i] == 0);

	// give free list back
	page_free_list = fl;

	// free the pages we took
	page_free(pp0);
	page_free(pp1);
	page_free(pp2);

	// number of free pages should be the same
	for (pp = page_free_list; pp; pp = pp->pp_link)
		--nfree;
	assert(nfree == 0);

	cprintf("check_page_alloc() succeeded!\n");
}

//
// Checks that the kernel part of virtual address space
// has been set up roughly correctly (by mem_init()).
//
// This function doesn't test every corner case,
// but it is a pretty good sanity check.
//

static void
check_kern_pgdir(void)
{
	uint32_t i, n;
	pde_t *pgdir;

	pgdir = kern_pgdir;

	// check pages array
	n = ROUNDUP(npages*sizeof(struct PageInfo), PGSIZE);
	for (i = 0; i < n; i += PGSIZE)
		assert(check_va2pa(pgdir, UPAGES + i) == PADDR(pages) + i);

	// check phys mem
	for (i = 0; i < npages * PGSIZE; i += PGSIZE)
		assert(check_va2pa(pgdir, KERNBASE + i) == i);

	// check kernel stack
	for (i = 0; i < KSTKSIZE; i += PGSIZE)
		assert(check_va2pa(pgdir, KSTACKTOP - KSTKSIZE + i) == PADDR(bootstack) + i);
	assert(check_va2pa(pgdir, KSTACKTOP - PTSIZE) == ~0);

	// check PDE permissions
	for (i = 0; i < NPDENTRIES; i++) {
		switch (i) {
		case PDX(UVPT):
		case PDX(KSTACKTOP-1):
		case PDX(UPAGES):
			assert(pgdir[i] & PTE_P);
			break;
		default:
			if (i >= PDX(KERNBASE)) {
				assert(pgdir[i] & PTE_P);
				assert(pgdir[i] & PTE_W);
			} else
				assert(pgdir[i] == 0);
			break;
		}
	}
	cprintf("check_kern_pgdir() succeeded!\n");
}

// This function returns the physical address of the page containing 'va',
// defined by the page directory 'pgdir'.  The hardware normally performs
// this functionality for us!  We define our own version to help check
// the check_kern_pgdir() function; it shouldn't be used elsewhere.

static physaddr_t
check_va2pa(pde_t *pgdir, uintptr_t va)
{
	pte_t *p;

	pgdir = &pgdir[PDX(va)];
	if (!(*pgdir & PTE_P))
		return ~0;
	p = (pte_t*) KADDR(PTE_ADDR(*pgdir));
	if (!(p[PTX(va)] & PTE_P))
		return ~0;
	return PTE_ADDR(p[PTX(va)]);
}


// check page_insert, page_remove, &c
static void
check_page(void)
{
	struct PageInfo *pp, *pp0, *pp1, *pp2;
	struct PageInfo *fl;
	pte_t *ptep, *ptep1;
	void *va;
	int i;
	extern pde_t entry_pgdir[];

	// should be able to allocate three pages
	pp0 = pp1 = pp2 = 0;
	assert((pp0 = page_alloc(0)));
	assert((pp1 = page_alloc(0)));
	assert((pp2 = page_alloc(0)));

	assert(pp0);
	assert(pp1 && pp1 != pp0);
	assert(pp2 && pp2 != pp1 && pp2 != pp0);

	// temporarily steal the rest of the free pages
	fl = page_free_list;
	page_free_list = 0;

	// should be no free memory
	assert(!page_alloc(0));

	// there is no page allocated at address 0
	assert(page_lookup(kern_pgdir, (void *) 0x0, &ptep) == NULL);

	// there is no free memory, so we can't allocate a page table
	assert(page_insert(kern_pgdir, pp1, 0x0, PTE_W) < 0);

	// free pp0 and try again: pp0 should be used for page table
	page_free(pp0);
	assert(page_insert(kern_pgdir, pp1, 0x0, PTE_W) == 0);
	assert(PTE_ADDR(kern_pgdir[0]) == page2pa(pp0));
	assert(check_va2pa(kern_pgdir, 0x0) == page2pa(pp1));
	assert(pp1->pp_ref == 1);
	assert(pp0->pp_ref == 1);

	// should be able to map pp2 at PGSIZE because pp0 is already allocated for page table
	assert(page_insert(kern_pgdir, pp2, (void*) PGSIZE, PTE_W) == 0);
	assert(check_va2pa(kern_pgdir, PGSIZE) == page2pa(pp2));
	assert(pp2->pp_ref == 1);

	// should be no free memory
	assert(!page_alloc(0));

	// should be able to map pp2 at PGSIZE because it's already there
	assert(page_insert(kern_pgdir, pp2, (void*) PGSIZE, PTE_W) == 0);
	assert(check_va2pa(kern_pgdir, PGSIZE) == page2pa(pp2));
	assert(pp2->pp_ref == 1);

	// pp2 should NOT be on the free list
	// could happen if ref counts are handled sloppily in page_insert
	assert(!page_alloc(0));

	// check that pgdir_walk returns a pointer to the pte
	ptep = (pte_t *) KADDR(PTE_ADDR(kern_pgdir[PDX(PGSIZE)]));
	assert(pgdir_walk(kern_pgdir, (void*)PGSIZE, 0) == ptep+PTX(PGSIZE));

	// should be able to change permissions too.
	assert(page_insert(kern_pgdir, pp2, (void*) PGSIZE, PTE_W|PTE_U) == 0);
	assert(check_va2pa(kern_pgdir, PGSIZE) == page2pa(pp2));
	assert(pp2->pp_ref == 1);
	assert(*pgdir_walk(kern_pgdir, (void*) PGSIZE, 0) & PTE_U);
	assert(kern_pgdir[0] & PTE_U);

	// should be able to remap with fewer permissions
	assert(page_insert(kern_pgdir, pp2, (void*) PGSIZE, PTE_W) == 0);
	assert(*pgdir_walk(kern_pgdir, (void*) PGSIZE, 0) & PTE_W);
	assert(!(*pgdir_walk(kern_pgdir, (void*) PGSIZE, 0) & PTE_U));

	// should not be able to map at PTSIZE because need free page for page table
	assert(page_insert(kern_pgdir, pp0, (void*) PTSIZE, PTE_W) < 0);

	// insert pp1 at PGSIZE (replacing pp2)
	assert(page_insert(kern_pgdir, pp1, (void*) PGSIZE, PTE_W) == 0);
	assert(!(*pgdir_walk(kern_pgdir, (void*) PGSIZE, 0) & PTE_U));

	// should have pp1 at both 0 and PGSIZE, pp2 nowhere, ...
	assert(check_va2pa(kern_pgdir, 0) == page2pa(pp1));
	assert(check_va2pa(kern_pgdir, PGSIZE) == page2pa(pp1));
	// ... and ref counts should reflect this
	assert(pp1->pp_ref == 2);
	assert(pp2->pp_ref == 0);

	// pp2 should be returned by page_alloc
	assert((pp = page_alloc(0)) && pp == pp2);

	// unmapping pp1 at 0 should keep pp1 at PGSIZE
	page_remove(kern_pgdir, 0x0);
	assert(check_va2pa(kern_pgdir, 0x0) == ~0);
	assert(check_va2pa(kern_pgdir, PGSIZE) == page2pa(pp1));
	assert(pp1->pp_ref == 1);
	assert(pp2->pp_ref == 0);

	// unmapping pp1 at PGSIZE should free it
	page_remove(kern_pgdir, (void*) PGSIZE);
	assert(check_va2pa(kern_pgdir, 0x0) == ~0);
	assert(check_va2pa(kern_pgdir, PGSIZE) == ~0);
	assert(pp1->pp_ref == 0);
	assert(pp2->pp_ref == 0);

	// so it should be returned by page_alloc
	assert((pp = page_alloc(0)) && pp == pp1);

	// should be no free memory
	assert(!page_alloc(0));

	// forcibly take pp0 back
	assert(PTE_ADDR(kern_pgdir[0]) == page2pa(pp0));
	kern_pgdir[0] = 0;
	assert(pp0->pp_ref == 1);
	pp0->pp_ref = 0;

	// check pointer arithmetic in pgdir_walk
	page_free(pp0);
	va = (void*)(PGSIZE * NPDENTRIES + PGSIZE);
	ptep = pgdir_walk(kern_pgdir, va, 1);
	ptep1 = (pte_t *) KADDR(PTE_ADDR(kern_pgdir[PDX(va)]));
	assert(ptep == ptep1 + PTX(va));
	kern_pgdir[PDX(va)] = 0;
	pp0->pp_ref = 0;

	// check that new page tables get cleared
	memset(page2kva(pp0), 0xFF, PGSIZE);
	page_free(pp0);
	pgdir_walk(kern_pgdir, 0x0, 1);
	ptep = (pte_t *) page2kva(pp0);
	for(i=0; i<NPTENTRIES; i++)
		assert((ptep[i] & PTE_P) == 0);
	kern_pgdir[0] = 0;
	pp0->pp_ref = 0;

	// give free list back
	page_free_list = fl;

	// free the pages we took
	page_free(pp0);
	page_free(pp1);
	page_free(pp2);

	cprintf("check_page() succeeded!\n");
}

// check page_insert, page_remove, &c, with an installed kern_pgdir
static void
check_page_installed_pgdir(void)
{
	struct PageInfo *pp, *pp0, *pp1, *pp2;
	struct PageInfo *fl;
	pte_t *ptep, *ptep1;
	uintptr_t va;
	int i;

	// check that we can read and write installed pages
	pp1 = pp2 = 0;
	assert((pp0 = page_alloc(0)));
	assert((pp1 = page_alloc(0)));
	assert((pp2 = page_alloc(0)));
	page_free(pp0);
	memset(page2kva(pp1), 1, PGSIZE);
	memset(page2kva(pp2), 2, PGSIZE);
	page_insert(kern_pgdir, pp1, (void*) PGSIZE, PTE_W);
	assert(pp1->pp_ref == 1);
	assert(*(uint32_t *)PGSIZE == 0x01010101U);
	page_insert(kern_pgdir, pp2, (void*) PGSIZE, PTE_W);
	assert(*(uint32_t *)PGSIZE == 0x02020202U);
	assert(pp2->pp_ref == 1);
	assert(pp1->pp_ref == 0);
	*(uint32_t *)PGSIZE = 0x03030303U;
	assert(*(uint32_t *)page2kva(pp2) == 0x03030303U);
	page_remove(kern_pgdir, (void*) PGSIZE);
	assert(pp2->pp_ref == 0);

	// forcibly take pp0 back
	assert(PTE_ADDR(kern_pgdir[0]) == page2pa(pp0));
	kern_pgdir[0] = 0;
	assert(pp0->pp_ref == 1);
	pp0->pp_ref = 0;

	// free the pages we took
	page_free(pp0);

	cprintf("check_page_installed_pgdir() succeeded!\n");
}