#define Firstlevel  0xffc00000
#define Secondlevel 0x003ff000
#define PAGE_FRAME  0xfffff000

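/*
 * Quick illustration of how these masks carve up a user virtual address:
 * the top 10 bits index the first-level table, the next 10 bits index the
 * second-level table, and the low 12 bits are the page offset. For an
 * arbitrary example address 0x00401abc (not taken from the code above):
 *
 *     level1_index = (0x00401abc & Firstlevel)  >> 22;   // = 0x001
 *     level2_index = (0x00401abc & Secondlevel) >> 12;   // = 0x001
 *     offset       =  0x00401abc & ~PAGE_FRAME;          // = 0xabc
 */
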
int
vm_fault(int faulttype, vaddr_t faultaddress)
{
	struct addrspace *as;
	int retval;
	u_int32_t permission = 0;
	int spl;

	spl = splhigh();

	faultaddress &= PAGE_FRAME;

	if (faultaddress == 0) {
		splx(spl);
		return EFAULT;
	}

	if (faulttype == VM_FAULT_READONLY) {
		// write to a read-only page: treat it as a bad access
		splx(spl);
		return EFAULT;
	}
	else if (faulttype != VM_FAULT_READ && faulttype != VM_FAULT_WRITE) {
		splx(spl);
		return EINVAL;
	}

	as = curthread->t_vmspace;

	if (as == NULL) {
		/*
		 * No address space set up. This is probably a kernel
		 * fault early in boot. Return EFAULT so as to panic
		 * instead of getting into an infinite faulting loop.
		 */
		splx(spl);
		return EFAULT;
	}

	vaddr_t start_vm, end_vm;
	int i;

	found = 0;	// global hit flag, also set by fault_stack()/fault_heap()

	// walk the defined regions and see if the fault lies inside one of them
	for (i = 0; i < array_getnum(as->as_regions); i++) {
		struct as_regions *current = array_getguy(as->as_regions, i);
		start_vm = current->end_vm;
		end_vm = start_vm + current->npgs * PAGE_SIZE;
		if (faultaddress >= start_vm && faultaddress < end_vm) {
			found = 1;
			permission = current->region_permis;
			retval = faults(faultaddress, permission);
			splx(spl);
			return retval;
		}
	}

	// not in a region: check the stack
	if (!found) {
		fault_stack(faulttype, faultaddress, &retval);
		if (found) {
			splx(spl);
			return retval;
		}
	}

	// still not found: check the heap
	if (!found) {
		fault_heap(faulttype, faultaddress, &retval, as);
		if (found) {
			splx(spl);
			return retval;
		}
	}

	splx(spl);
	return EFAULT;
}

void fault_stack(int faulttype, vaddr_t faultaddress, int *retval)
{
	u_int32_t permissions = 0;
	vaddr_t stack_base, stack_top;

	// the user stack is the fixed 24-page window just below MIPS_KSEG0
	stack_top = MIPS_KSEG0;
	stack_base = stack_top - 24 * PAGE_SIZE;

	if (faultaddress >= stack_base && faultaddress < stack_top) {
		found = 1;
		permissions = 6;	// PF_R | PF_W: stack pages are readable and writable
		*retval = faults(faultaddress, permissions);
	}
}

void fault_heap(int faulttype, vaddr_t faultaddress, int *retval, struct addrspace *as)
{
	u_int32_t permissions = 0;
	vaddr_t heap_base, heap_top;

	heap_base = as->start_heap;
	heap_top = as->end_heap;

	if (faultaddress >= heap_base && faultaddress < heap_top) {
		found = 1;
		permissions = 6;	// PF_R | PF_W: heap pages are readable and writable
		*retval = faults(faultaddress, permissions);
	}
}

int faults(vaddr_t faultaddress, u_int32_t permissions)
{
	int spl = splhigh();
	paddr_t paddr;
	u_int32_t tlb_hi, tlb_lo;
	int k;

	// walk the page table; allocates/loads the page if needed and returns its physical address
	check_levels(faultaddress, &paddr);

	// mark the TLB entry writable if the segment allows writes
	if (permissions & PF_W) {
		paddr |= TLBLO_DIRTY;
	}

	// load the translation into the TLB: take the first invalid slot
	for (k = 0; k < NUM_TLB; k++) {
		TLB_Read(&tlb_hi, &tlb_lo, k);
		// skip valid entries
		if (tlb_lo & TLBLO_VALID) {
			continue;
		}
		// fill the first empty one
		tlb_hi = faultaddress;
		tlb_lo = paddr | TLBLO_VALID;
		TLB_Write(tlb_hi, tlb_lo, k);
		splx(spl);
		return 0;
	}

	// no invalid slots => pick a random entry and evict it
	tlb_hi = faultaddress;
	tlb_lo = paddr | TLBLO_VALID;
	TLB_Random(tlb_hi, tlb_lo);
	splx(spl);
	return 0;
}

void check_levels(vaddr_t faultaddress, paddr_t *paddr)
{
	int level1_index = (faultaddress & Firstlevel) >> 22;
	int level2_index = (faultaddress & Secondlevel) >> 12;

	// check whether the second-level page table exists
	struct as_pagetable *lvl2_ptes = curthread->t_vmspace->as_ptes[level1_index];

	if (lvl2_ptes != NULL) {

		u_int32_t *pte = &(lvl2_ptes->PTE[level2_index]);

		if (*pte & 0x00000800) {
			// bit 11 set: page is present in physical memory
			*paddr = *pte & PAGE_FRAME;
		}
		else {
			if (*pte) {
				// PTE exists but the page is not resident: bring it back in
				int freed_id = free_id();
				*paddr = load_seg(freed_id, curthread->t_vmspace, faultaddress);
			} else {
				// page does not exist yet: allocate a fresh frame
				*paddr = alloc_page_userspace(NULL, faultaddress);
			}
			// update the PTE: keep the flag bits, install the frame, mark present
			*pte &= 0x00000fff;
			*pte |= *paddr;
			*pte |= 0x00000800;
		}
	} else {

		// the second-level page table doesn't exist yet: create and zero one
		curthread->t_vmspace->as_ptes[level1_index] = kmalloc(sizeof(struct as_pagetable));
		lvl2_ptes = curthread->t_vmspace->as_ptes[level1_index];

		int i;
		for (i = 0; i < PT_SIZE; i++) {
			lvl2_ptes->PTE[i] = 0;
		}

		// allocate a page and record the mapping
		*paddr = alloc_page_userspace(NULL, faultaddress);

		u_int32_t *pte = retEntry(curthread, faultaddress);

		*pte &= 0x00000fff;
		*pte |= 0x00000800;
		*pte |= *paddr;
	}
}

paddr_t load_seg(int id, struct addrspace *as, vaddr_t v_as)
{
	// claim coremap entry 'id' for this address space / virtual page
	COREMAP[id].state = 2;
	COREMAP[id].as = as;
	COREMAP[id].v_as = v_as;
	COREMAP[id].length = 1;

	return COREMAP[id].p_as;
}

u_int32_t *retEntry(struct thread *addrspace_owner, vaddr_t va)
{
	int level1_index = (va & Firstlevel) >> 22;
	int level2_index = (va & Secondlevel) >> 12;
	struct as_pagetable *lvl2_ptes = addrspace_owner->t_vmspace->as_ptes[level1_index];

	if (lvl2_ptes == NULL)
		return NULL;
	else
		return &(lvl2_ptes->PTE[level2_index]);
}

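/*
 * Small usage sketch (hypothetical helper, not part of the code above):
 * retEntry() plus the present bit used in check_levels() (bit 11, 0x00000800)
 * is enough to ask whether a virtual page is resident and, if so, where.
 */
static int page_is_resident(struct thread *owner, vaddr_t va, paddr_t *paddr)
{
	u_int32_t *pte = retEntry(owner, va);

	if (pte == NULL || !(*pte & 0x00000800)) {
		return 0;	// no second-level table, or page not present
	}
	*paddr = *pte & PAGE_FRAME;	// top 20 bits hold the physical frame
	return 1;
}
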
paddr_t alloc_page_userspace(struct addrspace *as, vaddr_t v_as)
{
	// grab a free physical frame from the coremap
	int freed_id = free_id();

	// record which address space owns the frame
	if (as == NULL)
		COREMAP[freed_id].as = curthread->t_vmspace;
	else
		COREMAP[freed_id].as = as;

	COREMAP[freed_id].state = 2;
	COREMAP[freed_id].v_as = v_as;
	COREMAP[freed_id].length = 1;

	return COREMAP[freed_id].p_as;
}
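
/*
 * For reference, a sketch of the coremap entry shape implied by the uses of
 * COREMAP[] above. The field names are taken from this code; the actual
 * definition lives elsewhere in this kernel and may differ.
 */
struct coremap_entry {
	struct addrspace *as;	// owning address space (curthread's if allocated with as == NULL)
	vaddr_t v_as;		// user virtual address mapped to this frame
	paddr_t p_as;		// physical address of the frame
	int state;		// e.g. 2 == allocated, as set by load_seg()/alloc_page_userspace()
	int length;		// number of pages in this allocation
};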