#include "vm/pagefault.h"
#include "errno.h"
#include "globals.h"
#include "mm/mm.h"
#include "mm/mman.h"
#include "mm/mobj.h"
#include "mm/pframe.h"
#include "mm/tlb.h"
#include "types.h"
#include "util/debug.h"
/*
* Respond to a user mode pagefault by setting up the desired page.
*
* vaddr - The virtual address that the user pagefaulted on
* cause - A combination of FAULT_ flags indicating the type of operation that
* caused the fault (see pagefault.h)
*
* Implementation details:
* 1) Find the vmarea that contains vaddr, if it exists.
* 2) Check the vmarea's protections (see the vmarea_t struct) against the 'cause' of
* the pagefault. For example, error out if the fault has cause write and we don't
* have write permission in the area. Keep in mind:
* a) You can assume that FAULT_USER is always specified.
* b) If neither FAULT_WRITE nor FAULT_EXEC is specified, you may assume the
* fault was due to an attempted read.
* 3) Obtain the corresponding pframe from the vmarea's mobj. Be careful about
* locking and error checking!
* 4) Finally, set up a call to pt_map to insert a new mapping into the
* appropriate pagetable:
* a) Use pt_virt_to_phys() to obtain the physical address of the actual
* data.
* b) You should not assume that vaddr is page-aligned, but you should
* provide a page-aligned address to the mapping.
* c) For pdflags, use PT_PRESENT | PT_WRITE | PT_USER.
* d) For ptflags, start with PT_PRESENT | PT_USER. Also supply PT_WRITE if
* the user can and wants to write to the page.
* 5) Flush the TLB.
*
* Tips:
* 1) This gets called by _pt_fault_handler() in mm/pagetable.c, which
* importantly checks that the fault did not occur in kernel mode. Think
* about why a kernel mode page fault would be bad in Weenix. Explore
* _pt_fault_handler() to get a sense of what's going on.
* 2) If you run into any errors, you should segfault by calling
* do_exit(EFAULT).
*/
void handle_pagefault(uintptr_t vaddr, uintptr_t cause)
{
    dbg(DBG_VM, "vaddr = 0x%p (0x%p), cause = %lu\n", (void *)vaddr,
        PAGE_ALIGN_DOWN(vaddr), cause);

    // Check that vaddr falls within the user portion of the address space;
    // anything outside [USER_MEM_LOW, USER_MEM_HIGH) is an immediate segfault
    if (vaddr < USER_MEM_LOW || vaddr >= USER_MEM_HIGH)
    {
        do_exit(EFAULT);
    }
    // Look up the vmarea that contains this page number, if any
    size_t pn = ADDR_TO_PN(vaddr);
    vmarea_t *vma = vmmap_lookup(curproc->p_vmmap, pn);
    if (!vma)
    {
        do_exit(EFAULT);
    }
    // Error out if we don't have any permission in the area
    if (vma->vma_prot == PROT_NONE)
    {
        do_exit(EFAULT);
    }
    // Check the vmarea's protections against the 'cause' of the pagefault.
    // Per the spec above, if neither FAULT_WRITE nor FAULT_EXEC is set, the
    // fault was an attempted read, so only in that case do we require
    // PROT_READ (a write fault on a write-only area must not segfault)
    if ((cause & FAULT_WRITE) && !(vma->vma_prot & PROT_WRITE))
    {
        do_exit(EFAULT);
    }
    else if ((cause & FAULT_EXEC) && !(vma->vma_prot & PROT_EXEC))
    {
        do_exit(EFAULT);
    }
    else if (!(cause & (FAULT_WRITE | FAULT_EXEC)) &&
             !(vma->vma_prot & PROT_READ))
    {
        do_exit(EFAULT);
    }
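    /*
     * Locking note: mobj_get_pframe() expects the mobj to be locked by the
     * caller. The forwrite argument tells the object that the page is about
     * to be written, which (for example) is what lets shadow objects do
     * their copy-on-write work.
     */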
    // Obtain the corresponding pframe from the vmarea's mobj. The page number
    // within the object is offset by where this area begins in the mapping
    pframe_t *pfp;
    long forwrite = (cause & FAULT_WRITE) ? 1 : 0;
    mobj_lock(vma->vma_obj);
    long status = mobj_get_pframe(vma->vma_obj,
                                  pn - vma->vma_start + vma->vma_off, forwrite,
                                  &pfp);
    mobj_unlock(vma->vma_obj);
    if (status < 0)
    {
        do_exit(EFAULT);
    }
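    // On success, mobj_get_pframe() hands back the pframe locked, so read
    // pf_addr (and translate it) before pframe_release() drops that lock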
    // Translate the pframe's kernel virtual address to a physical address,
    // then release the pframe
    uintptr_t paddr = pt_virt_to_phys((uintptr_t)pfp->pf_addr);
    pframe_release(&pfp);

    // ptflags: start with PT_PRESENT | PT_USER, adding PT_WRITE only when the
    // user wants to write (FAULT_WRITE) and is allowed to (checked above)
    uint32_t ptflags = PT_PRESENT | PT_USER;
    if (cause & FAULT_WRITE)
    {
        ptflags |= PT_WRITE;
    }

    // Insert the new mapping into this process's pagetable. vaddr may not be
    // page-aligned, so align it down; pdflags are PT_PRESENT | PT_WRITE |
    // PT_USER per the spec above
    status = pt_map(curproc->p_pml4, paddr, (uintptr_t)PAGE_ALIGN_DOWN(vaddr),
                    PT_PRESENT | PT_WRITE | PT_USER, ptflags);
    if (status < 0)
    {
        do_exit(EFAULT);
    }

    // Flush the TLB entry for the faulting address
    tlb_flush(vaddr);
}
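
/*
 * Illustrative only (not part of the handler): a user-space sequence that
 * would exercise this code path, assuming the usual userland mmap() wrapper.
 * The first store below touches a page with no pagetable entry yet, trapping
 * into _pt_fault_handler() and then here with FAULT_USER | FAULT_WRITE; the
 * handler maps in a page and the faulting instruction is retried.
 *
 *     char *buf = mmap(NULL, PAGE_SIZE, PROT_READ | PROT_WRITE,
 *                      MAP_PRIVATE | MAP_ANON, -1, 0);
 *     buf[0] = 'x'; // first touch: pagefault -> handle_pagefault()
 */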