Diffstat (limited to 'kernel/vm')
 kernel/vm/pagefault.c |  81
 kernel/vm/shadow.c    | 111
 kernel/vm/vmmap.c     |   4
 3 files changed, 98 insertions(+), 98 deletions(-)
diff --git a/kernel/vm/pagefault.c b/kernel/vm/pagefault.c
index b289537..39e5776 100644
--- a/kernel/vm/pagefault.c
+++ b/kernel/vm/pagefault.c
@@ -49,80 +49,87 @@ void handle_pagefault(uintptr_t vaddr, uintptr_t cause)
{
dbg(DBG_VM, "vaddr = 0x%p (0x%p), cause = %lu\n", (void *)vaddr,
PAGE_ALIGN_DOWN(vaddr), cause);
+
// NOT_YET_IMPLEMENTED("VM: handle_pagefault");
- // 1) Find the vmarea that contains vaddr, if it exists.
- // check that the vaddr is valid
- if (vaddr < USER_MEM_LOW || vaddr > USER_MEM_HIGH)
+ // Check that the vaddr is valid
+ if (vaddr < USER_MEM_LOW)
{
do_exit(EFAULT);
}
- // lookup the vmarea for this addr
- vmarea_t *vma = vmmap_lookup(curproc->p_vmmap, ADDR_TO_PN(vaddr));
- if (vma == NULL)
+ // USER_MEM_HIGH is an exclusive upper bound on user addresses
+ if (vaddr >= USER_MEM_HIGH)
{
do_exit(EFAULT);
}
- // 2) Check the vmarea's protections (see the vmarea_t struct) against the 'cause'
- // error out if the fault has cause write and we don't have write permission in the area
- if ((cause & FAULT_WRITE) && !(vma->vma_prot & PROT_WRITE))
+ // Lookup the vmarea for this address
+ size_t pn = ADDR_TO_PN(vaddr);
+ vmarea_t *vma = vmmap_lookup(curproc->p_vmmap, pn);
+ if (!vma)
{
do_exit(EFAULT);
}
- // error out if the fault has cause exec and we don't have exec permission in the area
- if ((cause & FAULT_EXEC) && !(vma->vma_prot & PROT_EXEC))
+
+ // Error out if we don't have any permission in the area
+ if (vma->vma_prot == PROT_NONE)
{
do_exit(EFAULT);
}
- // error out if we don't have read permission in the area
- if (!(vma->vma_prot & PROT_READ))
+
+ // Check the vmarea's protections (see the vmarea_t struct) against the 'cause' of the pagefault
+ if ((cause & FAULT_WRITE) && !(vma->vma_prot & PROT_WRITE))
{
do_exit(EFAULT);
- }
- // error our if we don't have any permission in the area
- if (vma->vma_prot == PROT_NONE)
+ }
+ else if ((cause & FAULT_EXEC) && !(vma->vma_prot & PROT_EXEC))
+ {
+ do_exit(EFAULT);
+ }
+ // only a plain read fault requires read permission
+ else if (!(cause & (FAULT_WRITE | FAULT_EXEC)) && !(vma->vma_prot & PROT_READ))
{
do_exit(EFAULT);
}
- // 3) Obtain the corresponding pframe from the vmarea's mobj.
- pframe_t *pf;
+ // Obtain the corresponding pframe from the vmarea's mobj
+ long forwrite = 0;
+ if (cause & FAULT_WRITE)
+ {
+ forwrite = 1;
+ }
+ pframe_t *pfp;
mobj_lock(vma->vma_obj);
- int ret = mobj_get_pframe(
+ long status = mobj_get_pframe(
vma->vma_obj,
- vma->vma_off + ADDR_TO_PN(vaddr) - vma->vma_start,
- cause & FAULT_WRITE ? 1 : 0,
- &pf
+ pn - vma->vma_start + vma->vma_off,
+ forwrite,
+ &pfp
);
mobj_unlock(vma->vma_obj);
- if (ret < 0)
+ if (status < 0)
{
do_exit(EFAULT);
}
- // 4) Finally, set up a call to pt_map to insert a new mapping into the appropriate pagetable
- uintptr_t paddr = pt_virt_to_phys(pf->pf_addr);
- pframe_release(&pf);
- int pdflags = PT_PRESENT | PT_WRITE | PT_USER;
- int ptflags = PT_PRESENT | PT_USER;
- if (cause & FAULT_WRITE)
+ // Set up a call to pt_map to insert a new mapping into the appropriate pagetable
+ uintptr_t paddr = pt_virt_to_phys((uintptr_t) pfp->pf_addr);
+ pframe_release(&pfp);
+ uint32_t ptflags = PT_PRESENT | PT_USER;
+ if (cause & FAULT_WRITE)
{
ptflags |= PT_WRITE;
}
-
- int err = pt_map(
+ // map the new frame; directory-level flags stay permissive, the leaf ptflags enforce protection
+ status = pt_map(
curproc->p_pml4,
- paddr,
- (uintptr_t) PAGE_ALIGN_DOWN(vaddr),
- pdflags,
+ paddr,
+ (uintptr_t) PAGE_ALIGN_DOWN(vaddr),
+ PT_PRESENT | PT_USER | PT_WRITE,
ptflags
);
- if (err < 0)
+ if (status < 0)
{
do_exit(EFAULT);
}
- // 5) Flush the TLB
- tlb_flush((uintptr_t) PAGE_ALIGN_DOWN(vaddr));
+ // Flush the TLB
+ tlb_flush(vaddr);
}
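
The index arithmetic in the mobj_get_pframe call above can be checked in isolation: the faulting page's index into the backing object is its page number, rebased from the area's start page and shifted by the area's offset into the object. A minimal user-space sketch, with PAGE_SHIFT/ADDR_TO_PN mirroring the kernel macros and the sample values purely illustrative:

/* Sketch only: not kernel code. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define ADDR_TO_PN(a) ((uintptr_t)(a) >> PAGE_SHIFT)

static size_t object_pagenum(uintptr_t vaddr, size_t vma_start, size_t vma_off)
{
    /* pn - vma_start is the page's index within the area;
     * adding vma_off rebases it into the backing object */
    return ADDR_TO_PN(vaddr) - vma_start + vma_off;
}

int main(void)
{
    /* an area mapped at page 0x100 whose object mapping begins at page 4:
     * a fault at 0x101000 (page 0x101) resolves to object page 5 */
    printf("%zu\n", object_pagenum(0x101000, 0x100, 4));
    return 0;
}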
diff --git a/kernel/vm/shadow.c b/kernel/vm/shadow.c
index 06cf20d..4883160 100644
--- a/kernel/vm/shadow.c
+++ b/kernel/vm/shadow.c
@@ -43,6 +43,7 @@ void shadow_init()
{
// NOT_YET_IMPLEMENTED("VM: shadow_init");
shadow_allocator = slab_allocator_create("shadow", sizeof(mobj_shadow_t));
+ KASSERT(shadow_allocator);
}
/*
@@ -55,7 +56,7 @@ void shadow_init()
* 2) Set up the bottom object of the shadow chain, which could have two cases:
* a) Either shadowed is a shadow object, and you can use its bottom_mobj
* b) Or shadowed is not a shadow object, in which case it is the bottom
* object of this chain.
*
* Make sure to manage the refcounts correctly.
*/
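
The two cases in 2) reduce to a single conditional when picking the new object's bottom; a hedged sketch using only the identifiers visible in this diff (MOBJ_SHADOW, MOBJ_TO_SO, bottom_mobj), with the refcounting step noted rather than shown:

mobj_t *bottom;
if (shadowed->mo_type == MOBJ_SHADOW)
{
    /* case a) reuse the existing chain's bottom object */
    bottom = MOBJ_TO_SO(shadowed)->bottom_mobj;
}
else
{
    /* case b) shadowed itself is the bottom of the chain */
    bottom = shadowed;
}
/* both shadowed and bottom must then gain a reference before being
 * stored in the new mobj_shadow_t */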
@@ -63,6 +64,11 @@ mobj_t *shadow_create(mobj_t *shadowed)
{
// NOT_YET_IMPLEMENTED("VM: shadow_create");
+ if (!shadowed)
+ {
+ return NULL;
+ }
+
// create a new shadow object
mobj_shadow_t *so = (mobj_shadow_t *)slab_obj_alloc(shadow_allocator);
if (!so)
@@ -142,46 +148,41 @@ static long shadow_get_pframe(mobj_t *o, size_t pagenum, long forwrite,
pframe_t **pfp)
{
// NOT_YET_IMPLEMENTED("VM: shadow_get_pframe");
-
- // if forwrite is set, use mobj_default_get_pframe
+ // if forwrite is set, use mobj_default_get_pframe to create a private frame in this shadow object
if (forwrite)
{
return mobj_default_get_pframe(o, pagenum, forwrite, pfp);
}
-
- // else, check if the object already contains the desired frame
+ // check if o already contains the desired frame.
pframe_t *pf = NULL;
mobj_find_pframe(o, pagenum, &pf);
if (pf)
{
- // if it does, return the pframe
*pfp = pf;
return 0;
}
- // iterate through the shadow chain to find the nearest shadow mobj that has the frame
- mobj_shadow_t *so = MOBJ_TO_SO(o);
- mobj_t *iter = so->shadowed;
- while (iter && iter->mo_type == MOBJ_SHADOW)
+ mobj_shadow_t *shadow = MOBJ_TO_SO(o);
+ mobj_t *current = shadow->shadowed;
+
+ while (current && current->mo_type == MOBJ_SHADOW)
{
- mobj_lock(iter);
- mobj_find_pframe(iter, pagenum, &pf);
- mobj_unlock(iter);
+ mobj_lock(current);
+ mobj_find_pframe(current, pagenum, &pf);
+ mobj_unlock(current);
+
if (pf)
{
*pfp = pf;
return 0;
}
- // update the iterator
- so = MOBJ_TO_SO(iter);
- iter = so->shadowed;
- }
- // if no shadow objects have the page, call mobj_get_pframe() to get the page from the bottom object
- // at this point, iter is the bottom object
- mobj_lock(so->bottom_mobj);
- long ret = mobj_get_pframe(so->bottom_mobj, pagenum, forwrite, pfp);
- mobj_unlock(so->bottom_mobj);
+ shadow = MOBJ_TO_SO(current);
+ current = shadow->shadowed;
+ }
+ mobj_lock(shadow->bottom_mobj);
+ long ret = mobj_get_pframe(shadow->bottom_mobj, pagenum, forwrite, pfp);
+ mobj_unlock(shadow->bottom_mobj);
return ret;
}
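
The walk above stops at the nearest object in the chain holding a resident copy of the page; the same pattern in standalone form, with a simplified stand-in type instead of the kernel's mobj API:

#include <stddef.h>

#define NPAGES 16

typedef struct node
{
    struct node *shadowed;  /* next object down the chain, NULL at bottom */
    void *frames[NPAGES];   /* resident page frames, NULL if absent */
} node_t;

static void *chain_lookup(node_t *top, size_t pagenum)
{
    for (node_t *cur = top; cur != NULL; cur = cur->shadowed)
    {
        if (cur->frames[pagenum])
        {
            /* the nearest copy shadows every older one below it */
            return cur->frames[pagenum];
        }
    }
    return NULL; /* the kernel would now fetch from the bottom object */
}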
@@ -208,48 +209,39 @@ static long shadow_get_pframe(mobj_t *o, size_t pagenum, long forwrite,
*/
static long shadow_fill_pframe(mobj_t *o, pframe_t *pf)
{
// NOT_YET_IMPLEMENTED("VM: shadow_fill_pframe");
+ pframe_t *pf_shadow = NULL;
+ long ret = 0;
+
- // get the mobj_shadow_t
mobj_shadow_t *so = MOBJ_TO_SO(o);
- // iterate over the shadow chain
- mobj_t *iter = so->shadowed;
- while (iter && iter->mo_type == MOBJ_SHADOW)
+ mobj_t *shadowed = so->shadowed;
+ while (shadowed && shadowed->mo_type == MOBJ_SHADOW)
{
- // get the pframe from the shadow object
- pframe_t *spf = NULL;
- mobj_lock(iter);
- mobj_find_pframe(iter, pf->pf_pagenum, &spf);
- mobj_unlock(iter);
+ mobj_lock(shadowed);
+ mobj_find_pframe(shadowed, pf->pf_pagenum, &pf_shadow);
+ mobj_unlock(shadowed);
- // if the pframe is found, copy the contents into pf
- // then release the pframe
- if (spf)
+ if (pf_shadow)
{
- memcpy(pf->pf_addr, spf->pf_addr, PAGE_SIZE);
- pframe_release(&spf);
+ memcpy(pf->pf_addr, pf_shadow->pf_addr, PAGE_SIZE);
+ pframe_release(&pf_shadow);
return 0;
}
- // update the iterator
- so = MOBJ_TO_SO(iter);
- iter = so->shadowed;
+ so = MOBJ_TO_SO(shadowed);
+ shadowed = so->shadowed;
}
-
- // if none of the shadow objects have a copy of the frame, use mobj_get_pframe on the bottom object
- pframe_t *spf = NULL;
mobj_lock(so->bottom_mobj);
- long ret = mobj_get_pframe(so->bottom_mobj, pf->pf_pagenum, 0, &spf);
+ ret = mobj_get_pframe(so->bottom_mobj, pf->pf_pagenum, 0, &pf_shadow);
mobj_unlock(so->bottom_mobj);
- // check if the operation was sucessful, memcpy the contents into pf
- // and release the pframe
- if (ret >= 0)
- {
- memcpy(pf->pf_addr, spf->pf_addr, PAGE_SIZE);
- pframe_release(&spf);
+ if (ret < 0)
+ {
+ return ret;
}
-
- return ret;
+ memcpy(pf->pf_addr, pf_shadow->pf_addr, PAGE_SIZE);
+ pframe_release(&pf_shadow);
+ return 0;
}
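
What the fill buys is copy-on-write isolation: once the private frame is populated, later writes to the ancestor's page no longer reach it. A user-space analogy of that copy step, with plain malloc/memcpy standing in for pframes:

#include <assert.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096

int main(void)
{
    char *ancestor = calloc(1, PAGE_SIZE);
    ancestor[0] = 'A';

    /* the fill step: copy the nearest ancestor frame into the new frame */
    char *private_copy = malloc(PAGE_SIZE);
    memcpy(private_copy, ancestor, PAGE_SIZE);

    ancestor[0] = 'B';              /* a later write to the ancestor... */
    assert(private_copy[0] == 'A'); /* ...is not visible through the copy */

    free(ancestor);
    free(private_copy);
    return 0;
}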
/*
@@ -264,6 +256,7 @@ static long shadow_fill_pframe(mobj_t *o, pframe_t *pf)
static long shadow_flush_pframe(mobj_t *o, pframe_t *pf)
{
// NOT_YET_IMPLEMENTED("VM: shadow_flush_pframe");
return 0;
}
@@ -280,17 +273,17 @@ static long shadow_flush_pframe(mobj_t *o, pframe_t *pf)
static void shadow_destructor(mobj_t *o)
{
// NOT_YET_IMPLEMENTED("VM: shadow_destructor");
+ mobj_default_destructor(o);
- // get the mobj_shadow_t
mobj_shadow_t *so = MOBJ_TO_SO(o);
- // call the default destructor
- mobj_default_destructor(o);
-
- // put the shadow and bottom_mobj
+ // put the shadow and bottom_mobj members of the shadow object
mobj_put(&so->shadowed);
mobj_put(&so->bottom_mobj);
- // free the slab
slab_obj_free(shadow_allocator, so);
-}
\ No newline at end of file
+}
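
The destructor's order matters: free what the object owns (the default destructor flushes and frees its pframes), then drop the references it holds, then free the container itself. The same order in a self-contained user-space sketch with illustrative types:

#include <stdlib.h>

typedef struct dep { int refcount; } dep_t;

typedef struct container
{
    void *owned_buf; /* resource the object owns outright */
    dep_t *dep;      /* reference-counted dependency */
} container_t;

static void dep_put(dep_t **d)
{
    if (--(*d)->refcount == 0)
    {
        free(*d);
    }
    *d = NULL; /* mirrors mobj_put clearing its argument */
}

static void container_destroy(container_t *c)
{
    free(c->owned_buf); /* 1) owned resources (mobj_default_destructor) */
    dep_put(&c->dep);   /* 2) held references (mobj_put) */
    free(c);            /* 3) the container itself (slab_obj_free) */
}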
diff --git a/kernel/vm/vmmap.c b/kernel/vm/vmmap.c
index 8c1a455..5f8e575 100644
--- a/kernel/vm/vmmap.c
+++ b/kernel/vm/vmmap.c
@@ -169,7 +169,7 @@ void vmmap_insert(vmmap_t *map, vmarea_t *new_vma)
*/
ssize_t vmmap_find_range(vmmap_t *map, size_t npages, int dir)
{
// NOT_YET_IMPLEMENTED("VM: vmmap_find_range");
// case 1: dir is VMMAP_DIR_LOHI
if (dir == VMMAP_DIR_LOHI)
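
The loop body is elided by the diff's context limit; for intuition, a hedged sketch of a low-to-high gap scan over a hypothetical sorted, non-overlapping array of used [start, end) page ranges (the kernel iterates its vmarea list instead):

#include <stddef.h>

typedef struct { size_t start, end; } used_range_t;

static long find_range_lohi(const used_range_t *used, size_t n,
                            size_t lo, size_t hi, size_t npages)
{
    size_t cursor = lo; /* lowest page not yet known to be taken */
    for (size_t i = 0; i < n; i++)
    {
        if (used[i].start >= cursor + npages)
        {
            return (long)cursor; /* the gap before this range fits */
        }
        if (used[i].end > cursor)
        {
            cursor = used[i].end; /* skip past this used range */
        }
    }
    return (hi >= cursor + npages) ? (long)cursor : -1; /* tail gap or failure */
}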
@@ -871,4 +871,4 @@ end:
buf[osize - 1] = '\0';
}
return osize - size;
}