aboutsummaryrefslogtreecommitdiff
path: root/kernel/vm/vmmap.c
diff options
context:
space:
mode:
Diffstat (limited to 'kernel/vm/vmmap.c')
-rw-r--r--kernel/vm/vmmap.c225
1 files changed, 116 insertions, 109 deletions
diff --git a/kernel/vm/vmmap.c b/kernel/vm/vmmap.c
index 8789371..8c1a455 100644
--- a/kernel/vm/vmmap.c
+++ b/kernel/vm/vmmap.c
@@ -49,7 +49,7 @@ vmarea_t *vmarea_alloc(void)
new_vmarea->vma_prot = 0;
new_vmarea->vma_flags = 0;
new_vmarea->vma_obj = NULL;
- new_vmarea->vma_obj = NULL;
+ new_vmarea->vma_vmmap = NULL;
list_link_init(&new_vmarea->vma_plink);
// Return the new vmarea
@@ -134,6 +134,8 @@ void vmmap_insert(vmmap_t *map, vmarea_t *new_vma)
{
// NOT_YET_IMPLEMENTED("VM: vmmap_insert*");
+ new_vma->vma_vmmap = map;
+
// iterate over the list of vmareas
list_iterate(&map->vmm_list, vma, vmarea_t, vma_plink)
{
@@ -173,47 +175,48 @@ ssize_t vmmap_find_range(vmmap_t *map, size_t npages, int dir)
if (dir == VMMAP_DIR_LOHI)
{
// iterate over the page numbers, going from low to high
- // determine the continguous range of free virtual pages
+ // determine the contiguous range of free virtual pages
- int start, end = 0;
+ size_t start_page, contig_page = 0;
size_t vfn = ADDR_TO_PN(USER_MEM_LOW);
- while (vfn <= ADDR_TO_PN(USER_MEM_HIGH))
+ while (vfn++ < ADDR_TO_PN(USER_MEM_HIGH))
{
// Lookup the vmarea for this page number
- vmarea_t *vma = vmmap_lookup(map, vfn++);
+ vmarea_t *vma = vmmap_lookup(map, vfn);
if (vma == NULL)
{
// if unmapped, document this
- end = vfn;
- if (start == 0)
+ if (contig_page == 0)
{
- start = vfn;
+ start_page = 0;
}
+ contig_page++;
}
else
{
// if mapped, start over
- start, end = 0;
+ start_page = contig_page = 0;
}
// if the range exists, return the start
- if (end == npages)
+ if (contig_page == npages)
{
- return start;
+ KASSERT(start_page >= ADDR_TO_PN(USER_MEM_LOW));
+ return start_page;
}
}
}
-
+
// case 2: dir is VMMAP_DIR_HILO
- else if (dir == VMMAP_DIR_HILO)
+ if (dir == VMMAP_DIR_HILO)
{
// iterate over the page numbers
- int contig = 0;
+ size_t contig = 0;
size_t vfn = ADDR_TO_PN(USER_MEM_HIGH);
- while (vfn >= ADDR_TO_PN(USER_MEM_LOW))
+ while (--vfn >= ADDR_TO_PN(USER_MEM_LOW))
{
// Lookup the vmarea for this page number
- vmarea_t *vma = vmmap_lookup(map, --vfn);
+ vmarea_t *vma = vmmap_lookup(map, vfn);
if (vma == NULL)
{
// if unmapped, increment the contig
@@ -228,6 +231,7 @@ ssize_t vmmap_find_range(vmmap_t *map, size_t npages, int dir)
// if there are n contiguous pages, return the current vfn
if (contig == npages)
{
+ KASSERT(vfn >= ADDR_TO_PN(USER_MEM_LOW));
return vfn;
}
}
@@ -298,6 +302,7 @@ vmmap_t *vmmap_clone(vmmap_t *map)
// NOT_YET_IMPLEMENTED("VM: vmmap_clone");
// Create a new vmmap
+ // vmmap_collapse(map);
vmmap_t *new_vmmap = vmmap_create();
if (new_vmmap == NULL)
{
@@ -413,97 +418,101 @@ long vmmap_map(vmmap_t *map, vnode_t *file, size_t lopage, size_t npages,
int prot, int flags, off_t off, int dir, vmarea_t **new_vma)
{
// NOT_YET_IMPLEMENTED("VM: vmmap_map");
+ // return -1;
+
+ // ASK: why are these needed!!
+ KASSERT(map != NULL);
+ KASSERT(prot == PROT_NONE
+ || prot == PROT_READ
+ || prot == PROT_WRITE
+ || prot == PROT_EXEC
+ || prot == (PROT_READ | PROT_WRITE)
+ || prot == (PROT_READ | PROT_EXEC)
+ || prot == (PROT_WRITE | PROT_EXEC)
+ || prot == (PROT_READ | PROT_WRITE | PROT_EXEC));
+ KASSERT((flags & MAP_TYPE) == MAP_SHARED || (flags & MAP_TYPE) == MAP_PRIVATE);
+
+ if (lopage == 0)
+ {
+ KASSERT(dir == VMMAP_DIR_LOHI || dir == VMMAP_DIR_HILO);
+ ssize_t res = vmmap_find_range(map, npages, dir);
+ if (res == -1) {
+ return -ENOMEM;
+ }
+ lopage = res;
+ }
- // Create a new vmarea
- // see if lopage is 0. if so, use vmmap_find_range() to get a valid range
- if (lopage == 0)
+ if (lopage != 0 && (flags & MAP_FIXED))
{
- lopage = vmmap_find_range(map, npages, dir);
- if (lopage == -1)
+ long ret = vmmap_remove(map, lopage, npages);
+ if (ret < 0)
{
- return -ENOMEM;
+ return ret;
}
}
- // Alloc the new vmarea
- vmarea_t *new_vmarea = vmarea_alloc();
- if (new_vmarea == NULL)
+ // alloc the new vma
+ vmarea_t *vma = vmarea_alloc();
+ if (!vma)
{
return -ENOMEM;
}
- // Set the fields of the new vmarea
- new_vmarea->vma_start = lopage;
- new_vmarea->vma_end = lopage + npages;
- new_vmarea->vma_off = ADDR_TO_PN(off);
- new_vmarea->vma_prot = prot;
- new_vmarea->vma_flags = flags;
- new_vmarea->vma_vmmap = map;
- new_vmarea->vma_obj = NULL;
- // If file is NULL, create an anon object
- if (file == NULL)
+ // fill in fields, except for mobj and vma
+ vma->vma_start = lopage;
+ vma->vma_end = lopage + npages;
+ vma->vma_off = ADDR_TO_PN(off);
+ vma->vma_prot = prot;
+ vma->vma_flags = flags;
+
+ // make the mobj, depending on the case (anon or mmap)
+ mobj_t *obj = NULL;
+ if (file == NULL)
{
- new_vmarea->vma_obj = anon_create();
- mobj_unlock(new_vmarea->vma_obj); // unlock the anon object before use
- if (new_vmarea->vma_obj == NULL)
+ obj = anon_create();
+ if (obj == NULL)
{
- vmarea_free(new_vmarea);
+ vmarea_free(vma);
return -ENOMEM;
}
- }
+ mobj_unlock(obj);
+ }
else
{
- // If file is non-NULL, use the vnode's mmap operation to get the mobj
- long ret = file->vn_ops->mmap(file, &new_vmarea->vma_obj);
+ long ret = file->vn_ops->mmap(file, &obj);
if (ret < 0)
{
- // on fail, free the new vmarea and return the error
- vmarea_free(new_vmarea);
+ vmarea_free(vma);
return ret;
}
}
+ vma->vma_obj = obj;
- // If MAP_PRIVATE is specified, set up a shadow object
- if (flags & MAP_PRIVATE)
+ // if the flag is private, upgrade the obj to a shadow obj
+ if (flags & MAP_PRIVATE)
{
- mobj_lock(new_vmarea->vma_obj);
- mobj_t *shadow_obj = shadow_create(new_vmarea->vma_obj);
- mobj_unlock(new_vmarea->vma_obj);
- mobj_unlock(shadow_obj); // unlock the shadow object before use
- mobj_put(&new_vmarea->vma_obj); // put the original object
- if (shadow_obj == NULL)
+ mobj_t *shadow_obj = shadow_create(obj);
+ if (shadow_obj == NULL)
{
- vmarea_free(new_vmarea);
+ vmarea_free(vma);
+ mobj_put(&obj);
return -ENOMEM;
}
- new_vmarea->vma_obj = shadow_obj;
- }
+ // unlock from creation
+ mobj_unlock(shadow_obj);
- // If MAP_FIXED is specified and the given range overlaps with any preexisting mappings, remove the preexisting mappings
- if (lopage != 0 && (flags & MAP_FIXED))
- {
- long ret = vmmap_remove(map, lopage, npages);
- if (ret < 0)
- {
- vmarea_free(new_vmarea);
- // remove/put the shadow/annon object if it exists
- if (new_vmarea->vma_obj)
- {
- mobj_put(&new_vmarea->vma_obj); // FIXME: is this correct!
- }
-
- return ret;
- }
+ vma->vma_obj = shadow_obj;
+ // put the og obj
+ mobj_put(&obj);
}
- // Insert the new vmarea into the map
- vmmap_insert(map, new_vmarea);
-
- // set ret val and return 0
- if (new_vma)
+ // now that vma is ready, set it
+ vmmap_insert(map, vma);
+ if (new_vma != NULL)
{
- *new_vma = new_vmarea;
+ *new_vma = vma;
}
+
return 0;
}
@@ -558,13 +567,15 @@ long vmmap_remove(vmmap_t *map, size_t lopage, size_t npages)
// Set the fields of the new vmarea
new_vmarea->vma_start = lopage + npages;
new_vmarea->vma_end = vma->vma_end;
- new_vmarea->vma_off = vma->vma_off + (new_vmarea->vma_start - vma->vma_start);
+ new_vmarea->vma_off += lopage + npages - vma->vma_start;
new_vmarea->vma_prot = vma->vma_prot;
new_vmarea->vma_flags = vma->vma_flags;
- new_vmarea->vma_vmmap = map;
+ // new_vmarea->vma_vmmap = map;
+ mobj_lock(vma->vma_obj);
new_vmarea->vma_obj = vma->vma_obj;
// increment the refcount of the object associated with the vmarea
mobj_ref(new_vmarea->vma_obj);
+ mobj_unlock(vma->vma_obj);
// Shorten the length of the old vmarea
vma->vma_end = lopage;
@@ -574,13 +585,12 @@ long vmmap_remove(vmmap_t *map, size_t lopage, size_t npages)
// call pt_unmap_range() and tlb_flush_range() to clean the pagetables and TLB
pt_unmap_range(
- map->vmm_proc->p_pml4,
- PN_TO_ADDR(lopage),
- PN_TO_ADDR(lopage + npages)
- );
+ curproc->p_pml4,
+ PN_TO_ADDR(lopage),
+ PN_TO_ADDR(lopage + npages));
tlb_flush_range(
- map->vmm_proc->p_pml4,
- PN_TO_ADDR(lopage + npages) - PN_TO_ADDR(lopage)
+ PN_TO_ADDR(lopage),
+ npages
);
}
@@ -589,15 +599,15 @@ long vmmap_remove(vmmap_t *map, size_t lopage, size_t npages)
{
// shorten the length of the mapping
vma->vma_end = lopage;
+
// call pt_unmap_range() and tlb_flush_range() to clean the pagetables and TLB
pt_unmap_range(
- map->vmm_proc->p_pml4,
- PN_TO_ADDR(lopage),
- PN_TO_ADDR(lopage + npages)
- );
+ curproc->p_pml4,
+ PN_TO_ADDR(lopage),
+ PN_TO_ADDR(lopage + npages));
tlb_flush_range(
- map->vmm_proc->p_pml4,
- PN_TO_ADDR(lopage + npages) - PN_TO_ADDR(lopage)
+ PN_TO_ADDR(lopage),
+ npages
);
}
@@ -605,18 +615,17 @@ long vmmap_remove(vmmap_t *map, size_t lopage, size_t npages)
else if (vma->vma_start >= lopage && vma->vma_end > lopage + npages && vma->vma_start < lopage + npages)
{
// move the beginning of the mapping and shorten its length
- vma->vma_off += (lopage + npages - vma->vma_start);
+ vma->vma_off += lopage + npages - vma->vma_start;
vma->vma_start = lopage + npages;
// call pt_unmap_range() and tlb_flush_range() to clean the pagetables and TLB
pt_unmap_range(
- map->vmm_proc->p_pml4,
- PN_TO_ADDR(lopage),
- PN_TO_ADDR(lopage + npages)
- );
+ curproc->p_pml4,
+ PN_TO_ADDR(lopage),
+ PN_TO_ADDR(lopage + npages));
tlb_flush_range(
- map->vmm_proc->p_pml4,
- PN_TO_ADDR(lopage + npages) - PN_TO_ADDR(lopage)
+ PN_TO_ADDR(lopage),
+ npages
);
}
@@ -629,13 +638,12 @@ long vmmap_remove(vmmap_t *map, size_t lopage, size_t npages)
// call pt_unmap_range() and tlb_flush_range() to clean the pagetables and TLB
pt_unmap_range(
- map->vmm_proc->p_pml4,
- PN_TO_ADDR(lopage),
- PN_TO_ADDR(lopage + npages)
- );
+ curproc->p_pml4,
+ PN_TO_ADDR(lopage),
+ PN_TO_ADDR(lopage + npages));
tlb_flush_range(
- map->vmm_proc->p_pml4,
- PN_TO_ADDR(lopage + npages) - PN_TO_ADDR(lopage)
+ PN_TO_ADDR(lopage),
+ npages
);
}
}
@@ -717,11 +725,11 @@ long vmmap_read(vmmap_t *map, const void *vaddr, void *buf, size_t count)
}
// Read from the pframe and copy it into buf
- void *cursor = (void *)(bytes_read + vaddr);
+ void *cursor = bytes_read + vaddr;
size_t bytes_this_iteration = MIN(PAGE_SIZE - PAGE_OFFSET(cursor), count - bytes_read);
memcpy(
(void *) buf + bytes_read,
- (void *)pf->pf_addr + PAGE_OFFSET(cursor),
+ (void *) pf->pf_addr + PAGE_OFFSET(cursor),
bytes_this_iteration
);
@@ -767,7 +775,7 @@ long vmmap_write(vmmap_t *map, void *vaddr, const void *buf, size_t count)
size_t vfn = ADDR_TO_PN(vaddr);
size_t end_vfn = ADDR_TO_PN(vaddr + count);
size_t bytes_written = 0;
- while(vfn < end_vfn)
+ while(vfn <= end_vfn)
{
// Lookup the vmarea for this page number
vmarea_t *vma = vmmap_lookup(map, vfn);
@@ -787,7 +795,7 @@ long vmmap_write(vmmap_t *map, void *vaddr, const void *buf, size_t count)
}
// Write to the pframe, copying data from buf
- void *cursor = (void *)(bytes_written + vaddr);
+ void *cursor = bytes_written + vaddr;
size_t bytes_this_iteration = MIN(PAGE_SIZE - PAGE_OFFSET(cursor), count - bytes_written);
memcpy(
(void *)pf->pf_addr + PAGE_OFFSET(cursor),
@@ -795,9 +803,8 @@ long vmmap_write(vmmap_t *map, void *vaddr, const void *buf, size_t count)
bytes_this_iteration
);
- // Dirty the page
+ // Dirty the pframe
pf->pf_dirty = 1;
-
// Unlock the pframe
pframe_release(&pf);