Diffstat (limited to 'kernel/vm/vmmap.c')
-rw-r--r--  kernel/vm/vmmap.c  | 556
1 file changed, 537 insertions(+), 19 deletions(-)
diff --git a/kernel/vm/vmmap.c b/kernel/vm/vmmap.c
index 0e2dad6..fd99c55 100644
--- a/kernel/vm/vmmap.c
+++ b/kernel/vm/vmmap.c
@@ -16,6 +16,7 @@
#include "mm/mm.h"
#include "mm/mman.h"
#include "mm/slab.h"
+#include "mm/tlb.h"
static slab_allocator_t *vmmap_allocator;
static slab_allocator_t *vmarea_allocator;
@@ -32,8 +33,27 @@ void vmmap_init(void)
*/
vmarea_t *vmarea_alloc(void)
{
- NOT_YET_IMPLEMENTED("VM: vmarea_alloc");
- return NULL;
+ // NOT_YET_IMPLEMENTED("VM: vmarea_alloc");
+
+ // Allocate a new vmarea
+ vmarea_t *new_vmarea = (vmarea_t *)slab_obj_alloc(vmarea_allocator);
+ if (new_vmarea == NULL)
+ {
+ return NULL;
+ }
+
+ // Initialize the fields of the vmarea
+ new_vmarea->vma_start = 0;
+ new_vmarea->vma_end = 0;
+ new_vmarea->vma_off = 0;
+ new_vmarea->vma_prot = 0;
+ new_vmarea->vma_flags = 0;
+ new_vmarea->vma_obj = NULL;
+    new_vmarea->vma_vmmap = NULL;
+ list_link_init(&new_vmarea->vma_plink);
+
+ // Return the new vmarea
+ return new_vmarea;
}
/*
@@ -42,7 +62,22 @@ vmarea_t *vmarea_alloc(void)
*/
void vmarea_free(vmarea_t *vma)
{
- NOT_YET_IMPLEMENTED("VM: vmarea_free");
+ // NOT_YET_IMPLEMENTED("VM: vmarea_free");
+
+ // Remove the vmarea from any lists it may be on
+ if (list_link_is_linked(&vma->vma_plink))
+ {
+ list_remove(&vma->vma_plink);
+ }
+
+ // Put the vma_obj if it exists
+ if (vma->vma_obj != NULL)
+ {
+ mobj_put(&vma->vma_obj);
+ }
+
+ // Free the vmarea
+ slab_obj_free(vmarea_allocator, vma);
}
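+
+/*
+ * Note: because vmarea_free() both unlinks the area and puts vma_obj, a
+ * caller that frees a vmarea must not mobj_put() the same object again
+ * afterwards; doing so would drop one reference too many.
+ */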
/*
@@ -50,8 +85,20 @@ void vmarea_free(vmarea_t *vma)
*/
vmmap_t *vmmap_create(void)
{
- NOT_YET_IMPLEMENTED("VM: vmmap_create");
- return NULL;
+ // NOT_YET_IMPLEMENTED("VM: vmmap_create");
+
+ // Allocate a new vmmap
+ vmmap_t *new_vmmap = (vmmap_t *)slab_obj_alloc(vmmap_allocator);
+ if (new_vmmap == NULL)
+ {
+ return NULL;
+ }
+
+ // Initialize the fields of the vmmap
+ list_init(&new_vmmap->vmm_list);
+ new_vmmap->vmm_proc = curproc;
+
+ return new_vmmap;
}
/*
@@ -60,7 +107,22 @@ vmmap_t *vmmap_create(void)
*/
void vmmap_destroy(vmmap_t **mapp)
{
- NOT_YET_IMPLEMENTED("VM: vmmap_destroy");
+ // NOT_YET_IMPLEMENTED("VM: vmmap_destroy");
+
+ vmmap_t *map = *mapp;
+
+ // Iterate through the list of vmareas and free each one
+    list_iterate(&map->vmm_list, vma, vmarea_t, vma_plink)
+ {
+ list_remove(&vma->vma_plink);
+ vmarea_free(vma);
+ }
+
+ // Free the map
+ slab_obj_free(vmmap_allocator, map);
+
+ // Set the map to NULL
+ *mapp = NULL;
}
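+
+/*
+ * Usage sketch: callers pass a vmmap_t** so their pointer cannot dangle:
+ *
+ *     vmmap_t *map = vmmap_create();
+ *     ...
+ *     vmmap_destroy(&map);  // frees the map and its vmareas; map is now NULL
+ */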
/*
@@ -70,7 +132,22 @@ void vmmap_destroy(vmmap_t **mapp)
*/
void vmmap_insert(vmmap_t *map, vmarea_t *new_vma)
{
- NOT_YET_IMPLEMENTED("VM: vmmap_insert*");
+    // NOT_YET_IMPLEMENTED("VM: vmmap_insert*");
+
+    // the new vmarea now belongs to this map
+    new_vma->vma_vmmap = map;
+
+    // iterate over the list of vmareas, which is kept sorted by start page
+    list_iterate(&map->vmm_list, vma, vmarea_t, vma_plink)
+    {
+        // ranges are [start, end), so the first vmarea that begins at or
+        // after the new vmarea's end is its successor
+        if (vma->vma_start >= new_vma->vma_end)
+        {
+            // insert the new vmarea before its successor
+            list_insert_before(&vma->vma_plink, &new_vma->vma_plink);
+            return;
+        }
+    }
+
+    // no successor found: the new vmarea belongs at the tail
+    list_insert_tail(&map->vmm_list, &new_vma->vma_plink);
}
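+
+/*
+ * Worked example: with [5, 10) and [20, 25) on the list, inserting
+ * [12, 18) skips [5, 10) (5 < 18), finds 20 >= 18, and links the new area
+ * before [20, 25), keeping vmm_list sorted by start page.
+ */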
/*
@@ -90,7 +167,51 @@ void vmmap_insert(vmmap_t *map, vmarea_t *new_vma)
*/
ssize_t vmmap_find_range(vmmap_t *map, size_t npages, int dir)
{
- NOT_YET_IMPLEMENTED("VM: vmmap_find_range");
+    // NOT_YET_IMPLEMENTED("VM: vmmap_find_range");
+
+    // case 1: dir is VMMAP_DIR_LOHI, scan upward from the lowest user page
+    if (dir == VMMAP_DIR_LOHI)
+    {
+        size_t vfn = ADDR_TO_PN(USER_MEM_LOW);
+        while (vfn <= ADDR_TO_PN(USER_MEM_HIGH) - npages)
+        {
+            // the whole range [vfn, vfn + npages) must be unmapped, not
+            // just its first page
+            if (vmmap_is_range_empty(map, vfn, npages))
+            {
+                return (ssize_t)vfn;
+            }
+
+            // otherwise skip past the vmarea blocking the range
+            vmarea_t *vma = vmmap_lookup(map, vfn);
+            vfn = vma ? vma->vma_end : vfn + 1;
+        }
+    }
+
+    // case 2: dir is VMMAP_DIR_HILO, scan downward from the highest range
+    else if (dir == VMMAP_DIR_HILO)
+    {
+        size_t vfn = ADDR_TO_PN(USER_MEM_HIGH) - npages;
+        while (vfn >= ADDR_TO_PN(USER_MEM_LOW))
+        {
+            if (vmmap_is_range_empty(map, vfn, npages))
+            {
+                return (ssize_t)vfn;
+            }
+
+            // drop the next candidate's end just below the vmarea that
+            // overlaps the highest page of the current range
+            vmarea_t *vma = vmmap_lookup(map, vfn + npages - 1);
+            size_t next_end = vma ? vma->vma_start : vfn + npages - 1;
+            if (next_end < ADDR_TO_PN(USER_MEM_LOW) + npages)
+            {
+                break; // no room left below; avoid size_t underflow
+            }
+            vfn = next_end - npages;
+        }
+    }
+
+    // if no range exists, return -1
return -1;
}
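+
+/*
+ * Worked example (HILO): with npages = 4 and an existing area at
+ * [HIGH - 3, HIGH), the first candidate [HIGH - 4, HIGH) is not empty, so
+ * the search drops below that area to the candidate [HIGH - 7, HIGH - 3)
+ * and returns HIGH - 7 if that range is empty.
+ */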
@@ -100,7 +221,19 @@ ssize_t vmmap_find_range(vmmap_t *map, size_t npages, int dir)
*/
vmarea_t *vmmap_lookup(vmmap_t *map, size_t vfn)
{
- NOT_YET_IMPLEMENTED("VM: vmmap_lookup");
+ // NOT_YET_IMPLEMENTED("VM: vmmap_lookup");
+
+ // iterate over the list of vmareas
+ list_iterate(&map->vmm_list, vma, vmarea_t, vma_plink)
+ {
+ // if the vfn lies within the range of the current vmarea
+ if (vfn >= vma->vma_start && vfn < vma->vma_end)
+ {
+ return vma;
+ }
+ }
+
+ // if the page is unmapped, return NULL
return NULL;
}
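+
+/*
+ * Note: ranges are half-open, so for an area [5, 10), vmmap_lookup() on
+ * vfn 9 returns the area while vfn 10 returns NULL.
+ */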
@@ -140,8 +273,83 @@ void vmmap_collapse(vmmap_t *map)
*/
vmmap_t *vmmap_clone(vmmap_t *map)
{
- NOT_YET_IMPLEMENTED("VM: vmmap_clone");
- return NULL;
+ // NOT_YET_IMPLEMENTED("VM: vmmap_clone");
+
+ // Create a new vmmap
+ vmmap_t *new_vmmap = vmmap_create();
+ if (new_vmmap == NULL)
+ {
+ return NULL;
+ }
+
+ // Iterate over the list of vmareas
+ list_iterate(&map->vmm_list, vma, vmarea_t, vma_plink)
+ {
+ // Create a new vmarea
+ vmarea_t *new_vmarea = vmarea_alloc();
+ if (new_vmarea == NULL)
+ {
+ vmmap_destroy(&new_vmmap);
+ return NULL;
+ }
+
+ // Clone the fields of the vmarea
+ new_vmarea->vma_start = vma->vma_start;
+ new_vmarea->vma_end = vma->vma_end;
+ new_vmarea->vma_off = vma->vma_off;
+ new_vmarea->vma_prot = vma->vma_prot;
+ new_vmarea->vma_flags = vma->vma_flags;
+
+ // If the vmarea is share-mapped
+ if (vma->vma_flags & MAP_SHARED)
+ {
+ new_vmarea->vma_obj = vma->vma_obj;
+ mobj_ref(new_vmarea->vma_obj);
+ }
+
+        // If the vmarea is private-mapped, give the original and the clone
+        // their own shadow objects on top of the shared bottom object, so
+        // that each side copy-on-writes independently
+        else
+        {
+            // shadow object for the original (parent) vmarea
+            mobj_lock(vma->vma_obj);
+            mobj_t *shadow_obj_map = shadow_create(vma->vma_obj);
+            mobj_unlock(vma->vma_obj);
+
+            // check for failure before touching the new object
+            if (shadow_obj_map == NULL)
+            {
+                vmarea_free(new_vmarea);
+                vmmap_destroy(&new_vmmap);
+                return NULL;
+            }
+            mobj_unlock(shadow_obj_map); // shadow_create returns it locked
+
+            // shadow object for the cloned (child) vmarea
+            mobj_lock(vma->vma_obj);
+            mobj_t *shadow_obj_new = shadow_create(vma->vma_obj);
+            mobj_unlock(vma->vma_obj);
+
+            if (shadow_obj_new == NULL)
+            {
+                mobj_put(&shadow_obj_map);
+                vmarea_free(new_vmarea);
+                vmmap_destroy(&new_vmmap);
+                return NULL;
+            }
+            mobj_unlock(shadow_obj_new); // shadow_create returns it locked
+
+            // drop the original reference; each shadow object holds its
+            // own reference on the bottom object
+            mobj_put(&vma->vma_obj);
+
+            // install the shadow objects in their respective vmareas
+            new_vmarea->vma_obj = shadow_obj_new;
+            vma->vma_obj = shadow_obj_map;
+        }
+
+ // Insert the new vmarea into the new vmmap
+ vmmap_insert(new_vmmap, new_vmarea);
+ }
+
+ // Return the new vmmap
+ return new_vmmap;
}
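+
+/*
+ * Resulting object graph for a private mapping backed by object O (sketch):
+ *
+ *     parent vmarea --> S1 --\
+ *                             --> O  (shared bottom object)
+ *     child  vmarea --> S2 --/
+ *
+ * A write in either process copies the faulting page into that process's
+ * own shadow object, leaving the other's view of O untouched.
+ */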
/*
@@ -182,8 +390,98 @@ vmmap_t *vmmap_clone(vmmap_t *map)
long vmmap_map(vmmap_t *map, vnode_t *file, size_t lopage, size_t npages,
int prot, int flags, off_t off, int dir, vmarea_t **new_vma)
{
- NOT_YET_IMPLEMENTED("VM: vmmap_map");
- return -1;
+    // NOT_YET_IMPLEMENTED("VM: vmmap_map");
+
+    // If lopage is 0, use vmmap_find_range() to pick a valid range
+    if (lopage == 0)
+    {
+        ssize_t start = vmmap_find_range(map, npages, dir);
+        if (start < 0)
+        {
+            return -ENOMEM;
+        }
+        lopage = (size_t)start;
+    }
+
+ // Alloc the new vmarea
+ vmarea_t *new_vmarea = vmarea_alloc();
+ if (new_vmarea == NULL)
+ {
+ return -ENOMEM;
+ }
+ // Set the fields of the new vmarea
+ new_vmarea->vma_start = lopage;
+ new_vmarea->vma_end = lopage + npages;
+ new_vmarea->vma_off = ADDR_TO_PN(off);
+ new_vmarea->vma_prot = prot;
+ new_vmarea->vma_flags = flags;
+ new_vmarea->vma_vmmap = map;
+ new_vmarea->vma_obj = NULL;
+
+    // If file is NULL, back the mapping with a new anonymous object
+    if (file == NULL)
+    {
+        new_vmarea->vma_obj = anon_create();
+        if (new_vmarea->vma_obj == NULL)
+        {
+            vmarea_free(new_vmarea);
+            return -ENOMEM;
+        }
+        mobj_unlock(new_vmarea->vma_obj); // anon_create returns it locked
+    }
+ else
+ {
+ // If file is non-NULL, use the vnode's mmap operation to get the mobj
+ long ret = file->vn_ops->mmap(file, &new_vmarea->vma_obj);
+ if (ret < 0)
+ {
+ // on fail, free the new vmarea and return the error
+ vmarea_free(new_vmarea);
+ return ret;
+ }
+ }
+
+    // If MAP_PRIVATE is specified, set up a shadow object for copy-on-write
+    if (flags & MAP_PRIVATE)
+    {
+        mobj_lock(new_vmarea->vma_obj);
+        mobj_t *shadow_obj = shadow_create(new_vmarea->vma_obj);
+        mobj_unlock(new_vmarea->vma_obj);
+        if (shadow_obj == NULL)
+        {
+            vmarea_free(new_vmarea); // also puts the underlying object
+            return -ENOMEM;
+        }
+        mobj_unlock(shadow_obj); // shadow_create returns it locked
+
+        // the shadow object holds its own reference on the underlying
+        // object, so drop ours before installing the shadow
+        mobj_put(&new_vmarea->vma_obj);
+        new_vmarea->vma_obj = shadow_obj;
+    }
+
+    // If MAP_FIXED is specified, unmap anything already occupying the
+    // requested range (lopage is always nonzero by this point)
+    if (flags & MAP_FIXED)
+    {
+        long ret = vmmap_remove(map, lopage, npages);
+        if (ret < 0)
+        {
+            // vmarea_free() also puts vma_obj, so no separate mobj_put here
+            vmarea_free(new_vmarea);
+            return ret;
+        }
+    }
+
+ // Insert the new vmarea into the map
+ vmmap_insert(map, new_vmarea);
+
+    // report the new vmarea to the caller, if requested, and return 0
+ if (new_vma)
+ {
+ *new_vma = new_vmarea;
+ }
+ return 0;
}
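+
+/*
+ * Usage sketch (hypothetical 16-page private anonymous mapping; flags and
+ * constants as defined in mm/mman.h):
+ *
+ *     vmarea_t *vma;
+ *     long ret = vmmap_map(curproc->p_vmmap, NULL, 0, 16,
+ *                          PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON,
+ *                          0, VMMAP_DIR_HILO, &vma);
+ *
+ * Passing lopage == 0 lets vmmap_find_range() choose the placement.
+ */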
/*
@@ -219,8 +517,107 @@ long vmmap_map(vmmap_t *map, vnode_t *file, size_t lopage, size_t npages,
*/
long vmmap_remove(vmmap_t *map, size_t lopage, size_t npages)
{
- NOT_YET_IMPLEMENTED("VM: vmmap_remove");
- return -1;
+ // NOT_YET_IMPLEMENTED("VM: vmmap_remove");
+
+    // Iterate over the list of vmareas; each area can relate to the region
+    // [lopage, lopage + npages) in one of four ways
+    list_iterate(&map->vmm_list, vma, vmarea_t, vma_plink)
+    {
+        // case 1: the region lies strictly inside the vmarea, so split it
+        if (vma->vma_start < lopage && vma->vma_end > lopage + npages)
+        {
+            vmarea_t *new_vmarea = vmarea_alloc();
+            if (new_vmarea == NULL)
+            {
+                return -ENOMEM;
+            }
+
+            // the new vmarea keeps the tail, [lopage + npages, old end),
+            // with its offset shifted to preserve the backing pages
+            new_vmarea->vma_start = lopage + npages;
+            new_vmarea->vma_end = vma->vma_end;
+            new_vmarea->vma_off = vma->vma_off + (new_vmarea->vma_start - vma->vma_start);
+            new_vmarea->vma_prot = vma->vma_prot;
+            new_vmarea->vma_flags = vma->vma_flags;
+            new_vmarea->vma_obj = vma->vma_obj;
+            // both halves now reference the object
+            mobj_ref(new_vmarea->vma_obj);
+
+            // the old vmarea keeps the head, [old start, lopage)
+            vma->vma_end = lopage;
+
+            vmmap_insert(map, new_vmarea);
+
+            // clean the pagetables and TLB for the removed pages;
+            // tlb_flush_range() takes a start address and a page count
+            pt_unmap_range(map->vmm_proc->p_pml4,
+                           (uintptr_t)PN_TO_ADDR(lopage),
+                           (uintptr_t)PN_TO_ADDR(lopage + npages));
+            tlb_flush_range((uintptr_t)PN_TO_ADDR(lopage), npages);
+        }
+
+        // case 2: the region overlaps the end of the vmarea
+        else if (vma->vma_start < lopage && vma->vma_end > lopage &&
+                 vma->vma_end <= lopage + npages)
+        {
+            // shorten the mapping, then unmap exactly the removed pages
+            size_t old_end = vma->vma_end;
+            vma->vma_end = lopage;
+            pt_unmap_range(map->vmm_proc->p_pml4,
+                           (uintptr_t)PN_TO_ADDR(lopage),
+                           (uintptr_t)PN_TO_ADDR(old_end));
+            tlb_flush_range((uintptr_t)PN_TO_ADDR(lopage), old_end - lopage);
+        }
+
+        // case 3: the region overlaps the beginning of the vmarea
+        else if (vma->vma_start >= lopage && vma->vma_start < lopage + npages &&
+                 vma->vma_end > lopage + npages)
+        {
+            // move the beginning of the mapping forward, bumping the offset
+            // so the remaining pages keep their backing
+            size_t old_start = vma->vma_start;
+            vma->vma_off += lopage + npages - vma->vma_start;
+            vma->vma_start = lopage + npages;
+            pt_unmap_range(map->vmm_proc->p_pml4,
+                           (uintptr_t)PN_TO_ADDR(old_start),
+                           (uintptr_t)PN_TO_ADDR(lopage + npages));
+            tlb_flush_range((uintptr_t)PN_TO_ADDR(old_start),
+                            lopage + npages - old_start);
+        }
+
+        // case 4: the region completely contains the vmarea
+        else if (vma->vma_start >= lopage && vma->vma_end <= lopage + npages)
+        {
+            // save the bounds, then remove and free the vmarea
+            size_t start = vma->vma_start;
+            size_t end = vma->vma_end;
+            list_remove(&vma->vma_plink);
+            vmarea_free(vma);
+
+            pt_unmap_range(map->vmm_proc->p_pml4,
+                           (uintptr_t)PN_TO_ADDR(start),
+                           (uintptr_t)PN_TO_ADDR(end));
+            tlb_flush_range((uintptr_t)PN_TO_ADDR(start), end - start);
+        }
+    }
+
+ return 0;
}
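+
+/*
+ * Worked example (split case): removing [10, 15) from an area [5, 20) with
+ * vma_off = 0 leaves the head [5, 10) in place and creates a tail [15, 20)
+ * with vma_off = 10, so the tail's pages keep their original backing.
+ */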
/*
@@ -229,8 +626,29 @@ long vmmap_remove(vmmap_t *map, size_t lopage, size_t npages)
*/
long vmmap_is_range_empty(vmmap_t *map, size_t startvfn, size_t npages)
{
- NOT_YET_IMPLEMENTED("VM: vmmap_is_range_empty");
- return 0;
+    // NOT_YET_IMPLEMENTED("VM: vmmap_is_range_empty");
+
+    size_t endvfn = startvfn + npages;
+
+    // Iterate over the list of vmareas
+    list_iterate(&map->vmm_list, vma, vmarea_t, vma_plink)
+    {
+        // two half-open ranges [a, b) and [c, d) overlap iff a < d && c < b
+        if (vma->vma_start < endvfn && vma->vma_end > startvfn)
+        {
+            return 0;
+        }
+    }
+
+    return 1;
}
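+
+/*
+ * Example: [5, 10) and [10, 15) do not overlap (the ranges are half-open),
+ * so vmmap_is_range_empty() on [10, 15) returns 1 even when [5, 10) is
+ * mapped.
+ */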
/*
@@ -250,7 +668,55 @@ long vmmap_is_range_empty(vmmap_t *map, size_t startvfn, size_t npages)
*/
long vmmap_read(vmmap_t *map, const void *vaddr, void *buf, size_t count)
{
- NOT_YET_IMPLEMENTED("VM: vmmap_read");
+    // NOT_YET_IMPLEMENTED("VM: vmmap_read");
+
+    // Copy page by page until count bytes have been read; looping on the
+    // byte count also handles reads that start and end in the same page
+    size_t vfn = ADDR_TO_PN(vaddr);
+    size_t bytes_read = 0;
+    while (bytes_read < count)
+    {
+        // Lookup the vmarea for this page number
+        vmarea_t *vma = vmmap_lookup(map, vfn);
+        if (vma == NULL)
+        {
+            return -EFAULT;
+        }
+
+        // Find the pframe for this page number (forwrite = 0)
+        pframe_t *pf;
+        mobj_lock(vma->vma_obj);
+        long ret = mobj_get_pframe(vma->vma_obj, vfn - vma->vma_start + vma->vma_off, 0, &pf);
+        mobj_unlock(vma->vma_obj);
+        if (ret < 0)
+        {
+            return ret;
+        }
+
+        // Copy from the pframe into buf, up to the end of this page
+        uintptr_t cursor = (uintptr_t)vaddr + bytes_read;
+        size_t bytes_this_iteration = MIN(PAGE_SIZE - PAGE_OFFSET(cursor), count - bytes_read);
+        memcpy((char *)buf + bytes_read,
+               (char *)pf->pf_addr + PAGE_OFFSET(cursor),
+               bytes_this_iteration);
+
+        // Release (unlock) the pframe
+        pframe_release(&pf);
+
+        // Advance to the next page
+        bytes_read += bytes_this_iteration;
+        vfn++;
+    }
return 0;
}
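+
+/*
+ * Worked example (assuming PAGE_SIZE == 4096): reading 16 bytes from
+ * vaddr = 0x1ff8 copies 8 bytes from the first page (4096 -
+ * PAGE_OFFSET(0x1ff8) = 8) and the remaining 8 bytes from the next page.
+ */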
@@ -272,7 +738,59 @@ long vmmap_read(vmmap_t *map, const void *vaddr, void *buf, size_t count)
*/
long vmmap_write(vmmap_t *map, void *vaddr, const void *buf, size_t count)
{
- NOT_YET_IMPLEMENTED("VM: vmmap_write");
+    // NOT_YET_IMPLEMENTED("VM: vmmap_write");
+
+    // Copy page by page until count bytes have been written; looping on the
+    // byte count also handles writes that start and end in the same page
+    size_t vfn = ADDR_TO_PN(vaddr);
+    size_t bytes_written = 0;
+    while (bytes_written < count)
+    {
+        // Lookup the vmarea for this page number
+        vmarea_t *vma = vmmap_lookup(map, vfn);
+        if (vma == NULL)
+        {
+            return -EFAULT;
+        }
+
+        // Find the pframe for this page number (forwrite = 1)
+        pframe_t *pf;
+        mobj_lock(vma->vma_obj);
+        long ret = mobj_get_pframe(vma->vma_obj, vfn - vma->vma_start + vma->vma_off, 1, &pf);
+        mobj_unlock(vma->vma_obj);
+        if (ret < 0)
+        {
+            return ret;
+        }
+
+        // Copy from buf into the pframe, up to the end of this page
+        uintptr_t cursor = (uintptr_t)vaddr + bytes_written;
+        size_t bytes_this_iteration = MIN(PAGE_SIZE - PAGE_OFFSET(cursor), count - bytes_written);
+        memcpy((char *)pf->pf_addr + PAGE_OFFSET(cursor),
+               (const char *)buf + bytes_written,
+               bytes_this_iteration);
+
+        // Mark the page dirty so the change is written back
+        pf->pf_dirty = 1;
+
+        // Release (unlock) the pframe
+        pframe_release(&pf);
+
+        // Advance to the next page
+        bytes_written += bytes_this_iteration;
+        vfn++;
+    }
+
return 0;
}