diff options
Diffstat (limited to 'kernel/vm')
-rw-r--r-- | kernel/vm/anon.c   |   4 |
-rw-r--r-- | kernel/vm/brk.c    |   4 |
-rw-r--r-- | kernel/vm/mmap.c   |  48 |
-rw-r--r-- | kernel/vm/shadow.c |  83 |
-rw-r--r-- | kernel/vm/vmmap.c  | 225 |
5 files changed, 161 insertions, 203 deletions
diff --git a/kernel/vm/anon.c b/kernel/vm/anon.c index a433395..8c65b85 100644 --- a/kernel/vm/anon.c +++ b/kernel/vm/anon.c @@ -60,7 +60,7 @@ static long anon_fill_pframe(mobj_t *o, pframe_t *pf) // set the pframe's mobj to the given mobj // pf->pf_addr = o; - // // set the pframe's flags to dirty + // set the pframe's flags to dirty // pf->pf_dirty = 1; memset(pf->pf_addr, 0, PAGE_SIZE); @@ -85,4 +85,4 @@ static void anon_destructor(mobj_t *o) // free the mobj slab_obj_free(anon_allocator, o); -} +}
\ No newline at end of file diff --git a/kernel/vm/brk.c b/kernel/vm/brk.c index 69a315f..37bd16c 100644 --- a/kernel/vm/brk.c +++ b/kernel/vm/brk.c @@ -63,7 +63,7 @@ long do_brk(void *addr, void **ret) } // Check if the address is within the valid range - if ((uintptr_t)addr > USER_MEM_HIGH) + if ((uintptr_t)addr > USER_MEM_HIGH || (uintptr_t)addr < USER_MEM_LOW) { return -ENOMEM; } @@ -154,4 +154,4 @@ long do_brk(void *addr, void **ret) curproc->p_brk = addr; *ret = addr; return 0; -} +}
\ No newline at end of file diff --git a/kernel/vm/mmap.c b/kernel/vm/mmap.c index ce932de..a298df4 100644 --- a/kernel/vm/mmap.c +++ b/kernel/vm/mmap.c @@ -70,7 +70,7 @@ long do_mmap(void *addr, size_t len, int prot, int flags, int fd, off_t off, } // check if len is not zero (len is an unsigned value, so it is always positive) - if (len == 0) + if ((ssize_t) len <= 0) { return -EINVAL; } @@ -87,8 +87,8 @@ long do_mmap(void *addr, size_t len, int prot, int flags, int fd, off_t off, return -EINVAL; } - // check if fd is not a valid file descriptor and MAP_ANON was not set - if (fd < 0 && (flags & MAP_ANON) == 0) + // check if the fd is valid and MAP_ANON was not set + if (((fd < 0 || fd >= NFILES) || curproc->p_files[fd] == NULL) && !(flags & MAP_ANON)) { return -EBADF; } @@ -96,10 +96,10 @@ long do_mmap(void *addr, size_t len, int prot, int flags, int fd, off_t off, // check if a file mapping was requested, but fd is not open for reading // file error checking is done in if statement below file_t *file = NULL; - if (fd >= 0 && (flags & MAP_ANON) == 0) + if (fd >= 0 && fd < NFILES) { // get the file and check if it is valid - file = fget(fd); + file = curproc->p_files[fd]; if (file == NULL) { return -EBADF; @@ -110,7 +110,6 @@ long do_mmap(void *addr, size_t len, int prot, int flags, int fd, off_t off, // check if the file's vnode's mmap operation doesn't exist if (file->f_vnode->vn_ops == NULL || file->f_vnode->vn_ops->mmap == NULL) { - fput(&file); return -ENODEV; } @@ -119,32 +118,26 @@ long do_mmap(void *addr, size_t len, int prot, int flags, int fd, off_t off, // check if thef FMODE_READ flag is not set if ((file->f_mode & FMODE_READ) == 0) { - fput(&file); return -EACCES; } // check if append mode is set and PROT_WRITE is set if ((prot & PROT_WRITE) && (file->f_mode & FMODE_APPEND)) { - fput(&file); return -EACCES; } - // check if MAP_SHARED was requested and PROT_WRITE is set, but fd is not open in read/write (O_RDWR) mode - if ((flags & MAP_SHARED) && (prot 
& PROT_WRITE) && (file->f_mode & FMODE_READ) == 0) + // if MAP_SHARED was requested and PROT_WRITE is set, but fd is not open in read/write (O_RDWR) mode. + if ((flags & MAP_SHARED) && (prot & PROT_WRITE) && (file->f_mode & FMODE_WRITE) == 0) { - fput(&file); return -EACCES; } // check if PROT_WRITE is set, but the file has FMODE_APPEND specified if ((prot & PROT_WRITE) && (file->f_mode & FMODE_APPEND)) { - fput(&file); return -EACCES; } - - fput(&file); } @@ -167,20 +160,18 @@ long do_mmap(void *addr, size_t len, int prot, int flags, int fd, off_t off, return err; } - // set ret if it was provided void *start = PN_TO_ADDR(vma->vma_start); - if (ret) - { - *ret = start; - } - // flush the TLB tlb_flush_range( (uintptr_t) start, PAGE_SIZE * (vma->vma_end - vma->vma_start) ); - // return 0 on success + // set ret if it was provided and return 0 on success + if (ret) + { + *ret = start; + } return 0; } @@ -212,13 +203,17 @@ long do_munmap(void *addr, size_t len) } // Check if len is in bounds - if (len > USER_MEM_HIGH || len == 0) + if (len > USER_MEM_HIGH || len <= 0) { return -EINVAL; } // Check if the addr is out of range of the user address space - if ((uintptr_t)addr < USER_MEM_LOW || (uintptr_t)addr + len > USER_MEM_HIGH) + if ( + (uintptr_t)addr < USER_MEM_LOW + || (uintptr_t)addr > USER_MEM_HIGH + || (uintptr_t)addr + len > USER_MEM_HIGH + ) { return -EINVAL; } @@ -227,9 +222,8 @@ long do_munmap(void *addr, size_t len) size_t start = ADDR_TO_PN(addr); size_t end = ADDR_TO_PN(PAGE_ALIGN_UP((uintptr_t)addr + len)); long ret = vmmap_remove( - curproc->p_vmmap, - start, - end - start - ); + curproc->p_vmmap, + start, + end - start); return ret; }
\ No newline at end of file diff --git a/kernel/vm/shadow.c b/kernel/vm/shadow.c index 91b1fce..06cf20d 100644 --- a/kernel/vm/shadow.c +++ b/kernel/vm/shadow.c @@ -71,6 +71,7 @@ mobj_t *shadow_create(mobj_t *shadowed) } // initialize the mobj_shadow_t + so->shadowed = shadowed; // set the bottom_mobj based on the two cases if (shadowed->mo_type == MOBJ_SHADOW) @@ -82,12 +83,11 @@ mobj_t *shadow_create(mobj_t *shadowed) so->bottom_mobj = shadowed; } // init the other fields - so->shadowed = shadowed; - mobj_init(&so->mobj, MOBJ_SHADOW, &shadow_mobj_ops); mobj_ref(so->shadowed); mobj_ref(so->bottom_mobj); - // lock the shadow object + // init and lock the shadow object + mobj_init(&so->mobj, MOBJ_SHADOW, &shadow_mobj_ops); mobj_lock(&so->mobj); // return the shadow object @@ -110,52 +110,7 @@ mobj_t *shadow_create(mobj_t *shadowed) */ void shadow_collapse(mobj_t *o) { - // NOT_YET_IMPLEMENTED("VM: shadow_collapse"); - - // get the mobj_shadow_t and it's mobj - mobj_shadow_t *so = MOBJ_TO_SO(o); - mobj_t *iter = so->shadowed; - // iterate through the shadow chain - while (iter && so->shadowed->mo_type == MOBJ_SHADOW) - { - // check to see if the refcount is not 1. 
if so, continue to next shadowed object - if (so->shadowed->mo_refcount != 1) - { - iter = so->shadowed; - continue; - } - // else, go over the shadowed object's pframes - - // iterate through the pframes - mobj_lock(&so->shadowed); - list_iterate(&so->shadowed->mo_pframes, pframe, pframe_t, pf_link) - { - // get the pframe from the shadow object - pframe_t *spf = NULL; - - mobj_lock(iter); // lock before getting the pframe - mobj_find_pframe(o, pframe->pf_pagenum, &spf); - mobj_unlock(iter); - - // check if the pframe is not in the shadow object when migrating - if (spf == NULL) - { - // if not, remove the pframe from the shadowed object - // and insert it into out iterated shadow object - list_remove(&pframe->pf_link); - list_insert_tail(&iter->mo_pframes, &pframe->pf_link); - } - else - { - // if it is, release the pframe we found - pframe_release(&spf); - } - } - - // put locked the shadowed object after iterating through it - mobj_put_locked(&so->shadowed); - // FIXME: this is probably wrong - } + NOT_YET_IMPLEMENTED("VM: shadow_collapse"); } /* @@ -210,7 +165,7 @@ static long shadow_get_pframe(mobj_t *o, size_t pagenum, long forwrite, while (iter && iter->mo_type == MOBJ_SHADOW) { mobj_lock(iter); - mobj_find_pframe(o, pagenum, &pf); + mobj_find_pframe(iter, pagenum, &pf); mobj_unlock(iter); if (pf) { @@ -218,14 +173,15 @@ static long shadow_get_pframe(mobj_t *o, size_t pagenum, long forwrite, return 0; } // update the iterator - iter = MOBJ_TO_SO(iter)->shadowed; + so = MOBJ_TO_SO(iter); + iter = so->shadowed; } // if no shadow objects have the page, call mobj_get_pframe() to get the page from the bottom object // at this point, iter is the bottom object - mobj_lock(iter); - long ret = mobj_get_pframe(iter, pagenum, forwrite, pfp); - mobj_unlock(iter); + mobj_lock(so->bottom_mobj); + long ret = mobj_get_pframe(so->bottom_mobj, pagenum, forwrite, pfp); + mobj_unlock(so->bottom_mobj); return ret; } @@ -252,7 +208,7 @@ static long shadow_get_pframe(mobj_t *o, 
size_t pagenum, long forwrite, */ static long shadow_fill_pframe(mobj_t *o, pframe_t *pf) { - // NOT_YET_IMPLEMENTED("VM: shadow_fill_pframe"); + // NOT_YET_IMPLEMENTEDshadow_fill_pframe"); // get the mobj_shadow_t mobj_shadow_t *so = MOBJ_TO_SO(o); @@ -263,7 +219,7 @@ static long shadow_fill_pframe(mobj_t *o, pframe_t *pf) // get the pframe from the shadow object pframe_t *spf = NULL; mobj_lock(iter); - mobj_find_pframe(o, pf->pf_pagenum, &spf); + mobj_find_pframe(iter, pf->pf_pagenum, &spf); mobj_unlock(iter); // if the pframe is found, copy the contents into pf @@ -276,19 +232,20 @@ static long shadow_fill_pframe(mobj_t *o, pframe_t *pf) } // update the iterator - iter = MOBJ_TO_SO(iter)->shadowed; + so = MOBJ_TO_SO(iter); + iter = so->shadowed; } // if none of the shadow objects have a copy of the frame, use mobj_get_pframe on the bottom object pframe_t *spf = NULL; - mobj_lock(iter); - long ret = mobj_get_pframe(iter, pf->pf_pagenum, 0, &spf); - mobj_unlock(iter); + mobj_lock(so->bottom_mobj); + long ret = mobj_get_pframe(so->bottom_mobj, pf->pf_pagenum, 0, &spf); + mobj_unlock(so->bottom_mobj); // check if the operation was sucessful, memcpy the contents into pf // and release the pframe - if (ret == 0) + if (ret >= 0) { - memcpy(pf->pf_addr, pf->pf_addr, PAGE_SIZE); + memcpy(pf->pf_addr, spf->pf_addr, PAGE_SIZE); pframe_release(&spf); } @@ -336,4 +293,4 @@ static void shadow_destructor(mobj_t *o) // free the slab slab_obj_free(shadow_allocator, so); -} +}
\ No newline at end of file diff --git a/kernel/vm/vmmap.c b/kernel/vm/vmmap.c index 8789371..8c1a455 100644 --- a/kernel/vm/vmmap.c +++ b/kernel/vm/vmmap.c @@ -49,7 +49,7 @@ vmarea_t *vmarea_alloc(void) new_vmarea->vma_prot = 0; new_vmarea->vma_flags = 0; new_vmarea->vma_obj = NULL; - new_vmarea->vma_obj = NULL; + new_vmarea->vma_vmmap = NULL; list_link_init(&new_vmarea->vma_plink); // Return the new vmarea @@ -134,6 +134,8 @@ void vmmap_insert(vmmap_t *map, vmarea_t *new_vma) { // NOT_YET_IMPLEMENTED("VM: vmmap_insert*"); + new_vma->vma_vmmap = map; + // iterate over the list of vmareas list_iterate(&map->vmm_list, vma, vmarea_t, vma_plink) { @@ -173,47 +175,48 @@ ssize_t vmmap_find_range(vmmap_t *map, size_t npages, int dir) if (dir == VMMAP_DIR_LOHI) { // iterate over the page numbers, going from low to high - // determine the continguous range of free virtual pages + // determine the continguous range of free virtual pages - int start, end = 0; + size_t start_page, contig_page = 0; size_t vfn = ADDR_TO_PN(USER_MEM_LOW); - while (vfn <= ADDR_TO_PN(USER_MEM_HIGH)) + while (vfn++ < ADDR_TO_PN(USER_MEM_HIGH)) { // Lookup the vmarea for this page number - vmarea_t *vma = vmmap_lookup(map, vfn++); + vmarea_t *vma = vmmap_lookup(map, vfn); if (vma == NULL) { // if unmapped, document this - end = vfn; - if (start == 0) + if (contig_page == 0) { - start = vfn; + start_page = 0; } + contig_page++; } else { // if mapped, start over - start, end = 0; + start_page = contig_page = 0; } // if the range exists, return the start - if (end == npages) + if (contig_page == npages) { - return start; + KASSERT(start_page >= ADDR_TO_PN(USER_MEM_LOW)); + return start_page; } } } - + // case 2: dir is VMMAP_DIR_HILO - else if (dir == VMMAP_DIR_HILO) + if (dir == VMMAP_DIR_HILO) { // iterate over the page numbers - int contig = 0; + size_t contig = 0; size_t vfn = ADDR_TO_PN(USER_MEM_HIGH); - while (vfn >= ADDR_TO_PN(USER_MEM_LOW)) + while (--vfn >= ADDR_TO_PN(USER_MEM_LOW)) { // 
Lookup the vmarea for this page number - vmarea_t *vma = vmmap_lookup(map, --vfn); + vmarea_t *vma = vmmap_lookup(map, vfn); if (vma == NULL) { // if unmapped, increment the contig @@ -228,6 +231,7 @@ ssize_t vmmap_find_range(vmmap_t *map, size_t npages, int dir) // if there are n contiguous pages, return the current vfn if (contig == npages) { + KASSERT(vfn >= ADDR_TO_PN(USER_MEM_LOW)); return vfn; } } @@ -298,6 +302,7 @@ vmmap_t *vmmap_clone(vmmap_t *map) // NOT_YET_IMPLEMENTED("VM: vmmap_clone"); // Create a new vmmap + // vmmap_collapse(map); vmmap_t *new_vmmap = vmmap_create(); if (new_vmmap == NULL) { @@ -413,97 +418,101 @@ long vmmap_map(vmmap_t *map, vnode_t *file, size_t lopage, size_t npages, int prot, int flags, off_t off, int dir, vmarea_t **new_vma) { // NOT_YET_IMPLEMENTED("VM: vmmap_map"); + // return -1; + + // ASK: why are these needed!! + KASSERT(map != NULL); + KASSERT(prot == PROT_NONE + || prot == PROT_READ + || prot == PROT_WRITE + || prot == PROT_EXEC + || prot == (PROT_READ | PROT_WRITE) + || prot == (PROT_READ | PROT_EXEC) + || prot == (PROT_WRITE | PROT_EXEC) + || prot == (PROT_READ | PROT_WRITE | PROT_EXEC)); + KASSERT((flags & MAP_TYPE) == MAP_SHARED || (flags & MAP_TYPE) == MAP_PRIVATE); + + if (lopage == 0) + { + KASSERT(dir == VMMAP_DIR_LOHI || dir == VMMAP_DIR_HILO); + ssize_t res = vmmap_find_range(map, npages, dir); + if (res == -1) { + return -ENOMEM; + } + lopage = res; + } - // Create a new vmarea - // see if lopage is 0. 
if so, use vmmap_find_range() to get a valid range - if (lopage == 0) + if (lopage != 0 && (flags & MAP_FIXED)) { - lopage = vmmap_find_range(map, npages, dir); - if (lopage == -1) + long ret = vmmap_remove(map, lopage, npages); + if (ret < 0) { - return -ENOMEM; + return ret; } } - // Alloc the new vmarea - vmarea_t *new_vmarea = vmarea_alloc(); - if (new_vmarea == NULL) + // alloc the new vma + vmarea_t *vma = vmarea_alloc(); + if (!vma) { return -ENOMEM; } - // Set the fields of the new vmarea - new_vmarea->vma_start = lopage; - new_vmarea->vma_end = lopage + npages; - new_vmarea->vma_off = ADDR_TO_PN(off); - new_vmarea->vma_prot = prot; - new_vmarea->vma_flags = flags; - new_vmarea->vma_vmmap = map; - new_vmarea->vma_obj = NULL; - // If file is NULL, create an anon object - if (file == NULL) + // fill in fields, except for mobj and vma + vma->vma_start = lopage; + vma->vma_end = lopage + npages; + vma->vma_off = ADDR_TO_PN(off); + vma->vma_prot = prot; + vma->vma_flags = flags; + + // make the mobj, depending on the case (anon or mmap) + mobj_t *obj = NULL; + if (file == NULL) { - new_vmarea->vma_obj = anon_create(); - mobj_unlock(new_vmarea->vma_obj); // unlock the anon object before use - if (new_vmarea->vma_obj == NULL) + obj = anon_create(); + if (obj == NULL) { - vmarea_free(new_vmarea); + vmarea_free(vma); return -ENOMEM; } - } + mobj_unlock(obj); + } else { - // If file is non-NULL, use the vnode's mmap operation to get the mobj - long ret = file->vn_ops->mmap(file, &new_vmarea->vma_obj); + long ret = file->vn_ops->mmap(file, &obj); if (ret < 0) { - // on fail, free the new vmarea and return the error - vmarea_free(new_vmarea); + vmarea_free(vma); return ret; } } + vma->vma_obj = obj; - // If MAP_PRIVATE is specified, set up a shadow object - if (flags & MAP_PRIVATE) + // if the flag is private, upgrade the obj to a shadow obj + if (flags & MAP_PRIVATE) { - mobj_lock(new_vmarea->vma_obj); - mobj_t *shadow_obj = shadow_create(new_vmarea->vma_obj); - 
mobj_unlock(new_vmarea->vma_obj); - mobj_unlock(shadow_obj); // unlock the shadow object before use - mobj_put(&new_vmarea->vma_obj); // put the original object - if (shadow_obj == NULL) + mobj_t *shadow_obj = shadow_create(obj); + if (shadow_obj == NULL) { - vmarea_free(new_vmarea); + vmarea_free(vma); + mobj_put(&obj); return -ENOMEM; } - new_vmarea->vma_obj = shadow_obj; - } + // unlock from creation + mobj_unlock(shadow_obj); - // If MAP_FIXED is specified and the given range overlaps with any preexisting mappings, remove the preexisting mappings - if (lopage != 0 && (flags & MAP_FIXED)) - { - long ret = vmmap_remove(map, lopage, npages); - if (ret < 0) - { - vmarea_free(new_vmarea); - // remove/put the shadow/annon object if it exists - if (new_vmarea->vma_obj) - { - mobj_put(&new_vmarea->vma_obj); // FIXME: is this correct! - } - - return ret; - } + vma->vma_obj = shadow_obj; + // put the og obj + mobj_put(&obj); } - // Insert the new vmarea into the map - vmmap_insert(map, new_vmarea); - - // set ret val and return 0 - if (new_vma) + // now that vma is ready, set it + vmmap_insert(map, vma); + if (new_vma != NULL) { - *new_vma = new_vmarea; + *new_vma = vma; } + return 0; } @@ -558,13 +567,15 @@ long vmmap_remove(vmmap_t *map, size_t lopage, size_t npages) // Set the fields of the new vmarea new_vmarea->vma_start = lopage + npages; new_vmarea->vma_end = vma->vma_end; - new_vmarea->vma_off = vma->vma_off + (new_vmarea->vma_start - vma->vma_start); + new_vmarea->vma_off += lopage + npages - vma->vma_start; new_vmarea->vma_prot = vma->vma_prot; new_vmarea->vma_flags = vma->vma_flags; - new_vmarea->vma_vmmap = map; + // new_vmarea->vma_vmmap = map; + mobj_lock(vma->vma_obj); new_vmarea->vma_obj = vma->vma_obj; // increment the refcount of the object associated with the vmarea mobj_ref(new_vmarea->vma_obj); + mobj_unlock(vma->vma_obj); // Shorten the length of the old vmarea vma->vma_end = lopage; @@ -574,13 +585,12 @@ long vmmap_remove(vmmap_t *map, size_t 
lopage, size_t npages) // call pt_unmap_range() and tlb_flush_range() to clean the pagetables and TLB pt_unmap_range( - map->vmm_proc->p_pml4, - PN_TO_ADDR(lopage), - PN_TO_ADDR(lopage + npages) - ); + curproc->p_pml4, + PN_TO_ADDR(lopage), + PN_TO_ADDR(lopage + npages)); tlb_flush_range( - map->vmm_proc->p_pml4, - PN_TO_ADDR(lopage + npages) - PN_TO_ADDR(lopage) + PN_TO_ADDR(lopage), + npages ); } @@ -589,15 +599,15 @@ long vmmap_remove(vmmap_t *map, size_t lopage, size_t npages) { // shorten the length of the mapping vma->vma_end = lopage; + // call pt_unmap_range() and tlb_flush_range() to clean the pagetables and TLB pt_unmap_range( - map->vmm_proc->p_pml4, - PN_TO_ADDR(lopage), - PN_TO_ADDR(lopage + npages) - ); + curproc->p_pml4, + PN_TO_ADDR(lopage), + PN_TO_ADDR(lopage + npages)); tlb_flush_range( - map->vmm_proc->p_pml4, - PN_TO_ADDR(lopage + npages) - PN_TO_ADDR(lopage) + PN_TO_ADDR(lopage), + npages ); } @@ -605,18 +615,17 @@ long vmmap_remove(vmmap_t *map, size_t lopage, size_t npages) else if (vma->vma_start >= lopage && vma->vma_end > lopage + npages && vma->vma_start < lopage + npages) { // move the beginning of the mapping and shorten its length - vma->vma_off += (lopage + npages - vma->vma_start); + vma->vma_off += lopage + npages - vma->vma_start; vma->vma_start = lopage + npages; // call pt_unmap_range() and tlb_flush_range() to clean the pagetables and TLB pt_unmap_range( - map->vmm_proc->p_pml4, - PN_TO_ADDR(lopage), - PN_TO_ADDR(lopage + npages) - ); + curproc->p_pml4, + PN_TO_ADDR(lopage), + PN_TO_ADDR(lopage + npages)); tlb_flush_range( - map->vmm_proc->p_pml4, - PN_TO_ADDR(lopage + npages) - PN_TO_ADDR(lopage) + PN_TO_ADDR(lopage), + npages ); } @@ -629,13 +638,12 @@ long vmmap_remove(vmmap_t *map, size_t lopage, size_t npages) // call pt_unmap_range() and tlb_flush_range() to clean the pagetables and TLB pt_unmap_range( - map->vmm_proc->p_pml4, - PN_TO_ADDR(lopage), - PN_TO_ADDR(lopage + npages) - ); + curproc->p_pml4, + 
PN_TO_ADDR(lopage), + PN_TO_ADDR(lopage + npages)); tlb_flush_range( - map->vmm_proc->p_pml4, - PN_TO_ADDR(lopage + npages) - PN_TO_ADDR(lopage) + PN_TO_ADDR(lopage), + npages ); } } @@ -717,11 +725,11 @@ long vmmap_read(vmmap_t *map, const void *vaddr, void *buf, size_t count) } // Read from the pframe and copy it into buf - void *cursor = (void *)(bytes_read + vaddr); + void *cursor = bytes_read + vaddr; size_t bytes_this_iteration = MIN(PAGE_SIZE - PAGE_OFFSET(cursor), count - bytes_read); memcpy( (void *) buf + bytes_read, - (void *)pf->pf_addr + PAGE_OFFSET(cursor), + (void *) pf->pf_addr + PAGE_OFFSET(cursor), bytes_this_iteration ); @@ -767,7 +775,7 @@ long vmmap_write(vmmap_t *map, void *vaddr, const void *buf, size_t count) size_t vfn = ADDR_TO_PN(vaddr); size_t end_vfn = ADDR_TO_PN(vaddr + count); size_t bytes_written = 0; - while(vfn < end_vfn) + while(vfn <= end_vfn) { // Lookup the vmarea for this page number vmarea_t *vma = vmmap_lookup(map, vfn); @@ -787,7 +795,7 @@ long vmmap_write(vmmap_t *map, void *vaddr, const void *buf, size_t count) } // Write to the pframe, copying data from buf - void *cursor = (void *)(bytes_written + vaddr); + void *cursor = bytes_written + vaddr; size_t bytes_this_iteration = MIN(PAGE_SIZE - PAGE_OFFSET(cursor), count - bytes_written); memcpy( (void *)pf->pf_addr + PAGE_OFFSET(cursor), @@ -795,9 +803,8 @@ long vmmap_write(vmmap_t *map, void *vaddr, const void *buf, size_t count) bytes_this_iteration ); - // Dirty the page + // Dirty the pframe pf->pf_dirty = 1; - // Unlock the pframe pframe_release(&pf); |