author    nthnluu <nate1299@me.com>    2024-01-28 21:20:27 -0500
committer nthnluu <nate1299@me.com>    2024-01-28 21:20:27 -0500
commit    c63f340d90800895f007de64b7d2d14624263331 (patch)
tree      2c0849fa597dd6da831c8707b6f2603403778d7b /kernel/include/mm
Created student weenix repository
Diffstat (limited to 'kernel/include/mm')
-rw-r--r--  kernel/include/mm/kmalloc.h     7
-rw-r--r--  kernel/include/mm/mm.h          8
-rw-r--r--  kernel/include/mm/mman.h       25
-rw-r--r--  kernel/include/mm/mobj.h       75
-rw-r--r--  kernel/include/mm/page.h      124
-rw-r--r--  kernel/include/mm/pagecache.h   9
-rw-r--r--  kernel/include/mm/pagetable.h  94
-rw-r--r--  kernel/include/mm/pframe.h     23
-rw-r--r--  kernel/include/mm/slab.h       96
-rw-r--r--  kernel/include/mm/tlb.h        35
10 files changed, 496 insertions, 0 deletions
diff --git a/kernel/include/mm/kmalloc.h b/kernel/include/mm/kmalloc.h
new file mode 100644
index 0000000..f99e9df
--- /dev/null
+++ b/kernel/include/mm/kmalloc.h
@@ -0,0 +1,7 @@
+#pragma once
+
+#include "types.h"
+
+void *kmalloc(size_t size);
+
+void kfree(void *addr);
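
A minimal usage sketch for this interface, assuming the usual allocator contract (kmalloc returns NULL when out of memory, and each allocation is paired with exactly one kfree); the helper below is hypothetical:

```
#include "mm/kmalloc.h"

/* Hypothetical helper: allocate a scratch buffer, use it, free it. */
static long with_scratch_buffer(size_t len)
{
    char *buf = kmalloc(len);
    if (!buf)
        return -1;          /* out of memory */
    /* ... use buf ... */
    kfree(buf);             /* exactly one kfree per kmalloc */
    return 0;
}
```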
diff --git a/kernel/include/mm/mm.h b/kernel/include/mm/mm.h
new file mode 100644
index 0000000..c2989b4
--- /dev/null
+++ b/kernel/include/mm/mm.h
@@ -0,0 +1,8 @@
+#pragma once
+
+#define MM_POISON 1
+#define MM_POISON_ALLOC 0xBB
+#define MM_POISON_FREE 0xDD
+
+#define USER_MEM_LOW 0x00400000 /* inclusive */
+#define USER_MEM_HIGH (1UL << 47) /* exclusive */
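
A sketch of how an allocator might consume the poison constants above (assumed usage; the actual call sites are elsewhere in the kernel):

```
#include "mm/mm.h"

#include <string.h> /* memset; the kernel supplies its own version */

/* With MM_POISON enabled, filling fresh and freed memory with
 * distinctive patterns makes use-before-init show up as 0xBB bytes
 * and use-after-free as 0xDD bytes when inspected in a debugger. */
static void poison_alloc(void *p, size_t n)
{
#if MM_POISON
    memset(p, MM_POISON_ALLOC, n);
#endif
}

static void poison_free(void *p, size_t n)
{
#if MM_POISON
    memset(p, MM_POISON_FREE, n);
#endif
}
```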
diff --git a/kernel/include/mm/mman.h b/kernel/include/mm/mman.h
new file mode 100644
index 0000000..27f4d57
--- /dev/null
+++ b/kernel/include/mm/mman.h
@@ -0,0 +1,25 @@
+#pragma once
+
+/* Kernel and user header (via symlink) */
+
+/* Page protection flags.
+ */
+#define PROT_NONE 0x0 /* No access. */
+#define PROT_READ 0x1 /* Pages can be read. */
+#define PROT_WRITE 0x2 /* Pages can be written. */
+#define PROT_EXEC 0x4 /* Pages can be executed. */
+
+/* Return value for mmap() on failure.
+ */
+#define MAP_FAILED ((void *)-1)
+
+/* Mapping type - shared or private.
+ */
+#define MAP_SHARED 1
+#define MAP_PRIVATE 2
+#define MAP_TYPE 3 /* mask for above types */
+
+/* Mapping flags.
+ */
+#define MAP_FIXED 4
+#define MAP_ANON 8
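
A sketch of the flag checks an mmap implementation might perform with these definitions (the validation logic is an assumption, not code from this commit):

```
#include "mm/mman.h"

/* Hypothetical argument validation for an mmap-style call. */
static long check_mmap_args(int prot, int flags)
{
    int type = flags & MAP_TYPE;    /* isolate MAP_SHARED vs. MAP_PRIVATE */
    if (type != MAP_SHARED && type != MAP_PRIVATE)
        return -1;                  /* exactly one mapping type required */
    if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC))
        return -1;                  /* unknown protection bits */
    return 0;
}
```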
diff --git a/kernel/include/mm/mobj.h b/kernel/include/mm/mobj.h
new file mode 100644
index 0000000..bca1b38
--- /dev/null
+++ b/kernel/include/mm/mobj.h
@@ -0,0 +1,75 @@
+#pragma once
+
+#include "proc/kmutex.h"
+#include "util/atomic.h"
+#include "util/list.h"
+#include "mm/pframe.h"
+
+struct pframe;
+
+struct mobj;
+
+typedef enum
+{
+ MOBJ_VNODE = 1,
+ MOBJ_SHADOW,
+ MOBJ_ANON,
+#ifdef OLD
+ MOBJ_BLOCKDEV,
+#else
+ MOBJ_FS,
+#endif
+} mobj_type_t;
+
+typedef struct mobj_ops
+{
+ long (*get_pframe)(struct mobj *o, uint64_t pagenum, long forwrite,
+ struct pframe **pfp);
+
+ long (*fill_pframe)(struct mobj *o, struct pframe *pf);
+
+ long (*flush_pframe)(struct mobj *o, struct pframe *pf);
+
+ void (*destructor)(struct mobj *o);
+} mobj_ops_t;
+
+typedef struct mobj
+{
+ long mo_type;
+ struct mobj_ops mo_ops;
+ atomic_t mo_refcount;
+ list_t mo_pframes;
+ kmutex_t mo_mutex;
+} mobj_t;
+
+void mobj_init(mobj_t *o, long type, mobj_ops_t *ops);
+
+void mobj_lock(mobj_t *o);
+
+void mobj_unlock(mobj_t *o);
+
+void mobj_ref(mobj_t *o);
+
+void mobj_put(mobj_t **op);
+
+void mobj_put_locked(mobj_t **op);
+
+long mobj_get_pframe(mobj_t *o, uint64_t pagenum, long forwrite,
+ struct pframe **pfp);
+
+void mobj_find_pframe(mobj_t *o, uint64_t pagenum, struct pframe **pfp);
+
+long mobj_flush_pframe(mobj_t *o, struct pframe *pf);
+
+long mobj_flush(mobj_t *o);
+
+long mobj_free_pframe(mobj_t *o, struct pframe **pfp);
+
+long mobj_default_get_pframe(mobj_t *o, uint64_t pagenum, long forwrite,
+ struct pframe **pfp);
+
+void mobj_default_destructor(mobj_t *o);
+
+#ifndef OLD
+void mobj_create_pframe(mobj_t *o, uint64_t pagenum, uint64_t loc, pframe_t **pfp);
+#endif
\ No newline at end of file
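
A sketch of the reference-counting and locking pattern these declarations suggest (the exact semantics are assumptions based on the mo_refcount and mo_mutex fields above):

```
#include "mm/mobj.h"

/* Assumed usage: hold a reference while operating on the object, take
 * mo_mutex around pframe-list access, and let mobj_put drop the
 * reference (running the destructor on the last one) and NULL out the
 * caller's pointer. */
static void use_mobj(mobj_t *o)
{
    mobj_ref(o);
    mobj_lock(o);
    /* ... operate on o->mo_pframes under the lock ... */
    mobj_unlock(o);
    mobj_put(&o); /* o is NULL afterwards */
}
```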
diff --git a/kernel/include/mm/page.h b/kernel/include/mm/page.h
new file mode 100644
index 0000000..5230a85
--- /dev/null
+++ b/kernel/include/mm/page.h
@@ -0,0 +1,124 @@
+#pragma once
+
+#ifdef __KERNEL__
+#include "types.h"
+#else
+#include "sys/types.h"
+#endif
+
+/* This header file contains the functions for allocating
+ * and freeing page-aligned chunks of data which are a
+ * multiple of a page in size. These are the lowest-level
+ * memory allocation functions. In general, code should
+ * use the slab allocator functions in mm/slab.h unless
+ * it requires page-aligned buffers. */
+
+#define PAGE_SHIFT 12
+#define PAGE_SIZE ((uintptr_t)(1UL << PAGE_SHIFT))
+#define PAGE_MASK (0xffffffffffffffff << PAGE_SHIFT)
+
+#define PAGE_ALIGN_DOWN(x) ((void *)(((uintptr_t)(x)&PAGE_MASK)))
+#define PAGE_ALIGN_UP(x) \
+ ((void *)((((uintptr_t)(x) + (PAGE_SIZE - 1)) & PAGE_MASK)))
+
+#define PAGE_OFFSET(x) (((uintptr_t)(x)) & ~PAGE_MASK)
+#define PAGE_ALIGNED(x) (!PAGE_OFFSET(x))
+
+#define PN_TO_ADDR(x) ((void *)(((uintptr_t)(x)) << PAGE_SHIFT))
+#define ADDR_TO_PN(x) (((uintptr_t)(x)) >> PAGE_SHIFT)
+
+#define PAGE_SAME(x, y) (PAGE_ALIGN_DOWN(x) == PAGE_ALIGN_DOWN(y))
+
+#define PAGE_NSIZES 8
+
+#define USE_2MB_PAGES 1
+#define USE_1GB_PAGES 1
+
+#define PAGE_SHIFT_2MB 21
+#define PAGE_SIZE_2MB ((uintptr_t)(1UL << PAGE_SHIFT_2MB))
+#define PAGE_MASK_2MB (0xffffffffffffffff << PAGE_SHIFT_2MB)
+#define PAGE_ALIGN_DOWN_2MB(x) (((uintptr_t)(x)) & PAGE_MASK_2MB)
+#define PAGE_ALIGN_UP_2MB(x) (PAGE_ALIGN_DOWN_2MB((x)-1) + PAGE_SIZE_2MB)
+#define PAGE_OFFSET_2MB(x) (((uintptr_t)(x)) & ~PAGE_MASK_2MB)
+#define PAGE_ALIGNED_2MB(x) ((x) == PAGE_ALIGN_DOWN_2MB(x))
+#define PAGE_SAME_2MB(x, y) (PAGE_ALIGN_DOWN_2MB(x) == PAGE_ALIGN_DOWN_2MB(y))
+
+#define PAGE_SHIFT_1GB 30
+#define PAGE_MASK_1GB (0xffffffffffffffff << PAGE_SHIFT_1GB)
+#define PAGE_SIZE_1GB ((uintptr_t)(1UL << PAGE_SHIFT_1GB))
+#define PAGE_ALIGN_DOWN_1GB(x) (((uintptr_t)(x)) & PAGE_MASK_1GB)
+#define PAGE_ALIGN_UP_1GB(x) (PAGE_ALIGN_DOWN_1GB((x)-1) + PAGE_SIZE_1GB)
+#define PAGE_OFFSET_1GB(x) (((uintptr_t)(x)) & ~PAGE_MASK_1GB)
+#define PAGE_ALIGNED_1GB(x) ((x) == PAGE_ALIGN_DOWN_1GB(x))
+#define PAGE_SAME_1GB(x, y) (PAGE_ALIGN_DOWN_1GB(x) == PAGE_ALIGN_DOWN_1GB(y))
+
+#define PAGE_SHIFT_512GB 39
+#define PAGE_SIZE_512GB ((uintptr_t)(1UL << PAGE_SHIFT_512GB))
+#define PAGE_MASK_512GB (0xffffffffffffffff << PAGE_SHIFT_512GB)
+#define PAGE_ALIGN_DOWN_512GB(x) (((uintptr_t)(x)) & PAGE_MASK_512GB)
+#define PAGE_ALIGN_UP_512GB(x) (PAGE_ALIGN_DOWN_512GB((x)-1) + PAGE_SIZE_512GB)
+
+#define PAGE_CONTROL_FLAGS(x) \
+ ((x) & (PT_PRESENT | PT_WRITE | PT_USER | PT_WRITE_THROUGH | \
+ PT_CACHE_DISABLED | PT_SIZE | PT_GLOBAL))
+#define PAGE_FLAGS(x) ((x) & (~PAGE_MASK))
+
+typedef enum page_size
+{
+ ps_4kb,
+ ps_2mb,
+ ps_1gb,
+ ps_512gb,
+} page_size_t;
+
+typedef struct page_status
+{
+ page_size_t size;
+ int mapped;
+} page_status_t;
+
+/* Performs all initialization necessary for the
+ * page allocation system. This should be called
+ * only once at boot time before any other functions
+ * in this header are called. */
+void page_init();
+
+void *physmap_start();
+
+void *physmap_end();
+
+/* These functions allocate and free one page-aligned,
+ * page-sized block of memory. Values passed to
+ * page_free MUST have been returned by page_alloc
+ * at some previous point. There should be only one
+ * call to page_free for each value returned by
+ * page_alloc. If the system is out of memory page_alloc
+ * will return NULL. */
+void *page_alloc(void);
+
+void *page_alloc_bounded(void *max_paddr);
+
+void page_free(void *addr);
+
+/* These functions allocate and free a page-aligned
+ * block of memory that is npages pages in length.
+ * A call to page_alloc_n allocates a block; to free
+ * that block, call page_free_n with npages set to the
+ * same value used when the block was allocated. */
+void *page_alloc_n(size_t npages);
+
+void *page_alloc_n_bounded(size_t npages, void *max_paddr);
+
+void page_free_n(void *start, size_t npages);
+
+void page_add_range(void *start, void *end);
+
+void page_mark_reserved(void *paddr);
+
+void page_init_finish();
+
+/* Returns the number of free pages remaining in the
+ * system. Note that calls to page_alloc_n(npages) may
+ * fail even if page_free_count() >= npages, since the
+ * free pages may not be physically contiguous. */
+size_t page_free_count();
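
A worked illustration of the alignment macros and the allocation pairing (the address values are examples, assuming PAGE_SHIFT == 12):

```
#include "mm/page.h"

/* With PAGE_SHIFT == 12 (PAGE_SIZE == 4096):
 *   PAGE_ALIGN_DOWN(0x1234) == (void *)0x1000
 *   PAGE_ALIGN_UP(0x1234)   == (void *)0x2000
 *   PAGE_OFFSET(0x1234)     == 0x234
 *   ADDR_TO_PN(0x1234)      == 1 and PN_TO_ADDR(1) == (void *)0x1000
 */
static void *grab_two_pages(void)
{
    void *block = page_alloc_n(2);  /* NULL if out of memory */
    /* ... later: page_free_n(block, 2) with the same npages ... */
    return block;
}
```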
diff --git a/kernel/include/mm/pagecache.h b/kernel/include/mm/pagecache.h
new file mode 100644
index 0000000..442e7b1
--- /dev/null
+++ b/kernel/include/mm/pagecache.h
@@ -0,0 +1,9 @@
+#pragma once
+
+#include "drivers/blockdev.h"
+#include "mm/pframe.h"
+
+long pagecache_get_page(pframe_t *pf);
+#ifdef NO
+void pagecache_newsource(pframe_t *pf, blockdev_t *dev, long loc);
+#endif
\ No newline at end of file
diff --git a/kernel/include/mm/pagetable.h b/kernel/include/mm/pagetable.h
new file mode 100644
index 0000000..cc1fa3e
--- /dev/null
+++ b/kernel/include/mm/pagetable.h
@@ -0,0 +1,94 @@
+#pragma once
+
+#include "mm/page.h"
+#include "vm/vmmap.h"
+
+#define PT_PRESENT 0x001
+#define PT_WRITE 0x002
+#define PT_USER 0x004
+#define PT_WRITE_THROUGH 0x008
+#define PT_CACHE_DISABLED 0x010
+#define PT_ACCESSED 0x020
+#define PT_DIRTY 0x040
+#define PT_SIZE 0x080
+#define PT_GLOBAL 0x100
+
+#define PT_ENTRY_COUNT (PAGE_SIZE / sizeof(uintptr_t))
+
+typedef struct page
+{
+ uint8_t data[PAGE_SIZE];
+} page_t;
+
+// Generalized structure for all directory-like entries
+typedef struct pt
+{
+ uintptr_t phys[PT_ENTRY_COUNT];
+} pt_t, pd_t, pdp_t, pml4_t;
+
+#define INDEX_MASK 0b111111111
+#define PML4E(x) ((((uintptr_t)(x)) >> 39) & INDEX_MASK)
+#define PDPE(x) ((((uintptr_t)(x)) >> 30) & INDEX_MASK)
+#define PDE(x) ((((uintptr_t)(x)) >> 21) & INDEX_MASK)
+#define PTE(x) ((((uintptr_t)(x)) >> 12) & INDEX_MASK)
+
+#define PT_ENTRY_COUNT (PAGE_SIZE / sizeof(uintptr_t))
+#define PT_VADDR_SIZE (PAGE_SIZE * PT_ENTRY_COUNT)
+#define PD_VADDR_SIZE (PAGE_SIZE * PT_ENTRY_COUNT * PT_ENTRY_COUNT)
+#define PDP_VADDR_SIZE \
+ (PAGE_SIZE * PT_ENTRY_COUNT * PT_ENTRY_COUNT * PT_ENTRY_COUNT)
+#define PML4_VADDR_SIZE \
+ (PAGE_SIZE * PT_ENTRY_COUNT * PT_ENTRY_COUNT * PT_ENTRY_COUNT * \
+ PT_ENTRY_COUNT)
+
+#define IS_PRESENT(n) ((n)&PT_PRESENT)
+#define IS_2MB_PAGE(n) ((n)&PT_SIZE)
+#define IS_1GB_PAGE IS_2MB_PAGE
+
+#define GDB_PT_PHYSADDR(pt, v) (pt->phys[PTE(v)] & PAGE_MASK)
+#define GDB_PD_PHYSADDR(pd, v) (pd->phys[PDE(v)] & PAGE_MASK)
+#define GDB_PDP_PHYSADDR(pdp, v) (pdp->phys[PDPE(v)] & PAGE_MASK)
+#define GDB_PML4_PHYSADDR(pml4, v) (pml4->phys[PML4E(v)] & PAGE_MASK)
+
+#define GDB_PHYSADDR(pml4, v) \
+ (GDB_PT_PHYSADDR( \
+ GDB_PD_PHYSADDR( \
+ GDB_PDP_PHYSADDR(GDB_PML4_PHYSADDR(pml4, (v)) + PHYS_OFFSET, \
+ (v)) + \
+ PHYS_OFFSET, \
+ (v)) + \
+ PHYS_OFFSET, \
+ (v)) + \
+ PHYS_OFFSET)
+#define GDB_CUR_PHYSADDR(v) GDB_PHYSADDR(curproc->p_pml4, (v))
+
+uintptr_t pt_virt_to_phys_helper(pml4_t *pml4, uintptr_t vaddr);
+
+uintptr_t pt_virt_to_phys(uintptr_t vaddr);
+
+void pt_init(void);
+
+/* Currently unused. */
+void pt_template_init(void);
+
+pml4_t *pt_get();
+
+void pt_set(pml4_t *pml4);
+
+pml4_t *clone_pml4(pml4_t *pml4, long include_user_mappings);
+
+pml4_t *pt_create();
+
+void pt_destroy(pml4_t *pml4);
+
+long pt_map(pml4_t *pml4, uintptr_t paddr, uintptr_t vaddr, uint32_t pdflags,
+ uint32_t ptflags);
+
+long pt_map_range(pml4_t *pml4, uintptr_t paddr, uintptr_t vaddr,
+ uintptr_t vmax, uint32_t pdflags, uint32_t ptflags);
+
+void pt_unmap(pml4_t *pml4, uintptr_t vaddr);
+
+void pt_unmap_range(pml4_t *pml4, uintptr_t vaddr, uintptr_t vmax);
+
+void check_invalid_mappings(pml4_t *pml4, vmmap_t *vmmap, char *prompt);
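
A sketch of how the 9-bit index macros decompose a virtual address, and of a pt_map call (the flag choices are illustrative assumptions):

```
#include "mm/pagetable.h"

/* A 48-bit virtual address splits into four 9-bit table indices plus a
 * 12-bit page offset: PML4E(v) = bits 47-39, PDPE(v) = bits 38-30,
 * PDE(v) = bits 29-21, PTE(v) = bits 20-12. */
static long map_one_user_page(pml4_t *pml4, uintptr_t paddr, uintptr_t vaddr)
{
    uint32_t flags = PT_PRESENT | PT_WRITE | PT_USER; /* illustrative */
    return pt_map(pml4, paddr, vaddr, flags, flags);
}
```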
diff --git a/kernel/include/mm/pframe.h b/kernel/include/mm/pframe.h
new file mode 100644
index 0000000..bd2c3f7
--- /dev/null
+++ b/kernel/include/mm/pframe.h
@@ -0,0 +1,23 @@
+#pragma once
+
+//#include "mm/mobj.h"
+#include "proc/kmutex.h"
+#include "types.h"
+
+typedef struct pframe
+{
+ size_t pf_pagenum;
+ size_t pf_loc;
+ void *pf_addr;
+ long pf_dirty;
+ kmutex_t pf_mutex;
+ list_link_t pf_link;
+} pframe_t;
+
+void pframe_init();
+
+pframe_t *pframe_create();
+
+void pframe_release(pframe_t **pfp);
+
+void pframe_free(pframe_t **pfp);
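
A lifecycle sketch implied by the declarations above (return-value and pointer-clearing behavior are assumptions):

```
#include "mm/pframe.h"

/* Assumed lifecycle: create a frame, fill its backing page, mark it
 * dirty after writes, then release it. The double-pointer argument
 * suggests the caller's pointer is cleared on release. */
static void pframe_example(void)
{
    pframe_t *pf = pframe_create();
    if (!pf)
        return;
    /* ... fill pf->pf_addr; set pf->pf_dirty after writes ... */
    pframe_release(&pf); /* pf is NULL afterwards */
}
```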
diff --git a/kernel/include/mm/slab.h b/kernel/include/mm/slab.h
new file mode 100644
index 0000000..6ead5ae
--- /dev/null
+++ b/kernel/include/mm/slab.h
@@ -0,0 +1,96 @@
+#pragma once
+
+#include <types.h>
+
+/* Define SLAB_REDZONE to add top and bottom redzones to every object. */
+#define SLAB_REDZONE 0xdeadbeefdeadbeef
+
+/* Define SLAB_CHECK_FREE to add extra bookkeeping to make sure there
+ * are no double frees. */
+#define SLAB_CHECK_FREE
+
+/*
+ * The slab allocator. A "cache" is a store of objects; you create one by
+ * specifying a constructor, destructor, and the size of an object. The
+ * "alloc" function allocates one object, and the "free" function returns
+ * it to the free list *without calling the destructor*. This lets you save
+ * on destruction/construction calls; the idea is that every free object in
+ * the cache is in a known state.
+ */
+typedef struct slab_allocator slab_allocator_t;
+
+/* Initializes the slab allocator subsystem. This should be done
+ * only after the page subsystem has been initialized. Slab allocators
+ * and kmalloc will not work until this function has been called. */
+void slab_init();
+
+/*
+ * Example Usage
+ * See the example below for how to use a slab allocator to allocate objects
+ * of a given size. Note that an allocator rarely needs to be destroyed, since
+ * most should last as long as the system is running (e.g. the process allocator).
+ *
+ * ```
+ * typedef struct {
+ * int x;
+ * int y;
+ * } point_t;
+ *
+ * // Create a new allocator for objects of type point_t. This only needs to
+ * // happen once, usually in an initialization routine.
+ * slab_allocator_t *point_allocator = slab_allocator_create("point", sizeof(point_t));
+ *
+ * // Allocate a new point_t from the slab allocator
+ * point_t *p = (point_t *)slab_obj_alloc(point_allocator);
+ *
+ * // ... Use p here ...
+ *
+ * // Deallocate the point_t
+ * slab_obj_free(point_allocator, p);
+ * ```
+ */
+
+/**
+ * Creates a slab allocator for allocating objects of a given size.
+ *
+ * @param name The name of the allocator (for debugging)
+ * @param size The size (bytes) of objects that will be allocated from this allocator
+ * @return slab_allocator_t* An allocator, or NULL on failure
+ */
+slab_allocator_t *slab_allocator_create(const char *name, size_t size);
+
+/**
+ * Destroys a slab allocator.
+ *
+ * @param allocator The allocator to destroy
+ */
+void slab_allocator_destroy(struct slab_allocator *allocator);
+
+/**
+ * Allocates an object from the given slab allocator. The object is a chunk of
+ * memory as big as the size that slab allocator was created with.
+ *
+ * @param allocator The allocator to allocate from
+ * @return void* A chunk of memory of the appropriate object size, or NULL
+ * on failure
+ */
+void *slab_obj_alloc(slab_allocator_t *allocator);
+
+/**
+ * Frees a given object that was allocated by a given slab allocator.
+ *
+ * @param allocator The allocator that allocated this object
+ * @param obj The object to be freed
+ */
+void slab_obj_free(slab_allocator_t *allocator, void *obj);
+
+/**
+ * Reclaims memory from unused slabs.
+ *
+ * NOTE: This is not currently implemented.
+ *
+ * @param target Target number of pages to reclaim. If negative, reclaim as many
+ * as possible
+ * @return long Number of pages freed
+ */
+long slab_allocators_reclaim(long target);
\ No newline at end of file
diff --git a/kernel/include/mm/tlb.h b/kernel/include/mm/tlb.h
new file mode 100644
index 0000000..836be4e
--- /dev/null
+++ b/kernel/include/mm/tlb.h
@@ -0,0 +1,35 @@
+#pragma once
+
+#include "kernel.h"
+#include "types.h"
+
+#include "mm/page.h"
+
+/* Invalidates any entries from the TLB which contain
+ * mappings for the given virtual address. */
+static inline void tlb_flush(uintptr_t vaddr)
+{
+ __asm__ volatile("invlpg (%0)" ::"r"(vaddr));
+}
+
+/* Invalidates any entries for count pages starting at
+ * vaddr from the TLB. If this range is very large, it may
+ * be more efficient to call tlb_flush_all to invalidate
+ * the entire TLB. */
+static inline void tlb_flush_range(uintptr_t vaddr, size_t count)
+{
+ for (size_t i = 0; i < count; i++, vaddr += PAGE_SIZE)
+ {
+ tlb_flush(vaddr);
+ }
+}
+
+/* Invalidates the entire TLB. */
+static inline void tlb_flush_all()
+{
+ uintptr_t pdir;
+ __asm__ volatile("movq %%cr3, %0"
+ : "=r"(pdir));
+ __asm__ volatile("movq %0, %%cr3" ::"r"(pdir)
+ : "memory");
+}