aboutsummaryrefslogtreecommitdiff
path: root/kernel/proc
diff options
context:
space:
mode:
authorsotech117 <michael_foiani@brown.edu>2024-02-11 07:36:50 +0000
committersotech117 <michael_foiani@brown.edu>2024-02-11 07:36:50 +0000
commit6cfe0ddd014597113e0635fcdecba9db0cc2c64b (patch)
treeb40bbeda9f1e7ca44c1d59e75b94ed168310a959 /kernel/proc
parentc71b9e406a8fb7bfcaeb20ee787d6eb4a1cbb71d (diff)
basic implementation of most functions. not tested, but generally well thought out
Diffstat (limited to 'kernel/proc')
-rw-r--r--kernel/proc/kthread.c45
-rw-r--r--kernel/proc/proc.c59
-rw-r--r--kernel/proc/sched.c77
3 files changed, 167 insertions, 14 deletions
diff --git a/kernel/proc/kthread.c b/kernel/proc/kthread.c
index 1104b31..5f9917f 100644
--- a/kernel/proc/kthread.c
+++ b/kernel/proc/kthread.c
@@ -68,8 +68,36 @@ void kthread_init()
/*
 * Allocates and initializes a new kernel thread that will run func(arg1, arg2)
 * on behalf of proc.
 *
 * On success returns the new thread in state KT_NO_STATE (the caller is
 * responsible for making it runnable); returns NULL if either the thread
 * object or its kernel stack cannot be allocated. No resources are leaked
 * on the failure paths.
 */
kthread_t *kthread_create(proc_t *proc, kthread_func_t func, long arg1,
                          void *arg2)
{
    kthread_t *new_thread = slab_obj_alloc(kthread_allocator);
    if (new_thread == NULL)
    {
        return NULL;
    }

    new_thread->kt_kstack = alloc_stack();
    if (new_thread->kt_kstack == NULL)
    {
        /* stack allocation failed -- release the thread object too */
        slab_obj_free(kthread_allocator, new_thread);
        return NULL;
    }

    new_thread->kt_state = KT_NO_STATE;
    new_thread->kt_proc = proc;
    context_setup(&new_thread->kt_ctx, func, arg1, arg2, new_thread->kt_kstack,
                  DEFAULT_STACK_SIZE, proc->p_pml4);

    /* default-initialize the remaining fields */
    new_thread->kt_retval = NULL;
    new_thread->kt_errno = 0;
    new_thread->kt_cancelled = 0;
    new_thread->kt_wchan = NULL;
    list_link_init(&new_thread->kt_plink);
    list_link_init(&new_thread->kt_qlink);
    list_init(&new_thread->kt_mutexes);
    new_thread->kt_recent_core = 0;

    return new_thread;
}
/*
@@ -124,7 +152,14 @@ void kthread_destroy(kthread_t *thr)
*/
/*
 * Asynchronously cancels thr, arranging for retval to be its return value.
 *
 * The cancellation is cooperative: we set the return value and mark the
 * thread cancelled via sched_cancel(); the thread itself notices the flag
 * the next time it checks (e.g. returning from a cancellable sleep).
 *
 * A thread must not cancel itself through this path -- it should call
 * kthread_exit() instead.
 */
void kthread_cancel(kthread_t *thr, void *retval)
{
    KASSERT(thr != curthr);

    thr->kt_retval = retval;
    sched_cancel(thr);

    /* NOTE: the previous version called check_curthr_cancelled() here, but
     * curthr's flag cannot have been set by this call (thr != curthr is
     * asserted above); that check belongs at cancellation points such as
     * syscall_handler(), not in the canceller. */
}
/*
@@ -132,5 +167,7 @@ void kthread_cancel(kthread_t *thr, void *retval)
*/
/*
 * Terminates the current thread with the given return value. All of the
 * real teardown (updating the owning process, waking any waiters) is
 * delegated to proc_thread_exiting().
 */
void kthread_exit(void *retval)
{
    proc_thread_exiting(retval);
}
diff --git a/kernel/proc/proc.c b/kernel/proc/proc.c
index 6155b58..fd253f6 100644
--- a/kernel/proc/proc.c
+++ b/kernel/proc/proc.c
@@ -340,7 +340,7 @@ void proc_kill_all()
}
}
- // kill the current proc
+ // kill this proc
do_exit(0);
}
@@ -416,7 +416,58 @@ void proc_destroy(proc_t *proc)
*/
/*
 * Waits for a child of the current process to exit and reaps it.
 *
 * pid > 0  -- block until that specific child exits.
 * pid == -1 -- block until ANY child exits, then reap it.
 * Other pid values and any nonzero options are unsupported (-ENOTSUP).
 *
 * On success, stores the child's exit status through status (if non-NULL),
 * destroys the child, and returns its pid. Returns -ECHILD if the target
 * is not a child of the caller (or the caller has no children).
 */
pid_t do_waitpid(pid_t pid, int *status, int options)
{
    /* only the default POSIX semantics are supported */
    if (pid == 0 || options != 0 || pid < -1)
    {
        return -ENOTSUP;
    }

    if (pid > 0)
    {
        proc_t *child = proc_lookup(pid);
        if (child == NULL || child->p_pproc != curproc)
        {
            return -ECHILD;
        }

        /* sleep until this specific child process exits */
        while (child->p_state != PROC_DEAD)
        {
            sched_sleep_on(&curproc->p_wait);
        }

        /* read the status BEFORE proc_destroy frees the proc */
        if (status != NULL)
        {
            *status = child->p_status;
        }
        proc_destroy(child);

        return pid;
    }

    /* pid == -1: keep sleeping until some child has exited */
    for (;;)
    {
        if (list_empty(&curproc->p_children))
        {
            return -ECHILD;
        }

        proc_t *child;
        list_iterate(&curproc->p_children, child, proc_t, p_child_link)
        {
            if (child->p_state == PROC_DEAD)
            {
                /* save the pid before proc_destroy frees child
                 * (the original read child->p_pid after destroy:
                 * a use-after-free) */
                pid_t dead_pid = child->p_pid;
                if (status != NULL)
                {
                    *status = child->p_status;
                }
                proc_destroy(child);
                return dead_pid;
            }
        }

        /* no child has exited yet -- block until one does
         * (the original fell through and returned 0 without waiting) */
        sched_sleep_on(&curproc->p_wait);
    }
}
@@ -425,7 +476,9 @@ pid_t do_waitpid(pid_t pid, int *status, int options)
*/
/*
 * Exits the current process with the given status. Since each process has
 * a single thread here, exiting the current thread (which records status
 * as its return value) terminates the process.
 */
void do_exit(long status)
{
    kthread_exit((void *)status);
}
/*==========
diff --git a/kernel/proc/sched.c b/kernel/proc/sched.c
index 3bc20c8..2e69634 100644
--- a/kernel/proc/sched.c
+++ b/kernel/proc/sched.c
@@ -162,7 +162,16 @@ void sched_init(void)
*/
/*
 * Puts the current thread to sleep on queue in a cancellable state.
 *
 * Returns 0 on a normal wakeup, or -EINTR if the thread was cancelled --
 * either before it went to sleep or while it was sleeping (sched_cancel()
 * removes a KT_SLEEP_CANCELLABLE thread from its queue and wakes it).
 */
long sched_cancellable_sleep_on(ktqueue_t *queue)
{
    KASSERT(queue != NULL);

    /* already cancelled: don't go to sleep at all */
    if (curthr->kt_cancelled)
    {
        return -EINTR;
    }

    curthr->kt_state = KT_SLEEP_CANCELLABLE;
    sched_switch(queue);

    /* we may have been woken by sched_cancel() rather than a real wakeup;
     * report that to the caller so it can unwind */
    if (curthr->kt_cancelled)
    {
        return -EINTR;
    }
    return 0;
}
@@ -174,7 +183,15 @@ long sched_cancellable_sleep_on(ktqueue_t *queue)
*/
/*
 * Marks thr as cancelled. If thr is in a cancellable sleep, it is pulled
 * off its wait queue and made runnable so it can observe the flag.
 *
 * The flag update, state check, and queue manipulation must be atomic with
 * respect to interrupt handlers, so the whole body runs at IPL_HIGH
 * (the original version had no IPL protection here).
 */
void sched_cancel(kthread_t *thr)
{
    int oldipl = intr_setipl(IPL_HIGH);

    thr->kt_cancelled = 1;

    if (thr->kt_state == KT_SLEEP_CANCELLABLE)
    {
        ktqueue_remove(thr->kt_wchan, thr);
        sched_make_runnable(thr);
    }

    intr_setipl(oldipl);
}
/*
@@ -210,7 +227,20 @@ void sched_cancel(kthread_t *thr)
*/
/*
 * Enqueues the current thread on queue and switches to the core's context
 * (which will pick the next runnable thread). Callers must already hold
 * IPL_HIGH and must have set curthr's state appropriately.
 */
void sched_switch(ktqueue_t *queue)
{
    /* BUG FIX: the original compared the function POINTER intr_getipl to
     * IPL_HIGH; the function must be CALLED for the assertion to mean
     * anything. */
    KASSERT(intr_getipl() == IPL_HIGH);
    intr_disable();

    /* the caller must have moved curthr out of the running state */
    KASSERT(curthr->kt_state != KT_ON_CPU);

    ktqueue_enqueue(queue, curthr);
    curcore.kc_queue = queue;
    last_thread_context = &curthr->kt_ctx;

    intr_setipl(IPL_LOW); // allow interrupts to wake up the idling core
    intr_enable();
    context_switch(&curthr->kt_ctx, &curcore.kc_ctx);
}
/*
@@ -236,7 +266,15 @@ void sched_yield()
*/
/*
 * Marks thr runnable and places it on the global run queue. Must not be
 * called on the currently-running thread or on a thread that is already
 * runnable. Interrupts are masked (IPL_HIGH) while the run queue is
 * modified.
 */
void sched_make_runnable(kthread_t *thr)
{
    KASSERT(thr != curthr);
    KASSERT(thr->kt_state != KT_RUNNABLE);

    /* mask interrupts while touching the shared run queue */
    int prev_ipl = intr_setipl(IPL_HIGH);
    thr->kt_state = KT_RUNNABLE;
    ktqueue_enqueue(&kt_runq, thr);
    intr_setipl(prev_ipl);
}
/*
@@ -255,7 +293,13 @@ void sched_make_runnable(kthread_t *thr)
*/
/*
 * Puts the current thread into an uncancellable sleep on q. Does not
 * return until the thread is woken via sched_wakeup_on()/broadcast.
 *
 * The IPL is raised before changing state so a wakeup cannot race with
 * going to sleep; sched_switch() itself expects to be entered at IPL_HIGH.
 */
void sched_sleep_on(ktqueue_t *q)
{
    int oldIPL = intr_setipl(IPL_HIGH);
    curthr->kt_state = KT_SLEEP;

    sched_switch(q);

    /* we resume here after being woken; restore the caller's IPL
     * (the original saved oldIPL but never used it, leaving the caller
     * at whatever level the scheduler resumed us with) */
    intr_setipl(oldIPL);
}
/*
@@ -271,7 +315,21 @@ void sched_sleep_on(ktqueue_t *q)
*/
/*
 * Wakes one thread sleeping on q (if any), making it runnable. If ktp is
 * non-NULL, the woken thread is stored through it.
 *
 * The empty check and the dequeue must be a single atomic step with
 * respect to interrupt handlers -- the original checked emptiness BEFORE
 * raising the IPL, leaving a window for an interrupt to change the queue.
 */
void sched_wakeup_on(ktqueue_t *q, kthread_t **ktp)
{
    int oldIPL = intr_setipl(IPL_HIGH); // don't allow interrupts while inspecting/modifying the queue

    if (sched_queue_empty(q))
    {
        intr_setipl(oldIPL);
        return;
    }

    kthread_t *thr = ktqueue_dequeue(q);
    if (ktp)
    {
        *ktp = thr;
    }
    sched_make_runnable(thr);

    intr_setipl(oldIPL);
}
/*
@@ -279,7 +337,12 @@ void sched_wakeup_on(ktqueue_t *q, kthread_t **ktp)
*/
/*
 * Wakes every thread sleeping on q, making each runnable.
 *
 * The queue is drained at IPL_HIGH so interrupt handlers cannot modify it
 * between the emptiness check and the dequeue (the original looped with
 * no IPL protection at all).
 */
void sched_broadcast_on(ktqueue_t *q)
{
    int oldIPL = intr_setipl(IPL_HIGH);

    while (!sched_queue_empty(q))
    {
        sched_make_runnable(ktqueue_dequeue(q));
    }

    intr_setipl(oldIPL);
}
/*===============