path: root/kernel/proc/sched.c
author    sotech117 <michael_foiani@brown.edu>  2024-02-11 07:36:50 +0000
committer sotech117 <michael_foiani@brown.edu>  2024-02-11 07:36:50 +0000
commit    6cfe0ddd014597113e0635fcdecba9db0cc2c64b (patch)
tree      b40bbeda9f1e7ca44c1d59e75b94ed168310a959 /kernel/proc/sched.c
parent    c71b9e406a8fb7bfcaeb20ee787d6eb4a1cbb71d (diff)
basic implementation of most functions. not tested, but generally well thought out
Diffstat (limited to 'kernel/proc/sched.c')
-rw-r--r--  kernel/proc/sched.c  77
1 file changed, 70 insertions, 7 deletions
diff --git a/kernel/proc/sched.c b/kernel/proc/sched.c
index 3bc20c8..2e69634 100644
--- a/kernel/proc/sched.c
+++ b/kernel/proc/sched.c
@@ -162,7 +162,16 @@ void sched_init(void)
*/
long sched_cancellable_sleep_on(ktqueue_t *queue)
{
- NOT_YET_IMPLEMENTED("PROCS: sched_cancellable_sleep_on");
+ // NOT_YET_IMPLEMENTED("PROCS: sched_cancellable_sleep_on");
+
+ KASSERT(queue != NULL);
+
+ curthr->kt_state = KT_SLEEP_CANCELLABLE;
+
+ sched_switch(queue);
+
+ // TODO: finish this function - consider the ipl_wait() function
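+ // TODO: probably also raise IPL before changing state (as in sched_sleep_on),
+ // and after waking check curthr->kt_cancelled and return -EINTR if it is set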
+
return 0;
}
@@ -174,7 +183,15 @@ long sched_cancellable_sleep_on(ktqueue_t *queue)
*/
void sched_cancel(kthread_t *thr)
{
- NOT_YET_IMPLEMENTED("PROCS: sched_cancel");
+ // NOT_YET_IMPLEMENTED("PROCS: sched_cancel");
+
+ thr->kt_cancelled = 1;
+
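+ // a thread in an uncancellable sleep is left on its queue; it will
+ // observe kt_cancelled on its own once it wakes up normally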
+ if (thr->kt_state == KT_SLEEP_CANCELLABLE)
+ {
+ ktqueue_remove(thr->kt_wchan, thr);
+ sched_make_runnable(thr);
+ }
}
/*
@@ -210,7 +227,20 @@ void sched_cancel(kthread_t *thr)
*/
void sched_switch(ktqueue_t *queue)
{
- NOT_YET_IMPLEMENTED("PROCS: sched_switch");
+ // NOT_YET_IMPLEMENTED("PROCS: sched_switch");
+
+ KASSERT(intr_getipl() == IPL_HIGH);
+ intr_disable();
+
+ KASSERT(curthr->kt_state != KT_ON_CPU);
+
+ ktqueue_enqueue(queue, curthr);
+ curcore.kc_queue = queue;
+ last_thread_context = &curthr->kt_ctx;
+
+ intr_setipl(IPL_LOW); // allow interrupts to wake up the idling core
+ intr_enable();
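+ // save this thread's context and switch to the core's context, which
+ // will pick the next thread to run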
+ context_switch(&curthr->kt_ctx, &curcore.kc_ctx);
}
/*
@@ -236,7 +266,15 @@ void sched_yield()
*/
void sched_make_runnable(kthread_t *thr)
{
- NOT_YET_IMPLEMENTED("PROCS: sched_make_runnable");
+ // NOT_YET_IMPLEMENTED("PROCS: sched_make_runnable");
+
+ KASSERT(thr != curthr);
+ KASSERT(thr->kt_state != KT_RUNNABLE);
+
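+ // raise IPL so an interrupt handler cannot modify the run queue mid-update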
+ int oldIPL = intr_setipl(IPL_HIGH);
+ thr->kt_state = KT_RUNNABLE;
+ ktqueue_enqueue(&kt_runq, thr);
+ intr_setipl(oldIPL);
}
/*
@@ -255,7 +293,13 @@ void sched_make_runnable(kthread_t *thr)
*/
void sched_sleep_on(ktqueue_t *q)
{
- NOT_YET_IMPLEMENTED("PROCS: sched_sleep_on");
+ // NOT_YET_IMPLEMENTED("PROCS: sched_sleep_on");
+
+ // raise IPL so the state change and enqueue cannot be interrupted;
+ // it is deliberately not restored here, since sched_switch expects to
+ // be entered at IPL_HIGH and lowers it itself before context switching
+ intr_setipl(IPL_HIGH);
+ curthr->kt_state = KT_SLEEP;
+
+ sched_switch(q);
}
/*
@@ -271,7 +315,21 @@ void sched_sleep_on(ktqueue_t *q)
*/
void sched_wakeup_on(ktqueue_t *q, kthread_t **ktp)
{
- NOT_YET_IMPLEMENTED("PROCS: sched_wakeup_on");
+ // NOT_YET_IMPLEMENTED("PROCS: sched_wakeup_on");
+
+ int oldIPL = intr_setipl(IPL_HIGH); // don't allow interrupts while checking or modifying the queue
+ if (sched_queue_empty(q))
+ {
+ intr_setipl(oldIPL);
+ return;
+ }
+
+ kthread_t *thr = ktqueue_dequeue(q);
+ if (ktp)
+ {
+ *ktp = thr;
+ }
+ sched_make_runnable(thr);
+ intr_setipl(oldIPL);
}
/*
@@ -279,7 +337,12 @@ void sched_wakeup_on(ktqueue_t *q, kthread_t **ktp)
*/
void sched_broadcast_on(ktqueue_t *q)
{
- NOT_YET_IMPLEMENTED("PROCS: sched_broadcast_on");
+ // NOT_YET_IMPLEMENTED("PROCS: sched_broadcast_on");
+
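+ // drain the queue, making each waiting thread runnable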
+ while (!sched_queue_empty(q))
+ {
+ sched_make_runnable(ktqueue_dequeue(q));
+ }
}
/*===============