Diffstat (limited to 'kernel/proc/sched.c')
-rw-r--r-- | kernel/proc/sched.c | 100 |
1 files changed, 93 insertions, 7 deletions
diff --git a/kernel/proc/sched.c b/kernel/proc/sched.c
index 3bc20c8..b422ef1 100644
--- a/kernel/proc/sched.c
+++ b/kernel/proc/sched.c
@@ -162,7 +162,23 @@ void sched_init(void)
  */
 long sched_cancellable_sleep_on(ktqueue_t *queue)
 {
-    NOT_YET_IMPLEMENTED("PROCS: sched_cancellable_sleep_on");
+    // NOT_YET_IMPLEMENTED("PROCS: sched_cancellable_sleep_on");
+
+    KASSERT(queue != NULL);
+
+    if (curthr->kt_cancelled)
+    {
+        return -EINTR;
+    }
+
+    curthr->kt_state = KT_SLEEP_CANCELLABLE;
+    sched_switch(queue);
+
+    if (curthr->kt_cancelled)
+    {
+        return -EINTR;
+    }
+    return 0;
 }
 
 /*
@@ -174,7 +190,15 @@ long sched_cancellable_sleep_on(ktqueue_t *queue)
  */
 void sched_cancel(kthread_t *thr)
 {
-    NOT_YET_IMPLEMENTED("PROCS: sched_cancel");
+    // NOT_YET_IMPLEMENTED("PROCS: sched_cancel");
+
+    thr->kt_cancelled = 1;
+
+    if (thr->kt_state == KT_SLEEP_CANCELLABLE)
+    {
+        ktqueue_remove(thr->kt_wchan, thr);
+        sched_make_runnable(thr);
+    }
 }
 
 /*
@@ -210,7 +234,21 @@ void sched_cancel(kthread_t *thr)
  */
 void sched_switch(ktqueue_t *queue)
 {
-    NOT_YET_IMPLEMENTED("PROCS: sched_switch");
+    // NOT_YET_IMPLEMENTED("PROCS: sched_switch");
+
+    // KASSERT(intr_getipl() == IPL_HIGH);
+    intr_disable();
+    int oldIPL = intr_setipl(IPL_LOW); // allow interrupts to wake up the idling core
+
+    KASSERT(curthr->kt_state != KT_ON_CPU);
+
+    curcore.kc_queue = queue;
+    last_thread_context = &curthr->kt_ctx;
+
+    context_switch(&curthr->kt_ctx, &curcore.kc_ctx);
+
+    intr_enable();
+    intr_setipl(oldIPL);
 }
 
 /*
@@ -236,7 +274,23 @@ void sched_yield()
  */
 void sched_make_runnable(kthread_t *thr)
 {
-    NOT_YET_IMPLEMENTED("PROCS: sched_make_runnable");
+    // NOT_YET_IMPLEMENTED("PROCS: sched_make_runnable");
+
+    dbg(DBG_SCHED, "Making thread with proc pid %d runnable\n", thr->kt_proc->p_pid);
+    if (curthr)
+    {
+        dbg(DBG_SCHED, "I did this ^^ with thread %d\n", curthr->kt_proc->p_pid);
+    } else {
+        dbg(DBG_SCHED, "I did this ^^ with a null thread!\n");
+    }
+
+    KASSERT(thr != curthr);
+    KASSERT(thr->kt_state != KT_RUNNABLE);
+
+    int oldIPL = intr_setipl(IPL_HIGH);
+    thr->kt_state = KT_RUNNABLE;
+    ktqueue_enqueue(&kt_runq, thr);
+    intr_setipl(oldIPL);
 }
 
 /*
@@ -255,7 +309,12 @@ void sched_make_runnable(kthread_t *thr)
  */
 void sched_sleep_on(ktqueue_t *q)
 {
-    NOT_YET_IMPLEMENTED("PROCS: sched_sleep_on");
+    // NOT_YET_IMPLEMENTED("PROCS: sched_sleep_on");
+
+    int oldIPL = intr_setipl(IPL_HIGH);
+    curthr->kt_state = KT_SLEEP;
+    sched_switch(q);
+    intr_setipl(oldIPL);
 }
 
 /*
@@ -271,7 +330,26 @@ void sched_sleep_on(ktqueue_t *q)
  */
 void sched_wakeup_on(ktqueue_t *q, kthread_t **ktp)
 {
-    NOT_YET_IMPLEMENTED("PROCS: sched_wakeup_on");
+    // NOT_YET_IMPLEMENTED("PROCS: sched_wakeup_on");
+
+    if (q == NULL || sched_queue_empty(q))
+    {
+        if (ktp)
+        {
+            *ktp = NULL;
+        }
+        return;
+    }
+
+    int oldIPL = intr_setipl(IPL_HIGH); // don't allow interrupts while modifying the queue
+    kthread_t *thr = ktqueue_dequeue(q);
+    if (ktp)
+    {
+        *ktp = thr;
+    }
+
+    sched_make_runnable(thr);
+    intr_setipl(oldIPL);
 }
 
 /*
@@ -279,7 +357,12 @@ void sched_wakeup_on(ktqueue_t *q, kthread_t **ktp)
  */
 void sched_broadcast_on(ktqueue_t *q)
 {
-    NOT_YET_IMPLEMENTED("PROCS: sched_broadcast_on");
+    // NOT_YET_IMPLEMENTED("PROCS: sched_broadcast_on");
+
+    while (!sched_queue_empty(q))
+    {
+        sched_make_runnable(ktqueue_dequeue(q));
+    }
 }
 
 /*===============
@@ -360,6 +443,9 @@ void core_switch()
     KASSERT(mapped_paddr == expected_paddr);
 
     curthr = next_thread;
+
+    dbg(DBG_THR, "Switching to curthr thread %d\n", curthr->kt_proc->p_pid);
+
     curthr->kt_state = KT_ON_CPU;
     curproc = curthr->kt_proc;
     context_switch(&curcore.kc_ctx, &curthr->kt_ctx);
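
Usage note (not part of this commit): the sleep and wakeup primitives implemented above are normally combined into a condition-wait loop, where a waiting thread re-checks its predicate around sched_cancellable_sleep_on and a notifying thread calls sched_broadcast_on. The sketch below is a minimal illustration of that pattern, assuming the standard Weenix ktqueue API used in the diff; demo_queue, demo_ready, demo_wait, and demo_notify_all are hypothetical names, and sched_queue_init is assumed to be the usual queue initializer.

/* Illustrative sketch only -- not from sched.c. Assumes the Weenix ktqueue API
 * shown in the diff above; all demo_* identifiers are hypothetical. */
static ktqueue_t demo_queue;        /* assumed initialized once via sched_queue_init(&demo_queue) */
static volatile int demo_ready = 0; /* hypothetical condition flag set by the notifier */

/* Waiter: block until demo_ready is set, returning -EINTR on cancellation. */
static long demo_wait(void)
{
    while (!demo_ready)
    {
        long ret = sched_cancellable_sleep_on(&demo_queue);
        if (ret == -EINTR)
        {
            return ret; /* cancelled before or during the sleep */
        }
        /* Otherwise we were woken by a broadcast; loop and re-check the flag. */
    }
    return 0;
}

/* Notifier: publish the condition, then wake every thread sleeping on the queue. */
static void demo_notify_all(void)
{
    demo_ready = 1;
    sched_broadcast_on(&demo_queue);
}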