aboutsummaryrefslogtreecommitdiff
path: root/core/thread
diff options
context:
space:
mode:
Diffstat (limited to 'core/thread')
-rw-r--r--core/thread/exit_thread.c30
-rw-r--r--core/thread/idle_thread.c27
-rw-r--r--core/thread/kill_thread.c42
-rw-r--r--core/thread/mbox.c63
-rw-r--r--core/thread/root_thread.c11
-rw-r--r--core/thread/schedule.c91
-rw-r--r--core/thread/sem_asm.S16
-rw-r--r--core/thread/semaphore.c87
-rw-r--r--core/thread/start_thread.c69
-rw-r--r--core/thread/thread_asm.S37
-rw-r--r--core/thread/timeout.c41
11 files changed, 514 insertions, 0 deletions
diff --git a/core/thread/exit_thread.c b/core/thread/exit_thread.c
new file mode 100644
index 0000000..d9fd83a
--- /dev/null
+++ b/core/thread/exit_thread.c
@@ -0,0 +1,30 @@
+#include <limits.h>
+#include <stdlib.h>
+#include <klibc/compiler.h>
+#include "thread.h"
+#include "core.h"
+
+__noreturn __exit_thread(void)
+{
+ struct thread *curr = current();
+
+ cli();
+
+ /* Remove from the linked list */
+ curr->list.prev->next = curr->list.next;
+ curr->list.next->prev = curr->list.prev;
+
+ /* Free allocated stacks (note: free(NULL) is permitted and safe). */
+ free(curr->stack);
+ free(curr->rmstack);
+
+ /*
+ * Note: __schedule() can explictly handle the case where
+ * curr isn't part of the linked list anymore, as long as
+ * curr->list.next is still valid.
+ */
+ __schedule();
+
+ /* We should never get here */
+ kaboom();
+}
diff --git a/core/thread/idle_thread.c b/core/thread/idle_thread.c
new file mode 100644
index 0000000..8faa071
--- /dev/null
+++ b/core/thread/idle_thread.c
@@ -0,0 +1,27 @@
+#include "thread.h"
+#include <limits.h>
+#include <sys/cpu.h>
+
/* No-op used when no idle hook has been registered. */
static void default_idle_thread_hook(void)
{
    /* intentionally empty */
}

/*
 * Called once per pass through the idle loop; replace this pointer to
 * do background work while the system is otherwise idle.
 */
void (*idle_thread_hook)(void) = default_idle_thread_hook;
+
+static void idle_thread_func(void *dummy)
+{
+ (void)dummy;
+
+ for (;;) {
+ cli();
+ idle_thread_hook();
+ __schedule();
+ asm volatile("sti ; hlt" : : : "memory");
+ }
+}
+
+void start_idle_thread(void)
+{
+ start_thread("idle", 4096, IDLE_THREAD_PRIORITY, idle_thread_func, NULL);
+}
+
diff --git a/core/thread/kill_thread.c b/core/thread/kill_thread.c
new file mode 100644
index 0000000..c22517c
--- /dev/null
+++ b/core/thread/kill_thread.c
@@ -0,0 +1,42 @@
+#include "thread.h"
+#include <limits.h>
+
+extern void __exit_thread(void);
+typedef void (*func_ptr)(void);
+
+void kill_thread(struct thread *thread)
+{
+ irq_state_t irq;
+ struct thread_block *block;
+
+ if (thread == current())
+ __exit_thread();
+
+ irq = irq_save();
+
+ /*
+ * Muck with the stack so that the next time the thread is run then
+ * we end up going to __exit_thread.
+ */
+ thread->esp->eip = __exit_thread;
+ thread->prio = INT_MIN;
+
+ block = thread->blocked;
+ if (block) {
+ struct semaphore *sem = block->semaphore;
+ /* Remove us from the queue and increase the count */
+ block->list.next->prev = block->list.prev;
+ block->list.prev->next = block->list.next;
+ sem->count++;
+
+ thread->blocked = NULL;
+ block->timed_out = true; /* Fake an immediate timeout */
+ }
+
+ __schedule();
+
+ irq_restore(irq);
+}
+
+
+
diff --git a/core/thread/mbox.c b/core/thread/mbox.c
new file mode 100644
index 0000000..d1c640a
--- /dev/null
+++ b/core/thread/mbox.c
@@ -0,0 +1,63 @@
+/*
+ * mbox.c
+ *
+ * Simple thread mailbox interface
+ */
+
+#include "thread.h"
+#include "mbox.h"
+#include <errno.h>
+
+void mbox_init(struct mailbox *mbox, size_t size)
+{
+ if (!!mbox) {
+ sem_init(&mbox->prod_sem, size); /* All slots empty */
+ sem_init(&mbox->cons_sem, 0); /* No slots full */
+ sem_init(&mbox->head_sem, 1); /* Head mutex */
+ sem_init(&mbox->tail_sem, 1); /* Tail mutex */
+
+ mbox->wrap = &mbox->data[size];
+ mbox->head = &mbox->data[0];
+ mbox->tail = &mbox->data[0];
+ }
+};
+
+int mbox_post(struct mailbox *mbox, void *msg, mstime_t timeout)
+{
+ if (!mbox_is_valid(mbox))
+ return ENOMEM;
+ if (sem_down(&mbox->prod_sem, timeout) == (mstime_t)-1)
+ return ENOMEM;
+ sem_down(&mbox->head_sem, 0);
+
+ *mbox->head = msg;
+ mbox->head++;
+ if (mbox->head == mbox->wrap)
+ mbox->head = &mbox->data[0];
+
+ sem_up(&mbox->head_sem);
+ sem_up(&mbox->cons_sem);
+ return 0;
+}
+
+mstime_t mbox_fetch(struct mailbox *mbox, void **msg, mstime_t timeout)
+{
+ mstime_t t;
+
+ if (!mbox)
+ return -1;
+ t = sem_down(&mbox->cons_sem, timeout);
+ if (t == (mstime_t)-1)
+ return -1;
+ t += sem_down(&mbox->tail_sem, 0);
+
+ if (msg)
+ *msg = *mbox->tail;
+ mbox->tail++;
+ if (mbox->tail == mbox->wrap)
+ mbox->tail = &mbox->data[0];
+
+ sem_up(&mbox->tail_sem);
+ sem_up(&mbox->prod_sem);
+ return t;
+}
diff --git a/core/thread/root_thread.c b/core/thread/root_thread.c
new file mode 100644
index 0000000..2bba7c2
--- /dev/null
+++ b/core/thread/root_thread.c
@@ -0,0 +1,11 @@
+#include "thread.h"
+
/*
 * The "root" thread: the execution context that was already running
 * when the threading system came up.  Statically allocated -- its
 * stack/rmstack fields are zero, so it must never be freed via the
 * exit/kill paths.
 */
struct thread __root_thread = {
    .thread_magic = THREAD_MAGIC,
    .name = "root",
    /* Circular doubly-linked run list, initially containing only us */
    .list = { .next = &__root_thread.list, .prev = &__root_thread.list },
    .blocked = NULL,		/* Runnable */
    .prio = 0,
};

/* The currently executing thread; starts out as the root thread. */
struct thread *__current = &__root_thread;
diff --git a/core/thread/schedule.c b/core/thread/schedule.c
new file mode 100644
index 0000000..5a426f1
--- /dev/null
+++ b/core/thread/schedule.c
@@ -0,0 +1,91 @@
+#include <klibc/compiler.h>
+#include <sys/cpu.h>
+#include "thread.h"
+#include "core.h"
+#include <dprintf.h>
+
/*
 * Optional callback run at the top of every scheduling pass (e.g. to
 * refresh the state scheduling decisions are based on).  May be NULL.
 */
void (*sched_hook_func)(void);

/*
 * __schedule() should only be called with interrupts locked out!
 *
 * Picks the runnable thread with the lowest prio value (lower value =
 * higher priority, as seen from the '<' comparison below) and switches
 * to it via __switch_to().
 */
void __schedule(void)
{
    static bool in_sched_hook;
    struct thread *curr = current();
    struct thread *st, *nt, *best;

#if DEBUG
    /* 0x200 is EFLAGS.IF: catch callers that left interrupts enabled */
    if (__unlikely(irq_state() & 0x200)) {
	dprintf("In __schedule with interrupts on!\n");
	kaboom();
    }
#endif

    /*
     * Are we called from inside sched_hook_func()? If so we'll
     * schedule anyway on the way out.
     */
    if (in_sched_hook)
	return;

    dprintf("Schedule ");

    /* Possibly update the information on which we make
     * scheduling decisions.
     */
    if (sched_hook_func) {
	in_sched_hook = true;
	sched_hook_func();
	in_sched_hook = false;
    }

    /*
     * The unusual form of this walk is because we have to start with
     * the thread *following* curr, and curr may not actually be part
     * of the list anymore (in the case of __exit_thread).
     */
    best = NULL;
    nt = st = container_of(curr->list.next, struct thread, list);
    do {
	/* Cheap corruption check on every node we visit */
	if (__unlikely(nt->thread_magic != THREAD_MAGIC)) {
	    dprintf("Invalid thread on thread list %p magic = 0x%08x\n",
		    nt, nt->thread_magic);
	    kaboom();
	}

	dprintf("Thread %p (%s) ", nt, nt->name);
	if (!nt->blocked) {
	    dprintf("runnable priority %d\n", nt->prio);
	    /* Strict '<': ties go to the earliest thread in list order */
	    if (!best || nt->prio < best->prio)
		best = nt;
	} else {
	    dprintf("blocked\n");
	}
	nt = container_of(nt->list.next, struct thread, list);
    } while (nt != st);

    if (!best)
	kaboom();		/* No runnable thread */

    if (best != curr) {
	uint64_t tsc;

	/* Timestamp used only for the debug trace */
	asm volatile("rdtsc" : "=A" (tsc));

	dprintf("@ %llu -> %p (%s)\n", tsc, best, best->name);
	__switch_to(best);
    } else {
	dprintf("no change\n");
    }
}
+
+/*
+ * This can be called from "normal" code...
+ */
+void thread_yield(void)
+{
+ irq_state_t irq = irq_save();
+ __schedule();
+ irq_restore(irq);
+}
diff --git a/core/thread/sem_asm.S b/core/thread/sem_asm.S
new file mode 100644
index 0000000..ce67471
--- /dev/null
+++ b/core/thread/sem_asm.S
@@ -0,0 +1,16 @@
# Fast paths for the counting semaphores in semaphore.c.
# NOTE(review): assumes a register calling convention with the
# struct semaphore * in %eax and the count as the structure's first
# member (the slow paths below receive the same registers) -- confirm
# against thread.h and the build's regparm ABI.

	.globl sem_down
	.type sem_down, @function
sem_down:
	decl (%eax)		# Claim one unit of the count
	js __sem_down_slow	# Went negative: contended, block in C
	xorl %eax, %eax		# Uncontended: return 0 (no time waited)
	ret
	.size sem_down, .-sem_down

	.globl sem_up
	.type sem_up, @function
sem_up:
	incl (%eax)		# Release one unit of the count
	jle __sem_up_slow	# Still <= 0: sleepers exist, wake one in C
	ret
	.size sem_up, .-sem_up
diff --git a/core/thread/semaphore.c b/core/thread/semaphore.c
new file mode 100644
index 0000000..c99af9c
--- /dev/null
+++ b/core/thread/semaphore.c
@@ -0,0 +1,87 @@
+#include <sys/cpu.h>
+#include "thread.h"
+
+void sem_init(struct semaphore *sem, int count)
+{
+ if (!!sem) {
+ sem->list.next = sem->list.prev = &sem->list;
+ sem->count = count;
+ }
+}
+
/*
 * Slow path for sem_down(): the assembly fast path already decremented
 * sem->count and saw it go negative.  Returns the number of ms spent
 * sleeping, 0 if no sleep was needed, or (mstime_t)-1 on timeout or
 * invalid semaphore.  Timeout semantics: 0 = wait forever,
 * -1 = fail immediately, otherwise a relative timeout in ms.
 */
mstime_t __sem_down_slow(struct semaphore *sem, mstime_t timeout)
{
    irq_state_t irq;
    mstime_t rv;

    irq = irq_save();

    if (!sem_is_valid(sem)) {
	rv = -1;
    } else if (sem->count >= 0) {
	/* Something already freed the semaphore on us */
	rv = 0;
    } else if (timeout == -1) {
	/* Immediate timeout */
	sem->count++;		/* Undo the fast path's decrement */
	rv = -1;
    } else {
	/* Put the thread to sleep... */

	/*
	 * The block lives on our stack; that is safe because we do
	 * not return until someone (__sem_up_slow, kill_thread or
	 * __thread_process_timeouts) has dequeued it.
	 */
	struct thread_block block;
	struct thread *curr = current();
	mstime_t now = ms_timer();

	block.thread = curr;
	block.semaphore = sem;
	block.block_time = now;
	block.timeout = timeout ? now+timeout : 0;  /* 0 = no deadline */
	block.timed_out = false;

	curr->blocked = &block;

	/* Add to the end of the wakeup list */
	block.list.prev = sem->list.prev;
	block.list.next = &sem->list;
	sem->list.prev = &block.list;
	block.list.prev->next = &block.list;

	__schedule();

	/* Woken: either timed out, or report how long we waited */
	rv = block.timed_out ? -1 : ms_timer() - block.block_time;
    }

    irq_restore(irq);
    return rv;
}
+
+void __sem_up_slow(struct semaphore *sem)
+{
+ irq_state_t irq;
+ struct thread_list *l;
+
+ irq = irq_save();
+
+ /*
+ * It's possible that something did a down on the semaphore, but
+ * didn't get to add themselves to the queue just yet. In that case
+ * we don't have to do anything, since the bailout clause in
+ * __sem_down_slow will take care of it.
+ */
+ if (!!sem) {
+ l = sem->list.next;
+ if (l != &sem->list) {
+ struct thread_block *block;
+ block = container_of(l, struct thread_block, list);
+
+ sem->list.next = block->list.next;
+ block->list.next->prev = &sem->list;
+
+ block->thread->blocked = NULL;
+
+ __schedule();
+ }
+ }
+
+ irq_restore(irq);
+}
diff --git a/core/thread/start_thread.c b/core/thread/start_thread.c
new file mode 100644
index 0000000..2e4320a
--- /dev/null
+++ b/core/thread/start_thread.c
@@ -0,0 +1,69 @@
+#include <string.h>
+#include <stdlib.h>
+#include <com32.h>
+#include "core.h"
+#include "thread.h"
+
#define REAL_MODE_STACK_SIZE 4096
#define MIN_STACK_SIZE 16384
#define THREAD_ALIGN 64 /* Thread alignment */

/* Assembly trampoline that invokes the thread function (thread_asm.S) */
extern void __start_thread(void);

/*
 * Create and start a new thread.
 *
 * name       - thread name (the pointer is stored, not copied)
 * stack_size - requested stack size; raised to MIN_STACK_SIZE and
 *              rounded up to a multiple of THREAD_ALIGN
 * prio       - scheduling priority (lower value = higher priority)
 * start_func - function the new thread runs; the thread exits if it
 *              returns
 * func_arg   - argument passed to start_func
 *
 * Returns the new thread, or NULL on allocation failure.
 *
 * Layout: the struct thread lives at the base of a single allocation,
 * with the thread's stack above it.  An initial register frame
 * (struct thread_stack) is built at the very top of the stack so that
 * __switch_to() can "return" into __start_thread.
 */
struct thread *start_thread(const char *name, size_t stack_size, int prio,
			    void (*start_func)(void *), void *func_arg)
{
    irq_state_t irq;
    struct thread *curr, *t;
    char *stack, *rmstack;
    const size_t thread_mask = THREAD_ALIGN - 1;
    struct thread_stack *sp;

    if (stack_size < MIN_STACK_SIZE)
	stack_size = MIN_STACK_SIZE;

    /* Round up to the alignment granularity */
    stack_size = (stack_size + thread_mask) & ~thread_mask;
    stack = malloc(stack_size + sizeof(struct thread));
    if (!stack)
	return NULL;
    /* Low-memory stack for real-mode (BIOS) calls made by this thread */
    rmstack = lmalloc(REAL_MODE_STACK_SIZE);
    if (!rmstack) {
	free(stack);
	return NULL;
    }

    t = (struct thread *)stack;
    memset(t, 0, sizeof *t);
    t->stack = stack;		/* Base of the allocation, kept for freeing */
    t->rmstack = rmstack;
    stack += sizeof(struct thread);

    /* sp allocated from the end of the stack */
    sp = (struct thread_stack *)(stack + stack_size) - 1;
    t->esp = sp;

    /*
     * Initial saved frame, unwound by __switch_to(); field order must
     * match the pushes in thread_asm.S.
     */
    sp->errno = 0;
    sp->rmss = SEG(rmstack);
    sp->rmsp = REAL_MODE_STACK_SIZE;	/* Top of the real-mode stack */
    sp->esi = (size_t)start_func;	/* __start_thread calls *%esi... */
    sp->edi = (size_t)func_arg;		/* ...with %edi as its argument */
    sp->ebx = irq_state(); /* Inherit the IRQ state from the spawner */
    sp->eip = __start_thread;
    t->prio = prio;
    t->name = name;

    irq = irq_save();
    curr = current();

    /* Only now does the scheduler's magic check accept this thread */
    t->thread_magic = THREAD_MAGIC;

    /* Insert right after the current thread on the circular run list */
    t->list.prev = &curr->list;
    t->list.next = curr->list.next;
    curr->list.next = &t->list;
    t->list.next->prev = &t->list;

    /* Let the scheduler decide whether the new thread runs immediately */
    __schedule();

    irq_restore(irq);
    return t;
}
diff --git a/core/thread/thread_asm.S b/core/thread/thread_asm.S
new file mode 100644
index 0000000..ec3e0ad
--- /dev/null
+++ b/core/thread/thread_asm.S
@@ -0,0 +1,37 @@
# __switch_to -- switch execution to another thread.
# NOTE(review): assumes a register calling convention with the target
# struct thread * in %eax, and that the saved-%esp slot is the first
# member of struct thread (the bare (%edx)/(%eax) accesses) -- confirm
# against thread.h.
# The frame pushed here must mirror struct thread_stack as initialized
# by start_thread(): callee-saved registers plus the per-thread errno
# and real-mode SS:SP words.
	.globl __switch_to
	.type __switch_to, @function
__switch_to:
	movl __current, %edx	# %edx = outgoing thread
	pushl %ebx
	pushl %ebp
	pushl %esi
	pushl %edi
	pushl RealModeSSSP	# Per-thread real-mode SS:SP
	pushl errno /* Hack! */
	movl %esp, (%edx)	# Save our stack pointer into old thread

	movl %eax, __current	# Incoming thread becomes current
	movl (%eax), %esp	# Adopt its saved stack pointer
	popl errno
	popl RealModeSSSP
	popl %edi
	popl %esi
	popl %ebp
	popl %ebx
	ret			# "Return" on the new thread's stack
	.size __switch_to, .-__switch_to
+
# __start_thread -- first code executed by a newly created thread.
# start_thread() builds a frame so that __switch_to() "returns" here
# with %esi = thread function, %edi = its argument, and %ebx = the
# spawner's EFLAGS (for the interrupt state).
	.globl __start_thread
	.type __start_thread, @function
__start_thread:
	movl %edi, %eax /* Thread function argument */
				# NOTE(review): argument handed over in
				# %eax -- regparm convention assumed

	pushl $0 /* For gdb's benefit */
	movl %esp, %ebp /* For gdb's benefit */

	pushl %ebx /* Set up the flags/interrupt state */
	popfl

	call *%esi /* Run the desired function */
	jmp __exit_thread /* If we get here, kill the thread */
	.size __start_thread, .-__start_thread
diff --git a/core/thread/timeout.c b/core/thread/timeout.c
new file mode 100644
index 0000000..409ad6d
--- /dev/null
+++ b/core/thread/timeout.c
@@ -0,0 +1,41 @@
+/*
+ * timeout.c
+ *
+ */
+
+#include "thread.h"
+
+/*
+ * __thread_process_timeouts()
+ *
+ * Look for threads that have timed out. This should be called
+ * under interrupt lock, before calling __schedule().
+ */
/*
 * Scan the run list for blocked threads whose timeout deadline has
 * passed, dequeue them from their semaphore and mark them timed out.
 * Must be called under interrupt lock (see header comment above).
 */
void __thread_process_timeouts(void)
{
    struct thread *curr = current();
    struct thread_list *tp;
    struct thread *t;
    mstime_t now = ms_timer();
    struct thread_block *block;
    mstime_t timeout;

    /* The current thread is obviously running, so no need to check... */
    for (tp = curr->list.next; tp != &curr->list; tp = tp->next) {
	t = container_of(tp, struct thread, list);
	/* block->timeout == 0 means "no deadline" (see __sem_down_slow) */
	if ((block = t->blocked) && (timeout = block->timeout)) {
	    /* Signed difference tolerates ms_timer() wraparound */
	    if ((mstimediff_t)(timeout - now) <= 0) {
		struct semaphore *sem = block->semaphore;
		/* Remove us from the queue and increase the count */
		block->list.next->prev = block->list.prev;
		block->list.prev->next = block->list.next;
		sem->count++;

		/* Wake the thread with a timeout indication */
		t->blocked = NULL;
		block->timed_out = true;

		__schedule(); /* Normally sets just __need_schedule */
	    }
	}
    }
}