#define WAIT_TIMEOUT_MS 100u
#define WORD_SIZE sizeof(void*)
#define MAX_TASKS 1024

typedef long TaskID_T;

/* Sentinel meaning "no task"; also returned when the task table is full. */
#define TASK_NONE ((TaskID_T)-1)

/* Round x up to the next multiple of the machine word size.
   The previous form, ((x) + WORD_SIZE) & ~(WORD_SIZE), only cleared a
   single bit (e.g. WORD_ALIGN(4) == 4 on a 64-bit target), so values
   were not actually word-aligned and WORD_ALIGN(argsz)/WORD_SIZE could
   truncate to zero. */
#define WORD_ALIGN(x) \
    (((x) + (WORD_SIZE - 1)) & ~(WORD_SIZE - 1))
/* Task run-state.  NOTE(review): TaskState_T is not referenced anywhere in
   this chunk — presumably consumed by scheduler code outside this view;
   confirm it is still needed. */
typedef enum {
STATE_READY = 0,
STATE_RUNNING,
} TaskState_T;
/* A task's control block.  The stack and heap share one calloc'd arena
   (memory): the heap grows up from the start, the stack grows down from
   the end. */
typedef struct Task_T {
    long* stack_top;       /* saved stack pointer; NULL doubles as a
                              "context still being saved" spinlock,
                              accessed with atomic_load/atomic_store */
    long* heap_top;        /* bump-allocation cursor within the arena */
    long* memory;          /* stack+heap arena; freed when the task dies */
    struct Task_T* next;   /* intrusive link for TaskQueue_T */
} Task_T;
/* Singly-linked FIFO of tasks, threaded through Task_T.next.
   NOTE(review): the struct body was empty in this revision, which is not
   valid C; head/tail are reconstructed from their uses in Enqueue/Dequeue
   (queue->head, queue->tail) — confirm against the original file. */
typedef struct {
    struct Task_T* head;   /* dequeue end (oldest) */
    struct Task_T* tail;   /* enqueue end (newest) */
} TaskQueue_T;
typedef struct {
- Task_T* task;
- Task_T* idle;
- Task_T* dead;
+ Task_T idle;
+ TaskID_T task;
+ TaskID_T dead;
pthread_t thread;
} CpuState_T;
***************************************/
static pthread_mutex_t ScheduleLock;
static pthread_cond_t ScheduleCond;
+static long CpuCount;
static __thread int CpuID = 0;
static TaskQueue_T ReadyQueue = {0};
static CpuState_T* Running = NULL;
-static int CpuCount;
static long NextTask = 0;
-static Task_T* Tasks[MAX_TASKS];
+static long TaskCount = 0;
+static Task_T Tasks[MAX_TASKS];
/***************************************
Lock and Condition Operations
pthread_cond_timedwait(cond, mutex, &expire_time);
}
-static void WaitForTaskFree(Task_T* prev, Task_T* next)
+static void WaitForTaskFree(TaskID_T prev, TaskID_T next)
{
- if (next && prev != next)
+ if (next != TASK_NONE && prev != next)
{
- while (!atomic_load(&next->stack_top))
+ Task_T* ntask = &Tasks[next];
+ assert(ntask);
+
+ while (!atomic_load(&ntask->stack_top))
{
}
}
Queue Operations
***************************************/
-static void Enqueue(TaskQueue_T* queue, Task_T* task)
+static void Enqueue(TaskQueue_T* queue, TaskID_T tid)
{
+ Task_T* task = &Tasks[tid];
if (queue->tail)
{
queue->tail->next = task;
}
}
-static Task_T* Dequeue(TaskQueue_T* queue)
+static TaskID_T Dequeue(TaskQueue_T* queue)
{
+ TaskID_T tid = TASK_NONE;
Task_T* task = queue->head;
if (task)
{
{
queue->tail = NULL;
}
+ tid = (TaskID_T)(task - Tasks);
}
- return task;
+
+ return tid;
}
/***************************************
***************************************/
// Should eventually handle prioritization
-static void Enter(Task_T* task)
+static void Enter(TaskID_T task)
{
- if (task && task != Running[CpuID].idle)
+ if (task != TASK_NONE)
{
Enqueue(&ReadyQueue, task);
pthread_cond_signal(&ScheduleCond);
}
}
-static Task_T* Select(void)
+static Task_T* LoadNextTask(void)
{
- Task_T* task = Dequeue(&ReadyQueue);
- if (!task)
+ Task_T* next_task;
+ TaskID_T task = Dequeue(&ReadyQueue);
+ assert(task == TASK_NONE || task < MAX_TASKS);
+ if (task != TASK_NONE)
{
- task = Running[CpuID].idle;
+ pthread_cond_signal(&ScheduleCond);
+ Running[CpuID].task = task;
+ next_task = &Tasks[task];
}
else
{
- pthread_cond_signal(&ScheduleCond);
+ next_task = &Running[CpuID].idle;
}
- return task;
+ return next_task;
}
/***************************************
the stack top pointer is used as a spinlock to ensure
another thread does not start the task before we have
saved off our context */
- Task_T* prev = Running[CpuID].task;
- Running[CpuID].task = NULL;
- atomic_store(&prev->stack_top, NULL);
+ Task_T* prev_task;
+ TaskID_T prev = Running[CpuID].task;
+ Running[CpuID].task = TASK_NONE;
+ if (prev != TASK_NONE)
+ {
+ atomic_store(&Tasks[prev].stack_top, NULL);
+ prev_task = &Tasks[prev];
+ }
+ else
+ {
+ prev_task = &Running[CpuID].idle;
+ }
- /* decide what to do with the task */
+ /* decide what to do with the old task */
if (dead)
{
Running[CpuID].dead = prev;
{
Enter(prev);
}
- Running[CpuID].task = Select();
+
+ /* select the next task to run */
+ Task_T* next_task = LoadNextTask();
+
ReleaseLock();
+
WaitForTaskFree(prev, Running[CpuID].task);
- SwapTask(prev, Running[CpuID].task);
- if (Running[CpuID].dead)
+ SwapTask(prev_task, next_task);
+ if (Running[CpuID].dead != TASK_NONE)
{
- free(Running[CpuID].dead);
- Running[CpuID].dead = NULL;
+ WaitForTaskFree(prev, Running[CpuID].dead);
+ free(Tasks[Running[CpuID].dead].memory);
+ Running[CpuID].dead = TASK_NONE;
}
}
PickNewTask(true);
}
-static Task_T* CreateTask(void (*task_fn)(void*), void* arg, long int argsz, long int memsz)
+static TaskID_T AllocateTaskID(void)
+{
+ AcquireLock();
+ TaskID_T tid = TASK_NONE;
+ if (TaskCount < MAX_TASKS)
+ {
+ TaskCount++;
+ tid = NextTask++;
+ }
+ else
+ {
+ /* TODO: iterate over table to find free slot... */
+ assert(!"out of task IDs");
+ }
+ ReleaseLock();
+ return tid;
+}
+
+static void InitializeTask(Task_T* task, void (*task_fn)(void*), void* arg, long int argsz, long int memsz)
{
/* allocate a new task. default memory size is used if 0 provided */
if (memsz == 0)
memsz = 32768;
}
memsz = WORD_ALIGN(memsz);
- Task_T* task = calloc(1, sizeof(Task_T) + memsz);
+ task->memory = calloc(1, WORD_ALIGN(memsz));
task->heap_top = task->memory;
- task->stack_top = (void*)((size_t)task->memory + (size_t)memsz - sizeof(long));
+ atomic_store(&task->stack_top,
+ task->memory + (memsz/WORD_SIZE) - 1);
/* copy the argument to the task's heap */
void* newarg = (arg ? task->memory : NULL);
- task->heap_top = (void*)((ssize_t)task->heap_top + WORD_ALIGN(argsz));
- memcpy(newarg, arg, argsz);
+ task->heap_top = task->heap_top + (WORD_ALIGN(argsz)/WORD_SIZE);
+ if (newarg && argsz)
+ {
+ memcpy(newarg, arg, argsz);
+ }
/* populate the initial context on the stack so SwapTasks works */
*(--task->stack_top) = (long)Kernel_Exit; // coroutine cleanup
{
*(--task->stack_top) = 0xdeadbeef; // initial values for saved registers
}
-
- return task;
}
-void Kernel_Spawn(void (*task_fn)(void*), void* arg, long int argsz, long int memsz)
+TaskID_T Kernel_Spawn(void (*task_fn)(void*), void* arg, long int argsz, long int memsz)
{
- Task_T* task = CreateTask(task_fn, arg, argsz, memsz);
- AcquireLock();
- Enter(task);
- ReleaseLock();
+ TaskID_T tid = AllocateTaskID();
+ if (tid != TASK_NONE)
+ {
+ InitializeTask(&Tasks[tid], task_fn, arg, argsz, memsz);
+ AcquireLock();
+ Enter(tid);
+ ReleaseLock();
+ }
+ return tid;
}
static void CpuIdle(void* arg)
{
+ (void)arg;
while(1)
{
Kernel_Yield();
/* Per-CPU worker thread entry point; arg carries this CPU's index. */
static void* CpuMain(void* arg)
{
    CpuID = (long int)arg;
    Running[CpuID].task = TASK_NONE;
    /* BUGFIX: Running[] is calloc'd, leaving dead == 0 — which is a
       *valid* task ID.  The scheduler tests dead != TASK_NONE before
       freeing, so without this the first pass would free Tasks[0]'s
       memory while task 0 could still be alive. */
    Running[CpuID].dead = TASK_NONE;
    InitializeTask(&Running[CpuID].idle, CpuIdle, 0, 0, 0);
    StartTask(&Running[CpuID].idle);
    return NULL; /* unreachable */
}
pthread_mutex_init(&ScheduleLock, 0);
pthread_cond_init(&ScheduleCond, NULL);
CpuCount = sysconf(_SC_NPROCESSORS_ONLN);
+// CpuCount = 1;
Running = calloc(CpuCount, sizeof(CpuState_T));
for (long int i = 0; i < CpuCount; i++)
{
bool done = true;
for (long int i = 0; i < CpuCount; i++)
{
- done = done && (Running[i].task == Running[i].idle);
+ done = done && (Running[i].task == TASK_NONE);
}
if (done)
{
}
}
-void* Kernel_Allocate(size_t sz)
+void* Kernel_Allocate(long sz)
{
- Task_T* task = Running[CpuID].task;
+ Task_T* task = &Tasks[Running[CpuID].task];
task->stack_top = &(long){0};
sz = WORD_ALIGN(sz);
- ssize_t free_sz = (ssize_t)task->stack_top - (ssize_t)task->heap_top;
+ long free_sz = (long)task->stack_top - (long)task->heap_top;
assert(sz <= free_sz);
void* ptr = task->heap_top;
- task->heap_top = (void*)((ssize_t)task->heap_top + sz);
+ task->heap_top = task->heap_top + (sz / WORD_SIZE);
return ptr;
}
(void)argv;
Kernel();
-
for (int i = 0; i < 100; i++)
{
Kernel_Spawn(task1, &i, sizeof(int), 0);
}
-
- /* wait for all jobs to be done */
Kernel_Run();
return 0;