/******************************************************************************
|
|
*
|
|
* Copyright (c) 2017-2020 by Löwenware Ltd
|
|
 * Please refer to the LICENSE file for legal information
|
|
*
|
|
******************************************************************************/
|
|
|
|
/**
|
|
* @file task.c
|
|
* @author Ilja Kartašov <ik@lowenware.com>
|
|
 * @brief Task creation and cooperative scheduling
|
|
*
|
|
* @see https://lowenware.com/
|
|
*/
|
|
|
|
#include <stdlib.h>
|
|
#include <aarch64/aarch64.h>
|
|
#include <drivers/soc/bcm2837/bcm2837.h>
|
|
#include "memory.h"
|
|
#include "log.h"
|
|
#include "task.h"
|
|
|
|
#define TASK_STATE_RUNNING 0
|
|
|
|
#ifndef CONFIG_IDLE_TASK_STACK_SIZE
|
|
#define CONFIG_IDLE_TASK_STACK_SIZE 0x4000
|
|
#endif
|
|
|
|
/* Task control block. Lives at the base of the same memory page that holds
 * the task's stack (see Task_create); the idle task is the exception and is
 * statically allocated. */
struct Task {
	void *sp;            /* saved stack pointer; top of the saved register frame */
	void *stackStart;    /* initial stack pointer value at task creation */
	uint32_t pid;        /* process id; the idle task owns pid 1 */
	uint32_t stackSize;  /* usable stack bytes within the page */
	uint32_t lock;       /* scheduler lock nesting depth; non-zero suppresses
	                      * ISR-driven rescheduling (see Task_scheduleFromISR) */
	uint32_t counter;    /* remaining time-slice ticks; refilled from cycles
	                      * by scheduleNext when all tasks are exhausted */
	uint32_t cycles;     /* time-slice length used to refill counter */
	int32_t priority;    /* larger value wins in scheduleNext */
	uint32_t state;      /* TASK_STATE_RUNNING (0) means runnable */
	char name[CONFIG_TASK_MAX_NAME_LEN + 1]; /* NUL-terminated task name */
	struct Task *next;   /* singly-linked list of all tasks, head = idle task */
};
|
|
|
|
/* Register frame placed on a task's stack at creation and consumed by the
 * context-switch code.
 * NOTE(review): the layout (and the 272-byte reservation in Task_create)
 * must stay in sync with the assembly in aarch64.S — confirm there. */
struct __attribute__((packed)) TaskContext {
	uint64_t x[31];      /* general-purpose registers x0..x30 (x30 = lr) */
	uint64_t elr_el1;    /* exception return address */
	uint64_t spsr_el1;   /* saved processor state */
};
|
|
|
|
/* Scheduler state. The statically allocated idle task is the permanent head
 * of the task list; m_lastTask is the tail used for O(1) append in
 * Task_create. Both current/last pointers are set by Task_initSheduler. */
static struct Task *m_currentTask = NULL
	, *m_lastTask = NULL
	, m_idleTask = {
		.sp = NULL
		, .stackStart = NULL
		, .stackSize = CONFIG_IDLE_TASK_STACK_SIZE
		, .pid = 1
		, .lock = 1    /* born locked; Task_initSheduler drops this to 0 */
		, .counter = 1
		, .cycles = 1
		, .priority = 0
		, .state = TASK_STATE_RUNNING
		, .name = {'I', 'D', 'L', 'E', 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
		, .next = NULL
	};

/* Last assigned pid; pre-incremented in Task_create, so the first created
 * task gets pid 2 (the idle task holds pid 1). */
static uint32_t m_pid = 1;
|
|
|
|
/**
 * Selects the next task to run.
 *
 * Scans the task list for the runnable task with the highest priority that
 * still has time-slice ticks left. When every runnable task's counter has
 * reached zero, refills all counters from their cycles values and scans
 * again; since the idle task is always runnable, the loop terminates and
 * the function never returns NULL.
 */
static struct Task *
scheduleNext(void)
{
	struct Task *i, *next = NULL;
	int32_t priority = -1;

	Log_putS("scheduleNext\r\n ");
	for (;;) {
		/* check urgent tasks */
		for (i = &m_idleTask; i != NULL; i = i->next) {
			if (i->state != TASK_STATE_RUNNING)
				continue;

			if (i->priority > priority && i->counter) {
				priority = i->priority;
				next = i;
			}
			/* NOTE(review): suspicious branch — TASK_STATE_RUNNING is 0, so
			 * !i->state is always true for tasks that survived the filter
			 * above, and i->counter (uint32_t, ticks) is compared against
			 * priority (int32_t, a priority), forcing a signed/unsigned
			 * conversion (priority == -1 becomes UINT32_MAX) and mixing
			 * units. It can also override the higher-priority pick made
			 * above. Verify the intended condition. */
			if (!i->state && i->counter > priority) {
				priority = i->priority;
				next = i;
			}
		}

		if (next) {
			break;
		}

		/* No candidate: every runnable task used up its slice — refill all
		 * counters and rescan. */
		for (i = &m_idleTask; i != NULL; i = i->next) {
			i->counter = i->cycles;
		}
	}

	Log_putS("\t switch: ");
	Log_putU((uint64_t)next->pid, 10);
	Log_putS("\n");

	return next;
}
|
|
|
|
void
|
|
Task_initSheduler(void)
|
|
{
|
|
struct Task *idleTask = &m_idleTask;
|
|
m_currentTask = idleTask;
|
|
m_lastTask = idleTask;
|
|
Task_unlockScheduler();
|
|
}
|
|
|
|
PID
|
|
Task_create(TaskCallback callback, void *arg)
|
|
{
|
|
struct TaskContext *ctx;
|
|
struct Task *task = Memory_getPage();
|
|
|
|
if (!task)
|
|
return -1;
|
|
|
|
task->sp = (void *)task + MEMORY_PAGE_SIZE - 272;
|
|
task->stackStart = task->sp;
|
|
task->stackSize = MEMORY_PAGE_SIZE - sizeof(*task);
|
|
task->lock = 1;
|
|
task->counter = 1;
|
|
task->cycles = 1;
|
|
task->pid = ++m_pid;
|
|
task->priority = 0;
|
|
task->state = TASK_STATE_RUNNING;
|
|
task->name[0] = 'N';
|
|
task->name[1] = 'O';
|
|
task->name[2] = 'N';
|
|
task->name[3] = 'E';
|
|
task->name[4] = 0;
|
|
task->next = 0;
|
|
|
|
ctx = (struct TaskContext *) task->sp;
|
|
ctx->x[0] = (uint64_t) arg;
|
|
ctx->x[1] = (uint64_t) callback;
|
|
ctx->x[30] = (uint64_t) AArch64_startTask;
|
|
ctx->elr_el1 = (uint64_t) AArch64_startTask;
|
|
|
|
Task_lockScheduler();
|
|
m_lastTask->next = task;
|
|
m_lastTask = task;
|
|
Task_unlockScheduler();
|
|
|
|
return task->pid;
|
|
}
|
|
|
|
/**
 * Cooperatively gives up the CPU.
 *
 * Zeroes the current task's time-slice counter, asks the scheduler for the
 * next task and switches context to it. When the current task is the only
 * candidate, waits for an event (WFE) instead of switching.
 */
void
Task_yield(void)
{
	struct Task *next, *prev = m_currentTask;

	Log_putS("Task_yield()\r\n");
	Task_lockScheduler();
	prev->counter = 0;    /* surrender the remainder of this time slice */

	next = scheduleNext();

	if (next != prev) {
		/* m_currentTask must be updated before the switch: after
		 * AArch64_switchContext we are running on the next task's stack and
		 * it must see itself as current. */
		m_currentTask = next;
		AArch64_switchContext(prev, next);
		/* newly created tasks never exit here, that is why in AArch64_startTask
		 * there is own Task_unlockScheduler() call
		 * */
	} else {
		__asm__("WFE");    /* nothing else runnable: sleep until an event */
	}

	Task_unlockScheduler();
}
|
|
|
|
void
|
|
Task_lockScheduler(void)
|
|
{
|
|
m_currentTask->lock++;
|
|
}
|
|
|
|
void
|
|
Task_unlockScheduler(void)
|
|
{
|
|
m_currentTask->lock--;
|
|
}
|
|
|
|
/**
 * Timer-interrupt hook: returns the stack pointer of the task to switch to,
 * or NULL to keep running the current task.
 *
 * NOTE(review): the entire preemption path below is compiled out with
 * '#if 0', so this currently always returns NULL and scheduling happens
 * only cooperatively via Task_yield().
 */
void *
Task_scheduleFromISR(void)
{
	void *sp = NULL;

# if 0
	if (m_currentTask->counter)
		m_currentTask->counter--;

	if (!m_currentTask->lock) {
		struct Task *next;
		Log_putS("Task_scheduleFromISR\r\n");

		Task_lockScheduler();

		next = scheduleNext();
		if (next != m_currentTask) {
			m_currentTask = next;
			sp = next->sp;
		}
		/* unlock call could be moved to aarch64.S interrupt handler in case of
		 * issue
		 * */
		Task_unlockScheduler();
	}
#endif

	return sp;
}
|