2020-01-17 10:13:01 +01:00
|
|
|
/******************************************************************************
|
|
|
|
*
|
|
|
|
* Copyright (c) 2017-2020 by Löwenware Ltd
|
|
|
|
* Please, refer LICENSE file for legal information
|
|
|
|
*
|
|
|
|
******************************************************************************/
|
|
|
|
|
|
|
|
/**
|
|
|
|
* @file task.c
|
|
|
|
* @author Ilja Kartašov <ik@lowenware.com>
|
|
|
|
 * @brief Task management: creation, cooperative yield and preemptive scheduling
|
|
|
|
*
|
|
|
|
* @see https://lowenware.com/
|
|
|
|
*/
|
|
|
|
|
2020-01-20 09:14:13 +01:00
|
|
|
#include <stdlib.h>
|
|
|
|
#include <aarch64/aarch64.h>
|
|
|
|
#include <drivers/soc/bcm2837/bcm2837.h>
|
|
|
|
#include "memory.h"
|
2020-01-17 10:13:01 +01:00
|
|
|
#include "task.h"
|
|
|
|
|
2020-01-20 09:14:13 +01:00
|
|
|
#define TASK_STATE_RUNNING 0
|
2020-01-17 10:13:01 +01:00
|
|
|
|
2020-01-20 09:14:13 +01:00
|
|
|
#ifndef CONFIG_IDLE_TASK_STACK_SIZE
|
|
|
|
#define CONFIG_IDLE_TASK_STACK_SIZE 0x4000
|
|
|
|
#endif
|
|
|
|
|
|
|
|
/* Task control block (TCB): one per schedulable task.
 * For dynamically created tasks the TCB lives at the start of the task's
 * memory page and the stack occupies the rest of the page (see
 * Task_create); the idle task's TCB is statically allocated. */
struct Task {
  void *sp;          /* saved stack pointer while the task is suspended */
  void *stackStart;  /* initial stack pointer (top of the usable stack) */
  uint32_t stackSize; /* usable stack size in bytes */
  uint32_t lock;     /* scheduler-lock nesting count; non-zero blocks
                      * preemption from Task_scheduleFromISR */
  uint32_t counter;  /* remaining time-slice credit; reloaded from cycles
                      * by scheduleNext when all tasks are exhausted */
  uint32_t cycles;   /* time-slice budget (reload value for counter) */
  int32_t priority;  /* scheduling priority; higher wins in scheduleNext */
  uint32_t state;    /* TASK_STATE_RUNNING (0) or other (not runnable) */
  char name[CONFIG_TASK_MAX_NAME_LEN + 1]; /* NUL-terminated task name */
  struct Task *next; /* next TCB in the singly-linked task list */
};
|
|
|
|
|
|
|
|
static struct Task *m_currentTask = NULL
|
|
|
|
, *m_lastTask = NULL
|
|
|
|
, m_idleTask = {
|
|
|
|
.sp = NULL
|
|
|
|
, .stackStart = NULL
|
|
|
|
, .stackSize = CONFIG_IDLE_TASK_STACK_SIZE
|
|
|
|
, .lock = 1
|
|
|
|
, .counter = 1
|
|
|
|
, .cycles = 1
|
|
|
|
, .priority = 0
|
|
|
|
, .state = TASK_STATE_RUNNING
|
|
|
|
, .name = {'I', 'D', 'L', 'E', 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
|
|
|
|
, .next = NULL
|
|
|
|
};
|
|
|
|
|
|
|
|
/* Select the next task to run.
 *
 * Scans the task list (headed by the idle task) for the runnable task
 * with the highest priority that still has time-slice credit
 * (counter > 0). If no runnable task has credit left, every task's
 * counter is reloaded from its cycles value and the scan repeats; the
 * idle task always has cycles == 1 and is always RUNNING, so the outer
 * loop terminates.
 *
 * Returns a non-NULL task pointer. Caller must hold the scheduler lock.
 */
static struct Task *
scheduleNext(void)
{
  struct Task *i, *next = NULL;
  int32_t priority = -1;

  for (;;) {
    /* check urgent tasks */
    for (i = &m_idleTask; i != NULL; i = i->next) {
      if (i->state != TASK_STATE_RUNNING)
        continue;

      /* Highest priority with remaining credit wins. */
      if (i->priority > priority && i->counter) {
        priority = i->priority;
        next = i;
      }
      /* NOTE(review): this branch looks suspicious. After the guard
       * above, i->state is always TASK_STATE_RUNNING (0), so !i->state
       * is always true here. Also i->counter (uint32_t) is compared
       * against priority (int32_t); the usual arithmetic conversions
       * promote priority to unsigned, so with priority == -1 the test is
       * always false, and otherwise it compares a time-slice counter
       * with a priority value. Confirm the intended rule — possibly a
       * tie-break on counter among equal-priority tasks was meant. */
      if (!i->state && i->counter > priority) {
        priority = i->priority;
        next = i;
      }
    }

    if (next) {
      break;
    }

    /* No runnable task has credit left: reload all counters and rescan. */
    for (i = &m_idleTask; i != NULL; i = i->next) {
      i->counter = i->cycles;
    }
  }

  return next;
}
|
|
|
|
|
|
|
|
void
|
|
|
|
Task_initSheduler(void)
|
2020-01-17 10:13:01 +01:00
|
|
|
{
|
2020-01-20 09:14:13 +01:00
|
|
|
struct Task *idleTask = &m_idleTask;
|
|
|
|
m_currentTask = idleTask;
|
|
|
|
m_lastTask = m_currentTask;
|
|
|
|
}
|
|
|
|
|
|
|
|
PID
|
|
|
|
Task_create(TaskCallback callback, void *arg)
|
|
|
|
{
|
|
|
|
struct Task *task = Memory_getPage();
|
|
|
|
|
|
|
|
task->sp = (void *)task + MEMORY_PAGE_SIZE - 272;
|
|
|
|
task->stackStart = task->sp;
|
|
|
|
task->stackSize = MEMORY_PAGE_SIZE - sizeof(*task);
|
|
|
|
task->lock = 1;
|
|
|
|
task->counter = 1;
|
|
|
|
task->cycles = 1;
|
|
|
|
task->priority = 0;
|
|
|
|
task->state = TASK_STATE_RUNNING;
|
|
|
|
task->name[0] = 'N';
|
|
|
|
task->name[1] = 'O';
|
|
|
|
task->name[2] = 'N';
|
|
|
|
task->name[3] = 'E';
|
|
|
|
task->name[4] = 0;
|
|
|
|
task->next = 0;
|
|
|
|
|
|
|
|
Task_lockScheduler();
|
|
|
|
m_lastTask->next = task;
|
|
|
|
m_lastTask = task;
|
|
|
|
Task_unlockScheduler();
|
2020-01-17 10:13:01 +01:00
|
|
|
return 0;
|
|
|
|
}
|
2020-01-20 09:14:13 +01:00
|
|
|
|
|
|
|
void
|
|
|
|
Task_yield(void)
|
|
|
|
{
|
|
|
|
struct Task *next;
|
|
|
|
|
|
|
|
Task_lockScheduler();
|
|
|
|
m_currentTask->counter = 0;
|
|
|
|
|
|
|
|
next = scheduleNext();
|
|
|
|
|
|
|
|
if (next != m_currentTask) {
|
|
|
|
AArch64_switchContext(m_currentTask, next);
|
|
|
|
m_currentTask = next;
|
|
|
|
} else {
|
|
|
|
__asm__("WFE");
|
|
|
|
}
|
|
|
|
|
|
|
|
Task_unlockScheduler();
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
Task_lockScheduler(void)
|
|
|
|
{
|
|
|
|
m_currentTask->lock++;
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
Task_unlockScheduler(void)
|
|
|
|
{
|
|
|
|
m_currentTask->lock--;
|
|
|
|
}
|
|
|
|
|
|
|
|
void *
|
|
|
|
Task_scheduleFromISR(void)
|
|
|
|
{
|
|
|
|
void *sp = NULL;
|
|
|
|
|
|
|
|
if (!m_currentTask->lock) {
|
|
|
|
struct Task *next;
|
|
|
|
|
|
|
|
Task_lockScheduler();
|
|
|
|
next = scheduleNext();
|
|
|
|
if (next != m_currentTask) {
|
|
|
|
m_currentTask = next;
|
|
|
|
sp = next->sp;
|
|
|
|
}
|
|
|
|
/* unlock call could be moved to aarch64.S interrupt handler in case of
|
|
|
|
* issue
|
|
|
|
* */
|
|
|
|
Task_unlockScheduler();
|
|
|
|
}
|
|
|
|
|
|
|
|
return sp;
|
|
|
|
}
|