Diffstat (limited to 'base/src/mpu/kernel/kernel.c')
-rw-r--r--  base/src/mpu/kernel/kernel.c  229
1 file changed, 161 insertions, 68 deletions
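
Editor's note: the patch below drops the per-block reserve_block()/release_block() helpers and instead keeps summary bits for each queue block in the workload queue header (ready and waiting flags, the highest ready priority, and an age counter, maintained by the new update_header_bits() and update_header_bits_counter() helpers). reserve_workload() then selects a block from those header bits first and only searches inside the chosen block. The stand-alone C sketch that follows shows the same two-level lookup in simplified form; it is an illustration only, not MARS code, and every name in it (block_summary, NUM_BLOCKS, and so on) is a simplified stand-in rather than a MARS definition.

/* two_level_sched_sketch.c - illustrative only, not part of MARS */
#include <stdint.h>
#include <stdio.h>

#define NUM_BLOCKS          4
#define WORKLOADS_PER_BLOCK 8

struct block_summary {
	uint8_t  ready;     /* any workload in this block is ready     */
	uint8_t  priority;  /* highest priority among ready workloads  */
	uint16_t counter;   /* ages while the block waits to be picked */
};

struct workload {
	uint8_t ready;
	uint8_t priority;
};

static struct block_summary header[NUM_BLOCKS];
static struct workload queue[NUM_BLOCKS][WORKLOADS_PER_BLOCK];

/* recompute one block's summary bits after its workloads change */
static void update_header_bits(int block)
{
	struct block_summary s = { 0, 0, header[block].counter };
	int i;

	for (i = 0; i < WORKLOADS_PER_BLOCK; i++) {
		if (!queue[block][i].ready)
			continue;
		s.ready = 1;
		if (queue[block][i].priority > s.priority)
			s.priority = queue[block][i].priority;
	}
	header[block] = s;
}

/* level 1: pick a block from the header bits alone;
 * level 2: search only inside the chosen block */
static int reserve_workload(int *block_out)
{
	int i, block = -1, index = -1;

	for (i = 0; i < NUM_BLOCKS; i++) {
		if (!header[i].ready)
			continue;
		if (block < 0 ||
		    header[i].priority > header[block].priority ||
		    (header[i].priority == header[block].priority &&
		     header[i].counter > header[block].counter))
			block = i;
	}
	if (block < 0)
		return -1;	/* nothing ready anywhere */

	for (i = 0; i < WORKLOADS_PER_BLOCK; i++) {
		if (queue[block][i].ready &&
		    (index < 0 ||
		     queue[block][i].priority > queue[block][index].priority))
			index = i;
	}

	*block_out = block;
	return index;
}

int main(void)
{
	int block, index;

	queue[2][5].ready = 1;
	queue[2][5].priority = 7;
	update_header_bits(2);

	index = reserve_workload(&block);
	printf("reserved block %d, index %d\n", block, index);
	return 0;
}
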
diff --git a/base/src/mpu/kernel/kernel.c b/base/src/mpu/kernel/kernel.c
index 928940a..58f9686 100644
--- a/base/src/mpu/kernel/kernel.c
+++ b/base/src/mpu/kernel/kernel.c
@@ -48,9 +48,6 @@
#include "kernel_internal_types.h"
#include "workload_internal_types.h"
-#define MARS_WORKLOAD_RESERVED_NONE 0
-#define MARS_WORKLOAD_RESERVED 1
-
/* kernel */
void *__kernel_stack;
static struct mars_kernel_params kernel_params;
@@ -218,6 +215,74 @@ static uint64_t get_block_bits(uint16_t id)
return block_bits;
}
+static void update_header_bits(int block)
+{
+ int i;
+ uint16_t *block_bits = &queue_header.bits[block];
+ uint8_t block_ready = MARS_WORKLOAD_BLOCK_READY_OFF;
+ uint8_t block_waiting = MARS_WORKLOAD_BLOCK_WAITING_OFF;
+ uint8_t block_priority = MARS_WORKLOAD_BLOCK_PRIORITY_MIN;
+
+ /* search through currently locked queue block workload bits */
+ for (i = 0; i < MARS_WORKLOAD_PER_BLOCK; i++) {
+ uint64_t *bits = &queue_block.bits[i];
+ uint8_t state = MARS_BITS_GET(bits, WORKLOAD_STATE);
+
+ /* workload state is ready so check priority */
+ if (state == MARS_WORKLOAD_STATE_READY) {
+ uint8_t priority = MARS_BITS_GET(bits,
+ WORKLOAD_PRIORITY);
+
+ /* set block priority if higher than current */
+ if (priority > block_priority)
+ block_priority = priority;
+
+ /* set block ready bit in header bits for block */
+ block_ready = MARS_WORKLOAD_BLOCK_READY_ON;
+ } else if (state == MARS_WORKLOAD_STATE_WAITING) {
+ /* set block waiting bit in header bits for block */
+ block_waiting = MARS_WORKLOAD_BLOCK_WAITING_ON;
+ }
+ }
+
+ /* lock the queue header */
+ mars_mutex_lock_get(kernel_params.workload_queue_ea,
+ (struct mars_mutex *)&queue_header);
+
+ /* set the info bits inside queue header for this queue block */
+ MARS_BITS_SET(block_bits, BLOCK_READY, block_ready);
+ MARS_BITS_SET(block_bits, BLOCK_WAITING, block_waiting);
+ MARS_BITS_SET(block_bits, BLOCK_PRIORITY, block_priority);
+
+ /* unlock the queue header */
+ mars_mutex_unlock_put(kernel_params.workload_queue_ea,
+ (struct mars_mutex *)&queue_header);
+}
+
+static void update_header_bits_counter(int block, int reset)
+{
+ uint16_t *block_bits = &queue_header.bits[block];
+ uint8_t block_counter = MARS_WORKLOAD_BLOCK_COUNTER_MIN;
+
+ /* lock the queue header */
+ mars_mutex_lock_get(kernel_params.workload_queue_ea,
+ (struct mars_mutex *)&queue_header);
+
+ /* reset is not specified so increment current block counter */
+ if (!reset) {
+ block_counter = MARS_BITS_GET(block_bits, BLOCK_COUNTER);
+ if (block_counter < MARS_WORKLOAD_BLOCK_COUNTER_MAX)
+ block_counter++;
+ }
+
+ /* set the block counter bits */
+ MARS_BITS_SET(block_bits, BLOCK_COUNTER, block_counter);
+
+ /* unlock the queue header */
+ mars_mutex_unlock_put(kernel_params.workload_queue_ea,
+ (struct mars_mutex *)&queue_header);
+}
+
static int workload_query(uint16_t id, int query)
{
uint64_t bits = get_block_bits(id);
@@ -430,6 +495,9 @@ static void schedule_end_callback(uint16_t id)
/* put the workload context into workload queue */
dma_put((void *)&schedule_workload, get_workload_ea(id),
sizeof(struct mars_workload_context));
+
+ /* update queue header bits */
+ update_header_bits(id / MARS_WORKLOAD_PER_BLOCK);
}
static int workload_schedule_end(uint16_t id)
@@ -484,12 +552,16 @@ static struct mars_kernel_syscalls kernel_syscalls =
host_signal_send
};
-static int search_block(int block)
+static int search_block(int block, int ready)
{
int i;
int index = -1;
uint8_t max_priority = 0;
uint16_t max_counter = 0;
+ uint64_t block_ea = get_block_ea(block);
+
+ /* lock the queue block */
+ mars_mutex_lock_get(block_ea, (struct mars_mutex *)&queue_block);
/* search through all workloads in block */
for (i = 0; i < MARS_WORKLOAD_PER_BLOCK; i++) {
@@ -501,7 +573,7 @@ static int search_block(int block)
uint16_t counter = MARS_BITS_GET(bits, WORKLOAD_COUNTER);
/* found workload in ready state */
- if (state == MARS_WORKLOAD_STATE_READY) {
+ if (ready && state == MARS_WORKLOAD_STATE_READY) {
/* compare priority and counter with previous ones */
if (index < 0 || priority > max_priority ||
(priority == max_priority && counter > max_counter)) {
@@ -515,7 +587,7 @@ static int search_block(int block)
MARS_BITS_SET(bits, WORKLOAD_COUNTER,
counter + 1);
/* found workload in waiting state */
- } else if (state == MARS_WORKLOAD_STATE_WAITING) {
+ } else if (!ready && state == MARS_WORKLOAD_STATE_WAITING) {
/* waiting for workload to finish so check status */
if (wait_id != MARS_WORKLOAD_ID_NONE) {
struct mars_workload_queue_block wait_block;
@@ -550,6 +622,9 @@ static int search_block(int block)
MARS_WORKLOAD_ID_NONE);
MARS_BITS_SET(bits, WORKLOAD_STATE,
MARS_WORKLOAD_STATE_READY);
+
+ /* update queue header bits */
+ update_header_bits(block);
}
/* waiting for signal so check signal bit and reset */
} else if (signal == MARS_WORKLOAD_SIGNAL_ON) {
@@ -557,23 +632,14 @@ static int search_block(int block)
MARS_WORKLOAD_SIGNAL_OFF);
MARS_BITS_SET(bits, WORKLOAD_STATE,
MARS_WORKLOAD_STATE_READY);
+
+ /* update queue header bits */
+ update_header_bits(block);
}
}
}
- /* returns -1 if no runnable workload found */
- return index;
-}
-
-static int reserve_block(int block)
-{
- int index;
- uint64_t block_ea = get_block_ea(block);
-
- mars_mutex_lock_get(block_ea, (struct mars_mutex *)&queue_block);
-
- /* set the workload index */
- index = search_block(block);
+ /* index is set so reserve the runnable workload */
if (index >= 0) {
/* update the current state of the workload */
MARS_BITS_SET(&queue_block.bits[index], WORKLOAD_STATE,
@@ -582,10 +648,16 @@ static int reserve_block(int block)
/* reset the counter for reserved workload */
MARS_BITS_SET(&queue_block.bits[index], WORKLOAD_COUNTER,
MARS_WORKLOAD_COUNTER_MIN);
+
+ /* update queue header bits and reset block counter */
+ update_header_bits(block);
+ update_header_bits_counter(block, 1);
}
+ /* unlock the queue block */
mars_mutex_unlock_put(block_ea, (struct mars_mutex *)&queue_block);
+ /* returns -1 if no runnable workload found */
return index;
}
@@ -599,36 +671,57 @@ static void notify_host_bits(uint64_t block_ea, int index)
host_signal_send(bits_ea);
}
-static void release_block(int block, int index)
+static int reserve_workload(void)
{
- uint64_t block_ea = get_block_ea(block);
-
- mars_mutex_lock_get(block_ea, (struct mars_mutex *)&queue_block);
-
- /* update current workload state in workload queue block */
- MARS_BITS_SET(&queue_block.bits[index], WORKLOAD_STATE, workload_state);
+ int i;
+ int block = -1;
+ int index = -1;
+ uint8_t max_block_priority = 0;
+ uint16_t max_block_counter = 0;
- mars_mutex_unlock_put(block_ea, (struct mars_mutex *)&queue_block);
+ /* get the workload queue header */
+ dma_get(&queue_header, kernel_params.workload_queue_ea,
+ sizeof(struct mars_workload_queue_header));
- if (workload_state == MARS_WORKLOAD_STATE_FINISHED)
- notify_host_bits(block_ea, index);
-}
+ /* return exit status if exit flag is set from host */
+ if (queue_header.flag == MARS_WORKLOAD_QUEUE_FLAG_EXIT)
+ return MARS_KERNEL_STATUS_EXIT;
-static int reserve_workload(void)
-{
- int block;
- int index = -1;
+ /* search workload queue header for highest priority ready block that
+ * has waited the longest in ready state */
+ for (i = 0; i < MARS_WORKLOAD_NUM_BLOCKS; i++) {
+ uint16_t *bits = &queue_header.bits[i];
+ uint8_t block_ready = MARS_BITS_GET(bits, BLOCK_READY);
+ uint8_t block_waiting = MARS_BITS_GET(bits, BLOCK_WAITING);
+ uint8_t block_priority = MARS_BITS_GET(bits, BLOCK_PRIORITY);
+ uint16_t block_counter = MARS_BITS_GET(bits, BLOCK_COUNTER);
+
+ /* block is ready so check scheduling conditions */
+ if (block_ready &&
+ (block < 0 || block_priority > max_block_priority ||
+ (block_priority == max_block_priority &&
+ block_counter > max_block_counter))) {
+ block = i;
+ max_block_priority = block_priority;
+ max_block_counter = block_counter;
+
+ /* increment block counter */
+ update_header_bits_counter(block, 0);
+ }
- /* search workload queue blocks until runnable workload reserved */
- for (block = 0; block < MARS_WORKLOAD_NUM_BLOCKS; block++) {
- index = reserve_block(block);
- if (index >= 0)
- break;
+ /* block has waiting workloads so check the block */
+ if (block_waiting)
+ search_block(i, 0);
}
/* no runnable workload found */
+ if (block < 0)
+ return MARS_KERNEL_STATUS_IDLE;
+
+ /* search block for workload index to run */
+ index = search_block(block, 1);
if (index < 0)
- return MARS_WORKLOAD_RESERVED_NONE;
+ return MARS_KERNEL_STATUS_IDLE;
/* set global workload info based on workload block and index */
workload_id = MARS_WORKLOAD_PER_BLOCK * block + index;
@@ -637,19 +730,33 @@ static int reserve_workload(void)
/* get the workload context code from workload queue */
dma_get(&workload, workload_ea, sizeof(struct mars_workload_context));
- return MARS_WORKLOAD_RESERVED;
+ return MARS_KERNEL_STATUS_BUSY;
}
static void release_workload(void)
{
int block = workload_id / MARS_WORKLOAD_PER_BLOCK;
int index = workload_id % MARS_WORKLOAD_PER_BLOCK;
+ uint64_t block_ea = get_block_ea(block);
/* put the workload context into workload queue */
dma_put(&workload, workload_ea, sizeof(struct mars_workload_context));
- /* release block reservation */
- release_block(block, index);
+ /* lock the queue block */
+ mars_mutex_lock_get(block_ea, (struct mars_mutex *)&queue_block);
+
+ /* update current workload state in workload queue block */
+ MARS_BITS_SET(&queue_block.bits[index], WORKLOAD_STATE, workload_state);
+
+ /* update queue header bits */
+ update_header_bits(block);
+
+ /* unlock the queue block */
+ mars_mutex_unlock_put(block_ea, (struct mars_mutex *)&queue_block);
+
+ /* workload state is finished so notify host */
+ if (workload_state == MARS_WORKLOAD_STATE_FINISHED)
+ notify_host_bits(block_ea, index);
}
static void __attribute__((noinline)) run_workload(void)
@@ -694,35 +801,21 @@ static void load_workload_module(void)
static int scheduler(void)
{
- int status;
+ int status = reserve_workload();
- /* get the workload queue header */
- dma_get(&queue_header,
- kernel_params.workload_queue_ea +
- offsetof(struct mars_workload_queue, header),
- sizeof(struct mars_workload_queue_header));
-
- /* return exit status if exit flag is set from host */
- if (queue_header.flag == MARS_WORKLOAD_QUEUE_FLAG_EXIT)
- return MARS_KERNEL_STATUS_EXIT;
+ /* workload reserved */
+ if (status == MARS_KERNEL_STATUS_BUSY) {
+ /* load the workload module */
+ load_workload_module();
- /* reserve next workload to run or return idle status if none found */
- status = reserve_workload();
+ /* run workload */
+ run_workload();
- /* return idle status if no workload was reserved */
- if (status == MARS_WORKLOAD_RESERVED_NONE)
- return MARS_KERNEL_STATUS_IDLE;
-
- /* load the workload module */
- load_workload_module();
-
- /* run workload */
- run_workload();
-
- /* release reservation of current workload */
- release_workload();
+ /* release reservation of current workload */
+ release_workload();
+ }
- return MARS_KERNEL_STATUS_BUSY;
+ return status;
}
static void get_params(uint64_t kernel_params_ea)