mirror of
https://github.com/kelvinlawson/atomthreads.git
synced 2026-01-21 15:23:14 +01:00
Add Atomthreads RTOS source files.
This commit is contained in:
1161
kernel/Doxyfile
Normal file
1161
kernel/Doxyfile
Normal file
File diff suppressed because it is too large
Load Diff
119
kernel/atom.h
Executable file
119
kernel/atom.h
Executable file
@@ -0,0 +1,119 @@
|
||||
/*
|
||||
* Copyright (c) 2010, Kelvin Lawson. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
*
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* 2. Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution.
|
||||
* 3. No personal names or organizations' names associated with the
|
||||
* Atomthreads project may be used to endorse or promote products
|
||||
* derived from this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE ATOMTHREADS PROJECT AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
|
||||
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
||||
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE
|
||||
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
||||
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
||||
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
||||
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
||||
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
||||
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
||||
* POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
#ifndef __ATOM_H
|
||||
#define __ATOM_H
|
||||
|
||||
#include "atomtimer.h"
|
||||
#include "atomuser.h"
|
||||
|
||||
/* Data types */
|
||||
|
||||
/* Forward declaration */
|
||||
struct atom_tcb;
|
||||
|
||||
typedef struct atom_tcb
|
||||
{
|
||||
/* Thread's current stack pointer. When a thread is scheduled
|
||||
* out the architecture port can save*/
|
||||
POINTER sp_save_ptr;
|
||||
|
||||
/* Thread priority (0-255) */
|
||||
uint8_t priority;
|
||||
|
||||
/* Thread entry point and parameter */
|
||||
void (*entry_point)(uint32_t);
|
||||
uint32_t entry_param;
|
||||
|
||||
/* Queue pointers */
|
||||
struct atom_tcb *prev_tcb; /* Previous TCB in doubly-linked TCB list */
|
||||
struct atom_tcb *next_tcb; /* Next TCB in doubly-linked list */
|
||||
|
||||
/* Suspension data */
|
||||
uint8_t suspended; /* TRUE if task is currently suspended */
|
||||
uint8_t suspend_wake_status; /* Status returned to woken suspend calls */
|
||||
ATOM_TIMER *suspend_timo_cb; /* Callback registered for suspension timeouts */
|
||||
|
||||
} ATOM_TCB;
|
||||
|
||||
|
||||
/* Global data */
|
||||
extern ATOM_TCB *tcbReadyQ;
|
||||
extern uint8_t atomOSStarted;
|
||||
|
||||
|
||||
/* Constants */
|
||||
#define TRUE 1
|
||||
#define FALSE 0
|
||||
|
||||
/* Error values */
|
||||
|
||||
#define ATOM_OK 0
|
||||
#define ATOM_ERROR 1
|
||||
#define ATOM_TIMEOUT 2
|
||||
#define ATOM_WOULDBLOCK 3
|
||||
#define ATOM_ERR_CONTEXT 200
|
||||
#define ATOM_ERR_PARAM 201
|
||||
#define ATOM_ERR_DELETED 202
|
||||
#define ATOM_ERR_OVF 203
|
||||
#define ATOM_ERR_QUEUE 204
|
||||
#define ATOM_ERR_TIMER 205
|
||||
#define ATOM_ERR_NOT_FOUND 206
|
||||
#define ATOM_ERR_OWNERSHIP 207
|
||||
|
||||
/* Idle thread priority (lowest) */
|
||||
#define IDLE_THREAD_PRIORITY 255
|
||||
|
||||
|
||||
/* Function prototypes */
|
||||
extern uint8_t atomOSInit (void *idle_thread_stack_top);
|
||||
extern void atomOSStart (void);
|
||||
|
||||
extern void atomSched (uint8_t timer_tick);
|
||||
|
||||
extern void atomIntEnter (void);
|
||||
extern void atomIntExit (uint8_t timer_tick);
|
||||
|
||||
extern uint8_t tcbEnqueuePriority (ATOM_TCB **tcb_queue_ptr, ATOM_TCB *tcb_ptr);
|
||||
extern ATOM_TCB *tcbDequeueHead (ATOM_TCB **tcb_queue_ptr);
|
||||
extern ATOM_TCB *tcbDequeueEntry (ATOM_TCB **tcb_queue_ptr, ATOM_TCB *tcb_ptr);
|
||||
extern ATOM_TCB *tcbDequeuePriority (ATOM_TCB **tcb_queue_ptr, uint8_t priority);
|
||||
|
||||
extern ATOM_TCB *atomCurrentContext (void);
|
||||
|
||||
extern uint8_t atomThreadCreate (ATOM_TCB *tcb_ptr, uint8_t priority, void (*entry_point)(uint32_t), uint32_t entry_param, void *stack_top);
|
||||
|
||||
extern void archContextSwitch (ATOM_TCB *old_tcb_ptr, ATOM_TCB *new_tcb_ptr);
|
||||
extern void archThreadContextInit (ATOM_TCB *tcb_ptr, void *stack_top, void (*entry_point)(uint32_t), uint32_t entry_param);
|
||||
extern void archFirstThreadRestore(ATOM_TCB *new_tcb_ptr);
|
||||
|
||||
extern void atomTimerTick (void);
|
||||
|
||||
|
||||
#endif /* __ATOM_H */
|
||||
796
kernel/atomkernel.c
Executable file
796
kernel/atomkernel.c
Executable file
@@ -0,0 +1,796 @@
|
||||
/*
|
||||
* Copyright (c) 2010, Kelvin Lawson. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
*
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* 2. Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution.
|
||||
* 3. No personal names or organizations' names associated with the
|
||||
* Atomthreads project may be used to endorse or promote products
|
||||
* derived from this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE ATOMTHREADS PROJECT AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
|
||||
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
||||
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE
|
||||
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
||||
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
||||
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
||||
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
||||
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
||||
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
||||
* POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
#include <stddef.h>
|
||||
#include "atom.h"
|
||||
#include "atomuser.h"
|
||||
|
||||
|
||||
/* Global data */
|
||||
|
||||
/**
|
||||
* This is the head of the queue of threads that are ready to run. It is
|
||||
* ordered by priority, with the higher priority threads coming first.
|
||||
* Where there are multiple threads of the same priority, the TCB pointers
|
||||
* are FIFO-ordered.
|
||||
*
|
||||
* Dequeuing the head is a fast operation because the list is ordered.
|
||||
* Enqueuing may have to walk up to the end of the list. This means that
|
||||
* context-switch times depend on the number of threads on the ready queue,
|
||||
* but efficient use is made of available RAM on tiny systems by avoiding
|
||||
* priority tables etc. This scheme can be easily swapped out for other
|
||||
* scheduler schemes by replacing the TCB enqueue and dequeue functions.
|
||||
*
|
||||
* Once a thread is scheduled in, it is not present on the ready queue while
|
||||
* it is running. When scheduled out it will be either placed back on the
|
||||
* ready queue, or will be suspended on some OS primitive (e.g. on the
|
||||
* suspended TCB queue for a semaphore, or in the timer list if suspended on
|
||||
* a timer delay).
|
||||
*/
|
||||
ATOM_TCB *tcbReadyQ = NULL;
|
||||
|
||||
/** Set to TRUE when OS is started and running threads */
|
||||
uint8_t atomOSStarted = FALSE;
|
||||
|
||||
|
||||
/* Local data */
|
||||
|
||||
/** This is a pointer to the TCB for the currently-running thread */
|
||||
static ATOM_TCB *curr_tcb = NULL;
|
||||
|
||||
/** Storage for the idle thread's TCB */
|
||||
static ATOM_TCB idle_tcb;
|
||||
|
||||
/* Number of nested interrupts */
|
||||
static int atomIntCnt = 0;
|
||||
|
||||
|
||||
/* Forward declarations */
|
||||
static void atomThreadSwitch(ATOM_TCB *old_tcb, ATOM_TCB *new_tcb);
|
||||
static void atomIdleThread (uint32_t data);
|
||||
|
||||
|
||||
/**
|
||||
* \b atomSched
|
||||
*
|
||||
* This is an internal function not for use by application code.
|
||||
*
|
||||
* This is the main scheduler routine. It is called by the various OS
|
||||
* library routines to check if any threads should be scheduled in now.
|
||||
* If so, the context will be switched from the current thread to the
|
||||
* new one.
|
||||
*
|
||||
* The scheduler is priority-based with round-robin performed on threads
|
||||
* with the same priority. Round-robin is only performed on timer ticks
|
||||
* however. During reschedules caused by an OS operation (e.g. after
|
||||
* giving or taking a semaphore) we only allow the scheduling in of
|
||||
* threads with higher priority than current priority. On timer ticks we
|
||||
* also allow the scheduling of same-priority threads - in that case we
|
||||
* schedule in the head of the ready list for that priority and put the
|
||||
* current thread at the tail.
|
||||
*
|
||||
* @param[in] timer_tick Should be TRUE when called from the system tick
|
||||
*
|
||||
* @return None
|
||||
*/
|
||||
void atomSched (uint8_t timer_tick)
|
||||
{
|
||||
CRITICAL_STORE;
|
||||
ATOM_TCB *new_tcb = NULL;
|
||||
int16_t lowest_pri;
|
||||
|
||||
/**
|
||||
* Check the OS has actually started. As long as the proper initialisation
|
||||
* sequence is followed there should be no calls here until the OS is
|
||||
* started, but we check to handle badly-behaved ports.
|
||||
*/
|
||||
if (atomOSStarted == FALSE)
|
||||
{
|
||||
/* Don't schedule anything in until the OS is started */
|
||||
return;
|
||||
}
|
||||
|
||||
/* Enter critical section */
|
||||
CRITICAL_START ();
|
||||
|
||||
/**
|
||||
* If the current thread is going into suspension, then
|
||||
* unconditionally dequeue the next thread for execution.
|
||||
*/
|
||||
if (curr_tcb->suspended == TRUE)
|
||||
{
|
||||
/**
|
||||
* Dequeue the next ready to run thread. There will always be
|
||||
* at least the idle thread waiting. Note that this could
|
||||
* actually be the suspending thread if it was unsuspended
|
||||
* before the scheduler was called.
|
||||
*/
|
||||
new_tcb = tcbDequeueHead (&tcbReadyQ);
|
||||
|
||||
/**
|
||||
* Don't need to add the current thread to any queue because
|
||||
* it was suspended by another OS mechanism and will be
|
||||
* sitting on a suspend queue or similar within one of the OS
|
||||
* primitive libraries (e.g. semaphore).
|
||||
*/
|
||||
|
||||
/* Switch to the new thread */
|
||||
atomThreadSwitch (curr_tcb, new_tcb);
|
||||
}
|
||||
|
||||
/**
|
||||
* Otherwise the current thread is still ready, but check
|
||||
* if any other threads are ready.
|
||||
*/
|
||||
else
|
||||
{
|
||||
/* Calculate which priority is allowed to be scheduled in */
|
||||
if (timer_tick == TRUE)
|
||||
{
|
||||
/* Same priority or higher threads can preempt */
|
||||
lowest_pri = (int16_t)curr_tcb->priority;
|
||||
}
|
||||
else if (curr_tcb->priority > 0)
|
||||
{
|
||||
/* Only higher priority threads can preempt, invalid for 0 (highest) */
|
||||
lowest_pri = (int16_t)(curr_tcb->priority - 1);
|
||||
}
|
||||
else
|
||||
{
|
||||
/**
|
||||
* Current priority is already highest (0), don't allow preempt by
|
||||
* threads of any priority because this is not a time-slice.
|
||||
*/
|
||||
lowest_pri = -1;
|
||||
}
|
||||
|
||||
/* Check if a reschedule is allowed */
|
||||
if (lowest_pri >= 0)
|
||||
{
|
||||
/* Check for a thread at the given minimum priority level or higher */
|
||||
new_tcb = tcbDequeuePriority (&tcbReadyQ, (uint8_t)lowest_pri);
|
||||
|
||||
/* If a thread was found, schedule it in */
|
||||
if (new_tcb)
|
||||
{
|
||||
/* Add the current thread to the ready queue */
|
||||
(void)tcbEnqueuePriority (&tcbReadyQ, curr_tcb);
|
||||
|
||||
/* Switch to the new thread */
|
||||
atomThreadSwitch (curr_tcb, new_tcb);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* Exit critical section */
|
||||
CRITICAL_END ();
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* \b atomThreadSwitch
|
||||
*
|
||||
* This is an internal function not for use by application code.
|
||||
*
|
||||
* The function is called by the scheduler to perform a context switch.
|
||||
* Execution will switch to the new thread's context, therefore the
|
||||
* function doesn't actually return until the old thread is scheduled
|
||||
* back in.
|
||||
*
|
||||
* @param[in] old_tcb Pointer to TCB for thread being scheduled out
|
||||
* @param[in] new_tcb Pointer to TCB for thread being scheduled in
|
||||
*
|
||||
* @return None
|
||||
*/
|
||||
static void atomThreadSwitch(ATOM_TCB *old_tcb, ATOM_TCB *new_tcb)
|
||||
{
|
||||
/**
|
||||
* Check if the new thread is actually the current one, in which
|
||||
* case we don't need to do any context switch. This can happen
|
||||
* if a thread goes into suspend but is unsuspended again before
|
||||
* it is fully scheduled out.
|
||||
*/
|
||||
if (old_tcb != new_tcb)
|
||||
{
|
||||
/* Set the new currently-running thread pointer */
|
||||
curr_tcb = new_tcb;
|
||||
|
||||
/* Call the architecture-specific context switch */
|
||||
archContextSwitch (old_tcb, new_tcb);
|
||||
}
|
||||
|
||||
/**
|
||||
* The context switch shifted execution to a different thread. By the time
|
||||
* we get back here, we are running in old_tcb context again. Clear its
|
||||
* suspend status now that we're back.
|
||||
*/
|
||||
old_tcb->suspended = FALSE;
|
||||
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* \b atomThreadCreate
|
||||
*
|
||||
* Creates and starts a new thread.
|
||||
*
|
||||
* Callers provide the ATOM_TCB structure storage, these are not obtained
|
||||
* from an internal TCB free list.
|
||||
*
|
||||
* The function puts the new thread on the ready queue and calls the
|
||||
* scheduler. If the priority is higher than the current priority, then the
|
||||
* new thread may be scheduled in before the function returns.
|
||||
*
|
||||
* @param[in] tcb_ptr Pointer to the thread's TCB storage
|
||||
* @param[in] priority Priority of the thread (0 to 255)
|
||||
* @param[in] entry_point Thread entry point
|
||||
* @param[in] entry_param Parameter passed to thread entry point
|
||||
* @param[in] stack_top Top of the stack area
|
||||
*
|
||||
* @retval ATOM_OK Success
|
||||
* @retval ATOM_ERR_PARAM Bad parameters
|
||||
* @retval ATOM_ERR_QUEUE Error putting the thread on the ready queue
|
||||
*/
|
||||
uint8_t atomThreadCreate (ATOM_TCB *tcb_ptr, uint8_t priority, void (*entry_point)(uint32_t), uint32_t entry_param, void *stack_top)
|
||||
{
|
||||
CRITICAL_STORE;
|
||||
uint8_t status;
|
||||
|
||||
if ((tcb_ptr == NULL) || (entry_point == NULL) || (stack_top == NULL))
|
||||
{
|
||||
/* Bad parameters */
|
||||
status = ATOM_ERR_PARAM;
|
||||
}
|
||||
else
|
||||
{
|
||||
|
||||
/* Set up the TCB initial values */
|
||||
tcb_ptr->suspended = FALSE;
|
||||
tcb_ptr->priority = priority;
|
||||
tcb_ptr->prev_tcb = NULL;
|
||||
tcb_ptr->next_tcb = NULL;
|
||||
tcb_ptr->suspend_timo_cb = NULL;
|
||||
|
||||
/**
|
||||
* Store the thread entry point and parameter in the TCB. This may
|
||||
* not be necessary for all architecture ports if they put all of
|
||||
* this information in the initial thread stack.
|
||||
*/
|
||||
tcb_ptr->entry_point = entry_point;
|
||||
tcb_ptr->entry_param = entry_param;
|
||||
|
||||
/**
|
||||
* Call the arch-specific routine to set up the stack. This routine
|
||||
* is responsible for creating the context save area necessary for
|
||||
* allowing atomThreadSwitch() to schedule it in. The initial
|
||||
* archContextSwitch() call when this thread gets scheduled in the
|
||||
* first time will then restore the program counter to the thread
|
||||
* entry point, and any other necessary register values ready for
|
||||
* it to start running.
|
||||
*/
|
||||
archThreadContextInit (tcb_ptr, stack_top, entry_point, entry_param);
|
||||
|
||||
/* Protect access to the OS queue */
|
||||
CRITICAL_START ();
|
||||
|
||||
/* Put this thread on the ready queue */
|
||||
if (tcbEnqueuePriority (&tcbReadyQ, tcb_ptr) != ATOM_OK)
|
||||
{
|
||||
/* Exit critical region */
|
||||
CRITICAL_END ();
|
||||
|
||||
/* Queue-related error */
|
||||
status = ATOM_ERR_QUEUE;
|
||||
}
|
||||
else
|
||||
{
|
||||
/* Exit critical region */
|
||||
CRITICAL_END ();
|
||||
|
||||
/**
|
||||
* If the OS is started and we're in thread context, check if we
|
||||
* should be scheduled in now.
|
||||
*/
|
||||
if ((atomOSStarted == TRUE) && atomCurrentContext())
|
||||
atomSched (FALSE);
|
||||
|
||||
/* Success */
|
||||
status = ATOM_OK;
|
||||
}
|
||||
}
|
||||
|
||||
return (status);
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* \b atomIntEnter
|
||||
*
|
||||
* Interrupt handler entry routine.
|
||||
*
|
||||
* Must be called at the start of any interrupt handlers that may
|
||||
* call an OS primitive and make a thread ready.
|
||||
*
|
||||
* @return None
|
||||
*/
|
||||
void atomIntEnter (void)
|
||||
{
|
||||
/* Increment the interrupt count */
|
||||
atomIntCnt++;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* \b atomIntExit
|
||||
*
|
||||
* Interrupt handler exit routine.
|
||||
*
|
||||
* Must be called at the end of any interrupt handlers that may
|
||||
* call an OS primitive and make a thread ready.
|
||||
*
|
||||
* This is responsible for calling the scheduler at the end of
|
||||
* interrupt handlers to determine whether a new thread has now
|
||||
* been made ready and should be scheduled in.
|
||||
*
|
||||
* @param timer_tick TRUE if this is a timer tick
|
||||
*
|
||||
* @return None
|
||||
*/
|
||||
void atomIntExit (uint8_t timer_tick)
|
||||
{
|
||||
/* Decrement the interrupt count */
|
||||
atomIntCnt--;
|
||||
|
||||
/* Call the scheduler */
|
||||
atomSched (timer_tick);
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* \b atomCurrentContext
|
||||
*
|
||||
* Get the current thread context.
|
||||
*
|
||||
* Returns a pointer to the current thread's TCB, or NULL if not in
|
||||
* thread-context (in interrupt context).
|
||||
*
|
||||
* @retval Pointer to current thread's TCB, NULL if in interrupt context
|
||||
*/
|
||||
ATOM_TCB *atomCurrentContext (void)
|
||||
{
|
||||
/* Return the current thread's TCB or NULL if in interrupt context */
|
||||
if (atomIntCnt == 0)
|
||||
return (curr_tcb);
|
||||
else
|
||||
return (NULL);
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* \b atomOSInit
|
||||
*
|
||||
* Initialise the atomthreads OS.
|
||||
*
|
||||
* Must be called before any application code uses the atomthreads APIs. No
|
||||
* threads are actually started until the application calls atomOSStart().
|
||||
*
|
||||
* Callers must provide a pointer to some storage for the idle thread stack.
|
||||
* The caller is responsible for calculating the appropriate space required
|
||||
* for their particular architecture.
|
||||
*
|
||||
* Applications should use the following initialisation sequence:
|
||||
*
|
||||
* -> Call atomOSInit() before calling any atomthreads APIs
|
||||
* -> Arrange for a timer to call atomTimerTick() periodically
|
||||
* -> Create one or more application threads using atomThreadCreate()
|
||||
* -> Start the OS using atomOSStart(). At this point the highest
|
||||
* priority application thread created will be started.
|
||||
*
|
||||
* Interrupts should be disabled until the first thread restore is complete,
|
||||
* to avoid any complications due to interrupts occurring while crucial
|
||||
* operating system facilities are being initialised. They are normally
|
||||
* enabled by the archFirstThreadRestore() routine in the architecture port.
|
||||
*
|
||||
* @param[in] idle_thread_stack_top Ptr to top of stack area for idle thread
|
||||
*
|
||||
* @retval ATOM_OK Success
|
||||
* @retval ATOM_ERROR Initialisation error
|
||||
*/
|
||||
uint8_t atomOSInit (void *idle_thread_stack_top)
|
||||
{
|
||||
uint8_t status;
|
||||
|
||||
/* Initialise data */
|
||||
curr_tcb = NULL;
|
||||
tcbReadyQ = NULL;
|
||||
atomOSStarted = FALSE;
|
||||
|
||||
/* Create the idle thread */
|
||||
status = atomThreadCreate(&idle_tcb,
|
||||
IDLE_THREAD_PRIORITY,
|
||||
atomIdleThread,
|
||||
0,
|
||||
idle_thread_stack_top);
|
||||
|
||||
/* Return status */
|
||||
return (status);
|
||||
|
||||
}
|
||||
/**
|
||||
* \b atomOSStart
|
||||
*
|
||||
* Start the highest priority thread running.
|
||||
*
|
||||
* This function must be called after all OS initialisation is complete, and
|
||||
* at least one application thread has been created. It will start executing
|
||||
* the highest priority thread created (or first created if multiple threads
|
||||
* share the highest priority).
|
||||
*
|
||||
* Interrupts must still be disabled at this point. They must only be enabled
|
||||
* when the first thread is restored and started by the architecture port's
|
||||
* archFirstThreadRestore() routine.
|
||||
*
|
||||
* @return None
|
||||
*/
|
||||
void atomOSStart (void)
|
||||
{
|
||||
ATOM_TCB *new_tcb;
|
||||
|
||||
/**
|
||||
* Enable the OS started flag. This stops routines like atomThreadCreate()
|
||||
* attempting to schedule in a newly-created thread until the scheduler is
|
||||
* up and running.
|
||||
*/
|
||||
atomOSStarted = TRUE;
|
||||
|
||||
/**
|
||||
* Application calls to atomThreadCreate() should have added at least one
|
||||
* thread to the ready queue. Take the highest priority one off and
|
||||
* schedule it in. If no threads were created, the OS will simply start
|
||||
* the idle thread (the lowest priority allowed to be scheduled is the
|
||||
* idle thread's priority, 255).
|
||||
*/
|
||||
new_tcb = tcbDequeuePriority (&tcbReadyQ, 255);
|
||||
if (new_tcb)
|
||||
{
|
||||
/* Set the new currently-running thread pointer */
|
||||
curr_tcb = new_tcb;
|
||||
|
||||
/* Restore and run the first thread */
|
||||
archFirstThreadRestore (new_tcb);
|
||||
|
||||
/* Never returns to here, execution shifts to new thread context */
|
||||
}
|
||||
else
|
||||
{
|
||||
/* No ready threads were found. atomOSInit() probably was not called */
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* \b atomIdleThread
|
||||
*
|
||||
* Entry point for idle thread.
|
||||
*
|
||||
* This thread must always be present, and will be the thread executed when
|
||||
* no other threads are ready to run. It must not call any library routines
|
||||
* which would cause it to block.
|
||||
*
|
||||
* @param[in] data Unused (optional thread entry parameter)
|
||||
*
|
||||
* @return None
|
||||
*/
|
||||
static void atomIdleThread (uint32_t data)
|
||||
{
|
||||
/* Loop forever */
|
||||
while (1)
|
||||
{
|
||||
/** \todo Provide user idle hooks*/
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* \b tcbEnqueuePriority
|
||||
*
|
||||
* This is an internal function not for use by application code.
|
||||
*
|
||||
* Enqueues the TCB \c tcb_ptr on the TCB queue pointed to by \c tcb_queue_ptr.
|
||||
* TCBs are placed on the queue in priority order. If there are existing TCBs
|
||||
* at the same priority as the TCB to be enqueued, the enqueued TCB will be
|
||||
* placed at the end of the same-priority TCBs. Calls to tcbDequeuePriority()
|
||||
* will dequeue same-priority TCBs in FIFO order.
|
||||
*
|
||||
* \c tcb_queue_ptr may be modified by the routine if the enqueued TCB becomes
|
||||
* the new list head. It is valid for tcb_queue_ptr to point to a NULL pointer,
|
||||
* which is the case if the queue is currently empty.
|
||||
*
|
||||
* \b NOTE: Assumes that the caller is already in a critical section.
|
||||
*
|
||||
* @param[in,out] tcb_queue_ptr Pointer to TCB queue head pointer
|
||||
* @param[in] tcb_ptr Pointer to TCB to enqueue
|
||||
*
|
||||
* @retval ATOM_OK Success
|
||||
* @retval ATOM_ERR_PARAM Bad parameters
|
||||
*/
|
||||
uint8_t tcbEnqueuePriority (ATOM_TCB **tcb_queue_ptr, ATOM_TCB *tcb_ptr)
|
||||
{
|
||||
uint8_t status;
|
||||
ATOM_TCB *prev_ptr, *next_ptr;
|
||||
|
||||
/* Parameter check */
|
||||
if ((tcb_queue_ptr == NULL) || (tcb_ptr == NULL))
|
||||
{
|
||||
/* Return error */
|
||||
status = ATOM_ERR_PARAM;
|
||||
}
|
||||
else
|
||||
{
|
||||
/* Walk the list and enqueue at the end of the TCBs at this priority */
|
||||
prev_ptr = next_ptr = *tcb_queue_ptr;
|
||||
do
|
||||
{
|
||||
/* Insert if:
|
||||
* next_ptr = NULL (we're at the head of an empty queue or at the tail)
|
||||
* the next TCB in the list is lower priority than the one we're enqueuing.
|
||||
*/
|
||||
if ((next_ptr == NULL) || (next_ptr->priority > tcb_ptr->priority))
|
||||
{
|
||||
/* Make this TCB the new listhead */
|
||||
if (next_ptr == *tcb_queue_ptr)
|
||||
{
|
||||
*tcb_queue_ptr = tcb_ptr;
|
||||
tcb_ptr->prev_tcb = NULL;
|
||||
tcb_ptr->next_tcb = next_ptr;
|
||||
if (next_ptr)
|
||||
next_ptr->prev_tcb = tcb_ptr;
|
||||
}
|
||||
/* Insert between two TCBs or at the tail */
|
||||
else
|
||||
{
|
||||
tcb_ptr->prev_tcb = prev_ptr;
|
||||
tcb_ptr->next_tcb = next_ptr;
|
||||
prev_ptr->next_tcb = tcb_ptr;
|
||||
if (next_ptr)
|
||||
next_ptr->prev_tcb = tcb_ptr;
|
||||
}
|
||||
|
||||
/* Quit the loop, we've finished inserting */
|
||||
break;
|
||||
}
|
||||
else
|
||||
{
|
||||
/* Not inserting here, try the next one */
|
||||
prev_ptr = next_ptr;
|
||||
next_ptr = next_ptr->next_tcb;
|
||||
}
|
||||
|
||||
}
|
||||
while (prev_ptr != NULL);
|
||||
|
||||
/* Successful */
|
||||
status = ATOM_OK;
|
||||
}
|
||||
|
||||
return (status);
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* \b tcbDequeueHead
|
||||
*
|
||||
* This is an internal function not for use by application code.
|
||||
*
|
||||
* Dequeues the highest priority TCB on the queue pointed to by
|
||||
* \c tcb_queue_ptr.
|
||||
*
|
||||
* The TCB will be removed from the queue. Same priority TCBs are dequeued in
|
||||
* FIFO order.
|
||||
*
|
||||
* \c tcb_queue_ptr will be modified by the routine if a TCB is dequeued,
|
||||
* as this will be the list head. It is valid for tcb_queue_ptr to point to a
|
||||
* NULL pointer, which is the case if the queue is currently empty. In this
|
||||
* case the function returns NULL.
|
||||
*
|
||||
* \b NOTE: Assumes that the caller is already in a critical section.
|
||||
*
|
||||
* @param[in,out] tcb_queue_ptr Pointer to TCB queue head pointer
|
||||
*
|
||||
* @return Pointer to highest priority TCB on queue, or NULL if queue empty
|
||||
*/
|
||||
ATOM_TCB *tcbDequeueHead (ATOM_TCB **tcb_queue_ptr)
|
||||
{
|
||||
ATOM_TCB *ret_ptr;
|
||||
|
||||
/* Parameter check */
|
||||
if (tcb_queue_ptr == NULL)
|
||||
{
|
||||
/* Return NULL */
|
||||
ret_ptr = NULL;
|
||||
}
|
||||
/* Check for an empty queue */
|
||||
else if (*tcb_queue_ptr == NULL)
|
||||
{
|
||||
/* Return NULL */
|
||||
ret_ptr = NULL;
|
||||
}
|
||||
/* Remove and return the listhead */
|
||||
else
|
||||
{
|
||||
ret_ptr = *tcb_queue_ptr;
|
||||
*tcb_queue_ptr = ret_ptr->next_tcb;
|
||||
if (*tcb_queue_ptr)
|
||||
(*tcb_queue_ptr)->prev_tcb = NULL;
|
||||
ret_ptr->next_tcb = ret_ptr->prev_tcb = NULL;
|
||||
}
|
||||
|
||||
return (ret_ptr);
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* \b tcbDequeueEntry
|
||||
*
|
||||
* This is an internal function not for use by application code.
|
||||
*
|
||||
* Dequeues a particular TCB from the queue pointed to by \c tcb_queue_ptr.
|
||||
*
|
||||
* The TCB will be removed from the queue.
|
||||
*
|
||||
* \c tcb_queue_ptr may be modified by the routine if the dequeued TCB was
|
||||
* the list head. It is valid for tcb_queue_ptr to point to a NULL pointer,
|
||||
* which is the case if the queue is currently empty. In this case the
|
||||
* function returns NULL.
|
||||
*
|
||||
* \b NOTE: Assumes that the caller is already in a critical section.
|
||||
*
|
||||
* @param[in,out] tcb_queue_ptr Pointer to TCB queue head pointer
|
||||
* @param[in] tcb_ptr Pointer to TCB to dequeue
|
||||
*
|
||||
* @return Pointer to the dequeued TCB, or NULL if entry wasn't found
|
||||
*/
|
||||
ATOM_TCB *tcbDequeueEntry (ATOM_TCB **tcb_queue_ptr, ATOM_TCB *tcb_ptr)
|
||||
{
|
||||
ATOM_TCB *ret_ptr, *prev_ptr, *next_ptr;
|
||||
|
||||
/* Parameter check */
|
||||
if (tcb_queue_ptr == NULL)
|
||||
{
|
||||
/* Return NULL */
|
||||
ret_ptr = NULL;
|
||||
}
|
||||
/* Check for an empty queue */
|
||||
else if (*tcb_queue_ptr == NULL)
|
||||
{
|
||||
/* Return NULL */
|
||||
ret_ptr = NULL;
|
||||
}
|
||||
/* Find and remove/return the specified entry */
|
||||
else
|
||||
{
|
||||
ret_ptr = NULL;
|
||||
prev_ptr = next_ptr = *tcb_queue_ptr;
|
||||
while (next_ptr)
|
||||
{
|
||||
/* Is this entry the one we're looking for? */
|
||||
if (next_ptr == tcb_ptr)
|
||||
{
|
||||
if (next_ptr == *tcb_queue_ptr)
|
||||
{
|
||||
/* We're removing the list head */
|
||||
*tcb_queue_ptr = next_ptr->next_tcb;
|
||||
if (*tcb_queue_ptr)
|
||||
(*tcb_queue_ptr)->prev_tcb = NULL;
|
||||
}
|
||||
else
|
||||
{
|
||||
/* We're removing a mid or tail TCB */
|
||||
prev_ptr->next_tcb = next_ptr->next_tcb;
|
||||
if (next_ptr->next_tcb)
|
||||
next_ptr->next_tcb->prev_tcb = prev_ptr;
|
||||
}
|
||||
ret_ptr = next_ptr;
|
||||
ret_ptr->prev_tcb = ret_ptr->next_tcb = NULL;
|
||||
break;
|
||||
}
|
||||
|
||||
/* Move on to the next in the list */
|
||||
prev_ptr = next_ptr;
|
||||
next_ptr = next_ptr->next_tcb;
|
||||
}
|
||||
}
|
||||
|
||||
return (ret_ptr);
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* \b tcbDequeuePriority
|
||||
*
|
||||
* This is an internal function not for use by application code.
|
||||
*
|
||||
* Dequeues the first TCB of the given priority or higher, from the queue
|
||||
* pointed to by \c tcb_queue_ptr. Because the queue is ordered high priority
|
||||
* first, we only ever dequeue the list head, if any. If the list head is
|
||||
* lower priority than we wish to dequeue, then all following ones will also
|
||||
* be lower priority and hence are not parsed.
|
||||
*
|
||||
* The TCB will be removed from the queue. Same priority TCBs will be dequeued
|
||||
* in FIFO order.
|
||||
*
|
||||
* \c tcb_queue_ptr may be modified by the routine if the dequeued TCB was
|
||||
* the list head. It is valid for tcb_queue_ptr to point to a NULL pointer,
|
||||
* which is the case if the queue is currently empty. In this case the
|
||||
* function returns NULL.
|
||||
*
|
||||
* \b NOTE: Assumes that the caller is already in a critical section.
|
||||
*
|
||||
* @param[in,out] tcb_queue_ptr Pointer to TCB queue head pointer
|
||||
* @param[in] priority Minimum priority to qualify for dequeue
|
||||
*
|
||||
* @return Pointer to the dequeued TCB, or NULL if none found within priority
|
||||
*/
|
||||
ATOM_TCB *tcbDequeuePriority (ATOM_TCB **tcb_queue_ptr, uint8_t priority)
|
||||
{
|
||||
ATOM_TCB *ret_ptr;
|
||||
|
||||
/* Parameter check */
|
||||
if (tcb_queue_ptr == NULL)
|
||||
{
|
||||
/* Return NULL */
|
||||
ret_ptr = NULL;
|
||||
}
|
||||
/* Check for an empty queue */
|
||||
else if (*tcb_queue_ptr == NULL)
|
||||
{
|
||||
/* Return NULL */
|
||||
ret_ptr = NULL;
|
||||
}
|
||||
/* Check if the list head priority is within our range */
|
||||
else if ((*tcb_queue_ptr)->priority <= priority)
|
||||
{
|
||||
/* Remove the list head */
|
||||
ret_ptr = *tcb_queue_ptr;
|
||||
*tcb_queue_ptr = (*tcb_queue_ptr)->next_tcb;
|
||||
if (*tcb_queue_ptr)
|
||||
{
|
||||
(*tcb_queue_ptr)->prev_tcb = NULL;
|
||||
ret_ptr->next_tcb = NULL;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
/* No higher priority ready threads found */
|
||||
ret_ptr = NULL;
|
||||
}
|
||||
|
||||
return (ret_ptr);
|
||||
}
|
||||
645
kernel/atommutex.c
Executable file
645
kernel/atommutex.c
Executable file
@@ -0,0 +1,645 @@
|
||||
/*
|
||||
* Copyright (c) 2010, Kelvin Lawson. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
*
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* 2. Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution.
|
||||
* 3. No personal names or organizations' names associated with the
|
||||
* Atomthreads project may be used to endorse or promote products
|
||||
* derived from this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE ATOMTHREADS PROJECT AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
|
||||
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
||||
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE
|
||||
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
||||
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
||||
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
||||
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
||||
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
||||
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
||||
* POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
#include <stdio.h>
|
||||
#include "atom.h"
|
||||
#include "atommutex.h"
|
||||
#include "atomtimer.h"
|
||||
#include "atomuser.h"
|
||||
|
||||
|
||||
/* Local data types */
|
||||
|
||||
typedef struct mutex_timer
|
||||
{
|
||||
ATOM_TCB *tcb_ptr; /* Thread which is suspended with timeout */
|
||||
ATOM_MUTEX *mutex_ptr; /* Mutex the thread is suspended on */
|
||||
} MUTEX_TIMER;
|
||||
|
||||
|
||||
/* Forward declarations */
|
||||
|
||||
static void atomMutexTimerCallback (POINTER cb_data);
|
||||
|
||||
|
||||
/**
|
||||
* \b atomMutexCreate
|
||||
*
|
||||
* Initialises a mutex object.
|
||||
*
|
||||
* Must be called before calling any other mutex library routines on a
|
||||
* mutex. Objects can be deleted later using atomMutexDelete().
|
||||
*
|
||||
* Does not set the owner of a mutex. atomMutexGet() must be called after
|
||||
* creation in order to actually take ownership.
|
||||
*
|
||||
* Does not allocate storage, the caller provides the mutex object.
|
||||
*
|
||||
* This function can be called from interrupt context.
|
||||
*
|
||||
* @param[in] mutex Pointer to mutex object
|
||||
*
|
||||
* @retval ATOM_OK Success
|
||||
* @retval ATOM_ERR_PARAM Bad parameters
|
||||
*/
|
||||
uint8_t atomMutexCreate (ATOM_MUTEX *mutex)
|
||||
{
|
||||
uint8_t status;
|
||||
|
||||
/* Parameter check */
|
||||
if (mutex == NULL)
|
||||
{
|
||||
/* Bad mutex pointer */
|
||||
status = ATOM_ERR_PARAM;
|
||||
}
|
||||
else
|
||||
{
|
||||
/* Start with no owner (unlocked) */
|
||||
mutex->owner = NULL;
|
||||
|
||||
/* Reset the initial lock count */
|
||||
mutex->count = 0;
|
||||
|
||||
/* Initialise the suspended threads queue */
|
||||
mutex->suspQ = NULL;
|
||||
|
||||
/* Successful */
|
||||
status = ATOM_OK;
|
||||
}
|
||||
|
||||
return (status);
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* \b atomMutexDelete
|
||||
*
|
||||
* Deletes a mutex object.
|
||||
*
|
||||
* Any threads currently suspended on the mutex will be woken up with
|
||||
* return status ATOM_ERR_DELETED. If called at thread context then the
|
||||
* scheduler will be called during this function which may schedule in one
|
||||
* of the woken threads depending on relative priorities.
|
||||
*
|
||||
* This function can be called from interrupt context, but loops internally
|
||||
* waking up all threads blocking on the mutex, so the potential
|
||||
* execution cycles cannot be determined in advance.
|
||||
*
|
||||
* @param[in] mutex Pointer to mutex object
|
||||
*
|
||||
* @retval ATOM_OK Success
|
||||
* @retval ATOM_ERR_QUEUE Problem putting a woken thread on the ready queue
|
||||
* @retval ATOM_ERR_TIMER Problem cancelling a timeout on a woken thread
|
||||
*/
|
||||
uint8_t atomMutexDelete (ATOM_MUTEX *mutex)
|
||||
{
|
||||
uint8_t status;
|
||||
CRITICAL_STORE;
|
||||
ATOM_TCB *tcb_ptr;
|
||||
uint8_t woken_threads = FALSE;
|
||||
|
||||
/* Parameter check */
|
||||
if (mutex == NULL)
|
||||
{
|
||||
/* Bad mutex pointer */
|
||||
status = ATOM_ERR_PARAM;
|
||||
}
|
||||
else
|
||||
{
|
||||
/* Default to success status unless errors occur during wakeup */
|
||||
status = ATOM_OK;
|
||||
|
||||
/* Wake up all suspended tasks */
|
||||
while (1)
|
||||
{
|
||||
/* Enter critical region */
|
||||
CRITICAL_START ();
|
||||
|
||||
/* Check if any threads are suspended */
|
||||
tcb_ptr = tcbDequeueHead (&mutex->suspQ);
|
||||
|
||||
/* A thread is suspended on the mutex */
|
||||
if (tcb_ptr)
|
||||
{
|
||||
/* Return error status to the waiting thread */
|
||||
tcb_ptr->suspend_wake_status = ATOM_ERR_DELETED;
|
||||
|
||||
/* Put the thread on the ready queue */
|
||||
if (tcbEnqueuePriority (&tcbReadyQ, tcb_ptr) != ATOM_OK)
|
||||
{
|
||||
/* Exit critical region */
|
||||
CRITICAL_END ();
|
||||
|
||||
/* Quit the loop, returning error */
|
||||
status = ATOM_ERR_QUEUE;
|
||||
break;
|
||||
}
|
||||
|
||||
/* If there's a timeout on this suspension, cancel it */
|
||||
if (tcb_ptr->suspend_timo_cb)
|
||||
{
|
||||
/* Cancel the callback */
|
||||
if (atomTimerCancel (tcb_ptr->suspend_timo_cb) != ATOM_OK)
|
||||
{
|
||||
/* Exit critical region */
|
||||
CRITICAL_END ();
|
||||
|
||||
/* Quit the loop, returning error */
|
||||
status = ATOM_ERR_TIMER;
|
||||
break;
|
||||
}
|
||||
|
||||
/* Flag as no timeout registered */
|
||||
tcb_ptr->suspend_timo_cb = NULL;
|
||||
|
||||
}
|
||||
|
||||
/* Exit critical region */
|
||||
CRITICAL_END ();
|
||||
|
||||
/* Request a reschedule */
|
||||
woken_threads = TRUE;
|
||||
}
|
||||
|
||||
/* No more suspended threads */
|
||||
else
|
||||
{
|
||||
/* Exit critical region and quit the loop */
|
||||
CRITICAL_END ();
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
/* Call scheduler if any threads were woken up */
|
||||
if (woken_threads == TRUE)
|
||||
{
|
||||
/**
|
||||
* Only call the scheduler if we are in thread context, otherwise
|
||||
* it will be called on exiting the ISR by atomIntExit().
|
||||
*/
|
||||
if (atomCurrentContext())
|
||||
atomSched (FALSE);
|
||||
}
|
||||
}
|
||||
|
||||
return (status);
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* \b atomMutexGet
|
||||
*
|
||||
* Take the lock on a mutex.
|
||||
*
|
||||
* This takes ownership of a mutex if it is not currently owned. Ownership
|
||||
* is held by this thread until a corresponding call to atomMutexPut() by
|
||||
* the same thread.
|
||||
*
|
||||
* Can be called recursively by the original locking thread (owner).
|
||||
* Recursive calls are counted, and ownership is not relinquished until
|
||||
* the number of unlock (atomMutexPut()) calls by the owner matches the
|
||||
* number of lock (atomMutexGet()) calls.
|
||||
*
|
||||
* No thread other than the owner can lock or unlock the mutex while it is
|
||||
* locked by another thread.
|
||||
*
|
||||
* Depending on the \c timeout value specified the call will do one of
|
||||
* the following if the mutex is already locked by another thread:
|
||||
*
|
||||
* \c timeout == 0 : Call will block until the mutex is available
|
||||
* \c timeout > 0 : Call will block until available up to the specified timeout
|
||||
* \c timeout == -1 : Return immediately if mutex is locked by another thread
|
||||
*
|
||||
* If the call needs to block and \c timeout is zero, it will block
|
||||
* indefinitely until the owning thread calls atomMutexPut() or
|
||||
* atomMutexDelete() is called on the mutex.
|
||||
*
|
||||
* If the call needs to block and \c timeout is non-zero, the call will only
|
||||
* block for the specified number of system ticks after which time, if the
|
||||
* thread was not already woken, the call will return with \c ATOM_TIMEOUT.
|
||||
*
|
||||
* If the call would normally block and \c timeout is -1, the call will
|
||||
* return immediately with \c ATOM_WOULDBLOCK.
|
||||
*
|
||||
* This function can only be called from thread context. A mutex has the
|
||||
* concept of an owner thread, so it is never valid to make a mutex call
|
||||
* from interrupt context when there is no thread to associate with.
|
||||
*
|
||||
* @param[in] mutex Pointer to mutex object
|
||||
* @param[in] timeout Max system ticks to block (0 = forever)
|
||||
*
|
||||
* @retval ATOM_OK Success
|
||||
* @retval ATOM_TIMEOUT Mutex timed out before being woken
|
||||
* @retval ATOM_WOULDBLOCK Called with timeout == -1 but count is zero
|
||||
* @retval ATOM_ERR_DELETED Mutex was deleted while suspended
|
||||
* @retval ATOM_ERR_CONTEXT Not called in thread context and attempted to block
|
||||
* @retval ATOM_ERR_PARAM Bad parameter
|
||||
* @retval ATOM_ERR_QUEUE Problem putting the thread on the suspend queue
|
||||
* @retval ATOM_ERR_TIMER Problem registering the timeout
|
||||
* @retval ATOM_ERR_OVF The recursive lock count would have overflowed (>255)
|
||||
*/
|
||||
uint8_t atomMutexGet (ATOM_MUTEX *mutex, int32_t timeout)
|
||||
{
|
||||
CRITICAL_STORE;
|
||||
uint8_t status;
|
||||
MUTEX_TIMER timer_data;
|
||||
ATOM_TIMER timer_cb;
|
||||
ATOM_TCB *curr_tcb_ptr;
|
||||
|
||||
/* Check parameters */
|
||||
if (mutex == NULL)
|
||||
{
|
||||
/* Bad mutex pointer */
|
||||
status = ATOM_ERR_PARAM;
|
||||
}
|
||||
else
|
||||
{
|
||||
/* Get the current TCB */
|
||||
curr_tcb_ptr = atomCurrentContext();
|
||||
|
||||
/* Protect access to the mutex object and OS queues */
|
||||
CRITICAL_START ();
|
||||
|
||||
/**
|
||||
* Check we are at thread context. Because mutexes have the concept of
|
||||
* owner threads, it is never valid to call here from an ISR,
|
||||
* regardless of whether we will block.
|
||||
*/
|
||||
if (curr_tcb_ptr == NULL)
|
||||
{
|
||||
/* Exit critical region */
|
||||
CRITICAL_END ();
|
||||
|
||||
/* Not currently in thread context, can't suspend */
|
||||
status = ATOM_ERR_CONTEXT;
|
||||
}
|
||||
|
||||
/* Otherwise if mutex is owned by another thread, block the calling thread */
|
||||
else if ((mutex->owner != NULL) && (mutex->owner != curr_tcb_ptr))
|
||||
{
|
||||
/* If called with timeout >= 0, we should block */
|
||||
if (timeout >= 0)
|
||||
{
|
||||
/* Add current thread to the suspend list on this mutex */
|
||||
if (tcbEnqueuePriority (&mutex->suspQ, curr_tcb_ptr) != ATOM_OK)
|
||||
{
|
||||
/* Exit critical region */
|
||||
CRITICAL_END ();
|
||||
|
||||
/* There was an error putting this thread on the suspend list */
|
||||
status = ATOM_ERR_QUEUE;
|
||||
}
|
||||
else
|
||||
{
|
||||
/* Set suspended status for the current thread */
|
||||
curr_tcb_ptr->suspended = TRUE;
|
||||
|
||||
/* Track errors */
|
||||
status = ATOM_OK;
|
||||
|
||||
/* Register a timer callback if requested */
|
||||
if (timeout)
|
||||
{
|
||||
/* Fill out the data needed by the callback to wake us up */
|
||||
timer_data.tcb_ptr = curr_tcb_ptr;
|
||||
timer_data.mutex_ptr = mutex;
|
||||
|
||||
/* Fill out the timer callback request structure */
|
||||
timer_cb.cb_func = atomMutexTimerCallback;
|
||||
timer_cb.cb_data = (POINTER)&timer_data;
|
||||
timer_cb.cb_ticks = timeout;
|
||||
|
||||
/**
|
||||
* Store the timer details in the TCB so that we can
|
||||
* cancel the timer callback if the mutex is put
|
||||
* before the timeout occurs.
|
||||
*/
|
||||
curr_tcb_ptr->suspend_timo_cb = &timer_cb;
|
||||
|
||||
/* Register a callback on timeout */
|
||||
if (atomTimerRegister (&timer_cb) != ATOM_OK)
|
||||
{
|
||||
/* Timer registration failed */
|
||||
status = ATOM_ERR_TIMER;
|
||||
|
||||
/* Clean up and return to the caller */
|
||||
(void)tcbDequeueEntry (&mutex->suspQ, curr_tcb_ptr);
|
||||
curr_tcb_ptr->suspended = FALSE;
|
||||
curr_tcb_ptr->suspend_timo_cb = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
/* Set no timeout requested */
|
||||
else
|
||||
{
|
||||
/* No need to cancel timeouts on this one */
|
||||
curr_tcb_ptr->suspend_timo_cb = NULL;
|
||||
}
|
||||
|
||||
/* Exit critical region */
|
||||
CRITICAL_END ();
|
||||
|
||||
/* Check no errors have occurred */
|
||||
if (status == ATOM_OK)
|
||||
{
|
||||
/**
|
||||
* Current thread now blocking, schedule in a new
|
||||
* one. We already know we are in thread context
|
||||
* so can call the scheduler from here.
|
||||
*/
|
||||
atomSched (FALSE);
|
||||
|
||||
/**
|
||||
* Normal atomMutexPut() wakeups will set ATOM_OK status,
|
||||
* while timeouts will set ATOM_TIMEOUT and mutex
|
||||
* deletions will set ATOM_ERR_DELETED. */
|
||||
status = curr_tcb_ptr->suspend_wake_status;
|
||||
|
||||
/**
|
||||
* If we were woken up by another thread relinquishing
|
||||
* the mutex and handing this thread ownership, then
|
||||
* the relinquishing thread will set status to ATOM_OK
|
||||
* and will make this thread the owner. Setting the
|
||||
* owner before waking the thread ensures that no other
|
||||
* thread can preempt and take ownership of the mutex
|
||||
* between this thread being made ready to run, and
|
||||
* actually being scheduled back in here.
|
||||
*/
|
||||
if (status == ATOM_OK)
|
||||
{
|
||||
/**
|
||||
* Since this thread has just gained ownership, the
|
||||
* lock count is zero and should be incremented
|
||||
* once for this call.
|
||||
*/
|
||||
mutex->count++;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
/* timeout == -1, requested not to block and mutex is owned by another thread */
|
||||
CRITICAL_END();
|
||||
status = ATOM_WOULDBLOCK;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
/* Thread is not owned or is owned by us, we can claim ownership */
|
||||
|
||||
/* Increment the lock count, checking for count overflow */
|
||||
if (mutex->count == 255)
|
||||
{
|
||||
/* Don't increment, just return error status */
|
||||
status = ATOM_ERR_OVF;
|
||||
}
|
||||
else
|
||||
{
|
||||
/* Increment the count and return to the calling thread */
|
||||
mutex->count++;
|
||||
|
||||
/* If the mutex is not locked, mark the calling thread as the new owner */
|
||||
if (mutex->owner == NULL)
|
||||
{
|
||||
mutex->owner = curr_tcb_ptr;
|
||||
}
|
||||
|
||||
/* Successful */
|
||||
status = ATOM_OK;
|
||||
}
|
||||
|
||||
/* Exit critical region */
|
||||
CRITICAL_END ();
|
||||
}
|
||||
}
|
||||
|
||||
return (status);
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* \b atomMutexPut
|
||||
*
|
||||
* Give back the lock on a mutex.
|
||||
*
|
||||
* This checks that the mutex is owned by the calling thread, and decrements
|
||||
* the recursive lock count. Once the lock count reaches zero, the lock is
|
||||
* considered relinquished and no longer owned by this thread.
|
||||
*
|
||||
* If the lock is relinquished and there are threads blocking on the mutex, the
|
||||
* call will wake up the highest priority thread suspended. Only one thread is
|
||||
* woken per call to atomMutexPut(). If multiple threads of the same priority
|
||||
* are suspended, they are woken in order of suspension (FIFO).
|
||||
*
|
||||
* This function can only be called from thread context. A mutex has the
|
||||
* concept of an owner thread, so it is never valid to make a mutex call
|
||||
* from interrupt context when there is no thread to associate with.
|
||||
*
|
||||
* @param[in] mutex Pointer to mutex object
|
||||
*
|
||||
* @retval ATOM_OK Success
|
||||
* @retval ATOM_ERR_PARAM Bad parameter
|
||||
* @retval ATOM_ERR_QUEUE Problem putting a woken thread on the ready queue
|
||||
* @retval ATOM_ERR_TIMER Problem cancelling a timeout for a woken thread
|
||||
* @retval ATOM_ERR_OWNERSHIP Attempt to unlock mutex not owned by this thread
|
||||
*/
|
||||
uint8_t atomMutexPut (ATOM_MUTEX * mutex)
|
||||
{
|
||||
uint8_t status;
|
||||
CRITICAL_STORE;
|
||||
ATOM_TCB *tcb_ptr, *curr_tcb_ptr;
|
||||
|
||||
/* Check parameters */
|
||||
if (mutex == NULL)
|
||||
{
|
||||
/* Bad mutex pointer */
|
||||
status = ATOM_ERR_PARAM;
|
||||
}
|
||||
else
|
||||
{
|
||||
/* Get the current TCB */
|
||||
curr_tcb_ptr = atomCurrentContext();
|
||||
|
||||
/* Protect access to the mutex object and OS queues */
|
||||
CRITICAL_START ();
|
||||
|
||||
/* Check if the calling thread owns this mutex */
|
||||
if (mutex->owner != curr_tcb_ptr)
|
||||
{
|
||||
/* Exit critical region */
|
||||
CRITICAL_END ();
|
||||
|
||||
/* Attempt to unlock by non-owning thread */
|
||||
status = ATOM_ERR_OWNERSHIP;
|
||||
}
|
||||
else
|
||||
{
|
||||
/* Lock is owned by this thread, decrement the recursive lock count */
|
||||
mutex->count--;
|
||||
|
||||
/* Once recursive lock count reaches zero, we relinquish ownership */
|
||||
if (mutex->count == 0)
|
||||
{
|
||||
/* Relinquish ownership */
|
||||
mutex->owner = NULL;
|
||||
|
||||
/* If any threads are blocking on this mutex, wake them now */
|
||||
if (mutex->suspQ)
|
||||
{
|
||||
/**
|
||||
* Threads are woken up in priority order, with a FIFO system
|
||||
* used on same priority threads. We always take the head,
|
||||
* ordering is taken care of by an ordered list enqueue.
|
||||
*/
|
||||
tcb_ptr = tcbDequeueHead (&mutex->suspQ);
|
||||
if (tcbEnqueuePriority (&tcbReadyQ, tcb_ptr) != ATOM_OK)
|
||||
{
|
||||
/* Exit critical region */
|
||||
CRITICAL_END ();
|
||||
|
||||
/* There was a problem putting the thread on the ready queue */
|
||||
status = ATOM_ERR_QUEUE;
|
||||
}
|
||||
else
|
||||
{
|
||||
/* Set OK status to be returned to the waiting thread */
|
||||
tcb_ptr->suspend_wake_status = ATOM_OK;
|
||||
|
||||
/* Set this thread as the new owner of the mutex */
|
||||
mutex->owner = tcb_ptr;
|
||||
|
||||
/* If there's a timeout on this suspension, cancel it */
|
||||
if ((tcb_ptr->suspend_timo_cb != NULL)
|
||||
&& (atomTimerCancel (tcb_ptr->suspend_timo_cb) != ATOM_OK))
|
||||
{
|
||||
/* There was a problem cancelling a timeout on this mutex */
|
||||
status = ATOM_ERR_TIMER;
|
||||
}
|
||||
else
|
||||
{
|
||||
/* Flag as no timeout registered */
|
||||
tcb_ptr->suspend_timo_cb = NULL;
|
||||
|
||||
/* Successful */
|
||||
status = ATOM_OK;
|
||||
}
|
||||
|
||||
/* Exit critical region */
|
||||
CRITICAL_END ();
|
||||
|
||||
/**
|
||||
* The scheduler may now make a policy decision to
|
||||
* thread switch. We already know we are in thread
|
||||
* context so can call the scheduler from here.
|
||||
*/
|
||||
atomSched (FALSE);
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
/**
|
||||
* Relinquished ownership and no threads waiting.
|
||||
* Nothing to do.
|
||||
*/
|
||||
|
||||
/* Exit critical region */
|
||||
CRITICAL_END ();
|
||||
|
||||
/* Successful */
|
||||
status = ATOM_OK;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
/**
|
||||
* Decremented lock but still retain ownership due to
|
||||
* recursion. Nothing to do.
|
||||
*/
|
||||
|
||||
/* Exit critical region */
|
||||
CRITICAL_END ();
|
||||
|
||||
/* Successful */
|
||||
status = ATOM_OK;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return (status);
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* \b atomMutexTimerCallback
|
||||
*
|
||||
* This is an internal function not for use by application code.
|
||||
*
|
||||
* Timeouts on suspended threads are notified by the timer system through
|
||||
* this generic callback. The timer system calls us back with a pointer to
|
||||
* the relevant \c MUTEX_TIMER object which is used to retrieve the
|
||||
* mutex details.
|
||||
*
|
||||
* @param[in] cb_data Pointer to a MUTEX_TIMER object
|
||||
*/
|
||||
static void atomMutexTimerCallback (POINTER cb_data)
|
||||
{
|
||||
MUTEX_TIMER *timer_data_ptr;
|
||||
CRITICAL_STORE;
|
||||
|
||||
/* Get the MUTEX_TIMER structure pointer */
|
||||
timer_data_ptr = (MUTEX_TIMER *)cb_data;
|
||||
|
||||
/* Check parameter is valid */
|
||||
if (timer_data_ptr)
|
||||
{
|
||||
/* Enter critical region */
|
||||
CRITICAL_START ();
|
||||
|
||||
/* Set status to indicate to the waiting thread that it timed out */
|
||||
timer_data_ptr->tcb_ptr->suspend_wake_status = ATOM_TIMEOUT;
|
||||
|
||||
/* Flag as no timeout registered */
|
||||
timer_data_ptr->tcb_ptr->suspend_timo_cb = NULL;
|
||||
|
||||
/* Remove this thread from the mutex's suspend list */
|
||||
(void)tcbDequeueEntry (&timer_data_ptr->mutex_ptr->suspQ, timer_data_ptr->tcb_ptr);
|
||||
|
||||
/* Put the thread on the ready queue */
|
||||
(void)tcbEnqueuePriority (&tcbReadyQ, timer_data_ptr->tcb_ptr);
|
||||
|
||||
/* Exit critical region */
|
||||
CRITICAL_END ();
|
||||
|
||||
/**
|
||||
* Note that we don't call the scheduler now as it will be called
|
||||
* when we exit the ISR by atomIntExit().
|
||||
*/
|
||||
}
|
||||
}
|
||||
44
kernel/atommutex.h
Executable file
44
kernel/atommutex.h
Executable file
@@ -0,0 +1,44 @@
|
||||
/*
|
||||
* Copyright (c) 2010, Kelvin Lawson. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
*
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* 2. Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution.
|
||||
* 3. No personal names or organizations' names associated with the
|
||||
* Atomthreads project may be used to endorse or promote products
|
||||
* derived from this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE ATOMTHREADS PROJECT AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
|
||||
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
||||
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE
|
||||
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
||||
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
||||
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
||||
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
||||
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
||||
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
||||
* POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
#ifndef __ATOM_MUTEX_H
|
||||
#define __ATOM_MUTEX_H
|
||||
|
||||
typedef struct atom_mutex
|
||||
{
|
||||
ATOM_TCB * suspQ; /* Queue of threads suspended on this mutex */
|
||||
ATOM_TCB * owner; /* Thread which currently owns the lock */
|
||||
uint8_t count; /* Recursive count of locks by the owner */
|
||||
} ATOM_MUTEX;
|
||||
|
||||
extern uint8_t atomMutexCreate (ATOM_MUTEX *mutex);
|
||||
extern uint8_t atomMutexDelete (ATOM_MUTEX *mutex);
|
||||
extern uint8_t atomMutexGet (ATOM_MUTEX *mutex, int32_t timeout);
|
||||
extern uint8_t atomMutexPut (ATOM_MUTEX *mutex);
|
||||
|
||||
#endif /* __ATOM_MUTEX_H */
|
||||
879
kernel/atomqueue.c
Executable file
879
kernel/atomqueue.c
Executable file
@@ -0,0 +1,879 @@
|
||||
/*
|
||||
* Copyright (c) 2010, Kelvin Lawson. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
*
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* 2. Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution.
|
||||
* 3. No personal names or organizations' names associated with the
|
||||
* Atomthreads project may be used to endorse or promote products
|
||||
* derived from this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE ATOMTHREADS PROJECT AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
|
||||
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
||||
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE
|
||||
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
||||
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
||||
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
||||
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
||||
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
||||
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
||||
* POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
#include <stdio.h>
#include <string.h>

#include "atom.h"
#include "atomqueue.h"
#include "atomtimer.h"
#include "atomuser.h"


/* Local data types */

typedef struct queue_timer
{
ATOM_TCB *tcb_ptr; /* Thread which is suspended with timeout */
ATOM_QUEUE *queue_ptr; /* Queue the thread is interested in */
ATOM_TCB **suspQ; /* TCB queue which thread is suspended on */
} QUEUE_TIMER;


/* Forward declarations */

static uint8_t queue_remove (ATOM_QUEUE *qptr, uint8_t* msgptr);
static uint8_t queue_insert (ATOM_QUEUE *qptr, uint8_t* msgptr);
static void atomQueueTimerCallback (POINTER cb_data);
|
||||
|
||||
|
||||
/**
* \b atomQueueCreate
*
* Initialises a queue object.
*
* Must be called before calling any other queue library routines on a
* queue. Objects can be deleted later using atomQueueDelete().
*
* Does not allocate storage, the caller provides the queue object.
*
* Callers pass in their own buffer area for storing the queue messages while
* in transit between threads. The provided storage must be large enough to
* store (\c unit_size * \c max_num_msgs) bytes, i.e. the storage area will be
* used for up to \c max_num_msgs messages each of size \c unit_size.
*
* Queues use fixed-size messages.
*
* This function can be called from interrupt context.
*
* @param[in] qptr Pointer to queue object
* @param[in] buff_ptr Pointer to buffer storage area
* @param[in] unit_size Size in bytes of each queue message
* @param[in] max_num_msgs Maximum number of messages in the queue
*
* @retval ATOM_OK Success
* @retval ATOM_ERR_PARAM Bad parameters
*/
|
||||
uint8_t atomQueueCreate (ATOM_QUEUE *qptr, uint8_t *buff_ptr, uint32_t unit_size, uint32_t max_num_msgs)
|
||||
{
|
||||
uint8_t status;
|
||||
|
||||
/* Parameter check */
|
||||
if ((qptr == NULL) || (buff_ptr == NULL))
|
||||
{
|
||||
/* Bad pointers */
|
||||
status = ATOM_ERR_PARAM;
|
||||
}
|
||||
else if ((unit_size == 0) || (max_num_msgs == 0))
|
||||
{
|
||||
/* Bad values */
|
||||
status = ATOM_ERR_PARAM;
|
||||
}
|
||||
else
|
||||
{
|
||||
/* Store the queue details */
|
||||
qptr->buff_ptr = buff_ptr;
|
||||
qptr->unit_size = unit_size;
|
||||
qptr->max_num_msgs = max_num_msgs;
|
||||
|
||||
/* Initialise the suspended threads queues */
|
||||
qptr->putSuspQ = NULL;
|
||||
qptr->getSuspQ = NULL;
|
||||
|
||||
/* Initialise the insert/remove pointers */
|
||||
qptr->insert_index = 0;
|
||||
qptr->remove_index = 0;
|
||||
qptr->num_msgs_stored = 0;
|
||||
|
||||
/* Successful */
|
||||
status = ATOM_OK;
|
||||
}
|
||||
|
||||
return (status);
|
||||
}
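/*
 * Creation sketch (hypothetical application code): the caller supplies both
 * the ATOM_QUEUE control block and the message storage. For a queue of up to
 * 16 messages of type MY_MSG the buffer must hold at least
 * sizeof(MY_MSG) * 16 bytes, matching the (unit_size * max_num_msgs)
 * requirement described above. MY_MSG and the other names are illustrative.
 */
#include "atom.h"
#include "atomqueue.h"

typedef struct
{
    uint8_t cmd;
    uint8_t data;
} MY_MSG;

#define MSG_QUEUE_DEPTH 16

static ATOM_QUEUE msg_queue;
static uint8_t msg_storage[sizeof(MY_MSG) * MSG_QUEUE_DEPTH];

uint8_t msg_queue_init (void)
{
    /* unit_size = sizeof(MY_MSG), max_num_msgs = MSG_QUEUE_DEPTH */
    return (atomQueueCreate (&msg_queue, msg_storage,
                             sizeof(MY_MSG), MSG_QUEUE_DEPTH));
}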
|
||||
|
||||
|
||||
/**
|
||||
* \b atomQueueDelete
|
||||
*
|
||||
* Deletes a queue object.
|
||||
*
|
||||
* Any threads currently suspended on the queue will be woken up with
|
||||
* return status ATOM_ERR_DELETED. If called from thread context then the
|
||||
* scheduler will be called during this function which may schedule in one
|
||||
* of the woken threads depending on relative priorities.
|
||||
*
|
||||
* This function can be called from interrupt context, but loops internally
|
||||
* waking up all threads blocking on the queue, so the potential
|
||||
* execution cycles cannot be determined in advance.
|
||||
*
|
||||
* @param[in] qptr Pointer to queue object
|
||||
*
|
||||
* @retval ATOM_OK Success
|
||||
* @retval ATOM_ERR_QUEUE Problem putting a woken thread on the ready queue
|
||||
* @retval ATOM_ERR_TIMER Problem cancelling a timeout on a woken thread
|
||||
*/
|
||||
uint8_t atomQueueDelete (ATOM_QUEUE *qptr)
|
||||
{
|
||||
uint8_t status;
|
||||
CRITICAL_STORE;
|
||||
ATOM_TCB *tcb_ptr;
|
||||
uint8_t woken_threads = FALSE;
|
||||
|
||||
/* Parameter check */
|
||||
if (qptr == NULL)
|
||||
{
|
||||
/* Bad pointer */
|
||||
status = ATOM_ERR_PARAM;
|
||||
}
|
||||
else
|
||||
{
|
||||
/* Default to success status unless errors occur during wakeup */
|
||||
status = ATOM_OK;
|
||||
|
||||
/* Wake up all suspended tasks */
|
||||
while (1)
|
||||
{
|
||||
/* Enter critical region */
|
||||
CRITICAL_START ();
|
||||
|
||||
/* Check if any threads are suspended */
|
||||
if (((tcb_ptr = tcbDequeueHead (&qptr->getSuspQ)) != NULL)
|
||||
|| ((tcb_ptr = tcbDequeueHead (&qptr->putSuspQ)) != NULL))
|
||||
{
|
||||
/* A thread is waiting on a suspend queue */
|
||||
|
||||
/* Return error status to the waiting thread */
|
||||
tcb_ptr->suspend_wake_status = ATOM_ERR_DELETED;
|
||||
|
||||
/* Put the thread on the ready queue */
|
||||
if (tcbEnqueuePriority (&tcbReadyQ, tcb_ptr) != ATOM_OK)
|
||||
{
|
||||
/* Exit critical region */
|
||||
CRITICAL_END ();
|
||||
|
||||
/* Quit the loop, returning error */
|
||||
status = ATOM_ERR_QUEUE;
|
||||
break;
|
||||
}
|
||||
|
||||
/* If there's a timeout on this suspension, cancel it */
|
||||
if (tcb_ptr->suspend_timo_cb)
|
||||
{
|
||||
/* Cancel the callback */
|
||||
if (atomTimerCancel (tcb_ptr->suspend_timo_cb) != ATOM_OK)
|
||||
{
|
||||
/* Exit critical region */
|
||||
CRITICAL_END ();
|
||||
|
||||
/* Quit the loop, returning error */
|
||||
status = ATOM_ERR_TIMER;
|
||||
break;
|
||||
}
|
||||
|
||||
/* Flag as no timeout registered */
|
||||
tcb_ptr->suspend_timo_cb = NULL;
|
||||
|
||||
}
|
||||
|
||||
/* Exit critical region */
|
||||
CRITICAL_END ();
|
||||
|
||||
/* Request a reschedule */
|
||||
woken_threads = TRUE;
|
||||
}
|
||||
|
||||
/* No more suspended threads */
|
||||
else
|
||||
{
|
||||
/* Exit critical region and quit the loop */
|
||||
CRITICAL_END ();
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
/* Call scheduler if any threads were woken up */
|
||||
if (woken_threads == TRUE)
|
||||
{
|
||||
/**
|
||||
* Only call the scheduler if we are in thread context, otherwise
|
||||
* it will be called on exiting the ISR by atomIntExit().
|
||||
*/
|
||||
if (atomCurrentContext())
|
||||
atomSched (FALSE);
|
||||
}
|
||||
}
|
||||
|
||||
return (status);
|
||||
}
|
||||
|
||||
|
||||
/**
* \b atomQueueGet
*
* Attempt to retrieve a message from a queue.
*
* If the queue is currently empty, the call will do one of the following
* depending on the \c timeout value specified:
*
* \c timeout == 0 : Call will block until a message is available
* \c timeout > 0 : Call will block until a message arrives or the timeout expires
* \c timeout == -1 : Return immediately if no message is on the queue
*
* If a maximum timeout value is specified (\c timeout > 0), and no message
* is present on the queue for the specified number of system ticks, the
* call will return with \c ATOM_TIMEOUT.
*
* This function can only be called from interrupt context if the \c timeout
* parameter is -1 (in which case it does not block).
*
* @param[in] qptr Pointer to queue object
* @param[in] timeout Max system ticks to block (0 = forever, -1 = no block)
* @param[out] msgptr Pointer to which the received message will be copied
*
* @retval ATOM_OK Success
* @retval ATOM_TIMEOUT Queue wait timed out before being woken
* @retval ATOM_WOULDBLOCK Called with timeout == -1 but queue was empty
* @retval ATOM_ERR_DELETED Queue was deleted while suspended
* @retval ATOM_ERR_CONTEXT Not called in thread context and attempted to block
* @retval ATOM_ERR_PARAM Bad parameter
* @retval ATOM_ERR_QUEUE Problem putting the thread on the suspend queue
* @retval ATOM_ERR_TIMER Problem registering the timeout
*/
|
||||
uint8_t atomQueueGet (ATOM_QUEUE *qptr, int32_t timeout, uint8_t *msgptr)
|
||||
{
|
||||
CRITICAL_STORE;
|
||||
uint8_t status;
|
||||
QUEUE_TIMER timer_data;
|
||||
ATOM_TIMER timer_cb;
|
||||
ATOM_TCB *curr_tcb_ptr;
|
||||
|
||||
/* Check parameters */
|
||||
if ((qptr == NULL) || (msgptr == NULL))
|
||||
{
|
||||
/* Bad pointer */
|
||||
status = ATOM_ERR_PARAM;
|
||||
}
|
||||
else
|
||||
{
|
||||
/* Protect access to the queue object and OS queues */
|
||||
CRITICAL_START ();
|
||||
|
||||
/* If no messages on the queue, block the calling thread */
|
||||
if (qptr->num_msgs_stored == 0)
|
||||
{
|
||||
/* If called with timeout >= 0, we should block */
|
||||
if (timeout >= 0)
|
||||
{
|
||||
/* Queue is empty, block the calling thread */
|
||||
|
||||
/* Get the current TCB */
|
||||
curr_tcb_ptr = atomCurrentContext();
|
||||
|
||||
/* Check we are actually in thread context */
|
||||
if (curr_tcb_ptr)
|
||||
{
|
||||
/* Add current thread to the list suspended on receives */
|
||||
if (tcbEnqueuePriority (&qptr->getSuspQ, curr_tcb_ptr) == ATOM_OK)
|
||||
{
|
||||
/* Set suspended status for the current thread */
|
||||
curr_tcb_ptr->suspended = TRUE;
|
||||
|
||||
/* Track errors */
|
||||
status = ATOM_OK;
|
||||
|
||||
/* Register a timer callback if requested */
|
||||
if (timeout)
|
||||
{
|
||||
/**
|
||||
* Fill out the data needed by the callback to
|
||||
* wake us up.
|
||||
*/
|
||||
timer_data.tcb_ptr = curr_tcb_ptr;
|
||||
timer_data.queue_ptr = qptr;
|
||||
timer_data.suspQ = &qptr->getSuspQ;
|
||||
|
||||
/* Fill out the timer callback request structure */
|
||||
timer_cb.cb_func = atomQueueTimerCallback;
|
||||
timer_cb.cb_data = (POINTER)&timer_data;
|
||||
timer_cb.cb_ticks = timeout;
|
||||
|
||||
/**
|
||||
* Store the timer details in the TCB so that we
|
||||
* can cancel the timer callback if the queue is
|
||||
* put before the timeout occurs.
|
||||
*/
|
||||
curr_tcb_ptr->suspend_timo_cb = &timer_cb;
|
||||
|
||||
/* Register a callback on timeout */
|
||||
if (atomTimerRegister (&timer_cb) != ATOM_OK)
|
||||
{
|
||||
/* Timer registration failed */
|
||||
status = ATOM_ERR_TIMER;
|
||||
|
||||
/* Clean up and return to the caller */
|
||||
(void)tcbDequeueEntry (&qptr->getSuspQ, curr_tcb_ptr);
|
||||
curr_tcb_ptr->suspended = FALSE;
|
||||
curr_tcb_ptr->suspend_timo_cb = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
/* Set no timeout requested */
|
||||
else
|
||||
{
|
||||
/* No need to cancel timeouts on this one */
|
||||
curr_tcb_ptr->suspend_timo_cb = NULL;
|
||||
}
|
||||
|
||||
/* Exit critical region */
|
||||
CRITICAL_END ();
|
||||
|
||||
/* Check no errors occurred */
|
||||
if (status == ATOM_OK)
|
||||
{
|
||||
/**
|
||||
* Current thread now blocking, schedule in a new
|
||||
* one. We already know we are in thread context
|
||||
* so can call the scheduler from here.
|
||||
*/
|
||||
atomSched (FALSE);
|
||||
|
||||
/**
|
||||
* Normal atomQueuePut() wakeups will set ATOM_OK
|
||||
* status, while timeouts will set ATOM_TIMEOUT
|
||||
* and queue deletions will set ATOM_ERR_DELETED.
|
||||
*/
|
||||
status = curr_tcb_ptr->suspend_wake_status;
|
||||
|
||||
/**
|
||||
* Check suspend_wake_status. If it is ATOM_OK
|
||||
* then we were woken because a message has been
|
||||
* put on the queue and we can now copy it out.
|
||||
* Otherwise we were woken because we timed out
|
||||
* waiting for a message, or the queue was
|
||||
* deleted, so we should just quit.
|
||||
*/
|
||||
if (status == ATOM_OK)
|
||||
{
|
||||
/* Enter critical region */
|
||||
CRITICAL_START();
|
||||
|
||||
/* Copy the message out of the queue */
|
||||
status = queue_remove (qptr, msgptr);
|
||||
|
||||
/* Exit critical region */
|
||||
CRITICAL_END();
|
||||
}
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
/* There was an error putting this thread on the suspend list */
|
||||
CRITICAL_END ();
|
||||
status = ATOM_ERR_QUEUE;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
/* Not currently in thread context, can't suspend */
|
||||
CRITICAL_END ();
|
||||
status = ATOM_ERR_CONTEXT;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
/* timeout == -1, requested not to block and queue is empty */
|
||||
CRITICAL_END();
|
||||
status = ATOM_WOULDBLOCK;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
/* No need to block, there is a message to copy out of the queue */
|
||||
status = queue_remove (qptr, msgptr);
|
||||
|
||||
/* Exit critical region */
|
||||
CRITICAL_END ();
|
||||
|
||||
/**
|
||||
* The scheduler may now make a policy decision to thread
|
||||
* switch if we are currently in thread context. If we are
|
||||
* in interrupt context it will be handled by atomIntExit().
|
||||
*/
|
||||
if (atomCurrentContext())
|
||||
atomSched (FALSE);
|
||||
}
|
||||
}
|
||||
|
||||
return (status);
|
||||
}
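/*
 * Receive-side sketch (hypothetical thread code, reusing the illustrative
 * msg_queue/MY_MSG objects from the creation sketch above): block for up to
 * one second of ticks per attempt, assuming a 100 Hz system tick. The tick
 * rate and function names are assumptions, not part of the kernel sources.
 */
#define TICKS_PER_SEC 100

void consumer_thread_func (uint32_t param)
{
    MY_MSG msg;
    uint8_t status;

    (void)param;

    while (1)
    {
        /* Wait up to one second for the next message */
        status = atomQueueGet (&msg_queue, TICKS_PER_SEC, (uint8_t *)&msg);

        if (status == ATOM_OK)
        {
            /* Message copied into msg: process it here */
        }
        else if (status == ATOM_TIMEOUT)
        {
            /* No message within the timeout: do idle housekeeping instead */
        }
        else
        {
            /* ATOM_ERR_DELETED or another error: stop using the queue */
            break;
        }
    }
}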
|
||||
|
||||
|
||||
/**
* \b atomQueuePut
*
* Attempt to put a message onto a queue.
*
* If the queue is currently full, the call will do one of the following
* depending on the \c timeout value specified:
*
* \c timeout == 0 : Call will block until space is available
* \c timeout > 0 : Call will block until space is available or the timeout expires
* \c timeout == -1 : Return immediately if the queue is full
*
* If a maximum timeout value is specified (\c timeout > 0), and no space
* is available on the queue for the specified number of system ticks, the
* call will return with \c ATOM_TIMEOUT.
*
* This function can only be called from interrupt context if the \c timeout
* parameter is -1 (in which case it does not block and may fail to post a
* message if the queue is full).
*
* @param[in] qptr Pointer to queue object
* @param[in] timeout Max system ticks to block (0 = forever, -1 = no block)
* @param[in] msgptr Pointer to the message data to be copied onto the queue
*
* @retval ATOM_OK Success
* @retval ATOM_WOULDBLOCK Called with timeout == -1 but queue was full
* @retval ATOM_TIMEOUT Queue wait timed out before being woken
* @retval ATOM_ERR_DELETED Queue was deleted while suspended
* @retval ATOM_ERR_CONTEXT Not called in thread context and attempted to block
* @retval ATOM_ERR_PARAM Bad parameter
* @retval ATOM_ERR_QUEUE Problem putting the thread on the suspend queue
* @retval ATOM_ERR_TIMER Problem registering the timeout
*/
|
||||
uint8_t atomQueuePut (ATOM_QUEUE *qptr, int32_t timeout, uint8_t *msgptr)
|
||||
{
|
||||
CRITICAL_STORE;
|
||||
uint8_t status;
|
||||
QUEUE_TIMER timer_data;
|
||||
ATOM_TIMER timer_cb;
|
||||
ATOM_TCB *curr_tcb_ptr;
|
||||
|
||||
/* Check parameters */
|
||||
if ((qptr == NULL) || (msgptr == NULL))
|
||||
{
|
||||
/* Bad pointer */
|
||||
status = ATOM_ERR_PARAM;
|
||||
}
|
||||
else
|
||||
{
|
||||
/* Protect access to the queue object and OS queues */
|
||||
CRITICAL_START ();
|
||||
|
||||
/* If queue is full, block the calling thread */
|
||||
if (qptr->num_msgs_stored == qptr->max_num_msgs)
|
||||
{
|
||||
/* If called with timeout >= 0, we should block */
|
||||
if (timeout >= 0)
|
||||
{
|
||||
/* Queue is full, block the calling thread */
|
||||
|
||||
/* Get the current TCB */
|
||||
curr_tcb_ptr = atomCurrentContext();
|
||||
|
||||
/* Check we are actually in thread context */
|
||||
if (curr_tcb_ptr)
|
||||
{
|
||||
/* Add current thread to the suspend list on sends */
|
||||
if (tcbEnqueuePriority (&qptr->putSuspQ, curr_tcb_ptr) == ATOM_OK)
|
||||
{
|
||||
/* Set suspended status for the current thread */
|
||||
curr_tcb_ptr->suspended = TRUE;
|
||||
|
||||
/* Track errors */
|
||||
status = ATOM_OK;
|
||||
|
||||
/* Register a timer callback if requested */
|
||||
if (timeout)
|
||||
{
|
||||
/**
|
||||
* Fill out the data needed by the callback to
|
||||
* wake us up.
|
||||
*/
|
||||
timer_data.tcb_ptr = curr_tcb_ptr;
|
||||
timer_data.queue_ptr = qptr;
|
||||
timer_data.suspQ = &qptr->putSuspQ;
|
||||
|
||||
|
||||
/* Fill out the timer callback request structure */
|
||||
timer_cb.cb_func = atomQueueTimerCallback;
|
||||
timer_cb.cb_data = (POINTER)&timer_data;
|
||||
timer_cb.cb_ticks = timeout;
|
||||
|
||||
/**
|
||||
* Store the timer details in the TCB so that we
|
||||
* can cancel the timer callback if a message is
|
||||
* removed from the queue before the timeout
|
||||
* occurs.
|
||||
*/
|
||||
curr_tcb_ptr->suspend_timo_cb = &timer_cb;
|
||||
|
||||
/* Register a callback on timeout */
|
||||
if (atomTimerRegister (&timer_cb) != ATOM_OK)
|
||||
{
|
||||
/* Timer registration failed */
|
||||
status = ATOM_ERR_TIMER;
|
||||
|
||||
/* Clean up and return to the caller */
|
||||
(void)tcbDequeueEntry (&qptr->putSuspQ, curr_tcb_ptr);
|
||||
curr_tcb_ptr->suspended = FALSE;
|
||||
curr_tcb_ptr->suspend_timo_cb = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
/* Set no timeout requested */
|
||||
else
|
||||
{
|
||||
/* No need to cancel timeouts on this one */
|
||||
curr_tcb_ptr->suspend_timo_cb = NULL;
|
||||
}
|
||||
|
||||
/* Exit critical region */
|
||||
CRITICAL_END ();
|
||||
|
||||
/* Check timer registration was successful */
|
||||
if (status == ATOM_OK)
|
||||
{
|
||||
/**
|
||||
* Current thread now blocking, schedule in a new
|
||||
* one. We already know we are in thread context
|
||||
* so can call the scheduler from here.
|
||||
*/
|
||||
atomSched (FALSE);
|
||||
|
||||
/**
|
||||
* Normal atomQueueGet() wakeups will set ATOM_OK
|
||||
* status, while timeouts will set ATOM_TIMEOUT
|
||||
* and queue deletions will set ATOM_ERR_DELETED.
|
||||
*/
|
||||
status = curr_tcb_ptr->suspend_wake_status;
|
||||
|
||||
/**
|
||||
* Check suspend_wake_status. If it is ATOM_OK
|
||||
* then we were woken because a message has been
|
||||
* removed from the queue and we can now add ours.
|
||||
* Otherwise we were woken because we timed out
|
||||
* waiting for a message, or the queue was
|
||||
* deleted, so we should just quit.
|
||||
*/
|
||||
if (status == ATOM_OK)
|
||||
{
|
||||
/* Enter critical region */
|
||||
CRITICAL_START();
|
||||
|
||||
/* Copy the message into the queue */
|
||||
status = queue_insert (qptr, msgptr);
|
||||
|
||||
/* Exit critical region */
|
||||
CRITICAL_END();
|
||||
}
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
/* There was an error putting this thread on the suspend list */
|
||||
CRITICAL_END ();
|
||||
status = ATOM_ERR_QUEUE;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
/* Not currently in thread context, can't suspend */
|
||||
CRITICAL_END ();
|
||||
status = ATOM_ERR_CONTEXT;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
/* timeout == -1, cannot block. Just return queue is full */
|
||||
CRITICAL_END();
|
||||
status = ATOM_WOULDBLOCK;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
/* No need to block, there is space to copy into the queue */
|
||||
status = queue_insert (qptr, msgptr);
|
||||
|
||||
/* Exit critical region */
|
||||
CRITICAL_END ();
|
||||
|
||||
/**
|
||||
* The scheduler may now make a policy decision to thread
|
||||
* switch if we are currently in thread context. If we are
|
||||
* in interrupt context it will be handled by atomIntExit().
|
||||
*/
|
||||
if (atomCurrentContext())
|
||||
atomSched (FALSE);
|
||||
}
|
||||
}
|
||||
|
||||
return (status);
|
||||
}
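/*
 * Send-side sketch (hypothetical interrupt handler, reusing the illustrative
 * msg_queue/MY_MSG objects from the sketches above): an ISR may only use
 * timeout == -1 because it cannot block, so a full queue shows up as
 * ATOM_WOULDBLOCK and the message is simply dropped here. The handler is
 * assumed to run inside the architecture port's interrupt entry/exit
 * handling, which ends with the atomIntExit() call referenced in the
 * comments above.
 */
static uint32_t dropped_msgs = 0;

void my_device_isr (void)
{
    MY_MSG msg;

    msg.cmd = 1;
    msg.data = 0;

    /* -1: return immediately rather than blocking if the queue is full */
    if (atomQueuePut (&msg_queue, -1, (uint8_t *)&msg) != ATOM_OK)
    {
        dropped_msgs++;
    }
}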
|
||||
|
||||
|
||||
/**
|
||||
* \b atomQueueTimerCallback
|
||||
*
|
||||
* This is an internal function not for use by application code.
|
||||
*
|
||||
* Timeouts on suspended threads are notified by the timer system through
|
||||
* this generic callback. The timer system calls us back with a pointer to
|
||||
* the relevant \c QUEUE_TIMER object which is used to retrieve the
|
||||
* queue details.
|
||||
*
|
||||
* @param[in] cb_data Pointer to a QUEUE_TIMER object
|
||||
*/
|
||||
static void atomQueueTimerCallback (POINTER cb_data)
|
||||
{
|
||||
QUEUE_TIMER *timer_data_ptr;
|
||||
CRITICAL_STORE;
|
||||
|
||||
/* Get the QUEUE_TIMER structure pointer */
|
||||
timer_data_ptr = (QUEUE_TIMER *)cb_data;
|
||||
|
||||
/* Check parameter is valid */
|
||||
if (timer_data_ptr)
|
||||
{
|
||||
/* Enter critical region */
|
||||
CRITICAL_START ();
|
||||
|
||||
/* Set status to indicate to the waiting thread that it timed out */
|
||||
timer_data_ptr->tcb_ptr->suspend_wake_status = ATOM_TIMEOUT;
|
||||
|
||||
/* Flag as no timeout registered */
|
||||
timer_data_ptr->tcb_ptr->suspend_timo_cb = NULL;
|
||||
|
||||
/**
|
||||
* Remove this thread from the queue's suspend list. Handles threads
|
||||
* suspended on the receive list as well as the send list.
|
||||
*/
|
||||
(void)tcbDequeueEntry (timer_data_ptr->suspQ, timer_data_ptr->tcb_ptr);
|
||||
|
||||
/* Put the thread on the ready queue */
|
||||
(void)tcbEnqueuePriority (&tcbReadyQ, timer_data_ptr->tcb_ptr);
|
||||
|
||||
/* Exit critical region */
|
||||
CRITICAL_END ();
|
||||
|
||||
/**
|
||||
* Note that we don't call the scheduler now as it will be called
|
||||
* when we exit the ISR by atomIntExit().
|
||||
*/
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* \b queue_remove
|
||||
*
|
||||
* This is an internal function not for use by application code.
|
||||
*
|
||||
* Removes a message from a queue. Assumes that there is a message present,
|
||||
* which is already checked by the calling functions with interrupts locked
|
||||
* out.
|
||||
*
|
||||
* Also wakes up a suspended thread if there are any waiting to send on the
|
||||
* queue.
|
||||
*
|
||||
* Assumes interrupts are already locked out.
|
||||
*
|
||||
* @param[in] qptr Pointer to an ATOM_QUEUE object
|
||||
* @param[in] msgptr Destination pointer for the message to be copied into
|
||||
*
|
||||
* @retval ATOM_OK Success
|
||||
* @retval ATOM_ERR_PARAM Bad parameter
|
||||
* @retval ATOM_ERR_QUEUE Problem putting a thread on the ready queue
|
||||
* @retval ATOM_ERR_TIMER Problem cancelling a timeout
|
||||
*/
|
||||
static uint8_t queue_remove (ATOM_QUEUE *qptr, uint8_t* msgptr)
|
||||
{
|
||||
uint8_t status;
|
||||
ATOM_TCB *tcb_ptr;
|
||||
|
||||
/* Check parameters */
|
||||
if ((qptr == NULL) || (msgptr == NULL))
|
||||
{
|
||||
/* Bad pointer */
|
||||
status = ATOM_ERR_PARAM;
|
||||
}
|
||||
else
|
||||
{
|
||||
/* There is a message on the queue, copy it out */
|
||||
memcpy (msgptr, (qptr->buff_ptr + qptr->remove_index), qptr->unit_size);
|
||||
qptr->remove_index += qptr->unit_size;
|
||||
qptr->num_msgs_stored--;
|
||||
|
||||
/* Check if the remove index should now wrap to the beginning */
|
||||
if (qptr->remove_index >= (qptr->unit_size * qptr->max_num_msgs))
|
||||
qptr->remove_index = 0;
|
||||
|
||||
/**
|
||||
* If there are threads waiting to send, wake one up now. Waiting
|
||||
* threads are woken up in priority order, with same-priority
|
||||
* threads woken up in FIFO order.
|
||||
*/
|
||||
tcb_ptr = tcbDequeueHead (&qptr->putSuspQ);
|
||||
if (tcb_ptr)
|
||||
{
|
||||
/* Move the waiting thread to the ready queue */
|
||||
if (tcbEnqueuePriority (&tcbReadyQ, tcb_ptr) == ATOM_OK)
|
||||
{
|
||||
/* Set OK status to be returned to the waiting thread */
|
||||
tcb_ptr->suspend_wake_status = ATOM_OK;
|
||||
|
||||
/* If there's a timeout on this suspension, cancel it */
|
||||
if ((tcb_ptr->suspend_timo_cb != NULL)
|
||||
&& (atomTimerCancel (tcb_ptr->suspend_timo_cb) != ATOM_OK))
|
||||
{
|
||||
/* There was a problem cancelling a timeout */
|
||||
status = ATOM_ERR_TIMER;
|
||||
}
|
||||
else
|
||||
{
|
||||
/* Flag as no timeout registered */
|
||||
tcb_ptr->suspend_timo_cb = NULL;
|
||||
|
||||
/* Successful */
|
||||
status = ATOM_OK;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
/**
|
||||
* There was a problem putting the thread on the ready
|
||||
* queue.
|
||||
*/
|
||||
status = ATOM_ERR_QUEUE;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
/* There were no threads waiting to send */
|
||||
status = ATOM_OK;
|
||||
}
|
||||
}
|
||||
|
||||
return (status);
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* \b queue_insert
|
||||
*
|
||||
* This is an internal function not for use by application code.
|
||||
*
|
||||
* Inserts a message onto a queue. Assumes that the queue has space for one
|
||||
* message, which has already been checked by the calling function with
|
||||
* interrupts locked out.
|
||||
*
|
||||
* Also wakes up a suspended thread if there are any waiting to receive on the
|
||||
* queue.
|
||||
*
|
||||
* Assumes interrupts are already locked out.
|
||||
*
|
||||
* @param[in] qptr Pointer to an ATOM_QUEUE object
|
||||
* @param[in] msgptr Source pointer from which the message is copied onto the queue
|
||||
*
|
||||
* @retval ATOM_OK Success
|
||||
* @retval ATOM_ERR_PARAM Bad parameter
|
||||
* @retval ATOM_ERR_QUEUE Problem putting a thread on the ready queue
|
||||
* @retval ATOM_ERR_TIMER Problem cancelling a timeout
|
||||
*/
|
||||
static uint8_t queue_insert (ATOM_QUEUE *qptr, uint8_t* msgptr)
|
||||
{
|
||||
uint8_t status;
|
||||
ATOM_TCB *tcb_ptr;
|
||||
|
||||
/* Check parameters */
|
||||
if ((qptr == NULL) || (msgptr == NULL))
|
||||
{
|
||||
/* Bad pointer */
|
||||
status = ATOM_ERR_PARAM;
|
||||
}
|
||||
else
|
||||
{
|
||||
/* There is space in the queue, copy it in */
|
||||
memcpy ((qptr->buff_ptr + qptr->insert_index), msgptr, qptr->unit_size);
|
||||
qptr->insert_index += qptr->unit_size;
|
||||
qptr->num_msgs_stored++;
|
||||
|
||||
/* Check if the insert index should now wrap to the beginning */
|
||||
if (qptr->insert_index >= (qptr->unit_size * qptr->max_num_msgs))
|
||||
qptr->insert_index = 0;
|
||||
|
||||
/**
|
||||
* If there are threads waiting to receive, wake one up now. Waiting
|
||||
* threads are woken up in priority order, with same-priority
|
||||
* threads woken up in FIFO order.
|
||||
*/
|
||||
tcb_ptr = tcbDequeueHead (&qptr->getSuspQ);
|
||||
if (tcb_ptr)
|
||||
{
|
||||
/* Move the waiting thread to the ready queue */
|
||||
if (tcbEnqueuePriority (&tcbReadyQ, tcb_ptr) == ATOM_OK)
|
||||
{
|
||||
/* Set OK status to be returned to the waiting thread */
|
||||
tcb_ptr->suspend_wake_status = ATOM_OK;
|
||||
|
||||
/* If there's a timeout on this suspension, cancel it */
|
||||
if ((tcb_ptr->suspend_timo_cb != NULL)
|
||||
&& (atomTimerCancel (tcb_ptr->suspend_timo_cb) != ATOM_OK))
|
||||
{
|
||||
/* There was a problem cancelling a timeout */
|
||||
status = ATOM_ERR_TIMER;
|
||||
}
|
||||
else
|
||||
{
|
||||
/* Flag as no timeout registered */
|
||||
tcb_ptr->suspend_timo_cb = NULL;
|
||||
|
||||
/* Successful */
|
||||
status = ATOM_OK;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
/**
|
||||
* There was a problem putting the thread on the ready
|
||||
* queue.
|
||||
*/
|
||||
status = ATOM_ERR_QUEUE;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
/* There were no threads waiting to receive */
|
||||
status = ATOM_OK;
|
||||
}
|
||||
}
|
||||
|
||||
return (status);
|
||||
}
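/*
 * Worked example of the byte-index ring buffer shared by queue_insert() and
 * queue_remove(), assuming unit_size == 4 and max_num_msgs == 3 (a 12-byte
 * buffer). Indices advance by unit_size and wrap at unit_size * max_num_msgs:
 *
 *   start:        insert_index = 0,  remove_index = 0, num_msgs_stored = 0
 *   insert A:     insert_index = 4,                    num_msgs_stored = 1
 *   insert B:     insert_index = 8,                    num_msgs_stored = 2
 *   remove -> A:                     remove_index = 4, num_msgs_stored = 1
 *   insert C:     insert_index = 12 -> wraps to 0,     num_msgs_stored = 2
 *
 * Because num_msgs_stored tracks occupancy, insert_index == remove_index is
 * not ambiguous: the callers above use the stored count, never the indices,
 * to decide whether the queue is empty or full.
 */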
|
||||
49
kernel/atomqueue.h
Executable file
49
kernel/atomqueue.h
Executable file
@@ -0,0 +1,49 @@
|
||||
/*
|
||||
* Copyright (c) 2010, Kelvin Lawson. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
*
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* 2. Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution.
|
||||
* 3. No personal names or organizations' names associated with the
|
||||
* Atomthreads project may be used to endorse or promote products
|
||||
* derived from this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE ATOMTHREADS PROJECT AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
|
||||
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
||||
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE
|
||||
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
||||
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
||||
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
||||
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
||||
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
||||
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
||||
* POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
#ifndef __ATOM_QUEUE_H
#define __ATOM_QUEUE_H

typedef struct atom_queue
{
ATOM_TCB * putSuspQ; /* Queue of threads waiting to send */
ATOM_TCB * getSuspQ; /* Queue of threads waiting to receive */
uint8_t * buff_ptr; /* Pointer to queue data area */
uint32_t unit_size; /* Size of each message */
uint32_t max_num_msgs; /* Max number of storable messages */
uint32_t insert_index; /* Next byte index to insert into */
uint32_t remove_index; /* Next byte index to remove from */
uint32_t num_msgs_stored;/* Number of messages stored */
} ATOM_QUEUE;

extern uint8_t atomQueueCreate (ATOM_QUEUE *qptr, uint8_t *buff_ptr, uint32_t unit_size, uint32_t max_num_msgs);
extern uint8_t atomQueueDelete (ATOM_QUEUE *qptr);
extern uint8_t atomQueueGet (ATOM_QUEUE *qptr, int32_t timeout, uint8_t *msgptr);
extern uint8_t atomQueuePut (ATOM_QUEUE *qptr, int32_t timeout, uint8_t *msgptr);

#endif /* __ATOM_QUEUE_H */
|
||||
618
kernel/atomsem.c
Executable file
618
kernel/atomsem.c
Executable file
@@ -0,0 +1,618 @@
|
||||
/*
|
||||
* Copyright (c) 2010, Kelvin Lawson. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
*
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* 2. Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution.
|
||||
* 3. No personal names or organizations' names associated with the
|
||||
* Atomthreads project may be used to endorse or promote products
|
||||
* derived from this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE ATOMTHREADS PROJECT AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
|
||||
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
||||
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE
|
||||
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
||||
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
||||
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
||||
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
||||
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
||||
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
||||
* POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
#include <stdio.h>
#include "atom.h"
#include "atomsem.h"
#include "atomtimer.h"
#include "atomuser.h"


/* Local data types */

typedef struct sem_timer
{
ATOM_TCB *tcb_ptr; /* Thread which is suspended with timeout */
ATOM_SEM *sem_ptr; /* Semaphore the thread is suspended on */
} SEM_TIMER;


/* Forward declarations */

static void atomSemTimerCallback (POINTER cb_data);
|
||||
|
||||
|
||||
/**
|
||||
* \b atomSemCreate
|
||||
*
|
||||
* Initialises a semaphore object.
|
||||
*
|
||||
* Must be called before calling any other semaphore library routines on a
|
||||
* semaphore. Objects can be deleted later using atomSemDelete().
|
||||
*
|
||||
* Does not allocate storage, the caller provides the semaphore object.
|
||||
*
|
||||
* This function can be called from interrupt context.
|
||||
*
|
||||
* @param[in] sem Pointer to semaphore object
|
||||
* @param[in] initial_count Initial count value
|
||||
*
|
||||
* @retval ATOM_OK Success
|
||||
* @retval ATOM_ERR_PARAM Bad parameters
|
||||
*/
|
||||
uint8_t atomSemCreate (ATOM_SEM *sem, uint8_t initial_count)
|
||||
{
|
||||
uint8_t status;
|
||||
|
||||
/* Parameter check */
|
||||
if (sem == NULL)
|
||||
{
|
||||
/* Bad semaphore pointer */
|
||||
status = ATOM_ERR_PARAM;
|
||||
}
|
||||
else
|
||||
{
|
||||
/* Set the initial count */
|
||||
sem->count = initial_count;
|
||||
|
||||
/* Initialise the suspended threads queue */
|
||||
sem->suspQ = NULL;
|
||||
|
||||
/* Successful */
|
||||
status = ATOM_OK;
|
||||
}
|
||||
|
||||
return (status);
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* \b atomSemDelete
|
||||
*
|
||||
* Deletes a semaphore object.
|
||||
*
|
||||
* Any threads currently suspended on the semaphore will be woken up with
|
||||
* return status ATOM_ERR_DELETED. If called from thread context then the
|
||||
* scheduler will be called during this function which may schedule in one
|
||||
* of the woken threads depending on relative priorities.
|
||||
*
|
||||
* This function can be called from interrupt context, but loops internally
|
||||
* waking up all threads blocking on the semaphore, so the potential
|
||||
* execution cycles cannot be determined in advance.
|
||||
*
|
||||
* @param[in] sem Pointer to semaphore object
|
||||
*
|
||||
* @retval ATOM_OK Success
|
||||
* @retval ATOM_ERR_QUEUE Problem putting a woken thread on the ready queue
|
||||
* @retval ATOM_ERR_TIMER Problem cancelling a timeout on a woken thread
|
||||
*/
|
||||
uint8_t atomSemDelete (ATOM_SEM *sem)
|
||||
{
|
||||
uint8_t status;
|
||||
CRITICAL_STORE;
|
||||
ATOM_TCB *tcb_ptr;
|
||||
uint8_t woken_threads = FALSE;
|
||||
|
||||
/* Parameter check */
|
||||
if (sem == NULL)
|
||||
{
|
||||
/* Bad semaphore pointer */
|
||||
status = ATOM_ERR_PARAM;
|
||||
}
|
||||
else
|
||||
{
|
||||
/* Default to success status unless errors occur during wakeup */
|
||||
status = ATOM_OK;
|
||||
|
||||
/* Wake up all suspended tasks */
|
||||
while (1)
|
||||
{
|
||||
/* Enter critical region */
|
||||
CRITICAL_START ();
|
||||
|
||||
/* Check if any threads are suspended */
|
||||
tcb_ptr = tcbDequeueHead (&sem->suspQ);
|
||||
|
||||
/* A thread is suspended on the semaphore */
|
||||
if (tcb_ptr)
|
||||
{
|
||||
/* Return error status to the waiting thread */
|
||||
tcb_ptr->suspend_wake_status = ATOM_ERR_DELETED;
|
||||
|
||||
/* Put the thread on the ready queue */
|
||||
if (tcbEnqueuePriority (&tcbReadyQ, tcb_ptr) != ATOM_OK)
|
||||
{
|
||||
/* Exit critical region */
|
||||
CRITICAL_END ();
|
||||
|
||||
/* Quit the loop, returning error */
|
||||
status = ATOM_ERR_QUEUE;
|
||||
break;
|
||||
}
|
||||
|
||||
/* If there's a timeout on this suspension, cancel it */
|
||||
if (tcb_ptr->suspend_timo_cb)
|
||||
{
|
||||
/* Cancel the callback */
|
||||
if (atomTimerCancel (tcb_ptr->suspend_timo_cb) != ATOM_OK)
|
||||
{
|
||||
/* Exit critical region */
|
||||
CRITICAL_END ();
|
||||
|
||||
/* Quit the loop, returning error */
|
||||
status = ATOM_ERR_TIMER;
|
||||
break;
|
||||
}
|
||||
|
||||
/* Flag as no timeout registered */
|
||||
tcb_ptr->suspend_timo_cb = NULL;
|
||||
|
||||
}
|
||||
|
||||
/* Exit critical region */
|
||||
CRITICAL_END ();
|
||||
|
||||
/* Request a reschedule */
|
||||
woken_threads = TRUE;
|
||||
}
|
||||
|
||||
/* No more suspended threads */
|
||||
else
|
||||
{
|
||||
/* Exit critical region and quit the loop */
|
||||
CRITICAL_END ();
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
/* Call scheduler if any threads were woken up */
|
||||
if (woken_threads == TRUE)
|
||||
{
|
||||
/**
|
||||
* Only call the scheduler if we are in thread context, otherwise
|
||||
* it will be called on exiting the ISR by atomIntExit().
|
||||
*/
|
||||
if (atomCurrentContext())
|
||||
atomSched (FALSE);
|
||||
}
|
||||
}
|
||||
|
||||
return (status);
|
||||
}
|
||||
|
||||
|
||||
/**
* \b atomSemGet
*
* Perform a get operation on a semaphore.
*
* This decrements the current count value for the semaphore and returns.
* If the count value is already zero then the call will block until the
* count is incremented by another thread, or until the specified \c timeout
* is reached. Blocking threads will also be woken if the semaphore is
* deleted by another thread while blocking.
*
* Depending on the \c timeout value specified the call will do one of
* the following if the count value is zero:
*
* \c timeout == 0 : Call will block until the count is non-zero
* \c timeout > 0 : Call will block until the count is non-zero or the timeout expires
* \c timeout == -1 : Return immediately if the count is zero
*
* If the call needs to block and \c timeout is zero, it will block
* indefinitely until atomSemPut() or atomSemDelete() is called on the
* semaphore.
*
* If the call needs to block and \c timeout is non-zero, the call will only
* block for the specified number of system ticks after which time, if the
* thread was not already woken, the call will return with \c ATOM_TIMEOUT.
*
* If the call would normally block and \c timeout is -1, the call will
* return immediately with \c ATOM_WOULDBLOCK.
*
* This function can only be called from interrupt context if the \c timeout
* parameter is -1 (in which case it does not block).
*
* @param[in] sem Pointer to semaphore object
* @param[in] timeout Max system ticks to block (0 = forever, -1 = no block)
*
* @retval ATOM_OK Success
* @retval ATOM_TIMEOUT Semaphore timed out before being woken
* @retval ATOM_WOULDBLOCK Called with timeout == -1 but count is zero
* @retval ATOM_ERR_DELETED Semaphore was deleted while suspended
* @retval ATOM_ERR_CONTEXT Not called in thread context and attempted to block
* @retval ATOM_ERR_PARAM Bad parameter
* @retval ATOM_ERR_QUEUE Problem putting the thread on the suspend queue
* @retval ATOM_ERR_TIMER Problem registering the timeout
*/
|
||||
uint8_t atomSemGet (ATOM_SEM *sem, int32_t timeout)
|
||||
{
|
||||
CRITICAL_STORE;
|
||||
uint8_t status;
|
||||
SEM_TIMER timer_data;
|
||||
ATOM_TIMER timer_cb;
|
||||
ATOM_TCB *curr_tcb_ptr;
|
||||
|
||||
/* Check parameters */
|
||||
if (sem == NULL)
|
||||
{
|
||||
/* Bad semaphore pointer */
|
||||
status = ATOM_ERR_PARAM;
|
||||
}
|
||||
else
|
||||
{
|
||||
/* Protect access to the semaphore object and OS queues */
|
||||
CRITICAL_START ();
|
||||
|
||||
/* If count is zero, block the calling thread */
|
||||
if (sem->count == 0)
|
||||
{
|
||||
/* If called with timeout >= 0, we should block */
|
||||
if (timeout >= 0)
|
||||
{
|
||||
/* Count is zero, block the calling thread */
|
||||
|
||||
/* Get the current TCB */
|
||||
curr_tcb_ptr = atomCurrentContext();
|
||||
|
||||
/* Check we are actually in thread context */
|
||||
if (curr_tcb_ptr)
|
||||
{
|
||||
/* Add current thread to the suspend list on this semaphore */
|
||||
if (tcbEnqueuePriority (&sem->suspQ, curr_tcb_ptr) != ATOM_OK)
|
||||
{
|
||||
/* Exit critical region */
|
||||
CRITICAL_END ();
|
||||
|
||||
/* There was an error putting this thread on the suspend list */
|
||||
status = ATOM_ERR_QUEUE;
|
||||
}
|
||||
else
|
||||
{
|
||||
/* Set suspended status for the current thread */
|
||||
curr_tcb_ptr->suspended = TRUE;
|
||||
|
||||
/* Track errors */
|
||||
status = ATOM_OK;
|
||||
|
||||
/* Register a timer callback if requested */
|
||||
if (timeout)
|
||||
{
|
||||
/* Fill out the data needed by the callback to wake us up */
|
||||
timer_data.tcb_ptr = curr_tcb_ptr;
|
||||
timer_data.sem_ptr = sem;
|
||||
|
||||
/* Fill out the timer callback request structure */
|
||||
timer_cb.cb_func = atomSemTimerCallback;
|
||||
timer_cb.cb_data = (POINTER)&timer_data;
|
||||
timer_cb.cb_ticks = timeout;
|
||||
|
||||
/**
|
||||
* Store the timer details in the TCB so that we can
|
||||
* cancel the timer callback if the semaphore is put
|
||||
* before the timeout occurs.
|
||||
*/
|
||||
curr_tcb_ptr->suspend_timo_cb = &timer_cb;
|
||||
|
||||
/* Register a callback on timeout */
|
||||
if (atomTimerRegister (&timer_cb) != ATOM_OK)
|
||||
{
|
||||
/* Timer registration failed */
|
||||
status = ATOM_ERR_TIMER;
|
||||
|
||||
/* Clean up and return to the caller */
|
||||
(void)tcbDequeueEntry (&sem->suspQ, curr_tcb_ptr);
|
||||
curr_tcb_ptr->suspended = FALSE;
|
||||
curr_tcb_ptr->suspend_timo_cb = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
/* Set no timeout requested */
|
||||
else
|
||||
{
|
||||
/* No need to cancel timeouts on this one */
|
||||
curr_tcb_ptr->suspend_timo_cb = NULL;
|
||||
}
|
||||
|
||||
/* Exit critical region */
|
||||
CRITICAL_END ();
|
||||
|
||||
/* Check no errors have occurred */
|
||||
if (status == ATOM_OK)
|
||||
{
|
||||
/**
|
||||
* Current thread now blocking, schedule in a new
|
||||
* one. We already know we are in thread context
|
||||
* so can call the scheduler from here.
|
||||
*/
|
||||
atomSched (FALSE);
|
||||
|
||||
/**
|
||||
* Normal atomSemPut() wakeups will set ATOM_OK status,
|
||||
* while timeouts will set ATOM_TIMEOUT and semaphore
|
||||
* deletions will set ATOM_ERR_DELETED.
|
||||
*/
|
||||
status = curr_tcb_ptr->suspend_wake_status;
|
||||
|
||||
/**
|
||||
* If we have been woken up with ATOM_OK then
|
||||
* another thread incremented the semaphore and
|
||||
* handed control to this thread. In theory
* the posting thread increments the counter and
|
||||
* as soon as this thread wakes up we decrement
|
||||
* the counter here, but to prevent another
|
||||
* thread preempting this thread and decrementing
|
||||
* the semaphore before this section was
|
||||
* scheduled back in, we emulate the increment
|
||||
* and decrement by not incrementing in the
|
||||
* atomSemPut() and not decrementing here. The
|
||||
* count remains zero throughout preventing other
|
||||
* threads preempting before we decrement the
|
||||
* count again.
|
||||
*/
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
/* Exit critical region */
|
||||
CRITICAL_END ();
|
||||
|
||||
/* Not currently in thread context, can't suspend */
|
||||
status = ATOM_ERR_CONTEXT;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
/* timeout == -1, requested not to block and count is zero */
|
||||
CRITICAL_END();
|
||||
status = ATOM_WOULDBLOCK;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
/* Count is non-zero, just decrement it and return to calling thread */
|
||||
sem->count--;
|
||||
|
||||
/* Exit critical region */
|
||||
CRITICAL_END ();
|
||||
|
||||
/* Successful */
|
||||
status = ATOM_OK;
|
||||
}
|
||||
}
|
||||
|
||||
return (status);
|
||||
}
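/*
 * Wait-side sketch (hypothetical application code): a semaphore created with
 * an initial count of 0 and used to wait for an event, with a timeout so the
 * thread can recover if the event never arrives. The names and the 500-tick
 * timeout are illustrative.
 */
#include "atom.h"
#include "atomsem.h"

static ATOM_SEM event_sem;

/* Called once at startup, before the waiting thread runs */
uint8_t event_sem_init (void)
{
    /* Initial count 0: the first atomSemGet() will block until signalled */
    return (atomSemCreate (&event_sem, 0));
}

void event_wait_thread_func (uint32_t param)
{
    uint8_t status;

    (void)param;

    while (1)
    {
        /* Wait up to 500 ticks for another thread or ISR to signal us */
        status = atomSemGet (&event_sem, 500);

        if (status == ATOM_OK)
        {
            /* Event was signalled: handle it here */
        }
        else if (status == ATOM_TIMEOUT)
        {
            /* No event within 500 ticks: recover or report */
        }
        else
        {
            /* Semaphore deleted or another error: give up */
            break;
        }
    }
}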
|
||||
|
||||
|
||||
/**
|
||||
* \b atomSemPut
|
||||
*
|
||||
* Perform a put operation on a semaphore.
|
||||
*
|
||||
* This increments the current count value for the semaphore and returns.
|
||||
*
|
||||
* If the count value was previously zero and there are threads blocking on the
|
||||
* semaphore, the call will wake up the highest priority thread suspended. Only
|
||||
* one thread is woken per call to atomSemPut(). If multiple threads of the
|
||||
* same priority are suspended, they are woken in order of suspension (FIFO).
|
||||
*
|
||||
* This function can be called from interrupt context.
|
||||
*
|
||||
* @param[in] sem Pointer to semaphore object
|
||||
*
|
||||
* @retval ATOM_OK Success
|
||||
* @retval ATOM_ERR_OVF The semaphore count would have overflowed (>255)
|
||||
* @retval ATOM_ERR_PARAM Bad parameter
|
||||
* @retval ATOM_ERR_QUEUE Problem putting a woken thread on the ready queue
|
||||
* @retval ATOM_ERR_TIMER Problem cancelling a timeout for a woken thread
|
||||
*/
|
||||
uint8_t atomSemPut (ATOM_SEM * sem)
|
||||
{
|
||||
uint8_t status;
|
||||
CRITICAL_STORE;
|
||||
ATOM_TCB *tcb_ptr;
|
||||
|
||||
/* Check parameters */
|
||||
if (sem == NULL)
|
||||
{
|
||||
/* Bad semaphore pointer */
|
||||
status = ATOM_ERR_PARAM;
|
||||
}
|
||||
else
|
||||
{
|
||||
/* Protect access to the semaphore object and OS queues */
|
||||
CRITICAL_START ();
|
||||
|
||||
/* If any threads are blocking on the semaphore, wake up one */
|
||||
if (sem->suspQ)
|
||||
{
|
||||
/**
|
||||
* Threads are woken up in priority order, with a FIFO system
|
||||
* used on same priority threads. We always take the head,
|
||||
* ordering is taken care of by an ordered list enqueue.
|
||||
*/
|
||||
tcb_ptr = tcbDequeueHead (&sem->suspQ);
|
||||
if (tcbEnqueuePriority (&tcbReadyQ, tcb_ptr) != ATOM_OK)
|
||||
{
|
||||
/* Exit critical region */
|
||||
CRITICAL_END ();
|
||||
|
||||
/* There was a problem putting the thread on the ready queue */
|
||||
status = ATOM_ERR_QUEUE;
|
||||
}
|
||||
else
|
||||
{
|
||||
/* Set OK status to be returned to the waiting thread */
|
||||
tcb_ptr->suspend_wake_status = ATOM_OK;
|
||||
|
||||
/* If there's a timeout on this suspension, cancel it */
|
||||
if ((tcb_ptr->suspend_timo_cb != NULL)
|
||||
&& (atomTimerCancel (tcb_ptr->suspend_timo_cb) != ATOM_OK))
|
||||
{
|
||||
/* There was a problem cancelling a timeout on this semaphore */
|
||||
status = ATOM_ERR_TIMER;
|
||||
}
|
||||
else
|
||||
{
|
||||
/* Flag as no timeout registered */
|
||||
tcb_ptr->suspend_timo_cb = NULL;
|
||||
|
||||
/* Successful */
|
||||
status = ATOM_OK;
|
||||
}
|
||||
|
||||
/* Exit critical region */
|
||||
CRITICAL_END ();
|
||||
|
||||
/**
|
||||
* The scheduler may now make a policy decision to thread
|
||||
* switch if we are currently in thread context. If we are
|
||||
* in interrupt context it will be handled by atomIntExit().
|
||||
*/
|
||||
if (atomCurrentContext())
|
||||
atomSched (FALSE);
|
||||
}
|
||||
}
|
||||
|
||||
/* If no threads waiting, just increment the count and return */
|
||||
else
|
||||
{
|
||||
/* Check for count overflow */
|
||||
if (sem->count == 255)
|
||||
{
|
||||
/* Don't increment, just return error status */
|
||||
status = ATOM_ERR_OVF;
|
||||
}
|
||||
else
|
||||
{
|
||||
/* Increment the count and return success */
|
||||
sem->count++;
|
||||
status = ATOM_OK;
|
||||
}
|
||||
|
||||
/* Exit critical region */
|
||||
CRITICAL_END ();
|
||||
}
|
||||
}
|
||||
|
||||
return (status);
|
||||
}
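/*
 * Signal-side sketch (hypothetical interrupt handler, reusing the
 * illustrative event_sem from the sketch above): atomSemPut() is documented
 * as callable from interrupt context, so an ISR can wake the waiting thread
 * directly. Each put either wakes one suspended thread or increments the
 * count, up to the 255 limit noted above.
 */
void my_event_isr (void)
{
    /* Wake the thread blocked in atomSemGet(), if any */
    if (atomSemPut (&event_sem) != ATOM_OK)
    {
        /* ATOM_ERR_OVF or a queue/timer error: nothing useful to do here */
    }
}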
|
||||
|
||||
|
||||
/**
|
||||
* \b atomSemResetCount
|
||||
*
|
||||
* Set a new count value on a semaphore.
|
||||
*
|
||||
* Care must be taken when using this function, as there may be threads
|
||||
* suspended on the semaphore. In general it should only be used once a
|
||||
* semaphore is out of use.
|
||||
*
|
||||
* This function can be called from interrupt context.
|
||||
*
|
||||
* @param[in] sem Pointer to semaphore object
|
||||
* @param[in] count New count value
|
||||
*
|
||||
* @retval ATOM_OK Success
|
||||
* @retval ATOM_ERR_PARAM Bad parameter
|
||||
*/
|
||||
uint8_t atomSemResetCount (ATOM_SEM *sem, uint8_t count)
|
||||
{
|
||||
uint8_t status;
|
||||
|
||||
/* Parameter check */
|
||||
if (sem == NULL)
|
||||
{
|
||||
/* Bad semaphore pointer */
|
||||
status = ATOM_ERR_PARAM;
|
||||
}
|
||||
else
|
||||
{
|
||||
/* Set the count */
|
||||
sem->count = count;
|
||||
|
||||
/* Successful */
|
||||
status = ATOM_OK;
|
||||
}
|
||||
|
||||
return (status);
|
||||
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* \b atomSemTimerCallback
|
||||
*
|
||||
* This is an internal function not for use by application code.
|
||||
*
|
||||
* Timeouts on suspended threads are notified by the timer system through
|
||||
* this generic callback. The timer system calls us back with a pointer to
|
||||
* the relevant \c SEM_TIMER object which is used to retrieve the
|
||||
* semaphore details.
|
||||
*
|
||||
* @param[in] cb_data Pointer to a SEM_TIMER object
|
||||
*/
|
||||
static void atomSemTimerCallback (POINTER cb_data)
|
||||
{
|
||||
SEM_TIMER *timer_data_ptr;
|
||||
CRITICAL_STORE;
|
||||
|
||||
/* Get the SEM_TIMER structure pointer */
|
||||
timer_data_ptr = (SEM_TIMER *)cb_data;
|
||||
|
||||
/* Check parameter is valid */
|
||||
if (timer_data_ptr)
|
||||
{
|
||||
/* Enter critical region */
|
||||
CRITICAL_START ();
|
||||
|
||||
/* Set status to indicate to the waiting thread that it timed out */
|
||||
timer_data_ptr->tcb_ptr->suspend_wake_status = ATOM_TIMEOUT;
|
||||
|
||||
/* Flag as no timeout registered */
|
||||
timer_data_ptr->tcb_ptr->suspend_timo_cb = NULL;
|
||||
|
||||
/* Remove this thread from the semaphore's suspend list */
|
||||
(void)tcbDequeueEntry (&timer_data_ptr->sem_ptr->suspQ, timer_data_ptr->tcb_ptr);
|
||||
|
||||
/* Put the thread on the ready queue */
|
||||
(void)tcbEnqueuePriority (&tcbReadyQ, timer_data_ptr->tcb_ptr);
|
||||
|
||||
/* Exit critical region */
|
||||
CRITICAL_END ();
|
||||
|
||||
/**
|
||||
* Note that we don't call the scheduler now as it will be called
|
||||
* when we exit the ISR by atomIntExit().
|
||||
*/
|
||||
}
|
||||
}
|
||||
45
kernel/atomsem.h
Executable file
45
kernel/atomsem.h
Executable file
@@ -0,0 +1,45 @@
|
||||
/*
|
||||
* Copyright (c) 2010, Kelvin Lawson. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
*
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* 2. Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution.
|
||||
* 3. No personal names or organizations' names associated with the
|
||||
* Atomthreads project may be used to endorse or promote products
|
||||
* derived from this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE ATOMTHREADS PROJECT AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
|
||||
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
||||
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE
|
||||
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
||||
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
||||
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
||||
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
||||
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
||||
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
||||
* POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
#ifndef __ATOM_SEM_H
#define __ATOM_SEM_H

typedef struct atom_sem
{
ATOM_TCB * suspQ; /* Queue of threads suspended on this semaphore */
uint8_t count; /* Semaphore count */
} ATOM_SEM;

extern uint8_t atomSemCreate (ATOM_SEM *sem, uint8_t initial_count);
extern uint8_t atomSemDelete (ATOM_SEM *sem);
extern uint8_t atomSemGet (ATOM_SEM *sem, int32_t timeout);
extern uint8_t atomSemPut (ATOM_SEM *sem);
extern uint8_t atomSemResetCount (ATOM_SEM *sem, uint8_t count);

#endif /* __ATOM_SEM_H */
|
||||
463
kernel/atomtimer.c
Executable file
463
kernel/atomtimer.c
Executable file
@@ -0,0 +1,463 @@
|
||||
/*
|
||||
* Copyright (c) 2010, Kelvin Lawson. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
*
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* 2. Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution.
|
||||
* 3. No personal names or organizations' names associated with the
|
||||
* Atomthreads project may be used to endorse or promote products
|
||||
* derived from this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE ATOMTHREADS PROJECT AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
|
||||
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
||||
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE
|
||||
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
||||
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
||||
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
||||
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
||||
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
||||
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
||||
* POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
#include <stdio.h>
#include "atom.h"
#include "atomuser.h"


/* Data types */

/* Delay callbacks data structure */
typedef struct delay_timer
{
ATOM_TCB *tcb_ptr; /* Thread which is suspended with timeout */

} DELAY_TIMER;


/* Global data */

/* Local data */

/** Pointer to the head of the outstanding timers queue */
static ATOM_TIMER *timer_queue = NULL;

/** Current system tick count */
static uint32_t system_ticks = 0;


/* Forward declarations */
static void atomTimerCallbacks (void);
static void atomTimerDelayCallback (POINTER cb_data);
|
||||
|
||||
|
||||
/**
* \b atomTimerRegister
*
* Register a timer callback.
*
* Callers should fill out and pass in a timer descriptor, containing
* the number of system ticks until they would like a callback, together
* with a callback function and optional parameter. The number of ticks
* must be greater than zero.
*
* Once the requested number of system ticks has elapsed, the callback
* function will be called.
*
* These timers are used by some of the OS library routines, but they
* can also be used by application code requiring timer facilities at
* system tick resolution.
*
* This function can be called from interrupt context, but loops internally
* through the timer list, so the potential execution cycles cannot be
* determined in advance.
*
* @param[in] timer_ptr Pointer to timer descriptor
*
* @retval ATOM_OK Success
* @retval ATOM_ERR_PARAM Bad parameters
*/
|
||||
uint8_t atomTimerRegister (ATOM_TIMER *timer_ptr)
|
||||
{
    uint8_t status;
    CRITICAL_STORE;

    /* Parameter check */
    if ((timer_ptr == NULL) || (timer_ptr->cb_func == NULL)
        || (timer_ptr->cb_ticks == 0))
    {
        /* Return error */
        status = ATOM_ERR_PARAM;
    }
    else
    {
        /* Protect the list */
        CRITICAL_START ();

        /*
         * Enqueue in the list of timers.
         *
         * The list is not ordered; all timers are inserted at the start
         * of the list. On each system tick increment the list is walked
         * and the remaining ticks count for each timer is decremented.
         * Once the remaining ticks reach zero, the timer callback is
         * made.
         */
        if (timer_queue == NULL)
        {
            /* List is empty, insert new head */
            timer_ptr->next_timer = NULL;
            timer_queue = timer_ptr;
        }
        else
        {
            /* List has at least one entry, insert new timer at the head */
            timer_ptr->next_timer = timer_queue;
            timer_queue = timer_ptr;
        }

        /* End of list protection */
        CRITICAL_END ();

        /* Successful */
        status = ATOM_OK;
    }

    return (status);
}
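
/*
 * Editor's note: the sketch below is illustrative only and is not part of
 * the original source. It shows how application code might register a
 * one-shot callback; the names app_timer, app_timer_cb, app_flag and
 * app_start_timer are hypothetical.
 */
#if 0 /* usage sketch, not built */
static ATOM_TIMER app_timer;                /* Must remain valid until the callback fires or is cancelled */
static volatile uint8_t app_flag = FALSE;

static void app_timer_cb (POINTER cb_data)
{
    /* Runs in system tick (interrupt) context, so keep it short */
    *(volatile uint8_t *)cb_data = TRUE;
}

static uint8_t app_start_timer (void)
{
    app_timer.cb_func = app_timer_cb;           /* Function to call when due */
    app_timer.cb_data = (POINTER)&app_flag;     /* Passed back to the callback */
    app_timer.cb_ticks = SYSTEM_TICKS_PER_SEC;  /* Due in roughly one second */

    return (atomTimerRegister (&app_timer));    /* ATOM_OK on success */
}
#endif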


/**
 * \b atomTimerCancel
 *
 * Cancel a timer callback previously registered using atomTimerRegister().
 *
 * This function can be called from interrupt context, but loops internally
 * through the timer list, so the potential execution cycles cannot be
 * determined in advance.
 *
 * @param[in] timer_ptr Pointer to timer to cancel
 *
 * @retval ATOM_OK Success
 * @retval ATOM_ERR_PARAM Bad parameters
 * @retval ATOM_ERR_NOT_FOUND Timer registration was not found
 */
uint8_t atomTimerCancel (ATOM_TIMER *timer_ptr)
{
    uint8_t status = ATOM_ERR_NOT_FOUND;
    ATOM_TIMER *prev_ptr, *next_ptr;
    CRITICAL_STORE;

    /* Parameter check */
    if (timer_ptr == NULL)
    {
        /* Return error */
        status = ATOM_ERR_PARAM;
    }
    else
    {
        /* Protect the list */
        CRITICAL_START ();

        /* Walk the list to find the relevant timer */
        prev_ptr = next_ptr = timer_queue;
        while (next_ptr)
        {
            /* Is this entry the one we're looking for? */
            if (next_ptr == timer_ptr)
            {
                if (next_ptr == timer_queue)
                {
                    /* We're removing the list head */
                    timer_queue = next_ptr->next_timer;
                }
                else
                {
                    /* We're removing a mid or tail timer */
                    prev_ptr->next_timer = next_ptr->next_timer;
                }

                /* Successful */
                status = ATOM_OK;
                break;
            }

            /* Move on to the next in the list */
            prev_ptr = next_ptr;
            next_ptr = next_ptr->next_timer;

        }

        /* End of list protection */
        CRITICAL_END ();
    }

    return (status);
}
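
/*
 * Editor's note: illustrative sketch only, not part of the original source.
 * Cancelling a registered timer before it expires; reuses the hypothetical
 * app_timer from the sketch above.
 */
#if 0 /* usage sketch, not built */
static void app_stop_timer (void)
{
    if (atomTimerCancel (&app_timer) == ATOM_ERR_NOT_FOUND)
    {
        /* Timer already expired (callback has run) or was never registered */
    }
}
#endif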


/**
 * \b atomTimeGet
 *
 * Returns the current system tick time.
 *
 * This function can be called from interrupt context.
 *
 * @return Current system tick count
 */
uint32_t atomTimeGet(void)
{
    return (system_ticks);
}
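
/*
 * Editor's note: illustrative sketch only, not part of the original source.
 * Measuring an interval with atomTimeGet(); unsigned subtraction gives the
 * correct result across a tick-counter wrap for intervals below 2^32 ticks.
 */
#if 0 /* usage sketch, not built */
static uint32_t app_measure_ticks (void)
{
    uint32_t start_ticks;

    start_ticks = atomTimeGet ();
    /* ... work to be measured ... */
    return (atomTimeGet () - start_ticks);
}
#endif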


/**
 * \b atomTimeSet
 *
 * This is an internal function not for use by application code.
 *
 * Sets the current system tick time.
 *
 * Currently only required by the automated test suite to test
 * clock behaviour.
 *
 * This function can be called from interrupt context.
 *
 * @param[in] new_time New system tick time value
 *
 * @return None
 */
void atomTimeSet(uint32_t new_time)
{
    system_ticks = new_time;
}


/**
 * \b atomTimerTick
 *
 * System tick handler.
 *
 * User ports are responsible for calling this routine once per system tick.
 *
 * On each system tick this routine is called to do the following:
 *  1. Increment the system tick count
 *  2. Call any registered timer callbacks that are due
 *
 * @return None
 */
void atomTimerTick (void)
{
    /* Only do anything if the OS is started */
    if (atomOSStarted)
    {
        /* Increment the system tick count */
        system_ticks++;

        /* Check for any callbacks that are due */
        atomTimerCallbacks ();
    }
}
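
/*
 * Editor's note: illustrative sketch only, not part of the original source.
 * A port's periodic timer interrupt handler is expected to call this once
 * per tick; the handler name and the surrounding interrupt entry/exit
 * housekeeping are port-specific and assumed here.
 */
#if 0 /* usage sketch, not built */
void archTimerTickIsr (void)
{
    /* Port-specific interrupt entry handling would normally go here */

    /* Drive the kernel clock: called SYSTEM_TICKS_PER_SEC times per second */
    atomTimerTick ();

    /* Port-specific interrupt exit handling (including rescheduling) here */
}
#endif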


/**
 * \b atomTimerDelay
 *
 * Suspend a thread for the given number of system ticks.
 *
 * Note that the wakeup time is the number of ticks from the current system
 * tick, therefore, for a one tick delay, the thread may be woken up at any
 * time between the atomTimerDelay() call and the next system tick. To
 * guarantee a minimum delay, specify the minimum number of ticks + 1.
 *
 * This function can only be called from thread context.
 *
 * @param[in] ticks Number of system ticks to delay (must be > 0)
 *
 * @retval ATOM_OK Successful delay
 * @retval ATOM_ERR_PARAM Bad parameter (ticks must be non-zero)
 * @retval ATOM_ERR_CONTEXT Not called from thread context
 * @retval ATOM_ERR_TIMER Problem registering the timeout
 */
uint8_t atomTimerDelay (uint32_t ticks)
{
    ATOM_TCB *curr_tcb_ptr;
    ATOM_TIMER timer_cb;
    DELAY_TIMER timer_data;
    CRITICAL_STORE;
    uint8_t status;

    /* Get the current TCB */
    curr_tcb_ptr = atomCurrentContext();

    /* Parameter check */
    if (ticks == 0)
    {
        /* Return error */
        status = ATOM_ERR_PARAM;
    }

    /* Check we are actually in thread context */
    else if (curr_tcb_ptr == NULL)
    {
        /* Not currently in thread context, can't suspend */
        status = ATOM_ERR_CONTEXT;
    }

    /* Otherwise safe to proceed */
    else
    {
        /* Protect the system queues */
        CRITICAL_START ();

        /* Set suspended status for the current thread */
        curr_tcb_ptr->suspended = TRUE;

        /* Register the timer callback */

        /* Fill out the data needed by the callback to wake us up */
        timer_data.tcb_ptr = curr_tcb_ptr;

        /* Fill out the timer callback request structure */
        timer_cb.cb_func = atomTimerDelayCallback;
        timer_cb.cb_data = (POINTER)&timer_data;
        timer_cb.cb_ticks = ticks;

        /* Store the timeout callback details, though we don't use them */
        curr_tcb_ptr->suspend_timo_cb = &timer_cb;

        /* Register the callback */
        if (atomTimerRegister (&timer_cb) != ATOM_OK)
        {
            /* Exit critical region */
            CRITICAL_END ();

            /* Timer registration didn't work, we won't get a callback */
            status = ATOM_ERR_TIMER;
        }
        else
        {
            /* Exit critical region */
            CRITICAL_END ();

            /* Successful timer registration */
            status = ATOM_OK;

            /* Current thread should now block, schedule in another */
            atomSched (FALSE);
        }
    }

    return (status);
}
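
/*
 * Editor's note: illustrative sketch only, not part of the original source.
 * A thread sleeping for roughly half a second. The +1 follows the note in
 * the function header: the extra tick guarantees at least the requested
 * delay wherever in the current tick period the call is made.
 */
#if 0 /* usage sketch, not built */
static void app_thread_func (uint32_t param)
{
    while (1)
    {
        /* ... periodic work ... */

        if (atomTimerDelay ((SYSTEM_TICKS_PER_SEC / 2) + 1) != ATOM_OK)
        {
            /* ATOM_ERR_PARAM, ATOM_ERR_CONTEXT or ATOM_ERR_TIMER */
        }
    }
}
#endif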


/**
 * \b atomTimerCallbacks
 *
 * This is an internal function not for use by application code.
 *
 * Find any callbacks that are due and call them.
 *
 * @return None
 */
static void atomTimerCallbacks (void)
{
    ATOM_TIMER *prev_ptr, *next_ptr;

    /*
     * Walk the list decrementing each timer's remaining ticks count and
     * looking for due callbacks.
     */
    prev_ptr = next_ptr = timer_queue;
    while (next_ptr)
    {
        /* Is this entry due? */
        if (--(next_ptr->cb_ticks) == 0)
        {
            /* Remove the entry from the timer list */
            if (next_ptr == timer_queue)
            {
                /* We're removing the list head */
                timer_queue = next_ptr->next_timer;
            }
            else
            {
                /* We're removing a mid or tail timer */
                prev_ptr->next_timer = next_ptr->next_timer;
            }

            /* Call the registered callback */
            if (next_ptr->cb_func)
            {
                next_ptr->cb_func (next_ptr->cb_data);
            }

            /* Do not update prev_ptr, we have just removed this one */

        }

        /* Entry is not due, leave it in there with its count decremented */
        else
        {
            /*
             * Update prev_ptr to this entry. We will need it if we want
             * to remove a mid or tail timer.
             */
            prev_ptr = next_ptr;
        }

        /* Move on to the next in the list */
        next_ptr = next_ptr->next_timer;

    }

}


/**
 * \b atomTimerDelayCallback
 *
 * This is an internal function not for use by application code.
 *
 * Callback for atomTimerDelay() calls. Wakes up the sleeping thread.
 *
 * @param[in] cb_data Callback parameter (DELAY_TIMER ptr for sleeping thread)
 *
 * @return None
 */
static void atomTimerDelayCallback (POINTER cb_data)
{
    DELAY_TIMER *timer_data_ptr;
    CRITICAL_STORE;

    /* Get the DELAY_TIMER structure pointer */
    timer_data_ptr = (DELAY_TIMER *)cb_data;

    /* Check parameter is valid */
    if (timer_data_ptr)
    {
        /* Enter critical region */
        CRITICAL_START ();

        /* Put the thread on the ready queue */
        (void)tcbEnqueuePriority (&tcbReadyQ, timer_data_ptr->tcb_ptr);

        /* Exit critical region */
        CRITICAL_END ();

        /*
         * Don't call the scheduler yet. The ISR exit routine will do this
         * in case there are other callbacks to be made, which may also make
         * threads ready.
         */
    }
}

60
kernel/atomtimer.h
Executable file
@@ -0,0 +1,60 @@
/*
 * Copyright (c) 2010, Kelvin Lawson. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. No personal names or organizations' names associated with the
 *    Atomthreads project may be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE ATOMTHREADS PROJECT AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef __ATOM_TIMER_H
#define __ATOM_TIMER_H

#include "atomuser.h"

/* Callback function prototype */
typedef void ( * TIMER_CB_FUNC ) ( POINTER cb_data ) ;

/* Data structures */

/* Timer descriptor */
typedef struct atom_timer
{
    TIMER_CB_FUNC cb_func;    /* Callback function */
    POINTER cb_data;          /* Pointer to callback parameter/data */
    uint32_t cb_ticks;        /* Ticks until callback */

    /* Internal data */
    struct atom_timer *next_timer;    /* Next timer in singly-linked list */

} ATOM_TIMER;

/* Function prototypes */

extern uint8_t atomTimerRegister (ATOM_TIMER *timer_ptr);
extern uint8_t atomTimerCancel (ATOM_TIMER *timer_ptr);
extern uint8_t atomTimerDelay (uint32_t ticks);
extern uint32_t atomTimeGet (void);
extern void atomTimeSet (uint32_t new_time);

#endif /* __ATOM_TIMER_H */

60
kernel/atomuser-template.h
Executable file
@@ -0,0 +1,60 @@
/*
 * Copyright (c) 2010, Kelvin Lawson. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. No personal names or organizations' names associated with the
 *    Atomthreads project may be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE ATOMTHREADS PROJECT AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef __ATOM_USER_H
#define __ATOM_USER_H


/* Required number of system ticks per second (normally 100 for 10ms tick) */
#define SYSTEM_TICKS_PER_SEC 100


/**
 * Architecture-specific types.
 * Uses the stdint.h naming convention, so if stdint.h is available on the
 * platform it is simplest to include it from this header.
 */
#define uint8_t unsigned char
#define uint16_t unsigned short
#define uint32_t unsigned long
#define uint64_t unsigned long long
#define int8_t char
#define int16_t short
#define int32_t long
#define int64_t long long
#define POINTER void *


/* Critical region protection */
#define CRITICAL_STORE uint8_t sreg
#define CRITICAL_START() sreg = SREG; cli();
#define CRITICAL_END() SREG = sreg


#endif /* __ATOM_USER_H */
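
/*
 * Editor's note: illustrative sketch only, not part of the original source.
 * How kernel and application code use the critical-region macros defined
 * above. In this AVR-flavoured template, SREG and cli() are assumed to come
 * from the toolchain's AVR headers; other ports substitute their own
 * interrupt lockout here.
 */
#if 0 /* usage sketch, not built */
static void example_protected_update (void)
{
    CRITICAL_STORE;     /* Reserves a local to hold the saved interrupt state */

    CRITICAL_START ();  /* Save interrupt state and disable interrupts */
    /* ... access data shared with interrupt handlers ... */
    CRITICAL_END ();    /* Restore the previously saved interrupt state */
}
#endif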