Changes since April

Clean up of build directories.
Simplifications to capability model.
This commit is contained in:
Bahadir Balban
2010-06-01 15:08:13 +03:00
parent aef14b55ec
commit 6fa4884a5a
450 changed files with 10449 additions and 7383 deletions

View File

@@ -0,0 +1,131 @@
#ifndef __LIST_H__
#define __LIST_H__
#define L4_DEADWORD 0xDEADCCCC
/*
 * A node in a circular doubly-linked list.  An "empty" link points
 * to itself in both directions (see link_init()).  Embed this inside
 * a larger structure and recover the container with link_to_struct().
 */
struct link {
	struct link *next;	/* Next node (clockwise) */
	struct link *prev;	/* Previous node (counter-clockwise) */
};
/* Reset @l to the empty state: a link that references only itself */
static inline void link_init(struct link *l)
{
	l->prev = l->next = l;
}
/* Static initializer for an empty (self-referencing) link */
#define LINK_INIT(link) { &(link), &(link) }

/* Define and statically initialize an empty link named @l */
#define LINK_DECLARE(l) \
	struct link l = LINK_INIT(l)
#if !defined(__LINUX_CONTAINER__)
/*
 * Insert @new immediately after @list (i.e. at the head position):
 *
 *	list -> new -> (former list->next)
 */
static inline void list_insert(struct link *new, struct link *list)
{
	struct link *second = list->next;

	new->next = second;
	second->prev = new;
	new->prev = list;
	list->next = new;
}
/*
 * Insert @new immediately before @list (i.e. at the tail position):
 *
 *	(former list->prev) -> new -> list
 */
static inline void list_insert_tail(struct link *new, struct link *list)
{
	struct link *last = list->prev;

	new->prev = last;
	new->next = list;
	last->next = new;
	list->prev = new;
}
/*
 * Unlink @link from its list and poison its pointers so any
 * use-after-remove dereferences the recognizable L4_DEADWORD value.
 */
static inline void list_remove(struct link *link)
{
	/* Splice the node out of the chain */
	link->prev->next = link->next;
	link->next->prev = link->prev;
	/* Poison: the node is no longer on any list */
	link->next = (struct link *)L4_DEADWORD;
	link->prev = (struct link *)L4_DEADWORD;
}
/*
 * Unlink @link from its list and reset it to the empty state,
 * so it can be re-inserted later (unlike list_remove(), which
 * poisons the node).
 */
static inline void list_remove_init(struct link *link)
{
	struct link *before = link->prev;
	struct link *after = link->next;

	/* Splice the node out of the chain */
	before->next = after;
	after->prev = before;
	/* Leave the node self-referencing (empty) */
	link->next = link;
	link->prev = link;
}
/*
 * Cut the entire list off @head and return the remainder.
 * @head is left empty (self-referencing); the returned pointer is
 * the first node of the now head-less circular list.
 */
static inline struct link *list_detach(struct link *head)
{
	struct link *rest = head->next;

	/* Remove head; the remaining nodes stay linked to each other */
	list_remove_init(head);
	return rest;
}
/*
 * Append the circular list starting at @new_list to the end of the
 * list delimited by @head/@end.
 *
 * NOTE: @end is passed by value, so the caller's end pointer is NOT
 * updated to the new tail.  (The original code ended with
 * "end = new_list;", which only assigned to the local copy and had
 * no effect; that dead assignment has been removed.)
 */
static inline void list_attach(struct link *new_list, struct link *head,
			       struct link *end)
{
	/* Attach the new list after the current end of the original list */
	end->next = new_list;
	new_list->prev = end;

	/* Walk to the last node of the attached (circular) list */
	while (new_list->next != end->next)
		new_list = new_list->next;

	/* Close the circle back to the original head */
	new_list->next = head;
	head->prev = new_list;
}
/* Return non-zero iff @list references only itself in both directions */
static inline int list_empty(struct link *list)
{
	return (list->next == list) && (list->prev == list);
}
/* Recover the containing structure from an embedded link pointer */
#define link_to_struct(link, struct_type, link_field) \
	container_of(link, struct_type, link_field)

/*
 * Iterate over every entry on the list headed by @link_start,
 * with @struct_ptr as the cursor.  The current entry must NOT be
 * removed inside the loop body — use the _removable_ variant below.
 */
#define list_foreach_struct(struct_ptr, link_start, link_field) \
	for (struct_ptr = link_to_struct((link_start)->next, typeof(*struct_ptr), link_field); \
	     &struct_ptr->link_field != (link_start); \
	     struct_ptr = link_to_struct(struct_ptr->link_field.next, typeof(*struct_ptr), link_field))

/*
 * Removal-safe iteration: @temp_ptr caches the next entry before the
 * body runs, so @struct_ptr may be unlinked (or freed) inside the loop.
 */
#define list_foreach_removable_struct(struct_ptr, temp_ptr, link_start, link_field) \
	for (struct_ptr = link_to_struct((link_start)->next, typeof(*struct_ptr), link_field), \
	     temp_ptr = link_to_struct((struct_ptr)->link_field.next, typeof(*struct_ptr), link_field);\
	     &struct_ptr->link_field != (link_start); \
	     struct_ptr = temp_ptr, temp_ptr = link_to_struct(temp_ptr->link_field.next, typeof(*temp_ptr), link_field))
#endif /* __LINUX_CONTAINER__ */
#endif /* __LIST_H__ */

View File

@@ -0,0 +1,44 @@
#ifndef __LIB_MATH_H__
#define __LIB_MATH_H__
#if !defined (__LINUX_CONTAINER__)
#if !defined pow
/*
 * Integer exponentiation by squaring: O(log exp) multiplies instead
 * of the original O(exp) loop, with identical results.
 *
 * Returns 1 for exp <= 0 (matching the original behavior: the loop
 * body never executed).  Large results overflow silently; callers
 * must keep val^exp within int range (signed overflow is UB).
 */
static inline int pow(int val, int exp)
{
	int res = 1;

	while (exp > 0) {
		if (exp & 1)
			res *= val;
		val *= val;
		exp >>= 1;
	}
	return res;
}
#endif
#if !defined min
/* Return the smaller of @x and @y */
static inline int min(int x, int y)
{
	if (x < y)
		return x;
	return y;
}
/* Return the larger of @x and @y */
static inline int max(int x, int y)
{
	if (x > y)
		return x;
	return y;
}
#endif
#endif /* !__LINUX_CONTAINER__ */
/*
 * Return non-zero iff the half-open range [a, b) intersects [c, d).
 * Two ranges overlap exactly when each one starts before the other
 * ends (De Morgan of "b <= c || a >= d", the disjoint condition).
 */
static inline int set_intersection(unsigned long a, unsigned long b,
				   unsigned long c, unsigned long d)
{
	return (b > c) && (a < d);
}
#endif /* __LIB_MATH_H__ */

View File

@@ -0,0 +1,45 @@
/*
* The elementary concurrency constructs.
*
* Copyright (C) 2007 Bahadir Balban
*/
#ifndef __LIB_MUTEX_H__
#define __LIB_MUTEX_H__
#include <l4lib/lib/string.h>
#include <l4lib/lib/spinlock.h>
#include <l4lib/lib/list.h>
#include <l4lib/lib/printk.h>
#include <l4lib/lib/wait.h>
#include INC_ARCH(mutex.h)
/* A mutex is a binary semaphore that can sleep. */
struct mutex {
	struct waitqueue_head wqh;	/* Tasks sleeping on this mutex */
	unsigned int lock;		/* Lock word; zeroed by mutex_init() */
};
/*
 * Initialize @mutex: zero the whole structure, then set up an empty
 * waitqueue for tasks that will sleep on it.
 */
static inline void mutex_init(struct mutex *mutex)
{
	memset(mutex, 0, sizeof(struct mutex));
	waitqueue_head_init(&mutex->wqh);
}
/* Out-of-line lock operations (defined elsewhere) */
int mutex_trylock(struct mutex *mutex);
int mutex_lock(struct mutex *mutex);
void mutex_unlock(struct mutex *mutex);
/* Non-blocking unlock variant — semantics defined at the definition site */
void mutex_unlock_async(struct mutex *mutex);
/* NOTE: Since spinlocks guard mutex acquiring & sleeping, no locks needed */
/* Increment the count at @cnt and return the new value (not atomic) */
static inline int mutex_inc(unsigned int *cnt)
{
	*cnt += 1;
	return *cnt;
}
/* Decrement the count at @cnt and return the new value (not atomic) */
static inline int mutex_dec(unsigned int *cnt)
{
	*cnt -= 1;
	return *cnt;
}
#endif /* __LIB_MUTEX_H__ */

View File

@@ -0,0 +1,90 @@
#ifndef __LIB_SPINLOCK_H__
#define __LIB_SPINLOCK_H__
#include <l4lib/lib/string.h>
#include <l4lib/generic/preempt.h>
#include L4LIB_INC_ARCH(irq.h)
#include L4LIB_INC_ARCH(mutex.h)
#if !defined(__LINUX_CONTAINER__)
/* A busy-waiting lock; the word is operated on by __spin_lock/__spin_unlock */
struct spinlock {
	unsigned int lock;	/* 0 when free (see DECLARE_SPINLOCK) */
};
#if !defined(__LINUX_CONTAINER__)
/* Define a spinlock variable statically initialized to the free state */
#define DECLARE_SPINLOCK(lockname) \
	struct spinlock lockname = { \
		.lock = 0, \
	}

/* Debug-build bookkeeping (CONFIG_DEBUG_SPINLOCKS); defined elsewhere */
void spin_lock_record_check(void *lock_addr);
void spin_unlock_delete_check(void *lock_addr);
/* Initialize @s to the unlocked state at runtime */
static inline void spin_lock_init(struct spinlock *s)
{
	memset(s, 0, sizeof(struct spinlock));
}
/*
 * - Guards from deadlock against local processes, but not local irqs.
 * - To be used for synchronising against processes on *other* cpus.
 *
 * On !CONFIG_SMP this degenerates to a bare preempt_disable(); the
 * lock word is never touched.
 */
static inline void spin_lock(struct spinlock *s)
{
	preempt_disable(); /* This must disable local preempt */
#if defined(CONFIG_SMP)
#if defined (CONFIG_DEBUG_SPINLOCKS)
	/* Debug builds: record the acquire for deadlock checking */
	spin_lock_record_check(s);
#endif
	__spin_lock(&s->lock);
#endif
}
/*
 * Release @s and re-enable preemption.  Mirrors spin_lock(): on
 * !CONFIG_SMP only the preempt count changes.
 */
static inline void spin_unlock(struct spinlock *s)
{
#if defined(CONFIG_SMP)
#if defined (CONFIG_DEBUG_SPINLOCKS)
	/* Debug builds: drop the acquire record */
	spin_unlock_delete_check(s);
#endif
	__spin_unlock(&s->lock);
#endif
	preempt_enable();
}
/*
 * - Guards from deadlock against local processes *and* local irqs.
 * - To be used for synchronising against processes and irqs
 *   on other cpus.
 *
 * @state receives the previous local irq state; pass it back to
 * spin_unlock_irq() to restore.
 */
static inline void spin_lock_irq(struct spinlock *s,
				 unsigned long *state)
{
	irq_local_disable_save(state);
#if defined(CONFIG_SMP)
#if defined (CONFIG_DEBUG_SPINLOCKS)
	/* Debug builds: record the acquire for deadlock checking */
	spin_lock_record_check(s);
#endif
	__spin_lock(&s->lock);
#endif
}
/*
 * Release @s and restore the local irq state saved by the matching
 * spin_lock_irq() call.
 */
static inline void spin_unlock_irq(struct spinlock *s,
				   unsigned long state)
{
#if defined(CONFIG_SMP)
#if defined (CONFIG_DEBUG_SPINLOCKS)
	/* Debug builds: drop the acquire record */
	spin_unlock_delete_check(s);
#endif
	__spin_unlock(&s->lock);
#endif
	irq_local_restore(state);
}
#endif
#endif /* __LINUX_CONTAINER__ */
#endif /* __LIB__SPINLOCK_H__ */

View File

@@ -0,0 +1,86 @@
#ifndef __LIB_WAIT_H__
#define __LIB_WAIT_H__
#include <l4lib/lib/list.h>
#include <l4lib/lib/spinlock.h>
/* Forward declaration: the task control block type, defined elsewhere */
struct ktcb;

/* One sleeper: links a task onto a waitqueue_head's task_list */
struct waitqueue {
	struct link task_list;	/* Link into waitqueue_head.task_list */
	struct ktcb *task;	/* The task sleeping on this entry */
};
/* Plain asynchronous wakeup: no flag bits set */
#define WAKEUP_ASYNC 0

enum wakeup_flags {
	WAKEUP_INTERRUPT = (1 << 0), /* Set interrupt flag for task */
	WAKEUP_SYNC = (1 << 1), /* Wake it up synchronously */
};
/*
 * Define and initialize a waitqueue entry @wq on the caller's stack:
 * its link starts empty (self-referencing) and it belongs to @tsk.
 * Note the expansion already ends in ';'.
 */
#define CREATE_WAITQUEUE_ON_STACK(wq, tsk) \
	struct waitqueue wq = { \
		.task_list = { &wq.task_list, &wq.task_list }, \
		.task = tsk, \
	};
/* Anchor for a queue of sleepers, protected by its own spinlock */
struct waitqueue_head {
	int sleepers;		/* Number of queued waiters */
	struct spinlock slock;	/* Protects sleepers and task_list */
	struct link task_list;	/* List of struct waitqueue entries */
};
/* Initialize @head: no sleepers, unlocked, empty task list */
static inline void waitqueue_head_init(struct waitqueue_head *head)
{
	memset(head, 0, sizeof(struct waitqueue_head));
	link_init(&head->task_list);
}

/* Associate/dissociate a task with the waitqueue it sleeps on
 * (defined elsewhere) */
void task_set_wqh(struct ktcb *task, struct waitqueue_head *wqh,
		  struct waitqueue *wq);
void task_unset_wqh(struct ktcb *task);
/*
 * Sleep if the given condition isn't true.
 * ret will tell whether condition was met
 * or we got interrupted (0, or -EINTR).
 *
 * The wqh spinlock is held across the condition test and the enqueue,
 * so a wakeup cannot be lost between the check and the sleep.
 *
 * FIX: the expansion previously ended in "} while(0);" — the trailing
 * semicolon made "if (x) WAIT_EVENT(...); else ..." a syntax error.
 * It now ends in "} while (0)" so the caller supplies the ';'.
 */
#define WAIT_EVENT(wqh, condition, ret)				\
do {								\
	ret = 0;						\
	for (;;) {						\
		unsigned long irqsave;				\
		spin_lock_irq(&(wqh)->slock, &irqsave);		\
		if (condition) {				\
			spin_unlock_irq(&(wqh)->slock, irqsave);\
			break;					\
		}						\
		CREATE_WAITQUEUE_ON_STACK(wq, current);		\
		task_set_wqh(current, wqh, &wq);		\
		(wqh)->sleepers++;				\
		list_insert_tail(&wq.task_list,			\
				 &(wqh)->task_list);		\
		/* printk("(%d) waiting...\n", current->tid); */\
		sched_prepare_sleep();				\
		spin_unlock_irq(&(wqh)->slock, irqsave);	\
		schedule();					\
		/* Did we wake up normally or get interrupted */\
		if (current->flags & TASK_INTERRUPTED) {	\
			current->flags &= ~TASK_INTERRUPTED;	\
			ret = -EINTR;				\
			break;					\
		}						\
	}							\
} while (0)
/* Wake one sleeper on @wqh, honoring enum wakeup_flags (defined elsewhere) */
void wake_up(struct waitqueue_head *wqh, unsigned int flags);
/* Wake a specific task; return value semantics at the definition site */
int wake_up_task(struct ktcb *task, unsigned int flags);
/* Wake every sleeper on @wqh */
void wake_up_all(struct waitqueue_head *wqh, unsigned int flags);
/* Unconditionally sleep on @wqh until woken */
int wait_on(struct waitqueue_head *wqh);
/* Two-phase wait: enqueue @wq first, complete the sleep later */
int wait_on_prepare(struct waitqueue_head *wqh, struct waitqueue *wq);
int wait_on_prepared_wait(void);
#endif /* __LIB_WAIT_H__ */