Mirror of https://github.com/drasko/codezero.git
Added a new system call, sys_time.

sys_time accumulates timer ticks into seconds, minutes, hours and days; converting the day count into a calendar date is left to the user, and it is not yet known whether the calculation is even roughly correct. Also reduced the two kmem_reclaim/kmem_grant system calls to a single kmem_control call.
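Since the kernel only exports a running day counter, turning it into a calendar date is left entirely to user space. A minimal sketch of such a conversion follows; it is not part of the commit, and it assumes the day counter starts at 1970-01-01 and uses the standard civil-from-days arithmetic (the helper and struct names are made up for illustration):

    #include <stdint.h>

    struct date { int year, month, day; };

    /* Convert a day count (assumed to begin at 1970-01-01) into y/m/d. */
    static struct date date_from_days(uint64_t days)
    {
        /* Shift the epoch to 0000-03-01 so leap days land at era ends. */
        int64_t z = (int64_t)days + 719468;
        int64_t era = (z >= 0 ? z : z - 146096) / 146097;
        uint32_t doe = (uint32_t)(z - era * 146097);            /* day of era  [0, 146096] */
        uint32_t yoe = (doe - doe / 1460 + doe / 36524
                        - doe / 146096) / 365;                  /* year of era [0, 399] */
        int64_t y = (int64_t)yoe + era * 400;
        uint32_t doy = doe - (365 * yoe + yoe / 4 - yoe / 100); /* day of year [0, 365] */
        uint32_t mp = (5 * doy + 2) / 153;                      /* month, March-based */
        struct date d;

        d.day = (int)(doy - (153 * mp + 2) / 5 + 1);
        d.month = (int)(mp < 10 ? mp + 3 : mp - 9);
        d.year = (int)(y + (d.month <= 2));
        return d;
    }

Feeding it day 0 gives 1970-01-01, which is an easy first check on whether the kernel-side accumulation drifts.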
@@ -48,8 +48,8 @@ struct kip {
         u8 api_version;
         u32 api_flags;
 
-        u32 kmem_reclaim;
-        u32 kmem_grant;
+        u32 kmem_control;
+        u32 time;
 
         u32 space_control;
         u32 thread_control;

@@ -21,9 +21,9 @@
 #define sys_map_offset 0x20
 #define sys_getid_offset 0x24
 #define sys_kread_offset 0x28
-#define sys_kmem_grant_offset 0x2C
-#define sys_kmem_reclaim_offset 0x30
-#define syscalls_end_offset sys_kmem_reclaim_offset
+#define sys_kmem_control_offset 0x2C
+#define sys_time_offset 0x30
+#define syscalls_end_offset sys_time_offset
 #define SYSCALLS_TOTAL ((syscalls_end_offset >> 2) + 1)
 
 int sys_ipc(struct syscall_args *);

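For reference, with syscalls_end_offset now equal to sys_time_offset (0x30), SYSCALLS_TOTAL works out to (0x30 >> 2) + 1 = 13. The same shift is what syscall_init() uses further down to index the jump table, so sys_kmem_control lands in slot 0x2C >> 2 = 11 and sys_time in slot 12, the last entry.
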
@@ -37,7 +37,7 @@ int sys_ipc_control(struct syscall_args *);
 int sys_map(struct syscall_args *);
 int sys_getid(struct syscall_args *);
 int sys_kread(struct syscall_args *);
-int sys_kmem_grant(struct syscall_args *);
-int sys_kmem_reclaim(struct syscall_args *);
+int sys_kmem_control(struct syscall_args *);
+int sys_time(struct syscall_args *);
 
 #endif /* __SYSCALL_H__ */

@@ -11,8 +11,8 @@
 #include INC_GLUE(memory.h)
 
 /* Ticks per second, try ticks = 1000 + timeslice = 1 for regressed preemption test. */
-#define HZ 10
-#define TASK_TIMESLICE_DEFAULT 500
+#define HZ 100
+#define TASK_TIMESLICE_DEFAULT 5000
 /* #define TASK_TIMESLICE_DEFAULT (HZ/100)*/
 
 static inline struct ktcb *current_task(void)

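A quick sanity check on the new constants: at HZ = 100 the timer fires every 10 ms, so the systime.thz counter added in the timer code further down wraps exactly once per second. If the timeslice is counted in timer ticks, raising TASK_TIMESLICE_DEFAULT from 500 to 5000 keeps its wall-clock length unchanged (500 ticks at 10 Hz and 5000 ticks at 100 Hz are both 50 seconds); that unit is an inference, not something the diff states.
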
@@ -42,11 +42,11 @@ void sched_runqueue_init(void);
 void sched_start_task(struct ktcb *task);
 void sched_resume_task(struct ktcb *task);
 void sched_suspend_task(struct ktcb *task);
-void sched_process_post_ipc(struct ktcb *, struct ktcb *);
+void sched_tell(struct ktcb *task, unsigned int flags);
 void scheduler_start(void);
 void sched_yield(void);
 void schedule(void);
 
 /* Asynchronous notifications to scheduler */
 void sched_notify_resume(struct ktcb *task);
 void sched_notify_sleep(struct ktcb *task);

@@ -83,33 +83,28 @@ int validate_granted_pages(unsigned long pfn, int npages)
  * this memory is used for thread creation and memory mapping, (e.g. new
  * page tables, page middle directories, per-task kernel stack etc.)
  */
-int sys_kmem_grant(struct syscall_args *regs)
+int sys_kmem_control(struct syscall_args *regs)
 {
         unsigned long pfn = (unsigned long)regs->r0;
         int npages = (int)regs->r1;
+        int grant = (int)regs->r2;
 
-        /*
-         * Check if given set of pages are outside the pages already
-         * owned by the kernel.
-         */
-        if (validate_granted_pages(pfn, npages) < 0)
-                return -EINVAL;
+        /* Pager is granting us pages */
+        if (grant) {
+                /*
+                 * Check if given set of pages are outside the pages already
+                 * owned by the kernel.
+                 */
+                if (validate_granted_pages(pfn, npages) < 0)
+                        return -EINVAL;
 
-        /* Add the granted pages to the allocator */
-        if (pgalloc_add_new_grant(pfn, npages))
+                /* Add the granted pages to the allocator */
+                if (pgalloc_add_new_grant(pfn, npages))
                         BUG();
+        } else /* Reclaim not implemented yet */
+                BUG();
 
         return 0;
 }
 
-/* FIXME:
- * The pager reclaims memory from the kernel whenever it thinks this is just.
- */
-int sys_kmem_reclaim(struct syscall_args *regs)
-{
-        BUG();
-        return 0;
-}

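Combined with the library changes further down, a pager would drive the merged call roughly as sketched here. The wrapper and flag names are invented for illustration; only the l4_kmem_control(pfn, npages, grant) signature and the behaviour of sys_kmem_control() above come from the diff:

    /* Hypothetical names for the third (grant) argument. */
    #define KMEM_GRANT   1
    #define KMEM_RECLAIM 0

    /* Grant pages to the kernel; sys_kmem_control() rejects pages the
     * kernel already owns with -EINVAL. */
    static int give_kernel_memory(unsigned long pfn, int npages)
    {
        return l4_kmem_control(pfn, npages, KMEM_GRANT);
    }

    /* The reclaim direction exists in the ABI (grant = 0), but the
     * kernel side is not implemented yet and currently BUG()s. */
    static int take_back_kernel_memory(unsigned long pfn, int npages)
    {
        return l4_kmem_control(pfn, npages, KMEM_RECLAIM);
    }
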
@@ -28,7 +28,7 @@ BEGIN_PROC(arm_system_calls)
         swi 0x14 @ memory_control /* 0x20 */
         swi 0x14 @ getid /* 0x24 */
         swi 0x14 @ kread /* 0x28 */
-        swi 0x14 @ kmem_grant /* 0x2C */
-        swi 0x14 @ kmem_reclaim /* 0x30 */
+        swi 0x14 @ kmem_control /* 0x2C */
+        swi 0x14 @ time /* 0x30 */
 END_PROC(arm_system_calls)

@@ -10,7 +10,10 @@
 #include <l4/generic/irq.h>
 #include <l4/generic/scheduler.h>
 #include <l4/generic/time.h>
+#include <l4/generic/space.h>
 #include INC_ARCH(exception.h)
+#include <l4/api/syscall.h>
+#include <l4/api/errno.h>
 
 /* TODO:
  * 1) Add RTC support.
@@ -18,7 +21,7 @@
  * 3) Jiffies must be initialised to a reasonable value.
  */
 
-volatile u32 jiffies;
+volatile u32 jiffies = 0;
 
 static inline void increase_jiffies(void)
 {
@@ -26,36 +29,75 @@ static inline void increase_jiffies(void)
 }
 
 
-static int noticks_noresched = 0;
+/* Represents time since epoch */
+struct time_info {
+        int reader;
+        u32 thz; /* Ticks in this hertz so far */
+        u32 sec;
+        u32 min;
+        u32 hour;
+        u64 day;
+};
+
+static struct time_info systime = { 0 };
 
 /*
- * Check preemption anomalies:
- *
- * This checks how many times no rescheduling has occured even though ticks
- * reached zero. This suggests that preemption was enabled for more than a timer
- * interval. Normally, even if a preemption irq occured during a non-preemptive
- * state, preemption is *guaranteed* to occur before the next irq, provided that
- * the non-preemptive period is less than a timer irq interval (and it must be).
- *
- * Time:
- *
- * |-|---------------------|-|-------------------->
- *   | V                     | V
- *   | Preemption irq()      | Next irq.
- *   V                       V
- * preempt_disabled()        preempt_enabled() && preemption;
+ * A terribly basic (probably erroneous)
+ * rule-of-thumb time calculation.
  */
-void check_noticks_noresched(void)
+void update_system_time(void)
 {
-        if (!current->ticks_left)
-                noticks_noresched++;
+        /* Did we interrupt a reader? Tell it to retry */
+        if (systime.reader)
+                systime.reader = 0;
 
-        if (noticks_noresched >= 2) {
-                printk("Warning, no ticks and yet no rescheduling "
-                       "for %d times.\n", noticks_noresched);
-                printk("Spending more than a timer period"
-                       " as nonpreemptive!!!\n");
+        /* Increase just like jiffies, but reset every HZ */
+        systime.thz++;
+
+        if (systime.thz == HZ) {
+                systime.thz = 0;
+                systime.sec++;
+        }
+        if (systime.sec == 60) {
+                systime.sec = 0;
+                systime.min++;
+        }
+        if (systime.min == 60) {
+                systime.min = 0;
+                systime.hour++;
+        }
+        if (systime.hour == 24) {
+                systime.hour = 0;
+                systime.day++;
         }
 }
 
+/* Read system time */
+int sys_time(struct syscall_args *args)
+{
+        struct time_info *ti = (struct time_info *)args->r0;
+        int retries = 20;
+
+        if (check_access((unsigned long)ti, sizeof(*ti), MAP_USR_RW_FLAGS) < 0)
+                return -EINVAL;
+
+        while(retries > 0) {
+                systime.reader = 1;
+                memcpy(ti, &systime, sizeof(*ti));
+                retries--;
+
+                if (systime.reader)
+                        break;
+        }
+
+        /*
+         * No need to reset reader since it will be reset
+         * on next timer. If no retries return busy.
+         */
+        if (!retries)
+                return -EBUSY;
+        else
+                return 0;
+}
+
 void update_process_times(void)

@@ -64,17 +106,10 @@ void update_process_times(void)
 
         BUG_ON(cur->ticks_left < 0);
 
-        /*
-         * If preemption is disabled we stop reducing ticks when it reaches 0
-         * but set need_resched so that as soon as preempt-enabled, scheduling
-         * occurs.
-         */
         if (cur->ticks_left == 0) {
                 need_resched = 1;
-                // check_noticks_noresched();
                 return;
         }
-        // noticks_noresched = 0;
 
         if (in_kernel())
                 cur->kernel_time++;
@@ -91,6 +126,8 @@ int do_timer_irq(void)
 {
         increase_jiffies();
         update_process_times();
+        update_system_time();
 
         return IRQ_HANDLED;
 }

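On the user side, a caller of the new time syscall has to cope with -EBUSY, which sys_time() returns when the timer interrupt keeps invalidating its copy. A rough wrapper is sketched below; it assumes a userspace struct with the same layout as the kernel's time_info, that the library's u32/u64 typedefs are in scope, and that passing set = 0 to l4_time() means "get", as the assembly stub's comment further down describes:

    /* Must match the kernel's struct time_info layout exactly. */
    struct time_info {
        int reader;
        u32 thz;    /* ticks within the current second */
        u32 sec;
        u32 min;
        u32 hour;
        u64 day;
    };

    /* Illustrative wrapper: retry while the kernel reports torn copies. */
    static int read_system_time(struct time_info *ti)
    {
        int err;

        do {
            err = l4_time(ti, 0);   /* 0 = get, 1 = set */
        } while (err == -EBUSY);

        return err;
    }

The unbounded retry here is tolerable only because sys_time() itself gives up after 20 attempts per call.
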
@@ -26,8 +26,8 @@ void kip_init_syscalls(void)
         kip.thread_switch = ARM_SYSCALL_PAGE + sys_thread_switch_offset;
         kip.schedule = ARM_SYSCALL_PAGE + sys_schedule_offset;
         kip.getid = ARM_SYSCALL_PAGE + sys_getid_offset;
-        kip.kmem_grant = ARM_SYSCALL_PAGE + sys_kmem_grant_offset;
-        kip.kmem_reclaim = ARM_SYSCALL_PAGE + sys_kmem_reclaim_offset;
+        kip.kmem_control = ARM_SYSCALL_PAGE + sys_kmem_control_offset;
+        kip.time = ARM_SYSCALL_PAGE + sys_time_offset;
 }
 
 /* Jump table for all system calls. */
@@ -47,11 +47,11 @@ void syscall_init()
         syscall_table[sys_getid_offset >> 2] = (syscall_fn_t)sys_getid;
         syscall_table[sys_unmap_offset >> 2] = (syscall_fn_t)sys_unmap;
         syscall_table[sys_space_control_offset >> 2] = (syscall_fn_t)sys_space_control;
         syscall_table[sys_ipc_control_offset >> 2] = (syscall_fn_t)sys_ipc_control;
         syscall_table[sys_map_offset >> 2] = (syscall_fn_t)sys_map;
         syscall_table[sys_kread_offset >> 2] = (syscall_fn_t)sys_kread;
-        syscall_table[sys_kmem_grant_offset >> 2] = (syscall_fn_t)sys_kmem_grant;
-        syscall_table[sys_kmem_reclaim_offset >> 2] = (syscall_fn_t)sys_kmem_reclaim;
+        syscall_table[sys_kmem_control_offset >> 2] = (syscall_fn_t)sys_kmem_control;
+        syscall_table[sys_time_offset >> 2] = (syscall_fn_t)sys_time;
 
         add_mapping(virt_to_phys(&__syscall_page_start),
                     ARM_SYSCALL_PAGE, PAGE_SIZE, MAP_USR_RO_FLAGS);

@@ -64,13 +64,14 @@ typedef int (*__l4_exchange_registers_t)(unsigned int pc, unsigned int sp,
 extern __l4_exchange_registers_t __l4_exchange_registers;
 int l4_exchange_registers(unsigned int pc, unsigned int sp, int pager, l4id_t tid);
 
-typedef int (*__l4_kmem_reclaim_t)(unsigned long *pfn, int *npages);
-extern __l4_kmem_reclaim_t __l4_kmem_reclaim;
-int l4_kmem_reclaim(unsigned long *pfn, int *npages);
+typedef int (*__l4_kmem_control_t)(unsigned long pfn, int npages, int grant);
+extern __l4_kmem_control_t __l4_kmem_control;
+int l4_kmem_control(unsigned long pfn, int npages, int grant);
 
+typedef int (*__l4_time_t)(void *time_info, int set);
+extern __l4_time_t __l4_time;
+int l4_time(void *time_info, int set);
 
-typedef int (*__l4_kmem_grant_t)(unsigned long pfn, int npages);
-extern __l4_kmem_grant_t __l4_kmem_grant;
-int l4_kmem_grant(unsigned long pfn, int npages);
 
 
 /* To be supplied by server tasks. */

@@ -139,49 +139,4 @@ static inline void *l4_unmap_helper(void *virt, int npages)
         return virt_to_phys(virt);
 }
 
-/*
- * A helper to produce grant ipc between a pager and its client, or a
- * synchronous syscall to the kernel in case the grant is to the kernel.
- */
-static inline int l4_grant_pages(unsigned long pfn, int npages, l4id_t tid)
-{
-        /* Only a pager can grant pages to kernel. */
-        if (tid == KERNEL_TID) {
-                /* Granting physical pages via a system call in kernel case. */
-                return l4_kmem_grant(pfn, npages);
-        } else {
-                /*
-                 * FIXME: This should set up appropriate message registers and
-                 * call l4_ipc() on the target thread. Pages given are virtual.
-                 */
-                while(1);
-        }
-        return 0;
-}
-
-/*
- * NOTE: This is just brainstroming yet.
- * A helper to reclaim unused pages. A pager can reclaim pages from kernel or
- * other tasks this way.
- */
-static inline int l4_reclaim_pages(l4id_t tid)
-{
-        unsigned long pfn;
-        int npages;
-
-        if (tid == KERNEL_TID) {
-                /*
-                 * A single contiguous sequence of physical pages are returned
-                 * by kernel via a syscall. Simpler the better for now.
-                 */
-                l4_kmem_reclaim(&pfn, &npages);
-        } else {
-                /*
-                 * An ipc to a task where pfn and npages come in message regs.
-                 */
-                while(1);
-        }
-        return 0;
-}
-
 #endif /* __L4LIB_SYSLIB_H__ */

@@ -83,27 +83,28 @@ END_PROC(l4_unmap)
 
 /*
  * System call that grants a set of pages to the kernel.
- * @r0 = physical pfn, @r1 = number of pages
+ * @r0 = physical pfn, @r1 = number of pages, @r2 = whether to
+ * grant or reclaim kernel memory. grant = 1, reclaim = 0.
  */
-BEGIN_PROC(l4_kmem_grant)
+BEGIN_PROC(l4_kmem_control)
         stmfd sp!, {lr}
-        ldr r12, =__l4_kmem_grant
+        ldr r12, =__l4_kmem_control
         mov lr, pc
         ldr pc, [r12]
         ldmfd sp!, {pc} @ Restore original lr and return.
-END_PROC(l4_kmem_grant)
+END_PROC(l4_kmem_control)
 
 /*
- * System call that reclaims a set of pages from the kernel.
- * @r0 = ptr to physical pfn, @r1 = ptr to number of pages
+ * System call that gets or sets the time info structure.
+ * @r0 = ptr to time structure @r1 = set or get. set = 1, get = 0.
  */
-BEGIN_PROC(l4_kmem_reclaim)
+BEGIN_PROC(l4_time)
         stmfd sp!, {lr}
-        ldr r12, =__l4_kmem_reclaim
+        ldr r12, =__l4_time
         mov lr, pc
         ldr pc, [r12]
         ldmfd sp!, {pc} @ Restore original lr and return.
-END_PROC(l4_kmem_reclaim)
+END_PROC(l4_time)
 
 /*
  * System call that controls thread creation, destruction and modification.

@@ -23,8 +23,8 @@ __l4_thread_control_t __l4_thread_control = 0;
 __l4_ipc_control_t __l4_ipc_control = 0;
 __l4_space_control_t __l4_space_control = 0;
 __l4_exchange_registers_t __l4_exchange_registers = 0;
-__l4_kmem_grant_t __l4_kmem_grant = 0;
-__l4_kmem_reclaim_t __l4_kmem_reclaim = 0;
+__l4_kmem_control_t __l4_kmem_control = 0;
+__l4_time_t __l4_time = 0;
 
 struct kip *kip;

@@ -120,8 +120,8 @@ void __l4_init(void)
         __l4_space_control= (__l4_space_control_t)kip->space_control;
         __l4_exchange_registers =
                 (__l4_exchange_registers_t)kip->exchange_registers;
-        __l4_kmem_grant = (__l4_kmem_grant_t)kip->kmem_grant;
-        __l4_kmem_reclaim = (__l4_kmem_reclaim_t)kip->kmem_reclaim;
+        __l4_kmem_control = (__l4_kmem_control_t)kip->kmem_control;
+        __l4_time = (__l4_time_t)kip->time;
 
         utcb_init();
 }

@@ -50,7 +50,7 @@ void init_mm(struct initdata *initdata)
         // printf("%s: Initialised utcb address pool.\n", __TASKNAME__);
 
         /* Give the kernel some memory to use for its allocators */
-        l4_kmem_grant(__pfn(alloc_page(__pfn(SZ_1MB))), __pfn(SZ_1MB));
+        l4_kmem_control(__pfn(alloc_page(__pfn(SZ_1MB))), __pfn(SZ_1MB), 1);
 }
 
 void initialise(void)

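As a closing worked example of the pager-side call: assuming 4 KiB pages and __pfn(x) = x >> 12, __pfn(SZ_1MB) is 256, so init_mm() allocates a 256-page (1 MB) region, converts its address to a page frame number, and hands it to the kernel with the grant flag set to 1.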