Added per-task ipc flags checking instead of the active task flags.

Previously during ipc copy, only the currently active task's flags were
checked. This meant that the flags of whoever was doing the actual copy were
used in the ipc. Now flags are stored in the ktcb and checked by the copy routine.

Current use of the flags is to determine short/full/extended ipc.
This commit is contained in:
Bahadir Balban
2009-05-28 11:50:39 +03:00
parent 53310aa31b
commit b977e6597e
6 changed files with 88 additions and 52 deletions

View File

@@ -146,6 +146,7 @@
/* Codezero specific error codes */
#define EACTIVE 132 /* Task active */
#define ENOIPC 133 /* General IPC error */
#ifdef __KERNEL__

View File

@@ -17,9 +17,6 @@
#include INC_GLUE(context.h)
#include INC_SUBARCH(mm.h)
/*
* Bit mappings for the ktcb flags field
*/
/*
* These are a mixture of flags that indicate the task is
@@ -30,13 +27,6 @@
#define TASK_SUSPENDING (1 << 1)
#define TASK_RESUMING (1 << 2)
/* IPC resulted in a fault error (For ipcs that cannot page fault) */
#define IPC_EFAULT (1 << 3)
/* IPC type is encoded in task flags in bits [7:4] */
#define TASK_FLAGS_IPC_TYPE_MASK 0xF0
#define TASK_FLAGS_IPC_TYPE_SHIFT 4
/* Task states */
enum task_state {
TASK_INACTIVE = 0,
@@ -73,6 +63,9 @@ struct ktcb {
/* Flags to indicate various task status */
unsigned int flags;
/* IPC flags */
unsigned int ipc_flags;
/* Lock for blocking thread state modifications via a syscall */
struct mutex thread_control_lock;
@@ -144,21 +137,27 @@ static inline void set_task_ids(struct ktcb *task, struct task_ids *ids)
static inline void tcb_set_ipc_flags(struct ktcb *task,
unsigned int flags)
{
task->flags |= ((flags & L4_IPC_FLAGS_TYPE_MASK)
<< TASK_FLAGS_IPC_TYPE_SHIFT) &
TASK_FLAGS_IPC_TYPE_MASK;
task->ipc_flags = flags;
}
static inline unsigned int tcb_get_ipc_flags(struct ktcb *task)
{
return ((task->flags & TASK_FLAGS_IPC_TYPE_MASK)
>> TASK_FLAGS_IPC_TYPE_SHIFT)
& L4_IPC_FLAGS_TYPE_MASK;
return task->ipc_flags;
}
static inline void tcb_set_ipc_type(struct ktcb *task,
unsigned int type)
{
task->ipc_flags = type & L4_IPC_FLAGS_TYPE_MASK;
}
static inline unsigned int tcb_get_ipc_type(struct ktcb *task)
{
return task->ipc_flags & L4_IPC_FLAGS_TYPE_MASK;
}
#define THREAD_IDS_MAX 1024
#define SPACE_IDS_MAX 1024
#define TGROUP_IDS_MAX 1024
extern struct id_pool *thread_id_pool;

View File

@@ -87,6 +87,13 @@
#include INC_GLUE(memlayout.h)
#if defined (__KERNEL__)
/* Kernel-only flags */
#define L4_IPC_FLAGS_ERROR_MASK 0xF0000000
#define L4_IPC_FLAGS_ERROR_SHIFT 28
#define L4_IPC_EFAULT (1 << 28)
#define L4_IPC_ENOIPC (1 << 29)
struct utcb {
u32 mr[MR_TOTAL]; /* MRs that are mapped to real registers */
u32 saved_tag; /* Saved tag field for stacked ipcs */

View File

@@ -104,41 +104,50 @@ int ipc_extended_copy(struct ktcb *to, struct ktcb *from)
* L4_ANYTHREAD. This is done for security since the receiver cannot trust
* the sender info provided by the sender task.
*/
int ipc_msg_copy(struct ktcb *to, struct ktcb *from,
unsigned int current_flags)
int ipc_msg_copy(struct ktcb *to, struct ktcb *from)
{
int ret = 0;
unsigned int recv_ipc_type;
unsigned int send_ipc_type;
unsigned int *mr0_dst;
int ret = 0;
#if 0
unsigned int recv_ipc_type = tcb_get_ipc_flags(to);
unsigned int send_ipc_type = tcb_get_ipc_flags(from);
recv_ipc_type = tcb_get_ipc_type(to);
send_ipc_type = tcb_get_ipc_type(from);
if (recv_ipc_type == L4_IPC_FLAGS_FULL ||
send_ipc_type == L4_IPC_FLAGS_FULL) {
ret = ipc_full_copy(to, from);
}
if (recv_ipc_type == L4_IPC_FLAGS_SHORT)
/*
* Check ipc type flags of both parties and use the following rules:
* Check ipc type flags of both parties and
* use the following rules:
*
* SHORT SHORT -> SHORT IPC
* FULL X -> FULL IPC
* EXTENDED EXTENDED-> EXTENDED IPC
* EXTENDED X -> X IPC
* SHORT SHORT -> SHORT IPC
* FULL FULL/SHORT -> FULL IPC
* EXTENDED EXTENDED -> EXTENDED IPC
* EXTENDED NON-EXTENDED -> ENOIPC
*/
#endif
/* Check type of utcb copying and do it */
switch (current_flags & L4_IPC_FLAGS_TYPE_MASK) {
switch(recv_ipc_type) {
case L4_IPC_FLAGS_SHORT:
ret = ipc_short_copy(to, from);
if (send_ipc_type == L4_IPC_FLAGS_SHORT)
ret = ipc_short_copy(to, from);
if (send_ipc_type == L4_IPC_FLAGS_FULL)
ret = ipc_full_copy(to, from);
if (send_ipc_type == L4_IPC_FLAGS_EXTENDED)
ret = -ENOIPC;
break;
case L4_IPC_FLAGS_FULL:
ret = ipc_full_copy(to, from);
if (send_ipc_type == L4_IPC_FLAGS_SHORT)
ret = ipc_full_copy(to, from);
if (send_ipc_type == L4_IPC_FLAGS_FULL)
ret = ipc_full_copy(to, from);
if (send_ipc_type == L4_IPC_FLAGS_EXTENDED)
ret = -ENOIPC;
break;
case L4_IPC_FLAGS_EXTENDED:
ret = ipc_extended_copy(to, from);
if (send_ipc_type == L4_IPC_FLAGS_EXTENDED)
ret = ipc_extended_copy(to, from);
if (send_ipc_type == L4_IPC_FLAGS_SHORT)
ret = -ENOIPC;
if (send_ipc_type == L4_IPC_FLAGS_FULL)
ret = -ENOIPC;
break;
}
@@ -163,11 +172,17 @@ int sys_ipc_control(syscall_context_t *regs)
void ipc_signal_error(struct ktcb *sleeper, int retval)
{
/*
* Set ipc error flag in receiver.
* Only EFAULT is expected for now
* Only EFAULT and ENOIPC are expected for now
*/
BUG_ON(retval != -EFAULT);
sleeper->flags |= IPC_EFAULT;
BUG_ON(retval != -EFAULT && retval != -ENOIPC);
/*
* Set ipc error flag for sleeper.
*/
if (retval == -EFAULT)
sleeper->ipc_flags |= L4_IPC_EFAULT;
if (retval == -ENOIPC)
sleeper->ipc_flags |= L4_IPC_ENOIPC;
}
/*
@@ -184,11 +199,17 @@ int ipc_handle_errors(void)
}
/* Did ipc fail with a fault error? */
if (current->flags & IPC_EFAULT) {
current->flags &= ~IPC_EFAULT;
if (current->ipc_flags & L4_IPC_EFAULT) {
current->ipc_flags &= ~L4_IPC_EFAULT;
return -EFAULT;
}
/* Did ipc fail with a general ipc error? */
if (current->ipc_flags & L4_IPC_ENOIPC) {
current->ipc_flags &= ~L4_IPC_ENOIPC;
return -ENOIPC;
}
return 0;
}
@@ -231,7 +252,7 @@ int ipc_send(l4id_t recv_tid, unsigned int flags)
spin_unlock(&wqhs->slock);
/* Copy message registers */
if ((ret = ipc_msg_copy(receiver, current, flags)) < 0)
if ((ret = ipc_msg_copy(receiver, current)) < 0)
ipc_signal_error(receiver, ret);
// printk("%s: (%d) Waking up (%d)\n", __FUNCTION__,
@@ -295,8 +316,7 @@ int ipc_recv(l4id_t senderid, unsigned int flags)
spin_unlock(&wqhs->slock);
/* Copy message registers */
if ((ret = ipc_msg_copy(current, sleeper,
flags)) < 0)
if ((ret = ipc_msg_copy(current, sleeper)) < 0)
ipc_signal_error(sleeper, ret);
// printk("%s: (%d) Waking up (%d)\n",
@@ -424,7 +444,6 @@ int ipc_recv_extended(l4id_t sendertid, unsigned int flags)
return 0;
}
/*
* In extended IPC, userspace buffers are copied to process
* kernel stack before engaging in real calls ipc. If page fault
@@ -456,6 +475,8 @@ int ipc_send_extended(l4id_t recv_tid, unsigned int flags)
/* Check size is good */
if (size > L4_IPC_EXTENDED_MAX_SIZE)
return -EINVAL;
/* Set extended ipc copy size */
current->extended_ipc_size = size;
/*

View File

@@ -32,6 +32,7 @@ void fault_ipc_to_pager(u32 faulty_pc, u32 fsr, u32 far)
u32 mr[MR_TOTAL] = { [MR_TAG] = L4_IPC_TAG_PFAULT,
[MR_SENDER] = current->tid };
fault_kdata_t *fault = (fault_kdata_t *)&mr[MR_UNUSED_START];
unsigned int saved_flags;
/* Fill in fault information to pass over during ipc */
fault->faulty_pc = faulty_pc;
@@ -57,9 +58,16 @@ void fault_ipc_to_pager(u32 faulty_pc, u32 fsr, u32 far)
((unsigned long)&mr[0] -
offsetof(syscall_context_t, r3));
/* Save current ipc flags and set current flags to short ipc */
saved_flags = tcb_get_ipc_flags(current);
tcb_set_ipc_flags(current, L4_IPC_FLAGS_SHORT);
/* Send ipc to the task's pager */
ipc_sendrecv(current->pagerid, current->pagerid, 0);
/* Restore ipc flags */
tcb_set_ipc_flags(current, saved_flags);
/*
* FIXME: CHECK TASK KILL REPLY !!!
* Here, pager has handled the request and sent us back a message.

View File

@@ -19,7 +19,7 @@ void wait_pager(l4id_t partner)
for (int i = 0; i < 6; i++)
write_mr(i, i);
l4_send(partner, L4_IPC_TAG_SYNC);
printf("Pager synced with us.\n");
// printf("Pager synced with us.\n");
}
pid_t parent_of_all;
@@ -51,8 +51,8 @@ void main(void)
if (parent_of_all == getpid())
ipc_extended_test();
exectest();
else
exectest();
while (1)
wait_pager(0);