mirror of https://github.com/drasko/codezero.git
Unstable changes for ipc - Fork fails for some reason
@@ -104,6 +104,9 @@ struct ktcb {
        struct spinlock waitlock;
        struct waitqueue_head *waiting_on;
        struct waitqueue *wq;

        /* Extended ipc buffer, points to the space after ktcb */
        char extended_ipc_buffer[];
};

/* Per thread kernel stack unified on a single page. */
@@ -73,14 +73,17 @@
#define MR0_REGISTER r3
#define MR_RETURN_REGISTER r3

#define L4_IPC_FLAGS_SHORT 0x00000000 /* Short IPC involves just primary message registers */
#define L4_IPC_FLAGS_FULL 0x00000001 /* Full IPC involves full UTCB copy */
#define L4_IPC_FLAGS_EXTENDED 0x00000002 /* Extended IPC can page-fault and copy up to 2KB */
#define L4_IPC_FLAGS_MSG_INDEX_MASK 0x00000FF0 /* Index of message register with buffer pointer */
#define L4_IPC_FLAGS_MASK 0x0000000F
#define L4_IPC_FLAGS_TYPE_MASK 0x0000000F
#define L4_IPC_FLAGS_SIZE_MASK 0x0FFF0000
#define L4_IPC_FLAGS_SIZE_SHIFT 16
#define L4_IPC_FLAGS_MSG_INDEX_SHIFT 4

#define L4_IPC_EXTENDED_MAX_SIZE (SZ_1K*2)

#include INC_GLUE(memlayout.h)

#if defined (__KERNEL__)
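With these definitions the flags word splits into a 4-bit IPC type (bits 0-3), the index of the message register that carries the extended buffer pointer (bits 4-11), and the extended transfer size (bits 16-27). A minimal sketch of how a caller could pack such a word; the helper name is ours, only the constants come from the header above:

/* Illustrative only, not part of the patch: pack an extended-IPC
 * flags word from a message-register index and a buffer size. */
static inline unsigned int pack_extended_ipc_flags(unsigned int mr_index,
                                                   unsigned int size)
{
        unsigned int flags = L4_IPC_FLAGS_EXTENDED;

        flags |= (mr_index << L4_IPC_FLAGS_MSG_INDEX_SHIFT)
                 & L4_IPC_FLAGS_MSG_INDEX_MASK;
        flags |= (size << L4_IPC_FLAGS_SIZE_SHIFT)
                 & L4_IPC_FLAGS_SIZE_MASK;

        return flags;
}

extended_ipc_msg_index() and extended_ipc_msg_size() in src/api/ipc.c below perform the reverse unpacking on the kernel side.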
src/api/ipc.c | 218
@@ -25,13 +25,31 @@ enum IPC_TYPE {
        IPC_SENDRECV = 3,
};

int ipc_short_copy(struct ktcb *to, struct ktcb *from, unsigned int flags)
{
        unsigned int *mr0_src = KTCB_REF_MR0(from);
        unsigned int *mr0_dst = KTCB_REF_MR0(to);

        /* NOTE:
         * Make sure MR_TOTAL matches the number of registers saved on stack.
         */
        memcpy(mr0_dst, mr0_src, MR_TOTAL * sizeof(unsigned int));

        return 0;
}


/* Copy full utcb region from one task to another. */
int ipc_full_copy(struct ktcb *to, struct ktcb *from)
int ipc_full_copy(struct ktcb *to, struct ktcb *from, unsigned int flags)
{
        struct utcb *from_utcb = (struct utcb *)from->utcb_address;
        struct utcb *to_utcb = (struct utcb *)to->utcb_address;
        int ret;

        /* First do the short copy of primary mrs */
        if ((ret = ipc_short_copy(to, from, flags)) < 0)
                return ret;

        /* Check that utcb memory accesses won't fault us */
        if ((ret = tcb_check_and_lazy_map_utcb(to)) < 0)
                return ret;
@@ -45,6 +63,54 @@ int ipc_full_copy(struct ktcb *to, struct ktcb *from)
        return 0;
}

static inline int extended_ipc_msg_index(unsigned int flags)
{
        return (flags & L4_IPC_FLAGS_MSG_INDEX_MASK) >> L4_IPC_FLAGS_MSG_INDEX_SHIFT;
}

static inline int extended_ipc_msg_size(unsigned int flags)
{
        return (flags & L4_IPC_FLAGS_SIZE_MASK) >> L4_IPC_FLAGS_SIZE_SHIFT;
}

/*
 * Extended copy is asymmetric in that the copying always occurs from
 * the sender's kernel stack to the receiver's userspace buffers.
 */
int ipc_extended_copy(struct ktcb *to, struct ktcb *from, unsigned int flags)
{
        unsigned long msg_index;
        unsigned long ipc_address;
        unsigned int size;
        unsigned int *mr0_receiver;

        /*
         * Obtain the primary message register index
         * containing the extended ipc buffer address
         */
        msg_index = extended_ipc_msg_index(flags);

        /* Get the pointer to the primary message registers */
        mr0_receiver = KTCB_REF_MR0(to);

        /* Obtain the extended ipc address */
        ipc_address = (unsigned long)mr0_receiver[msg_index];

        /* Obtain the extended ipc size */
        size = extended_ipc_msg_size(flags);

        /* This ought to have been checked before coming here */
        BUG_ON(size > L4_IPC_EXTENDED_MAX_SIZE);

        /*
         * Copy from the sender's kernel stack buffer
         * to the receiver's paged-in userspace buffer
         */
        memcpy((void *)ipc_address, from->extended_ipc_buffer, size);

        return 0;
}

/*
 * Copies message registers from one ktcb stack to another. During the return
 * from the system call, the registers are popped from the stack. In the future
@@ -56,23 +122,27 @@ int ipc_full_copy(struct ktcb *to, struct ktcb *from)
 */
int ipc_msg_copy(struct ktcb *to, struct ktcb *from, unsigned int flags)
{
        unsigned int *mr0_src = KTCB_REF_MR0(from);
        unsigned int *mr0_dst = KTCB_REF_MR0(to);
        int ret = 0;
        unsigned int *mr0_dst;

        /* NOTE:
         * Make sure MR_TOTAL matches the number of registers saved on stack.
         */
        memcpy(mr0_dst, mr0_src, MR_TOTAL * sizeof(unsigned int));
        /* Check the type of utcb copying and do it */
        switch (flags & L4_IPC_FLAGS_TYPE_MASK) {
        case L4_IPC_FLAGS_SHORT:
                ret = ipc_short_copy(to, from, flags);
                break;
        case L4_IPC_FLAGS_FULL:
                ret = ipc_full_copy(to, from, flags);
                break;
        case L4_IPC_FLAGS_EXTENDED:
                ret = ipc_extended_copy(to, from, flags);
                break;
        }

        /* Save the sender id in case of an ANYTHREAD receiver */
        if (to->expected_sender == L4_ANYTHREAD) {
                mr0_dst = KTCB_REF_MR0(to);
                mr0_dst[MR_SENDER] = from->tid;
        }

        /* Check if full utcb copying is requested and do it */
        if (flags & L4_IPC_FLAGS_FULL)
                ret = ipc_full_copy(to, from);

        return ret;
}
@@ -126,28 +196,6 @@ int ipc_handle_errors(void)
 * waitqueue have been removed at that stage.
 */

int ipc_recv_extended(l4id_t recv_tid, unsigned int flags)
{
        return 0;
}

int ipc_sendrecv_extended(l4id_t to, l4id_t from, unsigned int flags)
{
        return 0;
}

int ipc_send_extended(l4id_t recv_tid, unsigned int flags)
{
        //struct ktcb *receiver = tcb_find(recv_tid);

        /*
         * First we copy userspace buffer to process kernel stack.
         * If we page fault, we only punish current process time.
         */
        return 0;
}


/* Interruptible ipc */
int ipc_send(l4id_t recv_tid, unsigned int flags)
{
@@ -307,6 +355,103 @@ int ipc_sendrecv(l4id_t to, l4id_t from, unsigned int flags)
        return ret;
}

int ipc_sendrecv_extended(l4id_t to, l4id_t from, unsigned int flags)
{
        return 0;
}

/*
 * In extended receive, the receive buffers are page-faulted in before
 * engaging in the real ipc.
 */
int ipc_recv_extended(l4id_t sendertid, unsigned int flags)
{
        unsigned long msg_index;
        unsigned long ipc_address;
        unsigned int size;
        unsigned int *mr0_current;
        int err;

        /*
         * Obtain the primary message register index
         * containing the extended ipc buffer address
         */
        msg_index = extended_ipc_msg_index(flags);

        /* Get the pointer to the primary message registers */
        mr0_current = KTCB_REF_MR0(current);

        /* Obtain the extended ipc address */
        ipc_address = (unsigned long)mr0_current[msg_index];

        /* Obtain the extended ipc size */
        size = extended_ipc_msg_size(flags);

        /* Check that the size is within bounds */
        if (size > L4_IPC_EXTENDED_MAX_SIZE)
                return -EINVAL;

        /* Page fault those pages on the current task if needed */
        if ((err = check_access(ipc_address, size,
                                MAP_USR_RW_FLAGS, 1)) < 0)
                return err;

        /*
         * Now we can engage in the real ipc; copying of the ipc data
         * will occur during message copying.
         */
        return ipc_recv(sendertid, flags);
}


/*
 * In extended IPC, userspace buffers are copied to the process
 * kernel stack before engaging in the real ipc call. If a page
 * fault occurs, only the current process's time is consumed.
 */
int ipc_send_extended(l4id_t recv_tid, unsigned int flags)
{
        unsigned long msg_index;
        unsigned long ipc_address;
        unsigned int size;
        unsigned int *mr0_current;
        int err;

        /*
         * Obtain the primary message register index
         * containing the extended ipc buffer address
         */
        msg_index = extended_ipc_msg_index(flags);

        /* Get the pointer to the primary message registers */
        mr0_current = KTCB_REF_MR0(current);

        /* Obtain the extended ipc address */
        ipc_address = (unsigned long)mr0_current[msg_index];

        /* Obtain the extended ipc size */
        size = extended_ipc_msg_size(flags);

        /* Check that the size is within bounds */
        if (size > L4_IPC_EXTENDED_MAX_SIZE)
                return -EINVAL;

        /* Page fault those pages on the current task if needed */
        if ((err = check_access(ipc_address, size,
                                MAP_USR_RW_FLAGS, 1)) < 0)
                return err;

        /*
         * It is now safe to access the user pages.
         * Copy the message from the user buffer onto the kernel stack.
         */
        memcpy(current->extended_ipc_buffer,
               (void *)ipc_address, size);

        /* Now we can engage in the real ipc */
        return ipc_send(recv_tid, flags);
}

static inline int __sys_ipc(l4id_t to, l4id_t from,
                            unsigned int ipc_type, unsigned int flags)
{
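The send path above expects userspace to have placed a buffer pointer in one of the primary message registers and to have encoded that register's index plus the transfer size in the flags word. A rough userspace-side sketch of such a call; write_mr(), l4_send() and EXT_BUF_MR are assumptions for illustration, not APIs confirmed by this patch:

#define EXT_BUF_MR      5       /* assumed: MR chosen to carry the buffer pointer */

int send_extended_example(l4id_t to, void *buf, unsigned int len)
{
        unsigned int flags = L4_IPC_FLAGS_EXTENDED;

        if (len > L4_IPC_EXTENDED_MAX_SIZE)
                return -EINVAL;

        /* Encode which MR holds the pointer and how many bytes to copy */
        flags |= (EXT_BUF_MR << L4_IPC_FLAGS_MSG_INDEX_SHIFT)
                 & L4_IPC_FLAGS_MSG_INDEX_MASK;
        flags |= (len << L4_IPC_FLAGS_SIZE_SHIFT) & L4_IPC_FLAGS_SIZE_MASK;

        write_mr(EXT_BUF_MR, (unsigned long)buf);       /* assumed MR accessor */

        /* Kernel side: ipc_send_extended() faults in buf, copies it into
         * current->extended_ipc_buffer, then falls through to ipc_send(). */
        return l4_send(to, flags);                      /* assumed syscall wrapper */
}

On the receive side the mirror image happens: ipc_recv_extended() pre-faults the receiver's buffer, and ipc_extended_copy() later copies from the sender's kernel-stack buffer into it.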
@@ -369,12 +514,13 @@ int sys_ipc(syscall_context_t *regs)
        unsigned int ipc_type = 0;
        int ret = 0;

#if 0
        if (regs->r2)
        /*
        if (flags)
                __asm__ __volatile__ (
                        "1:\n"
                        "b 1b\n");
#endif
        */

        /* Check arguments */
        if (from < L4_ANYTHREAD) {
                ret = -EINVAL;
@@ -145,6 +145,14 @@ struct address_space *address_space_create(struct address_space *orig)
        return space;
}

/*
 * FIXME: This does not guarantee that a kernel can access a user pointer.
 * A pager could map an address as requested by the kernel, and unmap it
 * before the kernel has accessed that user address. In order to fix this,
 * per-pte locks (via a bitmap) should be introduced, and map syscalls can
 * check if a pte is locked before going forward with a request.
 */

/*
 * Checks whether the given user address is a valid userspace address.
 * If so, whether it is currently mapped into its own address space.
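A possible shape for the per-pte lock bitmap suggested by the FIXME. Everything below (the bitmap, pte_lock_index(), the sizes) is a hypothetical sketch rather than code from this tree; the idea is only that the kernel would set the bit before dereferencing a checked user address, and that the map/unmap syscalls would back off while it is set:

#define PTE_LOCK_BITS           4096    /* assumed: number of tracked ptes per space */
#define PTE_LOCK_PAGE_SHIFT     12      /* assumed: 4K pages */
#define BITS_PER_WORD           (8 * sizeof(unsigned long))

static unsigned long pte_lock_map[PTE_LOCK_BITS / BITS_PER_WORD];

/* Hypothetical mapping from a user address to its lock bit */
static inline unsigned long pte_lock_index(unsigned long uaddr)
{
        return (uaddr >> PTE_LOCK_PAGE_SHIFT) % PTE_LOCK_BITS;
}

static inline void pte_lock(unsigned long uaddr)
{
        unsigned long i = pte_lock_index(uaddr);
        pte_lock_map[i / BITS_PER_WORD] |= 1UL << (i % BITS_PER_WORD);
}

static inline void pte_unlock(unsigned long uaddr)
{
        unsigned long i = pte_lock_index(uaddr);
        pte_lock_map[i / BITS_PER_WORD] &= ~(1UL << (i % BITS_PER_WORD));
}

static inline int pte_is_locked(unsigned long uaddr)
{
        unsigned long i = pte_lock_index(uaddr);
        return !!(pte_lock_map[i / BITS_PER_WORD] & (1UL << (i % BITS_PER_WORD)));
}

A map or unmap syscall would then test pte_is_locked() and return -EAGAIN (or wait) instead of tearing down a mapping the kernel is about to touch.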
@@ -85,14 +85,14 @@ static inline unsigned int l4_get_ipc_msg_index(unsigned int word)

static inline unsigned int l4_set_ipc_flags(unsigned int word, unsigned int flags)
{
        word &= ~L4_IPC_FLAGS_MASK;
        word |= flags & L4_IPC_FLAGS_MASK;
        word &= ~L4_IPC_FLAGS_TYPE_MASK;
        word |= flags & L4_IPC_FLAGS_TYPE_MASK;
        return word;
}

static inline unsigned int l4_get_ipc_flags(unsigned int word)
{
        return word & L4_IPC_FLAGS_MASK;
        return word & L4_IPC_FLAGS_TYPE_MASK;
}

static inline unsigned int l4_get_tag(void)
@@ -3,7 +3,7 @@

#define __TASKNAME__ "test0"

//#define TEST_VERBOSE_PRINT
#define TEST_VERBOSE_PRINT
#if defined (TEST_VERBOSE_PRINT)
#define test_printf(...) printf(__VA_ARGS__)
#else
@@ -18,6 +18,7 @@ int forktest(void)

        /* 16 forks */
        for (int i = 0; i < 4; i++) {
                test_printf("%d: Forking...\n", getpid());
                if (fork() < 0)
                        goto out_err;
        }
@@ -10,6 +10,7 @@
#include <stdio.h>
#include <tests.h>
#include <unistd.h>
#include <errno.h>

int shmtest(void)
{
@@ -1,7 +1,7 @@
cd build
#arm-none-eabi-insight &
/opt/qemu/bin/qemu-system-arm -s -kernel final.axf -nographic -m 128 -M versatilepb &
arm-none-linux-gnueabi-insight ; pkill qemu-system-arm
/opt/qemu/bin/qemu-system-arm -s -kernel final.axf -serial stdio -m 128 -M versatilepb &
#arm-none-linux-gnueabi-insight ; pkill qemu-system-arm
#arm-none-eabi-gdb ; pkill qemu-system-arm
cd ..