New sources layout

Change-Id: Ic716f336b7071063997cf5b4dae6d50e0b4631e9
This commit is contained in:
2014-07-28 21:19:37 +02:00
parent 428aa25dc6
commit 433d6423c3
3138 changed files with 693 additions and 606 deletions

View File

@@ -0,0 +1,49 @@
# Makefile for system library implementation
# Each do_*.c file implements exactly one kernel call handler; the list
# below is the machine-independent set, with i386-only calls appended
# conditionally at the bottom.
.include <bsd.own.mk>
# Resolve the listed sources relative to the system/ subdirectory.
.PATH: ${.CURDIR}/system
# Machine-independent kernel call implementations.
SRCS+= \
do_fork.c \
do_exec.c \
do_clear.c \
do_exit.c \
do_trace.c \
do_runctl.c \
do_update.c \
do_times.c \
do_setalarm.c \
do_stime.c \
do_settime.c \
do_vtimer.c \
do_irqctl.c \
do_copy.c \
do_umap.c \
do_umap_remote.c \
do_vumap.c \
do_memset.c \
do_setgrant.c \
do_privctl.c \
do_safecopy.c \
do_safememset.c \
do_diagctl.c \
do_getksig.c \
do_endksig.c \
do_kill.c \
do_sigsend.c \
do_sigreturn.c \
do_abort.c \
do_getinfo.c \
do_cprofile.c \
do_profbuf.c \
do_vmctl.c \
do_mcontext.c \
do_schedule.c \
do_schedctl.c \
do_statectl.c
# Port-based I/O kernel calls only exist on i386 hardware.
.if ${MACHINE_ARCH} == "i386"
SRCS+= \
do_devio.c \
do_vdevio.c
.endif

View File

@@ -0,0 +1,29 @@
/* The kernel call implemented in this file:
* m_type: SYS_ABORT
*
* The parameters for this kernel call are:
* m_lsys_krn_sys_abort.how (how to abort, possibly fetch monitor params)
*/
#include "kernel/system.h"
#include <unistd.h>
#if USE_ABORT
/*===========================================================================*
 * do_abort *
 *===========================================================================*/
int do_abort(struct proc * caller, message * m_ptr)
{
/* Process a sys_abort request: MINIX cannot continue running. Such a
 * request may come e.g. from the PM (a normal abort) or from TTY (in
 * response to CTRL-ALT-DEL).
 */
int abort_mode = m_ptr->m_lsys_krn_sys_abort.how;
/* Kick off the system shutdown sequence. */
prepare_shutdown(abort_mode);
return(OK); /* pro-forma (really EDISASTER) */
}
#endif /* USE_ABORT */

View File

@@ -0,0 +1,80 @@
/* The kernel call implemented in this file:
* m_type: SYS_CLEAR
*
* The parameters for this kernel call are:
* m_lsys_krn_sys_clear.endpt (endpoint of process to clean up)
*/
#include "kernel/system.h"
#include <minix/endpoint.h>
#if USE_CLEAR
/*===========================================================================*
 * do_clear *
 *===========================================================================*/
int do_clear(struct proc * caller, message * m_ptr)
{
/* Handle sys_clear. Only the PM can request other process slots to be cleared
 * when a process has exited.
 * The routine to clean up a process table slot cancels outstanding timers,
 * possibly removes the process from the message queues, and resets certain
 * process table fields to the default values.
 */
struct proc *rc;
int exit_p;
int i;
/* Translate the endpoint of the exiting process to a slot number. */
if(!isokendpt(m_ptr->m_lsys_krn_sys_clear.endpt, &exit_p)) {
/* get exiting process */
return EINVAL;
}
rc = proc_addr(exit_p); /* clean up */
/* Drop the process' address space before touching the rest of its state. */
release_address_space(rc);
/* Don't clear if already cleared. */
if(isemptyp(rc)) return OK;
/* Check the table with IRQ hooks to see if hooks should be released. */
for (i=0; i < NR_IRQ_HOOKS; i++) {
if (rc->p_endpoint == irq_hooks[i].proc_nr_e) {
rm_irq_handler(&irq_hooks[i]); /* remove interrupt handler */
irq_hooks[i].proc_nr_e = NONE; /* mark hook as free */
}
}
/* Remove the process' ability to send and receive messages */
clear_endpoint(rc);
/* Turn off any alarm timers at the clock. */
reset_kernel_timer(&priv(rc)->s_alarm_timer);
/* Make sure that the exiting process is no longer scheduled,
 * and mark slot as FREE. Also mark saved fpu contents as not significant.
 */
RTS_SETFLAGS(rc, RTS_SLOT_FREE);
/* release FPU */
release_fpu(rc);
rc->p_misc_flags &= ~MF_FPU_INITIALIZED;
/* Release the process table slot. If this is a system process, also
 * release its privilege structure. Further cleanup is not needed at
 * this point. All important fields are reinitialized when the
 * slots are assigned to another, new process.
 */
if (priv(rc)->s_flags & SYS_PROC) priv(rc)->s_proc_nr = NONE;
#if 0
/* Clean up virtual memory */
if (rc->p_misc_flags & MF_VM) {
vm_map_default(rc);
}
#endif
return OK;
}
#endif /* USE_CLEAR */

View File

@@ -0,0 +1,92 @@
/* The kernel call implemented in this file:
* m_type: SYS_VIRCOPY, SYS_PHYSCOPY
*
* The parameters for this kernel call are:
* m_lsys_krn_sys_copy.src_addr source offset within segment
* m_lsys_krn_sys_copy.src_endpt source process number
* m_lsys_krn_sys_copy.dst_addr destination offset within segment
* m_lsys_krn_sys_copy.dst_endpt destination process number
* m_lsys_krn_sys_copy.nr_bytes number of bytes to copy
* m_lsys_krn_sys_copy.flags
*/
#include "kernel/system.h"
#include "kernel/vm.h"
#include <minix/type.h>
#include <assert.h>
#if (USE_VIRCOPY || USE_PHYSCOPY)
/*===========================================================================*
 * do_copy *
 *===========================================================================*/
int do_copy(struct proc * caller, message * m_ptr)
{
/* Handle sys_vircopy() and sys_physcopy(). Copy data using virtual or
 * physical addressing. Although a single handler function is used, there
 * are two different kernel calls so that permissions can be checked.
 * Returns OK on success, EINVAL for a bad endpoint, E2BIG if the byte
 * count does not fit in vir_bytes, or a fault/copy error code.
 */
struct vir_addr vir_addr[2]; /* virtual source and destination address */
phys_bytes bytes; /* number of bytes to copy */
int i;
#if 0
if (caller->p_endpoint != PM_PROC_NR && caller->p_endpoint != VFS_PROC_NR &&
caller->p_endpoint != RS_PROC_NR && caller->p_endpoint != MEM_PROC_NR &&
caller->p_endpoint != VM_PROC_NR)
{
static int first=1;
if (first)
{
first= 0;
printf(
"do_copy: got request from %d (source %d, destination %d)\n",
caller->p_endpoint,
m_ptr->m_lsys_krn_sys_copy.src_endpt,
m_ptr->m_lsys_krn_sys_copy.dst_endpt);
}
}
#endif
/* Dismember the command message. */
vir_addr[_SRC_].proc_nr_e = m_ptr->m_lsys_krn_sys_copy.src_endpt;
vir_addr[_DST_].proc_nr_e = m_ptr->m_lsys_krn_sys_copy.dst_endpt;
vir_addr[_SRC_].offset = m_ptr->m_lsys_krn_sys_copy.src_addr;
vir_addr[_DST_].offset = m_ptr->m_lsys_krn_sys_copy.dst_addr;
bytes = m_ptr->m_lsys_krn_sys_copy.nr_bytes;
/* Now do some checks for both the source and destination virtual address.
 * This is done once for _SRC_, then once for _DST_.
 */
for (i=_SRC_; i<=_DST_; i++) {
int p;
/* Check if process number was given implicitly with SELF and is valid. */
if (vir_addr[i].proc_nr_e == SELF)
vir_addr[i].proc_nr_e = caller->p_endpoint;
if (vir_addr[i].proc_nr_e != NONE) {
if(! isokendpt(vir_addr[i].proc_nr_e, &p)) {
printf("do_copy: %d: %d not ok endpoint\n", i, vir_addr[i].proc_nr_e);
return(EINVAL);
}
}
}
/* Check for overflow. This would happen for 64K segments and 16-bit
 * vir_bytes. Especially copying by the PM on do_fork() is affected.
 */
if (bytes != (phys_bytes) (vir_bytes) bytes) return(E2BIG);
/* Now try to make the actual virtual copy. */
if(m_ptr->m_lsys_krn_sys_copy.flags & CP_FLAG_TRY) {
int r;
/* The try-variant of the copy is reserved for VFS. */
assert(caller->p_endpoint == VFS_PROC_NR);
r = virtual_copy(&vir_addr[_SRC_], &vir_addr[_DST_], bytes);
/* Collapse side-specific fault codes into plain EFAULT for the caller. */
if(r == EFAULT_SRC || r == EFAULT_DST) return EFAULT;
return r;
} else {
return( virtual_copy_vmcheck(caller, &vir_addr[_SRC_],
&vir_addr[_DST_], bytes) );
}
}
#endif /* (USE_VIRCOPY || USE_PHYSCOPY) */

View File

@@ -0,0 +1,152 @@
/* The kernel call that is implemented in this file:
* m_type: SYS_CPROF
*
* The parameters for this kernel call are:
* m_lsys_krn_cprof.action (get/reset profiling data)
* m_lsys_krn_cprof.mem_size (available memory for data)
* m_lsys_krn_cprof.endpt (endpoint of caller)
* m_lsys_krn_cprof.ctl_ptr (location of info struct)
* m_lsys_krn_cprof.mem_ptr (location of memory for data)
*
* Changes:
* 14 Aug, 2006 Created (Rogier Meurs)
*/
#include "kernel/system.h"
#include <string.h>
#if CPROFILE
static struct cprof_ctl_s cprof_ctl_inst;
static struct cprof_tbl_s cprof_tbl_inst;
/*===========================================================================*
 * do_cprofile *
 *===========================================================================*/
int do_cprofile(struct proc * caller, message * m_ptr)
{
/* Handle SYS_CPROF: reset the call-profiling tables of all profiled
 * processes, or copy the collected profiling data to the user program.
 */
int proc_nr, i;
phys_bytes len;
vir_bytes vir_dst;
switch (m_ptr->m_lsys_krn_cprof.action) {
case PROF_RESET:
/* Reset profiling tables. */
cprof_ctl_inst.reset = 1;
printf("CPROFILE notice: resetting tables:");
for (i=0; i<cprof_procs_no; i++) {
printf(" %s", cprof_proc_info[i].name);
/* Test whether proc still alive. */
if (!isokendpt(cprof_proc_info[i].endpt, &proc_nr)) {
printf("endpt not valid %u (%s)\n",
cprof_proc_info[i].endpt, cprof_proc_info[i].name);
continue;
}
/* Set reset flag. The flag lives in the profiled process' own address
 * space, so copy the kernel-side value into it. */
data_copy(KERNEL, (vir_bytes) &cprof_ctl_inst.reset,
cprof_proc_info[i].endpt, cprof_proc_info[i].ctl_v,
sizeof(cprof_ctl_inst.reset));
}
printf("\n");
return OK;
case PROF_GET:
/* Get profiling data.
 *
 * Calculate physical addresses of user pointers. Copy to user
 * program the info struct. Copy to user program the profiling
 * tables of the profiled processes.
 */
if(!isokendpt(m_ptr->m_lsys_krn_cprof.endpt, &proc_nr))
return EINVAL;
cprof_mem_size = m_ptr->m_lsys_krn_cprof.mem_size;
printf("CPROFILE notice: getting tables:");
/* Copy control structs of profiled processes to calculate total
 * nr of bytes to be copied to user program and find out if any
 * errors happened. */
cprof_info.mem_used = 0;
cprof_info.err = 0;
for (i=0; i<cprof_procs_no; i++) {
printf(" %s", cprof_proc_info[i].name);
/* Test whether proc still alive. */
if (!isokendpt(cprof_proc_info[i].endpt, &proc_nr)) {
printf("endpt not valid %u (%s)\n",
cprof_proc_info[i].endpt, cprof_proc_info[i].name);
continue;
}
/* Copy control struct from proc to local variable. */
data_copy(cprof_proc_info[i].endpt, cprof_proc_info[i].ctl_v,
KERNEL, (vir_bytes) &cprof_ctl_inst,
sizeof(cprof_ctl_inst));
/* Calculate memory used. Per process: name, slot count, and one
 * table entry per used slot. */
cprof_proc_info[i].slots_used = cprof_ctl_inst.slots_used;
cprof_info.mem_used += CPROF_PROCNAME_LEN;
cprof_info.mem_used += sizeof(cprof_proc_info_inst.slots_used);
cprof_info.mem_used += cprof_proc_info[i].slots_used *
sizeof(cprof_tbl_inst);
/* Collect errors. */
cprof_info.err |= cprof_ctl_inst.err;
}
printf("\n");
/* Do we have the space available? -1 signals "no" to the user. */
if (cprof_mem_size < cprof_info.mem_used) cprof_info.mem_used = -1;
/* Copy the info struct to the user process. */
data_copy(KERNEL, (vir_bytes) &cprof_info,
m_ptr->m_lsys_krn_cprof.endpt, m_ptr->m_lsys_krn_cprof.ctl_ptr,
sizeof(cprof_info));
/* If there is no space or errors occurred, don't bother copying. */
if (cprof_info.mem_used == -1 || cprof_info.err) return OK;
/* For each profiled process, copy its name, slots_used and profiling
 * table to the user process. */
vir_dst = m_ptr->m_lsys_krn_cprof.mem_ptr;
for (i=0; i<cprof_procs_no; i++) {
len = (phys_bytes) strlen(cprof_proc_info[i].name);
data_copy(KERNEL, (vir_bytes) cprof_proc_info[i].name,
m_ptr->m_lsys_krn_cprof.endpt, vir_dst, len);
/* Advance by the full name field width, not just the string length. */
vir_dst += CPROF_PROCNAME_LEN;
len = (phys_bytes) sizeof(cprof_ctl_inst.slots_used);
/* slots_used sits directly after the reset flag in the control struct. */
data_copy(cprof_proc_info[i].endpt,
cprof_proc_info[i].ctl_v + sizeof(cprof_ctl_inst.reset),
m_ptr->m_lsys_krn_cprof.endpt, vir_dst, len);
vir_dst += len;
len = (phys_bytes)
(sizeof(cprof_tbl_inst) * cprof_proc_info[i].slots_used);
data_copy(cprof_proc_info[i].endpt, cprof_proc_info[i].buf_v,
m_ptr->m_lsys_krn_cprof.endpt, vir_dst, len);
vir_dst += len;
}
return OK;
default:
return EINVAL;
}
}
#endif /* CPROFILE */

View File

@@ -0,0 +1,108 @@
/* The kernel call implemented in this file:
* m_type: SYS_DEVIO
*
* The parameters for this kernel call are:
* m_lsys_krn_sys_devio.request (request input or output)
* m_lsys_krn_sys_devio.port (port to read/ write)
* m_lsys_krn_sys_devio.value (value to write/ return value read)
*/
#include "kernel/system.h"
#include <minix/devio.h>
#include <minix/endpoint.h>
#include <minix/portio.h>
#if USE_DEVIO
/*===========================================================================*
 * do_devio *
 *===========================================================================*/
int do_devio(struct proc * caller, message * m_ptr)
{
/* Perform a single port I/O request (byte, word or long; input or output)
 * on behalf of the calling driver, after validating the port against the
 * caller's granted I/O ranges.
 */
struct priv *privp;
port_t port;
struct io_range *iorp;
int i, size, nr_io_range;
int io_type, io_dir;
/* Split the request into access width and direction. */
io_type = m_ptr->m_lsys_krn_sys_devio.request & _DIO_TYPEMASK;
io_dir = m_ptr->m_lsys_krn_sys_devio.request & _DIO_DIRMASK;
switch (io_type)
{
case _DIO_BYTE: size= 1; break;
case _DIO_WORD: size= 2; break;
case _DIO_LONG: size= 4; break;
default: size= 4; break; /* Be conservative */
}
privp= priv(caller);
if (!privp)
{
printf("no priv structure!\n");
/* NOTE(review): without a priv structure the I/O range check below is
 * skipped entirely -- presumably this cannot happen for real callers. */
goto doit;
}
if (privp->s_flags & CHECK_IO_PORT)
{
/* The whole access, including its last byte, must fall inside one of
 * the caller's allowed port ranges. */
port= m_ptr->m_lsys_krn_sys_devio.port;
nr_io_range= privp->s_nr_io_range;
for (i= 0, iorp= privp->s_io_tab; i<nr_io_range; i++, iorp++)
{
if (port >= iorp->ior_base && port+size-1 <= iorp->ior_limit)
break;
}
if (i >= nr_io_range)
{
printf("do_devio: port 0x%x (size %d) not allowed\n",
m_ptr->m_lsys_krn_sys_devio.port, size);
return EPERM;
}
}
doit:
/* Reject accesses that are not naturally aligned for their size. */
if (m_ptr->m_lsys_krn_sys_devio.port & (size-1))
{
printf("do_devio: unaligned port 0x%x (size %d)\n",
m_ptr->m_lsys_krn_sys_devio.port, size);
return EPERM;
}
/* Process a single I/O request for byte, word, and long values. */
if (io_dir == _DIO_INPUT) {
switch (io_type) {
/* maybe "it" should not be called ports */
case _DIO_BYTE:
m_ptr->m_krn_lsys_sys_devio.value =
inb(m_ptr->m_lsys_krn_sys_devio.port);
break;
case _DIO_WORD:
m_ptr->m_krn_lsys_sys_devio.value =
inw(m_ptr->m_lsys_krn_sys_devio.port);
break;
case _DIO_LONG:
m_ptr->m_krn_lsys_sys_devio.value =
inl(m_ptr->m_lsys_krn_sys_devio.port);
break;
default: return(EINVAL);
}
} else {
switch (io_type) {
case _DIO_BYTE:
outb(m_ptr->m_lsys_krn_sys_devio.port,
m_ptr->m_lsys_krn_sys_devio.value);
break;
case _DIO_WORD:
outw(m_ptr->m_lsys_krn_sys_devio.port,
m_ptr->m_lsys_krn_sys_devio.value);
break;
case _DIO_LONG:
outl(m_ptr->m_lsys_krn_sys_devio.port,
m_ptr->m_lsys_krn_sys_devio.value);
break;
default: return(EINVAL);
}
}
return(OK);
}
#endif /* USE_DEVIO */

View File

@@ -0,0 +1,68 @@
/* The kernel call implemented in this file:
* m_type: SYS_DIAGCTL
*
* The parameters for this kernel call are:
* m_lsys_krn_sys_diagctl.code request
* and then request-specific arguments in
* m_lsys_krn_sys_diagctl.buf
* m_lsys_krn_sys_diagctl.len
* m_lsys_krn_sys_diagctl.endpt
*/
#include "kernel/system.h"
/*===========================================================================*
 * do_diagctl *
 *===========================================================================*/
int do_diagctl(struct proc * caller, message * m_ptr)
{
/* Handle SYS_DIAGCTL: print diagnostic text, dump a process stack trace,
 * or (un)register the caller for kernel message (SIGKMESS) notifications.
 */
vir_bytes len, buf;
static char mybuf[DIAG_BUFSIZE];
int s, i, proc_nr;
switch (m_ptr->m_lsys_krn_sys_diagctl.code) {
case DIAGCTL_CODE_DIAG:
/* Copy a buffer of diagnostic text from the caller and emit it
 * character by character, terminated by the end-of-message marker. */
buf = m_ptr->m_lsys_krn_sys_diagctl.buf;
len = m_ptr->m_lsys_krn_sys_diagctl.len;
if(len < 1 || len > DIAG_BUFSIZE) {
/* NOTE(review): len is vir_bytes printed with %d; confirm the
 * specifier matches on all configurations. */
printf("do_diagctl: diag for %d: len %d out of range\n",
caller->p_endpoint, len);
return EINVAL;
}
if((s=data_copy_vmcheck(caller, caller->p_endpoint, buf, KERNEL,
(vir_bytes) mybuf, len)) != OK) {
printf("do_diagctl: diag for %d: len %d: copy failed: %d\n",
caller->p_endpoint, len, s);
return s;
}
for(i = 0; i < len; i++)
kputc(mybuf[i]);
kputc(END_OF_KMESS);
return OK;
case DIAGCTL_CODE_STACKTRACE:
/* Dump a stack trace of the given process for debugging. */
if(!isokendpt(m_ptr->m_lsys_krn_sys_diagctl.endpt, &proc_nr))
return EINVAL;
proc_stacktrace(proc_addr(proc_nr));
return OK;
case DIAGCTL_CODE_REGISTER:
/* Only system processes may sign up for SIGKMESS notifications. */
if (!(priv(caller)->s_flags & SYS_PROC))
return EPERM;
priv(caller)->s_diag_sig = TRUE;
/* If the message log is not empty, send a first notification
 * immediately. After bootup the log is basically never empty.
 */
if (kmess.km_size > 0 && !kinfo.do_serial_debug)
send_sig(caller->p_endpoint, SIGKMESS);
return OK;
case DIAGCTL_CODE_UNREGISTER:
/* Stop sending kernel message notifications to the caller. */
if (!(priv(caller)->s_flags & SYS_PROC))
return EPERM;
priv(caller)->s_diag_sig = FALSE;
return OK;
default:
printf("do_diagctl: invalid request %d\n", m_ptr->m_lsys_krn_sys_diagctl.code);
return(EINVAL);
}
}

View File

@@ -0,0 +1,41 @@
/* The kernel call that is implemented in this file:
* m_type: SYS_ENDKSIG
*
* The parameters for this kernel call are:
* m_sigcalls.endpt # process for which PM is done
*/
#include "kernel/system.h"
#if USE_ENDKSIG
/*===========================================================================*
 * do_endksig *
 *===========================================================================*/
int do_endksig(struct proc * caller, message * m_ptr)
{
/* Wrap up the handling of one kernel signal, originally raised through a
 * SYS_KILL message or through cause_sig() by a task. A signal manager
 * calls this after processing a signal it fetched with SYS_GETKSIG.
 */
struct proc *target;
int proc_nr;
/* Translate the endpoint; it must refer to a live process slot. */
if(!isokendpt(m_ptr->m_sigcalls.endpt, &proc_nr))
return EINVAL;
target = proc_addr(proc_nr);
/* Only the registered signal manager of the process may finish signals. */
if (priv(target)->s_sig_mgr != caller->p_endpoint) return(EPERM);
/* The process must actually have been in signal processing. */
if (!RTS_ISSET(target, RTS_SIG_PENDING)) return(EINVAL);
/* Unless a fresh signal arrived in the mean time, let the process run. */
if (!RTS_ISSET(target, RTS_SIGNALED))
RTS_UNSET(target, RTS_SIG_PENDING);
return(OK);
}
#endif /* USE_ENDKSIG */

View File

@@ -0,0 +1,60 @@
/* The kernel call implemented in this file:
* m_type: SYS_EXEC
*
* The parameters for this kernel call are:
* m_lsys_krn_sys_exec.endpt (process that did exec call)
* m_lsys_krn_sys_exec.stack (new stack pointer)
* m_lsys_krn_sys_exec.name (pointer to program name)
* m_lsys_krn_sys_exec.ip (new instruction pointer)
* m_lsys_krn_sys_exec.ps_str (struct ps_strings *)
*/
#include "kernel/system.h"
#include <string.h>
#include <minix/endpoint.h>
#if USE_EXEC
/*===========================================================================*
 * do_exec *
 *===========================================================================*/
int do_exec(struct proc * caller, message * m_ptr)
{
/* Handle sys_exec(). A process has done a successful EXEC. Patch it up. */
register struct proc *rp;
int proc_nr;
char name[PROC_NAME_LEN];
if(!isokendpt(m_ptr->m_lsys_krn_sys_exec.endpt, &proc_nr))
return EINVAL;
rp = proc_addr(proc_nr);
/* A message still queued for delivery is obsolete after the exec; drop it. */
if(rp->p_misc_flags & MF_DELIVERMSG) {
rp->p_misc_flags &= ~MF_DELIVERMSG;
}
/* Save command name for debugging, ps(1) output, etc. */
if(data_copy(caller->p_endpoint, m_ptr->m_lsys_krn_sys_exec.name,
KERNEL, (vir_bytes) name,
(phys_bytes) sizeof(name) - 1) != OK)
strncpy(name, "<unset>", PROC_NAME_LEN);
/* Guarantee NUL termination regardless of what was copied. */
name[sizeof(name)-1] = '\0';
/* Set process state: new instruction pointer, stack and ps_strings. */
arch_proc_init(rp,
(u32_t) m_ptr->m_lsys_krn_sys_exec.ip,
(u32_t) m_ptr->m_lsys_krn_sys_exec.stack,
(u32_t) m_ptr->m_lsys_krn_sys_exec.ps_str, name);
/* No reply to EXEC call */
RTS_UNSET(rp, RTS_RECEIVING);
/* Mark fpu_regs contents as not significant, so fpu
 * will be initialized, when it's used next time. */
rp->p_misc_flags &= ~MF_FPU_INITIALIZED;
/* force reloading FPU if the current process is the owner */
release_fpu(rp);
return(OK);
}

View File

@@ -0,0 +1,27 @@
/* The kernel call implemented in this file:
* m_type: SYS_EXIT
*/
#include "kernel/system.h"
#include <signal.h>
#if USE_EXIT
/*===========================================================================*
 * do_exit *
 *===========================================================================*/
int do_exit(struct proc * caller, message * m_ptr)
{
/* Handle sys_exit: a system process wants to terminate itself. Arrange
 * this by raising an abort signal against the caller; the actual cleanup
 * follows from the signal handling.
 */
cause_sig(caller->p_nr, SIGABRT); /* self-termination signal */
return(EDONTREPLY); /* the caller is going away; no reply */
}
#endif /* USE_EXIT */

View File

@@ -0,0 +1,134 @@
/* The kernel call implemented in this file:
* m_type: SYS_FORK
*
* The parameters for this kernel call are:
* m_lsys_krn_sys_fork.endpt (parent, process that forked)
* m_lsys_krn_sys_fork.slot (child's process table slot)
* m_lsys_krn_sys_fork.flags (fork flags)
* m_krn_lsys_sys_fork.endpt (endpoint of the child)
* m_krn_lsys_sys_fork.msgaddr (new memory map for the child)
*/
#include "kernel/system.h"
#include "kernel/vm.h"
#include <signal.h>
#include <string.h>
#include <assert.h>
#include <minix/endpoint.h>
#include <minix/u64.h>
#if USE_FORK
/*===========================================================================*
 * do_fork *
 *===========================================================================*/
int do_fork(struct proc * caller, message * m_ptr)
{
/* Handle sys_fork().
 * m_lsys_krn_sys_fork.endpt has forked.
 * The child is m_lsys_krn_sys_fork.slot.
 */
#if defined(__i386__)
char *old_fpu_save_area_p;
#endif
register struct proc *rpc; /* child process pointer */
struct proc *rpp; /* parent process pointer */
int gen;
int p_proc;
int namelen;
if(!isokendpt(m_ptr->m_lsys_krn_sys_fork.endpt, &p_proc))
return EINVAL;
rpp = proc_addr(p_proc);
rpc = proc_addr(m_ptr->m_lsys_krn_sys_fork.slot);
/* The parent must be in use and the child slot must still be free. */
if (isemptyp(rpp) || ! isemptyp(rpc)) return(EINVAL);
assert(!(rpp->p_misc_flags & MF_DELIVERMSG));
/* needs to be receiving so we know where the message buffer is */
if(!RTS_ISSET(rpp, RTS_RECEIVING)) {
printf("kernel: fork not done synchronously?\n");
return EINVAL;
}
/* make sure that the FPU context is saved in parent before copy */
save_fpu(rpp);
/* Copy parent 'proc' struct to child. And reinitialize some fields. */
gen = _ENDPOINT_G(rpc->p_endpoint);
#if defined(__i386__)
/* The child's FPU save area pointer would be clobbered by the struct
 * copy below; remember it so it can be restored afterwards. */
old_fpu_save_area_p = rpc->p_seg.fpu_state;
#endif
*rpc = *rpp; /* copy 'proc' struct */
#if defined(__i386__)
rpc->p_seg.fpu_state = old_fpu_save_area_p;
if(proc_used_fpu(rpp))
memcpy(rpc->p_seg.fpu_state, rpp->p_seg.fpu_state, FPU_XFP_SIZE);
#endif
if(++gen >= _ENDPOINT_MAX_GENERATION) /* increase generation */
gen = 1; /* generation number wraparound */
rpc->p_nr = m_ptr->m_lsys_krn_sys_fork.slot; /* this was obliterated by copy */
rpc->p_endpoint = _ENDPOINT(gen, rpc->p_nr); /* new endpoint of slot */
rpc->p_reg.retreg = 0; /* child sees pid = 0 to know it is child */
rpc->p_user_time = 0; /* set all the accounting times to 0 */
rpc->p_sys_time = 0;
/* The child inherits neither per-process timers nor tracing state. */
rpc->p_misc_flags &=
~(MF_VIRT_TIMER | MF_PROF_TIMER | MF_SC_TRACE | MF_SPROF_SEEN | MF_STEP);
rpc->p_virt_left = 0; /* disable, clear the process-virtual timers */
rpc->p_prof_left = 0;
/* Mark process name as being a forked copy */
namelen = strlen(rpc->p_name);
#define FORKSTR "*F"
if(namelen+strlen(FORKSTR) < sizeof(rpc->p_name))
strcat(rpc->p_name, FORKSTR);
/* the child process is not runnable until it's scheduled. */
RTS_SET(rpc, RTS_NO_QUANTUM);
reset_proc_accounting(rpc);
rpc->p_cpu_time_left = 0;
rpc->p_cycles = 0;
rpc->p_kcall_cycles = 0;
rpc->p_kipc_cycles = 0;
rpc->p_signal_received = 0;
/* If the parent is a privileged process, take away the privileges from the
 * child process and inhibit it from running by setting the NO_PRIV flag.
 * The caller should explicitly set the new privileges before executing.
 */
if (priv(rpp)->s_flags & SYS_PROC) {
rpc->p_priv = priv_addr(USER_PRIV_ID);
rpc->p_rts_flags |= RTS_NO_PRIV;
}
/* Calculate endpoint identifier, so caller knows what it is. */
m_ptr->m_krn_lsys_sys_fork.endpt = rpc->p_endpoint;
m_ptr->m_krn_lsys_sys_fork.msgaddr = rpp->p_delivermsg_vir;
/* Don't schedule process in VM mode until it has a new pagetable. */
if(m_ptr->m_lsys_krn_sys_fork.flags & PFF_VMINHIBIT) {
RTS_SET(rpc, RTS_VMINHIBIT);
}
/*
 * Only one in group should have RTS_SIGNALED, child doesn't inherit tracing.
 */
RTS_UNSET(rpc, (RTS_SIGNALED | RTS_SIG_PENDING | RTS_P_STOP));
(void) sigemptyset(&rpc->p_pending);
#if defined(__i386__)
/* The child gets its own page table from VM; drop the inherited one. */
rpc->p_seg.p_cr3 = 0;
rpc->p_seg.p_cr3_v = NULL;
#elif defined(__arm__)
rpc->p_seg.p_ttbr = 0;
rpc->p_seg.p_ttbr_v = NULL;
#endif
return OK;
}
#endif /* USE_FORK */

View File

@@ -0,0 +1,241 @@
/* The kernel call implemented in this file:
* m_type: SYS_GETINFO
*
* The parameters for this kernel call are:
* m_lsys_krn_sys_getinfo.request (what info to get)
* m_lsys_krn_sys_getinfo.val_ptr (where to put it)
* m_lsys_krn_sys_getinfo.val_len (maximum length expected, optional)
* m_lsys_krn_sys_getinfo.val_ptr2 (second, optional pointer)
* m_lsys_krn_sys_getinfo.val_len2_e (second length or process nr)
*
* Upon return of the GETWHOAMI request the following parameters are used:
* m_krn_lsys_sys_getwhoami.endpt (the caller endpoint)
* m_krn_lsys_sys_getwhoami.privflags (the caller priviledes)
* m_krn_lsys_sys_getwhoami.name (the caller process name)
*
*/
#include <string.h>
#include "kernel/system.h"
#if USE_GETINFO
#include <minix/u64.h>
#include <sys/resource.h>
/*===========================================================================*
 * update_idle_time *
 *===========================================================================*/
static void update_idle_time(void)
{
/* Aggregate the idle cycle counters of all CPUs into the single IDLE
 * process table slot, so that readers of the process table see one
 * system-wide idle total.
 */
int i;
struct proc * idl = proc_addr(IDLE);
/* Start from zero; the per-CPU counters hold the authoritative values. */
idl->p_cycles = make64(0, 0);
for (i = 0; i < CONFIG_MAX_CPUS ; i++) {
idl->p_cycles += get_cpu_var(i, idle_proc).p_cycles;
}
}
/*===========================================================================*
 * do_getinfo *
 *===========================================================================*/
int do_getinfo(struct proc * caller, message * m_ptr)
{
/* Request system information to be copied to caller's address space. This
 * call simply copies entire data structures to the caller.
 * Each case sets src_vir/length; the shared copy code at the bottom
 * performs the transfer (GET_WHOAMI replies in the message and returns
 * early).
 */
size_t length;
vir_bytes src_vir;
int nr_e, nr, r;
int wipe_rnd_bin = -1; /* randomness bin to clear after a successful copy */
struct proc *p;
struct rusage r_usage;
/* Set source address and length based on request type. */
switch (m_ptr->m_lsys_krn_sys_getinfo.request) {
case GET_MACHINE: {
length = sizeof(struct machine);
src_vir = (vir_bytes) &machine;
break;
}
case GET_KINFO: {
length = sizeof(struct kinfo);
src_vir = (vir_bytes) &kinfo;
break;
}
case GET_LOADINFO: {
length = sizeof(struct loadinfo);
src_vir = (vir_bytes) &kloadinfo;
break;
}
case GET_CPUINFO: {
length = sizeof(cpu_info);
src_vir = (vir_bytes) &cpu_info;
break;
}
case GET_HZ: {
length = sizeof(system_hz);
src_vir = (vir_bytes) &system_hz;
break;
}
case GET_IMAGE: {
length = sizeof(struct boot_image) * NR_BOOT_PROCS;
src_vir = (vir_bytes) image;
break;
}
case GET_IRQHOOKS: {
length = sizeof(struct irq_hook) * NR_IRQ_HOOKS;
src_vir = (vir_bytes) irq_hooks;
break;
}
case GET_PROCTAB: {
/* Fold per-CPU idle counters into IDLE before exporting the table. */
update_idle_time();
length = sizeof(struct proc) * (NR_PROCS + NR_TASKS);
src_vir = (vir_bytes) proc;
break;
}
case GET_PRIVTAB: {
length = sizeof(struct priv) * (NR_SYS_PROCS);
src_vir = (vir_bytes) priv;
break;
}
case GET_PROC: {
/* val_len2_e holds the target endpoint; SELF means the caller. */
nr_e = (m_ptr->m_lsys_krn_sys_getinfo.val_len2_e == SELF) ?
caller->p_endpoint : m_ptr->m_lsys_krn_sys_getinfo.val_len2_e;
if(!isokendpt(nr_e, &nr)) return EINVAL; /* validate request */
length = sizeof(struct proc);
src_vir = (vir_bytes) proc_addr(nr);
break;
}
case GET_PRIV: {
nr_e = (m_ptr->m_lsys_krn_sys_getinfo.val_len2_e == SELF) ?
caller->p_endpoint : m_ptr->m_lsys_krn_sys_getinfo.val_len2_e;
if(!isokendpt(nr_e, &nr)) return EINVAL; /* validate request */
length = sizeof(struct priv);
src_vir = (vir_bytes) priv_addr(nr_to_id(nr));
break;
}
case GET_REGS: {
nr_e = (m_ptr->m_lsys_krn_sys_getinfo.val_len2_e == SELF) ?
caller->p_endpoint : m_ptr->m_lsys_krn_sys_getinfo.val_len2_e;
if(!isokendpt(nr_e, &nr)) return EINVAL; /* validate request */
p = proc_addr(nr);
length = sizeof(p->p_reg);
src_vir = (vir_bytes) &p->p_reg;
break;
}
case GET_WHOAMI: {
/* Reply entirely through the message; no memory copy needed. */
int len;
m_ptr->m_krn_lsys_sys_getwhoami.endpt = caller->p_endpoint;
len = MIN(sizeof(m_ptr->m_krn_lsys_sys_getwhoami.name),
sizeof(caller->p_name))-1;
strncpy(m_ptr->m_krn_lsys_sys_getwhoami.name, caller->p_name, len);
m_ptr->m_krn_lsys_sys_getwhoami.name[len] = '\0';
m_ptr->m_krn_lsys_sys_getwhoami.privflags = priv(caller)->s_flags;
return OK;
}
case GET_MONPARAMS: {
src_vir = (vir_bytes) kinfo.param_buf;
length = sizeof(kinfo.param_buf);
break;
}
case GET_RANDOMNESS: {
static struct k_randomness copy; /* copy to keep counters */
int i;
copy = krandom;
/* Hand out the snapshot and invalidate the live bins so the same
 * randomness is never given out twice. */
for (i= 0; i<RANDOM_SOURCES; i++) {
krandom.bin[i].r_size = 0; /* invalidate random data */
krandom.bin[i].r_next = 0;
}
length = sizeof(copy);
src_vir = (vir_bytes) &copy;
break;
}
case GET_RANDOMNESS_BIN: {
int bin = m_ptr->m_lsys_krn_sys_getinfo.val_len2_e;
if(bin < 0 || bin >= RANDOM_SOURCES) {
printf("SYSTEM: GET_RANDOMNESS_BIN: %d out of range\n", bin);
return EINVAL;
}
/* Only hand out bins that are completely filled. */
if(krandom.bin[bin].r_size < RANDOM_ELEMENTS)
return ENOENT;
length = sizeof(krandom.bin[bin]);
src_vir = (vir_bytes) &krandom.bin[bin];
/* Defer the wipe until the copy below has succeeded. */
wipe_rnd_bin = bin;
break;
}
case GET_IRQACTIDS: {
length = sizeof(irq_actids);
src_vir = (vir_bytes) irq_actids;
break;
}
case GET_IDLETSC: {
struct proc * idl;
update_idle_time();
idl = proc_addr(IDLE);
length = sizeof(idl->p_cycles);
src_vir = (vir_bytes) &idl->p_cycles;
break;
}
case GET_RUSAGE: {
struct proc *target = NULL;
int target_slot = 0;
u64_t usec;
nr_e = (m_ptr->m_lsys_krn_sys_getinfo.val_len2_e == SELF) ?
caller->p_endpoint : m_ptr->m_lsys_krn_sys_getinfo.val_len2_e;
if (!isokendpt(nr_e, &target_slot))
return EINVAL;
target = proc_addr(target_slot);
if (isemptyp(target))
return EINVAL;
length = sizeof(r_usage);
memset(&r_usage, 0, sizeof(r_usage));
/* Convert clock ticks to seconds/microseconds via system_hz. */
usec = target->p_user_time * 1000000 / system_hz;
r_usage.ru_utime.tv_sec = usec / 1000000;
r_usage.ru_utime.tv_usec = usec % 1000000;
usec = target->p_sys_time * 1000000 / system_hz;
r_usage.ru_stime.tv_sec = usec / 1000000;
r_usage.ru_stime.tv_usec = usec % 1000000;
r_usage.ru_nsignals = target->p_signal_received;
src_vir = (vir_bytes) &r_usage;
break;
}
default:
printf("do_getinfo: invalid request %d\n",
m_ptr->m_lsys_krn_sys_getinfo.request);
return(EINVAL);
}
/* Try to make the actual copy for the requested data. A val_len of 0
 * means the caller did not specify a maximum size. */
if (m_ptr->m_lsys_krn_sys_getinfo.val_len > 0 &&
length > m_ptr->m_lsys_krn_sys_getinfo.val_len)
return (E2BIG);
r = data_copy_vmcheck(caller, KERNEL, src_vir, caller->p_endpoint,
m_ptr->m_lsys_krn_sys_getinfo.val_ptr, length);
if(r != OK) return r;
/* Wipe the handed-out randomness bin only after a successful copy. */
if(wipe_rnd_bin >= 0 && wipe_rnd_bin < RANDOM_SOURCES) {
krandom.bin[wipe_rnd_bin].r_size = 0;
krandom.bin[wipe_rnd_bin].r_next = 0;
}
return(OK);
}
#endif /* USE_GETINFO */

View File

@@ -0,0 +1,43 @@
/* The kernel call that is implemented in this file:
* m_type: SYS_GETKSIG
*
* The parameters for this kernel call are:
* m_sigcalls.endpt # process with pending signals
* m_sigcalls.map # bit map with pending signals
*/
#include "kernel/system.h"
#include <signal.h>
#include <minix/endpoint.h>
#if USE_GETKSIG
/*===========================================================================*
 * do_getksig *
 *===========================================================================*/
int do_getksig(struct proc * caller, message * m_ptr)
{
/* A signal manager repeatedly issues this kernel call to fetch pending
 * kernel signals. Scan the user part of the process table for a signaled
 * process that this manager is responsible for; when none is found,
 * report NONE in the endpoint field.
 */
struct proc *rp;
for (rp = BEG_USER_ADDR; rp < END_PROC_ADDR; rp++) {
if (!RTS_ISSET(rp, RTS_SIGNALED)) continue;
/* Only hand out signals of processes managed by the caller. */
if (priv(rp)->s_sig_mgr != caller->p_endpoint) continue;
/* Report the signaled process and its pending signal map. */
m_ptr->m_sigcalls.endpt = rp->p_endpoint;
m_ptr->m_sigcalls.map = rp->p_pending;
/* The manager now owns these signals; clear the kernel-side state. */
(void) sigemptyset(&rp->p_pending);
RTS_UNSET(rp, RTS_SIGNALED); /* still blocked by SIG_PENDING */
return(OK);
}
/* No managed process with pending signals was found. */
m_ptr->m_sigcalls.endpt = NONE;
return(OK);
}
#endif /* USE_GETKSIG */

View File

@@ -0,0 +1,176 @@
/* The kernel call implemented in this file:
* m_type: SYS_IRQCTL
*
* The parameters for this kernel call are:
* m_lsys_krn_sys_irqctl.request (control operation to perform)
* m_lsys_krn_sys_irqctl.vector (irq line that must be controlled)
* m_lsys_krn_sys_irqctl.policy (irq policy allows reenabling interrupts)
* m_lsys_krn_sys_irqctl.hook_id (provides index to be returned on interrupt)
* m_krn_lsys_sys_irqctl.hook_id (returns index of irq hook assigned at kernel)
*/
#include "kernel/kernel.h"
#include "kernel/system.h"
#include <minix/endpoint.h>
#if USE_IRQCTL
static int generic_handler(irq_hook_t *hook);
/*===========================================================================*
 *				do_irqctl				     *
 *===========================================================================*/
int do_irqctl(struct proc * caller, message * m_ptr)
{
  /* Handle SYS_IRQCTL: enable or disable an IRQ line, or install/remove an
   * interrupt policy (a "hook") that turns hardware interrupts into
   * notification messages delivered to the calling driver.
   */
  /* Dismember the request message. */
  int irq_vec;
  int irq_hook_id;
  int notify_id;
  int r = OK;
  int i;
  irq_hook_t *hook_ptr;
  struct priv *privp;

  /* Hook identifiers start at 1 and end at NR_IRQ_HOOKS. */
  irq_hook_id = m_ptr->m_lsys_krn_sys_irqctl.hook_id - 1;
  irq_vec = m_ptr->m_lsys_krn_sys_irqctl.vector;

  /* See what is requested and take needed actions. */
  switch(m_ptr->m_lsys_krn_sys_irqctl.request) {

  /* Enable or disable IRQs. This is straightforward. */
  case IRQ_ENABLE:
  case IRQ_DISABLE:
      if (irq_hook_id >= NR_IRQ_HOOKS || irq_hook_id < 0 ||
		irq_hooks[irq_hook_id].proc_nr_e == NONE) return(EINVAL);
      /* Only the owner of a hook may toggle its IRQ line. */
      if (irq_hooks[irq_hook_id].proc_nr_e != caller->p_endpoint) return(EPERM);
      if (m_ptr->m_lsys_krn_sys_irqctl.request == IRQ_ENABLE) {
          enable_irq(&irq_hooks[irq_hook_id]);
      }
      else
          disable_irq(&irq_hooks[irq_hook_id]);
      break;

  /* Control IRQ policies. Set a policy and needed details in the IRQ table.
   * This policy is used by a generic function to handle hardware interrupts.
   */
  case IRQ_SETPOLICY:
      /* Check if IRQ line is acceptable. */
      if (irq_vec < 0 || irq_vec >= NR_IRQ_VECTORS) return(EINVAL);

      /* If the caller's privileges restrict IRQ access (CHECK_IRQ), the
       * requested vector must appear in its allowed-IRQ table.
       */
      privp= priv(caller);
      if (!privp)
      {
          printf("do_irqctl: no priv structure!\n");
          return EPERM;
      }
      if (privp->s_flags & CHECK_IRQ)
      {
          for (i= 0; i<privp->s_nr_irq; i++)
          {
              if (irq_vec == privp->s_irq_tab[i])
                  break;
          }
          if (i >= privp->s_nr_irq)
          {
              printf(
              "do_irqctl: IRQ check failed for proc %d, IRQ %d\n",
                  caller->p_endpoint, irq_vec);
              return EPERM;
          }
      }

      /* When setting a policy, the caller must provide an identifier that
       * is returned on the notification message if an interrupt occurs.
       * The id must fit within the bits of an irq_id_t bitmap.
       */
      notify_id = m_ptr->m_lsys_krn_sys_irqctl.hook_id;
      if (notify_id > CHAR_BIT * sizeof(irq_id_t) - 1) return(EINVAL);

      /* Try to find an existing mapping to override. */
      hook_ptr = NULL;
      for (i=0; !hook_ptr && i<NR_IRQ_HOOKS; i++) {
          if (irq_hooks[i].proc_nr_e == caller->p_endpoint
              && irq_hooks[i].notify_id == notify_id) {
              irq_hook_id = i;
              hook_ptr = &irq_hooks[irq_hook_id];	/* existing hook */
              rm_irq_handler(&irq_hooks[irq_hook_id]);
          }
      }

      /* If there is nothing to override, find a free hook for this mapping. */
      for (i=0; !hook_ptr && i<NR_IRQ_HOOKS; i++) {
          if (irq_hooks[i].proc_nr_e == NONE) {
              irq_hook_id = i;
              hook_ptr = &irq_hooks[irq_hook_id];	/* free hook */
          }
      }
      if (hook_ptr == NULL) return(ENOSPC);

      /* Install the handler. */
      hook_ptr->proc_nr_e = caller->p_endpoint;	/* process to notify */
      hook_ptr->notify_id = notify_id;		/* identifier to pass */
      hook_ptr->policy = m_ptr->m_lsys_krn_sys_irqctl.policy;	/* policy for interrupts */
      put_irq_handler(hook_ptr, irq_vec, generic_handler);
      DEBUGBASIC(("IRQ %d handler registered by %s / %d\n",
          irq_vec, caller->p_name, caller->p_endpoint));

      /* Return index of the IRQ hook in use. */
      m_ptr->m_krn_lsys_sys_irqctl.hook_id = irq_hook_id + 1;
      break;

  case IRQ_RMPOLICY:
      if (irq_hook_id < 0 || irq_hook_id >= NR_IRQ_HOOKS ||
               irq_hooks[irq_hook_id].proc_nr_e == NONE) {
          return(EINVAL);
      } else if (caller->p_endpoint != irq_hooks[irq_hook_id].proc_nr_e) {
          return(EPERM);
      }
      /* Remove the handler and return. */
      rm_irq_handler(&irq_hooks[irq_hook_id]);
      irq_hooks[irq_hook_id].proc_nr_e = NONE;
      break;

  default:
      r = EINVAL;	/* invalid IRQ REQUEST */
  }
  return(r);
}
/*===========================================================================*
 *			       generic_handler				     *
 *===========================================================================*/
static int generic_handler(irq_hook_t *hook)
{
/* This function handles hardware interrupt in a simple and generic way. All
 * interrupts are transformed into messages to a driver. The IRQ line will be
 * reenabled if the policy says so.
 *
 * (Converted from a K&R-style definition to a prototype definition so it
 * matches the ANSI forward declaration of this function.)
 *
 * Returns nonzero iff the IRQ line must be reenabled (IRQ_REENABLE set in
 * the hook's policy).
 */
  int proc_nr;

  /* As a side-effect, the interrupt handler gathers random information by
   * timestamping the interrupt events. This is used for /dev/random.
   */
  get_randomness(&krandom, hook->irq);

  /* Check if the handler is still alive.
   * If it's dead, this should never happen, as processes that die
   * automatically get their interrupt hooks unhooked.
   */
  if(!isokendpt(hook->proc_nr_e, &proc_nr))
     panic("invalid interrupt handler: %d", hook->proc_nr_e);

  /* Add a bit for this interrupt to the process' pending interrupts. When
   * sending the notification message, this bit map will be magically set
   * as an argument.
   */
  priv(proc_addr(proc_nr))->s_int_pending |= (1 << hook->notify_id);

  /* Build notification message and return. */
  mini_notify(proc_addr(HARDWARE), hook->proc_nr_e);
  return(hook->policy & IRQ_REENABLE);
}
#endif /* USE_IRQCTL */

View File

@@ -0,0 +1,41 @@
/* The kernel call that is implemented in this file:
* m_type: SYS_KILL
*
* The parameters for this kernel call are:
* m_sigcalls.endpt # process to signal/ pending
* m_sigcalls.sig # signal number to send to process
*/
#include "kernel/system.h"
#include <signal.h>
#if USE_KILL
/*===========================================================================*
 *				do_kill					     *
 *===========================================================================*/
int do_kill(struct proc * caller, message * m_ptr)
{
/* Handle sys_kill(): queue a signal for a process. The request is recorded
 * in the target's map of pending signals and the signal manager associated
 * with the process is informed. Delivery then proceeds via POSIX signal
 * handlers for user processes, or as an IPC message for system services.
 */
  proc_nr_t slot, endpt;
  int signo;

  endpt = (proc_nr_t)m_ptr->m_sigcalls.endpt;
  signo = m_ptr->m_sigcalls.sig;

  /* Validate the target endpoint and signal number; kernel tasks may never
   * be signaled.
   */
  if (!isokendpt(endpt, &slot)) return(EINVAL);
  if (signo >= _NSIG) return(EINVAL);
  if (iskerneln(slot)) return(EPERM);

  /* Set pending signal to be processed by the signal manager. */
  cause_sig(slot, signo);

  return(OK);
}
#endif /* USE_KILL */

View File

@@ -0,0 +1,106 @@
/* The kernel calls that are implemented in this file:
* m_type: SYS_SETMCONTEXT
* m_type: SYS_GETMCONTEXT
*
* The parameters for SYS_SETMCONTEXT kernel call are:
* m_lsys_krn_sys_setmcontext.endpt # proc endpoint doing call
* m_lsys_krn_sys_setmcontext.ctx_ptr # pointer to mcontext structure
*
* The parameters for SYS_GETMCONTEXT kernel call are:
* m_lsys_krn_sys_getmcontext.endpt # proc endpoint doing call
* m_lsys_krn_sys_getmcontext.ctx_ptr # pointer to mcontext structure
*/
#include "kernel/system.h"
#include <string.h>
#include <assert.h>
#include <machine/mcontext.h>
#if USE_MCONTEXT
/*===========================================================================*
 *			      do_getmcontext				     *
 *===========================================================================*/
int do_getmcontext(struct proc * caller, message * m_ptr)
{
/* Retrieve machine context of a process.
 *
 * Copies the caller-supplied mcontext structure in, fills in the FPU state
 * of the target process (i386 only), and copies the structure back out to
 * the caller's address space. Returns OK, EINVAL for a bad endpoint, EPERM
 * for kernel tasks, or a data_copy() error.
 */
  register struct proc *rp;
  int proc_nr, r;
  mcontext_t mc;

  if (!isokendpt(m_ptr->m_lsys_krn_sys_getmcontext.endpt, &proc_nr))
	return(EINVAL);
  if (iskerneln(proc_nr)) return(EPERM);
  rp = proc_addr(proc_nr);

#if defined(__i386__)
  /* No FPU state to copy; the user buffer is left untouched in this case.
   * NOTE(review): presumably the caller pre-initializes mc_flags -- confirm
   * against the libc wrapper.
   */
  if (!proc_used_fpu(rp))
	return(OK);	/* No state to copy */
#endif

  /* Get the mcontext structure into our address space.  */
  if ((r = data_copy(m_ptr->m_lsys_krn_sys_getmcontext.endpt,
	m_ptr->m_lsys_krn_sys_getmcontext.ctx_ptr, KERNEL,
	(vir_bytes) &mc, (phys_bytes) sizeof(mcontext_t))) != OK)
	return(r);

  mc.mc_flags = 0;
#if defined(__i386__)
  /* Copy FPU state */
  if (proc_used_fpu(rp)) {
	/* make sure that the FPU context is saved into proc structure first */
	save_fpu(rp);
	mc.mc_flags = (rp->p_misc_flags & MF_FPU_INITIALIZED) ? _MC_FPU_SAVED : 0;
	assert(sizeof(mc.__fpregs.__fp_reg_set) == FPU_XFP_SIZE);
	memcpy(&(mc.__fpregs.__fp_reg_set), rp->p_seg.fpu_state, FPU_XFP_SIZE);
  }
#endif

  /* Copy the mcontext structure to the user's address space. */
  if ((r = data_copy(KERNEL, (vir_bytes) &mc,
	m_ptr->m_lsys_krn_sys_getmcontext.endpt,
	m_ptr->m_lsys_krn_sys_getmcontext.ctx_ptr,
	(phys_bytes) sizeof(mcontext_t))) != OK)
	return(r);

  return(OK);
}
/*===========================================================================*
 *			      do_setmcontext				     *
 *===========================================================================*/
int do_setmcontext(struct proc * caller, message * m_ptr)
{
/* Set machine context of a process.
 *
 * Copies the mcontext structure from the caller's address space and, on
 * i386, restores the FPU state into the target process if _MC_FPU_SAVED is
 * set (otherwise marks the FPU as uninitialized). Returns OK, EINVAL for a
 * bad endpoint, or a data_copy() error.
 *
 * NOTE(review): unlike do_getmcontext there is no iskerneln() check here --
 * presumably access is restricted by the kernel call mask; confirm.
 */
  register struct proc *rp;
  int proc_nr, r;
  mcontext_t mc;

  if (!isokendpt(m_ptr->m_lsys_krn_sys_setmcontext.endpt, &proc_nr)) return(EINVAL);
  rp = proc_addr(proc_nr);

  /* Get the mcontext structure into our address space.  */
  if ((r = data_copy(m_ptr->m_lsys_krn_sys_setmcontext.endpt,
	m_ptr->m_lsys_krn_sys_setmcontext.ctx_ptr, KERNEL,
	(vir_bytes) &mc, (phys_bytes) sizeof(mcontext_t))) != OK)
	return(r);

#if defined(__i386__)
  /* Copy FPU state */
  if (mc.mc_flags & _MC_FPU_SAVED) {
	rp->p_misc_flags |= MF_FPU_INITIALIZED;
	assert(sizeof(mc.__fpregs.__fp_reg_set) == FPU_XFP_SIZE);
	memcpy(rp->p_seg.fpu_state, &(mc.__fpregs.__fp_reg_set), FPU_XFP_SIZE);
  } else
	rp->p_misc_flags &= ~MF_FPU_INITIALIZED;
  /* force reloading FPU in either case */
  release_fpu(rp);
#endif

  return(OK);
}
#endif

View File

@@ -0,0 +1,28 @@
/* The kernel call implemented in this file:
* m_type: SYS_MEMSET
*
* The parameters for this kernel call are:
* m_lsys_krn_sys_memset.base (virtual address)
* m_lsys_krn_sys_memset.count (number of bytes to write)
* m_lsys_krn_sys_memset.pattern (pattern byte to be written)
*/
#include "kernel/system.h"
#if USE_MEMSET
/*===========================================================================*
 *				do_memset				     *
 *===========================================================================*/
int do_memset(struct proc * caller, message * m_ptr)
{
/* Handle sys_memset(). This writes a pattern into the specified memory.
 * Delegates to the VM-aware vm_memset(): fills 'count' bytes starting at
 * virtual address 'base' in the target process with the pattern byte.
 */
  vm_memset(caller, m_ptr->m_lsys_krn_sys_memset.process,
	m_ptr->m_lsys_krn_sys_memset.base,
	m_ptr->m_lsys_krn_sys_memset.pattern,
	m_ptr->m_lsys_krn_sys_memset.count);
  return(OK);
}
#endif /* USE_MEMSET */

View File

@@ -0,0 +1,419 @@
/* The kernel call implemented in this file:
* m_type: SYS_PRIVCTL
*
* The parameters for this kernel call are:
* m_lsys_krn_sys_privctl.endpt (process endpoint of target)
* m_lsys_krn_sys_privctl.request (privilege control request)
* m_lsys_krn_sys_privctl.arg_ptr (pointer to request data)
* m.m_lsys_krn_sys_privctl.phys_start
* m.m_lsys_krn_sys_privctl.phys_len
*/
#include "kernel/system.h"
#include "kernel/ipc.h"
#include <signal.h>
#include <string.h>
#include <minix/endpoint.h>
#if USE_PRIVCTL
#define PRIV_DEBUG 0
static int update_priv(struct proc *rp, struct priv *priv);
/*===========================================================================*
 *				do_privctl				     *
 *===========================================================================*/
int do_privctl(struct proc * caller, message * m_ptr)
{
/* Handle sys_privctl(). Update a process' privileges. If the process is not
 * yet a system process, make sure it gets its own privilege structure.
 *
 * Supported requests: allow/disallow/yield scheduling of a process held by
 * RTS_NO_PRIV, assigning a system or user privilege structure, adding I/O
 * ranges, memory ranges and IRQs, querying memory permissions, and updating
 * an existing privilege structure.
 */
  struct proc *rp;
  proc_nr_t proc_nr;
  sys_id_t priv_id;
  sys_map_t map;
  int ipc_to_m, kcalls;
  int i, r;
  struct io_range io_range;
  struct minix_mem_range mem_range;
  struct priv priv;
  int irq;

  /* Check whether caller is allowed to make this call. Privileged processes
   * can only update the privileges of processes that are inhibited from
   * running by the RTS_NO_PRIV flag. This flag is set when a privileged process
   * forks.
   */
  if (! (priv(caller)->s_flags & SYS_PROC)) return(EPERM);
  if(m_ptr->m_lsys_krn_sys_privctl.endpt == SELF) okendpt(caller->p_endpoint,
	&proc_nr);
  else if(!isokendpt(m_ptr->m_lsys_krn_sys_privctl.endpt, &proc_nr))
	return(EINVAL);
  rp = proc_addr(proc_nr);

  switch(m_ptr->m_lsys_krn_sys_privctl.request)
  {
  case SYS_PRIV_ALLOW:
	/* Allow process to run. Make sure its privilege structure has already
	 * been set.
	 */
	if (!RTS_ISSET(rp, RTS_NO_PRIV) || priv(rp)->s_proc_nr == NONE) {
		return(EPERM);
	}
	RTS_UNSET(rp, RTS_NO_PRIV);
	return(OK);

  case SYS_PRIV_YIELD:
	/* Allow process to run and suspend the caller. */
	if (!RTS_ISSET(rp, RTS_NO_PRIV) || priv(rp)->s_proc_nr == NONE) {
		return(EPERM);
	}
	RTS_SET(caller, RTS_NO_PRIV);
	RTS_UNSET(rp, RTS_NO_PRIV);
	return(OK);

  case SYS_PRIV_DISALLOW:
	/* Disallow process from running. */
	if (RTS_ISSET(rp, RTS_NO_PRIV)) return(EPERM);
	RTS_SET(rp, RTS_NO_PRIV);
	return(OK);

  case SYS_PRIV_SET_SYS:
	/* Set a privilege structure of a blocked system process. */
	if (! RTS_ISSET(rp, RTS_NO_PRIV)) return(EPERM);

	/* Check whether a static or dynamic privilege id must be allocated. */
	priv_id = NULL_PRIV_ID;
	if (m_ptr->m_lsys_krn_sys_privctl.arg_ptr)
	{
		/* Copy privilege structure from caller */
		if((r=data_copy(caller->p_endpoint,
			m_ptr->m_lsys_krn_sys_privctl.arg_ptr, KERNEL,
			(vir_bytes) &priv, sizeof(priv))) != OK)
			return r;

		/* See if the caller wants to assign a static privilege id. */
		if(!(priv.s_flags & DYN_PRIV_ID)) {
			priv_id = priv.s_id;
		}
	}

	/* Make sure this process has its own privileges structure. This may
	 * fail, since there are only a limited number of system processes.
	 * Then copy privileges from the caller and restore some defaults.
	 */
	if ((i=get_priv(rp, priv_id)) != OK)
	{
		printf("do_privctl: unable to allocate priv_id %d: %d\n",
			priv_id, i);
		return(i);
	}
	priv_id = priv(rp)->s_id;		/* backup privilege id */
	*priv(rp) = *priv(caller);		/* copy from caller */
	priv(rp)->s_id = priv_id;		/* restore privilege id */
	priv(rp)->s_proc_nr = proc_nr;		/* reassociate process nr */

	/* Wipe any state inherited from the caller's privilege structure. */
	for (i=0; i< NR_SYS_CHUNKS; i++)	/* remove pending: */
		priv(rp)->s_notify_pending.chunk[i] = 0;	/* - notifications */
	priv(rp)->s_int_pending = 0;			/* - interrupts */
	(void) sigemptyset(&priv(rp)->s_sig_pending);	/* - signals */
	reset_kernel_timer(&priv(rp)->s_alarm_timer);	/* - alarm */
	priv(rp)->s_asyntab= -1;			/* - asynsends */
	priv(rp)->s_asynsize= 0;
	priv(rp)->s_diag_sig = FALSE;		/* no request for diag sigs */

	/* Set defaults for privilege bitmaps. */
	priv(rp)->s_flags= DSRV_F;		/* privilege flags */
	priv(rp)->s_trap_mask= DSRV_T;		/* allowed traps */
	memset(&map, 0, sizeof(map));
	ipc_to_m = DSRV_M;			/* allowed targets */
	if (ipc_to_m == ALL_M) {
		for (i = 0; i < NR_SYS_PROCS; i++)
			set_sys_bit(map, i);
	}
	fill_sendto_mask(rp, &map);
	kcalls = DSRV_KC;			/* allowed kernel calls */
	for(i = 0; i < SYS_CALL_MASK_SIZE; i++) {
		priv(rp)->s_k_call_mask[i] = (kcalls == NO_C ? 0 : (~0));
	}

	/* Set the default signal managers. */
	priv(rp)->s_sig_mgr = DSRV_SM;
	priv(rp)->s_bak_sig_mgr = NONE;

	/* Set defaults for resources: no I/O resources, no memory resources,
	 * no IRQs, no grant table
	 */
	priv(rp)->s_nr_io_range= 0;
	priv(rp)->s_nr_mem_range= 0;
	priv(rp)->s_nr_irq= 0;
	priv(rp)->s_grant_table= 0;
	priv(rp)->s_grant_entries= 0;

	/* Override defaults if the caller has supplied a privilege structure. */
	if (m_ptr->m_lsys_krn_sys_privctl.arg_ptr)
	{
		if((r = update_priv(rp, &priv)) != OK) {
			return r;
		}
	}

	return(OK);

  case SYS_PRIV_SET_USER:
	/* Set a privilege structure of a blocked user process. */
	if (!RTS_ISSET(rp, RTS_NO_PRIV)) return(EPERM);

	/* Link the process to the privilege structure of the root user
	 * process all the user processes share.
	 */
	priv(rp) = priv_addr(USER_PRIV_ID);
	return(OK);

  case SYS_PRIV_ADD_IO:
	if (RTS_ISSET(rp, RTS_NO_PRIV))
		return(EPERM);

	/* Only system processes get I/O resources? */
	if (!(priv(rp)->s_flags & SYS_PROC))
		return EPERM;

#if 0 /* XXX -- do we need a call for this? */
	if (strcmp(rp->p_name, "fxp") == 0 ||
		strcmp(rp->p_name, "rtl8139") == 0)
	{
		printf("setting ipc_stats_target to %d\n", rp->p_endpoint);
		ipc_stats_target= rp->p_endpoint;
	}
#endif

	/* Get the I/O range */
	data_copy(caller->p_endpoint, m_ptr->m_lsys_krn_sys_privctl.arg_ptr,
		KERNEL, (vir_bytes) &io_range, sizeof(io_range));
	priv(rp)->s_flags |= CHECK_IO_PORT;	/* Check I/O accesses */

	/* When restarting a driver, check if it already has the permission */
	for (i = 0; i < priv(rp)->s_nr_io_range; i++) {
		if (priv(rp)->s_io_tab[i].ior_base == io_range.ior_base &&
			priv(rp)->s_io_tab[i].ior_limit == io_range.ior_limit)
			return OK;
	}

	i= priv(rp)->s_nr_io_range;
	if (i >= NR_IO_RANGE) {
		printf("do_privctl: %d already has %d i/o ranges.\n",
			rp->p_endpoint, i);
		return ENOMEM;
	}

	priv(rp)->s_io_tab[i].ior_base= io_range.ior_base;
	priv(rp)->s_io_tab[i].ior_limit= io_range.ior_limit;
	priv(rp)->s_nr_io_range++;
	return OK;

  case SYS_PRIV_ADD_MEM:
	if (RTS_ISSET(rp, RTS_NO_PRIV))
		return(EPERM);

	/* Only system processes get memory resources? */
	if (!(priv(rp)->s_flags & SYS_PROC))
		return EPERM;

	/* Get the memory range */
	if((r=data_copy(caller->p_endpoint,
		m_ptr->m_lsys_krn_sys_privctl.arg_ptr, KERNEL,
		(vir_bytes) &mem_range, sizeof(mem_range))) != OK)
		return r;
	priv(rp)->s_flags |= CHECK_MEM;	/* Check memory mappings */

	/* When restarting a driver, check if it already has the permission */
	for (i = 0; i < priv(rp)->s_nr_mem_range; i++) {
		if (priv(rp)->s_mem_tab[i].mr_base == mem_range.mr_base &&
			priv(rp)->s_mem_tab[i].mr_limit == mem_range.mr_limit)
			return OK;
	}

	i= priv(rp)->s_nr_mem_range;
	if (i >= NR_MEM_RANGE) {
		printf("do_privctl: %d already has %d mem ranges.\n",
			rp->p_endpoint, i);
		return ENOMEM;
	}

	priv(rp)->s_mem_tab[i].mr_base= mem_range.mr_base;
	priv(rp)->s_mem_tab[i].mr_limit= mem_range.mr_limit;
	priv(rp)->s_nr_mem_range++;
	return OK;

  case SYS_PRIV_ADD_IRQ:
	if (RTS_ISSET(rp, RTS_NO_PRIV))
		return(EPERM);

	/* Only system processes get IRQs? */
	if (!(priv(rp)->s_flags & SYS_PROC))
		return EPERM;

	data_copy(caller->p_endpoint, m_ptr->m_lsys_krn_sys_privctl.arg_ptr,
		KERNEL, (vir_bytes) &irq, sizeof(irq));
	priv(rp)->s_flags |= CHECK_IRQ;	/* Check IRQs */

	/* When restarting a driver, check if it already has the permission */
	for (i = 0; i < priv(rp)->s_nr_irq; i++) {
		if (priv(rp)->s_irq_tab[i] == irq)
			return OK;
	}

	i= priv(rp)->s_nr_irq;
	if (i >= NR_IRQ) {
		printf("do_privctl: %d already has %d irq's.\n",
			rp->p_endpoint, i);
		return ENOMEM;
	}
	priv(rp)->s_irq_tab[i]= irq;
	priv(rp)->s_nr_irq++;
	return OK;

  case SYS_PRIV_QUERY_MEM:
  {
	phys_bytes addr, limit;
	struct priv *sp;
	/* See if a certain process is allowed to map in certain physical
	 * memory.
	 */
	addr = (phys_bytes) m_ptr->m_lsys_krn_sys_privctl.phys_start;
	limit = addr + (phys_bytes) m_ptr->m_lsys_krn_sys_privctl.phys_len - 1;
	if(limit < addr)
		return EPERM;	/* length wrapped around the address space */
	if(!(sp = priv(rp)))
		return EPERM;
	if (!(sp->s_flags & SYS_PROC))
		return EPERM;
	for(i = 0; i < sp->s_nr_mem_range; i++) {
		if(addr >= sp->s_mem_tab[i].mr_base &&
		   limit <= sp->s_mem_tab[i].mr_limit)
			return OK;
	}
	return EPERM;
  }

  case SYS_PRIV_UPDATE_SYS:
	/* Update the privilege structure of a system process. */
	if(!m_ptr->m_lsys_krn_sys_privctl.arg_ptr) return EINVAL;

	/* Copy privilege structure from caller */
	if((r=data_copy(caller->p_endpoint,
		m_ptr->m_lsys_krn_sys_privctl.arg_ptr, KERNEL,
		(vir_bytes) &priv, sizeof(priv))) != OK)
		return r;

	/* Override settings in existing privilege structure. */
	if((r = update_priv(rp, &priv)) != OK) {
		return r;
	}

	return(OK);

  default:
	printf("do_privctl: bad request %d\n",
		m_ptr->m_lsys_krn_sys_privctl.request);
	return EINVAL;
  }
}
/*===========================================================================*
 *			       update_priv				     *
 *===========================================================================*/
static int update_priv(struct proc *rp, struct priv *priv)
{
/* Update the privilege structure of a given process.
 *
 * Copies the flags, signal managers, resource tables (IRQs, I/O ranges,
 * memory ranges), trap mask, IPC target mask and kernel call mask from the
 * supplied privilege structure into the process' own one. Resource tables
 * are copied only when the corresponding CHECK_* flag is set, and their
 * element counts are validated against the kernel table sizes first.
 *
 * Returns OK on success or EINVAL when a resource count is out of range.
 */
  int i;

  /* Copy s_flags and signal managers. */
  priv(rp)->s_flags = priv->s_flags;
  priv(rp)->s_sig_mgr = priv->s_sig_mgr;
  priv(rp)->s_bak_sig_mgr = priv->s_bak_sig_mgr;

  /* Copy IRQs. */
  if(priv->s_flags & CHECK_IRQ) {
	if (priv->s_nr_irq < 0 || priv->s_nr_irq > NR_IRQ)
		return EINVAL;
	priv(rp)->s_nr_irq= priv->s_nr_irq;
	for (i= 0; i<priv->s_nr_irq; i++)
	{
		priv(rp)->s_irq_tab[i]= priv->s_irq_tab[i];
#if PRIV_DEBUG
		printf("do_privctl: adding IRQ %d for %d\n",
			priv(rp)->s_irq_tab[i], rp->p_endpoint);
#endif
	}
  }

  /* Copy I/O ranges. */
  if(priv->s_flags & CHECK_IO_PORT) {
	if (priv->s_nr_io_range < 0 || priv->s_nr_io_range > NR_IO_RANGE)
		return EINVAL;
	priv(rp)->s_nr_io_range= priv->s_nr_io_range;
	for (i= 0; i<priv->s_nr_io_range; i++)
	{
		priv(rp)->s_io_tab[i]= priv->s_io_tab[i];
#if PRIV_DEBUG
		printf("do_privctl: adding I/O range [%x..%x] for %d\n",
			priv(rp)->s_io_tab[i].ior_base,
			priv(rp)->s_io_tab[i].ior_limit,
			rp->p_endpoint);
#endif
	}
  }

  /* Copy memory ranges. */
  if(priv->s_flags & CHECK_MEM) {
	if (priv->s_nr_mem_range < 0 || priv->s_nr_mem_range > NR_MEM_RANGE)
		return EINVAL;
	priv(rp)->s_nr_mem_range= priv->s_nr_mem_range;
	for (i= 0; i<priv->s_nr_mem_range; i++)
	{
		priv(rp)->s_mem_tab[i]= priv->s_mem_tab[i];
#if PRIV_DEBUG
		printf("do_privctl: adding mem range [%x..%x] for %d\n",
			priv(rp)->s_mem_tab[i].mr_base,
			priv(rp)->s_mem_tab[i].mr_limit,
			rp->p_endpoint);
#endif
	}
  }

  /* Copy trap mask. */
  priv(rp)->s_trap_mask = priv->s_trap_mask;

  /* Copy target mask.
   * BUGFIX: both debug printfs below used a "%d" conversion without a
   * matching argument (undefined behavior); pass rp->p_endpoint.
   */
#if PRIV_DEBUG
  printf("do_privctl: Setting ipc target mask for %d:", rp->p_endpoint);
  for (i=0; i < NR_SYS_PROCS; i += BITCHUNK_BITS) {
	printf(" %08x", get_sys_bits(priv->s_ipc_to, i));
  }
  printf("\n");
#endif

  fill_sendto_mask(rp, &priv->s_ipc_to);

#if PRIV_DEBUG
  printf("do_privctl: Set ipc target mask for %d:", rp->p_endpoint);
  for (i=0; i < NR_SYS_PROCS; i += BITCHUNK_BITS) {
	printf(" %08x", get_sys_bits(priv(rp)->s_ipc_to, i));
  }
  printf("\n");
#endif

  /* Copy kernel call mask. */
  memcpy(priv(rp)->s_k_call_mask, priv->s_k_call_mask,
	sizeof(priv(rp)->s_k_call_mask));

  return OK;
}
#endif /* USE_PRIVCTL */

View File

@@ -0,0 +1,50 @@
/* The kernel call that is implemented in this file:
* m_type: SYS_PROFBUF
*
* The parameters for this kernel call are:
* m_lsys_krn_sys_profbuf.ctl_ptr (location of control struct)
* m_lsys_krn_sys_profbuf.mem_ptr (location of profiling table)
*
* Changes:
* 14 Aug, 2006 Created (Rogier Meurs)
*/
#include "kernel/system.h"
#if CPROFILE
/*===========================================================================*
 *				do_profbuf				     *
 *===========================================================================*/
int do_profbuf(struct proc * caller, message * m_ptr)
{
/* Register a profiled system process for Call Profiling. Called on the
 * first execution of procentry, this records the caller's endpoint, name,
 * and the virtual addresses of its profiling table and control structure,
 * so the kernel can later read the tables and have them cleared.
 */
  int slot;
  struct proc *rp;

  /* Resolve the caller's slot; refuse when the registration table is full. */
  if(!isokendpt(caller->p_endpoint, &slot))
	return EDEADSRCDST;
  if(cprof_procs_no >= NR_SYS_PROCS)
	return ENOSPC;

  rp = proc_addr(slot);

  /* Record endpoint, name, and the addresses supplied by the caller. */
  cprof_proc_info[cprof_procs_no].endpt = caller->p_endpoint;
  cprof_proc_info[cprof_procs_no].name = rp->p_name;
  cprof_proc_info[cprof_procs_no].ctl_v = m_ptr->m_lsys_krn_sys_profbuf.ctl_ptr;
  cprof_proc_info[cprof_procs_no].buf_v = m_ptr->m_lsys_krn_sys_profbuf.mem_ptr;
  cprof_procs_no++;

  return OK;
}
#endif /* CPROFILE */

View File

@@ -0,0 +1,76 @@
/* The kernel call implemented in this file:
* m_type: SYS_RUNCTL
*
* The parameters for this kernel call are:
* m1_i1: RC_ENDPT process number to control
* m1_i2: RC_ACTION stop or resume the process
* m1_i3: RC_FLAGS request flags
*/
#include "kernel/system.h"
#include <assert.h>
#if USE_RUNCTL
/*===========================================================================*
 *				  do_runctl				     *
 *===========================================================================*/
int do_runctl(struct proc * caller, message * m_ptr)
{
/* Control a process's RTS_PROC_STOP flag. Used for process management.
 * If the process is queued sending a message or stopped for system call
 * tracing, and the RC_DELAY request flag is given, set MF_SIG_DELAY instead
 * of RTS_PROC_STOP, and send a SIGSNDELAY signal later when the process is done
 * sending (ending the delay). Used by PM for safe signal delivery.
 *
 * Returns OK, EINVAL for a bad endpoint or action, EPERM for kernel tasks,
 * or EBUSY when the stop was deferred via MF_SIG_DELAY.
 */
  int proc_nr, action, flags;
  register struct proc *rp;

  /* Extract the message parameters and do sanity checking. */
  if (!isokendpt(m_ptr->RC_ENDPT, &proc_nr)) return(EINVAL);
  if (iskerneln(proc_nr)) return(EPERM);
  rp = proc_addr(proc_nr);

  action = m_ptr->RC_ACTION;
  flags = m_ptr->RC_FLAGS;

  /* Is the target sending or syscall-traced? Then set MF_SIG_DELAY instead.
   * Do this only when the RC_DELAY flag is set in the request flags field.
   * The process will not become runnable before PM has called SYS_ENDKSIG.
   * Note that asynchronous messages are not covered: a process using SENDA
   * should not also install signal handlers *and* expect POSIX compliance.
   */
  if (action == RC_STOP && (flags & RC_DELAY)) {
	if (RTS_ISSET(rp, RTS_SENDING) || (rp->p_misc_flags & MF_SC_DEFER))
		rp->p_misc_flags |= MF_SIG_DELAY;

	if (rp->p_misc_flags & MF_SIG_DELAY)
		return (EBUSY);
  }

  /* Either set or clear the stop flag. */
  switch (action) {
  case RC_STOP:
#if CONFIG_SMP
	  /* check if we must stop a process on a different CPU */
	  if (rp->p_cpu != cpuid) {
		  smp_schedule_stop_proc(rp);
		  break;
	  }
#endif
	  RTS_SET(rp, RTS_PROC_STOP);
	  break;
  case RC_RESUME:
	  /* Resuming a process that is not stopped is a caller bug. */
	  assert(RTS_ISSET(rp, RTS_PROC_STOP));
	  RTS_UNSET(rp, RTS_PROC_STOP);
	  break;
  default:
	  return(EINVAL);
  }
  return(OK);
}
#endif /* USE_RUNCTL */

View File

@@ -0,0 +1,390 @@
/* The kernel call implemented in this file:
* m_type: SYS_SAFECOPYFROM or SYS_SAFECOPYTO or SYS_VSAFECOPY
*
* The parameters for this kernel call are:
* m_lsys_kern_safecopy.from_to other endpoint
* m_lsys_kern_safecopy.gid grant id
* m_lsys_kern_safecopy.offset offset within granted space
* m_lsys_kern_safecopy.address address in own address space
* m_lsys_kern_safecopy.bytes bytes to be copied
*
* For the vectored variant (do_vsafecopy):
* m_lsys_kern_vsafecopy.vec_addr address of vector
* m_lsys_kern_vsafecopy.vec_size number of significant elements in vector
*/
#include <assert.h>
#include "kernel/system.h"
#include "kernel/kernel.h"
#include "kernel/vm.h"
#define MAX_INDIRECT_DEPTH 5 /* up to how many indirect grants to follow? */
#define MEM_TOP 0xFFFFFFFFUL
static int safecopy(struct proc *, endpoint_t, endpoint_t,
cp_grant_id_t, size_t, vir_bytes, vir_bytes, int);
#define HASGRANTTABLE(gr) \
(priv(gr) && priv(gr)->s_grant_table)
/*===========================================================================*
* verify_grant *
*===========================================================================*/
int verify_grant(granter, grantee, grant, bytes, access,
offset_in, offset_result, e_granter, flags)
endpoint_t granter, grantee; /* copyee, copyer */
cp_grant_id_t grant; /* grant id */
vir_bytes bytes; /* copy size */
int access; /* direction (read/write) */
vir_bytes offset_in; /* copy offset within grant */
vir_bytes *offset_result; /* copy offset within virtual address space */
endpoint_t *e_granter; /* new granter (magic grants) */
u32_t *flags; /* CPF_* */
{
static cp_grant_t g;
static int proc_nr;
static const struct proc *granter_proc;
int depth = 0;
do {
/* Get granter process slot (if valid), and check range of
* grant id.
*/
if(!isokendpt(granter, &proc_nr) ) {
printf(
"grant verify failed: invalid granter %d\n", (int) granter);
return(EINVAL);
}
if(!GRANT_VALID(grant)) {
printf(
"grant verify failed: invalid grant %d\n", (int) grant);
return(EINVAL);
}
granter_proc = proc_addr(proc_nr);
/* If there is no priv. structure, or no grant table in the
* priv. structure, or the grant table in the priv. structure
* is too small for the grant, return EPERM.
*/
if(!HASGRANTTABLE(granter_proc)) {
printf(
"grant verify failed: granter %d has no grant table\n",
granter);
return(EPERM);
}
if(priv(granter_proc)->s_grant_entries <= grant) {
printf(
"verify_grant: grant verify failed in ep %d "
"proc %d: grant %d out of range "
"for table size %d\n",
granter, proc_nr, grant,
priv(granter_proc)->s_grant_entries);
return(EPERM);
}
/* Copy the grant entry corresponding to this id to see what it
* looks like. If it fails, hide the fact that granter has
* (presumably) set an invalid grant table entry by returning
* EPERM, just like with an invalid grant id.
*/
if(data_copy(granter,
priv(granter_proc)->s_grant_table + sizeof(g)*grant,
KERNEL, (vir_bytes) &g, sizeof(g)) != OK) {
printf(
"verify_grant: grant verify: data_copy failed\n");
return EPERM;
}
if(flags) *flags = g.cp_flags;
/* Check validity. */
if((g.cp_flags & (CPF_USED | CPF_VALID)) !=
(CPF_USED | CPF_VALID)) {
printf(
"verify_grant: grant failed: invalid (%d flags 0x%lx)\n",
grant, g.cp_flags);
return EPERM;
}
/* The given grant may be an indirect grant, that is, a grant
* that provides permission to use a grant given to the
* granter (i.e., for which it is the grantee). This can lead
* to a chain of indirect grants which must be followed back.
*/
if((g.cp_flags & CPF_INDIRECT)) {
/* Stop after a few iterations. There may be a loop. */
if (depth == MAX_INDIRECT_DEPTH) {
printf(
"verify grant: indirect grant verify "
"failed: exceeded maximum depth\n");
return ELOOP;
}
depth++;
/* Verify actual grantee. */
if(g.cp_u.cp_indirect.cp_who_to != grantee &&
grantee != ANY &&
g.cp_u.cp_indirect.cp_who_to != ANY) {
printf(
"verify_grant: indirect grant verify "
"failed: bad grantee\n");
return EPERM;
}
/* Start over with new granter, grant, and grantee. */
grantee = granter;
granter = g.cp_u.cp_indirect.cp_who_from;
grant = g.cp_u.cp_indirect.cp_grant;
}
} while(g.cp_flags & CPF_INDIRECT);
/* Check access of grant. */
if(((g.cp_flags & access) != access)) {
printf(
"verify_grant: grant verify failed: access invalid; want 0x%x, have 0x%x\n",
access, g.cp_flags);
return EPERM;
}
if((g.cp_flags & CPF_DIRECT)) {
/* Don't fiddle around with grants that wrap, arithmetic
* below may be confused.
*/
if(MEM_TOP - g.cp_u.cp_direct.cp_len + 1 <
g.cp_u.cp_direct.cp_start) {
printf(
"verify_grant: direct grant verify failed: len too long\n");
return EPERM;
}
/* Verify actual grantee. */
if(g.cp_u.cp_direct.cp_who_to != grantee && grantee != ANY
&& g.cp_u.cp_direct.cp_who_to != ANY) {
printf(
"verify_grant: direct grant verify failed: bad grantee\n");
return EPERM;
}
/* Verify actual copy range. */
if((offset_in+bytes < offset_in) ||
offset_in+bytes > g.cp_u.cp_direct.cp_len) {
printf(
"verify_grant: direct grant verify failed: bad size or range. "
"granted %d bytes @ 0x%lx; wanted %d bytes @ 0x%lx\n",
g.cp_u.cp_direct.cp_len,
g.cp_u.cp_direct.cp_start,
bytes, offset_in);
return EPERM;
}
/* Verify successful - tell caller what address it is. */
*offset_result = g.cp_u.cp_direct.cp_start + offset_in;
*e_granter = granter;
} else if(g.cp_flags & CPF_MAGIC) {
/* Currently, it is hardcoded that only FS may do
* magic grants.
*/
if(granter != VFS_PROC_NR) {
printf(
"verify_grant: magic grant verify failed: granter (%d) "
"is not FS (%d)\n", granter, VFS_PROC_NR);
return EPERM;
}
/* Verify actual grantee. */
if(g.cp_u.cp_magic.cp_who_to != grantee && grantee != ANY
&& g.cp_u.cp_direct.cp_who_to != ANY) {
printf(
"verify_grant: magic grant verify failed: bad grantee\n");
return EPERM;
}
/* Verify actual copy range. */
if((offset_in+bytes < offset_in) ||
offset_in+bytes > g.cp_u.cp_magic.cp_len) {
printf(
"verify_grant: magic grant verify failed: bad size or range. "
"granted %d bytes @ 0x%lx; wanted %d bytes @ 0x%lx\n",
g.cp_u.cp_magic.cp_len,
g.cp_u.cp_magic.cp_start,
bytes, offset_in);
return EPERM;
}
/* Verify successful - tell caller what address it is. */
*offset_result = g.cp_u.cp_magic.cp_start + offset_in;
*e_granter = g.cp_u.cp_magic.cp_who_from;
} else {
printf(
"verify_grant: grant verify failed: unknown grant type\n");
return EPERM;
}
return OK;
}
/*===========================================================================*
 *				safecopy				     *
 *===========================================================================*/
static int safecopy(caller, granter, grantee, grantid, bytes,
	g_offset, addr, access)
struct proc * caller;
endpoint_t granter, grantee;
cp_grant_id_t grantid;
size_t bytes;
vir_bytes g_offset, addr;
int access;			/* CPF_READ for a copy from granter to grantee, CPF_WRITE
				 * for a copy from grantee to granter.
				 */
{
/* Perform a grant-protected copy of 'bytes' bytes between 'granter' and
 * 'grantee'. The grant is verified first (which may redirect the granter
 * for magic/indirect grants); on success a regular virtual copy is done.
 * Returns OK or a negative/error code from verification or the copy.
 *
 * NOTE(review): the static locals assume this path is not reentrant --
 * confirm against kernel execution model before changing that.
 */
	static struct vir_addr v_src, v_dst;
	static vir_bytes v_offset;
	endpoint_t new_granter, *src, *dst;
	struct proc *granter_p;
	int r;
	u32_t flags;
#if PERF_USE_COW_SAFECOPY
	vir_bytes size;
#endif

	if(granter == NONE || grantee == NONE) {
		printf("safecopy: nonsense processes\n");
		return EFAULT;
	}

	/* See if there is a reasonable grant table. */
	if(!(granter_p = endpoint_lookup(granter))) return EINVAL;
	if(!HASGRANTTABLE(granter_p)) {
		printf(
		"safecopy failed: granter %d has no grant table\n", granter);
		return(EPERM);
	}

	/* Decide who is src and who is dst. */
	if(access & CPF_READ) {
		src = &granter;
		dst = &grantee;
	} else {
		src = &grantee;
		dst = &granter;
	}

	/* Verify permission exists. */
	if((r=verify_grant(granter, grantee, grantid, bytes, access,
	    g_offset, &v_offset, &new_granter, &flags)) != OK) {
		printf(
		"grant %d verify to copy %d->%d by %d failed: err %d\n",
			grantid, *src, *dst, grantee, r);
		return r;
	}

	/* verify_grant() can redirect the grantee to someone else,
	 * meaning the source or destination changes.
	 */
	granter = new_granter;

	/* Now it's a regular copy. Note that src/dst point at the local
	 * granter/grantee variables, so the redirection above is picked up.
	 */
	v_src.proc_nr_e = *src;
	v_dst.proc_nr_e = *dst;

	/* Now the offset in virtual addressing is known in 'offset'.
	 * Depending on the access, this is the source or destination
	 * address.
	 */
	if(access & CPF_READ) {
		v_src.offset = v_offset;
		v_dst.offset = (vir_bytes) addr;
	} else {
		v_src.offset = (vir_bytes) addr;
		v_dst.offset = v_offset;
	}

	/* Do the regular copy. */
	if(flags & CPF_TRY) {
		int r;	/* NOTE(review): shadows the outer 'r'; harmless here */
		/* Try copy without transparently faulting in pages. */
		r = virtual_copy(&v_src, &v_dst, bytes);
		if(r == EFAULT_SRC || r == EFAULT_DST) return EFAULT;
		return r;
	}
	return virtual_copy_vmcheck(caller, &v_src, &v_dst, bytes);
}
/*===========================================================================*
 *				do_safecopy_to				     *
 *===========================================================================*/
int do_safecopy_to(struct proc * caller, message * m_ptr)
{
/* Handle SYS_SAFECOPYTO: copy data from the caller into a granted area.
 * The caller is the grantee; CPF_WRITE means grantee-to-granter direction.
 */
  endpoint_t granter = m_ptr->m_lsys_kern_safecopy.from_to;
  cp_grant_id_t gid = (cp_grant_id_t) m_ptr->m_lsys_kern_safecopy.gid;

  return safecopy(caller, granter, caller->p_endpoint, gid,
	m_ptr->m_lsys_kern_safecopy.bytes,
	m_ptr->m_lsys_kern_safecopy.offset,
	(vir_bytes) m_ptr->m_lsys_kern_safecopy.address, CPF_WRITE);
}
/*===========================================================================*
 *				do_safecopy_from			     *
 *===========================================================================*/
int do_safecopy_from(struct proc * caller, message * m_ptr)
{
/* Handle SYS_SAFECOPYFROM: copy data out of a granted area to the caller.
 * The caller is the grantee; CPF_READ means granter-to-grantee direction.
 */
  endpoint_t granter = m_ptr->m_lsys_kern_safecopy.from_to;
  cp_grant_id_t gid = (cp_grant_id_t) m_ptr->m_lsys_kern_safecopy.gid;

  return safecopy(caller, granter, caller->p_endpoint, gid,
	m_ptr->m_lsys_kern_safecopy.bytes,
	m_ptr->m_lsys_kern_safecopy.offset,
	(vir_bytes) m_ptr->m_lsys_kern_safecopy.address, CPF_READ);
}
/*===========================================================================*
 *				do_vsafecopy				     *
 *===========================================================================*/
int do_vsafecopy(struct proc * caller, message * m_ptr)
{
/* Handle SYS_VSAFECOPY: fetch a vector of copy requests from the caller
 * and perform a safecopy for each element.  Every element must name SELF
 * on exactly one side, which determines the copy direction.
 */
  static struct vscp_vec vec[SCPVEC_NR];
  static struct vir_addr vsrc, vdst;
  int r, i, nr_els;
  size_t vbytes;

  /* Set up the copy of the vector itself: caller -> kernel. */
  vsrc.proc_nr_e = caller->p_endpoint;
  assert(vsrc.proc_nr_e != NONE);
  vsrc.offset = (vir_bytes) m_ptr->m_lsys_kern_vsafecopy.vec_addr;
  vdst.proc_nr_e = KERNEL;
  vdst.offset = (vir_bytes) vec;

  /* Number of vector elements, and the byte size of the whole vector. */
  nr_els = m_ptr->m_lsys_kern_vsafecopy.vec_size;
  vbytes = nr_els * sizeof(vec[0]);

  /* Obtain the vector of copies. */
  r = virtual_copy_vmcheck(caller, &vsrc, &vdst, vbytes);
  if (r != OK)
	return r;

  /* Perform the safecopies one by one. */
  for (i = 0; i < nr_els; i++) {
	int direction;
	endpoint_t granter;

	if (vec[i].v_from == SELF) {
		direction = CPF_WRITE;	/* caller writes to granter */
		granter = vec[i].v_to;
	} else if (vec[i].v_to == SELF) {
		direction = CPF_READ;	/* caller reads from granter */
		granter = vec[i].v_from;
	} else {
		printf("vsafecopy: %d: element %d/%d: no SELF found\n",
			caller->p_endpoint, i, nr_els);
		return EINVAL;
	}

	/* Do safecopy for this element; stop at the first failure. */
	r = safecopy(caller, granter, caller->p_endpoint, vec[i].v_gid,
		vec[i].v_bytes, vec[i].v_offset, vec[i].v_addr, direction);
	if (r != OK)
		return r;
  }

  return OK;
}

View File

@@ -0,0 +1,58 @@
/* The kernel call implemented in this file:
* m_type: SYS_SAFEMEMSET
*
* The parameters for this kernel call are:
* SMS_DST dst endpoint
* SMS_GID grant id
* SMS_OFFSET offset within grant
* SMS_PATTERN memset pattern byte
* SMS_BYTES bytes from offset
*/
#include <assert.h>
#include <minix/safecopies.h>
#include "kernel/system.h"
#include "kernel/kernel.h"
/*===========================================================================*
 *				do_safememset				     *
 *===========================================================================*/
int do_safememset(struct proc *caller, message *m_ptr) {
	/* Handle SYS_SAFEMEMSET: fill a granted memory area with a byte
	 * pattern on behalf of the caller (the grantee).  The grant must
	 * allow writing; the actual fill is delegated to VM via vm_memset().
	 */

	/* Extract parameters */
	endpoint_t dst_endpt = m_ptr->SMS_DST;
	endpoint_t caller_endpt = caller->p_endpoint;
	cp_grant_id_t grantid = m_ptr->SMS_GID;
	vir_bytes g_offset = m_ptr->SMS_OFFSET;
	int pattern = m_ptr->SMS_PATTERN;
	size_t len = (size_t)m_ptr->SMS_BYTES;

	struct proc *dst_p;
	endpoint_t new_granter;
	static vir_bytes v_offset;	/* result address from verify_grant() */
	int r;

	if (dst_endpt == NONE || caller_endpt == NONE)
		return EFAULT;

	if (!(dst_p = endpoint_lookup(dst_endpt)))
		return EINVAL;

	/* The granter must have a grant table at all. */
	if (!(priv(dst_p) && priv(dst_p)->s_grant_table)) {
		printf("safememset: dst %d has no grant table\n", dst_endpt);
		return EINVAL;
	}

	/* Verify permission exists, memset always requires CPF_WRITE */
	r = verify_grant(dst_endpt, caller_endpt, grantid, len, CPF_WRITE,
			 g_offset, &v_offset, &new_granter, NULL);
	if (r != OK) {
		/* Fix: the original diagnostic lacked a trailing newline. */
		printf("safememset: grant %d verify failed %d\n", grantid, r);
		return r;
	}

	return vm_memset(caller, new_granter, v_offset, pattern, len);
}

View File

@@ -0,0 +1,46 @@
#include "kernel/system.h"
#include <minix/endpoint.h>
/*===========================================================================*
 *				do_schedctl				     *
 *===========================================================================*/
int do_schedctl(struct proc * caller, message * m_ptr)
{
/* Handle sys_schedctl().  With SCHEDCTL_FLAG_KERNEL the kernel itself
 * takes over scheduling of the target process; otherwise the caller
 * registers itself as the target's userspace scheduler.
 */
  struct proc *p;
  uint32_t flags;
  int priority, quantum, cpu;
  int proc_nr;
  int r;

  /* check parameter validity */
  flags = m_ptr->m_lsys_krn_schedctl.flags;
  if (flags & ~SCHEDCTL_FLAG_KERNEL) {
	printf("do_schedctl: flags 0x%x invalid, caller=%d\n",
		flags, caller - proc);
	return EINVAL;
  }

  if (!isokendpt(m_ptr->m_lsys_krn_schedctl.endpoint, &proc_nr))
	return EINVAL;
  p = proc_addr(proc_nr);

  if ((flags & SCHEDCTL_FLAG_KERNEL) == SCHEDCTL_FLAG_KERNEL) {
	/* the kernel becomes the scheduler and starts
	 * scheduling the process.
	 */
	priority = m_ptr->m_lsys_krn_schedctl.priority;
	quantum = m_ptr->m_lsys_krn_schedctl.quantum;
	cpu = m_ptr->m_lsys_krn_schedctl.cpu;

	/* Try to schedule the process.  Fix: the original had the closing
	 * parenthesis misplaced -- (r = sched_proc(...) != OK) -- which
	 * assigned the *comparison result* to r, so a failure returned 1
	 * instead of the actual error code.
	 */
	if ((r = sched_proc(p, priority, quantum, cpu)) != OK)
		return r;
	p->p_scheduler = NULL;
  } else {
	/* the caller becomes the scheduler */
	p->p_scheduler = caller;
  }

  return(OK);
}

View File

@@ -0,0 +1,29 @@
#include "kernel/system.h"
#include <minix/endpoint.h>
#include "kernel/clock.h"
/*===========================================================================*
 *				do_schedule				     *
 *===========================================================================*/
int do_schedule(struct proc * caller, message * m_ptr)
{
/* Handle sys_schedule(): change the scheduling parameters of a process.
 * Only the process' registered scheduler is allowed to do this.
 */
  struct proc *target;
  int nr;

  if (!isokendpt(m_ptr->m_lsys_krn_schedule.endpoint, &nr))
	return EINVAL;
  target = proc_addr(nr);

  /* Only this process' scheduler can schedule it. */
  if (caller != target->p_scheduler)
	return EPERM;

  /* Hand the requested priority, quantum and CPU to the scheduler code. */
  return sched_proc(target,
	m_ptr->m_lsys_krn_schedule.priority,
	m_ptr->m_lsys_krn_schedule.quantum,
	m_ptr->m_lsys_krn_schedule.cpu);
}

View File

@@ -0,0 +1,71 @@
/* The kernel call implemented in this file:
* m_type: SYS_SETALARM
*
* The parameters for this kernel call are:
* m_lsys_krn_sys_setalarm.exp_time (alarm's expiration time)
* m_lsys_krn_sys_setalarm.abs_time (expiration time is absolute?)
* m_lsys_krn_sys_setalarm.time_left (return seconds left of previous)
*/
#include "kernel/system.h"
#include <minix/endpoint.h>
#include <assert.h>
#if USE_SETALARM
static void cause_alarm(minix_timer_t *tp);
/*===========================================================================*
 *				do_setalarm				     *
 *===========================================================================*/
int do_setalarm(struct proc * caller, message * m_ptr)
{
/* A system process requests a synchronous alarm, or cancels a pending one
 * (expiration time zero).  The ticks left on any previous alarm are
 * returned in the reply message.
 */
  minix_timer_t *tp;		/* the process' own alarm timer */
  long exp_time;		/* requested expiration time */
  int use_abs_time;		/* nonzero: exp_time is absolute */
  clock_t now;			/* current monotonic uptime */

  exp_time = m_ptr->m_lsys_krn_sys_setalarm.exp_time;
  use_abs_time = m_ptr->m_lsys_krn_sys_setalarm.abs_time;

  /* Only system processes own an alarm timer. */
  if (!(priv(caller)->s_flags & SYS_PROC))
	return EPERM;

  /* Prepare the per-process timer structure for this alarm. */
  tp = &(priv(caller)->s_alarm_timer);
  tmr_arg(tp)->ta_int = caller->p_endpoint;
  tp->tmr_func = cause_alarm;

  /* Report the ticks left on a previously set alarm, or 0 if none. */
  now = get_monotonic();
  if (tp->tmr_exp_time != TMR_NEVER && now < tp->tmr_exp_time)
	m_ptr->m_lsys_krn_sys_setalarm.time_left = tp->tmr_exp_time - now;
  else
	m_ptr->m_lsys_krn_sys_setalarm.time_left = 0;

  /* An expiration time of zero cancels the alarm; otherwise (re)arm it. */
  if (exp_time == 0) {
	reset_kernel_timer(tp);
  } else {
	tp->tmr_exp_time = use_abs_time ?
		exp_time : exp_time + get_monotonic();
	set_kernel_timer(tp, tp->tmr_exp_time, tp->tmr_func);
  }
  return OK;
}
/*===========================================================================*
 *				cause_alarm				     *
 *===========================================================================*/
static void cause_alarm(minix_timer_t *tp)
{
/* A synchronous alarm timer expired.  The requesting process' endpoint is
 * stored in the timer argument 'ta_int'; notify it on behalf of CLOCK.
 */
  endpoint_t target = tmr_arg(tp)->ta_int;

  mini_notify(proc_addr(CLOCK), target);
}
#endif /* USE_SETALARM */

View File

@@ -0,0 +1,30 @@
/* The kernel call implemented in this file:
* m_type: SYS_SETGRANT
*
* The parameters for this kernel call are:
* m_lsys_krn_sys_setgrant.addr address of grant table in own address space
* m_lsys_krn_sys_setgrant.size number of entries
*/
#include "kernel/system.h"
#include <minix/safecopies.h>
/*===========================================================================*
 *				do_setgrant				     *
 *===========================================================================*/
int do_setgrant(struct proc * caller, message * m_ptr)
{
/* Install the caller's grant table: record its address and size in the
 * caller's priv structure.  Refused for processes without privileges.
 */
  if (RTS_ISSET(caller, RTS_NO_PRIV) || !(priv(caller)))
	return EPERM;

  _K_SET_GRANT_TABLE(caller,
	m_ptr->m_lsys_krn_sys_setgrant.addr,
	m_ptr->m_lsys_krn_sys_setgrant.size);
  return OK;
}

View File

@@ -0,0 +1,54 @@
/* The kernel call implemented in this file:
* m_type: SYS_SETTIME
*
* The parameters for this kernel call are:
* m_lsys_krn_sys_settime.now
* m_lsys_krn_sys_settime.clock_id
* m_lsys_krn_sys_settime.sec
* m_lsys_krn_sys_settime.nsec
*/
#include "kernel/system.h"
#include <minix/endpoint.h>
#include <time.h>
/*===========================================================================*
 *				do_settime				     *
 *===========================================================================*/
int do_settime(struct proc * caller, message * m_ptr)
{
/* Handle sys_settime(): either gradually adjust (adjtime) or directly set
 * (settimeofday) the realtime clock.  Only CLOCK_REALTIME can be changed;
 * the monotonic clock never jumps.
 */
  clock_t newclock;
  int32_t ticks;
  time_t timediff, timediff_ticks;

  if (m_ptr->m_lsys_krn_sys_settime.clock_id != CLOCK_REALTIME) /* only realtime can change */
	return EINVAL;

  if (m_ptr->m_lsys_krn_sys_settime.now == 0) { /* user just wants to adjtime() */
	/* convert delta value from seconds and nseconds to ticks */
	ticks = (m_ptr->m_lsys_krn_sys_settime.sec * system_hz) +
		(m_ptr->m_lsys_krn_sys_settime.nsec/(1000000000/system_hz));
	set_adjtime_delta(ticks);
	return(OK);
  } /* else user wants to set the time */

  /* New realtime is expressed as an offset from boottime, in ticks. */
  timediff = m_ptr->m_lsys_krn_sys_settime.sec - boottime;
  timediff_ticks = timediff * system_hz;

  /* prevent a negative value for realtime; the LONG_MIN/2 .. LONG_MAX/2
   * bounds also guard against overflow of the ticks computation below.
   */
  if (m_ptr->m_lsys_krn_sys_settime.sec <= boottime ||
      timediff_ticks < LONG_MIN/2 || timediff_ticks > LONG_MAX/2) {
	/* boottime was likely wrong, try to correct it. */
	boottime = m_ptr->m_lsys_krn_sys_settime.sec;
	set_realtime(1);
	return(OK);
  }

  /* calculate the new value of realtime in ticks */
  newclock = timediff_ticks +
	(m_ptr->m_lsys_krn_sys_settime.nsec/(1000000000/system_hz));
  set_realtime(newclock);
  return(OK);
}

View File

@@ -0,0 +1,98 @@
/* The kernel call that is implemented in this file:
* m_type: SYS_SIGRETURN
*
* The parameters for this kernel call are:
* m_sigcalls.endp # process returning from handler
* m_sigcalls.sigctx # pointer to sigcontext structure
*
*/
#include "kernel/system.h"
#include <string.h>
#include <machine/cpu.h>
#if USE_SIGRETURN
/*===========================================================================*
 *			      do_sigreturn				     *
 *===========================================================================*/
int do_sigreturn(struct proc * caller, message * m_ptr)
{
/* POSIX style signals require sys_sigreturn to put things in order before
 * the signalled process can resume execution: the register state saved in
 * the user-space sigcontext is copied back into the process' kernel-side
 * register frame.
 */
  struct sigcontext sc;
  register struct proc *rp;
  int proc_nr, r;

  /* The target must be a valid, non-kernel process. */
  if (!isokendpt(m_ptr->m_sigcalls.endpt, &proc_nr)) return EINVAL;
  if (iskerneln(proc_nr)) return EPERM;
  rp = proc_addr(proc_nr);

  /* Copy in the sigcontext structure. */
  if ((r = data_copy(m_ptr->m_sigcalls.endpt,
	(vir_bytes)m_ptr->m_sigcalls.sigctx, KERNEL,
	(vir_bytes)&sc, sizeof(struct sigcontext))) != OK)
	return r;

#if defined(__i386__)
  /* Restore user bits of psw from sc, maintain system bits from proc. */
  sc.sc_eflags = (sc.sc_eflags & X86_FLAGS_USER) |
	(rp->p_reg.psw & ~X86_FLAGS_USER);
#endif

#if defined(__i386__)
  /* Write back registers we allow to be restored, i.e.
   * not the segment ones.
   */
  rp->p_reg.di = sc.sc_edi;
  rp->p_reg.si = sc.sc_esi;
  rp->p_reg.fp = sc.sc_ebp;
  rp->p_reg.bx = sc.sc_ebx;
  rp->p_reg.dx = sc.sc_edx;
  rp->p_reg.cx = sc.sc_ecx;
  rp->p_reg.retreg = sc.sc_eax;
  rp->p_reg.pc = sc.sc_eip;
  rp->p_reg.psw = sc.sc_eflags;
  rp->p_reg.sp = sc.sc_esp;
#endif

#if defined(__arm__)
  /* Restore the general-purpose register file as saved by do_sigsend(). */
  rp->p_reg.psr = sc.sc_spsr;
  rp->p_reg.retreg = sc.sc_r0;
  rp->p_reg.r1 = sc.sc_r1;
  rp->p_reg.r2 = sc.sc_r2;
  rp->p_reg.r3 = sc.sc_r3;
  rp->p_reg.r4 = sc.sc_r4;
  rp->p_reg.r5 = sc.sc_r5;
  rp->p_reg.r6 = sc.sc_r6;
  rp->p_reg.r7 = sc.sc_r7;
  rp->p_reg.r8 = sc.sc_r8;
  rp->p_reg.r9 = sc.sc_r9;
  rp->p_reg.r10 = sc.sc_r10;
  rp->p_reg.fp = sc.sc_r11;
  rp->p_reg.r12 = sc.sc_r12;
  rp->p_reg.sp = sc.sc_usr_sp;
  rp->p_reg.lr = sc.sc_usr_lr;
  rp->p_reg.pc = sc.sc_pc;
#endif

  /* Restore the registers. */
  arch_proc_setcontext(rp, &rp->p_reg, 1, sc.trap_style);

  /* NOTE(review): the magic check happens *after* the register state has
   * already been written back above; a corrupt context is only reported,
   * not rejected -- confirm this is intentional.
   */
  if(sc.sc_magic != SC_MAGIC) { printf("kernel sigreturn: corrupt signal context\n"); }

#if defined(__i386__)
  if (sc.sc_flags & MF_FPU_INITIALIZED)
  {
	memcpy(rp->p_seg.fpu_state, &sc.sc_fpu_state, FPU_XFP_SIZE);
	rp->p_misc_flags |=  MF_FPU_INITIALIZED; /* Restore math usage flag. */
	/* force reloading FPU */
	release_fpu(rp);
  }
#endif

  return OK;
}
#endif /* USE_SIGRETURN */

View File

@@ -0,0 +1,166 @@
/* The kernel call that is implemented in this file:
* m_type: SYS_SIGSEND
*
* The parameters for this kernel call are:
* m_sigcalls.endpt # process to call signal handler
* m_sigcalls.sigctx # pointer to sigcontext structure
*
*/
#include "kernel/system.h"
#include <signal.h>
#include <string.h>
#if USE_SIGSEND
/*===========================================================================*
 *			      do_sigsend				     *
 *===========================================================================*/
int do_sigsend(struct proc * caller, message * m_ptr)
{
/* Handle sys_sigsend, POSIX-style signal handling: build a signal frame
 * (saved register context plus handler arguments) on the target process'
 * user stack, then redirect the process to its signal handler.
 */
  struct sigmsg smsg;
  register struct proc *rp;
  struct sigframe_sigcontext fr, *frp;
  int proc_nr, r;
#if defined(__i386__)
  reg_t new_fp;
#endif

  /* The target must be a valid, non-kernel process. */
  if (!isokendpt(m_ptr->m_sigcalls.endpt, &proc_nr)) return EINVAL;
  if (iskerneln(proc_nr)) return EPERM;
  rp = proc_addr(proc_nr);

  /* Get the sigmsg structure into our address space. */
  if ((r = data_copy_vmcheck(caller, caller->p_endpoint,
	(vir_bytes)m_ptr->m_sigcalls.sigctx, KERNEL,
	(vir_bytes)&smsg, (phys_bytes) sizeof(struct sigmsg))) != OK)
	return r;

  /* WARNING: the following code may be run more than once even for a single
   * signal delivery.  Do not change registers here.  See the comment below.
   */

  /* Compute the user stack pointer where sigframe will start. */
  smsg.sm_stkptr = arch_get_sp(rp);
  frp = (struct sigframe_sigcontext *) smsg.sm_stkptr - 1;

  /* Copy the registers to the sigcontext structure. */
  memset(&fr, 0, sizeof(fr));
  fr.sf_scp = &frp->sf_sc;

#if defined(__i386__)
  /* Snapshot the full x86 register file into the sigcontext. */
  fr.sf_sc.sc_gs = rp->p_reg.gs;
  fr.sf_sc.sc_fs = rp->p_reg.fs;
  fr.sf_sc.sc_es = rp->p_reg.es;
  fr.sf_sc.sc_ds = rp->p_reg.ds;
  fr.sf_sc.sc_edi = rp->p_reg.di;
  fr.sf_sc.sc_esi = rp->p_reg.si;
  fr.sf_sc.sc_ebp = rp->p_reg.fp;
  fr.sf_sc.sc_ebx = rp->p_reg.bx;
  fr.sf_sc.sc_edx = rp->p_reg.dx;
  fr.sf_sc.sc_ecx = rp->p_reg.cx;
  fr.sf_sc.sc_eax = rp->p_reg.retreg;
  fr.sf_sc.sc_eip = rp->p_reg.pc;
  fr.sf_sc.sc_cs = rp->p_reg.cs;
  fr.sf_sc.sc_eflags = rp->p_reg.psw;
  fr.sf_sc.sc_esp = rp->p_reg.sp;
  fr.sf_sc.sc_ss = rp->p_reg.ss;
  fr.sf_fp = rp->p_reg.fp;
  fr.sf_signum = smsg.sm_signo;
  new_fp = (reg_t) &frp->sf_fp;
  fr.sf_scpcopy = fr.sf_scp;
  fr.sf_ra_sigreturn = smsg.sm_sigreturn;
  fr.sf_ra= rp->p_reg.pc;
  fr.sf_sc.trap_style = rp->p_seg.p_kern_trap_style;

  if (fr.sf_sc.trap_style == KTS_NONE) {
	printf("do_sigsend: sigsend an unsaved process\n");
	return EINVAL;
  }

  if (proc_used_fpu(rp)) {
	/* save the FPU context before saving it to the sig context */
	save_fpu(rp);
	memcpy(&fr.sf_sc.sc_fpu_state, rp->p_seg.fpu_state, FPU_XFP_SIZE);
  }
#endif

#if defined(__arm__)
  /* Snapshot the ARM register file into the sigcontext. */
  fr.sf_sc.sc_spsr = rp->p_reg.psr;
  fr.sf_sc.sc_r0 = rp->p_reg.retreg;
  fr.sf_sc.sc_r1 = rp->p_reg.r1;
  fr.sf_sc.sc_r2 = rp->p_reg.r2;
  fr.sf_sc.sc_r3 = rp->p_reg.r3;
  fr.sf_sc.sc_r4 = rp->p_reg.r4;
  fr.sf_sc.sc_r5 = rp->p_reg.r5;
  fr.sf_sc.sc_r6 = rp->p_reg.r6;
  fr.sf_sc.sc_r7 = rp->p_reg.r7;
  fr.sf_sc.sc_r8 = rp->p_reg.r8;
  fr.sf_sc.sc_r9 = rp->p_reg.r9;
  fr.sf_sc.sc_r10 = rp->p_reg.r10;
  fr.sf_sc.sc_r11 = rp->p_reg.fp;
  fr.sf_sc.sc_r12 = rp->p_reg.r12;
  fr.sf_sc.sc_usr_sp = rp->p_reg.sp;
  fr.sf_sc.sc_usr_lr = rp->p_reg.lr;
  fr.sf_sc.sc_svc_lr = 0;	/* ? */
  fr.sf_sc.sc_pc = rp->p_reg.pc;	/* R15 */
#endif

  /* Finish the sigcontext initialization. */
  fr.sf_sc.sc_mask = smsg.sm_mask;
  fr.sf_sc.sc_flags = rp->p_misc_flags & MF_FPU_INITIALIZED;
  fr.sf_sc.sc_magic = SC_MAGIC;

  /* Initialize the sigframe structure. */
  fpu_sigcontext(rp, &fr, &fr.sf_sc);

  /* Copy the sigframe structure to the user's stack. */
  if ((r = data_copy_vmcheck(caller, KERNEL, (vir_bytes)&fr,
	m_ptr->m_sigcalls.endpt, (vir_bytes)frp,
	(vir_bytes)sizeof(struct sigframe_sigcontext))) != OK)
	return r;

  /* WARNING: up to the statement above, the code may run multiple times, since
   * copying out the frame/context may fail with VMSUSPEND the first time.  For
   * that reason, changes to process registers *MUST* be deferred until after
   * this last copy -- otherwise, these changes will be made several times,
   * possibly leading to corrupted process state.
   */

  /* Reset user registers to execute the signal handler. */
  rp->p_reg.sp = (reg_t) frp;
  rp->p_reg.pc = (reg_t) smsg.sm_sighandler;

#if defined(__i386__)
  rp->p_reg.fp = new_fp;
#elif defined(__arm__)
  /* use the ARM link register to set the return address from the signal
   * handler
   */
  rp->p_reg.lr = (reg_t) smsg.sm_sigreturn;
  if(rp->p_reg.lr & 1) { printf("sigsend: LSB LR makes no sense.\n"); }

  /* pass signal handler parameters in registers */
  rp->p_reg.retreg = (reg_t) smsg.sm_signo;
  rp->p_reg.r1 = 0;	/* sf_code */
  rp->p_reg.r2 = (reg_t) fr.sf_scp;
  rp->p_misc_flags |= MF_CONTEXT_SET;
#endif

  /* Signal handler should get clean FPU. */
  rp->p_misc_flags &= ~MF_FPU_INITIALIZED;

  if(!RTS_ISSET(rp, RTS_PROC_STOP)) {
	printf("system: warning: sigsend a running process\n");
	printf("caller stack: ");
	proc_stacktrace(caller);
  }

  return OK;
}
#endif /* USE_SIGSEND */

View File

@@ -0,0 +1,132 @@
/* The kernel call that is implemented in this file:
* m_type: SYS_SPROF
*
* The parameters for this kernel call are:
* m_lsys_krn_sys_sprof.action (start/stop profiling)
* m_lsys_krn_sys_sprof.mem_size (available memory for data)
* m_lsys_krn_sys_sprof.freq (requested sample frequency)
* m_lsys_krn_sys_sprof.endpt (endpoint of caller)
* m_lsys_krn_sys_sprof.ctl_ptr (location of info struct)
* m_lsys_krn_sys_sprof.mem_ptr (location of memory for data)
* m_lsys_krn_sys_sprof.intr_type (interrupt source: RTC/NMI)
*
* Changes:
* 14 Aug, 2006 Created (Rogier Meurs)
*/
#include "kernel/system.h"
#include "kernel/watchdog.h"
#if SPROFILE
/* user address to write info struct */
static vir_bytes sprof_info_addr_vir;
static void clean_seen_flag(void)
{
int i;
for (i = 0; i < NR_TASKS + NR_PROCS; i++)
proc[i].p_misc_flags &= ~MF_SPROF_SEEN;
}
/*===========================================================================*
 *				do_sprofile				     *
 *===========================================================================*/
int do_sprofile(struct proc * caller, message * m_ptr)
{
/* Handle sys_sprof(): start or stop statistical (sample-based) profiling. */
  int proc_nr;
  int err;

  switch(m_ptr->m_lsys_krn_sys_sprof.action) {

  case PROF_START:
	/* Starting profiling.
	 *
	 * Check if profiling is not already running.  Calculate physical
	 * addresses of user pointers.  Reset counters.  Start CMOS timer.
	 * Turn on profiling.
	 */
	if (sprofiling) {
		printf("SYSTEM: start s-profiling: already started\n");
		return EBUSY;
	}

	/* Test endpoint number. */
	if(!isokendpt(m_ptr->m_lsys_krn_sys_sprof.endpt, &proc_nr))
		return EINVAL;

	/* Set parameters for statistical profiler. */
	sprof_ep = m_ptr->m_lsys_krn_sys_sprof.endpt;
	sprof_info_addr_vir = m_ptr->m_lsys_krn_sys_sprof.ctl_ptr;
	sprof_data_addr_vir = m_ptr->m_lsys_krn_sys_sprof.mem_ptr;

	sprof_info.mem_used = 0;
	sprof_info.total_samples = 0;
	sprof_info.idle_samples = 0;
	sprof_info.system_samples = 0;
	sprof_info.user_samples = 0;

	/* Clamp the sample memory to the kernel's buffer size. */
	sprof_mem_size =
		m_ptr->m_lsys_krn_sys_sprof.mem_size < SAMPLE_BUFFER_SIZE ?
		m_ptr->m_lsys_krn_sys_sprof.mem_size : SAMPLE_BUFFER_SIZE;

	switch (sprofiling_type = m_ptr->m_lsys_krn_sys_sprof.intr_type) {
	case PROF_RTC:
		init_profile_clock(m_ptr->m_lsys_krn_sys_sprof.freq);
		break;
	case PROF_NMI:
		/* Fix: this read '_ptr->...' (missing the 'm'), which is an
		 * undeclared identifier and cannot compile.
		 */
		err = nmi_watchdog_start_profiling(
			m_ptr->m_lsys_krn_sys_sprof.freq);
		if (err)
			return err;
		break;
	default:
		printf("ERROR : unknown profiling interrupt type\n");
		return EINVAL;
	}

	sprofiling = 1;

	clean_seen_flag();

	return OK;

  case PROF_STOP:
	/* Stopping profiling.
	 *
	 * Check if profiling is indeed running.  Turn off profiling.
	 * Stop CMOS timer.  Copy info struct to user process.
	 */
	if (!sprofiling) {
		printf("SYSTEM: stop s-profiling: not started\n");
		return EBUSY;
	}

	sprofiling = 0;

	switch (sprofiling_type) {
	case PROF_RTC:
		stop_profile_clock();
		break;
	case PROF_NMI:
		nmi_watchdog_stop_profiling();
		break;
	}

	data_copy(KERNEL, (vir_bytes) &sprof_info,
		sprof_ep, sprof_info_addr_vir, sizeof(sprof_info));
	data_copy(KERNEL, (vir_bytes) sprof_sample_buffer,
		sprof_ep, sprof_data_addr_vir, sprof_info.mem_used);

	clean_seen_flag();

	return OK;

  default:
	return EINVAL;
  }
}
#endif /* SPROFILE */

View File

@@ -0,0 +1,34 @@
/* The kernel call implemented in this file:
* m_type: SYS_STATECTL
*
* The parameters for this kernel call are:
* m_lsys_krn_sys_statectl.request (state control request)
*/
#include "kernel/system.h"
#if USE_STATECTL
/*===========================================================================*
 *				do_statectl				     *
 *===========================================================================*/
int do_statectl(struct proc * caller, message * m_ptr)
{
/* Handle sys_statectl(): a process has issued a state control request. */
  int request = m_ptr->m_lsys_krn_sys_statectl.request;

  if (request == SYS_STATE_CLEAR_IPC_REFS) {
	/* Clear IPC references for all the processes communicating
	 * with the caller.
	 */
	clear_ipc_refs(caller, EDEADSRCDST);
	return OK;
  }

  printf("do_statectl: bad request %d\n", request);
  return EINVAL;
}
#endif /* USE_STATECTL */

View File

@@ -0,0 +1,19 @@
/* The kernel call implemented in this file:
* m_type: SYS_STIME
*
* The parameters for this kernel call are:
* m_lsys_krn_sys_stime.boot_time
*/
#include "kernel/system.h"
#include <minix/endpoint.h>
/*===========================================================================*
 *				do_stime				     *
 *===========================================================================*/
int do_stime(struct proc * caller, message * m_ptr)
{
/* Handle sys_stime(): record the wall-clock time at system boot, as
 * supplied by the caller, in the global 'boottime' variable.
 */
  boottime = m_ptr->m_lsys_krn_sys_stime.boot_time;
  return OK;
}

View File

@@ -0,0 +1,46 @@
/* The kernel call implemented in this file:
* m_type: SYS_TIMES
*
* The parameters for this kernel call are:
* m_lsys_krn_sys_times.endpt (get info for this process)
* m_krn_lsys_sys_times.user_time (return values ...)
* m_krn_lsys_sys_times.system_time
* m_krn_lsys_sys_times.boot_time
* m_krn_lsys_sys_times.boot_ticks
* m_krn_lsys_sys_times.real_ticks
*/
#include "kernel/system.h"
#include <minix/endpoint.h>
#if USE_TIMES
/*===========================================================================*
 *				do_times				     *
 *===========================================================================*/
int do_times(struct proc * caller, message * m_ptr)
{
/* Handle sys_times(): fill in the accounting times of the requested
 * process (SELF meaning the caller) plus the global clock values.  The
 * clock interrupt may update the user/system time concurrently, but that
 * cannot do any harm here.
 */
  register const struct proc *target;
  endpoint_t ep;
  int nr;

  /* Resolve SELF to the caller's own endpoint. */
  ep = m_ptr->m_lsys_krn_sys_times.endpt;
  if (ep == SELF)
	ep = caller->p_endpoint;

  /* Per-process times only if a valid endpoint was named. */
  if (ep != NONE && isokendpt(ep, &nr)) {
	target = proc_addr(nr);
	m_ptr->m_krn_lsys_sys_times.user_time = target->p_user_time;
	m_ptr->m_krn_lsys_sys_times.system_time = target->p_sys_time;
  }

  /* Global clock values are always returned. */
  m_ptr->m_krn_lsys_sys_times.boot_ticks = get_monotonic();
  m_ptr->m_krn_lsys_sys_times.real_ticks = get_realtime();
  m_ptr->m_krn_lsys_sys_times.boot_time = boottime;
  return OK;
}
#endif /* USE_TIMES */

View File

@@ -0,0 +1,206 @@
/* The kernel call implemented in this file:
* m_type: SYS_TRACE
*
* The parameters for this kernel call are:
* m_lsys_krn_sys_trace.endpt process that is traced
* m_lsys_krn_sys_trace.request trace request
* m_lsys_krn_sys_trace.address address at traced process' space
* m_lsys_krn_sys_trace.data data to be written
* m_krn_lsys_sys_trace.data data to be returned
*/
#include "kernel/system.h"
#include <sys/ptrace.h>
#if USE_TRACE
/*==========================================================================*
 *				do_trace				    *
 *==========================================================================*/
int do_trace(struct proc * caller, message * m_ptr)
{
/* Handle the debugging commands supported by the ptrace system call
 * The commands are:
 * T_STOP	stop the process
 * T_OK		enable tracing by parent for this process
 * T_GETINS	return value from instruction space
 * T_GETDATA	return value from data space
 * T_GETUSER	return value from user process table
 * T_SETINS	set value in instruction space
 * T_SETDATA	set value in data space
 * T_SETUSER	set value in user process table
 * T_RESUME	resume execution
 * T_EXIT	exit
 * T_STEP	set trace bit
 * T_SYSCALL	trace system call
 * T_ATTACH	attach to an existing process
 * T_DETACH	detach from a traced process
 * T_SETOPT	set trace options
 * T_GETRANGE	get range of values
 * T_SETRANGE	set range of values
 *
 * The T_OK, T_ATTACH, T_EXIT, and T_SETOPT commands are handled completely by
 * the process manager.  T_GETRANGE and T_SETRANGE use sys_vircopy().  All
 * others come here.
 *
 * Fix in this revision: the T_GETINS, T_GETDATA and T_GETUSER cases stored
 * their result in m_lsys_krn_sys_trace.data (the *request* struct), while
 * the header above and every other case use m_krn_lsys_sys_trace.data as
 * the reply field; they now consistently use the reply field.
 */
  register struct proc *rp;
  vir_bytes tr_addr = m_ptr->m_lsys_krn_sys_trace.address;
  long tr_data = m_ptr->m_lsys_krn_sys_trace.data;
  int tr_request = m_ptr->m_lsys_krn_sys_trace.request;
  int tr_proc_nr_e = m_ptr->m_lsys_krn_sys_trace.endpt, tr_proc_nr;
  unsigned char ub;
  int i;

/* Copy a kernel-side buffer into the traced process' address space. */
#define COPYTOPROC(addr, myaddr, length) {		\
	struct vir_addr fromaddr, toaddr;		\
	int r;						\
	fromaddr.proc_nr_e = KERNEL;			\
	toaddr.proc_nr_e = tr_proc_nr_e;		\
	fromaddr.offset = (myaddr);			\
	toaddr.offset = (addr);				\
	if((r=virtual_copy_vmcheck(caller, &fromaddr,	\
			&toaddr, length)) != OK) {	\
		printf("Can't copy in sys_trace: %d\n", r);\
		return r;\
	}  \
}

/* Copy from the traced process' address space into a kernel-side buffer. */
#define COPYFROMPROC(addr, myaddr, length) {		\
	struct vir_addr fromaddr, toaddr;		\
	int r;						\
	fromaddr.proc_nr_e = tr_proc_nr_e;		\
	toaddr.proc_nr_e = KERNEL;			\
	fromaddr.offset = (addr);			\
	toaddr.offset = (myaddr);			\
	if((r=virtual_copy_vmcheck(caller, &fromaddr,	\
			&toaddr, length)) != OK) {	\
		printf("Can't copy in sys_trace: %d\n", r);\
		return r;\
	}  \
}

  if(!isokendpt(tr_proc_nr_e, &tr_proc_nr)) return(EINVAL);
  if (iskerneln(tr_proc_nr)) return(EPERM);

  rp = proc_addr(tr_proc_nr);
  if (isemptyp(rp)) return(EINVAL);
  switch (tr_request) {
  case T_STOP:			/* stop process */
	RTS_SET(rp, RTS_P_STOP);
	/* clear syscall trace and single step flags */
	rp->p_misc_flags &= ~(MF_SC_TRACE | MF_STEP);
	return(OK);

  case T_GETINS:		/* return value from instruction space */
	COPYFROMPROC(tr_addr, (vir_bytes) &tr_data, sizeof(long));
	m_ptr->m_krn_lsys_sys_trace.data = tr_data;
	break;

  case T_GETDATA:		/* return value from data space */
	COPYFROMPROC(tr_addr, (vir_bytes) &tr_data, sizeof(long));
	m_ptr->m_krn_lsys_sys_trace.data = tr_data;
	break;

  case T_GETUSER:		/* return value from process table */
	if ((tr_addr & (sizeof(long) - 1)) != 0) return(EFAULT);

	if (tr_addr <= sizeof(struct proc) - sizeof(long)) {
		m_ptr->m_krn_lsys_sys_trace.data =
			*(long *) ((char *) rp + (int) tr_addr);
		break;
	}

	/* The process's proc struct is followed by its priv struct.
	 * The alignment here should be unnecessary, but better safe..
	 */
	i = sizeof(long) - 1;
	tr_addr -= (sizeof(struct proc) + i) & ~i;

	if (tr_addr > sizeof(struct priv) - sizeof(long)) return(EFAULT);

	m_ptr->m_krn_lsys_sys_trace.data =
		*(long *) ((char *) rp->p_priv + (int) tr_addr);
	break;

  case T_SETINS:		/* set value in instruction space */
	COPYTOPROC(tr_addr, (vir_bytes) &tr_data, sizeof(long));
	m_ptr->m_krn_lsys_sys_trace.data = 0;
	break;

  case T_SETDATA:		/* set value in data space */
	COPYTOPROC(tr_addr, (vir_bytes) &tr_data, sizeof(long));
	m_ptr->m_krn_lsys_sys_trace.data = 0;
	break;

  case T_SETUSER:		/* set value in process table */
	if ((tr_addr & (sizeof(reg_t) - 1)) != 0 ||
	     tr_addr > sizeof(struct stackframe_s) - sizeof(reg_t))
		return(EFAULT);
	i = (int) tr_addr;
#if defined(__i386__)
	/* Altering segment registers might crash the kernel when it
	 * tries to load them prior to restarting a process, so do
	 * not allow it.
	 */
	if (i == (int) &((struct proc *) 0)->p_reg.cs ||
	    i == (int) &((struct proc *) 0)->p_reg.ds ||
	    i == (int) &((struct proc *) 0)->p_reg.es ||
	    i == (int) &((struct proc *) 0)->p_reg.gs ||
	    i == (int) &((struct proc *) 0)->p_reg.fs ||
	    i == (int) &((struct proc *) 0)->p_reg.ss)
		return(EFAULT);

	if (i == (int) &((struct proc *) 0)->p_reg.psw)
		/* only selected bits are changeable */
		SETPSW(rp, tr_data);
	else
		*(reg_t *) ((char *) &rp->p_reg + i) = (reg_t) tr_data;
#elif defined(__arm__)
	if (i == (int) &((struct proc *) 0)->p_reg.psr) {
		/* only selected bits are changeable */
		SET_USR_PSR(rp, tr_data);
	} else {
		*(reg_t *) ((char *) &rp->p_reg + i) = (reg_t) tr_data;
	}
#endif
	m_ptr->m_krn_lsys_sys_trace.data = 0;
	break;

  case T_DETACH:		/* detach tracer */
	rp->p_misc_flags &= ~MF_SC_ACTIVE;

	/* fall through */
  case T_RESUME:		/* resume execution */
	RTS_UNSET(rp, RTS_P_STOP);
	m_ptr->m_krn_lsys_sys_trace.data = 0;
	break;

  case T_STEP:			/* set trace bit */
	rp->p_misc_flags |= MF_STEP;
	RTS_UNSET(rp, RTS_P_STOP);
	m_ptr->m_krn_lsys_sys_trace.data = 0;
	break;

  case T_SYSCALL:		/* trace system call */
	rp->p_misc_flags |= MF_SC_TRACE;
	RTS_UNSET(rp, RTS_P_STOP);
	m_ptr->m_krn_lsys_sys_trace.data = 0;
	break;

  case T_READB_INS:		/* get value from instruction space */
	COPYFROMPROC(tr_addr, (vir_bytes) &ub, 1);
	m_ptr->m_krn_lsys_sys_trace.data = ub;
	break;

  case T_WRITEB_INS:		/* set value in instruction space */
	ub = (unsigned char) (tr_data & 0xff);
	COPYTOPROC(tr_addr, (vir_bytes) &ub, 1);
	m_ptr->m_krn_lsys_sys_trace.data = 0;
	break;

  default:
	return(EINVAL);
  }
  return(OK);
}
#endif /* USE_TRACE */

39
minix/kernel/system/do_umap.c Executable file
View File

@@ -0,0 +1,39 @@
/* The kernel call implemented in this file:
* m_type: SYS_UMAP
*
* The parameters for this kernel call are:
* m_lsys_krn_sys_umap.src_endpt (process number)
* m_lsys_krn_sys_umap.segment (segment where address is: T, D, or S)
* m_lsys_krn_sys_umap.src_addr (virtual address)
* m_krn_lsys_sys_umap.dst_addr (returns physical address)
* m_lsys_krn_sys_umap.nr_bytes (size of datastructure)
*/
#include "kernel/system.h"
#include <minix/endpoint.h>
#if USE_UMAP
#if ! USE_UMAP_REMOTE
#undef do_umap_remote
#endif
/*==========================================================================*
* do_umap *
*==========================================================================*/
int do_umap(struct proc * caller, message * m_ptr)
{
/* Restricted form of SYS_UMAP_REMOTE: translate an address in the caller's
 * own address space, or a grant for which the caller itself is the grantee.
 * After the security check we simply delegate to do_umap_remote().
 */
  int index = m_ptr->m_lsys_krn_sys_umap.segment & SEGMENT_INDEX;
  int src = m_ptr->m_lsys_krn_sys_umap.src_endpt;

  /* Anything other than a self-lookup or a grant lookup requires the full
   * SYS_UMAP_REMOTE call and is refused here.
   */
  if (index != MEM_GRANT && src != SELF)
	return EPERM;

  /* Force the grantee to be the caller, then hand off. */
  m_ptr->m_lsys_krn_sys_umap.dst_endpt = SELF;

  return do_umap_remote(caller, m_ptr);
}
#endif /* USE_UMAP */

View File

@@ -0,0 +1,122 @@
/* The kernel call implemented in this file:
* m_type: SYS_UMAP_REMOTE
*
* The parameters for this kernel call are:
* m_lsys_krn_sys_umap.src_endpt (process number)
* m_lsys_krn_sys_umap.segment (segment where address is: T, D, or S)
* m_lsys_krn_sys_umap.src_addr (virtual address)
* m_lsys_krn_sys_umap.dst_endpt (process number of grantee to check access for)
* m_krn_lsys_sys_umap.dst_addr (returns physical address)
* m_lsys_krn_sys_umap.nr_bytes (size of datastructure)
*/
#include "kernel/system.h"
#include <minix/endpoint.h>
#if USE_UMAP || USE_UMAP_REMOTE
#if ! USE_UMAP_REMOTE
#undef do_umap_remote
#endif
/*==========================================================================*
* do_umap_remote *
*==========================================================================*/
int do_umap_remote(struct proc * caller, message * m_ptr)
{
/* Map a virtual address to a physical address, for non-kernel processes.
 * The source may be a plain virtual address in the target's address space,
 * or a memory grant; in the grant case the grant is verified against the
 * given grantee endpoint before the underlying address is looked up.
 * Returns OK with the physical address in the reply message, or a negative
 * error code (EINVAL/EPERM for bad arguments, EFAULT for failed lookups).
 */
  int seg_type = m_ptr->m_lsys_krn_sys_umap.segment & SEGMENT_TYPE;
  int seg_index = m_ptr->m_lsys_krn_sys_umap.segment & SEGMENT_INDEX;
  vir_bytes offset = m_ptr->m_lsys_krn_sys_umap.src_addr;
  int count = m_ptr->m_lsys_krn_sys_umap.nr_bytes;
  endpoint_t endpt = m_ptr->m_lsys_krn_sys_umap.src_endpt;
  endpoint_t grantee = m_ptr->m_lsys_krn_sys_umap.dst_endpt;
  int proc_nr, proc_nr_grantee;
  phys_bytes phys_addr = 0, lin_addr = 0;
  struct proc *targetpr;

  /* Verify process number. */
  if (endpt == SELF)
	okendpt(caller->p_endpoint, &proc_nr);
  else
	if (! isokendpt(endpt, &proc_nr))
		return(EINVAL);
  targetpr = proc_addr(proc_nr);

  /* Verify grantee endpoint: SELF means the caller; NONE/ANY are only
   * meaningful for grant lookups and are otherwise rejected.
   */
  if (grantee == SELF) {
	grantee = caller->p_endpoint;
  } else if (grantee == NONE ||
	grantee == ANY ||
	seg_index != MEM_GRANT ||
	!isokendpt(grantee, &proc_nr_grantee)) {
	return EINVAL;
  }

  /* See which mapping should be made. */
  switch(seg_type) {
  case LOCAL_VM_SEG:
	if(seg_index == MEM_GRANT) {
		vir_bytes newoffset;
		endpoint_t newep;
		int new_proc_nr;
		cp_grant_id_t grant = (cp_grant_id_t) offset;

		if(verify_grant(targetpr->p_endpoint, grantee, grant, count,
			0, 0, &newoffset, &newep, NULL) != OK) {
			/* Format fix: the grant id is int-sized and count is
			 * an int; the original passed a vir_bytes for %d and
			 * an int for %lx, misreading varargs on LP64.
			 */
			printf("SYSTEM: do_umap: verify_grant in %s, grant %d, bytes 0x%x, failed, caller %s\n", targetpr->p_name, (int) grant, count, caller->p_name);
			proc_stacktrace(caller);
			return EFAULT;
		}

		if(!isokendpt(newep, &new_proc_nr)) {
			printf("SYSTEM: do_umap: isokendpt failed\n");
			return EFAULT;
		}

		/* New lookup: the grant resolved to an address in the
		 * granter's address space.
		 */
		offset = newoffset;
		targetpr = proc_addr(new_proc_nr);
		seg_index = VIR_ADDR;
	}

	if(seg_index == VIR_ADDR) {
		phys_addr = lin_addr = offset;
	} else {
		/* Format fix: seg_index is an int, so %x rather than %lx. */
		printf("SYSTEM: bogus seg type 0x%x\n", seg_index);
		return EFAULT;
	}

	if(!lin_addr) {
		printf("SYSTEM:do_umap: umap_local failed\n");
		return EFAULT;
	}
	if(vm_lookup(targetpr, lin_addr, &phys_addr, NULL) != OK) {
		printf("SYSTEM:do_umap: vm_lookup failed\n");
		return EFAULT;
	}
	if(phys_addr == 0)
		panic("vm_lookup returned zero physical address");
	break;
  default:
	printf("umap: peculiar type\n");
	return EINVAL;
  }

  /* The result is only usable for DMA-style access if it is physically
   * contiguous for the whole requested range.
   */
  if(vm_running && vm_lookup_range(targetpr, lin_addr, NULL, count) != count) {
	printf("SYSTEM:do_umap: not contiguous\n");
	return EFAULT;
  }

  m_ptr->m_krn_lsys_sys_umap.dst_addr = phys_addr;
  if(phys_addr == 0) {
	/* Casts keep the %lx specifiers correct regardless of how reg_t,
	 * vir_bytes and phys_bytes are defined on this architecture.
	 */
	printf("kernel: umap 0x%x done by %d / %s, pc 0x%lx, 0x%lx -> 0x%lx\n",
		seg_type, caller->p_endpoint, caller->p_name,
		(unsigned long) caller->p_reg.pc, (unsigned long) offset,
		(unsigned long) phys_addr);
	printf("caller stack: ");
	proc_stacktrace(caller);
  }
  return (phys_addr == 0) ? EFAULT: OK;
}
#endif /* USE_UMAP || USE_UMAP_REMOTE */

View File

@@ -0,0 +1,182 @@
/* The kernel call implemented in this file:
* m_type: SYS_UPDATE
*
* The parameters for this kernel call are:
* m2_i1: SYS_UPD_SRC_ENDPT (source process endpoint)
* m2_i2: SYS_UPD_DST_ENDPT (destination process endpoint)
*/
#include "kernel/system.h"
#include "kernel/ipc.h"
#include <string.h>
#include <assert.h>
#if USE_UPDATE
#define DEBUG 0
#define proc_is_updatable(p) \
(RTS_ISSET(p, RTS_NO_PRIV) || RTS_ISSET(p, RTS_SIG_PENDING) \
|| (RTS_ISSET(p, RTS_RECEIVING) && !RTS_ISSET(p, RTS_SENDING)))
static void adjust_proc_slot(struct proc *rp, struct proc *from_rp);
static void adjust_priv_slot(struct priv *privp, struct priv
*from_privp);
static void swap_proc_slot_pointer(struct proc **rpp, struct proc
*src_rp, struct proc *dst_rp);
/*===========================================================================*
* do_update *
*===========================================================================*/
int do_update(struct proc * caller, message * m_ptr)
{
/* Handle sys_update(). Update a process into another by swapping their process
 * slots. Used for live update: the new instance (source) takes over the slot
 * identity of the old instance (destination). Both processes must be system
 * processes and must be in an updatable state (see proc_is_updatable).
 * Returns OK, or EINVAL/EPERM/EBUSY on validation failure.
 */
  endpoint_t src_e, dst_e;
  int src_p, dst_p;
  struct proc *src_rp, *dst_rp;
  struct priv *src_privp, *dst_privp;
  /* Copies of both proc and priv slots, taken before the swap so the
   * adjust_*_slot() helpers can restore per-slot identity afterwards. */
  struct proc orig_src_proc;
  struct proc orig_dst_proc;
  struct priv orig_src_priv;
  struct priv orig_dst_priv;
  int i;

  /* Lookup slots for source and destination process. */
  src_e = m_ptr->SYS_UPD_SRC_ENDPT;
  if(!isokendpt(src_e, &src_p)) {
	return EINVAL;
  }
  src_rp = proc_addr(src_p);
  src_privp = priv(src_rp);
  if(!(src_privp->s_flags & SYS_PROC)) {
	return EPERM;
  }

  dst_e = m_ptr->SYS_UPD_DST_ENDPT;
  if(!isokendpt(dst_e, &dst_p)) {
	return EINVAL;
  }
  dst_rp = proc_addr(dst_p);
  dst_privp = priv(dst_rp);
  if(!(dst_privp->s_flags & SYS_PROC)) {
	return EPERM;
  }

  assert(!proc_is_runnable(src_rp) && !proc_is_runnable(dst_rp));

  /* Check if processes are updatable. */
  if(!proc_is_updatable(src_rp) || !proc_is_updatable(dst_rp)) {
	return EBUSY;
  }

#if DEBUG
  printf("do_update: updating %d (%s, %d, %d) into %d (%s, %d, %d)\n",
	src_rp->p_endpoint, src_rp->p_name, src_rp->p_nr, priv(src_rp)->s_proc_nr,
	dst_rp->p_endpoint, dst_rp->p_name, dst_rp->p_nr, priv(dst_rp)->s_proc_nr);
  proc_stacktrace(src_rp);
  proc_stacktrace(dst_rp);
  printf("do_update: curr ptproc %d\n", get_cpulocal_var(ptproc)->p_endpoint);
#endif

  /* Let destination inherit the target mask from source, so the updated
   * process keeps permission to send to everything the old one could. */
  for (i=0; i < NR_SYS_PROCS; i++) {
	if (get_sys_bit(priv(src_rp)->s_ipc_to, i)) {
		set_sendto_bit(dst_rp, i);
	}
  }

  /* Save existing data. */
  orig_src_proc = *src_rp;
  orig_src_priv = *(priv(src_rp));
  orig_dst_proc = *dst_rp;
  orig_dst_priv = *(priv(dst_rp));

  /* Swap slots: wholesale structure copies in both directions. */
  *src_rp = orig_dst_proc;
  *src_privp = orig_dst_priv;
  *dst_rp = orig_src_proc;
  *dst_privp = orig_src_priv;

  /* Adjust process slots: restore the slot-bound identity fields (endpoint,
   * slot number, priv pointer, IPC/scheduling state) that must not move. */
  adjust_proc_slot(src_rp, &orig_src_proc);
  adjust_proc_slot(dst_rp, &orig_dst_proc);

  /* Adjust privilege slots. */
  adjust_priv_slot(priv(src_rp), &orig_src_priv);
  adjust_priv_slot(priv(dst_rp), &orig_dst_priv);

  /* Swap global process slot addresses. */
  swap_proc_slot_pointer(get_cpulocal_var_ptr(ptproc), src_rp, dst_rp);

#if DEBUG
  printf("do_update: updated %d (%s, %d, %d) into %d (%s, %d, %d)\n",
	src_rp->p_endpoint, src_rp->p_name, src_rp->p_nr, priv(src_rp)->s_proc_nr,
	dst_rp->p_endpoint, dst_rp->p_name, dst_rp->p_nr, priv(dst_rp)->s_proc_nr);
  proc_stacktrace(src_rp);
  proc_stacktrace(dst_rp);
  printf("do_update: curr ptproc %d\n", get_cpulocal_var(ptproc)->p_endpoint);
#endif

#ifdef CONFIG_SMP
  /* Both slots changed identity; invalidate cached TLB state everywhere. */
  bits_fill(src_rp->p_stale_tlb, CONFIG_MAX_CPUS);
  bits_fill(dst_rp->p_stale_tlb, CONFIG_MAX_CPUS);
#endif

  return OK;
}
/*===========================================================================*
* adjust_proc_slot *
*===========================================================================*/
static void adjust_proc_slot(struct proc *rp, struct proc *from_rp)
{
  /* After the wholesale slot swap in do_update(), copy back from the saved
   * original (from_rp) the fields that are bound to the slot itself rather
   * than to the process image: endpoints, slot numbers, priv structure, and
   * IPC state.
   */
  rp->p_endpoint = from_rp->p_endpoint;
  rp->p_nr = from_rp->p_nr;
  /* NOTE: p_priv must be restored before priv(rp) is used on the next line,
   * since priv(rp) reads through rp->p_priv. */
  rp->p_priv = from_rp->p_priv;
  priv(rp)->s_proc_nr = from_rp->p_nr;

  rp->p_caller_q = from_rp->p_caller_q;

  /* preserve scheduling */
  rp->p_scheduler = from_rp->p_scheduler;
#ifdef CONFIG_SMP
  /* Keep CPU placement and affinity with the slot as well. */
  rp->p_cpu = from_rp->p_cpu;
  memcpy(rp->p_cpu_mask, from_rp->p_cpu_mask,
	sizeof(bitchunk_t) * BITMAP_CHUNKS(CONFIG_MAX_CPUS));
#endif
}
/*===========================================================================*
* adjust_priv_slot *
*===========================================================================*/
static void adjust_priv_slot(struct priv *privp, struct priv *from_privp)
{
/* Undo the parts of the priv-structure swap that must stay with the slot:
 * the privilege id and all pending-event bookkeeping are copied back from
 * the saved original into the freshly swapped structure.
 */

  /* The privilege id identifies the slot, not the process image. */
  privp->s_id = from_privp->s_id;

  /* Pending notifications, interrupts and signals belong to the slot. */
  privp->s_notify_pending = from_privp->s_notify_pending;
  privp->s_int_pending = from_privp->s_int_pending;
  privp->s_sig_pending = from_privp->s_sig_pending;

  /* So do the alarm timer and the diagnostic-signal setting. */
  privp->s_alarm_timer = from_privp->s_alarm_timer;
  privp->s_diag_sig = from_privp->s_diag_sig;
}
/*===========================================================================*
* swap_proc_slot_pointer *
*===========================================================================*/
static void swap_proc_slot_pointer(struct proc **rpp, struct proc *src_rp,
	struct proc *dst_rp)
{
/* If the given global slot pointer currently refers to either of the two
 * swapped processes, redirect it to the other one; otherwise leave it alone.
 */
  struct proc *cur = *rpp;

  if (cur == src_rp)
	*rpp = dst_rp;
  else if (cur == dst_rp)
	*rpp = src_rp;
}
#endif /* USE_UPDATE */

View File

@@ -0,0 +1,166 @@
/* The kernel call implemented in this file:
* m_type: SYS_VDEVIO
*
* The parameters for this kernel call are:
* m_lsys_krn_sys_vdevio.request (request input or output)
* m_lsys_krn_sys_vdevio.vec_addr (pointer to port/ value pairs)
* m_lsys_krn_sys_vdevio.vec_size (number of ports to read or write)
*/
#include "kernel/system.h"
#include <minix/devio.h>
#include <minix/endpoint.h>
#include <minix/portio.h>
#if USE_VDEVIO
/* Buffer for SYS_VDEVIO to copy (port,value)-pairs from/ to user. */
static char vdevio_buf[VDEVIO_BUF_SIZE];
/* Three typed views of the SAME buffer: pvb, pvw and pvl all alias
 * vdevio_buf, so only the view matching the request's element type holds
 * meaningful data for a given call. Indexing through the wrong view uses
 * the wrong element size and therefore the wrong offsets.
 */
static pvb_pair_t * const pvb = (pvb_pair_t *) vdevio_buf;
static pvw_pair_t * const pvw = (pvw_pair_t *) vdevio_buf;
static pvl_pair_t * const pvl = (pvl_pair_t *) vdevio_buf;
/*===========================================================================*
* do_vdevio *
*===========================================================================*/
int do_vdevio(struct proc * caller, message * m_ptr)
{
/* Perform a series of device I/O on behalf of a non-kernel process. The
 * I/O addresses and I/O values are fetched from and returned to some buffer
 * in user space. The actual I/O is wrapped by lock() and unlock() to prevent
 * that I/O batch from being interrupted.
 * This is the counterpart of do_devio, which performs a single device I/O.
 */
  int vec_size;			/* size of vector */
  int io_in;			/* true if input */
  size_t bytes;			/* # bytes to be copied */
  port_t port;
  int i, j, io_size, nr_io_range;
  int io_dir, io_type;
  struct priv *privp;
  struct io_range *iorp;
  int r;

  /* Get the request, size of the request vector, and check the values. */
  io_dir = m_ptr->m_lsys_krn_sys_vdevio.request & _DIO_DIRMASK;
  io_type = m_ptr->m_lsys_krn_sys_vdevio.request & _DIO_TYPEMASK;
  if (io_dir == _DIO_INPUT) io_in = TRUE;
  else if (io_dir == _DIO_OUTPUT) io_in = FALSE;
  else return(EINVAL);
  if ((vec_size = m_ptr->m_lsys_krn_sys_vdevio.vec_size) <= 0) return(EINVAL);
  switch (io_type) {
      case _DIO_BYTE:
	bytes = vec_size * sizeof(pvb_pair_t);
	io_size= sizeof(u8_t);
	break;
      case _DIO_WORD:
	bytes = vec_size * sizeof(pvw_pair_t);
	io_size= sizeof(u16_t);
	break;
      case _DIO_LONG:
	bytes = vec_size * sizeof(pvl_pair_t);
	io_size= sizeof(u32_t);
	break;
      default:  return(EINVAL);	/* check type once and for all */
  }
  if (bytes > sizeof(vdevio_buf)) return(E2BIG);

  /* Copy (port,value)-pairs from user. */
  if((r=data_copy(caller->p_endpoint, m_ptr->m_lsys_krn_sys_vdevio.vec_addr,
	KERNEL, (vir_bytes) vdevio_buf, bytes)) != OK)
	return r;

  privp= priv(caller);
  if (privp && (privp->s_flags & CHECK_IO_PORT))
  {
	/* Check whether the I/O is allowed: every requested port must fall
	 * entirely inside one of the caller's permitted I/O ranges. */
	nr_io_range= privp->s_nr_io_range;
	for (i=0; i<vec_size; i++)
	{
		switch (io_type) {
		case _DIO_BYTE: port= pvb[i].port; break;
		case _DIO_WORD: port= pvw[i].port; break;
		default:	port= pvl[i].port; break;
		}
		for (j= 0, iorp= privp->s_io_tab; j<nr_io_range; j++, iorp++)
		{
			if (port >= iorp->ior_base &&
				port+io_size-1 <= iorp->ior_limit)
			{
				break;
			}
		}
		if (j >= nr_io_range)
		{
			printf(
		"do_vdevio: I/O port check failed for proc %d, port 0x%x\n",
				caller->p_endpoint, port);
			return EPERM;
		}
	}
  }

  /* Perform actual device I/O for byte, word, and long values */
  switch (io_type) {
  case _DIO_BYTE: 					 /* byte values */
	if (io_in) for (i=0; i<vec_size; i++)
		pvb[i].value = inb( pvb[i].port);
	else      for (i=0; i<vec_size; i++)
		outb( pvb[i].port, pvb[i].value);
	break;
  case _DIO_WORD:					 /* word values */
	if (io_in)
	{
		for (i=0; i<vec_size; i++)
		{
			port= pvw[i].port;
			if (port & 1) goto bad;	/* words must be aligned */
			pvw[i].value = inw( pvw[i].port);
		}
	}
	else
	{
		for (i=0; i<vec_size; i++)
		{
			port= pvw[i].port;
			if (port & 1) goto bad;
			outw( pvw[i].port, pvw[i].value);
		}
	}
	break;
  default:						 /* long values */
	if (io_in)
	{
		for (i=0; i<vec_size; i++)
		{
			port= pvl[i].port;
			if (port & 3) goto bad;	/* longs must be aligned */
			pvl[i].value = inl(pvl[i].port);
		}
	}
	else
	{
		for (i=0; i<vec_size; i++)
		{
			port= pvl[i].port;
			if (port & 3) goto bad;
			/* BUG FIX: the original wrote to pvb[i].port here.
			 * pvb and pvl alias the same buffer with different
			 * element sizes, so pvb[i].port is the wrong port
			 * for every i > 0 (and was not the port that the
			 * permission check above validated). */
			outl( pvl[i].port, pvl[i].value);
		}
	}
  }

  /* Almost done, copy back results for input requests. */
  if (io_in)
	if((r=data_copy(KERNEL, (vir_bytes) vdevio_buf,
		caller->p_endpoint, m_ptr->m_lsys_krn_sys_vdevio.vec_addr,
		(phys_bytes) bytes)) != OK)
		return r;
  return(OK);

bad:
  panic("do_vdevio: unaligned port: %d", port);
  return EPERM;
}
#endif /* USE_VDEVIO */

View File

@@ -0,0 +1,164 @@
/* The kernel call implemented in this file:
* m_type: SYS_VMCTL
*
* The parameters for this kernel call are:
* SVMCTL_WHO which process
* SVMCTL_PARAM set this setting (VMCTL_*)
* SVMCTL_VALUE to this value
*/
#include "kernel/system.h"
#include "kernel/vm.h"
#include "kernel/debug.h"
#include <assert.h>
#include <minix/type.h>
/*===========================================================================*
* do_vmctl *
*===========================================================================*/
int do_vmctl(struct proc * caller, message * m_ptr)
{
/* Handle SYS_VMCTL: VM server control operations on a target process.
 * SVMCTL_WHO selects the process (SELF means the caller), SVMCTL_PARAM the
 * operation, SVMCTL_VALUE an operation-specific argument. Unrecognized
 * parameters fall through to the architecture-specific arch_do_vmctl().
 */
  int proc_nr;
  endpoint_t ep = m_ptr->SVMCTL_WHO;
  struct proc *p, *rp, *target;

  if(ep == SELF) { ep = caller->p_endpoint; }

  if(!isokendpt(ep, &proc_nr)) {
	printf("do_vmctl: unexpected endpoint %d from VM\n", ep);
	return EINVAL;
  }

  p = proc_addr(proc_nr);

  switch(m_ptr->SVMCTL_PARAM) {
	case VMCTL_CLEAR_PAGEFAULT:
		/* VM has handled the pagefault; let the process run again. */
		assert(RTS_ISSET(p,RTS_PAGEFAULT));
		RTS_UNSET(p, RTS_PAGEFAULT);
		return OK;
	case VMCTL_MEMREQ_GET:
		/* Send VM the information about the memory request. */
		if(!(rp = vmrequest))
			return ESRCH;
		assert(RTS_ISSET(rp, RTS_VMREQUEST));

		/* NOTE(review): target is looked up here but not used in
		 * this case; presumably kept for symmetry with MEMREQ_REPLY. */
		okendpt(rp->p_vmrequest.target, &proc_nr);
		target = proc_addr(proc_nr);

		/* Reply with request fields. */
		switch(rp->p_vmrequest.req_type) {
		case VMPTYPE_CHECK:
			m_ptr->SVMCTL_MRG_TARGET =
				rp->p_vmrequest.target;
			m_ptr->SVMCTL_MRG_ADDR =
				rp->p_vmrequest.params.check.start;
			m_ptr->SVMCTL_MRG_LENGTH =
				rp->p_vmrequest.params.check.length;
			m_ptr->SVMCTL_MRG_FLAG =
				rp->p_vmrequest.params.check.writeflag;
			m_ptr->SVMCTL_MRG_REQUESTOR =
				(void *) rp->p_endpoint;
			break;
		default:
			panic("VMREQUEST wrong type");
		}

		/* Mark the request as in-flight until MEMREQ_REPLY arrives. */
		rp->p_vmrequest.vmresult = VMSUSPEND;

		/* Remove from request chain. */
		vmrequest = vmrequest->p_vmrequest.nextrequestor;

		return rp->p_vmrequest.req_type;
	case VMCTL_MEMREQ_REPLY:
		assert(RTS_ISSET(p, RTS_VMREQUEST));
		assert(p->p_vmrequest.vmresult == VMSUSPEND);
		okendpt(p->p_vmrequest.target, &proc_nr);
		target = proc_addr(proc_nr);
		p->p_vmrequest.vmresult = m_ptr->SVMCTL_VALUE;
		assert(p->p_vmrequest.vmresult != VMSUSPEND);

		/* Resume whatever was suspended on the memory request. */
		switch(p->p_vmrequest.type) {
		case VMSTYPE_KERNELCALL:
			/*
			 * we will have to resume execution of the kernel call
			 * as soon the scheduler picks up this process again
			 */
			p->p_misc_flags |= MF_KCALL_RESUME;
			break;
		case VMSTYPE_DELIVERMSG:
			assert(p->p_misc_flags & MF_DELIVERMSG);
			assert(p == target);
			assert(RTS_ISSET(p, RTS_VMREQUEST));
			break;
		case VMSTYPE_MAP:
			assert(RTS_ISSET(p, RTS_VMREQUEST));
			break;
		default:
			panic("strange request type: %d",p->p_vmrequest.type);
		}

		RTS_UNSET(p, RTS_VMREQUEST);
		return OK;
	case VMCTL_KERN_PHYSMAP:
	{
		/* Report the i-th kernel physical mapping to VM. */
		int i = m_ptr->SVMCTL_VALUE;
		return arch_phys_map(i,
			(phys_bytes *) &m_ptr->SVMCTL_MAP_PHYS_ADDR,
			(phys_bytes *) &m_ptr->SVMCTL_MAP_PHYS_LEN,
			&m_ptr->SVMCTL_MAP_FLAGS);
	}
	case VMCTL_KERN_MAP_REPLY:
	{
		return arch_phys_map_reply(m_ptr->SVMCTL_VALUE,
			(vir_bytes) m_ptr->SVMCTL_MAP_VIR_ADDR);
	}
	case VMCTL_VMINHIBIT_SET:
		/* check if we must stop a process on a different CPU */
#if CONFIG_SMP
		if (p->p_cpu != cpuid) {
			smp_schedule_vminhibit(p);
		} else
#endif
			RTS_SET(p, RTS_VMINHIBIT);
#if CONFIG_SMP
		p->p_misc_flags |= MF_FLUSH_TLB;
#endif
		return OK;
	case VMCTL_VMINHIBIT_CLEAR:
		assert(RTS_ISSET(p, RTS_VMINHIBIT));
		/*
		 * the processes is certainly not runnable, no need to tell its
		 * cpu
		 */
		RTS_UNSET(p, RTS_VMINHIBIT);
		/* NOTE(review): this case uses #ifdef CONFIG_SMP while the
		 * SET case above uses #if CONFIG_SMP — confirm both are
		 * equivalent for how CONFIG_SMP is defined in this build. */
#ifdef CONFIG_SMP
		if (p->p_misc_flags & MF_SENDA_VM_MISS) {
			struct priv *privp;
			p->p_misc_flags &= ~MF_SENDA_VM_MISS;
			privp = priv(p);
			/* Retry the asynchronous sends that missed while the
			 * mapping was being changed. */
			try_deliver_senda(p, (asynmsg_t *) privp->s_asyntab,
					privp->s_asynsize);
		}
		/*
		 * We don't know whether kernel has the changed mapping
		 * installed to access userspace memory. And if so, on what CPU.
		 * More over we don't know what mapping has changed and how and
		 * therefore we must invalidate all mappings we have anywhere.
		 * Next time we map memory, we map it fresh.
		 */
		bits_fill(p->p_stale_tlb, CONFIG_MAX_CPUS);
#endif
		return OK;
	case VMCTL_CLEARMAPCACHE:
		/* VM says: forget about old mappings we have cached. */
		mem_clear_mapcache();
		return OK;
	case VMCTL_BOOTINHIBIT_CLEAR:
		RTS_UNSET(p, RTS_BOOTINHIBIT);
		return OK;
  }

  /* Try architecture-specific vmctls. */
  return arch_do_vmctl(m_ptr, p);
}

View File

@@ -0,0 +1,104 @@
/* The kernel call implemented in this file:
* m_type: SYS_VTIMER
*
* The parameters for this kernel call are:
* m2_i1: VT_WHICH (the timer: VT_VIRTUAL or VT_PROF)
* m2_i2: VT_SET (whether to set, or just retrieve)
* m2_l1: VT_VALUE (new/old expiration time, in ticks)
* m2_l2: VT_ENDPT (process to which the timer belongs)
*/
#include "kernel/system.h"
#include <signal.h>
#include <minix/endpoint.h>
#if USE_VTIMER
/*===========================================================================*
* do_vtimer *
*===========================================================================*/
int do_vtimer(struct proc * caller, message * m_ptr)
{
/* Set and/or retrieve the value of one of a process' virtual timers
 * (VT_VIRTUAL or VT_PROF). The previous number of ticks left is always
 * returned in VT_VALUE; if VT_SET is nonzero, a new value is installed
 * (a value of zero or less disables the timer).
 */
  struct proc *rp;		/* process the timer belongs to */
  int timer_flag;		/* misc on/off flag for the requested timer */
  clock_t *ticks_left;		/* process' ticks-left field for that timer */
  clock_t prev_ticks;		/* previous number of ticks left */
  int proc_nr, proc_nr_e;

  /* The requesting process must be privileged. */
  if (! (priv(caller)->s_flags & SYS_PROC)) return(EPERM);

  if (m_ptr->VT_WHICH != VT_VIRTUAL && m_ptr->VT_WHICH != VT_PROF)
	return(EINVAL);

  /* The target process must be valid. */
  proc_nr_e = (m_ptr->VT_ENDPT == SELF) ? caller->p_endpoint : m_ptr->VT_ENDPT;
  if (!isokendpt(proc_nr_e, &proc_nr)) return(EINVAL);
  rp = proc_addr(proc_nr);

  /* Pick the flag bit and proc-structure field for the requested timer, so
   * the code below need not distinguish VT_VIRTUAL from VT_PROF again.
   */
  if (m_ptr->VT_WHICH == VT_VIRTUAL) {
	timer_flag = MF_VIRT_TIMER;
	ticks_left = &rp->p_virt_left;
  } else {		/* VT_PROF */
	timer_flag = MF_PROF_TIMER;
	ticks_left = &rp->p_prof_left;
  }

  /* A disabled timer reads as zero. */
  prev_ticks = (rp->p_misc_flags & timer_flag) ? *ticks_left : 0;

  if (m_ptr->VT_SET) {
	rp->p_misc_flags &= ~timer_flag;	/* disable virtual timer */

	if (m_ptr->VT_VALUE > 0) {
		*ticks_left = m_ptr->VT_VALUE;	/* set new timer value */
		rp->p_misc_flags |= timer_flag;	/* (re)enable virtual timer */
	} else {
		*ticks_left = 0;		/* clear timer value */
	}
  }

  m_ptr->VT_VALUE = prev_ticks;

  return(OK);
}
#endif /* USE_VTIMER */
/*===========================================================================*
* vtimer_check *
*===========================================================================*/
void vtimer_check(struct proc *rp)
{
/* Check the given process' virtual timers and deliver the corresponding
 * signal for each timer that has expired (ticks-left reached zero), then
 * disable that timer. Modernized from a K&R-style definition to an ANSI
 * prototype definition; behavior is unchanged.
 *
 * This is called from the clock task, so we can be interrupted by the clock
 * interrupt, but not by the system task. Therefore we only have to protect
 * against interference from the clock handler. We can safely perform the
 * following actions without locking as well though, as the clock handler
 * never alters p_misc_flags, and only decreases p_virt_left/p_prof_left.
 */

  /* Check if the virtual timer expired. If so, send a SIGVTALRM signal. */
  if ((rp->p_misc_flags & MF_VIRT_TIMER) && rp->p_virt_left == 0) {
	rp->p_misc_flags &= ~MF_VIRT_TIMER;
	rp->p_virt_left = 0;
	cause_sig(rp->p_nr, SIGVTALRM);
  }

  /* Check if the profile timer expired. If so, send a SIGPROF signal. */
  if ((rp->p_misc_flags & MF_PROF_TIMER) && rp->p_prof_left == 0) {
	rp->p_misc_flags &= ~MF_PROF_TIMER;
	rp->p_prof_left = 0;
	cause_sig(rp->p_nr, SIGPROF);
  }
}

View File

@@ -0,0 +1,131 @@
/* The kernel call implemented in this file:
* m_type: SYS_VUMAP
*
* The parameters for this kernel call are:
* m_lsys_krn_sys_vumap.endpt (grant owner, or SELF for local addresses)
* m_lsys_krn_sys_vumap.vaddr (address of virtual (input) vector)
* m_lsys_krn_sys_vumap.vcount (number of elements in virtual vector)
* m_lsys_krn_sys_vumap.offset (offset into first entry of input vector)
* m_lsys_krn_sys_vumap.access (safecopy access requested for input)
* m_lsys_krn_sys_vumap.paddr (address of physical (output) vector)
* m_lsys_krn_sys_vumap.pmax (maximum number of physical vector elements)
* m_krn_lsys_sys_vumap.pcount (upon return: number of elements filled)
*/
#include "kernel/system.h"
#include <assert.h>
/*===========================================================================*
* do_vumap *
*===========================================================================*/
int do_vumap(struct proc *caller, message *m_ptr)
{
/* Map a vector of grants or local virtual addresses to physical addresses.
 * Designed to be used by drivers to perform an efficient lookup of physical
 * addresses for the purpose of direct DMA from/to a remote process.
 * Input is a vector of (address-or-grant, size) pairs; output is a vector of
 * (physical address, size) ranges, possibly more than one per input entry.
 * Returns OK on success, an error code otherwise; may also suspend the call
 * via vm_check_range() when memory is not yet present.
 */
  endpoint_t endpt, source, granter;
  struct proc *procp;
  struct vumap_vir vvec[MAPVEC_NR];
  struct vumap_phys pvec[MAPVEC_NR];
  vir_bytes vaddr, paddr, vir_addr;
  phys_bytes phys_addr;
  int i, r, proc_nr, vcount, pcount, pmax, access;
  size_t size, chunk, offset;

  endpt = caller->p_endpoint;

  /* Retrieve and check input parameters. */
  source = m_ptr->m_lsys_krn_sys_vumap.endpt;
  vaddr = m_ptr->m_lsys_krn_sys_vumap.vaddr;
  vcount = m_ptr->m_lsys_krn_sys_vumap.vcount;
  offset = m_ptr->m_lsys_krn_sys_vumap.offset;
  access = m_ptr->m_lsys_krn_sys_vumap.access;
  paddr = m_ptr->m_lsys_krn_sys_vumap.paddr;
  pmax = m_ptr->m_lsys_krn_sys_vumap.pmax;

  if (vcount <= 0 || pmax <= 0)
	return EINVAL;

  /* Silently clamp both vector sizes to the kernel's static buffers. */
  if (vcount > MAPVEC_NR) vcount = MAPVEC_NR;
  if (pmax > MAPVEC_NR) pmax = MAPVEC_NR;

  /* Convert access to safecopy access flags. */
  switch (access) {
  case VUA_READ:		access = CPF_READ; break;
  case VUA_WRITE:		access = CPF_WRITE; break;
  case VUA_READ|VUA_WRITE:	access = CPF_READ|CPF_WRITE; break;
  default:			return EINVAL;
  }

  /* Copy in the vector of virtual addresses. */
  size = vcount * sizeof(vvec[0]);

  if (data_copy(endpt, vaddr, KERNEL, (vir_bytes) vvec, size) != OK)
	return EFAULT;

  pcount = 0;

  /* Go through the input entries, one at a time. Stop early in case the output
   * vector has filled up.
   */
  for (i = 0; i < vcount && pcount < pmax; i++) {
	size = vvec[i].vv_size;
	/* The caller-supplied offset applies to the first entry only and
	 * must leave a nonempty range. */
	if (size <= offset)
		return EINVAL;
	size -= offset;

	if (source != SELF) {
		/* Remote: the entry is a grant to be verified and resolved
		 * into the granter's virtual address space. */
		r = verify_grant(source, endpt, vvec[i].vv_grant, size, access,
			offset, &vir_addr, &granter, NULL);
		if (r != OK)
			return r;
	} else {
		/* Local: the entry is an address in the caller's space. */
		vir_addr = vvec[i].vv_addr + offset;
		granter = endpt;
	}

	okendpt(granter, &proc_nr);
	procp = proc_addr(proc_nr);

	/* Each virtual range is made up of one or more physical ranges. */
	while (size > 0 && pcount < pmax) {
		chunk = vm_lookup_range(procp, vir_addr, &phys_addr, size);

		if (!chunk) {
			/* Try to get the memory allocated, unless the memory
			 * is supposed to be there to be read from.
			 */
			if (access & CPF_READ)
				return EFAULT;

			/* This call may suspend the current call, or return an
			 * error for a previous invocation.
			 */
			return vm_check_range(caller, procp, vir_addr, size, 1);
		}

		pvec[pcount].vp_addr = phys_addr;
		pvec[pcount].vp_size = chunk;
		pcount++;

		vir_addr += chunk;
		size -= chunk;
	}

	/* The offset only applied to the first entry. */
	offset = 0;
  }

  /* Copy out the resulting vector of physical addresses. */
  assert(pcount > 0);

  size = pcount * sizeof(pvec[0]);

  r = data_copy_vmcheck(caller, KERNEL, (vir_bytes) pvec, endpt, paddr, size);

  if (r == OK)
	m_ptr->m_krn_lsys_sys_vumap.pcount = pcount;

  return r;
}