Basic VM and other minor improvements.

Not complete, probably not fully debugged or optimized.
Ben Gras
2008-11-19 12:26:10 +00:00
parent c888305e21
commit c078ec0331
273 changed files with 10814 additions and 4305 deletions

View File

@@ -18,21 +18,20 @@ CFLAGS=$(CPROFILE) $(CPPFLAGS) $(EXTRA_OPTS)
LDFLAGS=-i
# first-stage, arch-dependent startup code
HEAD = head.o
FULLHEAD = $a/$(HEAD)
HEAD = $a/mpx386.o
OBJS = start.o table.o main.o proc.o \
system.o clock.o utility.o debug.o profile.o interrupt.o
SYSTEM = system.a
ARCHLIB = $a/$(ARCH).a
LIBS = -ltimers -lsysutil
LIBS = -ltimers -lsys
# What to make.
all: build
kernel build install: $(HEAD) $(OBJS)
kernel build install: $(OBJS)
cd system && $(MAKE) $@
cd $a && $(MAKE) $@
$(LD) $(CFLAGS) $(LDFLAGS) -o kernel $(FULLHEAD) $(OBJS) \
$(LD) $(CFLAGS) $(LDFLAGS) -o kernel $(HEAD) $(OBJS) \
$(SYSTEM) $(ARCHLIB) $(LIBS)
install -S 0 kernel
@@ -50,8 +49,5 @@ depend:
.c.o:
$(CC) $(CFLAGS) -c -o $@ $<
$(HEAD):
cd $a && make HEAD=$(HEAD) $(HEAD)
# Include generated dependencies.
include .depend

View File

@@ -8,17 +8,19 @@ ARCHAR=$(ARCH).a
# the HEAD variable is passed as an argument to this Makefile
# by an upper level Makefile.
OBJS=$(ARCHAR)(exception.o) \
$(ARCHAR)(i8259.o) \
$(ARCHAR)(memory.o) \
$(ARCHAR)(protect.o) \
$(ARCHAR)(system.o) \
$(ARCHAR)(clock.o) \
$(ARCHAR)(klib386.o) \
$(ARCHAR)(do_readbios.o) \
$(ARCHAR)(do_int86.o) \
$(ARCHAR)(do_sdevio.o) \
$(ARCHAR)(do_iopenable.o)
OBJS= arch_do_vmctl.o \
clock.o \
do_int86.o \
do_iopenable.o \
do_readbios.o \
do_sdevio.o \
exception.o \
i8259.o \
klib386.o \
memory.o \
mpx386.o \
protect.o \
system.o
CPPFLAGS=-Iinclude
CFLAGS=$(CPPFLAGS) -Wall
@@ -56,6 +58,9 @@ $(ARCHAR)(do_int86.o): do_int86.c
$(ARCHAR)(do_iopenable.o): do_iopenable.c
$(CC) $(CFLAGS) -c $<
$(ARCHAR)(arch_do_vmctl.o): arch_do_vmctl.c
$(CC) $(CFLAGS) -c $<
$(ARCHAR)(do_readbios.o): do_readbios.c
$(CC) $(CFLAGS) -c $<

View File

@@ -0,0 +1,56 @@
/* The kernel call implemented in this file:
* m_type: SYS_VMCTL
*
* The parameters for this kernel call are:
* SVMCTL_WHO which process
* SVMCTL_PARAM set this setting (VMCTL_*)
* SVMCTL_VALUE to this value
*/
#include "../../system.h"
#include <minix/type.h>
extern u32_t kernel_cr3;
/*===========================================================================*
* arch_do_vmctl *
*===========================================================================*/
PUBLIC int arch_do_vmctl(m_ptr, p)
register message *m_ptr; /* pointer to request message */
struct proc *p;
{
switch(m_ptr->SVMCTL_PARAM) {
case VMCTL_I386_GETCR3:
/* Get process CR3. */
m_ptr->SVMCTL_VALUE = p->p_seg.p_cr3;
return OK;
case VMCTL_I386_SETCR3:
/* Set process CR3. */
if(m_ptr->SVMCTL_VALUE) {
p->p_seg.p_cr3 = m_ptr->SVMCTL_VALUE;
p->p_misc_flags |= MF_FULLVM;
} else {
p->p_seg.p_cr3 = kernel_cr3;
p->p_misc_flags &= ~MF_FULLVM;
}
RTS_LOCK_UNSET(p, VMINHIBIT);
return OK;
case VMCTL_GET_PAGEFAULT:
{
struct proc *rp;
if(!(rp=pagefaults))
return ESRCH;
pagefaults = rp->p_nextpagefault;
if(!RTS_ISSET(rp, PAGEFAULT))
minix_panic("non-PAGEFAULT process on pagefault chain",
rp->p_endpoint);
m_ptr->SVMCTL_PF_WHO = rp->p_endpoint;
m_ptr->SVMCTL_PF_I386_CR2 = rp->p_pagefault.pf_virtual;
m_ptr->SVMCTL_PF_I386_ERR = rp->p_pagefault.pf_flags;
return OK;
}
}
kprintf("arch_do_vmctl: strange param %d\n", m_ptr->SVMCTL_PARAM);
return EINVAL;
}
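
For orientation, here is a minimal caller-side sketch of the VMCTL_I386_SETCR3 case above, roughly as the VM server might issue it. The _taskcall() stub, the SYSTASK destination and the exact headers are assumptions for illustration; only the SVMCTL_* message fields and VMCTL_* parameter names come from this kernel call's interface.

/* Hypothetical VM-server-side wrapper for SYS_VMCTL / VMCTL_I386_SETCR3.
 * Assumed: _taskcall() and SYSTASK as the kernel-call mechanism, and
 * <minix/com.h> providing SYS_VMCTL plus the SVMCTL_ and VMCTL_ definitions.
 */
#include <minix/type.h>
#include <minix/syslib.h>
#include <minix/com.h>

int vm_setcr3(endpoint_t who, u32_t cr3)
{
	message m;

	m.SVMCTL_WHO = who;			/* which process */
	m.SVMCTL_PARAM = VMCTL_I386_SETCR3;	/* set this setting */
	m.SVMCTL_VALUE = cr3;			/* to this value; 0 = back to kernel cr3 */

	/* The kernel stores cr3 in the process' segframe, toggles
	 * MF_FULLVM and clears VMINHIBIT so the process may run again.
	 */
	return _taskcall(SYSTASK, SYS_VMCTL, &m);
}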

View File

@@ -21,28 +21,23 @@ struct reg86u reg86;
PUBLIC int do_int86(m_ptr)
register message *m_ptr; /* pointer to request message */
{
vir_bytes caller_vir;
phys_bytes caller_phys, kernel_phys;
caller_vir = (vir_bytes) m_ptr->INT86_REG86;
caller_phys = umap_local(proc_addr(who_p), D, caller_vir, sizeof(reg86));
if (0 == caller_phys) return(EFAULT);
kernel_phys = vir2phys(&reg86);
phys_copy(caller_phys, kernel_phys, (phys_bytes) sizeof(reg86));
data_copy(who_e, (vir_bytes) m_ptr->INT86_REG86,
SYSTEM, (vir_bytes) &reg86, sizeof(reg86));
level0(int86);
/* Copy results back to the caller */
phys_copy(kernel_phys, caller_phys, (phys_bytes) sizeof(reg86));
data_copy(SYSTEM, (vir_bytes) &reg86,
who_e, (vir_bytes) m_ptr->INT86_REG86, sizeof(reg86));
/* The BIOS call eats interrupts. Call get_randomness to generate some
* entropy. Normally, get_randomness is called from an interrupt handler.
* Figuring out the exact source is too complicated. CLOCK_IRQ is normally
* not very random.
*/
lock(0, "do_int86");
lock;
get_randomness(CLOCK_IRQ);
unlock(0);
unlock;
return(OK);
}

View File

@@ -16,24 +16,14 @@
PUBLIC int do_readbios(m_ptr)
register message *m_ptr; /* pointer to request message */
{
int proc_nr;
struct proc *p;
phys_bytes address, phys_buf, phys_bios;
vir_bytes buf;
size_t size;
struct vir_addr src, dst;
src.segment = BIOS_SEG;
dst.segment = D;
src.offset = m_ptr->RDB_ADDR;
dst.offset = (vir_bytes) m_ptr->RDB_BUF;
src.proc_nr_e = NONE;
dst.proc_nr_e = m_ptr->m_source;
address = m_ptr->RDB_ADDR;
buf = (vir_bytes)m_ptr->RDB_BUF;
size = m_ptr->RDB_SIZE;
okendpt(m_ptr->m_source, &proc_nr);
p = proc_addr(proc_nr);
phys_buf = umap_local(p, D, buf, size);
if (phys_buf == 0)
return EFAULT;
phys_bios = umap_bios(p, address, size);
if (phys_bios == 0)
return EPERM;
phys_copy(phys_bios, phys_buf, size);
return 0;
return virtual_copy_vmcheck(&src, &dst, m_ptr->RDB_SIZE);
}

View File

@@ -77,7 +77,7 @@ register message *m_ptr; /* pointer to request message */
return EPERM;
}
/* Get and check physical address. */
if ((phys_buf = numap_local(proc_nr,
if ((phys_buf = umap_virtual(proc_addr(proc_nr), D,
(vir_bytes) m_ptr->DIO_VEC_ADDR, count)) == 0)
return(EFAULT);
}

View File

@@ -6,8 +6,66 @@
#include "../../kernel.h"
#include "proto.h"
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <minix/sysutil.h>
#include "../../proc.h"
extern int vm_copy_in_progress;
extern struct proc *vm_copy_from, *vm_copy_to;
extern u32_t vm_copy_from_v, vm_copy_to_v;
extern u32_t vm_copy_from_p, vm_copy_to_p, vm_copy_cr3;
u32_t pagefault_cr2, pagefault_count = 0;
void pagefault(struct proc *pr, int trap_errno)
{
int s;
vir_bytes ph;
u32_t pte;
if(pagefault_count != 1)
minix_panic("recursive pagefault", pagefault_count);
/* Don't schedule this process until pagefault is handled. */
if(RTS_ISSET(pr, PAGEFAULT))
minix_panic("PAGEFAULT set", pr->p_endpoint);
RTS_LOCK_SET(pr, PAGEFAULT);
if(pr->p_endpoint <= INIT_PROC_NR) {
/* Page fault we can't / don't want to
* handle.
*/
kprintf("pagefault for process %d ('%s'), pc = 0x%x\n",
pr->p_endpoint, pr->p_name, pr->p_reg.pc);
proc_stacktrace(pr);
minix_panic("page fault in system process", pr->p_endpoint);
return;
}
/* Save pagefault details, suspend process,
* add process to pagefault chain,
* and tell VM there is a pagefault to be
* handled.
*/
pr->p_pagefault.pf_virtual = pagefault_cr2;
pr->p_pagefault.pf_flags = trap_errno;
pr->p_nextpagefault = pagefaults;
pagefaults = pr;
lock_notify(HARDWARE, VM_PROC_NR);
pagefault_count = 0;
#if 0
kprintf("pagefault for process %d ('%s'), pc = 0x%x\n",
pr->p_endpoint, pr->p_name, pr->p_reg.pc);
proc_stacktrace(pr);
#endif
return;
}
/*===========================================================================*
* exception *
*===========================================================================*/
@@ -62,8 +120,13 @@ u32_t old_eflags;
* k_reenter larger than zero.
*/
if (k_reenter == 0 && ! iskernelp(saved_proc)) {
#if 0
{
switch(vec_nr) {
case PAGE_FAULT_VECTOR:
pagefault(saved_proc, trap_errno);
return;
}
kprintf(
"exception for process %d, endpoint %d ('%s'), pc = 0x%x:0x%x, sp = 0x%x:0x%x\n",
proc_nr(saved_proc), saved_proc->p_endpoint,
@@ -75,12 +138,11 @@ u32_t old_eflags;
vec_nr, (unsigned long)trap_errno,
(unsigned long)old_eip, old_cs,
(unsigned long)old_eflags);
#if DEBUG_STACKTRACE
stacktrace(saved_proc);
#endif
proc_stacktrace(saved_proc);
}
#endif
kprintf("kernel: cause_sig %d for %d\n",
ep->signum, saved_proc->p_endpoint);
cause_sig(proc_nr(saved_proc), ep->signum);
return;
}
@@ -92,43 +154,45 @@ u32_t old_eflags;
kprintf("\n%s\n", ep->msg);
kprintf("k_reenter = %d ", k_reenter);
kprintf("process %d (%s), ", proc_nr(saved_proc), saved_proc->p_name);
kprintf("pc = %u:0x%x", (unsigned) saved_proc->p_reg.cs,
(unsigned) saved_proc->p_reg.pc);
kprintf("pc = %u:0x%x\n", (unsigned) saved_proc->p_reg.cs,
(unsigned) saved_proc->p_reg.pc);
kprintf(
"vec_nr= %d, trap_errno= 0x%lx, eip= 0x%lx, cs= 0x%x, eflags= 0x%lx\n",
vec_nr, (unsigned long)trap_errno,
(unsigned long)old_eip, old_cs, (unsigned long)old_eflags);
proc_stacktrace(saved_proc);
panic("exception in a kernel task", NO_NUM);
minix_panic("exception in a kernel task", saved_proc->p_endpoint);
}
#if DEBUG_STACKTRACE
/*===========================================================================*
* stacktrace *
*===========================================================================*/
PUBLIC void stacktrace(struct proc *proc)
PUBLIC void proc_stacktrace(struct proc *proc)
{
reg_t bp, v_bp, v_pc, v_hbp;
v_bp = proc->p_reg.fp;
kprintf("stacktrace: ");
kprintf("ep %d pc 0x%lx stack ", proc->p_endpoint, proc->p_reg.pc);
while(v_bp) {
phys_bytes p;
if(!(p = umap_local(proc, D, v_bp, sizeof(v_bp)))) {
kprintf("(bad bp %lx)", v_bp);
if(data_copy(proc->p_endpoint, v_bp,
SYSTEM, (vir_bytes) &v_hbp, sizeof(v_hbp)) != OK) {
kprintf("(v_bp 0x%lx ?)", v_bp);
break;
}
if(data_copy(proc->p_endpoint, v_bp + sizeof(v_pc),
SYSTEM, (vir_bytes) &v_pc, sizeof(v_pc)) != OK) {
kprintf("(v_pc 0x%lx ?)", v_pc);
break;
}
phys_copy(p+sizeof(v_pc), vir2phys(&v_pc), sizeof(v_pc));
phys_copy(p, vir2phys(&v_hbp), sizeof(v_hbp));
kprintf("0x%lx ", (unsigned long) v_pc);
if(v_hbp != 0 && v_hbp <= v_bp) {
kprintf("(bad hbp %lx)", v_hbp);
kprintf("(hbp %lx ?)", v_hbp);
break;
}
v_bp = v_hbp;
}
kprintf("\n");
}
#endif
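
To make the pagefault chain introduced above concrete, here is a hedged sketch of the consumer side: after the lock_notify() to VM_PROC_NR, the VM server would pop queued faults one at a time through VMCTL_GET_PAGEFAULT until the kernel answers ESRCH (empty chain). The _taskcall()/SYSTASK call mechanism and the handle_fault() helper are placeholders; the SVMCTL_PF_* reply fields and the ESRCH termination match arch_do_vmctl.c.

/* Hypothetical VM-side drain loop for the kernel's pagefault chain. */
#include <minix/type.h>
#include <minix/syslib.h>
#include <minix/com.h>

static void handle_fault(endpoint_t ep, u32_t addr, u32_t err);	/* placeholder */

static void drain_pagefaults(void)
{
	message m;

	for (;;) {
		m.SVMCTL_PARAM = VMCTL_GET_PAGEFAULT;
		if (_taskcall(SYSTASK, SYS_VMCTL, &m) != OK)
			break;		/* ESRCH: no pagefaults left on the chain */

		/* Endpoint, faulting address (CR2) and page fault error
		 * code, as filled in by arch_do_vmctl().
		 */
		handle_fault(m.SVMCTL_PF_WHO,
			m.SVMCTL_PF_I386_CR2, m.SVMCTL_PF_I386_ERR);
	}
}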

View File

@@ -135,12 +135,6 @@
#define IF_MASK 0x00000200
#define IOPL_MASK 0x003000
/* Sizes of memory tables. The boot monitor distinguishes three memory areas,
* namely low mem below 1M, 1M-16M, and mem after 16M. More chunks are needed
* for DOS MINIX.
*/
#define NR_MEMS 8
#define vir2phys(vir) (kinfo.data_base + (vir_bytes) (vir))
#endif /* _I386_ACONST_H */

View File

@@ -55,8 +55,18 @@ struct segdesc_s { /* segment descriptor for protected mode */
typedef struct segframe {
reg_t p_ldt_sel; /* selector in gdt with ldt base and limit */
reg_t p_cr3; /* page table root */
struct segdesc_s p_ldt[2+NR_REMOTE_SEGS]; /* CS, DS and remote */
} segframe_t;
/* Page fault event. Stored in process table. Only valid if PAGEFAULT
* set in p_rts_flags.
*/
struct pagefault
{
u32_t pf_virtual; /* Address causing fault (CR2). */
u32_t pf_flags; /* Pagefault flags on stack. */
};
#endif /* #ifndef _I386_TYPES_H */

View File

@@ -8,6 +8,7 @@
#include <ibm/interrupt.h>
#include <archconst.h>
#include "../../const.h"
#include "vm.h"
#include "sconst.h"
! This file contains a number of assembly code utility routines needed by the
@@ -15,7 +16,7 @@
.define _monitor ! exit Minix and return to the monitor
.define _int86 ! let the monitor make an 8086 interrupt call
.define _cp_mess ! copies messages from source to destination
!.define _cp_mess ! copies messages from source to destination
.define _exit ! dummy for library routines
.define __exit ! dummy for library routines
.define ___exit ! dummy for library routines
@@ -34,8 +35,11 @@
.define _level0 ! call a function at level 0
.define _read_cpu_flags ! read the cpu flags
.define _read_cr0 ! read cr0
.define _write_cr3 ! write cr3
.define _last_cr3
.define _write_cr0 ! write a value in cr0
.define _write_cr3 ! write a value in cr3 (root of the page table)
.define _kernel_cr3
! The routines only guarantee to preserve the registers the C compiler
! expects to be preserved (ebx, esi, edi, ebp, esp, segment registers, and
@@ -162,42 +166,42 @@ csinit: mov eax, DS_SELECTOR
! Note that the message size, "Msize" is in DWORDS (not bytes) and must be set
! correctly. Changing the definition of message in the type file and not
! changing it here will lead to total disaster.
CM_ARGS = 4 + 4 + 4 + 4 + 4 ! 4 + 4 + 4 + 4 + 4
! es ds edi esi eip proc scl sof dcl dof
.align 16
_cp_mess:
cld
push esi
push edi
push ds
push es
mov eax, FLAT_DS_SELECTOR
mov ds, ax
mov es, ax
mov esi, CM_ARGS+4(esp) ! src clicks
shl esi, CLICK_SHIFT
add esi, CM_ARGS+4+4(esp) ! src offset
mov edi, CM_ARGS+4+4+4(esp) ! dst clicks
shl edi, CLICK_SHIFT
add edi, CM_ARGS+4+4+4+4(esp) ! dst offset
mov eax, CM_ARGS(esp) ! process number of sender
stos ! copy number of sender to dest message
add esi, 4 ! do not copy first word
mov ecx, Msize - 1 ! remember, first word does not count
rep
movs ! copy the message
pop es
pop ds
pop edi
pop esi
ret ! that is all folks!
!
!CM_ARGS = 4 + 4 + 4 + 4 + 4 ! 4 + 4 + 4 + 4 + 4
!! es ds edi esi eip proc scl sof dcl dof
!
! .align 16
!_cp_mess:
! cld
! push esi
! push edi
! push ds
! push es
!
! mov eax, FLAT_DS_SELECTOR
! mov ds, ax
! mov es, ax
!
! mov esi, CM_ARGS+4(esp) ! src clicks
! shl esi, CLICK_SHIFT
! add esi, CM_ARGS+4+4(esp) ! src offset
! mov edi, CM_ARGS+4+4+4(esp) ! dst clicks
! shl edi, CLICK_SHIFT
! add edi, CM_ARGS+4+4+4+4(esp) ! dst offset
!
! mov eax, CM_ARGS(esp) ! process number of sender
! stos ! copy number of sender to dest message
! add esi, 4 ! do not copy first word
! mov ecx, Msize - 1 ! remember, first word does not count
! rep
! movs ! copy the message
!
! pop es
! pop ds
! pop edi
! pop esi
! ret ! that is all folks!
!
!*===========================================================================*
!* exit *
@@ -229,6 +233,9 @@ _phys_insw:
cld
push edi
push es
LOADKERNELCR3
mov ecx, FLAT_DS_SELECTOR
mov es, cx
mov edx, 8(ebp) ! port to read from
@@ -254,6 +261,9 @@ _phys_insb:
cld
push edi
push es
LOADKERNELCR3
mov ecx, FLAT_DS_SELECTOR
mov es, cx
mov edx, 8(ebp) ! port to read from
@@ -280,6 +290,9 @@ _phys_outsw:
cld
push esi
push ds
LOADKERNELCR3
mov ecx, FLAT_DS_SELECTOR
mov ds, cx
mov edx, 8(ebp) ! port to write to
@@ -306,6 +319,9 @@ _phys_outsb:
cld
push esi
push ds
LOADKERNELCR3
mov ecx, FLAT_DS_SELECTOR
mov ds, cx
mov edx, 8(ebp) ! port to write to
@@ -412,6 +428,8 @@ _phys_copy:
push edi
push es
LOADKERNELCR3
mov eax, FLAT_DS_SELECTOR
mov es, ax
@@ -456,6 +474,9 @@ _phys_memset:
push esi
push ebx
push ds
LOADKERNELCR3
mov esi, 8(ebp)
mov eax, 16(ebp)
mov ebx, FLAT_DS_SELECTOR
@@ -485,6 +506,7 @@ fill_done:
pop esi
pop ebp
ret
!*===========================================================================*
!* mem_rdw *
@@ -585,14 +607,13 @@ _write_cr0:
ret
!*===========================================================================*
!* write_cr3 *
!* write_cr3 *
!*===========================================================================*
! PUBLIC void write_cr3(unsigned long value);
_write_cr3:
push ebp
mov ebp, esp
mov eax, 8(ebp)
mov cr3, eax
pop ebp
push ebp
mov ebp, esp
LOADCR3WITHEAX(0x22, 8(ebp))
pop ebp
ret

View File

@@ -1,20 +1,26 @@
#include "../../kernel.h"
#include "../../proc.h"
#include "../../vm.h"
#include <minix/type.h>
#include <minix/syslib.h>
#include <minix/sysutil.h>
#include <string.h>
#include <sys/vm.h>
#include <sys/vm_i386.h>
#include <minix/portio.h>
#include "proto.h"
#include "../../proto.h"
#include "../../debug.h"
/* VM functions and data. */
PRIVATE int vm_needs_init= 1;
PRIVATE u32_t vm_cr3;
PUBLIC u32_t kernel_cr3;
extern u32_t cswitch;
u32_t last_cr3 = 0;
FORWARD _PROTOTYPE( void phys_put32, (phys_bytes addr, u32_t value) );
FORWARD _PROTOTYPE( u32_t phys_get32, (phys_bytes addr) );
@@ -22,6 +28,13 @@ FORWARD _PROTOTYPE( void vm_set_cr3, (u32_t value) );
FORWARD _PROTOTYPE( void set_cr3, (void) );
FORWARD _PROTOTYPE( void vm_enable_paging, (void) );
#if DEBUG_VMASSERT
#define vmassert(t) { \
if(!(t)) { minix_panic("vm: assert " #t " failed\n", __LINE__); } }
#else
#define vmassert(t) { }
#endif
/* *** Internal VM Functions *** */
PUBLIC void vm_init(void)
@@ -31,31 +44,35 @@ PUBLIC void vm_init(void)
phys_bytes vm_dir_base, vm_pt_base, phys_mem;
u32_t entry;
unsigned pages;
struct proc* rp;
if (!vm_size)
panic("i386_vm_init: no space for page tables", NO_NUM);
minix_panic("i386_vm_init: no space for page tables", NO_NUM);
if(vm_running)
return;
/* Align page directory */
o= (vm_base % PAGE_SIZE);
o= (vm_base % I386_PAGE_SIZE);
if (o != 0)
o= PAGE_SIZE-o;
o= I386_PAGE_SIZE-o;
vm_dir_base= vm_base+o;
/* Page tables start after the page directory */
vm_pt_base= vm_dir_base+PAGE_SIZE;
vm_pt_base= vm_dir_base+I386_PAGE_SIZE;
pt_size= (vm_base+vm_size)-vm_pt_base;
pt_size -= (pt_size % PAGE_SIZE);
pt_size -= (pt_size % I386_PAGE_SIZE);
/* Compute the number of pages based on vm_mem_high */
pages= (vm_mem_high-1)/PAGE_SIZE + 1;
pages= (vm_mem_high-1)/I386_PAGE_SIZE + 1;
if (pages * I386_VM_PT_ENT_SIZE > pt_size)
panic("i386_vm_init: page table too small", NO_NUM);
minix_panic("i386_vm_init: page table too small", NO_NUM);
for (p= 0; p*I386_VM_PT_ENT_SIZE < pt_size; p++)
{
phys_mem= p*PAGE_SIZE;
phys_mem= p*I386_PAGE_SIZE;
entry= phys_mem | I386_VM_USER | I386_VM_WRITE |
I386_VM_PRESENT;
if (phys_mem >= vm_mem_high)
@@ -65,15 +82,33 @@ PUBLIC void vm_init(void)
for (p= 0; p < I386_VM_DIR_ENTRIES; p++)
{
phys_mem= vm_pt_base + p*PAGE_SIZE;
phys_mem= vm_pt_base + p*I386_PAGE_SIZE;
entry= phys_mem | I386_VM_USER | I386_VM_WRITE |
I386_VM_PRESENT;
if (phys_mem >= vm_pt_base + pt_size)
entry= 0;
phys_put32(vm_dir_base + p*I386_VM_PT_ENT_SIZE, entry);
}
/* Set this cr3 in all currently running processes for
* future context switches.
*/
for (rp=BEG_PROC_ADDR; rp<END_PROC_ADDR; rp++) {
u32_t mycr3;
if(isemptyp(rp)) continue;
rp->p_seg.p_cr3 = vm_dir_base;
}
kernel_cr3 = vm_dir_base;
/* Set this cr3 now (not active until paging enabled). */
vm_set_cr3(vm_dir_base);
/* Actually enable paging (activating cr3 load above). */
level0(vm_enable_paging);
/* Don't do this init in the future. */
vm_running = 1;
}
PRIVATE void phys_put32(addr, value)
@@ -113,50 +148,6 @@ PRIVATE void vm_enable_paging(void)
write_cr0(cr0 | I386_CR0_PG);
}
PUBLIC void vm_map_range(base, size, offset)
u32_t base;
u32_t size;
u32_t offset;
{
u32_t curr_pt, curr_pt_addr, entry;
int dir_ent, pt_ent;
if (base % PAGE_SIZE != 0)
panic("map_range: bad base", base);
if (size % PAGE_SIZE != 0)
panic("map_range: bad size", size);
if (offset % PAGE_SIZE != 0)
panic("map_range: bad offset", offset);
curr_pt= -1;
curr_pt_addr= 0;
while (size != 0)
{
dir_ent= (base >> I386_VM_DIR_ENT_SHIFT);
pt_ent= (base >> I386_VM_PT_ENT_SHIFT) & I386_VM_PT_ENT_MASK;
if (dir_ent != curr_pt)
{
/* Get address of page table */
curr_pt= dir_ent;
curr_pt_addr= phys_get32(vm_cr3 +
dir_ent * I386_VM_PT_ENT_SIZE);
curr_pt_addr &= I386_VM_ADDR_MASK;
}
entry= offset | I386_VM_USER | I386_VM_WRITE |
I386_VM_PRESENT;
#if 0 /* Do we need this for memory mapped I/O? */
entry |= I386_VM_PCD | I386_VM_PWT;
#endif
phys_put32(curr_pt_addr + pt_ent * I386_VM_PT_ENT_SIZE, entry);
offset += PAGE_SIZE;
base += PAGE_SIZE;
size -= PAGE_SIZE;
}
/* reload root of page table. */
vm_set_cr3(vm_cr3);
}
PUBLIC vir_bytes alloc_remote_segment(u32_t *selector,
segframe_t *segments, int index, phys_bytes phys, vir_bytes size,
int priv)
@@ -188,6 +179,10 @@ PUBLIC phys_bytes umap_remote(struct proc* rp, int seg,
/* Calculate the physical memory address for a given virtual address. */
struct far_mem *fm;
#if 0
if(rp->p_misc_flags & MF_FULLVM) return 0;
#endif
if (bytes <= 0) return( (phys_bytes) 0);
if (seg < 0 || seg >= NR_REMOTE_SEGS) return( (phys_bytes) 0);
@@ -212,6 +207,9 @@ vir_bytes bytes; /* # of bytes to be copied */
phys_bytes pa; /* intermediate variables as phys_bytes */
phys_bytes seg_base;
if(seg != T && seg != D && seg != S)
minix_panic("umap_local: wrong seg", seg);
if (bytes <= 0) return( (phys_bytes) 0);
if (vir_addr + bytes <= vir_addr) return 0; /* overflow */
vc = (vir_addr + bytes - 1) >> CLICK_SHIFT; /* last click of data */
@@ -232,3 +230,569 @@ vir_bytes bytes; /* # of bytes to be copied */
return(seg_base + pa);
}
/*===========================================================================*
* umap_virtual *
*===========================================================================*/
PUBLIC phys_bytes umap_virtual(rp, seg, vir_addr, bytes)
register struct proc *rp; /* pointer to proc table entry for process */
int seg; /* T, D, or S segment */
vir_bytes vir_addr; /* virtual address in bytes within the seg */
vir_bytes bytes; /* # of bytes to be copied */
{
vir_bytes linear;
u32_t phys = 0;
if(seg == MEM_GRANT) {
phys = umap_grant(rp, vir_addr, bytes);
} else {
if(!(linear = umap_local(rp, seg, vir_addr, bytes))) {
kprintf("SYSTEM:umap_virtual: umap_local failed\n");
phys = 0;
} else {
if(vm_lookup(rp, linear, &phys, NULL) != OK) {
kprintf("SYSTEM:umap_virtual: vm_lookup of %s: seg 0x%lx: 0x%lx failed\n", rp->p_name, seg, vir_addr);
phys = 0;
}
if(phys == 0)
minix_panic("vm_lookup returned phys", phys);
}
}
if(phys == 0) {
kprintf("SYSTEM:umap_virtual: lookup failed\n");
return 0;
}
/* Now make sure addresses are contiguous in physical memory
* so that the umap makes sense.
*/
if(bytes > 0 && !vm_contiguous(rp, linear, bytes)) {
kprintf("umap_virtual: %s: %d at 0x%lx (vir 0x%lx) not contiguous\n",
rp->p_name, bytes, linear, vir_addr);
return 0;
}
/* phys must be larger than 0 (or the caller will think the call
* failed), and address must not cross a page boundary.
*/
vmassert(phys);
return phys;
}
/*===========================================================================*
* vm_lookup *
*===========================================================================*/
PUBLIC int vm_lookup(struct proc *proc, vir_bytes virtual, vir_bytes *physical, u32_t *ptent)
{
u32_t *root, *pt;
int pde, pte;
u32_t pde_v, pte_v;
vmassert(proc);
vmassert(physical);
vmassert(!(proc->p_rts_flags & SLOT_FREE));
/* Retrieve page directory entry. */
root = (u32_t *) proc->p_seg.p_cr3;
vmassert(!((u32_t) root % I386_PAGE_SIZE));
pde = I386_VM_PDE(virtual);
vmassert(pde >= 0 && pde < I386_VM_DIR_ENTRIES);
pde_v = phys_get32((u32_t) (root + pde));
if(!(pde_v & I386_VM_PRESENT)) {
#if 0
kprintf("vm_lookup: %d:%s:0x%lx: cr3 0x%lx: pde %d not present\n",
proc->p_endpoint, proc->p_name, virtual, root, pde);
kprintf("kernel stack: ");
util_stacktrace();
#endif
return EFAULT;
}
/* Retrieve page table entry. */
pt = (u32_t *) I386_VM_PFA(pde_v);
vmassert(!((u32_t) pt % I386_PAGE_SIZE));
pte = I386_VM_PTE(virtual);
vmassert(pte >= 0 && pte < I386_VM_PT_ENTRIES);
pte_v = phys_get32((u32_t) (pt + pte));
if(!(pte_v & I386_VM_PRESENT)) {
#if 0
kprintf("vm_lookup: %d:%s:0x%lx: cr3 %lx: pde %d: pte %d not present\n",
proc->p_endpoint, proc->p_name, virtual, root, pde, pte);
kprintf("kernel stack: ");
util_stacktrace();
#endif
return EFAULT;
}
if(ptent) *ptent = pte_v;
/* Actual address now known; retrieve it and add page offset. */
*physical = I386_VM_PFA(pte_v);
*physical += virtual % I386_PAGE_SIZE;
return OK;
}
/* From virtual address v in process p,
* lookup physical address and assign it to d.
* If p is NULL, assume it's already a physical address.
*/
#define LOOKUP(d, p, v, flagsp) { \
int r; \
if(!(p)) { (d) = (v); } \
else { \
if((r=vm_lookup((p), (v), &(d), flagsp)) != OK) { \
kprintf("vm_copy: lookup failed of 0x%lx in %d (%s)\n"\
"kernel stacktrace: ", (v), (p)->p_endpoint, \
(p)->p_name); \
util_stacktrace(); \
return r; \
} } }
/*===========================================================================*
* vm_copy *
*===========================================================================*/
int vm_copy(vir_bytes src, struct proc *srcproc,
vir_bytes dst, struct proc *dstproc, phys_bytes bytes)
{
#define WRAPS(v) (ULONG_MAX - (v) <= bytes)
if(WRAPS(src) || WRAPS(dst))
minix_panic("vm_copy: linear address wraps", NO_NUM);
while(bytes > 0) {
u32_t n, flags;
phys_bytes p_src, p_dst;
#define PAGEREMAIN(v) (I386_PAGE_SIZE - ((v) % I386_PAGE_SIZE))
/* We can copy this number of bytes without
* crossing a page boundary, but don't copy more
* than asked.
*/
n = MIN(PAGEREMAIN(src), PAGEREMAIN(dst));
n = MIN(n, bytes);
vmassert(n > 0);
vmassert(n <= I386_PAGE_SIZE);
/* Convert both virtual addresses to physical and do
* copy.
*/
LOOKUP(p_src, srcproc, src, NULL);
LOOKUP(p_dst, dstproc, dst, &flags);
if(!(flags & I386_VM_WRITE)) {
kprintf("vm_copy: copying to nonwritable page\n");
kprintf("kernel stack: ");
util_stacktrace();
return EFAULT;
}
phys_copy(p_src, p_dst, n);
/* Book number of bytes copied. */
vmassert(bytes >= n);
bytes -= n;
src += n;
dst += n;
}
return OK;
}
/*===========================================================================*
* vm_contiguous *
*===========================================================================*/
PUBLIC int vm_contiguous(struct proc *targetproc, u32_t vir_buf, size_t bytes)
{
int first = 1, r, boundaries = 0;
u32_t prev_phys, po;
u32_t prev_vir;
vmassert(targetproc);
vmassert(bytes > 0);
vmassert(vm_running);
/* Start and end at page boundary to make logic simpler. */
po = vir_buf % I386_PAGE_SIZE;
if(po > 0) {
bytes += po;
vir_buf -= po;
}
po = (vir_buf + bytes) % I386_PAGE_SIZE;
if(po > 0)
bytes += I386_PAGE_SIZE - po;
/* Keep going as long as we cross a page boundary. */
while(bytes > 0) {
u32_t phys;
if((r=vm_lookup(targetproc, vir_buf, &phys, NULL)) != OK) {
kprintf("vm_contiguous: vm_lookup failed, %d\n", r);
kprintf("kernel stack: ");
util_stacktrace();
return 0;
}
if(!first) {
if(prev_phys+I386_PAGE_SIZE != phys) {
kprintf("vm_contiguous: no (0x%lx, 0x%lx)\n",
prev_phys, phys);
kprintf("kernel stack: ");
util_stacktrace();
return 0;
}
}
first = 0;
prev_phys = phys;
prev_vir = vir_buf;
vir_buf += I386_PAGE_SIZE;
bytes -= I386_PAGE_SIZE;
boundaries++;
}
if(verbose_vm)
kprintf("vm_contiguous: yes (%d boundaries tested)\n",
boundaries);
return 1;
}
int vm_checkrange_verbose = 0;
/*===========================================================================*
* vm_checkrange *
*===========================================================================*/
PUBLIC int vm_checkrange(struct proc *caller, struct proc *target,
vir_bytes vir, vir_bytes bytes, int wrfl, int checkonly)
{
u32_t flags, po, v;
int r;
vmassert(vm_running);
/* If caller has had a reply to this request, return it. */
if(RTS_ISSET(caller, VMREQUEST)) {
if(caller->p_vmrequest.who == target->p_endpoint) {
if(caller->p_vmrequest.vmresult == VMSUSPEND)
minix_panic("check sees VMSUSPEND?", NO_NUM);
RTS_LOCK_UNSET(caller, VMREQUEST);
#if 0
kprintf("SYSTEM: vm_checkrange: returning vmresult %d\n",
caller->p_vmrequest.vmresult);
#endif
return caller->p_vmrequest.vmresult;
} else {
#if 0
kprintf("SYSTEM: vm_checkrange: caller has a request for %d, "
"but our target is %d\n",
caller->p_vmrequest.who, target->p_endpoint);
#endif
}
}
po = vir % I386_PAGE_SIZE;
if(po > 0) {
vir -= po;
bytes += po;
}
vmassert(target);
vmassert(bytes > 0);
for(v = vir; v < vir + bytes; v+= I386_PAGE_SIZE) {
u32_t phys;
/* If the page exists, and is writable when write access was
* requested, we're OK for this page.
*/
if(vm_lookup(target, v, &phys, &flags) == OK &&
!(wrfl && !(flags & I386_VM_WRITE))) {
if(vm_checkrange_verbose) {
#if 0
kprintf("SYSTEM: checkrange:%s:%d: 0x%lx: write 0x%lx, flags 0x%lx, phys 0x%lx, OK\n",
target->p_name, target->p_endpoint, v, wrfl, flags, phys);
#endif
}
continue;
}
if(vm_checkrange_verbose) {
kprintf("SYSTEM: checkrange:%s:%d: 0x%lx: write 0x%lx, flags 0x%lx, phys 0x%lx, NOT OK\n",
target->p_name, target->p_endpoint, v, wrfl, flags, phys);
}
if(checkonly)
return VMSUSPEND;
/* This range is not OK for this process. Set parameters
* of the request and notify VM about the pending request.
*/
if(RTS_ISSET(caller, VMREQUEST))
minix_panic("VMREQUEST already set", caller->p_endpoint);
RTS_LOCK_SET(caller, VMREQUEST);
/* Set parameters in caller. */
caller->p_vmrequest.writeflag = wrfl;
caller->p_vmrequest.start = vir;
caller->p_vmrequest.length = bytes;
caller->p_vmrequest.who = target->p_endpoint;
/* Set caller in target. */
target->p_vmrequest.requestor = caller;
/* Connect caller on vmrequest wait queue. */
caller->p_vmrequest.nextrequestor = vmrequest;
vmrequest = caller;
soft_notify(VM_PROC_NR);
#if 0
kprintf("SYSTEM: vm_checkrange: range bad for "
"target %s:0x%lx-0x%lx, caller %s\n",
target->p_name, vir, vir+bytes, caller->p_name);
kprintf("vm_checkrange kernel trace: ");
util_stacktrace();
kprintf("target trace: ");
proc_stacktrace(target);
#endif
if(target->p_endpoint == VM_PROC_NR) {
kprintf("caller trace: ");
proc_stacktrace(caller);
kprintf("target trace: ");
proc_stacktrace(target);
minix_panic("VM ranges should be OK", NO_NUM);
}
return VMSUSPEND;
}
return OK;
}
char *flagstr(u32_t e, int dir)
{
static char str[80];
strcpy(str, "");
#define FLAG(v) do { if(e & (v)) { strcat(str, #v " "); } } while(0)
FLAG(I386_VM_PRESENT);
FLAG(I386_VM_WRITE);
FLAG(I386_VM_USER);
FLAG(I386_VM_PWT);
FLAG(I386_VM_PCD);
if(dir)
FLAG(I386_VM_BIGPAGE); /* Page directory entry only */
else
FLAG(I386_VM_DIRTY); /* Page table entry only */
return str;
}
void vm_pt_print(u32_t *pagetable, u32_t v)
{
int pte, l = 0;
int col = 0;
vmassert(!((u32_t) pagetable % I386_PAGE_SIZE));
for(pte = 0; pte < I386_VM_PT_ENTRIES; pte++) {
u32_t pte_v, pfa;
pte_v = phys_get32((u32_t) (pagetable + pte));
if(!(pte_v & I386_VM_PRESENT))
continue;
pfa = I386_VM_PFA(pte_v);
kprintf("%4d:%08lx:%08lx ",
pte, v + I386_PAGE_SIZE*pte, pfa);
col++;
if(col == 3) { kprintf("\n"); col = 0; }
}
if(col > 0) kprintf("\n");
return;
}
/*===========================================================================*
* vm_print *
*===========================================================================*/
void vm_print(u32_t *root)
{
int pde;
vmassert(!((u32_t) root % I386_PAGE_SIZE));
for(pde = 0; pde < I386_VM_DIR_ENTRIES; pde++) {
u32_t pde_v;
u32_t *pte_a;
pde_v = phys_get32((u32_t) (root + pde));
if(!(pde_v & I386_VM_PRESENT))
continue;
pte_a = (u32_t *) I386_VM_PFA(pde_v);
kprintf("%4d: pt %08lx %s\n",
pde, pte_a, flagstr(pde_v, 1));
vm_pt_print(pte_a, pde * I386_VM_PT_ENTRIES * I386_PAGE_SIZE);
}
return;
}
/*===========================================================================*
* virtual_copy_f *
*===========================================================================*/
PUBLIC int virtual_copy_f(src_addr, dst_addr, bytes, vmcheck)
struct vir_addr *src_addr; /* source virtual address */
struct vir_addr *dst_addr; /* destination virtual address */
vir_bytes bytes; /* # of bytes to copy */
int vmcheck; /* if nonzero, can return VMSUSPEND */
{
/* Copy bytes from virtual address src_addr to virtual address dst_addr.
* Virtual addresses can be in ABS, LOCAL_SEG, REMOTE_SEG, or BIOS_SEG.
*/
struct vir_addr *vir_addr[2]; /* virtual source and destination address */
phys_bytes phys_addr[2]; /* absolute source and destination */
int seg_index;
int i, r;
struct proc *procs[2];
/* Check copy count. */
if (bytes <= 0) return(EDOM);
/* Do some more checks and map virtual addresses to physical addresses. */
vir_addr[_SRC_] = src_addr;
vir_addr[_DST_] = dst_addr;
for (i=_SRC_; i<=_DST_; i++) {
int proc_nr, type;
struct proc *p;
type = vir_addr[i]->segment & SEGMENT_TYPE;
if((type != PHYS_SEG && type != BIOS_SEG) &&
isokendpt(vir_addr[i]->proc_nr_e, &proc_nr))
p = proc_addr(proc_nr);
else
p = NULL;
procs[i] = p;
/* Get physical address. */
switch(type) {
case LOCAL_SEG:
case LOCAL_VM_SEG:
if(!p) return EDEADSRCDST;
seg_index = vir_addr[i]->segment & SEGMENT_INDEX;
if(type == LOCAL_SEG)
phys_addr[i] = umap_local(p, seg_index, vir_addr[i]->offset,
bytes);
else
phys_addr[i] = umap_virtual(p, seg_index, vir_addr[i]->offset,
bytes);
if(phys_addr[i] == 0) {
kprintf("virtual_copy: map 0x%x failed for %s seg %d, "
"offset %lx, len %d, i %d\n",
type, p->p_name, seg_index, vir_addr[i]->offset,
bytes, i);
}
break;
case REMOTE_SEG:
if(!p) return EDEADSRCDST;
seg_index = vir_addr[i]->segment & SEGMENT_INDEX;
phys_addr[i] = umap_remote(p, seg_index, vir_addr[i]->offset, bytes);
break;
#if _MINIX_CHIP == _CHIP_INTEL
case BIOS_SEG:
phys_addr[i] = umap_bios(vir_addr[i]->offset, bytes );
break;
#endif
case PHYS_SEG:
phys_addr[i] = vir_addr[i]->offset;
break;
case GRANT_SEG:
phys_addr[i] = umap_grant(p, vir_addr[i]->offset, bytes);
break;
default:
kprintf("virtual_copy: strange type 0x%x\n", type);
return(EINVAL);
}
/* Check if mapping succeeded. */
if (phys_addr[i] <= 0 && vir_addr[i]->segment != PHYS_SEG) {
kprintf("virtual_copy EFAULT\n");
return(EFAULT);
}
}
if(vmcheck && procs[_SRC_])
CHECKRANGE_OR_SUSPEND(procs[_SRC_], phys_addr[_SRC_], bytes, 0);
if(vmcheck && procs[_DST_])
CHECKRANGE_OR_SUSPEND(procs[_DST_], phys_addr[_DST_], bytes, 1);
/* Now copy bytes between physical addresses. */
if(!vm_running || (procs[_SRC_] == NULL && procs[_DST_] == NULL)) {
/* Without vm, address ranges actually are physical. */
phys_copy(phys_addr[_SRC_], phys_addr[_DST_], (phys_bytes) bytes);
r = OK;
} else {
/* With vm, addresses need further interpretation. */
r = vm_copy(phys_addr[_SRC_], procs[_SRC_],
phys_addr[_DST_], procs[_DST_], (phys_bytes) bytes);
if(r != OK) {
kprintf("vm_copy: %lx to %lx failed\n",
phys_addr[_SRC_],phys_addr[_DST_]);
}
}
return(r);
}
/*===========================================================================*
* data_copy *
*===========================================================================*/
PUBLIC int data_copy(
endpoint_t from_proc, vir_bytes from_addr,
endpoint_t to_proc, vir_bytes to_addr,
size_t bytes)
{
struct vir_addr src, dst;
src.segment = dst.segment = D;
src.offset = from_addr;
dst.offset = to_addr;
src.proc_nr_e = from_proc;
dst.proc_nr_e = to_proc;
return virtual_copy(&src, &dst, bytes);
}
/*===========================================================================*
* arch_pre_exec *
*===========================================================================*/
PUBLIC int arch_pre_exec(struct proc *pr, u32_t ip, u32_t sp)
{
/* wipe extra LDT entries, set program counter, and stack pointer. */
memset(pr->p_seg.p_ldt + EXTRA_LDT_INDEX, 0,
sizeof(pr->p_seg.p_ldt[0]) * (LDT_SIZE - EXTRA_LDT_INDEX));
pr->p_reg.pc = ip;
pr->p_reg.sp = sp;
}
/*===========================================================================*
* arch_umap *
*===========================================================================*/
PUBLIC int arch_umap(struct proc *pr, vir_bytes offset, vir_bytes count,
int seg, phys_bytes *addr)
{
switch(seg) {
case BIOS_SEG:
*addr = umap_bios(offset, count);
return OK;
}
/* This must be EINVAL; the umap fallback function in
* lib/syslib/alloc_util.c depends on it to detect an
* older kernel (as opposed to mapping error).
*/
return EINVAL;
}
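
As a side note, the per-page chunking that vm_copy() performs with PAGEREMAIN() and MIN() is easy to check by hand: each iteration copies at most up to the next page boundary of either the source or the destination, and never more than the bytes remaining. The snippet below is a standalone illustration with made-up addresses, reusing only that arithmetic.

/* Standalone illustration of vm_copy()'s chunk-size computation.
 * Worked example with src = 0x1ffa, dst = 0x3008, bytes = 100, 4K pages:
 *   PAGEREMAIN(src) = 4096 - 0xffa = 6
 *   PAGEREMAIN(dst) = 4096 - 0x008 = 4088
 *   n = MIN(MIN(6, 4088), 100) = 6
 * so the first iteration copies 6 bytes and ends exactly on the source's
 * page boundary; the next iteration starts on a fresh source page.
 */
#define PAGE_SIZE	4096UL
#define PAGEREMAIN(v)	(PAGE_SIZE - ((v) % PAGE_SIZE))
#define MIN(a, b)	((a) < (b) ? (a) : (b))

static unsigned long next_chunk(unsigned long src, unsigned long dst,
	unsigned long bytes)
{
	unsigned long n;

	n = MIN(PAGEREMAIN(src), PAGEREMAIN(dst));
	n = MIN(n, bytes);
	return n;	/* bytes copyable without crossing a page boundary */
}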

View File

@@ -1,4 +1,4 @@
#
#
! This file, mpx386.s, is included by mpx.s when Minix is compiled for
! 32-bit Intel CPUs. The alternative mpx88.s is compiled for 16-bit CPUs.
@@ -58,6 +58,7 @@ begbss:
#include <ibm/interrupt.h>
#include <archconst.h>
#include "../../const.h"
#include "vm.h"
#include "sconst.h"
/* Selected 386 tss offsets. */
@@ -71,6 +72,13 @@ begbss:
.define _restart
.define save
.define _kernel_cr3
.define _pagefault_cr2
.define _pagefault_count
.define errexception
.define exception1
.define exception
.define _divide_error
.define _single_step_exception
@@ -88,6 +96,9 @@ begbss:
.define _general_protection
.define _page_fault
.define _copr_error
.define _params_size
.define _params_offset
.define _mon_ds
.define _hwint00 ! handlers for hardware interrupts
.define _hwint01
@@ -173,6 +184,11 @@ copygdt:
mov ss, ax
mov esp, k_stktop ! set sp to point to the top of kernel stack
! Save boot parameters into these global variables for i386 code
mov (_params_size), edx
mov (_params_offset), ebx
mov (_mon_ds), SS_SELECTOR
! Call C startup code to set up a proper environment to run main().
push edx
push ebx
@@ -216,6 +232,7 @@ csinit:
#define hwint_master(irq) \
call save /* save interrupted process state */;\
push (_irq_handlers+4*irq) /* irq_handlers[irq] */;\
LOADCR3WITHEAX(irq, (_kernel_cr3)) /* switch to kernel page table */;\
call _intr_handle /* intr_handle(irq_handlers[irq]) */;\
pop ecx ;\
cmp (_irq_actids+4*irq), 0 /* interrupt still active? */;\
@@ -267,6 +284,7 @@ _hwint07: ! Interrupt routine for irq 7 (printer)
#define hwint_slave(irq) \
call save /* save interrupted process state */;\
push (_irq_handlers+4*irq) /* irq_handlers[irq] */;\
LOADCR3WITHEAX(irq, (_kernel_cr3)) /* switch to kernel page table */;\
call _intr_handle /* intr_handle(irq_handlers[irq]) */;\
pop ecx ;\
cmp (_irq_actids+4*irq), 0 /* interrupt still active? */;\
@@ -358,6 +376,7 @@ _p_s_call:
o16 push es
o16 push fs
o16 push gs
mov si, ss ! ss is kernel data segment
mov ds, si ! load rest of kernel segments
mov es, si ! kernel does not use fs, gs
@@ -371,6 +390,9 @@ _p_s_call:
push ebx ! pointer to user message
push eax ! source / destination
push ecx ! call number (ipc primitive to use)
! LOADCR3WITHEAX(0x20, (_kernel_cr3))
call _sys_call ! sys_call(call_nr, src_dst, m_ptr, bit_map)
! caller is now explicitly in proc_ptr
mov AXREG(esi), eax ! sys_call MUST PRESERVE si
@@ -391,6 +413,7 @@ _restart:
mov (_next_ptr), 0
0: mov esp, (_proc_ptr) ! will assume P_STACKBASE == 0
lldt P_LDT_SEL(esp) ! enable process' segment descriptors
LOADCR3WITHEAX(0x21, P_CR3(esp)) ! switch to process page table
lea eax, P_STACKTOP(esp) ! arrange for next interrupt
mov (_tss+TSS3_S_SP0), eax ! to save state in process table
restart1:
@@ -464,6 +487,11 @@ _general_protection:
_page_fault:
push PAGE_FAULT_VECTOR
push eax
mov eax, cr2
sseg mov (_pagefault_cr2), eax
sseg inc (_pagefault_count)
pop eax
jmp errexception
_copr_error:
@@ -492,12 +520,16 @@ errexception:
sseg pop (trap_errno)
exception1: ! Common for all exceptions.
push eax ! eax is scratch register
mov eax, 0+4(esp) ! old eip
sseg mov (old_eip), eax
movzx eax, 4+4(esp) ! old cs
sseg mov (old_cs), eax
mov eax, 8+4(esp) ! old eflags
sseg mov (old_eflags), eax
LOADCR3WITHEAX(0x24, (_kernel_cr3))
pop eax
call save
push (old_eflags)
@@ -517,6 +549,15 @@ _level0_call:
call save
jmp (_level0_func)
!*===========================================================================*
!* load_kernel_cr3 *
!*===========================================================================*
.align 16
_load_kernel_cr3:
mov eax, (_kernel_cr3)
mov cr3, eax
ret
!*===========================================================================*
!* data *
!*===========================================================================*
@@ -533,3 +574,4 @@ k_stktop: ! top of kernel stack
.comm old_eip, 4
.comm old_cs, 4
.comm old_eflags, 4

View File

@@ -307,7 +307,10 @@ PUBLIC void alloc_segments(register struct proc *rp)
code_bytes = data_bytes; /* common I&D, poor protect */
else
code_bytes = (phys_bytes) rp->p_memmap[T].mem_len << CLICK_SHIFT;
privilege = (iskernelp(rp)) ? TASK_PRIVILEGE : USER_PRIVILEGE;
if( (iskernelp(rp)))
privilege = TASK_PRIVILEGE;
else
privilege = USER_PRIVILEGE;
init_codeseg(&rp->p_seg.p_ldt[CS_LDT_INDEX],
(phys_bytes) rp->p_memmap[T].mem_phys << CLICK_SHIFT,
code_bytes, privilege);

View File

@@ -44,10 +44,16 @@ _PROTOTYPE( void trp, (void) );
_PROTOTYPE( void s_call, (void) ), _PROTOTYPE( p_s_call, (void) );
_PROTOTYPE( void level0_call, (void) );
/* memory.c */
_PROTOTYPE( void vir_insb, (u16_t port, struct proc *proc, u32_t vir, size_t count));
_PROTOTYPE( void vir_outsb, (u16_t port, struct proc *proc, u32_t vir, size_t count));
_PROTOTYPE( void vir_insw, (u16_t port, struct proc *proc, u32_t vir, size_t count));
_PROTOTYPE( void vir_outsw, (u16_t port, struct proc *proc, u32_t vir, size_t count));
/* exception.c */
_PROTOTYPE( void exception, (unsigned vec_nr, u32_t trap_errno,
u32_t old_eip, U16_t old_cs, u32_t old_eflags) );
_PROTOTYPE( void stacktrace, (struct proc *proc) );
/* klib386.s */
_PROTOTYPE( void level0, (void (*func)(void)) );
@@ -62,6 +68,7 @@ _PROTOTYPE( void phys_insb, (U16_t port, phys_bytes buf, size_t count) );
_PROTOTYPE( void phys_insw, (U16_t port, phys_bytes buf, size_t count) );
_PROTOTYPE( void phys_outsb, (U16_t port, phys_bytes buf, size_t count) );
_PROTOTYPE( void phys_outsw, (U16_t port, phys_bytes buf, size_t count) );
_PROTOTYPE( void i386_invlpg, (U32_t addr) );
/* protect.c */
_PROTOTYPE( void prot_init, (void) );

View File

@@ -23,5 +23,6 @@ SPREG = PSWREG + W
SSREG = SPREG + W
P_STACKTOP = SSREG + W
P_LDT_SEL = P_STACKTOP
P_LDT = P_LDT_SEL + W
P_CR3 = P_LDT_SEL + W
P_LDT = P_CR3 + W
Msize = 9 ! size of a message in 32-bit words

View File

@@ -3,10 +3,14 @@
#include "../../kernel.h"
#include <unistd.h>
#include <ctype.h>
#include <string.h>
#include <ibm/cmos.h>
#include <ibm/bios.h>
#include <minix/portio.h>
#include <minix/u64.h>
#include <minix/sysutil.h>
#include <a.out.h>
#include "proto.h"
#include "../../proc.h"
@@ -31,7 +35,39 @@ PUBLIC void arch_shutdown(int how)
* the program if not already done.
*/
if (how != RBT_MONITOR)
phys_copy(vir2phys(""), kinfo.params_base, 1);
arch_set_params("", 1);
if(minix_panicing) {
int source, dest;
static char mybuffer[sizeof(params_buffer)];
char *lead = "echo \\n*** kernel messages:\\n";
int leadlen = strlen(lead);
strcpy(mybuffer, lead);
#define DECSOURCE source = (source - 1 + _KMESS_BUF_SIZE) % _KMESS_BUF_SIZE
dest = sizeof(mybuffer)-1;
mybuffer[dest--] = '\0';
source = kmess.km_next;
DECSOURCE;
while(dest >= leadlen) {
char c = kmess.km_buf[source];
if(c == '\n') {
mybuffer[dest--] = 'n';
mybuffer[dest] = '\\';
} else if(isprint(c) &&
c != '\'' && c != '"' &&
c != '\\' && c != ';') {
mybuffer[dest] = c;
} else mybuffer[dest] = '|';
DECSOURCE;
dest--;
}
arch_set_params(mybuffer, strlen(mybuffer)+1);
}
level0(monitor);
} else {
/* Reset the system by forcing a processor shutdown. First stop
@@ -44,6 +80,17 @@ PUBLIC void arch_shutdown(int how)
}
}
/* address of a.out headers, set in mpx386.s */
phys_bytes aout;
PUBLIC void arch_get_aout_headers(int i, struct exec *h)
{
/* The bootstrap loader created an array of the a.out headers at
* absolute address 'aout'. Get one element to h.
*/
phys_copy(aout + i * A_MINHDR, vir2phys(h), (phys_bytes) A_MINHDR);
}
PUBLIC void system_init(void)
{
prot_init();
@@ -122,7 +169,7 @@ PUBLIC void ser_dump_proc()
pp->p_priority, pp->p_max_priority,
pp->p_user_time, pp->p_sys_time,
pp->p_reg.pc);
stacktrace(pp);
proc_stacktrace(pp);
}
}
@@ -219,3 +266,23 @@ PUBLIC void cons_seth(int pos, int n)
else
cons_setc(pos, 'A'+(n-10));
}
/* Saved by mpx386.s into these variables. */
u32_t params_size, params_offset, mon_ds;
PUBLIC int arch_get_params(char *params, int maxsize)
{
phys_copy(seg2phys(mon_ds) + params_offset, vir2phys(params),
MIN(maxsize, params_size));
params[maxsize-1] = '\0';
return OK;
}
PUBLIC int arch_set_params(char *params, int size)
{
if(size > params_size)
return E2BIG;
phys_copy(vir2phys(params), seg2phys(mon_ds) + params_offset, size);
return OK;
}

kernel/arch/i386/vm.h (new file, 27 lines)
View File

@@ -0,0 +1,27 @@
.define _load_kernel_cr3
.define _last_cr3
#define LOADKERNELCR3 ;\
inc (_cr3switch) ;\
mov eax, (_kernel_cr3) ;\
cmp (_last_cr3), eax ;\
jz 9f ;\
push _load_kernel_cr3 ;\
call _level0 ;\
pop eax ;\
mov eax, (_kernel_cr3) ;\
mov (_last_cr3), eax ;\
inc (_cr3reload) ;\
9:
#define LOADCR3WITHEAX(type, newcr3) ;\
sseg inc (_cr3switch) ;\
sseg mov eax, newcr3 ;\
sseg cmp (_last_cr3), eax ;\
jz 8f ;\
mov cr3, eax ;\
sseg inc (_cr3reload) ;\
sseg mov (_last_cr3), eax ;\
8:

View File

@@ -75,7 +75,7 @@ PUBLIC void clock_task()
result = receive(ANY, &m);
if(result != OK)
panic("receive() failed", result);
minix_panic("receive() failed", result);
/* Handle the request. Only clock ticks are expected. */
switch (m.m_type) {
@@ -181,6 +181,8 @@ irq_hook_t *hook;
*/
register unsigned ticks;
if(minix_panicing) return;
/* Get number of ticks and update realtime. */
ticks = lost_ticks + 1;
lost_ticks = 0;
@@ -201,8 +203,10 @@ irq_hook_t *hook;
bill_ptr->p_ticks_left -= ticks;
}
#if 0
/* Update load average. */
load_update();
#endif
/* Check if do_clocktick() must be called. Done for alarms and scheduling.
* Some processes, such as the kernel tasks, cannot be preempted.

View File

@@ -49,12 +49,6 @@
*/
#define P_NAME_LEN 8
/* Kernel diagnostics are written to a circular buffer. After each message,
* a system server is notified and a copy of the buffer can be retrieved to
* display the message. The buffer's size can safely be reduced.
*/
#define KMESS_BUF_SIZE 256
/* Buffer to gather randomness. This is used to generate a random stream by
* the MEMORY driver when reading from /dev/random.
*/
@@ -74,11 +68,5 @@
#define K_PARAM_SIZE 512
/* This section allows kernel debugging and timing functionality to be enabled.
* For normal operation all options should be disabled.
*/
#define DEBUG_SCHED_CHECK 0 /* sanity check of scheduling queues */
#define DEBUG_TIME_LOCKS 0 /* measure time spent in locks */
#endif /* CONFIG_H */

View File

@@ -6,6 +6,7 @@
#include <minix/bitmap.h>
#include "config.h"
#include "debug.h"
/* Map a process number to a privilege structure id. */
#define s_nr_to_id(n) (NR_TASKS + (n) + 1)
@@ -37,24 +38,15 @@
( MAP_CHUNK(map.chunk,bit) &= ~(1 << CHUNK_OFFSET(bit) )
#define NR_SYS_CHUNKS BITMAP_CHUNKS(NR_SYS_PROCS)
#if DEBUG_LOCK_CHECK
#define reallock(c, v) { if(intr_disabled()) { kinfo.relocking++; } else { intr_disable(); } }
#else
#define reallock(c, v) intr_disable()
#endif
#define reallock do { int d; d = intr_disabled(); intr_disable(); locklevel++; if(d && locklevel == 1) { minix_panic("reallock while interrupts disabled first time", __LINE__); } } while(0)
#define realunlock(c) intr_enable()
#define realunlock do { if(!intr_disabled()) { minix_panic("realunlock while interrupts enabled", __LINE__); } if(locklevel < 1) { minix_panic("realunlock while locklevel below 1", __LINE__); } locklevel--; if(locklevel == 0) { intr_enable(); } } while(0)
#if DEBUG_TIME_LOCKS
#define lock(c, v) do { reallock(c, v); locktimestart(c, v); } while(0)
#define unlock(c) do { locktimeend(c); realunlock(c); } while(0)
#else
/* Disable/ enable hardware interrupts. The parameters of lock() and unlock()
* are used when debugging is enabled. See debug.h for more information.
*/
#define lock(c, v) reallock(c, v)
#define unlock(c) realunlock(c)
#endif
#define lock reallock
#define unlock realunlock
/* args to intr_init() */
#define INTS_ORIG 0 /* restore interrupts */

View File

@@ -6,12 +6,13 @@
#include "kernel.h"
#include "proc.h"
#include "debug.h"
#include <limits.h>
#if DEBUG_TIME_LOCKS /* only include code if enabled */
#include <minix/sysutil.h>
#include <limits.h>
#include <string.h>
/* Data structures to store lock() timing data. */
struct lock_timingdata timingdata[TIMING_CATEGORIES];
static unsigned long starttimes[TIMING_CATEGORIES][2];
#define HIGHCOUNT 0
@@ -100,69 +101,75 @@ void timer_end(int cat)
return;
}
#endif /* DEBUG_TIME_LOCKS */
#if DEBUG_SCHED_CHECK /* only include code if enabled */
#define MAX_LOOP (NR_PROCS + NR_TASKS)
PUBLIC void
check_runqueues(char *when)
check_runqueues_f(char *file, int line)
{
int q, l = 0;
register struct proc *xp;
#define MYPANIC(msg) { \
static char buf[100]; \
strcpy(buf, file); \
strcat(buf, ": "); \
util_nstrcat(buf, line);\
strcat(buf, ": "); \
strcat(buf, msg); \
minix_panic(buf, NO_NUM); \
}
for (xp = BEG_PROC_ADDR; xp < END_PROC_ADDR; ++xp) {
xp->p_found = 0;
if (l++ > MAX_LOOP) { panic("check error", NO_NUM); }
if (l++ > MAX_LOOP) { MYPANIC("check error"); }
}
for (q=l=0; q < NR_SCHED_QUEUES; q++) {
if (rdy_head[q] && !rdy_tail[q]) {
kprintf("head but no tail in %d: %s", q, when);
panic("scheduling error", NO_NUM);
kprintf("head but no tail in %d\n", q);
MYPANIC("scheduling error");
}
if (!rdy_head[q] && rdy_tail[q]) {
kprintf("tail but no head in %d: %s", q, when);
panic("scheduling error", NO_NUM);
kprintf("tail but no head in %d\n", q);
MYPANIC("scheduling error");
}
if (rdy_tail[q] && rdy_tail[q]->p_nextready != NIL_PROC) {
kprintf("tail and tail->next not null in %d: %s", q, when);
panic("scheduling error", NO_NUM);
kprintf("tail and tail->next not null in %d\n", q);
MYPANIC("scheduling error");
}
for(xp = rdy_head[q]; xp != NIL_PROC; xp = xp->p_nextready) {
if (!xp->p_ready) {
kprintf("scheduling error: unready on runq %d proc %d: %s\n",
q, xp->p_nr, when);
panic("found unready process on run queue", NO_NUM);
kprintf("scheduling error: unready on runq %d proc %d\n",
q, xp->p_nr);
MYPANIC("found unready process on run queue");
}
if (xp->p_priority != q) {
kprintf("scheduling error: wrong priority q %d proc %d: %s\n",
q, xp->p_nr, when);
panic("wrong priority", NO_NUM);
kprintf("scheduling error: wrong priority q %d proc %d\n",
q, xp->p_nr);
MYPANIC("wrong priority");
}
if (xp->p_found) {
kprintf("scheduling error: double sched q %d proc %d: %s\n",
q, xp->p_nr, when);
panic("proc more than once on scheduling queue", NO_NUM);
kprintf("scheduling error: double sched q %d proc %d\n",
q, xp->p_nr);
MYPANIC("proc more than once on scheduling queue");
}
xp->p_found = 1;
if (xp->p_nextready == NIL_PROC && rdy_tail[q] != xp) {
kprintf("sched err: last element not tail q %d proc %d: %s\n",
q, xp->p_nr, when);
panic("scheduling error", NO_NUM);
kprintf("sched err: last element not tail q %d proc %d\n",
q, xp->p_nr);
MYPANIC("scheduling error");
}
if (l++ > MAX_LOOP) panic("loop in schedule queue?", NO_NUM);
if (l++ > MAX_LOOP) MYPANIC("loop in schedule queue?");
}
}
l = 0;
for (xp = BEG_PROC_ADDR; xp < END_PROC_ADDR; ++xp) {
if (! isemptyp(xp) && xp->p_ready && ! xp->p_found) {
kprintf("sched error: ready proc %d not on queue: %s\n",
xp->p_nr, when);
panic("ready proc not on scheduling queue", NO_NUM);
if (l++ > MAX_LOOP) { panic("loop in proc.t?", NO_NUM); }
kprintf("sched error: ready proc %d not on queue\n", xp->p_nr);
MYPANIC("ready proc not on scheduling queue");
if (l++ > MAX_LOOP) { MYPANIC("loop in debug.c?"); }
}
}
}

View File

@@ -21,36 +21,16 @@
*/
#define DEBUG_ENABLE_IPC_WARNINGS 0
#define DEBUG_STACKTRACE 1
#define DEBUG_VMASSERT 1
#define DEBUG_SCHED_CHECK 1
#define DEBUG_TIME_LOCKS 1
/* It's interesting to measure the time spent within locked regions, because
* this is the time that the system is deaf to interrupts.
*/
#if DEBUG_TIME_LOCKS
#define TIMING_POINTS 20 /* timing resolution */
#define TIMING_CATEGORIES 20
#define TIMING_NAME 10
/* Definition of the data structure to store lock() timing data. */
struct lock_timingdata {
char names[TIMING_NAME];
unsigned long lock_timings[TIMING_POINTS];
unsigned long lock_timings_range[2];
unsigned long binsize, resets, misses, measurements;
};
/* The data is declared here, but allocated in debug.c. */
extern struct lock_timingdata timingdata[TIMING_CATEGORIES];
/* Prototypes for the timing functionality. */
_PROTOTYPE( void timer_start, (int cat, char *name) );
_PROTOTYPE( void timer_end, (int cat) );
#define locktimestart(c, v) timer_start(c, v)
#define locktimeend(c) timer_end(c)
#else
#define locktimestart(c, v)
#define locktimeend(c)
#endif /* DEBUG_TIME_LOCKS */
#endif /* DEBUG_H */

View File

@@ -20,7 +20,6 @@ EXTERN char kernel_exception; /* TRUE after system exceptions */
EXTERN char shutdown_started; /* TRUE after shutdowns / reboots */
/* Kernel information structures. This groups vital kernel information. */
EXTERN phys_bytes aout; /* address of a.out headers */
EXTERN struct kinfo kinfo; /* kernel information for users */
EXTERN struct machine machine; /* machine information for users */
EXTERN struct kmessages kmess; /* diagnostic messages in kernel */
@@ -32,6 +31,10 @@ EXTERN struct proc *prev_ptr; /* previously running process */
EXTERN struct proc *proc_ptr; /* pointer to currently running process */
EXTERN struct proc *next_ptr; /* next process to run after restart() */
EXTERN struct proc *bill_ptr; /* process to bill for clock ticks */
EXTERN struct proc *vmrestart; /* first process on vmrestart queue */
EXTERN struct proc *vmrequest; /* first process on vmrequest queue */
EXTERN struct proc *pagefaults; /* first process on pagefault queue */
EXTERN struct proc *softnotify; /* first process on softnotify queue */
EXTERN char k_reenter; /* kernel reentry count (entry count less 1) */
EXTERN unsigned lost_ticks; /* clock ticks counted outside clock task */
@@ -75,11 +78,25 @@ EXTERN endpoint_t who_e; /* message source endpoint */
EXTERN int who_p; /* message source proc */
EXTERN int sys_call_code; /* kernel call number in SYSTEM */
EXTERN time_t boottime;
EXTERN char params_buffer[512]; /* boot monitor parameters */
EXTERN int minix_panicing;
EXTERN int locklevel;
EXTERN unsigned long cr3switch;
EXTERN unsigned long cr3reload;
/* VM */
EXTERN phys_bytes vm_base;
EXTERN phys_bytes vm_size;
EXTERN phys_bytes vm_mem_high;
EXTERN int vm_running;
EXTERN int must_notify_vm;
/* Verbose flags (debugging). */
EXTERN int verbose_vm;
/* Timing measurements. */
EXTERN struct lock_timingdata timingdata[TIMING_CATEGORIES];
/* Variables that are initialized elsewhere are just extern here. */
extern struct boot_image image[]; /* system image processes */

View File

@@ -30,7 +30,7 @@ PUBLIC void put_irq_handler( irq_hook_t* hook, int irq, irq_handler_t handler)
irq_hook_t **line;
if( irq < 0 || irq >= NR_IRQ_VECTORS )
panic("invalid call to put_irq_handler", irq);
minix_panic("invalid call to put_irq_handler", irq);
line = &irq_handlers[irq];
id = 1;
@@ -42,7 +42,7 @@ PUBLIC void put_irq_handler( irq_hook_t* hook, int irq, irq_handler_t handler)
}
if(id == 0)
panic("Too many handlers for irq", irq);
minix_panic("Too many handlers for irq", irq);
hook->next = NULL;
hook->handler = handler;
@@ -68,7 +68,7 @@ PUBLIC void rm_irq_handler( irq_hook_t* hook ) {
irq_hook_t **line;
if( irq < 0 || irq >= NR_IRQ_VECTORS )
panic("invalid call to rm_irq_handler", irq);
minix_panic("invalid call to rm_irq_handler", irq);
/* disable the irq. */
intr_mask(hook);

View File

@@ -14,10 +14,15 @@
#define RECEIVE 2 /* blocking receive */
#define SENDREC 3 /* SEND + RECEIVE */
#define NOTIFY 4 /* asynchronous notify */
#define SENDNB 5 /* nonblocking send */
#define SENDNB 5 /* nonblocking send */
#define SENDA 16 /* asynchronous send */
/* The following bit masks determine which checks should be done. */
#define CHECK_DEADLOCK 0x03 /* 0000 0011 : check for deadlock */
#define WILLRECEIVE(target, source_ep) \
((RTS_ISSET(target, RECEIVING) && !RTS_ISSET(target, SENDING)) && \
(target->p_getfrom_e == ANY || target->p_getfrom_e == source_ep))
#endif /* IPC_H */

View File

@@ -20,7 +20,6 @@
/* Prototype declarations for PRIVATE functions. */
FORWARD _PROTOTYPE( void announce, (void));
FORWARD _PROTOTYPE( void shutdown, (timer_t *));
/*===========================================================================*
* main *
@@ -115,11 +114,11 @@ PUBLIC void main()
hdrindex = 1 + i-NR_TASKS; /* servers, drivers, INIT */
}
/* The bootstrap loader created an array of the a.out headers at
* absolute address 'aout'. Get one element to e_hdr.
/* Architecture-specific way to find the a.out header of this
* boot process.
*/
phys_copy(aout + hdrindex * A_MINHDR, vir2phys(&e_hdr),
(phys_bytes) A_MINHDR);
arch_get_aout_headers(hdrindex, &e_hdr);
/* Convert addresses to clicks and build process memory map */
text_base = e_hdr.a_syms >> CLICK_SHIFT;
text_clicks = (e_hdr.a_text + CLICK_SIZE-1) >> CLICK_SHIFT;
@@ -156,8 +155,8 @@ PUBLIC void main()
}
/* Set ready. The HARDWARE task is never ready. */
if (rp->p_nr == HARDWARE) RTS_LOCK_SET(rp, NO_PRIORITY);
RTS_LOCK_UNSET(rp, SLOT_FREE); /* remove SLOT_FREE and schedule */
if (rp->p_nr == HARDWARE) RTS_SET(rp, NO_PRIORITY);
RTS_UNSET(rp, SLOT_FREE); /* remove SLOT_FREE and schedule */
/* Code and data segments must be allocated in protected mode. */
alloc_segments(rp);
@@ -170,6 +169,8 @@ PUBLIC void main()
cprof_procs_no = 0; /* init nr of hash table slots used */
#endif /* CPROFILE */
vm_running = 0;
/* MINIX is now ready. All boot image processes are on the ready queue.
* Return to the assembly code to start running the current process.
*/
@@ -203,34 +204,19 @@ int how;
register struct proc *rp;
message m;
/* Send a signal to all system processes that are still alive to inform
* them that the MINIX kernel is shutting down. A proper shutdown sequence
* should be implemented by a user-space server. This mechanism is useful
* as a backup in case of system panics, so that system processes can still
* run their shutdown code, e.g., to synchronize the FS or to let the TTY
* switch to the first console.
*/
#if DEAD_CODE
kprintf("Sending SIGKSTOP to system processes ...\n");
for (rp=BEG_PROC_ADDR; rp<END_PROC_ADDR; rp++) {
if (!isemptyp(rp) && (priv(rp)->s_flags & SYS_PROC) && !iskernelp(rp))
send_sig(proc_nr(rp), SIGKSTOP);
}
#endif
/* Continue after 1 second, to give processes a chance to get scheduled to
* do shutdown work. Set a watchdog timer to call shutdown(). The timer
* argument passes the shutdown status.
*/
kprintf("MINIX will now be shut down ...\n");
tmr_arg(&shutdown_timer)->ta_int = how;
set_timer(&shutdown_timer, get_uptime() + HZ, shutdown);
set_timer(&shutdown_timer, get_uptime() + 5*HZ, minix_shutdown);
}
/*===========================================================================*
* shutdown *
*===========================================================================*/
PRIVATE void shutdown(tp)
PUBLIC void minix_shutdown(tp)
timer_t *tp;
{
/* This function is called from prepare_shutdown or stop_sequence to bring
@@ -239,6 +225,6 @@ timer_t *tp;
*/
intr_init(INTS_ORIG);
clock_stop();
arch_shutdown(tmr_arg(tp)->ta_int);
arch_shutdown(tp ? tmr_arg(tp)->ta_int : RBT_PANIC);
}
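
The hunk above moves the shutdown delay to 5*HZ and points the watchdog timer at minix_shutdown(), with the reboot code carried in the timer argument. As a rough standalone illustration of that store-argument-then-fire pattern (toy types and names, not the real MINIX timers library):

#include <stdio.h>

/* Simplified stand-ins for the kernel timer machinery; these names and
 * fields are illustrative assumptions, not the real MINIX <timers.h>. */
typedef struct toy_timer {
    long due;                          /* uptime (in ticks) when it fires */
    void (*callback)(struct toy_timer *);
    int ta_int;                        /* argument, cf. tmr_arg(tp)->ta_int */
} toy_timer_t;

static toy_timer_t shutdown_timer;

static void toy_set_timer(toy_timer_t *tp, long due, void (*cb)(toy_timer_t *))
{
    tp->due = due;                     /* real code inserts tp into a timer list */
    tp->callback = cb;
}

/* Plays the role of minix_shutdown(): reads the reboot code from the timer. */
static void toy_shutdown(toy_timer_t *tp)
{
    int how = tp ? tp->ta_int : -1;    /* -1 stands in for RBT_PANIC */
    printf("shutting down, how=%d\n", how);
}

int main(void)
{
    long uptime = 1000, hz = 60;
    shutdown_timer.ta_int = 0;                         /* the 'how' argument */
    toy_set_timer(&shutdown_timer, uptime + 5 * hz, toy_shutdown);
    shutdown_timer.callback(&shutdown_timer);          /* a clock tick would do this */
    return 0;
}
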

View File

@@ -39,22 +39,24 @@
#include <minix/com.h>
#include <minix/callnr.h>
#include <minix/endpoint.h>
#include "debug.h"
#include "kernel.h"
#include "proc.h"
#include <stddef.h>
#include <signal.h>
#include <minix/portio.h>
#include <minix/u64.h>
#include "debug.h"
#include "kernel.h"
#include "proc.h"
#include "vm.h"
/* Scheduling and message passing functions. The functions are available to
* other parts of the kernel through lock_...(). The lock temporarily disables
* interrupts to prevent race conditions.
*/
FORWARD _PROTOTYPE( int mini_send, (struct proc *caller_ptr, int dst_e,
message *m_ptr, unsigned flags));
message *m_ptr, int flags));
FORWARD _PROTOTYPE( int mini_receive, (struct proc *caller_ptr, int src,
message *m_ptr, unsigned flags));
message *m_ptr, int flags));
FORWARD _PROTOTYPE( int mini_notify, (struct proc *caller_ptr, int dst));
FORWARD _PROTOTYPE( int mini_senda, (struct proc *caller_ptr,
asynmsg_t *table, size_t size));
@@ -62,8 +64,6 @@ FORWARD _PROTOTYPE( int deadlock, (int function,
register struct proc *caller, int src_dst));
FORWARD _PROTOTYPE( int try_async, (struct proc *caller_ptr));
FORWARD _PROTOTYPE( int try_one, (struct proc *src_ptr, struct proc *dst_ptr));
FORWARD _PROTOTYPE( void enqueue, (struct proc *rp));
FORWARD _PROTOTYPE( void dequeue, (struct proc *rp));
FORWARD _PROTOTYPE( void sched, (struct proc *rp, int *queue, int *front));
FORWARD _PROTOTYPE( void pick_proc, (void));
@@ -82,10 +82,51 @@ FORWARD _PROTOTYPE( void pick_proc, (void));
break; \
}
#define CopyMess(s,sp,sm,dp,dm) \
cp_mess(proc_addr(s)->p_endpoint, \
(sp)->p_memmap[D].mem_phys, \
(vir_bytes)sm, (dp)->p_memmap[D].mem_phys, (vir_bytes)dm)
#define CopyMess(s,sp,sm,dp,dm) do { \
vir_bytes dstlin; \
endpoint_t e = proc_addr(s)->p_endpoint; \
struct vir_addr src, dst; \
int r; \
timer_start(0, "copymess"); \
if((dstlin = umap_local((dp), D, (vir_bytes) dm, sizeof(message))) == 0){\
minix_panic("CopyMess: umap_local failed", __LINE__); \
} \
\
if(vm_running && \
(r=vm_checkrange((dp), (dp), dstlin, sizeof(message), 1, 0)) != OK) { \
if(r != VMSUSPEND) \
minix_panic("CopyMess: vm_checkrange error", __LINE__); \
(dp)->p_vmrequest.saved.msgcopy.dst = (dp); \
(dp)->p_vmrequest.saved.msgcopy.dst_v = (vir_bytes) dm; \
if(data_copy((sp)->p_endpoint, \
(vir_bytes) (sm), SYSTEM, \
(vir_bytes) &(dp)->p_vmrequest.saved.msgcopy.msgbuf, \
sizeof(message)) != OK) { \
minix_panic("CopyMess: data_copy failed", __LINE__);\
} \
(dp)->p_vmrequest.saved.msgcopy.msgbuf.m_source = e; \
(dp)->p_vmrequest.type = VMSTYPE_MSGCOPY; \
} else { \
src.proc_nr_e = (sp)->p_endpoint; \
dst.proc_nr_e = (dp)->p_endpoint; \
src.segment = dst.segment = D; \
src.offset = (vir_bytes) (sm); \
dst.offset = (vir_bytes) (dm); \
if(virtual_copy(&src, &dst, sizeof(message)) != OK) { \
kprintf("copymess: copy %d:%lx to %d:%lx failed\n",\
(sp)->p_endpoint, (sm), (dp)->p_endpoint, dm);\
minix_panic("CopyMess: virtual_copy (1) failed", __LINE__); \
} \
src.proc_nr_e = SYSTEM; \
src.offset = (vir_bytes) &e; \
if(virtual_copy(&src, &dst, sizeof(e)) != OK) { \
kprintf("copymess: copy %d:%lx to %d:%lx\n", \
(sp)->p_endpoint, (sm), (dp)->p_endpoint, dm);\
minix_panic("CopyMess: virtual_copy (2) failed", __LINE__); \
} \
} \
timer_end(0); \
} while(0)
/*===========================================================================*
* sys_call *
@@ -105,11 +146,25 @@ long bit_map; /* notification event set or flags */
int group_size; /* used for deadlock check */
int result; /* the system call's result */
int src_dst_p; /* Process slot number */
vir_clicks vlo, vhi; /* virtual clicks containing message to send */
size_t msg_size;
if (caller_ptr->p_endpoint == ipc_stats_target)
ipc_stats.total= add64u(ipc_stats.total, 1);
#if 0
if(src_dst_e != 4 && src_dst_e != 5 &&
caller_ptr->p_endpoint != 4 && caller_ptr->p_endpoint != 5) {
if(call_nr == SEND)
kprintf("(%d SEND to %d) ", caller_ptr->p_endpoint, src_dst_e);
else if(call_nr == RECEIVE)
kprintf("(%d RECEIVE from %d) ", caller_ptr->p_endpoint, src_dst_e);
else if(call_nr == SENDREC)
kprintf("(%d SENDREC to %d) ", caller_ptr->p_endpoint, src_dst_e);
else
kprintf("(%d %d to/from %d) ", caller_ptr->p_endpoint, call_nr, src_dst_e);
}
#endif
#if 1
if (RTS_ISSET(caller_ptr, SLOT_FREE))
{
@@ -122,10 +177,10 @@ long bit_map; /* notification event set or flags */
/* Check destination. SENDA is special because its argument is a table and
* not a single destination. RECEIVE is the only call that accepts ANY (in
* addition to a real endpoint). The other calls (SEND, SENDNB, SENDREC,
* addition to a real endpoint). The other calls (SEND, SENDREC,
* and NOTIFY) require an endpoint to correspond to a process. In addition,
* it is necessary to check whether a process is allow to send to a given
* destination. For SENDREC we check s_ipc_sendrec, and for SEND, SENDNB,
* it is necessary to check whether a process is allowed to send to a given
* destination. For SENDREC we check s_ipc_sendrec, and for SEND,
* and NOTIFY we check s_ipc_to.
*/
if (call_nr == SENDA)
@@ -150,7 +205,6 @@ long bit_map; /* notification event set or flags */
{
/* Require a valid source and/or destination process. */
if(!isokendpt(src_dst_e, &src_dst_p)) {
if (src_dst_e == 0) panic("sys_call: no PM", NO_NUM);
#if DEBUG_ENABLE_IPC_WARNINGS
kprintf("sys_call: trap %d by %d with bad endpoint %d\n",
call_nr, proc_nr(caller_ptr), src_dst_e);
@@ -160,7 +214,7 @@ if (src_dst_e == 0) panic("sys_call: no PM", NO_NUM);
return EDEADSRCDST;
}
/* If the call is to send to a process, i.e., for SEND, SENDNB,
/* If the call is to send to a process, i.e., for SEND,
* SENDREC or NOTIFY, verify that the caller is allowed to send to
* the given destination.
*/
@@ -224,40 +278,85 @@ if (src_dst_e == 0) panic("sys_call: no PM", NO_NUM);
if ((iskerneln(src_dst_p) && call_nr != SENDREC && call_nr != RECEIVE)) {
#if DEBUG_ENABLE_IPC_WARNINGS
kprintf("sys_call: trap %d not allowed, caller %d, src_dst %d\n",
call_nr, proc_nr(caller_ptr), src_dst);
call_nr, proc_nr(caller_ptr), src_dst_e);
#endif
if (caller_ptr->p_endpoint == ipc_stats_target)
ipc_stats.call_not_allowed++;
return(ETRAPDENIED); /* trap denied by mask or kernel */
}
/* If the call involves a message buffer, i.e., for SEND, SENDNB, SENDREC,
/* Get and check the size of the argument in bytes.
* Normally this is just the size of a regular message, but in the
* case of SENDA the argument is a table.
*/
if(call_nr == SENDA) {
msg_size = (size_t) src_dst_e;
/* Limit size to something reasonable. An arbitrary choice is 16
* times the number of process table entries.
*/
if (msg_size > 16*(NR_TASKS + NR_PROCS))
return EDOM;
msg_size *= sizeof(asynmsg_t); /* convert to bytes */
} else {
msg_size = sizeof(*m_ptr);
}
/* If the call involves a message buffer, i.e., for SEND, SENDREC,
* or RECEIVE, check the message pointer. This check allows a message to be
* anywhere in data or stack or gap. It will have to be made more elaborate
* for machines which don't have the gap mapped.
*
* We use msg_size decided above.
*/
if (call_nr == SEND || call_nr == SENDNB || call_nr == SENDREC ||
call_nr == RECEIVE) {
vlo = (vir_bytes) m_ptr >> CLICK_SHIFT;
vhi = ((vir_bytes) m_ptr + MESS_SIZE - 1) >> CLICK_SHIFT;
if (vlo < caller_ptr->p_memmap[D].mem_vir || vlo > vhi ||
vhi >= caller_ptr->p_memmap[S].mem_vir +
caller_ptr->p_memmap[S].mem_len) {
#if DEBUG_ENABLE_IPC_WARNINGS
kprintf(
"sys_call: invalid message pointer, trap %d, caller %d\n",
call_nr, proc_nr(caller_ptr));
#endif
if (caller_ptr->p_endpoint == ipc_stats_target)
ipc_stats.bad_buffer++;
return(EFAULT); /* invalid message pointer */
if (call_nr == SEND || call_nr == SENDREC ||
call_nr == RECEIVE || call_nr == SENDA || call_nr == SENDNB) {
int r;
phys_bytes lin;
/* Map to linear address. */
if((lin = umap_local(caller_ptr, D, (vir_bytes) m_ptr, msg_size)) == 0)
return EFAULT;
/* Check if message pages in calling process are mapped.
* We don't have to check the recipient if this is a send,
* because this code will do that before its receive() starts.
*
* It is important the range is verified as _writable_, because
* the kernel will want to write to the SENDA buffer in the future,
* and those pages may not be shared between processes.
*/
if(vm_running &&
(r=vm_checkrange(caller_ptr, caller_ptr, lin, msg_size, 1, 0)) != OK) {
if(r != VMSUSPEND) {
kprintf("SYSTEM:sys_call:vm_checkrange: err %d\n", r);
return r;
}
minix_panic("vmsuspend", __LINE__);
/* We can't go ahead with this call. Caller is suspended
* and we have to save the state in its process struct.
*/
caller_ptr->p_vmrequest.saved.sys_call.call_nr = call_nr;
caller_ptr->p_vmrequest.saved.sys_call.m_ptr = m_ptr;
caller_ptr->p_vmrequest.saved.sys_call.src_dst_e = src_dst_e;
caller_ptr->p_vmrequest.saved.sys_call.bit_map = bit_map;
caller_ptr->p_vmrequest.type = VMSTYPE_SYS_CALL;
kprintf("SYSTEM: %s:%d: suspending call 0x%lx on ipc buffer 0x%lx\n",
caller_ptr->p_name, caller_ptr->p_endpoint, call_nr, m_ptr);
/* vm_checkrange() will have suspended caller with VMREQUEST. */
return OK;
}
}
}
/* Check for a possible deadlock for blocking SEND(REC) and RECEIVE. */
if (call_nr == SEND || call_nr == SENDREC || call_nr == RECEIVE) {
if (group_size = deadlock(call_nr, caller_ptr, src_dst_p)) {
#if DEBUG_ENABLE_IPC_WARNINGS
#if 0
kprintf("sys_call: trap %d from %d to %d deadlocked, group size %d\n",
call_nr, proc_nr(caller_ptr), src_dst_p, group_size);
#endif
@@ -273,7 +372,6 @@ if (src_dst_e == 0) panic("sys_call: no PM", NO_NUM);
* - SEND: sender blocks until its message has been delivered
* - RECEIVE: receiver blocks until an acceptable message has arrived
* - NOTIFY: asynchronous call; deliver notification or mark pending
* - SENDNB: nonblocking send
* - SENDA: list of asynchronous send requests
*/
switch(call_nr) {
@@ -282,21 +380,21 @@ if (src_dst_e == 0) panic("sys_call: no PM", NO_NUM);
caller_ptr->p_misc_flags |= REPLY_PENDING;
/* fall through */
case SEND:
result = mini_send(caller_ptr, src_dst_e, m_ptr, 0 /*flags*/);
result = mini_send(caller_ptr, src_dst_e, m_ptr, 0);
if (call_nr == SEND || result != OK)
break; /* done, or SEND failed */
/* fall through for SENDREC */
case RECEIVE:
if (call_nr == RECEIVE)
caller_ptr->p_misc_flags &= ~REPLY_PENDING;
result = mini_receive(caller_ptr, src_dst_e, m_ptr, 0 /*flags*/);
result = mini_receive(caller_ptr, src_dst_e, m_ptr, 0);
break;
case NOTIFY:
result = mini_notify(caller_ptr, src_dst_p);
break;
case SENDNB:
result = mini_send(caller_ptr, src_dst_e, m_ptr, NON_BLOCKING);
break;
case SENDNB:
result = mini_send(caller_ptr, src_dst_e, m_ptr, NON_BLOCKING);
break;
case SENDA:
result = mini_senda(caller_ptr, (asynmsg_t *)m_ptr, (size_t)src_dst_e);
break;
@@ -325,10 +423,17 @@ int src_dst; /* src or dst process */
register struct proc *xp; /* process pointer */
int group_size = 1; /* start with only caller */
int trap_flags;
#if DEBUG_ENABLE_IPC_WARNINGS
static struct proc *processes[NR_PROCS + NR_TASKS];
processes[0] = cp;
#endif
while (src_dst != ANY) { /* check while process nr */
int src_dst_e;
xp = proc_addr(src_dst); /* follow chain of processes */
#if DEBUG_ENABLE_IPC_WARNINGS
processes[group_size] = xp;
#endif
group_size ++; /* extra process in group */
/* Check whether the last process in the chain has a dependency. If it
@@ -354,12 +459,38 @@ int src_dst; /* src or dst process */
return(0); /* not a deadlock */
}
}
#if DEBUG_ENABLE_IPC_WARNINGS
{
int i;
kprintf("deadlock between these processes:\n");
for(i = 0; i < group_size; i++) {
kprintf(" %10s ", processes[i]->p_name);
proc_stacktrace(processes[i]);
}
}
#endif
return(group_size); /* deadlock found */
}
}
return(0); /* not a deadlock */
}
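
The deadlock() check above walks the chain of processes that each party is blocked on and reports a deadlock when the chain leads back to the caller. A stripped-down sketch of that chain walk, using a toy dependency table instead of the kernel's process table (names here are illustrative only):

#include <stdio.h>

#define NPROC 4
#define NOT_BLOCKED (-1)

/* Toy dependency table: blocked_on[i] is the slot that process i waits for.
 * The kernel derives this from p_rts_flags and the send/receive targets. */
static int blocked_on[NPROC] = { 1, 2, 0, NOT_BLOCKED };

/* Return the size of the deadlock group if blocking on 'first_target'
 * would close a cycle back to 'caller', otherwise 0. */
static int deadlock_group(int caller, int first_target)
{
    int group_size = 1;                /* start with only the caller */
    int cur = first_target;

    while (cur != NOT_BLOCKED) {
        group_size++;                  /* extra process in the group */
        if (blocked_on[cur] == caller)
            return group_size;         /* chain points back: deadlock */
        cur = blocked_on[cur];         /* follow the dependency chain */
    }
    return 0;                          /* chain ended: no deadlock */
}

int main(void)
{
    /* 0 -> 1 -> 2 -> 0 is a cycle of three. */
    printf("group size: %d\n", deadlock_group(0, blocked_on[0]));
    /* 3 waits for nobody, so blocking on it is fine. */
    printf("group size: %d\n", deadlock_group(0, 3));
    return 0;
}
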
/*===========================================================================*
* sys_call_restart *
*===========================================================================*/
PUBLIC void sys_call_restart(caller)
struct proc *caller;
{
int r;
minix_panic("sys_call_restart", NO_NUM);
kprintf("restarting sys_call code 0x%lx, "
"m_ptr 0x%lx, srcdst %d, bitmap 0x%lx, but not really\n",
caller->p_vmrequest.saved.sys_call.call_nr,
caller->p_vmrequest.saved.sys_call.m_ptr,
caller->p_vmrequest.saved.sys_call.src_dst_e,
caller->p_vmrequest.saved.sys_call.bit_map);
caller->p_reg.retreg = r;
}
/*===========================================================================*
* mini_send *
@@ -368,7 +499,7 @@ PRIVATE int mini_send(caller_ptr, dst_e, m_ptr, flags)
register struct proc *caller_ptr; /* who is trying to send a message? */
int dst_e; /* to whom is message being sent? */
message *m_ptr; /* pointer to message buffer */
unsigned flags; /* system call flags */
int flags;
{
/* Send a message from 'caller_ptr' to 'dst'. If 'dst' is blocked waiting
* for this message, copy the message to it and unblock 'dst'. If 'dst' is
@@ -391,14 +522,18 @@ unsigned flags; /* system call flags */
/* Check if 'dst' is blocked waiting for this message. The destination's
* SENDING flag may be set when its SENDREC call blocked while sending.
*/
if ( (RTS_ISSET(dst_ptr, RECEIVING) && !RTS_ISSET(dst_ptr, SENDING)) &&
(dst_ptr->p_getfrom_e == ANY
|| dst_ptr->p_getfrom_e == caller_ptr->p_endpoint)) {
if (WILLRECEIVE(dst_ptr, caller_ptr->p_endpoint)) {
/* Destination is indeed waiting for this message. */
CopyMess(caller_ptr->p_nr, caller_ptr, m_ptr, dst_ptr,
dst_ptr->p_messbuf);
RTS_UNSET(dst_ptr, RECEIVING);
} else if ( ! (flags & NON_BLOCKING)) {
} else {
if(flags & NON_BLOCKING) {
if (caller_ptr->p_endpoint == ipc_stats_target)
ipc_stats.not_ready++;
return(ENOTREADY);
}
/* Destination is not waiting. Block and dequeue caller. */
caller_ptr->p_messbuf = m_ptr;
RTS_SET(caller_ptr, SENDING);
@@ -409,10 +544,6 @@ unsigned flags; /* system call flags */
while (*xpp != NIL_PROC) xpp = &(*xpp)->p_q_link;
*xpp = caller_ptr; /* add caller to end */
caller_ptr->p_q_link = NIL_PROC; /* mark new end of list */
} else {
if (caller_ptr->p_endpoint == ipc_stats_target)
ipc_stats.not_ready++;
return(ENOTREADY);
}
return(OK);
}
@@ -424,11 +555,11 @@ PRIVATE int mini_receive(caller_ptr, src_e, m_ptr, flags)
register struct proc *caller_ptr; /* process trying to get message */
int src_e; /* which message source is wanted */
message *m_ptr; /* pointer to message buffer */
unsigned flags; /* system call flags */
int flags;
{
/* A process or task wants to get a message. If a message is already queued,
* acquire it and deblock the sender. If no message from the desired source
* is available block the caller, unless the flags don't allow blocking.
* is available block the caller.
*/
register struct proc **xpp;
register struct notification **ntf_q_pp;
@@ -491,7 +622,9 @@ unsigned flags; /* system call flags */
#if 1
if (RTS_ISSET(*xpp, SLOT_FREE))
{
kprintf("listening to the dead?!?\n");
kprintf("%d: receive from %d; found dead %d (%s)?\n",
caller_ptr->p_endpoint, src_e, (*xpp)->p_endpoint,
(*xpp)->p_name);
if (caller_ptr->p_endpoint == ipc_stats_target)
ipc_stats.deadproc++;
return EINVAL;
@@ -580,6 +713,28 @@ int dst; /* which process to notify */
return(OK);
}
#define ASCOMPLAIN(caller, entry, field) \
kprintf("kernel:%s:%d: asyn failed for %s in %s " \
"(%d/%d, tab 0x%lx)\n",__FILE__,__LINE__, \
field, caller->p_name, entry, priv(caller)->s_asynsize, priv(caller)->s_asyntab)
#define A_RETRIEVE(entry, field) \
if(data_copy(caller_ptr->p_endpoint, \
table_v + (entry)*sizeof(asynmsg_t) + offsetof(struct asynmsg,field),\
SYSTEM, (vir_bytes) &tabent.field, \
sizeof(tabent.field)) != OK) {\
ASCOMPLAIN(caller_ptr, entry, #field); \
return EFAULT; \
}
#define A_INSERT(entry, field) \
if(data_copy(SYSTEM, (vir_bytes) &tabent.field, \
caller_ptr->p_endpoint, \
table_v + (entry)*sizeof(asynmsg_t) + offsetof(struct asynmsg,field),\
sizeof(tabent.field)) != OK) {\
ASCOMPLAIN(caller_ptr, entry, #field); \
return EFAULT; \
}
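
The A_RETRIEVE and A_INSERT macros above read and write single fields of the caller's asynmsg table by adding the entry index times the entry size plus the field's offsetof() to the table's base address, then copying only that field with data_copy(). A self-contained user-space sketch of the same offset arithmetic (toy types; toy_copy() stands in for the cross-endpoint data_copy()):

#include <stdio.h>
#include <string.h>
#include <stddef.h>

/* Toy stand-in for asynmsg_t; only the fields used below. */
struct toy_asynmsg {
    unsigned flags;
    int dst;
    int result;
};

/* Stand-in for data_copy(): here both "address spaces" are plain memory,
 * whereas the kernel copies between the caller's endpoint and SYSTEM. */
static void toy_copy(void *dst, const void *src, size_t n)
{
    memcpy(dst, src, n);
}

int main(void)
{
    struct toy_asynmsg table[3] = { { 1, 7, 0 }, { 0, 0, 0 }, { 1, 9, 0 } };
    unsigned char *table_v = (unsigned char *) table;   /* "user" base address */
    struct toy_asynmsg tabent;
    size_t i = 2;

    /* A_RETRIEVE-style read of one field: base + i*entry_size + field offset. */
    toy_copy(&tabent.dst,
             table_v + i * sizeof(struct toy_asynmsg)
                     + offsetof(struct toy_asynmsg, dst),
             sizeof(tabent.dst));
    printf("entry %u dst = %d\n", (unsigned) i, tabent.dst);

    /* A_INSERT-style write-back of the result field only. */
    tabent.result = 42;
    toy_copy(table_v + i * sizeof(struct toy_asynmsg)
                     + offsetof(struct toy_asynmsg, result),
             &tabent.result, sizeof(tabent.result));
    printf("entry %u result = %d\n", (unsigned) i, table[i].result);
    return 0;
}
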
/*===========================================================================*
* mini_senda *
@@ -591,11 +746,11 @@ size_t size;
{
int i, dst_p, done, do_notify;
unsigned flags;
phys_bytes tab_phys;
struct proc *dst_ptr;
struct priv *privp;
message *m_ptr;
asynmsg_t tabent;
vir_bytes table_v = (vir_bytes) table;
privp= priv(caller_ptr);
if (!(privp->s_flags & SYS_PROC))
@@ -619,6 +774,9 @@ size_t size;
/* Limit size to something reasonable. An arbitrary choice is 16
* times the number of process table entries.
*
* (this check has been duplicated in sys_call but is left here
* as a sanity check)
*/
if (size > 16*(NR_TASKS + NR_PROCS))
{
@@ -627,26 +785,14 @@ size_t size;
return EDOM;
}
/* Map table */
tab_phys= umap_local(caller_ptr, D, (vir_bytes)table,
size*sizeof(table[0]));
if (tab_phys == 0)
{
kprintf("mini_senda: got bad table pointer/size\n");
if (caller_ptr->p_endpoint == ipc_stats_target)
ipc_stats.bad_buffer++;
return EFAULT;
}
/* Scan the table */
do_notify= FALSE;
done= TRUE;
for (i= 0; i<size; i++)
{
/* Read status word */
phys_copy(tab_phys + i*sizeof(table[0]) +
offsetof(struct asynmsg, flags),
vir2phys(&tabent.flags), sizeof(tabent.flags));
A_RETRIEVE(i, flags);
flags= tabent.flags;
/* Skip empty entries */
@@ -662,28 +808,20 @@ size_t size;
return EINVAL;
}
/* Skip entry is AMF_DONE is already set */
/* Skip entry if AMF_DONE is already set */
if (flags & AMF_DONE)
continue;
/* Get destination */
phys_copy(tab_phys + i*sizeof(table[0]) +
offsetof(struct asynmsg, dst),
vir2phys(&tabent.dst), sizeof(tabent.dst));
A_RETRIEVE(i, dst);
if (!isokendpt(tabent.dst, &dst_p))
{
/* Bad destination, report the error */
tabent.result= EDEADSRCDST;
phys_copy(vir2phys(&tabent.result),
tab_phys + i*sizeof(table[0]) +
offsetof(struct asynmsg, result),
sizeof(tabent.result));
A_INSERT(i, result);
tabent.flags= flags | AMF_DONE;
phys_copy(vir2phys(&tabent.flags),
tab_phys + i*sizeof(table[0]) +
offsetof(struct asynmsg, flags),
sizeof(tabent.flags));
A_INSERT(i, flags);
if (flags & AMF_NOTIFY)
do_notify= 1;
@@ -701,15 +839,9 @@ size_t size;
if (dst_ptr->p_rts_flags & NO_ENDPOINT)
{
tabent.result= EDSTDIED;
phys_copy(vir2phys(&tabent.result),
tab_phys + i*sizeof(table[0]) +
offsetof(struct asynmsg, result),
sizeof(tabent.result));
A_INSERT(i, result);
tabent.flags= flags | AMF_DONE;
phys_copy(vir2phys(&tabent.flags),
tab_phys + i*sizeof(table[0]) +
offsetof(struct asynmsg, flags),
sizeof(tabent.flags));
A_INSERT(i, flags);
if (flags & AMF_NOTIFY)
do_notify= TRUE;
@@ -732,19 +864,12 @@ size_t size;
CopyMess(caller_ptr->p_nr, caller_ptr, m_ptr, dst_ptr,
dst_ptr->p_messbuf);
if ((dst_ptr->p_rts_flags &= ~RECEIVING) == 0)
enqueue(dst_ptr);
RTS_UNSET(dst_ptr, RECEIVING);
tabent.result= OK;
phys_copy(vir2phys(&tabent.result),
tab_phys + i*sizeof(table[0]) +
offsetof(struct asynmsg, result),
sizeof(tabent.result));
A_INSERT(i, result);
tabent.flags= flags | AMF_DONE;
phys_copy(vir2phys(&tabent.flags),
tab_phys + i*sizeof(table[0]) +
offsetof(struct asynmsg, flags),
sizeof(tabent.flags));
A_INSERT(i, flags);
if (flags & AMF_NOTIFY)
do_notify= 1;
@@ -759,11 +884,18 @@ size_t size;
}
}
if (do_notify)
kprintf("mini_senda: should notifiy caller\n");
kprintf("mini_senda: should notify caller\n");
if (!done)
{
privp->s_asyntab= (vir_bytes)table;
privp->s_asynsize= size;
#if 0
if(caller_ptr->p_endpoint > INIT_PROC_NR) {
kprintf("kernel: %s (%d) asynsend table at 0x%lx, %d\n",
caller_ptr->p_name, caller_ptr->p_endpoint,
table, size);
}
#endif
}
return OK;
}
@@ -814,38 +946,27 @@ struct proc *dst_ptr;
unsigned flags;
size_t size;
endpoint_t dst_e;
phys_bytes tab_phys;
asynmsg_t *table_ptr;
message *m_ptr;
struct priv *privp;
asynmsg_t tabent;
vir_bytes table_v;
struct proc *caller_ptr;
privp= priv(src_ptr);
size= privp->s_asynsize;
table_v = privp->s_asyntab;
caller_ptr = src_ptr;
dst_e= dst_ptr->p_endpoint;
/* Map table */
tab_phys= umap_local(src_ptr, D, privp->s_asyntab,
size*sizeof(tabent));
if (tab_phys == 0)
{
kprintf("try_one: got bad table pointer/size\n");
privp->s_asynsize= 0;
if (src_ptr->p_endpoint == ipc_stats_target)
ipc_stats.bad_buffer++;
return EFAULT;
}
/* Scan the table */
do_notify= FALSE;
done= TRUE;
for (i= 0; i<size; i++)
{
/* Read status word */
phys_copy(tab_phys + i*sizeof(tabent) +
offsetof(struct asynmsg, flags),
vir2phys(&tabent.flags), sizeof(tabent.flags));
A_RETRIEVE(i, flags);
flags= tabent.flags;
/* Skip empty entries */
@@ -877,9 +998,7 @@ struct proc *dst_ptr;
done= FALSE;
/* Get destination */
phys_copy(tab_phys + i*sizeof(tabent) +
offsetof(struct asynmsg, dst),
vir2phys(&tabent.dst), sizeof(tabent.dst));
A_RETRIEVE(i, dst);
if (tabent.dst != dst_e)
{
@@ -895,15 +1014,9 @@ struct proc *dst_ptr;
dst_ptr->p_messbuf);
tabent.result= OK;
phys_copy(vir2phys(&tabent.result),
tab_phys + i*sizeof(tabent) +
offsetof(struct asynmsg, result),
sizeof(tabent.result));
A_INSERT(i, result);
tabent.flags= flags | AMF_DONE;
phys_copy(vir2phys(&tabent.flags),
tab_phys + i*sizeof(tabent) +
offsetof(struct asynmsg, flags),
sizeof(tabent.flags));
A_INSERT(i, flags);
if (flags & AMF_NOTIFY)
{
@@ -941,17 +1054,54 @@ int dst_e; /* (endpoint) who is to be notified */
/* Call from task level, locking is required. */
else {
lock(0, "notify");
lock;
result = mini_notify(proc_addr(src), dst);
unlock(0);
unlock;
}
return(result);
}
/*===========================================================================*
* soft_notify *
*===========================================================================*/
PUBLIC int soft_notify(dst_e)
int dst_e; /* (endpoint) who is to be notified */
{
int dst, u = 0;
struct proc *dstp, *sys = proc_addr(SYSTEM);
/* Delayed interface to notify() from SYSTEM that is safe/easy to call
* from more places than notify().
*/
if(!intr_disabled()) { lock; u = 1; }
{
if(!isokendpt(dst_e, &dst))
minix_panic("soft_notify to dead ep", dst_e);
dstp = proc_addr(dst);
if(!dstp->p_softnotified) {
dstp->next_soft_notify = softnotify;
softnotify = dstp;
dstp->p_softnotified = 1;
if (RTS_ISSET(sys, RECEIVING)) {
sys->p_messbuf->m_source = SYSTEM;
RTS_UNSET(sys, RECEIVING);
}
}
}
if(u) { unlock; }
return OK;
}
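
soft_notify() above only links the target onto the softnotify chain, marking it so duplicates are suppressed; the actual notification is delivered later when the system task drains the chain (see softnotify_check() further down). A standalone sketch of that defer-then-drain pattern, with toy types and printf() in place of lock_notify():

#include <stdio.h>
#include <stddef.h>

/* Toy analogue of the next_soft_notify / p_softnotified fields. */
struct toy_proc {
    const char *name;
    struct toy_proc *next_pending;
    int pending;                       /* already on the chain? */
};

static struct toy_proc *pending_head;  /* analogue of 'softnotify' */

/* Cheap to call from awkward contexts: just chain the target once. */
static void toy_soft_notify(struct toy_proc *p)
{
    if (p->pending)
        return;                        /* duplicate, already queued */
    p->next_pending = pending_head;
    pending_head = p;
    p->pending = 1;
}

/* Later, from a safe place (cf. softnotify_check()), drain and deliver. */
static void toy_drain(void)
{
    struct toy_proc *p, *next;
    for (p = pending_head; p != NULL; p = next) {
        next = p->next_pending;
        p->next_pending = NULL;
        p->pending = 0;
        printf("notify %s\n", p->name); /* kernel calls lock_notify() here */
    }
    pending_head = NULL;
}

int main(void)
{
    struct toy_proc a = { "pm", NULL, 0 }, b = { "vfs", NULL, 0 };
    toy_soft_notify(&a);
    toy_soft_notify(&b);
    toy_soft_notify(&a);               /* suppressed: already pending */
    toy_drain();                       /* prints vfs, then pm */
    return 0;
}
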
/*===========================================================================*
* enqueue *
*===========================================================================*/
PRIVATE void enqueue(rp)
PUBLIC void enqueue(rp)
register struct proc *rp; /* this process is now runnable */
{
/* Add 'rp' to one of the queues of runnable processes. This function is
@@ -963,8 +1113,9 @@ register struct proc *rp; /* this process is now runnable */
int front; /* add to front or back */
#if DEBUG_SCHED_CHECK
check_runqueues("enqueue1");
if (rp->p_ready) kprintf("enqueue() already ready process\n");
if(!intr_disabled()) { minix_panic("enqueue with interrupts enabled", NO_NUM); }
CHECK_RUNQUEUES;
if (rp->p_ready) minix_panic("enqueue already ready process", NO_NUM);
#endif
/* Determine where to insert the process. */
@@ -996,14 +1147,14 @@ register struct proc *rp; /* this process is now runnable */
#if DEBUG_SCHED_CHECK
rp->p_ready = 1;
check_runqueues("enqueue2");
CHECK_RUNQUEUES;
#endif
}
/*===========================================================================*
* dequeue *
*===========================================================================*/
PRIVATE void dequeue(rp)
PUBLIC void dequeue(rp)
register struct proc *rp; /* this process is no longer runnable */
{
/* A process must be removed from the scheduling queues, for example, because
@@ -1017,12 +1168,13 @@ register struct proc *rp; /* this process is no longer runnable */
/* Side-effect for kernel: check if the task's stack still is ok? */
if (iskernelp(rp)) {
if (*priv(rp)->s_stack_guard != STACK_GUARD)
panic("stack overrun by task", proc_nr(rp));
minix_panic("stack overrun by task", proc_nr(rp));
}
#if DEBUG_SCHED_CHECK
check_runqueues("dequeue1");
if (! rp->p_ready) kprintf("dequeue() already unready process\n");
CHECK_RUNQUEUES;
if(!intr_disabled()) { minix_panic("dequeue with interrupts enabled", NO_NUM); }
if (! rp->p_ready) minix_panic("dequeue() already unready process", NO_NUM);
#endif
/* Now make sure that the process is not in its ready queue. Remove the
@@ -1045,7 +1197,7 @@ register struct proc *rp; /* this process is no longer runnable */
#if DEBUG_SCHED_CHECK
rp->p_ready = 0;
check_runqueues("dequeue2");
CHECK_RUNQUEUES;
#endif
}
@@ -1101,12 +1253,16 @@ PRIVATE void pick_proc()
for (q=0; q < NR_SCHED_QUEUES; q++) {
if ( (rp = rdy_head[q]) != NIL_PROC) {
next_ptr = rp; /* run process 'rp' next */
#if 0
if(rp->p_endpoint != 4 && rp->p_endpoint != 5 && rp->p_endpoint != IDLE && rp->p_endpoint != SYSTEM)
kprintf("[run %s]", rp->p_name);
#endif
if (priv(rp)->s_flags & BILLABLE)
bill_ptr = rp; /* bill for system time */
return;
}
}
panic("no ready process", NO_NUM);
minix_panic("no ready process", NO_NUM);
}
/*===========================================================================*
@@ -1127,7 +1283,7 @@ timer_t *tp; /* watchdog timer pointer */
for (rp=BEG_PROC_ADDR; rp<END_PROC_ADDR; rp++) {
if (! isemptyp(rp)) { /* check slot use */
lock(5,"balance_queues");
lock;
if (rp->p_priority > rp->p_max_priority) { /* update priority? */
if (rp->p_rts_flags == 0) dequeue(rp); /* take off queue */
ticks_added += rp->p_quantum_size; /* do accounting */
@@ -1138,7 +1294,7 @@ timer_t *tp; /* watchdog timer pointer */
ticks_added += rp->p_quantum_size - rp->p_ticks_left;
rp->p_ticks_left = rp->p_quantum_size; /* give new quantum */
}
unlock(5);
unlock;
}
}
#if DEBUG
@@ -1161,9 +1317,9 @@ message *m_ptr; /* pointer to message buffer */
{
/* Safe gateway to mini_send() for tasks. */
int result;
lock(2, "send");
result = mini_send(proc_ptr, dst_e, m_ptr, NON_BLOCKING);
unlock(2);
lock;
result = mini_send(proc_ptr, dst_e, m_ptr, 0);
unlock;
return(result);
}
@@ -1174,9 +1330,9 @@ PUBLIC void lock_enqueue(rp)
struct proc *rp; /* this process is now runnable */
{
/* Safe gateway to enqueue() for tasks. */
lock(3, "enqueue");
lock;
enqueue(rp);
unlock(3);
unlock;
}
/*===========================================================================*
@@ -1192,12 +1348,24 @@ struct proc *rp; /* this process is no longer runnable */
*/
dequeue(rp);
} else {
lock(4, "dequeue");
lock;
dequeue(rp);
unlock(4);
unlock;
}
}
/*===========================================================================*
* endpoint_lookup *
*===========================================================================*/
PUBLIC struct proc *endpoint_lookup(endpoint_t e)
{
int n;
if(!isokendpt(e, &n)) return NULL;
return proc_addr(n);
}
/*===========================================================================*
* isokendpt_f *
*===========================================================================*/
@@ -1228,22 +1396,29 @@ int *p, fatalflag;
*p = _ENDPOINT_P(e);
if(!isokprocn(*p)) {
#if DEBUG_ENABLE_IPC_WARNINGS
#if 0
kprintf("kernel:%s:%d: bad endpoint %d: proc %d out of range\n",
file, line, e, *p);
#endif
#endif
} else if(isemptyn(*p)) {
#if DEBUG_ENABLE_IPC_WARNINGS
#if 0
kprintf("kernel:%s:%d: bad endpoint %d: proc %d empty\n", file, line, e, *p);
#endif
#endif
} else if(proc_addr(*p)->p_endpoint != e) {
#if DEBUG_ENABLE_IPC_WARNINGS
#if 0
kprintf("kernel:%s:%d: bad endpoint %d: proc %d has ept %d (generation %d vs. %d)\n", file, line,
e, *p, proc_addr(*p)->p_endpoint,
_ENDPOINT_G(e), _ENDPOINT_G(proc_addr(*p)->p_endpoint));
#endif
#endif
} else ok = 1;
if(!ok && fatalflag) {
panic("invalid endpoint ", e);
minix_panic("invalid endpoint ", e);
}
return ok;
}

View File

@@ -27,6 +27,8 @@ struct proc {
char p_quantum_size; /* quantum size in ticks */
struct mem_map p_memmap[NR_LOCAL_SEGS]; /* memory map (T, D, S) */
struct pagefault p_pagefault; /* valid if PAGEFAULT in p_rts_flags set */
struct proc *p_nextpagefault; /* next on PAGEFAULT chain */
clock_t p_user_time; /* user time in ticks */
clock_t p_sys_time; /* sys time in ticks */
@@ -44,6 +46,62 @@ struct proc {
endpoint_t p_endpoint; /* endpoint number, generation-aware */
/* If handler functions detect a process wants to do something with
* memory that isn't present, VM has to fix it. Until it has asked
* what needs to be done and fixed it, save necessary state here.
*
* The requester gets a copy of its request message in reqmsg and gets
* VMREQUEST set.
*/
struct {
struct proc *nextrestart; /* next in vmrestart chain */
struct proc *nextrequestor; /* next in vmrequest chain */
#define VMSTYPE_SYS_NONE 0
#define VMSTYPE_SYS_MESSAGE 1
#define VMSTYPE_SYS_CALL 2
#define VMSTYPE_MSGCOPY 3
int type; /* suspended operation */
union {
/* VMSTYPE_SYS_MESSAGE */
message reqmsg; /* suspended request message */
/* VMSTYPE_SYS_CALL */
struct {
int call_nr;
message *m_ptr;
int src_dst_e;
long bit_map;
} sys_call;
/* VMSTYPE_MSGCOPY */
struct {
struct proc *dst;
vir_bytes dst_v;
message msgbuf;
} msgcopy;
} saved;
/* Parameters of request to VM */
vir_bytes start, length; /* memory range */
u8_t writeflag; /* nonzero for write access */
endpoint_t who;
/* VM result when available */
int vmresult;
/* Target gets this set. (But caller and target can be
* the same, so we can't put this in the 'saved' union.)
*/
struct proc *requestor;
/* If the suspended operation is a sys_call, its details are
* stored here.
*/
} p_vmrequest;
struct proc *next_soft_notify;
int p_softnotified;
#if DEBUG_SCHED_CHECK
int p_ready, p_found;
#endif
@@ -59,6 +117,9 @@ struct proc {
#define P_STOP 0x40 /* set when process is being traced */
#define NO_PRIV 0x80 /* keep forked system process from running */
#define NO_ENDPOINT 0x100 /* process cannot send or receive messages */
#define VMINHIBIT 0x200 /* not scheduled until released by VM */
#define PAGEFAULT 0x400 /* process has unhandled pagefault */
#define VMREQUEST 0x800 /* originator of vm memory request */
/* These runtime flags can be tested and manipulated by these macros. */
@@ -108,6 +169,7 @@ struct proc {
#define REPLY_PENDING 0x01 /* reply to IPC_REQUEST is pending */
#define MF_VM 0x08 /* process uses VM */
#define MF_ASYNMSG 0x10 /* Asynchronous message pending */
#define MF_FULLVM 0x20
/* Scheduling priorities for p_priority. Values must start at zero (highest
* priority) and increment. Priorities of the processes in the boot image

View File

@@ -98,14 +98,14 @@ irq_hook_t *hook;
/* Note: k_reenter is always 0 here. */
/* Store sample (process name and program counter). */
phys_copy(vir2phys(proc_ptr->p_name),
(phys_bytes) (sprof_data_addr + sprof_info.mem_used),
(phys_bytes) strlen(proc_ptr->p_name));
data_copy(SYSTEM, (vir_bytes) proc_ptr->p_name,
sprof_ep, sprof_data_addr_vir + sprof_info.mem_used,
strlen(proc_ptr->p_name));
phys_copy(vir2phys(&proc_ptr->p_reg.pc),
(phys_bytes) (sprof_data_addr+sprof_info.mem_used +
data_copy(SYSTEM, (vir_bytes) &proc_ptr->p_reg.pc, sprof_ep,
(vir_bytes) (sprof_data_addr_vir + sprof_info.mem_used +
sizeof(proc_ptr->p_name)),
(phys_bytes) sizeof(proc_ptr->p_reg.pc));
(vir_bytes) sizeof(proc_ptr->p_reg.pc));
sprof_info.mem_used += sizeof(sprof_sample);
@@ -159,26 +159,20 @@ PUBLIC void profile_register(ctl_ptr, tbl_ptr)
void *ctl_ptr;
void *tbl_ptr;
{
int len, proc_nr;
int proc_nr;
vir_bytes vir_dst;
struct proc *rp;
if(cprof_procs_no >= NR_SYS_PROCS)
return;
/* Store process name, control struct, table locations. */
proc_nr = KERNEL;
rp = proc_addr(proc_nr);
rp = proc_addr(SYSTEM);
cprof_proc_info[cprof_procs_no].endpt = rp->p_endpoint;
cprof_proc_info[cprof_procs_no].name = rp->p_name;
len = (phys_bytes) sizeof (void *);
vir_dst = (vir_bytes) ctl_ptr;
cprof_proc_info[cprof_procs_no].ctl =
numap_local(proc_nr, vir_dst, len);
vir_dst = (vir_bytes) tbl_ptr;
cprof_proc_info[cprof_procs_no].buf =
numap_local(proc_nr, vir_dst, len);
cprof_proc_info[cprof_procs_no].ctl_v = (vir_bytes) ctl_ptr;
cprof_proc_info[cprof_procs_no].buf_v = (vir_bytes) tbl_ptr;
cprof_procs_no++;
}

View File

@@ -10,8 +10,8 @@
EXTERN int sprofiling; /* whether profiling is running */
EXTERN int sprof_mem_size; /* available user memory for data */
EXTERN struct sprof_info_s sprof_info; /* profiling info for user program */
EXTERN phys_bytes sprof_data_addr; /* user address to write data */
EXTERN phys_bytes sprof_info_addr; /* user address to write info struct */
EXTERN vir_bytes sprof_data_addr_vir; /* user address to write data */
EXTERN endpoint_t sprof_ep; /* user process */
#endif /* SPROFILE */
@@ -20,14 +20,12 @@ EXTERN phys_bytes sprof_info_addr; /* user address to write info struct */
EXTERN int cprof_mem_size; /* available user memory for data */
EXTERN struct cprof_info_s cprof_info; /* profiling info for user program */
EXTERN phys_bytes cprof_data_addr; /* user address to write data */
EXTERN phys_bytes cprof_info_addr; /* user address to write info struct */
EXTERN int cprof_procs_no; /* number of profiled processes */
EXTERN struct cprof_proc_info_s { /* info about profiled process */
int endpt; /* endpoint */
endpoint_t endpt; /* endpoint */
char *name; /* name */
phys_bytes ctl; /* location of control struct */
phys_bytes buf; /* location of buffer */
vir_bytes ctl_v; /* location of control struct */
vir_bytes buf_v; /* location of buffer */
int slots_used; /* table slots used */
} cprof_proc_info_inst;
EXTERN struct cprof_proc_info_s cprof_proc_info[NR_SYS_PROCS];

View File

@@ -5,6 +5,7 @@
#include <minix/safecopies.h>
#include <archtypes.h>
#include <a.out.h>
/* Struct declarations. */
struct proc;
@@ -20,20 +21,26 @@ _PROTOTYPE( void ser_dump_proc, (void) );
/* main.c */
_PROTOTYPE( void main, (void) );
_PROTOTYPE( void prepare_shutdown, (int how) );
_PROTOTYPE( void minix_shutdown, (struct timer *tp) );
_PROTOTYPE( void idle_task, (void) );
/* utility.c */
_PROTOTYPE( int kprintf, (const char *fmt, ...) );
_PROTOTYPE( void panic, (_CONST char *s, int n) );
_PROTOTYPE( void minix_panic, (char *s, int n) );
/* proc.c */
_PROTOTYPE( int sys_call, (int call_nr, int src_dst,
message *m_ptr, long bit_map) );
_PROTOTYPE( void sys_call_restart, (struct proc *caller) );
_PROTOTYPE( int lock_notify, (int src, int dst) );
_PROTOTYPE( int soft_notify, (int dst) );
_PROTOTYPE( int lock_send, (int dst, message *m_ptr) );
_PROTOTYPE( void lock_enqueue, (struct proc *rp) );
_PROTOTYPE( void lock_dequeue, (struct proc *rp) );
_PROTOTYPE( void enqueue, (struct proc *rp) );
_PROTOTYPE( void dequeue, (struct proc *rp) );
_PROTOTYPE( void balance_queues, (struct timer *tp) );
_PROTOTYPE( struct proc *endpoint_lookup, (endpoint_t ep) );
#if DEBUG_ENABLE_IPC_WARNINGS
_PROTOTYPE( int isokendpt_f, (char *file, int line, endpoint_t e, int *p, int f));
#define isokendpt_d(e, p, f) isokendpt_f(__FILE__, __LINE__, (e), (p), (f))
@@ -52,17 +59,16 @@ _PROTOTYPE( void send_sig, (int proc_nr, int sig_nr) );
_PROTOTYPE( void cause_sig, (int proc_nr, int sig_nr) );
_PROTOTYPE( void sys_task, (void) );
_PROTOTYPE( void get_randomness, (int source) );
_PROTOTYPE( int virtual_copy, (struct vir_addr *src, struct vir_addr *dst,
vir_bytes bytes) );
#define numap_local(proc_nr, vir_addr, bytes) \
umap_local(proc_addr(proc_nr), D, (vir_addr), (bytes))
_PROTOTYPE( phys_bytes umap_grant, (struct proc *, cp_grant_id_t,
vir_bytes));
_PROTOTYPE( phys_bytes umap_verify_grant, (struct proc *, endpoint_t,
cp_grant_id_t, vir_bytes, vir_bytes, int));
_PROTOTYPE( vir_bytes vir_verify_grant, (struct proc *, endpoint_t,
cp_grant_id_t, vir_bytes, vir_bytes, int, endpoint_t *));
_PROTOTYPE( void clear_endpoint, (struct proc *rc) );
_PROTOTYPE( phys_bytes umap_bios, (struct proc *rp, vir_bytes vir_addr,
vir_bytes bytes));
_PROTOTYPE( phys_bytes umap_bios, (vir_bytes vir_addr, vir_bytes bytes));
_PROTOTYPE( phys_bytes umap_verify_grant, (struct proc *rp, endpoint_t grantee, cp_grant_id_t grant, vir_bytes offset, vir_bytes bytes, int access));
/* system/do_newmap.c */
_PROTOTYPE( int newmap, (struct proc *rp, struct mem_map *map_ptr) );
@@ -79,11 +85,11 @@ _PROTOTYPE( void cons_seth, (int pos, int n) );
/* debug.c */
#if DEBUG_SCHED_CHECK
_PROTOTYPE( void check_runqueues, (char *when) );
#define CHECK_RUNQUEUES check_runqueues_f(__FILE__, __LINE__)
_PROTOTYPE( void check_runqueues_f, (char *file, int line) );
#endif
/* system/do_vm.c */
_PROTOTYPE( void vm_map_default, (struct proc *pp) );
_PROTOTYPE( void timer_start, (int cat, char *name) );
_PROTOTYPE( void timer_end, (int cat) );
/* system/do_safecopy.c */
_PROTOTYPE( int verify_grant, (endpoint_t, endpoint_t, cp_grant_id_t, vir_bytes,
@@ -98,15 +104,27 @@ _PROTOTYPE( void stop_profile_clock, (void) );
/* functions defined in architecture-dependent files. */
_PROTOTYPE( void phys_copy, (phys_bytes source, phys_bytes dest,
phys_bytes count) );
#define virtual_copy(src, dst, bytes) virtual_copy_f(src, dst, bytes, 0)
#define virtual_copy_vmcheck(src, dst, bytes) virtual_copy_f(src, dst, bytes, 1)
_PROTOTYPE( int virtual_copy_f, (struct vir_addr *src, struct vir_addr *dst,
vir_bytes bytes, int vmcheck) );
_PROTOTYPE( int data_copy, (endpoint_t from, vir_bytes from_addr,
endpoint_t to, vir_bytes to_addr, size_t bytes));
#define data_copy_to(d, p, v, n) data_copy(SYSTEM, (d), (p), (v), (n));
#define data_copy_from(d, p, v, n) data_copy((p), (v), SYSTEM, (d), (n));
_PROTOTYPE( void alloc_segments, (struct proc *rp) );
_PROTOTYPE( void vm_init, (void) );
_PROTOTYPE( void vm_map_range, (u32_t base, u32_t size, u32_t offset) );
_PROTOTYPE( int vm_copy, (vir_bytes src, struct proc *srcproc,
vir_bytes dst, struct proc *dstproc, phys_bytes bytes));
_PROTOTYPE( phys_bytes umap_local, (register struct proc *rp, int seg,
vir_bytes vir_addr, vir_bytes bytes));
_PROTOTYPE( void cp_mess, (int src,phys_clicks src_clicks,
vir_bytes src_offset, phys_clicks dst_clicks, vir_bytes dst_offset));
_PROTOTYPE( phys_bytes umap_remote, (struct proc* rp, int seg,
vir_bytes vir_addr, vir_bytes bytes) );
_PROTOTYPE( phys_bytes umap_virtual, (struct proc* rp, int seg,
vir_bytes vir_addr, vir_bytes bytes) );
_PROTOTYPE( phys_bytes seg2phys, (U16_t) );
_PROTOTYPE( void phys_memset, (phys_bytes source, unsigned long pattern,
phys_bytes count) );
@@ -123,6 +141,7 @@ _PROTOTYPE( void idle_task, (void) );
_PROTOTYPE( void system_init, (void) );
_PROTOTYPE( void ser_putc, (char) );
_PROTOTYPE( void arch_shutdown, (int) );
_PROTOTYPE( void arch_get_aout_headers, (int i, struct exec *h) );
_PROTOTYPE( void restart, (void) );
_PROTOTYPE( void idle_task, (void) );
_PROTOTYPE( void read_tsc, (unsigned long *high, unsigned long *low) );
@@ -130,5 +149,16 @@ _PROTOTYPE( int arch_init_profile_clock, (u32_t freq) );
_PROTOTYPE( void arch_stop_profile_clock, (void) );
_PROTOTYPE( void arch_ack_profile_clock, (void) );
_PROTOTYPE( void do_ser_debug, (void) );
_PROTOTYPE( int arch_get_params, (char *parm, int max));
_PROTOTYPE( int arch_set_params, (char *parm, int max));
_PROTOTYPE( int arch_pre_exec, (struct proc *pr, u32_t, u32_t));
_PROTOTYPE( int arch_umap, (struct proc *pr, vir_bytes, vir_bytes,
int, phys_bytes *));
_PROTOTYPE( int arch_do_vmctl, (message *m_ptr, struct proc *p));
_PROTOTYPE( int vm_contiguous, (struct proc *targetproc, u32_t vir_buf, size_t count));
_PROTOTYPE( int vm_checkrange, (struct proc *caller, struct proc *target,
vir_bytes start, vir_bytes length, int writeflag, int checkonly));
_PROTOTYPE( void proc_stacktrace, (struct proc *proc) );
_PROTOTYPE( int vm_lookup, (struct proc *proc, vir_bytes virtual, vir_bytes *result, u32_t *ptent));
#endif /* PROTO_H */

View File

@@ -7,8 +7,6 @@
#include <string.h>
#include <archconst.h>
PRIVATE char params[K_PARAM_SIZE];
FORWARD _PROTOTYPE( char *get_value, (_CONST char *params, _CONST char *key));
/*===========================================================================*
* cstart *
@@ -21,7 +19,6 @@ U16_t parmoff, parmsize; /* boot parameters offset and length */
/* Perform system initializations prior to calling main(). Most settings are
* determined with help of the environment strings passed by MINIX' loader.
*/
char params[128*sizeof(char *)]; /* boot monitor parameters */
register char *value; /* value in key=value pair */
extern int etext, end;
int h;
@@ -36,10 +33,7 @@ U16_t parmoff, parmsize; /* boot parameters offset and length */
system_init();
/* Copy the boot parameters to the local buffer. */
kinfo.params_base = seg2phys(mds) + parmoff;
kinfo.params_size = MIN(parmsize,sizeof(params)-2);
phys_copy(kinfo.params_base,
vir2phys(params), kinfo.params_size);
arch_get_params(params_buffer, sizeof(params_buffer));
/* Record miscellaneous information for user-space servers. */
kinfo.nr_procs = NR_PROCS;
@@ -49,8 +43,6 @@ U16_t parmoff, parmsize; /* boot parameters offset and length */
strncpy(kinfo.version, OS_VERSION, sizeof(kinfo.version));
kinfo.version[sizeof(kinfo.version)-1] = '\0';
kinfo.proc_addr = (vir_bytes) proc;
kinfo.kmem_base = vir2phys(0);
kinfo.kmem_size = (phys_bytes) &end;
/* Load average data initialization. */
kloadinfo.proc_last_slot = 0;
@@ -58,10 +50,10 @@ U16_t parmoff, parmsize; /* boot parameters offset and length */
kloadinfo.proc_load_history[h] = 0;
/* Processor? Decide if mode is protected for older machines. */
machine.processor=atoi(get_value(params, "processor"));
machine.processor=atoi(get_value(params_buffer, "processor"));
/* XT, AT or MCA bus? */
value = get_value(params, "bus");
value = get_value(params_buffer, "bus");
if (value == NIL_PTR || strcmp(value, "at") == 0) {
machine.pc_at = TRUE; /* PC-AT compatible hardware */
} else if (strcmp(value, "mca") == 0) {
@@ -69,7 +61,7 @@ U16_t parmoff, parmsize; /* boot parameters offset and length */
}
/* Type of VDU: */
value = get_value(params, "video"); /* EGA or VGA video unit */
value = get_value(params_buffer, "video"); /* EGA or VGA video unit */
if (strcmp(value, "ega") == 0) machine.vdu_ega = TRUE;
if (strcmp(value, "vga") == 0) machine.vdu_vga = machine.vdu_ega = TRUE;

View File

@@ -15,7 +15,6 @@
* send_sig: send a signal directly to a system process
* cause_sig: take action to cause a signal to occur via PM
* umap_bios: map virtual address in BIOS_SEG to physical
* virtual_copy: copy bytes from one virtual address to another
* get_randomness: accumulate randomness in a buffer
* clear_endpoint: remove a process' ability to send and receive messages
*
@@ -31,13 +30,16 @@
#include "kernel.h"
#include "system.h"
#include "proc.h"
#include "vm.h"
#include <stdlib.h>
#include <signal.h>
#include <unistd.h>
#include <string.h>
#include <sys/sigcontext.h>
#include <minix/endpoint.h>
#include <minix/safecopies.h>
#include <minix/u64.h>
#include <sys/vm_i386.h>
/* Declaration of the call vector that defines the mapping of system calls
* to handler functions. The vector is initialized in sys_init() with map(),
@@ -46,12 +48,16 @@
* array size will be negative and this won't compile.
*/
PUBLIC int (*call_vec[NR_SYS_CALLS])(message *m_ptr);
char *callnames[NR_SYS_CALLS];
#define map(call_nr, handler) \
{extern int dummy[NR_SYS_CALLS>(unsigned)(call_nr-KERNEL_CALL) ? 1:-1];} \
callnames[(call_nr-KERNEL_CALL)] = #call_nr; \
call_vec[(call_nr-KERNEL_CALL)] = (handler)
FORWARD _PROTOTYPE( void initialize, (void));
FORWARD _PROTOTYPE( void softnotify_check, (void));
FORWARD _PROTOTYPE( struct proc *vmrestart_check, (message *));
/*===========================================================================*
* sys_task *
@@ -64,14 +70,39 @@ PUBLIC void sys_task()
register struct proc *caller_ptr;
int s;
int call_nr;
int n = 0;
/* Initialize the system task. */
initialize();
while (TRUE) {
int r;
/* Get work. Block and wait until a request message arrives. */
if((r=receive(ANY, &m)) != OK) panic("system: receive() failed", r);
struct proc *restarting;
#if 0
if(!(n++ % 100000)) {
int i;
kprintf("switch %8d reload %8d\n", cr3switch, cr3reload);
}
#endif
restarting = vmrestart_check(&m);
softnotify_check();
if(softnotify)
minix_panic("softnotify non-NULL before receive (1)", NO_NUM);
if(!restarting) {
int r;
/* Get work. Block and wait until a request message arrives. */
if(softnotify)
minix_panic("softnotify non-NULL before receive (2)", NO_NUM);
if((r=receive(ANY, &m)) != OK)
minix_panic("receive() failed", r);
if(m.m_source == SYSTEM)
continue;
if(softnotify)
minix_panic("softnotify non-NULL after receive", NO_NUM);
}
sys_call_code = (unsigned) m.m_type;
call_nr = sys_call_code - KERNEL_CALL;
who_e = m.m_source;
@@ -115,16 +146,34 @@ PUBLIC void sys_task()
result = (*call_vec[call_nr])(&m); /* handle the system call */
}
/* Send a reply, unless inhibited by a handler function. Use the kernel
* function lock_send() to prevent a system call trap. The destination
* is known to be blocked waiting for a message.
*/
if (result != EDONTREPLY) {
m.m_type = result; /* report status of call */
if (OK != (s=lock_send(m.m_source, &m))) {
kprintf("SYSTEM, reply to %d failed: %d\n", m.m_source, s);
}
}
if(result == VMSUSPEND) {
/* Special case: message has to be saved for handling
* until VM tells us it's allowed. VM has been notified
* and we must wait for its reply to restart the call.
*/
memcpy(&caller_ptr->p_vmrequest.saved.reqmsg, &m, sizeof(m));
caller_ptr->p_vmrequest.type = VMSTYPE_SYS_MESSAGE;
#if 0
kprintf("SYSTEM: suspending call from %d\n", m.m_source);
#endif
} else if (result != EDONTREPLY) {
/* Send a reply, unless inhibited by a handler function.
* Use the kernel function lock_send() to prevent a system
* call trap.
*/
if(restarting)
RTS_LOCK_UNSET(restarting, VMREQUEST);
m.m_type = result; /* report status of call */
if(WILLRECEIVE(caller_ptr, SYSTEM)) {
if (OK != (s=lock_send(m.m_source, &m))) {
kprintf("SYSTEM, reply to %d failed: %d\n",
m.m_source, s);
}
} else {
kprintf("SYSTEM: not replying to %d; not ready\n",
caller_ptr->p_endpoint);
}
}
}
}
@@ -153,6 +202,7 @@ PRIVATE void initialize(void)
*/
for (i=0; i<NR_SYS_CALLS; i++) {
call_vec[i] = do_unused;
callnames[i] = "unused";
}
/* Process management. */
@@ -181,14 +231,14 @@ PRIVATE void initialize(void)
map(SYS_SEGCTL, do_segctl); /* add segment and get selector */
map(SYS_MEMSET, do_memset); /* write char to memory area */
map(SYS_VM_SETBUF, do_vm_setbuf); /* PM passes buffer for page tables */
map(SYS_VM_MAP, do_vm_map); /* Map/unmap physical (device) memory */
map(SYS_VMCTL, do_vmctl); /* various VM process settings */
/* Copying. */
map(SYS_UMAP, do_umap); /* map virtual to physical address */
map(SYS_VIRCOPY, do_vircopy); /* use pure virtual addressing */
map(SYS_PHYSCOPY, do_physcopy); /* use physical addressing */
map(SYS_PHYSCOPY, do_copy); /* use physical addressing */
map(SYS_VIRVCOPY, do_virvcopy); /* vector with copy requests */
map(SYS_PHYSVCOPY, do_physvcopy); /* vector with copy requests */
map(SYS_PHYSVCOPY, do_vcopy); /* vector with copy requests */
map(SYS_SAFECOPYFROM, do_safecopy); /* copy with pre-granted permission */
map(SYS_SAFECOPYTO, do_safecopy); /* copy with pre-granted permission */
map(SYS_VSAFECOPY, do_vsafecopy); /* vectored safecopy */
@@ -279,19 +329,16 @@ PUBLIC void send_sig(int proc_nr, int sig_nr)
/* Notify a system process about a signal. This is straightforward. Simply
* set the signal that is to be delivered in the pending signals map and
* send a notification with source SYSTEM.
*
* Process number is verified to avoid writing in random places, but we
* don't kprintf() or panic() because that causes send_sig() invocations.
*/
register struct proc *rp;
static int n;
if(!isokprocn(proc_nr) || isemptyn(proc_nr))
return;
minix_panic("send_sig to empty process", proc_nr);
rp = proc_addr(proc_nr);
sigaddset(&priv(rp)->s_sig_pending, sig_nr);
lock_notify(SYSTEM, rp->p_endpoint);
soft_notify(rp->p_endpoint);
}
/*===========================================================================*
@@ -317,7 +364,7 @@ int sig_nr; /* signal to be sent, 1 to _NSIG */
register struct proc *rp;
if (proc_nr == PM_PROC_NR)
panic("cause_sig: PM gets signal", NO_NUM);
minix_panic("cause_sig: PM gets signal", NO_NUM);
/* Check if the signal is already pending. Process it otherwise. */
rp = proc_addr(proc_nr);
@@ -335,8 +382,7 @@ int sig_nr; /* signal to be sent, 1 to _NSIG */
/*===========================================================================*
* umap_bios *
*===========================================================================*/
PUBLIC phys_bytes umap_bios(rp, vir_addr, bytes)
register struct proc *rp; /* pointer to proc table entry for process */
PUBLIC phys_bytes umap_bios(vir_addr, bytes)
vir_bytes vir_addr; /* virtual address in BIOS segment */
vir_bytes bytes; /* # of bytes to be copied */
{
@@ -358,37 +404,6 @@ vir_bytes bytes; /* # of bytes to be copied */
}
#endif
/*===========================================================================*
* umap_verify_grant *
*===========================================================================*/
PUBLIC phys_bytes umap_verify_grant(rp, grantee, grant, offset, bytes, access)
struct proc *rp; /* pointer to proc table entry for process */
endpoint_t grantee; /* who wants to do this */
cp_grant_id_t grant; /* grant no. */
vir_bytes offset; /* offset into grant */
vir_bytes bytes; /* size */
int access; /* does grantee want to CPF_READ or _WRITE? */
{
int proc_nr;
vir_bytes v_offset;
endpoint_t granter;
/* See if the grant in that process is sensible, and
* find out the virtual address and (optionally) new
* process for that address.
*
* Then convert that process to a slot number.
*/
if(verify_grant(rp->p_endpoint, grantee, grant, bytes, access, offset,
&v_offset, &granter) != OK
|| !isokendpt(granter, &proc_nr)) {
return 0;
}
/* Do the mapping from virtual to physical. */
return umap_local(proc_addr(proc_nr), D, v_offset, bytes);
}
/*===========================================================================*
* umap_grant *
*===========================================================================*/
@@ -398,9 +413,9 @@ cp_grant_id_t grant; /* grant no. */
vir_bytes bytes; /* size */
{
int proc_nr;
vir_bytes offset;
vir_bytes offset, ret;
endpoint_t granter;
/* See if the grant in that process is sensible, and
* find out the virtual address and (optionally) new
* process for that address.
@@ -409,88 +424,25 @@ vir_bytes bytes; /* size */
*/
if(verify_grant(rp->p_endpoint, ANY, grant, bytes, 0, 0,
&offset, &granter) != OK) {
kprintf("SYSTEM: umap_grant: verify_grant failed\n");
return 0;
}
if(!isokendpt(granter, &proc_nr)) {
kprintf("SYSTEM: umap_grant: isokendpt failed\n");
return 0;
}
/* Do the mapping from virtual to physical. */
return umap_local(proc_addr(proc_nr), D, offset, bytes);
ret = umap_virtual(proc_addr(proc_nr), D, offset, bytes);
if(!ret) {
kprintf("SYSTEM:umap_grant:umap_virtual failed; grant %s:%d -> %s: vir 0x%lx\n",
rp->p_name, grant,
proc_addr(proc_nr)->p_name, offset);
}
return ret;
}
/*===========================================================================*
* virtual_copy *
*===========================================================================*/
PUBLIC int virtual_copy(src_addr, dst_addr, bytes)
struct vir_addr *src_addr; /* source virtual address */
struct vir_addr *dst_addr; /* destination virtual address */
vir_bytes bytes; /* # of bytes to copy */
{
/* Copy bytes from virtual address src_addr to virtual address dst_addr.
* Virtual addresses can be in ABS, LOCAL_SEG, REMOTE_SEG, or BIOS_SEG.
*/
struct vir_addr *vir_addr[2]; /* virtual source and destination address */
phys_bytes phys_addr[2]; /* absolute source and destination */
int seg_index;
int i;
/* Check copy count. */
if (bytes <= 0) return(EDOM);
/* Do some more checks and map virtual addresses to physical addresses. */
vir_addr[_SRC_] = src_addr;
vir_addr[_DST_] = dst_addr;
for (i=_SRC_; i<=_DST_; i++) {
int proc_nr, type;
struct proc *p;
type = vir_addr[i]->segment & SEGMENT_TYPE;
if(type != PHYS_SEG && isokendpt(vir_addr[i]->proc_nr_e, &proc_nr))
p = proc_addr(proc_nr);
else
p = NULL;
/* Get physical address. */
switch(type) {
case LOCAL_SEG:
if(!p) return EDEADSRCDST;
seg_index = vir_addr[i]->segment & SEGMENT_INDEX;
phys_addr[i] = umap_local(p, seg_index, vir_addr[i]->offset, bytes);
break;
case REMOTE_SEG:
if(!p) return EDEADSRCDST;
seg_index = vir_addr[i]->segment & SEGMENT_INDEX;
phys_addr[i] = umap_remote(p, seg_index, vir_addr[i]->offset, bytes);
break;
#if _MINIX_CHIP == _CHIP_INTEL
case BIOS_SEG:
if(!p) return EDEADSRCDST;
phys_addr[i] = umap_bios(p, vir_addr[i]->offset, bytes );
break;
#endif
case PHYS_SEG:
phys_addr[i] = vir_addr[i]->offset;
break;
case GRANT_SEG:
phys_addr[i] = umap_grant(p, vir_addr[i]->offset, bytes);
break;
default:
return(EINVAL);
}
/* Check if mapping succeeded. */
if (phys_addr[i] <= 0 && vir_addr[i]->segment != PHYS_SEG)
return(EFAULT);
}
/* Now copy bytes between physical addresseses. */
phys_copy(phys_addr[_SRC_], phys_addr[_DST_], (phys_bytes) bytes);
return(OK);
}
/*===========================================================================*
* clear_endpoint *
*===========================================================================*/
@@ -499,8 +451,22 @@ register struct proc *rc; /* slot of process to clean up */
{
register struct proc *rp; /* iterate over process table */
register struct proc **xpp; /* iterate over caller queue */
struct proc *np;
if(isemptyp(rc)) panic("clear_proc: empty process", proc_nr(rc));
if(isemptyp(rc)) minix_panic("clear_proc: empty process", proc_nr(rc));
#if 1
if(rc->p_endpoint == PM_PROC_NR || rc->p_endpoint == VFS_PROC_NR) {
/* This test is great for debugging system processes dying,
* but as this happens normally on reboot, not good permanent code.
*/
kprintf("process %s / %d died; stack: ", rc->p_name, rc->p_endpoint);
proc_stacktrace(rc);
kprintf("kernel trace: ");
util_stacktrace();
minix_panic("clear_proc: system process died", rc->p_endpoint);
}
#endif
/* Make sure that the exiting process is no longer scheduled. */
RTS_LOCK_SET(rc, NO_ENDPOINT);
@@ -550,7 +516,8 @@ register struct proc *rc; /* slot of process to clean up */
rp->p_reg.retreg = ESRCDIED; /* report source died */
RTS_LOCK_UNSET(rp, RECEIVING); /* no longer receiving */
#if DEBUG_ENABLE_IPC_WARNINGS
kprintf("Proc %d receive dead src %d\n", proc_nr(rp), proc_nr(rc));
kprintf("Proc %d (%s) receiving from dead src %d (%s)\n",
proc_nr(rp), rp->p_name, proc_nr(rc), rc->p_name);
#endif
}
if (RTS_ISSET(rp, SENDING) &&
@@ -558,8 +525,147 @@ register struct proc *rc; /* slot of process to clean up */
rp->p_reg.retreg = EDSTDIED; /* report destination died */
RTS_LOCK_UNSET(rp, SENDING);
#if DEBUG_ENABLE_IPC_WARNINGS
kprintf("Proc %d send dead dst %d\n", proc_nr(rp), proc_nr(rc));
kprintf("Proc %d (%s) send to dying dst %d (%s)\n",
proc_nr(rp), rp->p_name, proc_nr(rc), rc->p_name);
#endif
}
}
/* No pending soft notifies. */
for(np = softnotify; np; np = np->next_soft_notify) {
if(np == rc) {
minix_panic("dying proc was on next_soft_notify", np->p_endpoint);
}
}
}
/*===========================================================================*
* umap_verify_grant *
*===========================================================================*/
PUBLIC phys_bytes umap_verify_grant(rp, grantee, grant, offset, bytes, access)
struct proc *rp; /* pointer to proc table entry for process */
endpoint_t grantee; /* who wants to do this */
cp_grant_id_t grant; /* grant no. */
vir_bytes offset; /* offset into grant */
vir_bytes bytes; /* size */
int access; /* does grantee want to CPF_READ or _WRITE? */
{
int proc_nr;
vir_bytes v_offset;
endpoint_t granter;
/* See if the grant in that process is sensible, and
* find out the virtual address and (optionally) new
* process for that address.
*
* Then convert that process to a slot number.
*/
if(verify_grant(rp->p_endpoint, grantee, grant, bytes, access, offset,
&v_offset, &granter) != OK
|| !isokendpt(granter, &proc_nr)) {
return 0;
}
/* Do the mapping from virtual to physical. */
return umap_virtual(proc_addr(proc_nr), D, v_offset, bytes);
}
/*===========================================================================*
* softnotify_check *
*===========================================================================*/
PRIVATE void softnotify_check(void)
{
struct proc *np, *nextnp;
if(!softnotify)
return;
for(np = softnotify; np; np = nextnp) {
if(!np->p_softnotified)
minix_panic("softnotify but no p_softnotified", NO_NUM);
lock_notify(SYSTEM, np->p_endpoint);
nextnp = np->next_soft_notify;
np->next_soft_notify = NULL;
np->p_softnotified = 0;
}
softnotify = NULL;
}
/*===========================================================================*
* vmrestart_check *
*===========================================================================*/
PRIVATE struct proc *vmrestart_check(message *m)
{
int type, r;
struct proc *restarting;
/* Anyone waiting to be vm-restarted? */
if(!(restarting = vmrestart))
return NULL;
if(restarting->p_rts_flags & SLOT_FREE)
minix_panic("SYSTEM: VMREQUEST set for empty process", NO_NUM);
type = restarting->p_vmrequest.type;
restarting->p_vmrequest.type = VMSTYPE_SYS_NONE;
vmrestart = restarting->p_vmrequest.nextrestart;
if(!RTS_ISSET(restarting, VMREQUEST))
minix_panic("SYSTEM: VMREQUEST not set for process on vmrestart queue",
restarting->p_endpoint);
switch(type) {
case VMSTYPE_SYS_MESSAGE:
memcpy(m, &restarting->p_vmrequest.saved.reqmsg, sizeof(*m));
#if 0
kprintf("SYSTEM: restart sys_message type %d / %lx source %d\n",
m->m_type, m->m_type, m->m_source);
#endif
if(m->m_source != restarting->p_endpoint)
minix_panic("SYSTEM: vmrestart source doesn't match",
NO_NUM);
/* Original caller could've disappeared in the meantime. */
if(!isokendpt(m->m_source, &who_p)) {
kprintf("SYSTEM: ignoring call %d from dead %d\n",
m->m_type, m->m_source);
return NULL;
}
{ int i;
i = m->m_type - KERNEL_CALL;
if(i >= 0 && i < NR_SYS_CALLS) {
#if 0
kprintf("SYSTEM: restart %s from %d\n",
callnames[i], m->m_source);
#endif
} else {
minix_panic("call number out of range", i);
}
}
return restarting;
case VMSTYPE_SYS_CALL:
kprintf("SYSTEM: restart sys_call\n");
/* Restarting a kernel trap. */
sys_call_restart(restarting);
/* Handled; restart system loop. */
return NULL;
case VMSTYPE_MSGCOPY:
/* Do delayed message copy. */
if((r=data_copy(SYSTEM,
(vir_bytes) &restarting->p_vmrequest.saved.msgcopy.msgbuf,
restarting->p_vmrequest.saved.msgcopy.dst->p_endpoint,
(vir_bytes) restarting->p_vmrequest.saved.msgcopy.dst_v,
sizeof(message))) != OK) {
minix_panic("SYSTEM: delayed msgcopy failed", r);
}
RTS_LOCK_UNSET(restarting, VMREQUEST);
/* Handled; restart system loop. */
return NULL;
default:
minix_panic("strange restart type", type);
}
minix_panic("fell out of switch", NO_NUM);
}
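
vmrestart_check() either hands back a process whose saved kernel-call message must be re-dispatched (VMSTYPE_SYS_MESSAGE) or handles the restart itself and returns NULL. A sketch of the consumer side, assuming a sys_task()-style main loop; this is illustrative, not the literal loop from the commit.

/* Sketch only -- not the commit's actual main loop. */
while (TRUE) {
	struct proc *restarting;
	message m;
	int r;

	/* First replay any kernel call that VM has unblocked. */
	restarting = vmrestart_check(&m);

	if (restarting == NULL) {
		/* Nothing to replay; block for a fresh request. */
		if ((r = receive(ANY, &m)) != OK)
			minix_panic("receive() failed", r);
	}

	/* ... dispatch m through the call vector as usual ... */
}
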


@@ -71,14 +71,12 @@ _PROTOTYPE( int do_nice, (message *m_ptr) );
_PROTOTYPE( int do_copy, (message *m_ptr) );
#define do_vircopy do_copy
#define do_physcopy do_copy
#if ! (USE_VIRCOPY || USE_PHYSCOPY)
#define do_copy do_unused
#endif
_PROTOTYPE( int do_vcopy, (message *m_ptr) );
#define do_virvcopy do_vcopy
#define do_physvcopy do_vcopy
#if ! (USE_VIRVCOPY || USE_PHYSVCOPY)
#define do_vcopy do_unused
#endif
@@ -178,8 +176,10 @@ _PROTOTYPE( int do_stime, (message *m_ptr) );
_PROTOTYPE( int do_safecopy, (message *m_ptr) );
_PROTOTYPE( int do_vsafecopy, (message *m_ptr) );
_PROTOTYPE( int do_iopenable, (message *m_ptr) );
_PROTOTYPE( int do_vmctl, (message *m_ptr) );
_PROTOTYPE( int do_setgrant, (message *m_ptr) );
_PROTOTYPE( int do_readbios, (message *m_ptr) );
_PROTOTYPE( int do_mapdma, (message *m_ptr) );
_PROTOTYPE( int do_sprofile, (message *m_ptr) );
#if ! SPROFILE


@@ -50,12 +50,12 @@ OBJECTS = \
$(SYSTEM)(do_sigreturn.o) \
$(SYSTEM)(do_abort.o) \
$(SYSTEM)(do_getinfo.o) \
$(SYSTEM)(do_vm.o) \
$(SYSTEM)(do_vm_setbuf.o) \
$(SYSTEM)(do_sprofile.o) \
$(SYSTEM)(do_cprofile.o) \
$(SYSTEM)(do_profbuf.o) \
$(SYSTEM)(do_mapdma.o)
$(SYSTEM)(do_mapdma.o) \
$(SYSTEM)(do_vmctl.o)
build $(SYSTEM): $(OBJECTS)
aal cr $@ *.o
@@ -172,3 +172,6 @@ $(SYSTEM)(do_profbuf.o): do_profbuf.c
$(SYSTEM)(do_mapdma.o): do_mapdma.c
$(CC) do_mapdma.c
$(SYSTEM)(do_vmctl.o): do_vmctl.c
$(CC) do_vmctl.c


@@ -23,21 +23,25 @@ message *m_ptr; /* pointer to request message */
* in the PM (normal abort or panic) or TTY (after CTRL-ALT-DEL).
*/
int how = m_ptr->ABRT_HOW;
int proc_nr;
int length;
phys_bytes src_phys;
/* See if the monitor is to run the specified instructions. */
if (how == RBT_MONITOR) {
int p;
static char paramsbuffer[512];
int len;
len = MIN(m_ptr->ABRT_MON_LEN, sizeof(paramsbuffer)-1);
if(!isokendpt(m_ptr->ABRT_MON_ENDPT, &proc_nr)) return(EDEADSRCDST);
length = m_ptr->ABRT_MON_LEN + 1;
if (length > kinfo.params_size) return(E2BIG);
src_phys = numap_local(proc_nr,(vir_bytes)m_ptr->ABRT_MON_ADDR,length);
if (! src_phys) return(EFAULT);
if((p=data_copy(m_ptr->ABRT_MON_ENDPT, (vir_bytes) m_ptr->ABRT_MON_ADDR,
SYSTEM, (vir_bytes) paramsbuffer, len)) != OK) {
return p;
}
paramsbuffer[len] = '\0';
/* Parameters seem ok, copy them and prepare shutting down. */
phys_copy(src_phys, kinfo.params_base, (phys_bytes) length);
if((p = arch_set_params(paramsbuffer, len+1)) != OK)
return p;
}
/* Now prepare to shutdown MINIX. */


@@ -12,6 +12,7 @@
*/
#include "../system.h"
#include "../vm.h"
#include <minix/type.h>
#if (USE_VIRCOPY || USE_PHYSCOPY)
@@ -30,8 +31,9 @@ register message *m_ptr; /* pointer to request message */
phys_bytes bytes; /* number of bytes to copy */
int i;
if (m_ptr->m_source != 0 && m_ptr->m_source != 1 &&
m_ptr->m_source != 2 && m_ptr->m_source != 3)
if (m_ptr->m_source != PM_PROC_NR && m_ptr->m_source != VFS_PROC_NR &&
m_ptr->m_source != RS_PROC_NR && m_ptr->m_source != MEM_PROC_NR &&
m_ptr->m_source != VM_PROC_NR)
{
static int first=1;
if (first)
@@ -64,9 +66,13 @@ register message *m_ptr; /* pointer to request message */
/* Check if process number was given implicitly with SELF and is valid. */
if (vir_addr[i].proc_nr_e == SELF)
vir_addr[i].proc_nr_e = m_ptr->m_source;
if (vir_addr[i].segment != PHYS_SEG &&
! isokendpt(vir_addr[i].proc_nr_e, &p))
if (vir_addr[i].segment != PHYS_SEG) {
if(! isokendpt(vir_addr[i].proc_nr_e, &p)) {
kprintf("do_copy: %d: seg 0x%x, %d not ok endpoint\n",
i, vir_addr[i].segment, vir_addr[i].proc_nr_e);
return(EINVAL);
}
}
/* Check if physical addressing is used without SYS_PHYSCOPY. */
if ((vir_addr[i].segment & PHYS_SEG) &&
@@ -79,7 +85,7 @@ register message *m_ptr; /* pointer to request message */
if (bytes != (vir_bytes) bytes) return(E2BIG);
/* Now try to make the actual virtual copy. */
return( virtual_copy(&vir_addr[_SRC_], &vir_addr[_DST_], bytes) );
return( virtual_copy_vmcheck(&vir_addr[_SRC_], &vir_addr[_DST_], bytes) );
}
#endif /* (USE_VIRCOPY || USE_PHYSCOPY) */


@@ -25,8 +25,8 @@ PUBLIC int do_cprofile(m_ptr)
register message *m_ptr; /* pointer to request message */
{
int proc_nr, i, err = 0, k = 0;
vir_bytes vir_dst;
phys_bytes phys_src, phys_dst, len;
phys_bytes len;
vir_bytes vir_dst, vir_src;
switch (m_ptr->PROF_ACTION) {
@@ -50,10 +50,9 @@ register message *m_ptr; /* pointer to request message */
}
/* Set reset flag. */
phys_src = vir2phys((vir_bytes) &cprof_ctl_inst.reset);
phys_dst = (phys_bytes) cprof_proc_info[i].ctl;
len = (phys_bytes) sizeof(cprof_ctl_inst.reset);
phys_copy(phys_src, phys_dst, len);
data_copy(SYSTEM, (vir_bytes) &cprof_ctl_inst.reset,
cprof_proc_info[i].endpt, cprof_proc_info[i].ctl_v,
sizeof(cprof_ctl_inst.reset));
}
kprintf("\n");
@@ -71,14 +70,6 @@ register message *m_ptr; /* pointer to request message */
if(!isokendpt(m_ptr->PROF_ENDPT, &proc_nr))
return EINVAL;
vir_dst = (vir_bytes) m_ptr->PROF_CTL_PTR;
len = (phys_bytes) sizeof (int *);
cprof_info_addr = numap_local(proc_nr, vir_dst, len);
vir_dst = (vir_bytes) m_ptr->PROF_MEM_PTR;
len = (phys_bytes) sizeof (char *);
cprof_data_addr = numap_local(proc_nr, vir_dst, len);
cprof_mem_size = m_ptr->PROF_MEM_SIZE;
kprintf("CPROFILE notice: getting tables:");
@@ -101,10 +92,9 @@ register message *m_ptr; /* pointer to request message */
}
/* Copy control struct from proc to local variable. */
phys_src = cprof_proc_info[i].ctl;
phys_dst = vir2phys((vir_bytes) &cprof_ctl_inst);
len = (phys_bytes) sizeof(cprof_ctl_inst);
phys_copy(phys_src, phys_dst, len);
data_copy(cprof_proc_info[i].endpt, cprof_proc_info[i].ctl_v,
SYSTEM, (vir_bytes) &cprof_ctl_inst,
sizeof(cprof_ctl_inst));
/* Calculate memory used. */
cprof_proc_info[i].slots_used = cprof_ctl_inst.slots_used;
@@ -121,32 +111,33 @@ register message *m_ptr; /* pointer to request message */
if (cprof_mem_size < cprof_info.mem_used) cprof_info.mem_used = -1;
/* Copy the info struct to the user process. */
phys_copy(vir2phys((vir_bytes) &cprof_info), cprof_info_addr,
(phys_bytes) sizeof(cprof_info));
data_copy(SYSTEM, (vir_bytes) &cprof_info,
m_ptr->PROF_ENDPT, (vir_bytes) m_ptr->PROF_CTL_PTR,
sizeof(cprof_info));
/* If there is no space or errors occurred, don't bother copying. */
if (cprof_info.mem_used == -1 || cprof_info.err) return OK;
/* For each profiled process, copy its name, slots_used and profiling
* table to the user process. */
phys_dst = cprof_data_addr;
vir_dst = (vir_bytes) m_ptr->PROF_MEM_PTR;
for (i=0; i<cprof_procs_no; i++) {
phys_src = vir2phys((vir_bytes) cprof_proc_info[i].name);
len = (phys_bytes) strlen(cprof_proc_info[i].name);
phys_copy(phys_src, phys_dst, len);
phys_dst += CPROF_PROCNAME_LEN;
data_copy(SYSTEM, (vir_bytes) cprof_proc_info[i].name,
m_ptr->PROF_ENDPT, vir_dst, len);
vir_dst += CPROF_PROCNAME_LEN;
phys_src = cprof_proc_info[i].ctl +
sizeof(cprof_ctl_inst.reset);
len = (phys_bytes) sizeof(cprof_ctl_inst.slots_used);
phys_copy(phys_src, phys_dst, len);
phys_dst += len;
data_copy(cprof_proc_info[i].endpt,
cprof_proc_info[i].ctl_v + sizeof(cprof_ctl_inst.reset),
m_ptr->PROF_ENDPT, vir_dst, len);
vir_dst += len;
phys_src = cprof_proc_info[i].buf;
len = (phys_bytes)
(sizeof(cprof_tbl_inst) * cprof_proc_info[i].slots_used);
phys_copy(phys_src, phys_dst, len);
phys_dst += len;
data_copy(cprof_proc_info[i].endpt, cprof_proc_info[i].buf_v,
m_ptr->PROF_ENDPT, vir_dst, len);
vir_dst += len;
}
return OK;
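
The edit above is one instance of the transformation applied throughout this commit: physical-address copies built from numap_local()/vir2phys()/phys_copy() become endpoint-based data_copy() calls. A side-by-side sketch on a made-up copy of len bytes from a caller-supplied pointer into a kernel buffer; caller endpoint who_e, slot who_p, and the names user_ptr, kbuf, len are illustrative.

/* Old style: translate to a physical address first, then phys_copy(). */
	src_phys = numap_local(who_p, (vir_bytes) user_ptr, len);
	if (src_phys == 0) return(EFAULT);
	phys_copy(src_phys, vir2phys(kbuf), (phys_bytes) len);

/* New style: copy by (endpoint, virtual address) pair, so the copy goes
 * through the VM-aware path and can fail -- or suspend the call -- when
 * the pages are not present.
 */
	if ((r = data_copy(who_e, (vir_bytes) user_ptr,
		SYSTEM, (vir_bytes) kbuf, len)) != OK)
		return r;
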


@@ -110,9 +110,9 @@ doit:
}
} else {
switch (io_type) {
case _DIO_BYTE: outb(m_ptr->DIO_PORT, m_ptr->DIO_VALUE); break;
case _DIO_WORD: outw(m_ptr->DIO_PORT, m_ptr->DIO_VALUE); break;
case _DIO_LONG: outl(m_ptr->DIO_PORT, m_ptr->DIO_VALUE); break;
case _DIO_BYTE: outb(m_ptr->DIO_PORT, m_ptr->DIO_VALUE); break;
case _DIO_WORD: outw(m_ptr->DIO_PORT, m_ptr->DIO_VALUE); break;
case _DIO_LONG: outl(m_ptr->DIO_PORT, m_ptr->DIO_VALUE); break;
default: return(EINVAL);
}
}


@@ -22,7 +22,6 @@ register message *m_ptr; /* pointer to request message */
{
/* Handle sys_exec(). A process has done a successful EXEC. Patch it up. */
register struct proc *rp;
reg_t sp; /* new sp */
phys_bytes phys_name;
char *np;
int proc;
@@ -31,27 +30,18 @@ register message *m_ptr; /* pointer to request message */
return EINVAL;
rp = proc_addr(proc);
sp = (reg_t) m_ptr->PR_STACK_PTR;
rp->p_reg.sp = sp; /* set the stack pointer */
#if (_MINIX_CHIP == _CHIP_INTEL)
/* wipe extra LDT entries */
phys_memset(vir2phys(&rp->p_seg.p_ldt[EXTRA_LDT_INDEX]), 0,
(LDT_SIZE - EXTRA_LDT_INDEX) * sizeof(rp->p_seg.p_ldt[0]));
#endif
rp->p_reg.pc = (reg_t) m_ptr->PR_IP_PTR; /* set pc */
RTS_LOCK_UNSET(rp, RECEIVING); /* PM does not reply to EXEC call */
/* Save command name for debugging, ps(1) output, etc. */
phys_name = numap_local(who_p, (vir_bytes) m_ptr->PR_NAME_PTR,
(vir_bytes) P_NAME_LEN - 1);
if (phys_name != 0) {
phys_copy(phys_name, vir2phys(rp->p_name), (phys_bytes) P_NAME_LEN - 1);
for (np = rp->p_name; (*np & BYTE) >= ' '; np++) {}
*np = 0; /* mark end */
} else {
if(data_copy(who_e, (vir_bytes) m_ptr->PR_NAME_PTR,
SYSTEM, (vir_bytes) rp->p_name, (phys_bytes) P_NAME_LEN - 1) != OK)
strncpy(rp->p_name, "<unset>", P_NAME_LEN);
}
/* Do architecture-specific exec() stuff. */
arch_pre_exec(rp, (u32_t) m_ptr->PR_IP_PTR, (u32_t) m_ptr->PR_STACK_PTR);
/* No reply to EXEC call */
RTS_LOCK_UNSET(rp, RECEIVING);
return(OK);
}
#endif /* USE_EXEC */


@@ -56,9 +56,11 @@ register struct proc *rc; /* slot of process to clean up */
/* Don't clear if already cleared. */
if(isemptyp(rc)) return;
/* Remove the process' ability to send and receive messages */
clear_endpoint(rc);
/* Turn off any alarm timers at the clock. */
reset_timer(&priv(rc)->s_alarm_timer);
@@ -70,10 +72,10 @@ register struct proc *rc; /* slot of process to clean up */
/* Check the table with IRQ hooks to see if hooks should be released. */
for (i=0; i < NR_IRQ_HOOKS; i++) {
int proc;
if (rc->p_endpoint == irq_hooks[i].proc_nr_e) {
if (rc->p_endpoint == irq_hooks[i].proc_nr_e) {
rm_irq_handler(&irq_hooks[i]); /* remove interrupt handler */
irq_hooks[i].proc_nr_e = NONE; /* mark hook as free */
}
}
}
/* Release the process table slot. If this is a system process, also
@@ -83,9 +85,12 @@ register struct proc *rc; /* slot of process to clean up */
*/
if (priv(rc)->s_flags & SYS_PROC) priv(rc)->s_proc_nr = NONE;
#if 0
/* Clean up virtual memory */
if (rc->p_misc_flags & MF_VM)
if (rc->p_misc_flags & MF_VM) {
vm_map_default(rc);
}
#endif
}
#endif /* USE_EXIT */


@@ -41,10 +41,10 @@ register message *m_ptr; /* pointer to request message */
gen = _ENDPOINT_G(rpc->p_endpoint);
#if (_MINIX_CHIP == _CHIP_INTEL)
old_ldt_sel = rpc->p_seg.p_ldt_sel; /* backup local descriptors */
#endif
*rpc = *rpp; /* copy 'proc' struct */
#if (_MINIX_CHIP == _CHIP_INTEL)
rpc->p_seg.p_ldt_sel = old_ldt_sel; /* restore descriptors */
#else
*rpc = *rpp; /* copy 'proc' struct */
#endif
if(++gen >= _ENDPOINT_MAX_GENERATION) /* increase generation */
gen = 1; /* generation number wraparound */
@@ -77,6 +77,11 @@ register message *m_ptr; /* pointer to request message */
/* Install new map */
r = newmap(rpc, map_ptr);
/* Don't schedule process in VM mode until it has a new pagetable. */
if(m_ptr->PR_FORK_FLAGS & PFF_VMINHIBIT) {
RTS_LOCK_SET(rpc, VMINHIBIT);
}
/* Only one in group should have SIGNALED, child doesn't inherit tracing. */
RTS_LOCK_UNSET(rpc, (SIGNALED | SIG_PENDING | P_STOP));
sigemptyset(&rpc->p_pending);


@@ -10,13 +10,7 @@
*/
#include "../system.h"
#if !( POWERPC )
static unsigned long bios_buf[1024]; /* 4K, what about alignment */
static vir_bytes bios_buf_vir, bios_buf_len;
#endif /* #if !( POWERPC ) */
#include "../vm.h"
#if USE_GETINFO
@@ -31,35 +25,44 @@ register message *m_ptr; /* pointer to request message */
* call simply copies entire data structures to the caller.
*/
size_t length;
phys_bytes src_phys;
phys_bytes dst_phys;
int proc_nr, nr_e, nr;
vir_bytes src_vir;
int proc_nr, nr_e, nr, hz;
struct proc *caller;
phys_bytes ph;
caller = proc_addr(who_p);
/* Set source address and length based on request type. */
switch (m_ptr->I_REQUEST) {
case GET_MACHINE: {
length = sizeof(struct machine);
src_phys = vir2phys(&machine);
src_vir = (vir_bytes) &machine;
break;
}
case GET_KINFO: {
length = sizeof(struct kinfo);
src_phys = vir2phys(&kinfo);
src_vir = (vir_bytes) &kinfo;
break;
}
case GET_LOADINFO: {
length = sizeof(struct loadinfo);
src_phys = vir2phys(&kloadinfo);
src_vir = (vir_bytes) &kloadinfo;
break;
}
case GET_HZ: {
length = sizeof(hz);
src_vir = (vir_bytes) &hz;
hz = HZ;
break;
}
case GET_IMAGE: {
length = sizeof(struct boot_image) * NR_BOOT_PROCS;
src_phys = vir2phys(image);
src_vir = (vir_bytes) image;
break;
}
case GET_IRQHOOKS: {
length = sizeof(struct irq_hook) * NR_IRQ_HOOKS;
src_phys = vir2phys(irq_hooks);
src_vir = (vir_bytes) irq_hooks;
break;
}
case GET_SCHEDINFO: {
@@ -67,36 +70,44 @@ register message *m_ptr; /* pointer to request message */
* at once, otherwise the scheduling information may be incorrect.
* Copy the queue heads and fall through to copy the process table.
*/
if((ph=umap_local(caller, D, (vir_bytes) m_ptr->I_VAL_PTR2,length)) == 0)
return EFAULT;
length = sizeof(struct proc *) * NR_SCHED_QUEUES;
src_phys = vir2phys(rdy_head);
okendpt(m_ptr->m_source, &proc_nr);
dst_phys = numap_local(proc_nr, (vir_bytes) m_ptr->I_VAL_PTR2,
length);
if (src_phys == 0 || dst_phys == 0) return(EFAULT);
phys_copy(src_phys, dst_phys, length);
/* fall through */
CHECKRANGE_OR_SUSPEND(proc_addr(who_p), ph, length, 1);
data_copy(SYSTEM, (vir_bytes) rdy_head,
who_e, (vir_bytes) m_ptr->I_VAL_PTR2, length);
/* fall through to GET_PROCTAB */
}
case GET_PROCTAB: {
length = sizeof(struct proc) * (NR_PROCS + NR_TASKS);
src_phys = vir2phys(proc);
src_vir = (vir_bytes) proc;
break;
}
case GET_PRIVTAB: {
length = sizeof(struct priv) * (NR_SYS_PROCS);
src_phys = vir2phys(priv);
src_vir = (vir_bytes) priv;
break;
}
case GET_PROC: {
nr_e = (m_ptr->I_VAL_LEN2_E == SELF) ?
m_ptr->m_source : m_ptr->I_VAL_LEN2_E;
who_e : m_ptr->I_VAL_LEN2_E;
if(!isokendpt(nr_e, &nr)) return EINVAL; /* validate request */
length = sizeof(struct proc);
src_phys = vir2phys(proc_addr(nr));
src_vir = (vir_bytes) proc_addr(nr);
break;
}
case GET_WHOAMI: {
int len;
/* GET_WHOAMI uses m3 and only uses the message contents for info. */
m_ptr->GIWHO_EP = who_e;
len = MIN(sizeof(m_ptr->GIWHO_NAME), sizeof(caller->p_name))-1;
strncpy(m_ptr->GIWHO_NAME, caller->p_name, len);
m_ptr->GIWHO_NAME[len] = '\0';
return OK;
}
case GET_MONPARAMS: {
src_phys = kinfo.params_base; /* already is a physical */
length = kinfo.params_size;
src_vir = (vir_bytes) params_buffer;
length = sizeof(params_buffer);
break;
}
case GET_RANDOMNESS: {
@@ -109,44 +120,25 @@ register message *m_ptr; /* pointer to request message */
krandom.bin[i].r_next = 0;
}
length = sizeof(struct randomness);
src_phys = vir2phys(&copy);
src_vir = (vir_bytes) &copy;
break;
}
case GET_KMESSAGES: {
length = sizeof(struct kmessages);
src_phys = vir2phys(&kmess);
src_vir = (vir_bytes) &kmess;
break;
}
#if DEBUG_TIME_LOCKS
case GET_LOCKTIMING: {
length = sizeof(timingdata);
src_phys = vir2phys(timingdata);
src_vir = (vir_bytes) timingdata;
break;
}
#endif
#if !( POWERPC )
case GET_BIOSBUFFER:
bios_buf_vir = (vir_bytes)bios_buf;
bios_buf_len = sizeof(bios_buf);
length = sizeof(bios_buf_len);
src_phys = vir2phys(&bios_buf_len);
if (length != m_ptr->I_VAL_LEN2_E) return (EINVAL);
if(!isokendpt(m_ptr->m_source, &proc_nr))
panic("bogus source", m_ptr->m_source);
dst_phys = numap_local(proc_nr, (vir_bytes) m_ptr->I_VAL_PTR2, length);
if (src_phys == 0 || dst_phys == 0) return(EFAULT);
phys_copy(src_phys, dst_phys, length);
length = sizeof(bios_buf_vir);
src_phys = vir2phys(&bios_buf_vir);
break;
#endif /* #if !( POWERPC ) */
case GET_IRQACTIDS: {
length = sizeof(irq_actids);
src_phys = vir2phys(irq_actids);
src_vir = (vir_bytes) irq_actids;
break;
}
@@ -162,11 +154,10 @@ register message *m_ptr; /* pointer to request message */
/* Try to make the actual copy for the requested data. */
if (m_ptr->I_VAL_LEN > 0 && length > m_ptr->I_VAL_LEN) return (E2BIG);
if(!isokendpt(m_ptr->m_source, &proc_nr))
panic("bogus source", m_ptr->m_source);
dst_phys = numap_local(proc_nr, (vir_bytes) m_ptr->I_VAL_PTR, length);
if (src_phys == 0 || dst_phys == 0) return(EFAULT);
phys_copy(src_phys, dst_phys, length);
if((ph=umap_local(caller, D, (vir_bytes) m_ptr->I_VAL_PTR,length)) == 0)
return EFAULT;
CHECKRANGE_OR_SUSPEND(caller, ph, length, 1);
data_copy(SYSTEM, src_vir, who_e, (vir_bytes) m_ptr->I_VAL_PTR, length);
return(OK);
}
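
For reference, the caller's side of these GET_* requests goes through the generic sys_getinfo() stub in libsys. The sketch below fetches the kernel's kinfo struct; the stub's argument order (request, pointer, length, second pointer, second length) is assumed from <minix/syslib.h>, and the helper itself is made up.

#include <minix/syslib.h>
#include <minix/com.h>
#include <minix/type.h>

/* Hypothetical caller-side helper; sys_getinfo() is the existing stub. */
int get_kernel_info(kip)
struct kinfo *kip;
{
	/* The second pointer/length pair is unused for GET_KINFO. */
	return sys_getinfo(GET_KINFO, kip, sizeof(*kip), 0, 0);
}
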


@@ -46,8 +46,9 @@ register message *m_ptr; /* pointer to request message */
if (irq_hook_id >= NR_IRQ_HOOKS || irq_hook_id < 0 ||
irq_hooks[irq_hook_id].proc_nr_e == NONE) return(EINVAL);
if (irq_hooks[irq_hook_id].proc_nr_e != m_ptr->m_source) return(EPERM);
if (m_ptr->IRQ_REQUEST == IRQ_ENABLE)
if (m_ptr->IRQ_REQUEST == IRQ_ENABLE) {
enable_irq(&irq_hooks[irq_hook_id]);
}
else
disable_irq(&irq_hooks[irq_hook_id]);
break;
@@ -64,7 +65,7 @@ register message *m_ptr; /* pointer to request message */
privp= priv(rp);
if (!privp)
{
kprintf("no priv structure!\n");
kprintf("do_irqctl: no priv structure!\n");
return EPERM;
}
if (privp->s_flags & CHECK_IRQ)
@@ -143,14 +144,12 @@ irq_hook_t *hook;
*/
get_randomness(hook->irq);
/* Check if the handler is still alive. If not, forget about the
* interrupt. This should never happen, as processes that die
/* Check if the handler is still alive.
* If it's dead, this should never happen, as processes that die
* automatically get their interrupt hooks unhooked.
*/
if(!isokendpt(hook->proc_nr_e, &proc)) {
hook->proc_nr_e = NONE;
return 0;
}
if(!isokendpt(hook->proc_nr_e, &proc))
minix_panic("invalid interrupt handler", hook->proc_nr_e);
/* Add a bit for this interrupt to the process' pending interrupts. When
* sending the notification message, this bit map will be magically set


@@ -31,14 +31,13 @@ register message *m_ptr; /* pointer to request message */
proc = proc_addr(proc_p);
phys_base= umap_local(proc, D, base, size);
if (!phys_base)
{
kprintf("do_mapdma: umap_local failed\n");
phys_base= umap_virtual(proc, D, base, size);
if (!phys_base)
{
kprintf("do_mapdma: umap_virtual failed\n");
return EFAULT;
}
m_ptr->CP_DST_ADDR = phys_base;
return OK;
}


@@ -16,10 +16,10 @@
PUBLIC int do_newmap(m_ptr)
message *m_ptr; /* pointer to request message */
{
/* Handle sys_newmap(). Fetch the memory map from PM. */
/* Handle sys_newmap(). Fetch the memory map. */
register struct proc *rp; /* process whose map is to be loaded */
struct mem_map *map_ptr; /* virtual address of map inside caller (PM) */
phys_bytes src_phys; /* physical address of map at the PM */
struct mem_map *map_ptr; /* virtual address of map inside caller */
phys_bytes src_phys; /* physical address of map at the */
int proc;
map_ptr = (struct mem_map *) m_ptr->PR_MEM_PTR;
@@ -36,18 +36,15 @@ message *m_ptr; /* pointer to request message */
*===========================================================================*/
PUBLIC int newmap(rp, map_ptr)
struct proc *rp; /* process whose map is to be loaded */
struct mem_map *map_ptr; /* virtual address of map inside caller (PM) */
struct mem_map *map_ptr; /* virtual address of map inside caller */
{
/* Fetch the memory map from PM. */
phys_bytes src_phys; /* physical address of map at the PM */
int proc;
/* Copy the map from PM. */
src_phys = umap_local(proc_addr(who_p), D, (vir_bytes) map_ptr,
sizeof(rp->p_memmap));
if (src_phys == 0) return(EFAULT);
phys_copy(src_phys,vir2phys(rp->p_memmap),
(phys_bytes)sizeof(rp->p_memmap));
int r;
/* Fetch the memory map. */
if((r=data_copy(who_e, (vir_bytes) map_ptr,
SYSTEM, (vir_bytes) rp->p_memmap, sizeof(rp->p_memmap))) != OK) {
kprintf("newmap: data_copy failed! (%d)\n", r);
return r;
}
alloc_segments(rp);


@@ -28,8 +28,7 @@ message *m_ptr; /* pointer to request message */
register struct priv *sp;
int proc_nr;
int priv_id;
int i;
phys_bytes caller_phys, kernel_phys;
int i, r;
struct io_range io_range;
struct mem_range mem_range;
struct priv priv;
@@ -100,12 +99,9 @@ message *m_ptr; /* pointer to request message */
if (m_ptr->CTL_ARG_PTR)
{
/* Copy privilege structure from caller */
caller_phys = umap_local(caller_ptr, D,
(vir_bytes) m_ptr->CTL_ARG_PTR, sizeof(priv));
if (caller_phys == 0)
return EFAULT;
kernel_phys = vir2phys(&priv);
phys_copy(caller_phys, kernel_phys, sizeof(priv));
if((r=data_copy(who_e, (vir_bytes) m_ptr->CTL_ARG_PTR,
SYSTEM, (vir_bytes) &priv, sizeof(priv))) != OK)
return r;
/* Copy the call mask */
for (i= 0; i<CALL_MASK_SIZE; i++)
@@ -180,12 +176,8 @@ message *m_ptr; /* pointer to request message */
#endif
/* Get the I/O range */
caller_phys = umap_local(caller_ptr, D, (vir_bytes) m_ptr->CTL_ARG_PTR,
sizeof(io_range));
if (caller_phys == 0)
return EFAULT;
kernel_phys = vir2phys(&io_range);
phys_copy(caller_phys, kernel_phys, sizeof(io_range));
data_copy(who_e, (vir_bytes) m_ptr->CTL_ARG_PTR,
SYSTEM, (vir_bytes) &io_range, sizeof(io_range));
priv(rp)->s_flags |= CHECK_IO_PORT; /* Check I/O accesses */
i= priv(rp)->s_nr_io_range;
if (i >= NR_IO_RANGE)
@@ -206,12 +198,9 @@ message *m_ptr; /* pointer to request message */
return EPERM;
/* Get the memory range */
caller_phys = umap_local(caller_ptr, D, (vir_bytes) m_ptr->CTL_ARG_PTR,
sizeof(mem_range));
if (caller_phys == 0)
return EFAULT;
kernel_phys = vir2phys(&mem_range);
phys_copy(caller_phys, kernel_phys, sizeof(mem_range));
if((r=data_copy(who_e, (vir_bytes) m_ptr->CTL_ARG_PTR,
SYSTEM, (vir_bytes) &mem_range, sizeof(mem_range))) != OK)
return r;
priv(rp)->s_flags |= CHECK_MEM; /* Check I/O accesses */
i= priv(rp)->s_nr_mem_range;
if (i >= NR_MEM_RANGE)


@@ -25,26 +25,24 @@ register message *m_ptr; /* pointer to request message */
* about the location of their profiling table and the control structure
* which is used to enable the kernel to have the tables cleared.
*/
int proc_nr, len;
int proc_nr;
vir_bytes vir_dst;
struct proc *rp;
/* Store process name, control struct, table locations. */
isokendpt(m_ptr->m_source, &proc_nr);
if(!isokendpt(m_ptr->m_source, &proc_nr))
return EDEADSRCDST;
if(cprof_procs_no >= NR_SYS_PROCS)
return ENOSPC;
rp = proc_addr(proc_nr);
cprof_proc_info[cprof_procs_no].endpt = who_e;
cprof_proc_info[cprof_procs_no].name = rp->p_name;
len = (phys_bytes) sizeof (void *);
vir_dst = (vir_bytes) m_ptr->PROF_CTL_PTR;
cprof_proc_info[cprof_procs_no].ctl =
numap_local(proc_nr, vir_dst, len);
vir_dst = (vir_bytes) m_ptr->PROF_MEM_PTR;
cprof_proc_info[cprof_procs_no].buf =
numap_local(proc_nr, vir_dst, len);
cprof_proc_info[cprof_procs_no].ctl_v = (vir_bytes) m_ptr->PROF_CTL_PTR;
cprof_proc_info[cprof_procs_no].buf_v = (vir_bytes) m_ptr->PROF_MEM_PTR;
cprof_procs_no++;


@@ -14,14 +14,19 @@
* VSCP_VEC_SIZE number of significant elements in vector
*/
#include "../system.h"
#include <minix/type.h>
#include <minix/safecopies.h>
#include "../system.h"
#include "../vm.h"
#define MEM_TOP 0xFFFFFFFFUL
FORWARD _PROTOTYPE(int safecopy, (endpoint_t, endpoint_t, cp_grant_id_t, int, int, size_t, vir_bytes, vir_bytes, int));
#define HASGRANTTABLE(gr) \
(!RTS_ISSET(gr, NO_PRIV) && priv(gr) && priv(gr)->s_grant_table > 0)
/*===========================================================================*
* verify_grant *
*===========================================================================*/
@@ -38,7 +43,7 @@ endpoint_t *e_granter; /* new granter (magic grants) */
static cp_grant_t g;
static int proc_nr;
static struct proc *granter_proc;
static phys_bytes phys_grant;
int r;
/* Get granter process slot (if valid), and check range of
* grant id.
@@ -51,23 +56,9 @@ endpoint_t *e_granter; /* new granter (magic grants) */
/* If there is no priv. structure, or no grant table in the
* priv. structure, or the grant table in the priv. structure
* is too small for the grant,
*
* then there exists no such grant, so
*
* return EPERM.
*
* (Don't leak how big the grant table is by returning
* EINVAL for grant-out-of-range, in case this turns out to be
* interesting information.)
* is too small for the grant, return EPERM.
*/
if(RTS_ISSET(granter_proc, NO_PRIV) || !(priv(granter_proc)) ||
priv(granter_proc)->s_grant_table < 1) {
kprintf("verify_grant: grant verify failed in ep %d proc %d: "
"no priv table, or no grant table\n",
granter, proc_nr);
return(EPERM);
}
if(!HASGRANTTABLE(granter_proc)) return EPERM;
if(priv(granter_proc)->s_grant_entries <= grant) {
static int curr= 0, limit= 100, extra= 20;
@@ -94,25 +85,25 @@ endpoint_t *e_granter; /* new granter (magic grants) */
* (presumably) set an invalid grant table entry by returning
* EPERM, just like with an invalid grant id.
*/
if(!(phys_grant = umap_local(granter_proc, D,
priv(granter_proc)->s_grant_table + sizeof(g)*grant, sizeof(g)))) {
kprintf("verify_grant: grant verify failed: umap failed\n");
if((r=data_copy(granter,
priv(granter_proc)->s_grant_table + sizeof(g)*grant,
SYSTEM, (vir_bytes) &g, sizeof(g))) != OK) {
kprintf("verify_grant: grant verify: data_copy failed\n");
return EPERM;
}
phys_copy(phys_grant, vir2phys(&g), sizeof(g));
/* Check validity. */
if((g.cp_flags & (CPF_USED | CPF_VALID)) != (CPF_USED | CPF_VALID)) {
kprintf(
"verify_grant: grant verify failed: unused or invalid\n");
"verify_grant: grant failed: invalid (%d flags 0x%lx)\n",
grant, g.cp_flags);
return EPERM;
}
/* Check access of grant. */
if(((g.cp_flags & access) != access)) {
kprintf(
"verify_grant: grant verify failed: access invalid; want %x, have %x\n",
"verify_grant: grant verify failed: access invalid; want 0x%x, have 0x%x\n",
access, g.cp_flags);
return EPERM;
}
@@ -210,6 +201,11 @@ int access; /* CPF_READ for a copy from granter to grantee, CPF_WRITE
static vir_bytes v_offset;
int r;
endpoint_t new_granter, *src, *dst;
struct proc *granter_p;
/* See if there is a reasonable grant table. */
if(!(granter_p = endpoint_lookup(granter))) return EINVAL;
if(!HASGRANTTABLE(granter_p)) return EPERM;
/* Decide who is src and who is dst. */
if(access & CPF_READ) {
@@ -227,9 +223,11 @@ int access; /* CPF_READ for a copy from granter to grantee, CPF_WRITE
if (curr < limit+extra)
{
#if 0
kprintf(
"grant %d verify to copy %d->%d by %d failed: err %d\n",
grantid, *src, *dst, grantee, r);
#endif
} else if (curr == limit+extra)
{
kprintf(
@@ -265,7 +263,7 @@ int access; /* CPF_READ for a copy from granter to grantee, CPF_WRITE
}
/* Do the regular copy. */
return virtual_copy(&v_src, &v_dst, bytes);
return virtual_copy_vmcheck(&v_src, &v_dst, bytes);
}
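
To make the verify_grant() checks above concrete: a direct grant entry in the granter's table must carry CPF_USED and CPF_VALID plus the requested access bit, and must name the grantee. A sketch of such an entry, with field names as in <minix/safecopies.h>; in practice cpf_grant_direct() in libsys fills this in, and grantee_e, buf and buf_len are illustrative.

#include <minix/safecopies.h>

	cp_grant_t g;

	/* A direct grant letting grantee_e write buf_len bytes at buf. */
	g.cp_flags = CPF_USED | CPF_VALID | CPF_DIRECT | CPF_WRITE;
	g.cp_u.cp_direct.cp_who_to = grantee_e;	/* only this endpoint may use it */
	g.cp_u.cp_direct.cp_start = (vir_bytes) buf;	/* granter-local start */
	g.cp_u.cp_direct.cp_len = buf_len;		/* size of the window */
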
@@ -288,7 +286,7 @@ register message *m_ptr; /* pointer to request message */
src_seg = SCP_INFO2SEG(m_ptr->SCP_INFO);
dst_seg = D;
access = CPF_WRITE;
} else panic("Impossible system call nr. ", sys_call_code);
} else minix_panic("Impossible system call nr. ", sys_call_code);
return safecopy(m_ptr->SCP_FROM_TO, who_e, m_ptr->SCP_GID,
src_seg, dst_seg, m_ptr->SCP_BYTES, m_ptr->SCP_OFFSET,
@@ -304,6 +302,7 @@ register message *m_ptr; /* pointer to request message */
static struct vscp_vec vec[SCPVEC_NR];
static struct vir_addr src, dst;
int r, i, els;
size_t bytes;
/* Set vector copy parameters. */
src.proc_nr_e = who_e;
@@ -314,9 +313,10 @@ register message *m_ptr; /* pointer to request message */
/* No. of vector elements. */
els = m_ptr->VSCP_VEC_SIZE;
bytes = els * sizeof(struct vscp_vec);
/* Obtain vector of copies. */
if((r=virtual_copy(&src, &dst, els * sizeof(struct vscp_vec))) != OK)
if((r=virtual_copy_vmcheck(&src, &dst, bytes)) != OK)
return r;
/* Perform safecopies. */


@@ -26,18 +26,16 @@ message *m_ptr; /* pointer to request message */
*/
struct sigcontext sc;
register struct proc *rp;
phys_bytes src_phys;
int proc;
int proc, r;
if (! isokendpt(m_ptr->SIG_ENDPT, &proc)) return(EINVAL);
if (iskerneln(proc)) return(EPERM);
rp = proc_addr(proc);
/* Copy in the sigcontext structure. */
src_phys = umap_local(rp, D, (vir_bytes) m_ptr->SIG_CTXT_PTR,
(vir_bytes) sizeof(struct sigcontext));
if (src_phys == 0) return(EFAULT);
phys_copy(src_phys, vir2phys(&sc), (phys_bytes) sizeof(struct sigcontext));
if((r=data_copy(m_ptr->SIG_ENDPT, (vir_bytes) m_ptr->SIG_CTXT_PTR,
SYSTEM, (vir_bytes) &sc, sizeof(struct sigcontext))) != OK)
return r;
/* Restore user bits of psw from sc, maintain system bits from proc. */
sc.sc_psw = (sc.sc_psw & X86_FLAGS_USER) |


@@ -25,20 +25,18 @@ message *m_ptr; /* pointer to request message */
struct sigmsg smsg;
register struct proc *rp;
phys_bytes src_phys, dst_phys;
struct sigcontext sc, *scp;
struct sigframe fr, *frp;
int proc;
int proc, r;
if (!isokendpt(m_ptr->SIG_ENDPT, &proc)) return(EINVAL);
if (iskerneln(proc)) return(EPERM);
rp = proc_addr(proc);
/* Get the sigmsg structure into our address space. */
src_phys = umap_local(proc_addr(PM_PROC_NR), D, (vir_bytes)
m_ptr->SIG_CTXT_PTR, (vir_bytes) sizeof(struct sigmsg));
if (src_phys == 0) return(EFAULT);
phys_copy(src_phys,vir2phys(&smsg),(phys_bytes) sizeof(struct sigmsg));
if((r=data_copy(PM_PROC_NR, (vir_bytes) m_ptr->SIG_CTXT_PTR,
SYSTEM, (vir_bytes) &smsg, (phys_bytes) sizeof(struct sigmsg))) != OK)
return r;
/* Compute the user stack pointer where sigcontext will be stored. */
scp = (struct sigcontext *) smsg.sm_stkptr - 1;
@@ -56,10 +54,9 @@ message *m_ptr; /* pointer to request message */
sc.sc_mask = smsg.sm_mask;
/* Copy the sigcontext structure to the user's stack. */
dst_phys = umap_local(rp, D, (vir_bytes) scp,
(vir_bytes) sizeof(struct sigcontext));
if (dst_phys == 0) return(EFAULT);
phys_copy(vir2phys(&sc), dst_phys, (phys_bytes) sizeof(struct sigcontext));
if((r=data_copy(SYSTEM, (vir_bytes) &sc, m_ptr->SIG_ENDPT, (vir_bytes) scp,
(vir_bytes) sizeof(struct sigcontext))) != OK)
return r;
/* Initialize the sigframe structure. */
frp = (struct sigframe *) scp - 1;
@@ -73,10 +70,10 @@ message *m_ptr; /* pointer to request message */
fr.sf_retadr = (void (*)()) smsg.sm_sigreturn;
/* Copy the sigframe structure to the user's stack. */
dst_phys = umap_local(rp, D, (vir_bytes) frp,
(vir_bytes) sizeof(struct sigframe));
if (dst_phys == 0) return(EFAULT);
phys_copy(vir2phys(&fr), dst_phys, (phys_bytes) sizeof(struct sigframe));
if((r=data_copy(SYSTEM, (vir_bytes) &fr, m_ptr->SIG_ENDPT, (vir_bytes) frp,
(vir_bytes) sizeof(struct sigframe))) != OK)
return r;
#if ( _MINIX_CHIP == _CHIP_POWERPC ) /* stuff that can't be done in the assembler code. */
/* When the signal handlers C code is called it will write this value
@@ -95,8 +92,13 @@ message *m_ptr; /* pointer to request message */
/* Reschedule if necessary. */
if(RTS_ISSET(rp, NO_PRIORITY))
RTS_LOCK_UNSET(rp, NO_PRIORITY);
else
else {
struct proc *caller;
caller = proc_addr(who_p);
kprintf("system: warning: sigsend a running process\n");
kprintf("caller stack: ");
proc_stacktrace(caller);
}
return(OK);
}


@@ -17,6 +17,9 @@
#if SPROFILE
/* user address to write info struct */
PRIVATE vir_bytes sprof_info_addr_vir;
/*===========================================================================*
* do_sprofile *
*===========================================================================*/
@@ -41,15 +44,14 @@ register message *m_ptr; /* pointer to request message */
return EBUSY;
}
isokendpt(m_ptr->PROF_ENDPT, &proc_nr);
/* Test endpoint number. */
if(!isokendpt(m_ptr->PROF_ENDPT, &proc_nr))
return EINVAL;
vir_dst = (vir_bytes) m_ptr->PROF_CTL_PTR;
length = (phys_bytes) sizeof (int *);
sprof_info_addr = numap_local(proc_nr, vir_dst, length);
vir_dst = (vir_bytes) m_ptr->PROF_MEM_PTR;
length = (phys_bytes) sizeof (char *);
sprof_data_addr = numap_local(proc_nr, vir_dst, length);
/* Set parameters for statistical profiler. */
sprof_ep = m_ptr->PROF_ENDPT;
sprof_info_addr_vir = (vir_bytes) m_ptr->PROF_CTL_PTR;
sprof_data_addr_vir = (vir_bytes) m_ptr->PROF_MEM_PTR;
sprof_info.mem_used = 0;
sprof_info.total_samples = 0;
@@ -80,8 +82,8 @@ register message *m_ptr; /* pointer to request message */
stop_profile_clock();
phys_copy(vir2phys((vir_bytes) &sprof_info),
sprof_info_addr, (phys_bytes) sizeof(sprof_info));
data_copy(SYSTEM, (vir_bytes) &sprof_info,
sprof_ep, sprof_info_addr_vir, sizeof(sprof_info));
return OK;


@@ -40,7 +40,6 @@ register message *m_ptr;
*/
register struct proc *rp;
phys_bytes src, dst;
vir_bytes tr_addr = (vir_bytes) m_ptr->CTL_ADDRESS;
long tr_data = m_ptr->CTL_DATA;
int tr_request = m_ptr->CTL_REQUEST;
@@ -48,6 +47,26 @@ register message *m_ptr;
unsigned char ub;
int i;
#define COPYTOPROC(seg, addr, myaddr, length) { \
struct vir_addr fromaddr, toaddr; \
fromaddr.proc_nr_e = SYSTEM; \
toaddr.proc_nr_e = tr_proc_nr_e; \
fromaddr.offset = (myaddr); \
toaddr.offset = (addr); \
fromaddr.segment = D; \
toaddr.segment = (seg); \
}
#define COPYFROMPROC(seg, addr, myaddr, length) { \
struct vir_addr fromaddr, toaddr; \
fromaddr.proc_nr_e = tr_proc_nr_e; \
toaddr.proc_nr_e = SYSTEM; \
fromaddr.offset = (addr); \
toaddr.offset = (myaddr); \
fromaddr.segment = (seg); \
toaddr.segment = D; \
}
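
As extracted here, the two macros only fill in the fromaddr/toaddr structures; the copy call itself appears to have been cut from the hunk. Presumably each body ends with the endpoint-based copy used elsewhere in this commit, roughly as sketched below; the exact error handling is an assumption.

	/* ...field assignments as above, then the actual copy: */	\
	if(virtual_copy_vmcheck(&fromaddr, &toaddr, length) != OK)	\
		kprintf("do_trace: %d-byte copy failed\n", length);	\
}
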
if(!isokendpt(tr_proc_nr_e, &tr_proc_nr)) return(EINVAL);
if (iskerneln(tr_proc_nr)) return(EPERM);
@@ -61,16 +80,14 @@ register message *m_ptr;
case T_GETINS: /* return value from instruction space */
if (rp->p_memmap[T].mem_len != 0) {
if ((src = umap_local(rp, T, tr_addr, TR_VLSIZE)) == 0) return(EIO);
phys_copy(src, vir2phys(&tr_data), (phys_bytes) sizeof(long));
COPYTOPROC(T, tr_addr, (vir_bytes) &tr_data, sizeof(long));
m_ptr->CTL_DATA = tr_data;
break;
}
/* Text space is actually data space - fall through. */
case T_GETDATA: /* return value from data space */
if ((src = umap_local(rp, D, tr_addr, TR_VLSIZE)) == 0) return(EIO);
phys_copy(src, vir2phys(&tr_data), (phys_bytes) sizeof(long));
COPYTOPROC(D, tr_addr, (vir_bytes) &tr_data, sizeof(long));
m_ptr->CTL_DATA= tr_data;
break;
@@ -83,16 +100,14 @@ register message *m_ptr;
case T_SETINS: /* set value in instruction space */
if (rp->p_memmap[T].mem_len != 0) {
if ((dst = umap_local(rp, T, tr_addr, TR_VLSIZE)) == 0) return(EIO);
phys_copy(vir2phys(&tr_data), dst, (phys_bytes) sizeof(long));
COPYFROMPROC(T, tr_addr, (vir_bytes) &tr_data, sizeof(long));
m_ptr->CTL_DATA = 0;
break;
}
/* Text space is actually data space - fall through. */
case T_SETDATA: /* set value in data space */
if ((dst = umap_local(rp, D, tr_addr, TR_VLSIZE)) == 0) return(EIO);
phys_copy(vir2phys(&tr_data), dst, (phys_bytes) sizeof(long));
COPYFROMPROC(D, tr_addr, (vir_bytes) &tr_data, sizeof(long));
m_ptr->CTL_DATA = 0;
break;
@@ -136,28 +151,12 @@ register message *m_ptr;
break;
case T_READB_INS: /* get value from instruction space */
if (rp->p_memmap[T].mem_len != 0) {
if ((dst = umap_local(rp, T, tr_addr, 1)) == 0) return(EFAULT);
phys_copy(dst, vir2phys(&ub), (phys_bytes) 1);
m_ptr->CTL_DATA = ub;
break;
}
if ((dst = umap_local(rp, D, tr_addr, 1)) == 0) return(EFAULT);
phys_copy(dst, vir2phys(&ub), (phys_bytes) 1);
COPYFROMPROC(rp->p_memmap[T].mem_len > 0 ? T : D, tr_addr, (vir_bytes) &ub, 1);
m_ptr->CTL_DATA = ub;
break;
case T_WRITEB_INS: /* set value in instruction space */
if (rp->p_memmap[T].mem_len != 0) {
if ((dst = umap_local(rp, T, tr_addr, 1)) == 0) return(EFAULT);
phys_copy(vir2phys(&tr_data), dst, (phys_bytes) 1);
m_ptr->CTL_DATA = 0;
break;
}
if ((dst = umap_local(rp, D, tr_addr, 1)) == 0) return(EFAULT);
phys_copy(vir2phys(&tr_data), dst, (phys_bytes) 1);
COPYTOPROC(rp->p_memmap[T].mem_len > 0 ? T : D,tr_addr, (vir_bytes) &tr_data, 1);
m_ptr->CTL_DATA = 0;
break;


@@ -10,6 +10,7 @@
*/
#include "../system.h"
#include "../vm.h"
#if USE_UMAP
@@ -25,8 +26,11 @@ register message *m_ptr; /* pointer to request message */
vir_bytes offset = m_ptr->CP_SRC_ADDR;
int count = m_ptr->CP_NR_BYTES;
int endpt = (int) m_ptr->CP_SRC_ENDPT;
int proc_nr;
phys_bytes phys_addr;
int proc_nr, r;
int naughty = 0;
phys_bytes phys_addr = 0, lin_addr = 0;
int caller_pn;
struct proc *targetpr, *caller;
/* Verify process number. */
if (endpt == SELF)
@@ -34,27 +38,88 @@ register message *m_ptr; /* pointer to request message */
else
if (! isokendpt(endpt, &proc_nr))
return(EINVAL);
targetpr = proc_addr(proc_nr);
okendpt(who_e, &caller_pn);
caller = proc_addr(caller_pn);
/* See which mapping should be made. */
switch(seg_type) {
case LOCAL_SEG:
phys_addr = umap_local(proc_addr(proc_nr), seg_index, offset, count);
phys_addr = lin_addr = umap_local(targetpr, seg_index, offset, count);
if(!lin_addr) return EFAULT;
CHECKRANGE_OR_SUSPEND(targetpr, lin_addr, count, 1);
naughty = 1;
break;
case REMOTE_SEG:
phys_addr = umap_remote(proc_addr(proc_nr), seg_index, offset, count);
phys_addr = lin_addr = umap_remote(targetpr, seg_index, offset, count);
if(!lin_addr) return EFAULT;
CHECKRANGE_OR_SUSPEND(targetpr, lin_addr, count, 1);
naughty = 1;
break;
#if _MINIX_CHIP == _CHIP_INTEL
case BIOS_SEG:
phys_addr = umap_bios(proc_addr(proc_nr), offset, count);
break;
#endif
case GRANT_SEG:
phys_addr = umap_grant(proc_addr(proc_nr), offset, count);
naughty = 1;
case LOCAL_VM_SEG:
if(seg_index == MEM_GRANT || seg_type == GRANT_SEG) {
vir_bytes newoffset;
endpoint_t newep;
int new_proc_nr;
if(verify_grant(targetpr->p_endpoint, ANY, offset, count, 0, 0,
&newoffset, &newep) != OK) {
kprintf("SYSTEM: do_umap: verify_grant in %s, grant %d, bytes 0x%lx, failed, caller %s\n", targetpr->p_name, offset, count, caller->p_name);
proc_stacktrace(caller);
return EFAULT;
}
if(!isokendpt(newep, &new_proc_nr)) {
kprintf("SYSTEM: do_umap: isokendpt failed\n");
return EFAULT;
}
/* New lookup. */
offset = newoffset;
targetpr = proc_addr(new_proc_nr);
seg_index = D;
}
if(seg_index == T || seg_index == D || seg_index == S) {
phys_addr = lin_addr = umap_local(targetpr, seg_index, offset, count);
} else {
kprintf("SYSTEM: bogus seg type 0x%lx\n", seg_index);
return EFAULT;
}
if(!lin_addr) {
kprintf("SYSTEM:do_umap: umap_local failed\n");
return EFAULT;
}
CHECKRANGE_OR_SUSPEND(targetpr, lin_addr, count, 1);
if(vm_lookup(targetpr, lin_addr, &phys_addr, NULL) != OK) {
kprintf("SYSTEM:do_umap: vm_lookup failed\n");
return EFAULT;
}
if(phys_addr == 0)
minix_panic("vm_lookup returned zero physical address", NO_NUM);
break;
default:
return(EINVAL);
if((r=arch_umap(targetpr, offset, count, seg_type, &lin_addr))
!= OK)
return r;
phys_addr = lin_addr;
}
if(vm_running && !vm_contiguous(targetpr, lin_addr, count)) {
kprintf("SYSTEM:do_umap: not contiguous\n");
return EFAULT;
}
m_ptr->CP_DST_ADDR = phys_addr;
if(naughty || phys_addr == 0) {
kprintf("kernel: umap 0x%x done by %d / %s, pc 0x%lx, 0x%lx -> 0x%lx\n",
seg_type, who_e, caller->p_name, caller->p_reg.pc, offset, phys_addr);
kprintf("caller stack: ");
proc_stacktrace(caller);
}
return (phys_addr == 0) ? EFAULT: OK;
}
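
The caller's view of this kernel call is the sys_umap() wrapper in libsys. A sketch of a driver resolving one of its own buffers to a physical address for DMA; the wrapper's argument order and the D segment constant are assumed from <minix/syslib.h> and <minix/const.h>, and the helper is hypothetical.

#include <minix/syslib.h>
#include <minix/const.h>

static char dma_buf[4096];

/* Hypothetical driver helper: where does dma_buf live physically? */
int get_dma_address(physp)
phys_bytes *physp;
{
	/* SELF + D: our own data segment. */
	return sys_umap(SELF, D, (vir_bytes) dma_buf, sizeof(dma_buf), physp);
}
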


@@ -10,7 +10,7 @@
PUBLIC int do_unused(m)
message *m; /* pointer to request message */
{
kprintf("SYSTEM: got unused request %d from %d", m->m_type, m->m_source);
kprintf("SYSTEM: got unused request %d from %d\n", m->m_type, m->m_source);
return(EBADREQUEST); /* illegal message type */
}


@@ -25,13 +25,13 @@ register message *m_ptr; /* pointer to request message */
* requests. Although a single handler function is used, there are two
* different kernel calls so that permissions can be checked.
*/
int nr_req;
int nr_req, r;
vir_bytes caller_vir;
phys_bytes caller_phys;
phys_bytes kernel_phys;
phys_bytes bytes;
int i,s;
struct vir_cp_req *req;
struct vir_addr src, dst;
struct proc *pr;
{ static int first=1;
if (first)
@@ -41,17 +41,23 @@ register message *m_ptr; /* pointer to request message */
}
}
if(!(pr = endpoint_lookup(who_e)))
minix_panic("do_vcopy: caller doesn't exist", who_e);
/* Check if request vector size is ok. */
nr_req = (unsigned) m_ptr->VCP_VEC_SIZE;
if (nr_req > VCOPY_VEC_SIZE) return(EINVAL);
bytes = nr_req * sizeof(struct vir_cp_req);
/* Calculate physical addresses and copy (port,value)-pairs from user. */
caller_vir = (vir_bytes) m_ptr->VCP_VEC_ADDR;
caller_phys = umap_local(proc_addr(who_p), D, caller_vir, bytes);
if (0 == caller_phys) return(EFAULT);
kernel_phys = vir2phys(vir_cp_req);
phys_copy(caller_phys, kernel_phys, (phys_bytes) bytes);
src.segment = dst.segment = D;
src.proc_nr_e = who_e;
dst.proc_nr_e = SYSTEM;
dst.offset = (vir_bytes) vir_cp_req;
src.offset = (vir_bytes) m_ptr->VCP_VEC_ADDR;
if((r=virtual_copy_vmcheck(&src, &dst, bytes)) != OK)
return r;
/* Assume vector with requests is correct. Try to copy everything. */
m_ptr->VCP_NR_OK = 0;
@@ -62,7 +68,7 @@ register message *m_ptr; /* pointer to request message */
/* Check if physical addressing is used without SYS_PHYSVCOPY. */
if (((req->src.segment | req->dst.segment) & PHYS_SEG) &&
m_ptr->m_type != SYS_PHYSVCOPY) return(EPERM);
if ((s=virtual_copy(&req->src, &req->dst, req->count)) != OK)
if ((s=virtual_copy_vmcheck(&req->src, &req->dst, req->count)) != OK)
return(s);
m_ptr->VCP_NR_OK ++;
}


@@ -36,14 +36,13 @@ register message *m_ptr; /* pointer to request message */
int vec_size; /* size of vector */
int io_in; /* true if input */
size_t bytes; /* # bytes to be copied */
vir_bytes caller_vir; /* virtual address at caller */
phys_bytes caller_phys; /* physical address at caller */
port_t port;
int i, j, io_size, nr_io_range;
int io_dir, io_type;
struct proc *rp;
struct priv *privp;
struct io_range *iorp;
int r;
/* Get the request, size of the request vector, and check the values. */
io_dir = m_ptr->DIO_REQUEST & _DIO_DIRMASK;
@@ -69,11 +68,10 @@ register message *m_ptr; /* pointer to request message */
}
if (bytes > sizeof(vdevio_buf)) return(E2BIG);
/* Calculate physical addresses and copy (port,value)-pairs from user. */
caller_vir = (vir_bytes) m_ptr->DIO_VEC_ADDR;
caller_phys = umap_local(proc_addr(who_p), D, caller_vir, bytes);
if (0 == caller_phys) return(EFAULT);
phys_copy(caller_phys, vir2phys(vdevio_buf), (phys_bytes) bytes);
/* Copy (port,value)-pairs from user. */
if((r=data_copy(who_e, (vir_bytes) m_ptr->DIO_VEC_ADDR,
SYSTEM, (vir_bytes) vdevio_buf, bytes)) != OK)
return r;
rp= proc_addr(who_p);
privp= priv(rp);
@@ -110,7 +108,20 @@ register message *m_ptr; /* pointer to request message */
* the entire switch is wrapped in lock() and unlock() to prevent the I/O
* batch from being interrupted.
*/
lock(13, "do_vdevio");
#if 0
if(who_e == 71091) {
static int vd = 0;
if(vd++ < 100) {
kprintf("proc %d does vdevio no %d; type %d, direction %s\n",
who_e, vd, io_type, io_in ? "input" : "output");
kprintf("(");
for (i=0; i<vec_size; i++)
kprintf("%2d:0x%x,0x%x ", i, pvb[i].port, pvb[i].value);
kprintf(")\n");
}
}
#endif
lock;
switch (io_type) {
case _DIO_BYTE: /* byte values */
if (io_in) for (i=0; i<vec_size; i++)
@@ -158,14 +169,18 @@ register message *m_ptr; /* pointer to request message */
}
}
}
unlock(13);
unlock;
/* Almost done, copy back results for input requests. */
if (io_in) phys_copy(vir2phys(vdevio_buf), caller_phys, (phys_bytes) bytes);
if (io_in)
if((r=data_copy(SYSTEM, (vir_bytes) vdevio_buf,
who_e, (vir_bytes) m_ptr->DIO_VEC_ADDR,
(phys_bytes) bytes)) != OK)
return r;
return(OK);
bad:
panic("do_vdevio: unaligned port\n", port);
minix_panic("do_vdevio: unaligned port", port);
return EPERM;
}


@@ -1,83 +0,0 @@
/* The system call implemented in this file:
* m_type: SYS_VM_MAP
*
* The parameters for this system call are:
* m4_l1: Process that requests map (VM_MAP_ENDPT)
* m4_l2: Map (TRUE) or unmap (FALSE) (VM_MAP_MAPUNMAP)
* m4_l3: Base address (VM_MAP_BASE)
* m4_l4: Size (VM_MAP_SIZE)
* m4_l5: address (VM_MAP_ADDR)
*/
#include "../system.h"
PRIVATE int vm_needs_init= 1;
#include <sys/vm.h>
/*===========================================================================*
* do_vm_map *
*===========================================================================*/
PUBLIC int do_vm_map(m_ptr)
message *m_ptr; /* pointer to request message */
{
int proc_nr, do_map;
phys_bytes base, size, offset, p_phys;
struct proc *pp;
/* do_serial_debug= 1; */
if (vm_needs_init)
{
vm_needs_init= 0;
vm_init();
}
if (m_ptr->VM_MAP_ENDPT == SELF) {
proc_nr = who_p;
} else {
if(!isokendpt(m_ptr->VM_MAP_ENDPT, &proc_nr))
return EINVAL;
}
do_map= m_ptr->VM_MAP_MAPUNMAP;
base= m_ptr->VM_MAP_BASE;
size= m_ptr->VM_MAP_SIZE;
offset= m_ptr->VM_MAP_ADDR;
pp= proc_addr(proc_nr);
p_phys= umap_local(pp, D, base, size);
if (p_phys == 0)
return EFAULT;
if (do_map)
{
pp->p_misc_flags |= MF_VM;
vm_map_range(p_phys, size, offset);
}
else
{
vm_map_range(p_phys, size, p_phys);
}
return OK;
}
/*===========================================================================*
* vm_map_default *
*===========================================================================*/
PUBLIC void vm_map_default(pp)
struct proc *pp;
{
phys_bytes base_clicks, size_clicks;
if (vm_needs_init)
panic("vm_map_default: VM not initialized?", NO_NUM);
pp->p_misc_flags &= ~MF_VM;
base_clicks= pp->p_memmap[D].mem_phys;
size_clicks= pp->p_memmap[S].mem_phys+pp->p_memmap[S].mem_len -
base_clicks;
vm_map_range(base_clicks << CLICK_SHIFT,
size_clicks << CLICK_SHIFT, base_clicks << CLICK_SHIFT);
}

kernel/system/do_vmctl.c (new file, 104 lines)

@@ -0,0 +1,104 @@
/* The kernel call implemented in this file:
* m_type: SYS_VMCTL
*
* The parameters for this kernel call are:
* SVMCTL_WHO which process
* SVMCTL_PARAM set this setting (VMCTL_*)
* SVMCTL_VALUE to this value
*/
#include "../system.h"
#include "../vm.h"
#include "../debug.h"
#include <minix/type.h>
/*===========================================================================*
* do_vmctl *
*===========================================================================*/
PUBLIC int do_vmctl(m_ptr)
register message *m_ptr; /* pointer to request message */
{
int proc_nr, i;
endpoint_t ep = m_ptr->SVMCTL_WHO;
struct proc *p, *rp;
if(ep == SELF) { ep = m_ptr->m_source; }
vm_init();
if(m_ptr->m_source != VM_PROC_NR) {
kprintf("do_vmctl: source %d, not VM\n", m_ptr->m_source);
return ENOSYS;
}
if(!isokendpt(ep, &proc_nr)) {
kprintf("do_vmctl: unexpected endpoint %d from VM\n", ep);
return EINVAL;
}
p = proc_addr(proc_nr);
switch(m_ptr->SVMCTL_PARAM) {
case VMCTL_CLEAR_PAGEFAULT:
RTS_LOCK_UNSET(p, PAGEFAULT);
return OK;
case VMCTL_MEMREQ_GET:
/* Send VM the information about the memory request. */
if(!(rp = vmrequest))
return ESRCH;
if(!RTS_ISSET(rp, VMREQUEST))
minix_panic("do_vmctl: no VMREQUEST set", NO_NUM);
/* Reply with request fields. */
m_ptr->SVMCTL_MRG_ADDR = (char *) rp->p_vmrequest.start;
m_ptr->SVMCTL_MRG_LEN = rp->p_vmrequest.length;
m_ptr->SVMCTL_MRG_WRITE = rp->p_vmrequest.writeflag;
m_ptr->SVMCTL_MRG_EP = rp->p_vmrequest.who;
rp->p_vmrequest.vmresult = VMSUSPEND;
/* Remove from request chain. */
vmrequest = vmrequest->p_vmrequest.nextrequestor;
return OK;
case VMCTL_MEMREQ_REPLY:
if(!(rp = p->p_vmrequest.requestor))
minix_panic("do_vmctl: no requestor set", ep);
p->p_vmrequest.requestor = NULL;
if(!RTS_ISSET(rp, VMREQUEST))
minix_panic("do_vmctl: no VMREQUEST set", ep);
if(rp->p_vmrequest.vmresult != VMSUSPEND)
minix_panic("do_vmctl: result not VMSUSPEND set",
rp->p_vmrequest.vmresult);
rp->p_vmrequest.vmresult = m_ptr->SVMCTL_VALUE;
if(rp->p_vmrequest.vmresult == VMSUSPEND)
minix_panic("VM returned VMSUSPEND?", NO_NUM);
if(rp->p_vmrequest.vmresult != OK)
kprintf("SYSTEM: VM replied %d to mem request\n",
rp->p_vmrequest.vmresult);
/* Put on restart chain. */
rp->p_vmrequest.nextrestart = vmrestart;
vmrestart = rp;
#if DEBUG_VMASSERT
/* Sanity check. */
if(rp->p_vmrequest.vmresult == OK) {
if(CHECKRANGE(p,
rp->p_vmrequest.start,
rp->p_vmrequest.length,
rp->p_vmrequest.writeflag) != OK) {
kprintf("SYSTEM: request %d:0x%lx-0x%lx, wrflag %d, failed\n",
rp->p_endpoint,
rp->p_vmrequest.start, rp->p_vmrequest.start + rp->p_vmrequest.length,
rp->p_vmrequest.writeflag);
minix_panic("SYSTEM: fail but VM said OK", NO_NUM);
}
}
#endif
return OK;
}
/* Try architecture-specific vmctls. */
return arch_do_vmctl(m_ptr, p);
}
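
Seen from the VM server, VMCTL_MEMREQ_GET and VMCTL_MEMREQ_REPLY form a simple handshake: fetch the next pending memory request, satisfy it, then report the result so the kernel can restart the suspended caller. The helper below is hypothetical; it only shows which message fields travel where, using the same _taskcall() plumbing as the other sys_* stubs, with field and constant names taken from the kernel code above.

#include <minix/syslib.h>
#include <minix/com.h>

/* Hypothetical VM-side helper, not from the commit. */
int vm_handle_memrequest(void)
{
	message m;
	endpoint_t target;
	int r;

	/* Fetch the next pending memory request, if any (ESRCH if none). */
	m.SVMCTL_WHO = SELF;
	m.SVMCTL_PARAM = VMCTL_MEMREQ_GET;
	if ((r = _taskcall(SYSTASK, SYS_VMCTL, &m)) != OK)
		return r;

	/* m.SVMCTL_MRG_ADDR/_LEN/_WRITE now describe the range that must be
	 * made present in the address space of endpoint m.SVMCTL_MRG_EP.
	 * ...allocate and map the pages here...
	 */
	target = m.SVMCTL_MRG_EP;

	/* Report the outcome so the kernel can restart the suspended call. */
	m.SVMCTL_WHO = target;
	m.SVMCTL_PARAM = VMCTL_MEMREQ_REPLY;
	m.SVMCTL_VALUE = OK;
	return _taskcall(SYSTASK, SYS_VMCTL, &m);
}
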


@@ -68,7 +68,8 @@ PUBLIC char *t_stack[TOT_STACK_SPACE / sizeof(char *)];
#define NUL_M 0
#define SRV_M (~0)
#define SYS_M (~0)
#define USR_M (s(PM_PROC_NR) | s(FS_PROC_NR) | s(RS_PROC_NR) | s(SYSTEM))
#define USR_M (s(PM_PROC_NR) | s(FS_PROC_NR) | s(RS_PROC_NR) | s(SYSTEM) | \
s(VM_PROC_NR))
#define DRV_M (USR_M | s(SYSTEM) | s(CLOCK) | s(DS_PROC_NR) | s(LOG_PROC_NR) | s(TTY_PROC_NR))
/* Define kernel calls that processes are allowed to make. This is not looking
@@ -90,10 +91,11 @@ PRIVATE int
pm_c[] = { SYS_ALL_CALLS },
rs_c[] = { SYS_ALL_CALLS },
ds_c[] = { SYS_ALL_CALLS },
vm_c[] = { SYS_ALL_CALLS },
drv_c[] = { DRV_C },
tty_c[] = { DRV_C, SYS_PHYSCOPY, SYS_ABORT, SYS_VM_MAP, SYS_IOPENABLE,
tty_c[] = { DRV_C, SYS_PHYSCOPY, SYS_ABORT, SYS_IOPENABLE,
SYS_READBIOS },
mem_c[] = { DRV_C, SYS_PHYSCOPY, SYS_PHYSVCOPY, SYS_VM_MAP, SYS_IOPENABLE };
mem_c[] = { DRV_C, SYS_PHYSCOPY, SYS_PHYSVCOPY, SYS_IOPENABLE };
/* The system image table lists all programs that are part of the boot image.
* The order of the entries here MUST agree with the order of the programs
@@ -122,6 +124,7 @@ PUBLIC struct boot_image image[] = {
{MEM_PROC_NR, 0,SRV_F, 4, 2, 0, SRV_T, SYS_M,c(mem_c),"memory"},
{LOG_PROC_NR, 0,SRV_F, 4, 2, 0, SRV_T, SYS_M,c(drv_c),"log" },
{MFS_PROC_NR, 0,SRV_F, 32, 4, 0, SRV_T, SRV_M, c(fs_c),"mfs" },
{VM_PROC_NR, 0,SRV_F, 32, 3, 0, SRV_T, SRV_M, c(vm_c),"vm" },
{INIT_PROC_NR, 0,USR_F, 8, USER_Q, 0, USR_T, USR_M, no_c,"init" },
};


@@ -27,13 +27,6 @@ struct boot_image {
endpoint_t endpoint; /* endpoint number when started */
};
/* The kernel outputs diagnostic messages in a circular buffer. */
struct kmessages {
int km_next; /* next index to write */
int km_size; /* current size in buffer */
char km_buf[KMESS_BUF_SIZE]; /* buffer for messages */
};
struct randomness {
struct {
int r_next; /* next index to write */
@@ -57,4 +50,14 @@ typedef struct irq_hook {
typedef int (*irq_handler_t)(struct irq_hook *);
/* Timing measurements. */
struct lock_timingdata {
char names[TIMING_NAME];
unsigned long lock_timings[TIMING_POINTS];
unsigned long lock_timings_range[2];
unsigned long binsize, resets, misses, measurements;
};
EXTERN struct lock_timingdata timingdata[TIMING_CATEGORIES];
#endif /* TYPE_H */


@@ -1,5 +1,5 @@
/* This file contains a collection of miscellaneous procedures:
* panic: abort MINIX due to a fatal error
* minix_panic: abort MINIX due to a fatal error
* kprintf: (from lib/sysutil/kprintf.c)
* kputc: buffered putc used by kernel kprintf
*/
@@ -9,26 +9,32 @@
#include <unistd.h>
#include <signal.h>
#include <string.h>
#include <minix/sysutil.h>
#include <minix/sys_config.h>
/*===========================================================================*
* panic *
* minix_panic *
*===========================================================================*/
PUBLIC void panic(mess,nr)
_CONST char *mess;
PUBLIC void minix_panic(mess,nr)
char *mess;
int nr;
{
/* The system has run aground of a fatal kernel error. Terminate execution. */
static int panicking = 0;
if (panicking ++) return; /* prevent recursive panics */
if (minix_panicing ++) return; /* prevent recursive panics */
if (mess != NULL) {
kprintf("\nKernel panic: %s", mess);
if (nr != NO_NUM) kprintf(" %d", nr);
kprintf("kernel panic: %s", mess);
if(nr != NO_NUM)
kprintf(" %d", nr);
kprintf("\n");
kprintf("kernel stacktrace: ");
util_stacktrace();
}
/* Abort MINIX. */
prepare_shutdown(RBT_PANIC);
minix_shutdown(NULL);
}
@@ -36,6 +42,7 @@ int nr;
#define printf kprintf
#include "../lib/sysutil/kprintf.c"
#define END_OF_KMESS 0
/*===========================================================================*
@@ -54,11 +61,12 @@ int c; /* character to append */
ser_putc(c);
}
kmess.km_buf[kmess.km_next] = c; /* put normal char in buffer */
if (kmess.km_size < KMESS_BUF_SIZE)
if (kmess.km_size < sizeof(kmess.km_buf))
kmess.km_size += 1;
kmess.km_next = (kmess.km_next + 1) % KMESS_BUF_SIZE;
kmess.km_next = (kmess.km_next + 1) % _KMESS_BUF_SIZE;
} else {
int p, outprocs[] = OUTPUT_PROCS_ARRAY;
if(minix_panicing) return;
for(p = 0; outprocs[p] != NONE; p++) {
if(isokprocn(outprocs[p]) && !isemptyn(outprocs[p])) {
send_sig(outprocs[p], SIGKMESS);

kernel/vm.h (new file, 20 lines)

@@ -0,0 +1,20 @@
#ifndef _VM_H
#define _VM_H 1
#define CHECKRANGE_OR_SUSPEND(pr, start, length, wr) { int mr; \
if(vm_running && (mr=vm_checkrange(proc_addr(who_p), pr, start, length, wr, 0)) != OK) { \
return mr; \
} }
#define CHECKRANGE(pr, start, length, wr) \
vm_checkrange(proc_addr(who_p), pr, start, length, wr, 1)
/* Pseudo error code indicating a process request has to be
* restarted after an OK from VM.
*/
#define VMSUSPEND -996
#endif
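
CHECKRANGE_OR_SUSPEND is meant to be dropped into a kernel call handler right before it touches a user range: if VM reports the pages absent, the macro returns the VMSUSPEND pseudo error out of the handler, the call is parked, and it is replayed later through the vmrestart machinery. A minimal handler sketch in the style of do_getinfo() above; do_example, reply_buf and the message fields used are made up.

#include "../system.h"
#include "../vm.h"

PRIVATE char reply_buf[256];

/* Hypothetical handler, not part of the commit. */
PUBLIC int do_example(m_ptr)
register message *m_ptr;		/* pointer to request message */
{
	phys_bytes lin;
	struct proc *caller;

	caller = proc_addr(who_p);

	/* Resolve the caller's buffer to a linear address. */
	lin = umap_local(caller, D, (vir_bytes) m_ptr->m2_p1,
		sizeof(reply_buf));
	if(lin == 0) return EFAULT;

	/* Returns VMSUSPEND from this handler if the pages are missing;
	 * the call is then parked and replayed via vmrestart_check().
	 * The last argument, 1, means write access is required.
	 */
	CHECKRANGE_OR_SUSPEND(caller, lin, sizeof(reply_buf), 1);

	/* Safe to copy now. */
	return data_copy(SYSTEM, (vir_bytes) reply_buf,
		who_e, (vir_bytes) m_ptr->m2_p1, sizeof(reply_buf));
}
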