Merge of Wu's GSOC 09 branch (src.20090525.r4372.wu)

Main changes:
- COW optimization for safecopy.
- safemap, a grant-based interface for sharing memory regions between processes.
- Integration with safemap and complete rework of DS, supporting new data types
  natively (labels, memory ranges, memory mapped ranges).
- For further information:
  http://wiki.minix3.org/en/SummerOfCode2009/MemoryGrants

Additional changes not included in Wu's original branch:
- Fixed unhandled case in VM when using COW optimization for safecopy in case
  of a block that has already been shared as SMAP.
- Better interface and naming scheme for sys_saferevmap and ds_retrieve_map
  calls.
- Better input checking in syslib: check for page alignment when creating
  memory mapping grants.
- DS notifies subscribers when an entry is deleted.
- Documented the behavior of indirect grants in case of memory mapping.
- Test suite in /usr/src/test/safeperf|safecopy|safemap|ds/* reworked
  and extended.
- Minor fixes and general cleanup.
- TO-DO: Grant ids should be generated and managed the way endpoints are, to
  make sure grant slots are never erroneously reused.
This commit is contained in:
Cristiano Giuffrida
2010-01-14 15:24:16 +00:00
parent da3b64d8bc
commit c5b309ff07
99 changed files with 3594 additions and 851 deletions

View File

@@ -613,10 +613,11 @@ PUBLIC int vm_suspend(struct proc *caller, struct proc *target,
util_stacktrace_strcat(caller->p_vmrequest.stacktrace);
#endif
caller->p_vmrequest.writeflag = 1;
caller->p_vmrequest.start = linaddr;
caller->p_vmrequest.length = len;
caller->p_vmrequest.who = target->p_endpoint;
caller->p_vmrequest.req_type = VMPTYPE_CHECK;
caller->p_vmrequest.target = target->p_endpoint;
caller->p_vmrequest.params.check.start = linaddr;
caller->p_vmrequest.params.check.length = len;
caller->p_vmrequest.params.check.writeflag = 1;
caller->p_vmrequest.type = type;
/* Connect caller on vmrequest wait queue. */

View File

@@ -137,12 +137,7 @@ PUBLIC void prot_init(void)
/* Click-round kernel. */
if(kinfo.data_base % CLICK_SIZE)
minix_panic("kinfo.data_base not aligned", NO_NUM);
kinfo.data_size = ((kinfo.data_size+CLICK_SIZE-1)/CLICK_SIZE) * CLICK_SIZE;
/* Click-round kernel. */
if(kinfo.data_base % CLICK_SIZE)
minix_panic("kinfo.data_base not aligned", NO_NUM);
kinfo.data_size = ((kinfo.data_size+CLICK_SIZE-1)/CLICK_SIZE) * CLICK_SIZE;
kinfo.data_size = (phys_bytes) (CLICK_CEIL(kinfo.data_size));
/* Build gdt and idt pointers in GDT where the BIOS expects them. */
dtp= (struct desctableptr_s *) &gdt[GDT_INDEX];

View File

@@ -154,13 +154,14 @@ PUBLIC void main()
/* Convert addresses to clicks and build process memory map */
text_base = e_hdr.a_syms >> CLICK_SHIFT;
text_clicks = (e_hdr.a_text + CLICK_SIZE-1) >> CLICK_SHIFT;
data_clicks = (e_hdr.a_data+e_hdr.a_bss + CLICK_SIZE-1) >> CLICK_SHIFT;
st_clicks= (e_hdr.a_total + CLICK_SIZE-1) >> CLICK_SHIFT;
text_clicks = (vir_clicks) (CLICK_CEIL(e_hdr.a_text) >> CLICK_SHIFT);
data_clicks = (vir_clicks) (CLICK_CEIL(e_hdr.a_data
+ e_hdr.a_bss) >> CLICK_SHIFT);
st_clicks = (vir_clicks) (CLICK_CEIL(e_hdr.a_total) >> CLICK_SHIFT);
if (!(e_hdr.a_flags & A_SEP))
{
data_clicks= (e_hdr.a_text+e_hdr.a_data+e_hdr.a_bss +
CLICK_SIZE-1) >> CLICK_SHIFT;
data_clicks = (vir_clicks) (CLICK_CEIL(e_hdr.a_text +
e_hdr.a_data + e_hdr.a_bss) >> CLICK_SHIFT);
text_clicks = 0; /* common I&D */
}
rp->p_memmap[T].mem_phys = text_base;

View File

@@ -61,7 +61,7 @@ struct proc {
* memory that isn't present, VM has to fix it. Until it has asked
* what needs to be done and fixed it, save necessary state here.
*
* The requester gets a copy of its request message in reqmsg and gets
* The requestor gets a copy of its request message in reqmsg and gets
* VMREQUEST set.
*/
struct {
@@ -70,6 +70,8 @@ struct proc {
#define VMSTYPE_SYS_NONE 0
#define VMSTYPE_KERNELCALL 1
#define VMSTYPE_DELIVERMSG 2
#define VMSTYPE_MAP 3
int type; /* suspended operation */
union {
/* VMSTYPE_SYS_MESSAGE */
@@ -77,10 +79,20 @@ struct proc {
} saved;
/* Parameters of request to VM */
vir_bytes start, length; /* memory range */
u8_t writeflag; /* nonzero for write access */
endpoint_t who;
int req_type;
endpoint_t target;
union {
struct {
vir_bytes start, length; /* memory range */
u8_t writeflag; /* nonzero for write access */
} check;
struct {
char writeflag;
endpoint_t ep_s;
vir_bytes vir_s, vir_d;
vir_bytes length;
} map;
} params;
/* VM result when available */
int vmresult;

View File

@@ -89,6 +89,12 @@ _PROTOTYPE( void check_runqueues_f, (char *file, int line) );
_PROTOTYPE( char *rtsflagstr, (int flags) );
_PROTOTYPE( char *miscflagstr, (int flags) );
/* system/do_safemap.c */
_PROTOTYPE( int map_invoke_vm, (int req_type,
endpoint_t end_d, int seg_d, vir_bytes off_d,
endpoint_t end_s, int seg_s, vir_bytes off_s,
size_t size, int flag));
/* system/do_safecopy.c */
_PROTOTYPE( int verify_grant, (endpoint_t, endpoint_t, cp_grant_id_t, vir_bytes,
int, vir_bytes, vir_bytes *, endpoint_t *));

View File

@@ -208,6 +208,11 @@ PRIVATE void initialize(void)
map(SYS_SAFECOPYTO, do_safecopy); /* copy with pre-granted permission */
map(SYS_VSAFECOPY, do_vsafecopy); /* vectored safecopy */
/* Mapping. */
map(SYS_SAFEMAP, do_safemap); /* map pages from other process */
map(SYS_SAFEREVMAP, do_saferevmap); /* grantor revokes the map grant */
map(SYS_SAFEUNMAP, do_safeunmap); /* requestor unmaps the mapped pages */
/* Clock functionality. */
map(SYS_TIMES, do_times); /* get uptime and process times */
map(SYS_SETALARM, do_setalarm); /* schedule a synchronous alarm */
@@ -262,10 +267,6 @@ int priv_id; /* privilege id */
}
rc->p_priv = sp; /* assign new slot */
rc->p_priv->s_proc_nr = proc_nr(rc); /* set association */
/* Clear some fields */
sp->s_asyntab= -1;
sp->s_asynsize= 0;
return(OK);
}
@@ -482,9 +483,11 @@ register struct proc *rc; /* slot of process to clean up */
if (priv(rc)->s_flags & SYS_PROC)
{
if (priv(rc)->s_asynsize) {
#if 0
kprintf("clear_endpoint: clearing s_asynsize of %s / %d\n",
rc->p_name, rc->p_endpoint);
proc_stacktrace(rc);
#endif
}
priv(rc)->s_asynsize= 0;
}

View File

@@ -181,6 +181,10 @@ _PROTOTYPE( int do_setgrant, (message *m_ptr) );
_PROTOTYPE( int do_readbios, (message *m_ptr) );
_PROTOTYPE( int do_mapdma, (message *m_ptr) );
_PROTOTYPE( int do_safemap, (message *m_ptr) );
_PROTOTYPE( int do_saferevmap, (message *m_ptr) );
_PROTOTYPE( int do_safeunmap, (message *m_ptr) );
_PROTOTYPE( int do_sprofile, (message *m_ptr) );
#if ! SPROFILE
#define do_sprofile do_unused

View File

@@ -44,6 +44,7 @@ OBJECTS = \
$(SYSTEM)(do_privctl.o) \
$(SYSTEM)(do_segctl.o) \
$(SYSTEM)(do_safecopy.o) \
$(SYSTEM)(do_safemap.o) \
$(SYSTEM)(do_sysctl.o) \
$(SYSTEM)(do_getksig.o) \
$(SYSTEM)(do_endksig.o) \
@@ -155,6 +156,9 @@ $(SYSTEM)(do_privctl.o): do_privctl.c
$(SYSTEM)(do_safecopy.o): do_safecopy.c
$(CC) do_safecopy.c
$(SYSTEM)(do_safemap.o): do_safemap.c
$(CC) do_safemap.c
$(SYSTEM)(do_sysctl.o): do_sysctl.c
$(CC) do_sysctl.c

View File

@@ -49,12 +49,6 @@ register message *m_ptr; /* pointer to request message */
return EINVAL;
}
/* memory becomes readonly */
if (priv(rpp)->s_asynsize > 0) {
printf("kernel: process with waiting asynsend table can't fork\n");
return EINVAL;
}
map_ptr= (struct mem_map *) m_ptr->PR_MEM_PTR;
/* Copy parent 'proc' struct to child. And reinitialize some fields. */

View File

@@ -101,6 +101,8 @@ message *m_ptr; /* pointer to request message */
priv(rp)->s_notify_pending.chunk[i] = 0; /* - notifications */
priv(rp)->s_int_pending = 0; /* - interrupts */
sigemptyset(&priv(rp)->s_sig_pending); /* - signals */
priv(rp)->s_asyntab= -1; /* - asynsends */
priv(rp)->s_asynsize= 0;
/* Set defaults for privilege bitmaps. */
priv(rp)->s_flags= DEF_SYS_F; /* privilege flags */

View File

@@ -24,6 +24,8 @@
#define MEM_TOP 0xFFFFFFFFUL
#define USE_COW_SAFECOPY 0
FORWARD _PROTOTYPE(int safecopy, (endpoint_t, endpoint_t, cp_grant_id_t, int, int, size_t, vir_bytes, vir_bytes, int));
#define HASGRANTTABLE(gr) \
@@ -229,9 +231,10 @@ int access; /* CPF_READ for a copy from granter to grantee, CPF_WRITE
{
static struct vir_addr v_src, v_dst;
static vir_bytes v_offset;
int r;
endpoint_t new_granter, *src, *dst;
struct proc *granter_p;
vir_bytes size;
int r;
/* See if there is a reasonable grant table. */
if(!(granter_p = endpoint_lookup(granter))) return EINVAL;
@@ -279,8 +282,48 @@ int access; /* CPF_READ for a copy from granter to grantee, CPF_WRITE
}
/* Do the regular copy. */
return virtual_copy_vmcheck(&v_src, &v_dst, bytes);
#if USE_COW_SAFECOPY
if(v_offset % CLICK_SIZE != addr % CLICK_SIZE || bytes < CLICK_SIZE) {
/* Give up on COW immediately when offsets are not aligned
* or we are copying less than a page.
*/
return virtual_copy_vmcheck(&v_src, &v_dst, bytes);
}
if((size = v_offset % CLICK_SIZE) != 0) {
/* Normal copy for everything before the first page boundary. */
size = CLICK_SIZE - size;
r = virtual_copy_vmcheck(&v_src, &v_dst, size);
if(r != OK)
return r;
v_src.offset += size;
v_dst.offset += size;
bytes -= size;
}
if((size = bytes / CLICK_SIZE) != 0) {
/* Use COW optimization when copying entire pages. */
size *= CLICK_SIZE;
r = map_invoke_vm(VMPTYPE_COWMAP,
v_dst.proc_nr_e, v_dst.segment, v_dst.offset,
v_src.proc_nr_e, v_src.segment, v_src.offset,
size, 0);
if(r != OK)
return r;
v_src.offset += size;
v_dst.offset += size;
bytes -= size;
}
if(bytes != 0) {
/* Normal copy for everything after the last page boundary. */
r = virtual_copy_vmcheck(&v_src, &v_dst, bytes);
if(r != OK)
return r;
}
return OK;
#else
return virtual_copy_vmcheck(&v_src, &v_dst, bytes);
#endif
}
/*===========================================================================*

285
kernel/system/do_safemap.c Normal file
View File

@@ -0,0 +1,285 @@
/* The kernel call implemented in this file:
* m_type: SYS_SAFEMAP or SYS_SAFEREVMAP or SYS_SAFEUNMAP
*
* The parameters for this kernel call are:
* SMAP_EP endpoint of the grantor
* SMAP_GID grant id
* SMAP_OFFSET offset of the grant space
* SMAP_SEG segment
* SMAP_ADDRESS address
* SMAP_BYTES bytes to be copied
* SMAP_FLAG access, writable map or not?
*/
#include <minix/type.h>
#include <minix/safecopies.h>
#include "../system.h"
#include "../vm.h"
/* Bookkeeping for one active safemap mapping.  A slot records enough state
 * to later revoke the mapping on behalf of the grantor (by grant id or by
 * the granted address) or to unmap it on behalf of the grantee (by segment
 * and address).
 */
struct map_info_s {
int flag;		/* nonzero when this slot is in use */
/* Grantor. */
endpoint_t grantor;	/* endpoint that issued the grant */
int gid;		/* grant id naming the mapped region */
vir_bytes offset;	/* offset into the grant space */
vir_bytes address_Dseg; /* seg always is D */
/* Grantee. */
endpoint_t grantee;	/* endpoint that requested the mapping */
int seg;		/* grantee-side segment of the mapping */
vir_bytes address;	/* grantee-side address of the mapping */
/* Length. */
vir_bytes bytes;	/* size of the mapped region in bytes */
};
#define MAX_MAP_INFO 20	/* max number of simultaneous mappings tracked */
/* Static storage: all slots start zeroed, i.e. flag == 0 (free). */
static struct map_info_s map_info[MAX_MAP_INFO];
/*===========================================================================*
 *				add_info				     *
 *===========================================================================*/
static int add_info(endpoint_t grantor, endpoint_t grantee, int gid,
			vir_bytes offset, vir_bytes address_Dseg,
			int seg, vir_bytes address, vir_bytes bytes)
{
/* Record a new mapping in the first free map_info slot.  Returns OK on
 * success, or EBUSY when all slots are in use.
 */
	struct map_info_s *slot = NULL;
	int i;

	/* Locate a free slot, if any. */
	for(i = 0; i < MAX_MAP_INFO && slot == NULL; i++) {
		if(map_info[i].flag == 0)
			slot = &map_info[i];
	}
	if(slot == NULL)
		return EBUSY;	/* table is full */

	/* Claim the slot and fill in the mapping details. */
	slot->flag = 1;
	slot->grantor = grantor;
	slot->grantee = grantee;
	slot->gid = gid;
	slot->address_Dseg = address_Dseg;
	slot->offset = offset;
	slot->seg = seg;
	slot->address = address;
	slot->bytes = bytes;
	return OK;
}
/*===========================================================================*
 *				get_revoke_info				     *
 *===========================================================================*/
static struct map_info_s *get_revoke_info(endpoint_t grantor, int flag, int arg)
{
/* Find an active mapping set up by 'grantor'.  When flag is nonzero, 'arg'
 * is matched against the grant id; otherwise it is matched against the
 * granted D-segment address.  Returns NULL when no entry matches.
 */
	struct map_info_s *p;

	for(p = map_info; p < &map_info[MAX_MAP_INFO]; p++) {
		if(p->flag != 1 || p->grantor != grantor)
			continue;
		if(flag ? (p->gid == arg) : (p->address_Dseg == arg))
			return p;
	}
	return NULL;
}
/*===========================================================================*
 *				get_unmap_info				     *
 *===========================================================================*/
static struct map_info_s *get_unmap_info(endpoint_t grantee, int seg,
	vir_bytes address)
{
/* Find an active mapping installed for 'grantee' at the given segment and
 * address.  Returns NULL when no entry matches.
 */
	struct map_info_s *p;

	for(p = map_info; p < &map_info[MAX_MAP_INFO]; p++) {
		if(p->flag == 1 && p->grantee == grantee
			&& p->seg == seg && p->address == address)
			return p;
	}
	return NULL;
}
/*===========================================================================*
 *				clear_info				     *
 *===========================================================================*/
static int clear_info(struct map_info_s *entry)
{
/* Release a map_info slot so add_info() can reuse it. */
	entry->flag = 0;

	return 0;
}
/*===========================================================================*
 *				map_invoke_vm				     *
 *===========================================================================*/
PUBLIC int map_invoke_vm(int req_type,	/* VMPTYPE_... COWMAP, SMAP, SUNMAP */
	endpoint_t end_d, int seg_d, vir_bytes off_d,
	endpoint_t end_s, int seg_s, vir_bytes off_s,
	size_t size, int flag)
{
/* Translate the source and destination regions to linear addresses,
 * validate them, then suspend the caller and queue a VMSTYPE_MAP request
 * for the VM server.  Returns OK when the request was queued (the actual
 * mapping result is delivered asynchronously via the vmrequest mechanism),
 * or EINVAL on bad endpoints/addresses.
 */
	struct proc *caller, *src, *dst;
	vir_bytes lin_src, lin_dst;

	src = endpoint_lookup(end_s);
	dst = endpoint_lookup(end_d);
	caller = endpoint_lookup(who_e);

	/* Refuse stale or invalid endpoints here: umap_local() below would
	 * otherwise dereference a NULL process pointer.
	 */
	if(src == NULL || dst == NULL || caller == NULL) {
		kprintf("map_invoke_vm: error in endpoint_lookup.\n");
		return EINVAL;
	}

	lin_src = umap_local(src, seg_s, off_s, size);
	lin_dst = umap_local(dst, seg_d, off_d, size);
	if(lin_src == 0 || lin_dst == 0) {
		kprintf("map_invoke_vm: error in umap_local.\n");
		return EINVAL;
	}

	/* Make sure the linear addresses are both page aligned. */
	if(lin_src % CLICK_SIZE != 0
		|| lin_dst % CLICK_SIZE != 0) {
		kprintf("map_invoke_vm: linear addresses not page aligned.\n");
		return EINVAL;
	}

	/* Neither party may already be involved in a VM request. */
	vmassert(!RTS_ISSET(caller, RTS_VMREQUEST));
	vmassert(!RTS_ISSET(caller, RTS_VMREQTARGET));
	vmassert(!RTS_ISSET(dst, RTS_VMREQUEST));
	vmassert(!RTS_ISSET(dst, RTS_VMREQTARGET));
	RTS_LOCK_SET(caller, RTS_VMREQUEST);
	RTS_LOCK_SET(dst, RTS_VMREQTARGET);

	/* Map to the destination. */
	caller->p_vmrequest.req_type = req_type;
	caller->p_vmrequest.target = end_d;		/* destination proc */
	caller->p_vmrequest.params.map.vir_d = lin_dst;	/* destination addr */
	caller->p_vmrequest.params.map.ep_s = end_s;	/* source process */
	caller->p_vmrequest.params.map.vir_s = lin_src;	/* source address */
	caller->p_vmrequest.params.map.length = size;
	caller->p_vmrequest.params.map.writeflag = flag;
	caller->p_vmrequest.type = VMSTYPE_MAP;

	/* Connect caller on vmrequest wait queue.  Notify VM only when the
	 * queue was empty; VM drains the whole chain once notified.
	 */
	if(!(caller->p_vmrequest.nextrequestor = vmrequest))
		lock_notify(SYSTEM, VM_PROC_NR);
	vmrequest = caller;
	return OK;
}
/*===========================================================================*
 *				do_safemap				     *
 *===========================================================================*/
PUBLIC int do_safemap(m_ptr)
register message *m_ptr;	/* pointer to request message */
{
/* Handle SYS_SAFEMAP: map a memory region granted by 'grantor' into the
 * address space of the calling process (the grantee).  The grant is
 * verified, the mapping is recorded in the map_info table, and VM is
 * invoked to set up the actual mapping.
 */
endpoint_t grantor = m_ptr->SMAP_EP;
cp_grant_id_t gid = m_ptr->SMAP_GID;
vir_bytes offset = m_ptr->SMAP_OFFSET;
int seg = (int)m_ptr->SMAP_SEG;
vir_bytes address = m_ptr->SMAP_ADDRESS;
vir_bytes bytes = m_ptr->SMAP_BYTES;
int flag = m_ptr->SMAP_FLAG;	/* nonzero requests a writable mapping */
vir_bytes offset_result;
endpoint_t new_grantor;
int r;
int access = CPF_MAP | CPF_READ;
/* Check the grant. We currently support safemap with both direct and
 * indirect grants, as verify_grant() stores the original grantor
 * transparently in new_grantor below. However, we maintain the original
 * semantics associated to indirect grants only here at safemap time.
 * After the mapping has been set up, if a process part of the chain
 * of trust crashes or exits without revoking the mapping, the mapping
 * can no longer be manually or automatically revoked for any of the
 * processes lower in the chain. This solution reduces complexity but
 * could be improved if we make the assumption that only one process in
 * the chain of trust can effectively map the original memory region.
 */
if(flag != 0)
access |= CPF_WRITE;
r = verify_grant(grantor, who_e, gid, bytes, access,
offset, &offset_result, &new_grantor);
if(r != OK) {
kprintf("verify_grant for gid %d from %d to %d failed: %d\n",
gid, grantor, who_e, r);
return r;
}
/* Add map info. */
r = add_info(new_grantor, who_e, gid, offset, offset_result, seg,
address, bytes);
if(r != OK)
return r;
/* Invoke VM. Grantor memory always lives in the D segment. */
return map_invoke_vm(VMPTYPE_SMAP,
who_e, seg, address, new_grantor, D, offset_result, bytes,flag);
}
/*===========================================================================*
 *				safeunmap				     *
 *===========================================================================*/
PRIVATE int safeunmap(struct map_info_s *p)
{
/* Tear down the mapping described by entry 'p': re-verify the grant to
 * obtain the current grantor and the offset within its address space,
 * then ask VM to unmap the region from the grantee.
 */
vir_bytes offset_result;
endpoint_t new_grantor;
int r;
r = verify_grant(p->grantor, p->grantee, p->gid, p->bytes, CPF_MAP,
p->offset, &offset_result, &new_grantor);
if(r != OK) {
kprintf("safeunmap: error in verify_grant.\n");
return r;
}
r = map_invoke_vm(VMPTYPE_SUNMAP,
p->grantee, p->seg, p->address,
new_grantor, D, offset_result,
p->bytes, 0);
/* NOTE(review): the slot is cleared even when map_invoke_vm fails, so a
 * failed unmap cannot be retried through this table — confirm this is
 * intentional (it does prevent the callers' lookup loops from spinning
 * on the same entry).
 */
clear_info(p);
if(r != OK) {
kprintf("safeunmap: error in map_invoke_vm.\n");
return r;
}
return OK;
}
/*===========================================================================*
 *				do_saferevmap				     *
 *===========================================================================*/
PUBLIC int do_saferevmap(m_ptr)
register message *m_ptr;
{
/* Handle SYS_SAFEREVMAP: the grantor revokes every mapping it set up that
 * matches the given grant id or granted address.
 */
	int flag = m_ptr->SMAP_FLAG;
	int arg = m_ptr->SMAP_GID; /* gid or address_Dseg */
	struct map_info_s *entry;
	int r;

	for(;;) {
		entry = get_revoke_info(who_e, flag, arg);
		if(entry == NULL)
			break;
		r = safeunmap(entry);
		if(r != OK)
			return r;
	}
	return OK;
}
/*===========================================================================*
 *				do_safeunmap				     *
 *===========================================================================*/
PUBLIC int do_safeunmap(m_ptr)
register message *m_ptr;
{
/* Handle SYS_SAFEUNMAP: the grantee removes every mapping it requested at
 * the given segment/address pair.
 */
	vir_bytes address = m_ptr->SMAP_ADDRESS;
	int seg = (int)m_ptr->SMAP_SEG;
	struct map_info_s *entry;
	int r;

	while((entry = get_unmap_info(who_e, seg, address)) != NULL) {
		r = safeunmap(entry);
		if(r != OK)
			return r;
	}
	return OK;
}

View File

@@ -65,21 +65,51 @@ register message *m_ptr; /* pointer to request message */
#endif
/* Reply with request fields. */
m_ptr->SVMCTL_MRG_ADDR = (char *) rp->p_vmrequest.start;
m_ptr->SVMCTL_MRG_LEN = rp->p_vmrequest.length;
m_ptr->SVMCTL_MRG_WRITE = rp->p_vmrequest.writeflag;
m_ptr->SVMCTL_MRG_EP = rp->p_vmrequest.who;
m_ptr->SVMCTL_MRG_REQUESTOR = (void *) rp->p_endpoint;
switch(rp->p_vmrequest.req_type) {
case VMPTYPE_CHECK:
m_ptr->SVMCTL_MRG_TARGET =
rp->p_vmrequest.target;
m_ptr->SVMCTL_MRG_ADDR =
rp->p_vmrequest.params.check.start;
m_ptr->SVMCTL_MRG_LENGTH =
rp->p_vmrequest.params.check.length;
m_ptr->SVMCTL_MRG_FLAG =
rp->p_vmrequest.params.check.writeflag;
m_ptr->SVMCTL_MRG_REQUESTOR =
(void *) rp->p_endpoint;
break;
case VMPTYPE_COWMAP:
case VMPTYPE_SMAP:
case VMPTYPE_SUNMAP:
m_ptr->SVMCTL_MRG_TARGET =
rp->p_vmrequest.target;
m_ptr->SVMCTL_MRG_ADDR =
rp->p_vmrequest.params.map.vir_d;
m_ptr->SVMCTL_MRG_EP2 =
rp->p_vmrequest.params.map.ep_s;
m_ptr->SVMCTL_MRG_ADDR2 =
rp->p_vmrequest.params.map.vir_s;
m_ptr->SVMCTL_MRG_LENGTH =
rp->p_vmrequest.params.map.length;
m_ptr->SVMCTL_MRG_FLAG =
rp->p_vmrequest.params.map.writeflag;
m_ptr->SVMCTL_MRG_REQUESTOR =
(void *) rp->p_endpoint;
break;
default:
minix_panic("VMREQUEST wrong type", NO_NUM);
}
rp->p_vmrequest.vmresult = VMSUSPEND;
/* Remove from request chain. */
vmrequest = vmrequest->p_vmrequest.nextrequestor;
return OK;
return rp->p_vmrequest.req_type;
case VMCTL_MEMREQ_REPLY:
vmassert(RTS_ISSET(p, RTS_VMREQUEST));
vmassert(p->p_vmrequest.vmresult == VMSUSPEND);
okendpt(p->p_vmrequest.who, &proc_nr);
okendpt(p->p_vmrequest.target, &proc_nr);
target = proc_addr(proc_nr);
p->p_vmrequest.vmresult = m_ptr->SVMCTL_VALUE;
vmassert(p->p_vmrequest.vmresult != VMSUSPEND);
@@ -94,21 +124,28 @@ register message *m_ptr; /* pointer to request message */
p->p_vmrequest.start + p->p_vmrequest.length,
p->p_vmrequest.writeflag, p->p_vmrequest.stacktrace);
printf("type %d\n", p->p_vmrequest.type);
#endif
vmassert(RTS_ISSET(target, RTS_VMREQTARGET));
RTS_LOCK_UNSET(target, RTS_VMREQTARGET);
#endif
if(p->p_vmrequest.type == VMSTYPE_KERNELCALL) {
switch(p->p_vmrequest.type) {
case VMSTYPE_KERNELCALL:
/* Put on restart chain. */
p->p_vmrequest.nextrestart = vmrestart;
vmrestart = p;
} else if(p->p_vmrequest.type == VMSTYPE_DELIVERMSG) {
break;
case VMSTYPE_DELIVERMSG:
vmassert(p->p_misc_flags & MF_DELIVERMSG);
vmassert(p == target);
vmassert(RTS_ISSET(p, RTS_VMREQUEST));
RTS_LOCK_UNSET(p, RTS_VMREQUEST);
} else {
break;
case VMSTYPE_MAP:
vmassert(RTS_ISSET(p, RTS_VMREQUEST));
RTS_LOCK_UNSET(p, RTS_VMREQUEST);
break;
default:
#if DEBUG_VMASSERT
printf("suspended with stack: %s\n",
p->p_vmrequest.stacktrace);