New sources layout

Change-Id: Ic716f336b7071063997cf5b4dae6d50e0b4631e9
2014-07-28 21:19:37 +02:00
parent 428aa25dc6
commit 433d6423c3
3138 changed files with 693 additions and 606 deletions


@@ -19,59 +19,60 @@ SUBDIR+= ../external/gpl3/gcc/lib/libgcc .WAIT
. endif
.endif
.if defined(__MINIX)
#LSC MINIX libc depends on
# - libminlib because of minix/malloc-debug.c
# - libminlib because of minix/lib/libc/malloc-debug.c
# - libmthread because of sys/lib/libunwind
SUBDIR+= libminlib
SUBDIR+= ../minix/lib/libminlib
SUBDIR+= .WAIT
SUBDIR+= libsys
SUBDIR+= libmthread
SUBDIR+= ../minix/lib/libsys
SUBDIR+= ../minix/lib/libmthread
SUBDIR+= .WAIT
.endif # defined(__MINIX)
SUBDIR+= libc
SUBDIR+= .WAIT
.if defined(__MINIX)
SUBDIR+= libasyn \
libaudiodriver \
libbdev \
libblockdriver \
libchardriver \
libcompat_minix \
libddekit \
libdevman \
libexec \
libfetch \
libinputdriver \
libminc \
libminixfs \
libnetdriver \
libsffs \
libtimers \
libusb \
libvtreefs
SUBDIR+= ../minix/lib/libasyn \
../minix/lib/libaudiodriver \
../minix/lib/libbdev \
../minix/lib/libblockdriver \
../minix/lib/libchardriver \
../minix/lib/libcompat_minix \
../minix/lib/libddekit \
../minix/lib/libdevman \
../minix/lib/libexec \
../minix/lib/libfetch \
../minix/lib/libinputdriver \
../minix/lib/libminc \
../minix/lib/libminixfs \
../minix/lib/libnetdriver \
../minix/lib/libsffs \
../minix/lib/libtimers \
../minix/lib/libusb \
../minix/lib/libvtreefs
.if (${HAVE_LIBGCC} == "no")
SUBDIR+= libgcc_s_empty
SUBDIR+= ../minix/lib/libgcc_s_empty
.endif
.if (${MKLWIP} == "yes")
SUBDIR+= liblwip \
libnetsock
SUBDIR+= ../minix/lib/liblwip \
../minix/lib/libnetsock
.endif
.if (${MACHINE_ARCH} == "i386")
SUBDIR+= libhgfs \
libvassert \
libvboxfs \
libvirtio
SUBDIR+= ../minix/lib/libhgfs \
../minix/lib/libvassert \
../minix/lib/libvboxfs \
../minix/lib/libvirtio
.endif
.if (${MACHINE_ARCH} == "earm")
SUBDIR+= libclkconf \
libgpio \
libi2cdriver
SUBDIR+= ../minix/lib/libclkconf \
../minix/lib/libgpio \
../minix/lib/libi2cdriver
.endif
.endif # defined(__MINIX)


@@ -1,9 +0,0 @@
.include <bsd.own.mk>
LIB= asyn
INCS= asynchio.h
INCSDIR= /usr/include/sys
SRCS+= asyn_cancel.c asyn_close.c asyn_init.c asyn_pending.c asyn_read.c \
asyn_special.c asyn_synch.c asyn_wait.c asyn_write.c
.include <bsd.lib.mk>


@@ -1,12 +0,0 @@
# ansi sources
.PATH: ${.CURDIR}/asyn
SRCS+= \
asyn_cancel.c \
asyn_close.c \
asyn_init.c \
asyn_pending.c \
asyn_read.c \
asyn_synch.c \
asyn_wait.c \
asyn_write.c


@@ -1,19 +0,0 @@
/* asyn.h - async I/O
* Author: Kees J. Bot
* 7 Jul 1997
* Minix-vmd compatible asynchio(3) using BSD select(2).
*/
#define nil 0
#include <sys/types.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/asynchio.h>
#include <unistd.h>
#include <string.h>
#include <errno.h>
typedef struct _asynfd asynfd_t;
#undef IDLE
typedef enum state { IDLE, WAITING, PENDING } state_t;


@@ -1,21 +0,0 @@
/* asyn_cancel() - cancel an asynch operation Author: Kees J. Bot
* 7 Jul 1997
*/
#include "asyn.h"
int asyn_cancel(asynchio_t *asyn, int fd, int op)
/* Cancel an asynchronous operation if one is in progress. (This is easy with
* select(2), because no operation is actually happening.)
*/
{
asynfd_t *afd;
if ((unsigned) fd >= FD_SETSIZE) { errno= EBADF; return -1; }
afd= &asyn->asyn_afd[fd];
if (afd->afd_state[op] == WAITING) {
afd->afd_state[op]= IDLE;
FD_CLR(fd, &asyn->asyn_fdset[op]);
}
return 0;
}


@@ -1,24 +0,0 @@
/* asyn_close() - forcefully forget about a file descriptor
* Author: Kees J. Bot
* 7 Jul 1997
*/
#include "asyn.h"
int asyn_close(asynchio_t *asyn, int fd)
/* Stop caring about any async operations on this file descriptor. */
{
asynfd_t *afd;
int op;
if ((unsigned) fd >= FD_SETSIZE) { errno= EBADF; return -1; }
afd= &asyn->asyn_afd[fd];
for (op= 0; op < SEL_NR; op++) {
afd->afd_state[op]= IDLE;
FD_CLR(fd, &asyn->asyn_fdset[op]);
}
afd->afd_seen= 0;
asyn->asyn_more++;
return 0;
}


@@ -1,9 +0,0 @@
/* asyn_init() Author: Kees J. Bot
* 7 Jul 1997
*/
#include "asyn.h"
void asyn_init(asynchio_t *asyn)
{
memset(asyn, 0, sizeof(*asyn));
}


@@ -1,14 +0,0 @@
/* asyn_pending() - any results pending? Author: Kees J. Bot
* 7 Jul 1997
*/
#include "asyn.h"
int asyn_pending(asynchio_t *asyn, int fd, int op)
/* Check if a result of an operation is pending. (This is easy with
* select(2), because no operation is actually happening.)
*/
{
if ((unsigned) fd >= FD_SETSIZE) { errno= EBADF; return -1; }
return 0;
}


@@ -1,61 +0,0 @@
/* asyn_read() Author: Kees J. Bot
* 7 Jul 1997
*/
#include "asyn.h"
#include <signal.h>
ssize_t asyn_read(asynchio_t *asyn, int fd, void *buf, size_t len)
/* Asynchronous read(). Try if a read can be done, if not then set a flag
* indicating that select(2) should look out for it. Returns like a normal
* read or returns -1 with errno set to EAGAIN.
*/
{
asynfd_t *afd;
/* Asyn_wait() may block if this counter equals zero indicating that
* all of the asyn_* functions are "in progress".
*/
asyn->asyn_more++;
if ((unsigned) fd >= FD_SETSIZE) { errno= EBADF; return -1; }
afd= &asyn->asyn_afd[fd];
/* If this is the first async call on this filedescriptor then
* remember its file flags.
*/
if (!afd->afd_seen) {
if ((afd->afd_flags= fcntl(fd, F_GETFL)) < 0) return -1;
afd->afd_seen= 1;
}
/* Try to read if I/O is pending. */
if (afd->afd_state[SEL_READ] == PENDING) {
sigset_t mask;
ssize_t result;
int err;
sigemptyset(&mask);
if (sigprocmask(SIG_SETMASK, &mask, &mask) < 0) return -1;
(void) fcntl(fd, F_SETFL, afd->afd_flags | O_NONBLOCK);
/* Try the actual read. */
result= read(fd, buf, len);
err= errno;
(void) fcntl(fd, F_SETFL, afd->afd_flags);
(void) sigprocmask(SIG_SETMASK, &mask, nil);
errno= err;
if (result != -1 || errno != EAGAIN) {
afd->afd_state[SEL_READ]= IDLE;
return result;
}
}
/* Record this read as "waiting". */
afd->afd_state[SEL_READ]= WAITING;
FD_SET(fd, &asyn->asyn_fdset[SEL_READ]);
errno= EAGAIN;
asyn->asyn_more--;
return -1;
}


@@ -1,104 +0,0 @@
/* asyn_special(), asyn_result() Author: Kees J. Bot
* 8 Jul 1997
*/
#include "asyn.h"
#include <signal.h>
/* Saved signal mask between asyn_special() and asyn_result(). */
static sigset_t mask;
int asyn_special(asynchio_t *asyn, int fd, int op)
/* Wait for an operation. This is an odd one out compared to asyn_read()
* and asyn_write(). It does not do an operation itself, but together with
* asyn_result() it is a set of brackets around a system call xxx that has
* no asyn_xxx() for itself. It can be used to build an asyn_accept() or
* asyn_connect() for instance. (Minix-vmd has asyn_ioctl() instead,
* which is used for any other event like TCP/IP listen/connect. BSD has
* a myriad of calls that can't be given an asyn_xxx() counterpart each.)
* Asyn_special() returns -1 for "forget it", 0 for "try it", and 1 for
* "very first call, maybe you should try it once, maybe not". Errno is
* set to EAGAIN if the result is -1 or 1. After trying the system call
* make sure errno equals EAGAIN if the call is still in progress and call
* asyn_result with the result of the system call. Asyn_result() must be
* called if asyn_special() returns 0 or 1.
*
* Example use:
*
* int asyn_accept(asynchio_t *asyn, int s, struct sockaddr *addr, int *addrlen)
* {
* int r;
* if ((r= asyn_special(asyn, fd, SEL_READ)) < 0) return -1;
* r= r == 0 ? accept(fd, addr, addrlen) : -1;
* return asyn_result(asyn, fd, SEL_READ, r);
* }
*
* int asyn_connect(asynchio_t *asyn, int s, struct sockaddr *name, int namelen)
* {
* int r;
* if ((r= asyn_special(asyn, fd, SEL_WRITE)) < 0) return -1;
* if (r == 1 && (r= connect(fd, name, namelen)) < 0) {
* if (errno == EINPROGRESS) errno= EAGAIN;
* }
* return asyn_result(asyn, fd, SEL_WRITE, r);
* }
*/
{
asynfd_t *afd;
int seen;
asyn->asyn_more++;
if ((unsigned) fd >= FD_SETSIZE) { errno= EBADF; return -1; }
afd= &asyn->asyn_afd[fd];
/* If this is the first async call on this filedescriptor then
* remember its file flags.
*/
if (!(seen= afd->afd_seen)) {
if ((afd->afd_flags= fcntl(fd, F_GETFL)) < 0) return -1;
afd->afd_seen= 1;
}
/* Try to read if I/O is pending. */
if (!seen || afd->afd_state[op] == PENDING) {
sigemptyset(&mask);
if (sigprocmask(SIG_SETMASK, &mask, &mask) < 0) return -1;
(void) fcntl(fd, F_SETFL, afd->afd_flags | O_NONBLOCK);
/* Let the caller try the system call. */
errno= EAGAIN;
return seen ? 0 : 1;
}
/* Record this read as "waiting". */
afd->afd_state[op]= WAITING;
FD_SET(fd, &asyn->asyn_fdset[op]);
errno= EAGAIN;
asyn->asyn_more--;
return -1;
}
int asyn_result(asynchio_t *asyn, int fd, int op, int result)
/* The caller has tried the system call with the given result. Finish up. */
{
int err;
asynfd_t *afd= &asyn->asyn_afd[fd];
err= errno;
(void) fcntl(fd, F_SETFL, afd->afd_flags);
(void) sigprocmask(SIG_SETMASK, &mask, nil);
errno= err;
if (result != -1 || errno != EAGAIN) {
afd->afd_state[op]= IDLE;
return result;
}
/* Record this operation as "waiting". */
afd->afd_state[op]= WAITING;
FD_SET(fd, &asyn->asyn_fdset[op]);
errno= EAGAIN;
asyn->asyn_more--;
return -1;
}


@@ -1,26 +0,0 @@
/* asyn_synch() - step back to synch Author: Kees J. Bot
* 7 Jul 1997
*/
#include "asyn.h"
int asyn_synch(asynchio_t *asyn, int fd)
/* No more asynchronous operations on this file descriptor. */
{
asynfd_t *afd;
int op;
if ((unsigned) fd >= FD_SETSIZE) { errno= EBADF; return -1; }
afd= &asyn->asyn_afd[fd];
for (op= 0; op < SEL_NR; op++) {
if (afd->afd_state[op] != IDLE) {
errno= EAGAIN;
return -1;
}
}
/* Make sure the file flags are as they once were. */
if (afd->afd_seen && fcntl(fd, F_SETFL, afd->afd_flags) < 0) return -1;
afd->afd_seen= 0;
return 0;
}


@@ -1,119 +0,0 @@
/* asyn_wait() - wait for asynch operations Author: Kees J. Bot
* 7 Jul 1997
*/
#define DEBUG 0
#include "asyn.h"
#include <time.h>
#if DEBUG
#include <stdio.h>
#endif
#define TBOUND_MIN 1
#define TBOUND_MAX 16
int asyn_wait(asynchio_t *asyn, int flags, struct timeval *to)
/* Wait for one or more nonblocking operations to return a result. */
{
int r;
static struct timeval zero_time;
struct timeval t;
static time_t tbound= TBOUND_MIN;
/* Are there more things to do before we can block? */
if (asyn->asyn_more > 0) { asyn->asyn_more= 0; return 0; }
if (flags & ASYN_NONBLOCK) {
/* Don't block by using a zero second timeout. */
to= &zero_time;
} else
if (to != nil) {
/* asyn_wait() uses an absolute time. */
if (to->tv_usec >= 1000000L) {
to->tv_sec+= to->tv_usec / 1000000L;
to->tv_usec%= 1000000L;
}
(void) gettimeofday(&t, nil);
if (t.tv_sec > to->tv_sec || (t.tv_sec == to->tv_sec
&& t.tv_usec >= to->tv_usec)) {
to= &zero_time;
} else {
t.tv_sec= to->tv_sec - t.tv_sec;
t.tv_usec= to->tv_usec - t.tv_usec;
if (t.tv_usec < 0) {
t.tv_sec--;
t.tv_usec+= 1000000L;
}
to= &t;
}
/* Don't sleep too long, we don't trust select(). */
if (to->tv_sec > tbound) goto bound;
} else {
bound:
/* No timeout? Don't hang in (buggy?) select() forever. */
to= &t;
t.tv_sec= tbound;
t.tv_usec= 0;
}
#if DEBUG
{
int op;
fprintf(stderr, "select: ");
for (op= 0; op < SEL_NR; op++) {
fd_set *fdsetp= &asyn->asyn_fdset[op];
int fd;
for (fd= 0; fd < FD_SETSIZE; fd++) {
if (FD_ISSET(fd, fdsetp)) {
fprintf(stderr, "%d%c", fd, "rwx"[op]);
}
}
}
fflush(stderr);
}
#endif
r= select(FD_SETSIZE, &asyn->asyn_fdset[SEL_READ],
&asyn->asyn_fdset[SEL_WRITE],
&asyn->asyn_fdset[SEL_EXCEPT], to);
#if DEBUG
fprintf(stderr, " (%d) ", r);
#endif
if (r > 0) {
/* An event occurred on one or more file descriptors. */
int op;
for (op= 0; op < SEL_NR; op++) {
fd_set *fdsetp= &asyn->asyn_fdset[op];
int fd;
for (fd= 0; fd < FD_SETSIZE; fd++) {
if (FD_ISSET(fd, fdsetp)) {
asyn->asyn_afd[fd].afd_state[op]=
PENDING;
#if DEBUG
fprintf(stderr, "%d%c", fd, "rwx"[op]);
#endif
}
}
}
tbound= TBOUND_MIN;
} else
if (r == 0) {
/* If nothing happened then let the time boundary slip a bit. */
if (tbound < TBOUND_MAX) tbound <<= 1;
}
#if DEBUG
fputc('\n', stderr);
#endif
FD_ZERO(&asyn->asyn_fdset[SEL_READ]);
FD_ZERO(&asyn->asyn_fdset[SEL_WRITE]);
FD_ZERO(&asyn->asyn_fdset[SEL_EXCEPT]);
return r == 0 ? (errno= EINTR, -1) : r;
}


@@ -1,49 +0,0 @@
/* asyn_write() Author: Kees J. Bot
* 7 Jul 1997
*/
#include "asyn.h"
#include <signal.h>
ssize_t asyn_write(asynchio_t *asyn, int fd, const void *buf, size_t len)
/* Nonblocking write(). (See asyn_read()). */
{
asynfd_t *afd;
asyn->asyn_more++;
if ((unsigned) fd >= FD_SETSIZE) { errno= EBADF; return -1; }
afd= &asyn->asyn_afd[fd];
if (!afd->afd_seen) {
if ((afd->afd_flags= fcntl(fd, F_GETFL)) < 0) return -1;
afd->afd_seen= 1;
}
if (afd->afd_state[SEL_WRITE] == PENDING) {
sigset_t mask;
ssize_t result;
int err;
sigemptyset(&mask);
if (sigprocmask(SIG_SETMASK, &mask, &mask) < 0) return -1;
(void) fcntl(fd, F_SETFL, afd->afd_flags | O_NONBLOCK);
result= write(fd, buf, len);
err= errno;
(void) fcntl(fd, F_SETFL, afd->afd_flags);
(void) sigprocmask(SIG_SETMASK, &mask, nil);
errno= err;
if (result != -1 || errno != EAGAIN) {
afd->afd_state[SEL_WRITE]= IDLE;
return result;
}
}
afd->afd_state[SEL_WRITE]= WAITING;
FD_SET(fd, &asyn->asyn_fdset[SEL_WRITE]);
errno= EAGAIN;
asyn->asyn_more--;
return -1;
}


@@ -1,43 +0,0 @@
/* asynchio.h - Asynchronous I/O Author: Kees J. Bot
* 7 Jul 1997
* Minix-vmd compatible asynchio(3) using BSD select(2).
*/
#ifndef _SYS__ASYNCHIO_H
#define _SYS__ASYNCHIO_H
#include <sys/select.h> /* for FD_SETSIZE */
#define SEL_READ 0 /* Code for a read. */
#define SEL_WRITE 1 /* Code for a write. */
#define SEL_EXCEPT 2 /* Code for some exception. */
#define SEL_NR 3 /* Number of codes. */
struct _asynfd {
int afd_seen; /* Set if we manage this descriptor. */
int afd_flags; /* File flags by fcntl(fd, F_GETFL). */
int afd_state[SEL_NR]; /* Operation state. */
};
typedef struct {
int asyn_more; /* Set if more to do before blocking. */
struct _asynfd asyn_afd[FD_SETSIZE];
fd_set asyn_fdset[SEL_NR]; /* Select() fd sets. */
} asynchio_t;
#define ASYN_INPROGRESS EAGAIN /* Errno code telling "nothing yet." */
#define ASYN_NONBLOCK 0x01 /* If asyn_wait() mustn't block. */
struct timeval;
void asyn_init(asynchio_t *_asyn);
ssize_t asyn_read(asynchio_t *_asyn, int _fd, void *_buf, size_t _len);
ssize_t asyn_write(asynchio_t *_asyn, int _fd, const void *_buf, size_t _len);
int asyn_special(asynchio_t *_asyn, int _fd, int _op);
int asyn_result(asynchio_t *_asyn, int _fd, int _op, int _result);
int asyn_wait(asynchio_t *_asyn, int _flags, struct timeval *to);
int asyn_cancel(asynchio_t *_asyn, int _fd, int _op);
int asyn_pending(asynchio_t *_asyn, int _fd, int _op);
int asyn_synch(asynchio_t *_asyn, int _fd);
int asyn_close(asynchio_t *_asyn, int _fd);
#endif /* _SYS__ASYNCHIO_H */
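
Taken together, the header above defines the whole programming model: issue asyn_* operations until they fail with ASYN_INPROGRESS, then call asyn_wait() to block in select() until one of them can make progress. A minimal usage sketch, with a hypothetical descriptor and buffer and most error handling trimmed:

/* Sketch only: 'fd' is assumed to be an already open descriptor. */
#include <sys/asynchio.h>
#include <errno.h>
#include <unistd.h>

static char buf[1024];

int pump(int fd)
{
	asynchio_t asyn;
	ssize_t n;

	asyn_init(&asyn);
	for (;;) {
		while ((n = asyn_read(&asyn, fd, buf, sizeof(buf))) > 0) {
			/* consume n bytes of buf here */
		}
		if (n == 0) break;				/* end of file */
		if (errno != ASYN_INPROGRESS) return -1;	/* real error */
		/* Nothing ready yet: block until select() reports activity. */
		if (asyn_wait(&asyn, 0, NULL) < 0 && errno != EINTR)
			return -1;
	}
	return asyn_close(&asyn, fd);
}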


@@ -1,9 +0,0 @@
# Makefile for the common audio framework
NOGCCERROR=yes
NOCLANGERROR=yes
CPPFLAGS+= -D_MINIX_SYSTEM
LIB= audiodriver
SRCS= audio_fw.c liveupdate.c
.include <bsd.lib.mk>


@@ -1,870 +0,0 @@
/* Best viewed with tabsize 4
*
* This file contains a standard driver for audio devices.
* It supports double dma buffering and can be configured to use
* extra buffer space beside the dma buffer.
* This driver also supports sub devices, which can be independently
* opened and closed.
*
* The file contains one entry point:
*
* main: main entry when driver is brought up
*
* October 2007 Updated audio framework to work with mplayer, added
* savecopies (Pieter Hijma)
* February 2006 Updated audio framework,
* changed driver-framework relation (Peter Boonstoppel)
* November 2005 Created generic DMA driver framework (Laurens Bronwasser)
* August 24 2005 Ported audio driver to user space
* (only audio playback) (Peter Boonstoppel)
* May 20 1995 SB16 Driver: Michel R. Prevenier
*/
#include <minix/audio_fw.h>
#include <minix/endpoint.h>
#include <minix/ds.h>
#include <sys/ioccom.h>
static int msg_open(devminor_t minor_dev_nr, int access,
endpoint_t user_endpt);
static int msg_close(devminor_t minor_dev_nr);
static ssize_t msg_read(devminor_t minor, u64_t position, endpoint_t endpt,
cp_grant_id_t grant, size_t size, int flags, cdev_id_t id);
static ssize_t msg_write(devminor_t minor, u64_t position, endpoint_t endpt,
cp_grant_id_t grant, size_t size, int flags, cdev_id_t id);
static int msg_ioctl(devminor_t minor, unsigned long request, endpoint_t endpt,
cp_grant_id_t grant, int flags, endpoint_t user_endpt, cdev_id_t id);
static void msg_hardware(unsigned int mask);
static int open_sub_dev(int sub_dev_nr, int operation);
static int close_sub_dev(int sub_dev_nr);
static void handle_int_write(int sub_dev_nr);
static void handle_int_read(int sub_dev_nr);
static void data_to_user(sub_dev_t *sub_dev_ptr);
static void data_from_user(sub_dev_t *sub_dev_ptr);
static int init_buffers(sub_dev_t *sub_dev_ptr);
static int get_started(sub_dev_t *sub_dev_ptr);
static int io_ctl_length(int io_request);
static special_file_t* get_special_file(int minor_dev_nr);
static void tell_dev(vir_bytes buf, size_t size, int pci_bus,
int pci_dev, int pci_func);
static char io_ctl_buf[IOCPARM_MASK];
static int irq_hook_id = 0; /* id of irq hook at the kernel */
static int irq_hook_set = FALSE;
/* SEF functions and variables. */
static void sef_local_startup(void);
static int sef_cb_init_fresh(int type, sef_init_info_t *info);
static void sef_cb_signal_handler(int signo);
EXTERN int sef_cb_lu_prepare(int state);
EXTERN int sef_cb_lu_state_isvalid(int state);
EXTERN void sef_cb_lu_state_dump(int state);
static struct chardriver audio_tab = {
.cdr_open = msg_open, /* open the special file */
.cdr_close = msg_close, /* close the special file */
.cdr_read = msg_read,
.cdr_write = msg_write,
.cdr_ioctl = msg_ioctl,
.cdr_intr = msg_hardware
};
int main(void)
{
int r, caller;
message mess, repl_mess;
int ipc_status;
/* SEF local startup. */
sef_local_startup();
/* Here is the main loop of the dma driver. It waits for a message,
carries it out, and sends a reply. */
chardriver_task(&audio_tab);
return 0;
}
/*===========================================================================*
* sef_local_startup *
*===========================================================================*/
static void sef_local_startup(void)
{
/* Register init callbacks. */
sef_setcb_init_fresh(sef_cb_init_fresh);
sef_setcb_init_lu(sef_cb_init_fresh);
sef_setcb_init_restart(sef_cb_init_fresh);
/* Register live update callbacks. */
sef_setcb_lu_prepare(sef_cb_lu_prepare);
sef_setcb_lu_state_isvalid(sef_cb_lu_state_isvalid);
sef_setcb_lu_state_dump(sef_cb_lu_state_dump);
/* Register signal callbacks. */
sef_setcb_signal_handler(sef_cb_signal_handler);
/* Let SEF perform startup. */
sef_startup();
}
/*===========================================================================*
* sef_cb_init_fresh *
*===========================================================================*/
static int sef_cb_init_fresh(int UNUSED(type), sef_init_info_t *UNUSED(info))
{
/* Initialize the audio driver framework. */
u32_t i; char irq;
static int executed = 0;
sub_dev_t* sub_dev_ptr;
/* initialize basic driver variables */
if (drv_init() != OK) {
printf("libaudiodriver: Could not initialize driver\n");
return EIO;
}
/* init variables, get dma buffers */
for (i = 0; i < drv.NrOfSubDevices; i++) {
sub_dev_ptr = &sub_dev[i];
sub_dev_ptr->Opened = FALSE;
sub_dev_ptr->DmaBusy = FALSE;
sub_dev_ptr->DmaMode = NO_DMA;
sub_dev_ptr->DmaReadNext = 0;
sub_dev_ptr->DmaFillNext = 0;
sub_dev_ptr->DmaLength = 0;
sub_dev_ptr->BufReadNext = 0;
sub_dev_ptr->BufFillNext = 0;
sub_dev_ptr->RevivePending = FALSE;
sub_dev_ptr->OutOfData = FALSE;
sub_dev_ptr->Nr = i;
}
/* initialize hardware*/
if (drv_init_hw() != OK) {
printf("%s: Could not initialize hardware\n", drv.DriverName);
return EIO;
}
/* get irq from device driver...*/
if (drv_get_irq(&irq) != OK) {
printf("%s: init driver couldn't get IRQ", drv.DriverName);
return EIO;
}
/* TODO: execute the rest of this function only once
we don't want to set irq policy twice */
if (executed) return OK;
executed = TRUE;
/* ...and register interrupt vector */
if ((i=sys_irqsetpolicy(irq, 0, &irq_hook_id )) != OK){
printf("%s: init driver couldn't set IRQ policy: %d", drv.DriverName, i);
return EIO;
}
irq_hook_set = TRUE; /* now signal handler knows it must unregister policy*/
/* Announce we are up! */
chardriver_announce();
return OK;
}
/*===========================================================================*
* sef_cb_signal_handler *
*===========================================================================*/
static void sef_cb_signal_handler(int signo)
{
int i;
char irq;
/* Only check for termination signal, ignore anything else. */
if (signo != SIGTERM) return;
for (i = 0; i < drv.NrOfSubDevices; i++) {
drv_stop(i); /* stop all sub devices */
}
if (irq_hook_set) {
if (sys_irqdisable(&irq_hook_id) != OK) {
printf("Could not disable IRQ\n");
}
/* get irq from device driver*/
if (drv_get_irq(&irq) != OK) {
printf("Msg SIG_STOP Couldn't get IRQ");
}
/* remove the policy */
if (sys_irqrmpolicy(&irq_hook_id) != OK) {
printf("%s: Could not disable IRQ\n",drv.DriverName);
}
}
}
static int msg_open(devminor_t minor_dev_nr, int UNUSED(access),
endpoint_t UNUSED(user_endpt))
{
int r, read_chan, write_chan, io_ctl;
special_file_t* special_file_ptr;
special_file_ptr = get_special_file(minor_dev_nr);
if(special_file_ptr == NULL) {
return EIO;
}
read_chan = special_file_ptr->read_chan;
write_chan = special_file_ptr->write_chan;
io_ctl = special_file_ptr->io_ctl;
if (read_chan==NO_CHANNEL && write_chan==NO_CHANNEL && io_ctl==NO_CHANNEL) {
printf("%s: No channel specified for minor device %d!\n",
drv.DriverName, minor_dev_nr);
return EIO;
}
if (read_chan == write_chan && read_chan != NO_CHANNEL) {
printf("%s: Read and write channels are equal: %d!\n",
drv.DriverName, minor_dev_nr);
return EIO;
}
/* open the sub devices specified in the interface header file */
if (write_chan != NO_CHANNEL) {
/* open sub device for writing */
if (open_sub_dev(write_chan, WRITE_DMA) != OK) return EIO;
}
if (read_chan != NO_CHANNEL) {
if (open_sub_dev(read_chan, READ_DMA) != OK) return EIO;
}
if (read_chan == io_ctl || write_chan == io_ctl) {
/* io_ctl is already opened because it's the same as read or write */
return OK; /* we're done */
}
if (io_ctl != NO_CHANNEL) { /* Ioctl differs from read/write channels, */
r = open_sub_dev(io_ctl, NO_DMA); /* open it explicitly */
if (r != OK) return EIO;
}
return OK;
}
static int open_sub_dev(int sub_dev_nr, int dma_mode) {
sub_dev_t* sub_dev_ptr;
sub_dev_ptr = &sub_dev[sub_dev_nr];
/* Only one open at a time per sub device */
if (sub_dev_ptr->Opened) {
printf("%s: Sub device %d is already opened\n",
drv.DriverName, sub_dev_nr);
return EBUSY;
}
if (sub_dev_ptr->DmaBusy) {
printf("%s: Sub device %d is still busy\n", drv.DriverName, sub_dev_nr);
return EBUSY;
}
/* Setup variables */
sub_dev_ptr->Opened = TRUE;
sub_dev_ptr->DmaReadNext = 0;
sub_dev_ptr->DmaFillNext = 0;
sub_dev_ptr->DmaLength = 0;
sub_dev_ptr->DmaMode = dma_mode;
sub_dev_ptr->BufReadNext = 0;
sub_dev_ptr->BufFillNext = 0;
sub_dev_ptr->BufLength = 0;
sub_dev_ptr->RevivePending = FALSE;
sub_dev_ptr->OutOfData = TRUE;
/* arrange DMA */
if (dma_mode != NO_DMA) { /* sub device uses DMA */
/* allocate dma buffer and extra buffer space
and configure sub device for dma */
if (init_buffers(sub_dev_ptr) != OK ) return EIO;
}
return OK;
}
static int msg_close(devminor_t minor_dev_nr)
{
int r, read_chan, write_chan, io_ctl;
special_file_t* special_file_ptr;
special_file_ptr = get_special_file(minor_dev_nr);
if(special_file_ptr == NULL) {
return EIO;
}
read_chan = special_file_ptr->read_chan;
write_chan = special_file_ptr->write_chan;
io_ctl = special_file_ptr->io_ctl;
r= OK;
/* close all sub devices */
if (write_chan != NO_CHANNEL) {
if (close_sub_dev(write_chan) != OK) r = EIO;
}
if (read_chan != NO_CHANNEL) {
if (close_sub_dev(read_chan) != OK) r = EIO;
}
if (read_chan == io_ctl || write_chan == io_ctl) {
/* io_ctl is already closed because it's the same as read or write */
return r; /* we're done */
}
/* ioctl differs from read/write channels... */
if (io_ctl != NO_CHANNEL) {
if (close_sub_dev(io_ctl) != OK) r = EIO; /* ...close it explicitly */
}
return r;
}
static int close_sub_dev(int sub_dev_nr) {
size_t size;
sub_dev_t *sub_dev_ptr;
sub_dev_ptr = &sub_dev[sub_dev_nr];
if (sub_dev_ptr->DmaMode == WRITE_DMA && !sub_dev_ptr->OutOfData) {
/* do nothing, still data in buffers that has to be transferred */
sub_dev_ptr->Opened = FALSE; /* keep DMA busy */
return OK;
}
if (sub_dev_ptr->DmaMode == NO_DMA) {
/* do nothing, there is no dma going on */
sub_dev_ptr->Opened = FALSE;
return OK;
}
sub_dev_ptr->Opened = FALSE;
sub_dev_ptr->DmaBusy = FALSE;
/* stop the device */
drv_stop(sub_dev_ptr->Nr);
/* free the buffers */
size= sub_dev_ptr->DmaSize + 64 * 1024;
free_contig(sub_dev_ptr->DmaBuf, size);
free(sub_dev_ptr->ExtraBuf);
return OK;
}
static int msg_ioctl(devminor_t minor, unsigned long request, endpoint_t endpt,
cp_grant_id_t grant, int flags, endpoint_t user_endpt, cdev_id_t id)
{
int status, len, chan;
sub_dev_t *sub_dev_ptr;
special_file_t* special_file_ptr;
special_file_ptr = get_special_file(minor);
if(special_file_ptr == NULL) {
return EIO;
}
chan = special_file_ptr->io_ctl;
if (chan == NO_CHANNEL) {
printf("%s: No io control channel specified!\n", drv.DriverName);
return EIO;
}
/* get pointer to sub device data */
sub_dev_ptr = &sub_dev[chan];
if(!sub_dev_ptr->Opened) {
printf("%s: io control impossible - not opened!\n", drv.DriverName);
return EIO;
}
if (request & IOC_IN) { /* if there is data for us, copy it */
len = io_ctl_length(request);
if (sys_safecopyfrom(endpt, grant, 0, (vir_bytes)io_ctl_buf,
len) != OK) {
printf("%s:%d: safecopyfrom failed\n", __FILE__, __LINE__);
}
}
/* all ioctl's are passed to the device specific part of the driver */
status = drv_io_ctl(request, (void *)io_ctl_buf, &len, chan);
/* IOC_OUT bit -> user expects data */
if (status == OK && request & IOC_OUT) {
/* copy result back to user */
if (sys_safecopyto(endpt, grant, 0, (vir_bytes)io_ctl_buf,
len) != OK) {
printf("%s:%d: safecopyto failed\n", __FILE__, __LINE__);
}
}
return status;
}
static ssize_t msg_write(devminor_t minor, u64_t UNUSED(position),
endpoint_t endpt, cp_grant_id_t grant, size_t size, int UNUSED(flags),
cdev_id_t id)
{
int chan; sub_dev_t *sub_dev_ptr;
special_file_t* special_file_ptr;
special_file_ptr = get_special_file(minor);
chan = special_file_ptr->write_chan;
if (chan == NO_CHANNEL) {
printf("%s: No write channel specified!\n", drv.DriverName);
return EIO;
}
/* get pointer to sub device data */
sub_dev_ptr = &sub_dev[chan];
if (!sub_dev_ptr->DmaBusy) { /* get fragment size on first write */
if (drv_get_frag_size(&(sub_dev_ptr->FragSize), sub_dev_ptr->Nr) != OK){
printf("%s; Failed to get fragment size!\n", drv.DriverName);
return EIO;
}
}
if(size != sub_dev_ptr->FragSize) {
printf("Fragment size does not match user's buffer length\n");
return EINVAL;
}
/* if we are busy with something else than writing, return EBUSY */
if(sub_dev_ptr->DmaBusy && sub_dev_ptr->DmaMode != WRITE_DMA) {
printf("Already busy with something else than writing\n");
return EBUSY;
}
sub_dev_ptr->RevivePending = TRUE;
sub_dev_ptr->ReviveId = id;
sub_dev_ptr->ReviveGrant = grant;
sub_dev_ptr->SourceProcNr = endpt;
data_from_user(sub_dev_ptr);
if(!sub_dev_ptr->DmaBusy) { /* Dma transfer not yet started */
get_started(sub_dev_ptr);
sub_dev_ptr->DmaMode = WRITE_DMA; /* Dma mode is writing */
}
/* We may already have replied by now. In any case don't reply here. */
return EDONTREPLY;
}
static ssize_t msg_read(devminor_t minor, u64_t UNUSED(position),
endpoint_t endpt, cp_grant_id_t grant, size_t size, int UNUSED(flags),
cdev_id_t id)
{
int chan; sub_dev_t *sub_dev_ptr;
special_file_t* special_file_ptr;
special_file_ptr = get_special_file(minor);
chan = special_file_ptr->read_chan;
if (chan == NO_CHANNEL) {
printf("%s: No read channel specified!\n", drv.DriverName);
return EIO;
}
/* get pointer to sub device data */
sub_dev_ptr = &sub_dev[chan];
if (!sub_dev_ptr->DmaBusy) { /* get fragment size on first read */
if (drv_get_frag_size(&(sub_dev_ptr->FragSize), sub_dev_ptr->Nr) != OK){
printf("%s: Could not retrieve fragment size!\n", drv.DriverName);
return EIO;
}
}
if(size != sub_dev_ptr->FragSize) {
printf("fragment size does not match message size\n");
return EINVAL;
}
/* if we are busy with something else than reading, reply EBUSY */
if(sub_dev_ptr->DmaBusy && sub_dev_ptr->DmaMode != READ_DMA) {
return EBUSY;
}
sub_dev_ptr->RevivePending = TRUE;
sub_dev_ptr->ReviveId = id;
sub_dev_ptr->ReviveGrant = grant;
sub_dev_ptr->SourceProcNr = endpt;
if(!sub_dev_ptr->DmaBusy) { /* Dma transfer not yet started */
get_started(sub_dev_ptr);
sub_dev_ptr->DmaMode = READ_DMA; /* Dma mode is reading */
/* no need to get data from DMA buffer at this point */
return EDONTREPLY;
}
/* check if data is available and possibly fill user's buffer */
data_to_user(sub_dev_ptr);
/* We may already have replied by now. In any case don't reply here. */
return EDONTREPLY;
}
static void msg_hardware(unsigned int UNUSED(mask))
{
u32_t i;
/* loop over all sub devices */
for ( i = 0; i < drv.NrOfSubDevices; i++) {
/* if interrupt from sub device and Dma transfer
was actually busy, take care of business */
if( drv_int(i) && sub_dev[i].DmaBusy ) {
if (sub_dev[i].DmaMode == WRITE_DMA)
handle_int_write(i);
if (sub_dev[i].DmaMode == READ_DMA)
handle_int_read(i);
}
}
/* As IRQ_REENABLE is not on in sys_irqsetpolicy, we must
* re-enable our interrupt after every interrupt.
*/
if ((sys_irqenable(&irq_hook_id)) != OK) {
printf("%s: msg_hardware: Couldn't enable IRQ\n", drv.DriverName);
}
}
/* handle interrupt for specified sub device; DmaMode == WRITE_DMA */
static void handle_int_write(int sub_dev_nr)
{
sub_dev_t *sub_dev_ptr;
sub_dev_ptr = &sub_dev[sub_dev_nr];
sub_dev_ptr->DmaReadNext =
(sub_dev_ptr->DmaReadNext + 1) % sub_dev_ptr->NrOfDmaFragments;
sub_dev_ptr->DmaLength -= 1;
if (sub_dev_ptr->BufLength != 0) { /* Data in extra buf, copy to Dma buf */
memcpy(sub_dev_ptr->DmaPtr +
sub_dev_ptr->DmaFillNext * sub_dev_ptr->FragSize,
sub_dev_ptr->ExtraBuf +
sub_dev_ptr->BufReadNext * sub_dev_ptr->FragSize,
sub_dev_ptr->FragSize);
sub_dev_ptr->BufReadNext =
(sub_dev_ptr->BufReadNext + 1) % sub_dev_ptr->NrOfExtraBuffers;
sub_dev_ptr->DmaFillNext =
(sub_dev_ptr->DmaFillNext + 1) % sub_dev_ptr->NrOfDmaFragments;
sub_dev_ptr->BufLength -= 1;
sub_dev_ptr->DmaLength += 1;
}
/* space became available, possibly copy new data from user */
data_from_user(sub_dev_ptr);
if(sub_dev_ptr->DmaLength == 0) { /* Dma buffer empty, stop Dma transfer */
sub_dev_ptr->OutOfData = TRUE; /* we're out of data */
if (!sub_dev_ptr->Opened) {
close_sub_dev(sub_dev_ptr->Nr);
return;
}
drv_pause(sub_dev_ptr->Nr);
return;
}
/* confirm and reenable interrupt from this sub dev */
drv_reenable_int(sub_dev_nr);
#if 0
/* reenable irq_hook*/
if (sys_irqenable(&irq_hook_id) != OK) {
printf("%s Couldn't enable IRQ\n", drv.DriverName);
}
#endif
}
/* handle interrupt for specified sub device; DmaMode == READ_DMA */
static void handle_int_read(int sub_dev_nr)
{
sub_dev_t *sub_dev_ptr;
message m;
sub_dev_ptr = &sub_dev[sub_dev_nr];
sub_dev_ptr->DmaLength += 1;
sub_dev_ptr->DmaFillNext =
(sub_dev_ptr->DmaFillNext + 1) % sub_dev_ptr->NrOfDmaFragments;
/* possibly copy data to user (if it is waiting for us) */
data_to_user(sub_dev_ptr);
if (sub_dev_ptr->DmaLength == sub_dev_ptr->NrOfDmaFragments) {
/* if dma buffer full */
if (sub_dev_ptr->BufLength == sub_dev_ptr->NrOfExtraBuffers) {
printf("All buffers full, we have a problem.\n");
drv_stop(sub_dev_nr); /* stop the sub device */
sub_dev_ptr->DmaBusy = FALSE;
/* no data for user, this is a sad story */
chardriver_reply_task(sub_dev_ptr->SourceProcNr,
sub_dev_ptr->ReviveId, 0);
return;
}
else { /* dma full, still room in extra buf;
copy from dma to extra buf */
memcpy(sub_dev_ptr->ExtraBuf +
sub_dev_ptr->BufFillNext * sub_dev_ptr->FragSize,
sub_dev_ptr->DmaPtr +
sub_dev_ptr->DmaReadNext * sub_dev_ptr->FragSize,
sub_dev_ptr->FragSize);
sub_dev_ptr->DmaLength -= 1;
sub_dev_ptr->DmaReadNext =
(sub_dev_ptr->DmaReadNext + 1) % sub_dev_ptr->NrOfDmaFragments;
sub_dev_ptr->BufFillNext =
(sub_dev_ptr->BufFillNext + 1) % sub_dev_ptr->NrOfExtraBuffers;
}
}
/* confirm interrupt, and reenable interrupt from this sub dev*/
drv_reenable_int(sub_dev_ptr->Nr);
#if 0
/* reenable irq_hook*/
if (sys_irqenable(&irq_hook_id) != OK) {
printf("%s: Couldn't reenable IRQ", drv.DriverName);
}
#endif
}
static int get_started(sub_dev_t *sub_dev_ptr) {
u32_t i;
/* enable interrupt messages from MINIX */
if ((i=sys_irqenable(&irq_hook_id)) != OK) {
printf("%s: Couldn't enable IRQs: error code %u",drv.DriverName, (unsigned int) i);
return EIO;
}
/* let the lower part of the driver start the device */
if (drv_start(sub_dev_ptr->Nr, sub_dev_ptr->DmaMode) != OK) {
printf("%s: Could not start device %d\n",
drv.DriverName, sub_dev_ptr->Nr);
}
sub_dev_ptr->DmaBusy = TRUE; /* Dma is busy from now on */
sub_dev_ptr->DmaReadNext = 0;
return OK;
}
static void data_from_user(sub_dev_t *subdev)
{
int r;
message m;
if (subdev->DmaLength == subdev->NrOfDmaFragments &&
subdev->BufLength == subdev->NrOfExtraBuffers) return;/* no space */
if (!subdev->RevivePending) return; /* no new data waiting to be copied */
if (subdev->DmaLength < subdev->NrOfDmaFragments) { /* room in dma buf */
r = sys_safecopyfrom(subdev->SourceProcNr,
(vir_bytes)subdev->ReviveGrant, 0,
(vir_bytes)subdev->DmaPtr +
subdev->DmaFillNext * subdev->FragSize,
(phys_bytes)subdev->FragSize);
if (r != OK)
printf("%s:%d: safecopy failed\n", __FILE__, __LINE__);
subdev->DmaLength += 1;
subdev->DmaFillNext =
(subdev->DmaFillNext + 1) % subdev->NrOfDmaFragments;
} else { /* room in extra buf */
r = sys_safecopyfrom(subdev->SourceProcNr,
(vir_bytes)subdev->ReviveGrant, 0,
(vir_bytes)subdev->ExtraBuf +
subdev->BufFillNext * subdev->FragSize,
(phys_bytes)subdev->FragSize);
if (r != OK)
printf("%s:%d: safecopy failed\n", __FILE__, __LINE__);
subdev->BufLength += 1;
subdev->BufFillNext =
(subdev->BufFillNext + 1) % subdev->NrOfExtraBuffers;
}
if(subdev->OutOfData) { /* if device paused (because of lack of data) */
subdev->OutOfData = FALSE;
drv_reenable_int(subdev->Nr);
/* reenable irq_hook*/
if ((sys_irqenable(&irq_hook_id)) != OK) {
printf("%s: Couldn't enable IRQ", drv.DriverName);
}
drv_resume(subdev->Nr); /* resume the sub device */
}
chardriver_reply_task(subdev->SourceProcNr, subdev->ReviveId,
subdev->FragSize);
/* reset variables */
subdev->RevivePending = 0;
}
static void data_to_user(sub_dev_t *sub_dev_ptr)
{
int r;
message m;
if (!sub_dev_ptr->RevivePending) return; /* nobody is waiting for data */
if (sub_dev_ptr->BufLength == 0 && sub_dev_ptr->DmaLength == 0) return;
/* no data for user */
if(sub_dev_ptr->BufLength != 0) { /* data in extra buffer available */
r = sys_safecopyto(sub_dev_ptr->SourceProcNr,
(vir_bytes)sub_dev_ptr->ReviveGrant,
0, (vir_bytes)sub_dev_ptr->ExtraBuf +
sub_dev_ptr->BufReadNext * sub_dev_ptr->FragSize,
(phys_bytes)sub_dev_ptr->FragSize);
if (r != OK)
printf("%s:%d: safecopy failed\n", __FILE__, __LINE__);
/* adjust the buffer status variables */
sub_dev_ptr->BufReadNext =
(sub_dev_ptr->BufReadNext + 1) % sub_dev_ptr->NrOfExtraBuffers;
sub_dev_ptr->BufLength -= 1;
} else { /* extra buf empty, but data in dma buf*/
r = sys_safecopyto(
sub_dev_ptr->SourceProcNr,
(vir_bytes)sub_dev_ptr->ReviveGrant, 0,
(vir_bytes)sub_dev_ptr->DmaPtr +
sub_dev_ptr->DmaReadNext * sub_dev_ptr->FragSize,
(phys_bytes)sub_dev_ptr->FragSize);
if (r != OK)
printf("%s:%d: safecopy failed\n", __FILE__, __LINE__);
/* adjust the buffer status variables */
sub_dev_ptr->DmaReadNext =
(sub_dev_ptr->DmaReadNext + 1) % sub_dev_ptr->NrOfDmaFragments;
sub_dev_ptr->DmaLength -= 1;
}
chardriver_reply_task(sub_dev_ptr->SourceProcNr, sub_dev_ptr->ReviveId,
sub_dev_ptr->FragSize);
/* reset variables */
sub_dev_ptr->RevivePending = 0;
}
static int init_buffers(sub_dev_t *sub_dev_ptr)
{
#if defined(__i386__)
char *base;
size_t size;
unsigned left;
u32_t i;
phys_bytes ph;
/* allocate dma buffer space */
size= sub_dev_ptr->DmaSize + 64 * 1024;
base= alloc_contig(size, AC_ALIGN64K|AC_LOWER16M, &ph);
if (!base) {
printf("%s: failed to allocate dma buffer for a channel\n",
drv.DriverName);
return EIO;
}
sub_dev_ptr->DmaBuf= base;
tell_dev((vir_bytes)base, size, 0, 0, 0);
/* allocate extra buffer space */
if (!(sub_dev_ptr->ExtraBuf = malloc(sub_dev_ptr->NrOfExtraBuffers *
sub_dev_ptr->DmaSize /
sub_dev_ptr->NrOfDmaFragments))) {
printf("%s failed to allocate extra buffer for a channel\n",
drv.DriverName);
return EIO;
}
sub_dev_ptr->DmaPtr = sub_dev_ptr->DmaBuf;
i = sys_umap(SELF, VM_D, (vir_bytes) base, (phys_bytes) size,
&(sub_dev_ptr->DmaPhys));
if (i != OK) {
return EIO;
}
if ((left = dma_bytes_left(sub_dev_ptr->DmaPhys)) <
sub_dev_ptr->DmaSize) {
/* First half of buffer crosses a 64K boundary,
* can't DMA into that */
sub_dev_ptr->DmaPtr += left;
sub_dev_ptr->DmaPhys += left;
}
/* write the physical dma address and size to the device */
drv_set_dma(sub_dev_ptr->DmaPhys,
sub_dev_ptr->DmaSize, sub_dev_ptr->Nr);
return OK;
#else /* !defined(__i386__) */
printf("%s: init_buffers() failed, CHIP != INTEL", drv.DriverName);
return EIO;
#endif /* defined(__i386__) */
}
static int io_ctl_length(int io_request) {
io_request >>= 16;
return io_request & IOCPARM_MASK;
}
static special_file_t* get_special_file(int minor_dev_nr) {
int i;
for(i = 0; i < drv.NrOfSpecialFiles; i++) {
if(special_file[i].minor_dev_nr == minor_dev_nr) {
return &special_file[i];
}
}
printf("%s: No subdevice specified for minor device %d!\n",
drv.DriverName, minor_dev_nr);
return NULL;
}
static void tell_dev(vir_bytes buf, size_t size, int pci_bus,
int pci_dev, int pci_func)
{
int r;
endpoint_t dev_e;
message m;
r= ds_retrieve_label_endpt("amddev", &dev_e);
if (r != OK)
{
#if 0
printf("tell_dev: ds_retrieve_label_endpt failed for 'amddev': %d\n",
r);
#endif
return;
}
m.m_type= IOMMU_MAP;
m.m2_i1= pci_bus;
m.m2_i2= pci_dev;
m.m2_i3= pci_func;
m.m2_l1= buf;
m.m2_l2= size;
r= ipc_sendrec(dev_e, &m);
if (r != OK)
{
printf("tell_dev: ipc_sendrec to %d failed: %d\n", dev_e, r);
return;
}
if (m.m_type != OK)
{
printf("tell_dev: dma map request failed: %d\n", m.m_type);
return;
}
}
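
The framework above supplies main(), the message loop, buffer management and the live update callbacks; a concrete audio driver links against libaudiodriver and provides the drv/sub_dev tables plus the drv_* hooks called from this file. A rough sketch of that device-specific half follows; the hook names and structure fields are taken from the calls above, but the exact prototypes are assumptions rather than copies from audio_fw.h:

#include <minix/audio_fw.h>

#define MYDEV_IRQ 5	/* hypothetical interrupt line */

int drv_init(void)
{
	/* Fill in the global 'drv' and 'sub_dev[]' tables declared in
	 * audio_fw.h; only a few of the fields used above are shown, and
	 * drv.DriverName is omitted because its exact type is not visible here.
	 */
	drv.NrOfSubDevices = 1;
	drv.NrOfSpecialFiles = 1;
	sub_dev[0].NrOfDmaFragments = 2;
	sub_dev[0].NrOfExtraBuffers = 4;
	sub_dev[0].DmaSize = 2 * 8192;
	return OK;
}

int drv_init_hw(void) { /* reset and program the hardware */ return OK; }

int drv_get_irq(char *irq) { *irq = MYDEV_IRQ; return OK; }

int drv_start(int sub_dev_nr, int dma_mode)
{
	/* Point the device at the physical address handed over earlier via
	 * drv_set_dma() and start the transfer in the given direction.
	 */
	return OK;
}

int drv_int(int sub_dev_nr) { return 1; }		/* did this sub device interrupt? */

int drv_reenable_int(int sub_dev_nr) { return OK; }	/* acknowledge the device interrupt */

The remaining hooks referenced above (drv_stop, drv_pause, drv_resume, drv_set_dma, drv_get_frag_size, drv_io_ctl) follow the same pattern.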


@@ -1,109 +0,0 @@
#include <minix/audio_fw.h>
/*
* - From audio_fw.h:
* EXTERN drv_t drv;
* EXTERN sub_dev_t sub_dev[];
*/
/* State management helpers */
static int is_read_pending;
static int is_write_pending;
static void load_state_info(void)
{
int i, dma_mode, found_pending;
/* Check if reads or writes are pending. */
is_read_pending = FALSE;
is_write_pending = FALSE;
found_pending = FALSE;
for (i = 0; i < drv.NrOfSubDevices && !found_pending; i++) {
if(sub_dev[i].RevivePending) {
dma_mode = sub_dev[i].DmaMode;
if(dma_mode == READ_DMA) {
is_read_pending = TRUE;
}
else if (dma_mode == WRITE_DMA){
is_write_pending = TRUE;
}
}
found_pending = (is_read_pending && is_write_pending);
}
}
/* Custom states definition. */
#define AUDIO_STATE_READ_REQUEST_FREE (SEF_LU_STATE_CUSTOM_BASE + 0)
#define AUDIO_STATE_WRITE_REQUEST_FREE (SEF_LU_STATE_CUSTOM_BASE + 1)
#define AUDIO_STATE_IS_CUSTOM(s) \
((s) >= AUDIO_STATE_READ_REQUEST_FREE && (s) <=AUDIO_STATE_WRITE_REQUEST_FREE)
/*===========================================================================*
* sef_cb_lu_prepare *
*===========================================================================*/
int sef_cb_lu_prepare(int state)
{
int is_ready;
/* Load state information. */
load_state_info();
/* Check if we are ready for the target state. */
is_ready = FALSE;
switch(state) {
/* Standard states. */
case SEF_LU_STATE_REQUEST_FREE:
is_ready = (!is_read_pending && !is_write_pending);
break;
case SEF_LU_STATE_PROTOCOL_FREE:
is_ready = (!is_read_pending && !is_write_pending);
break;
/* Custom states. */
case AUDIO_STATE_READ_REQUEST_FREE:
is_ready = (!is_read_pending);
break;
case AUDIO_STATE_WRITE_REQUEST_FREE:
is_ready = (!is_write_pending);
break;
}
/* Tell SEF if we are ready. */
return is_ready ? OK : ENOTREADY;
}
/*===========================================================================*
* sef_cb_lu_state_isvalid *
*===========================================================================*/
int sef_cb_lu_state_isvalid(int state)
{
return SEF_LU_STATE_IS_STANDARD(state) || AUDIO_STATE_IS_CUSTOM(state);
}
/*===========================================================================*
* sef_cb_lu_state_dump *
*===========================================================================*/
void sef_cb_lu_state_dump(int state)
{
/* Load state information. */
load_state_info();
sef_lu_dprint("audio: live update state = %d\n", state);
sef_lu_dprint("audio: is_read_pending = %d\n", is_read_pending);
sef_lu_dprint("audio: is_write_pending = %d\n", is_write_pending);
sef_lu_dprint("audio: SEF_LU_STATE_WORK_FREE(%d) reached = %d\n",
SEF_LU_STATE_WORK_FREE, TRUE);
sef_lu_dprint("audio: SEF_LU_STATE_REQUEST_FREE(%d) reached = %d\n",
SEF_LU_STATE_REQUEST_FREE, (!is_read_pending && !is_write_pending));
sef_lu_dprint("audio: SEF_LU_STATE_PROTOCOL_FREE(%d) reached = %d\n",
SEF_LU_STATE_PROTOCOL_FREE, (!is_read_pending && !is_write_pending));
sef_lu_dprint("audio: AUDIO_STATE_READ_REQUEST_FREE(%d) reached = %d\n",
AUDIO_STATE_READ_REQUEST_FREE, (!is_read_pending));
sef_lu_dprint("audio: AUDIO_STATE_WRITE_REQUEST_FREE(%d) reached = %d\n",
AUDIO_STATE_WRITE_REQUEST_FREE, (!is_write_pending));
}


@@ -1,12 +0,0 @@
NOGCCERROR=yes
NOCLANGERROR=yes
CPPFLAGS+= -D_MINIX_SYSTEM
# Makefile for libbdev
.include <bsd.own.mk>
LIB= bdev
SRCS= bdev.c driver.c call.c ipc.c minor.c
.include <bsd.lib.mk>


@@ -1,52 +0,0 @@
Development notes regarding libbdev, by David van Moolenbroek.
GENERAL MODEL
This library is designed mainly for use by file servers. It essentially covers
two use cases: 1) use of the block device that contains the file system itself,
and 2) use of any block device for raw block I/O (on unmounted file systems)
performed by the root file server. In the first case, the file server is
responsible for opening and closing the block device, and recovery from a
driver restart involves reopening those minor devices. Regular file systems
should have one or at most two (for a separate journal) block devices open at
the same time, which is why NR_OPEN_DEVS is set to a value that is quite low.
In the second case, VFS is responsible for opening and closing the block device
(and performing IOCTLs), as well as reopening the block device on a driver
restart -- the root file server only gets raw I/O (and flush) requests.
At this time, libbdev considers only clean crashes (a crash-only model), and
does not support recovery from behavioral errors. Protocol errors are passed to
the user process, and generally do not have an effect on the overall state of
the library.
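In the first case, a file server drives the library roughly as sketched below at mount time; the device, label and buffer are placeholders, the access and flag constants are assumed to come from <minix/bdev.h>, and error handling is reduced to the minimum:

#include <minix/drivers.h>
#include <minix/bdev.h>

static char superblock[1024];

int fs_readsuper(dev_t dev, char *label)
{
	int r;

	bdev_driver(dev, label);	/* associate the driver label with this major device */
	if ((r = bdev_open(dev, BDEV_R_BIT | BDEV_W_BIT)) != OK)
		return r;
	/* Synchronous single-buffer read at byte offset 1024. */
	if (bdev_read(dev, 1024, superblock, sizeof(superblock), BDEV_NOFLAGS) < 0) {
		bdev_close(dev);
		return EIO;
	}
	return OK;
}

The matching bdev_close() is issued at unmount time.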
RETRY MODEL
The philosophy for recovering from driver restarts in libbdev can be formulated
as follows: we want to tolerate an unlimited number of driver restarts over a
long time, but we do not want to keep retrying individual requests across
driver restarts. As such, we do not keep track of driver restarts on a per-
driver basis, because that would mean we put a hard limit on the number of
restarts for that driver in total. Instead, there are two limits: a driver
restart limit that is kept on a per-request basis, failing only that request
when the limit is reached, and a driver restart limit that is kept during
recovery, limiting the number of restarts and eventually giving up on the
entire driver when even the recovery keeps failing (as no progress is made in
that case).
Each transfer request also has a transfer retry count. The assumption here is
that when a transfer request returns EIO, it can be retried and possibly
succeed upon repetition. The driver restart and transfer retry counts are
tracked independently and thus the first to hit the limit will fail the
request. The behavior should be the same for synchronous and asynchronous
requests in this respect.
It could happen that a new driver gets loaded after we have decided that the
current driver is unusable. This could be due to a race condition (VFS sends a
new-driver request after we've given up) or due to user interaction (the user
loads a replacement driver). The latter case may occur legitimately with raw
I/O on the root file server, so we must not mark the driver as unusable
forever. On the other hand, in the former case, we must not continue to send
I/O without first reopening the minor devices. For this reason, we do not clean
up the record of the minor devices when we mark a driver as unusable.
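For the asynchronous interface, the caller supplies a callback that receives the final result once the retry handling described above has run its course. A sketch follows; the callback prototype is inferred from how bdev.c invokes call->callback(), and bdev_param_t is treated as an opaque per-request token, so both are assumptions rather than verbatim declarations from bdev.h:

#include <minix/drivers.h>
#include <minix/bdev.h>
#include <string.h>

static char blockbuf[4096];

static void read_done(dev_t dev, bdev_id_t id, bdev_param_t param, int result)
{
	/* 'result' is the transferred byte count, or a negative error code
	 * once the driver restart and transfer retry limits were exhausted.
	 */
	if (result < 0)
		printf("asynchronous read %d failed: %d\n", (int) id, result);
}

void start_read(dev_t dev, u64_t pos)
{
	bdev_id_t id;
	bdev_param_t param;

	memset(&param, 0, sizeof(param));	/* opaque context passed back to read_done() */
	id = bdev_read_asyn(dev, pos, blockbuf, sizeof(blockbuf), BDEV_NOFLAGS,
		read_done, param);
	if (id < 0)
		printf("could not queue asynchronous read: %d\n", (int) id);
	/* bdev_wait_asyn(id) or bdev_flush_asyn(dev) later drives read_done(). */
}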


@@ -1,641 +0,0 @@
/* libbdev - block device interfacing library, by D.C. van Moolenbroek */
#include <minix/drivers.h>
#include <minix/bdev.h>
#include <minix/ioctl.h>
#include <assert.h>
#include "const.h"
#include "type.h"
#include "proto.h"
void bdev_driver(dev_t dev, char *label)
{
/* Associate a driver with the given (major) device, using its endpoint.
* File system usage note: typically called from mount and newdriver.
*/
static int first = TRUE;
if (first) {
/* Initialize the driver endpoint array. */
bdev_driver_init();
first = FALSE;
}
bdev_update(dev, label);
}
static int bdev_retry(int *driver_tries, int *transfer_tries, int *result)
{
/* Return TRUE iff the call result implies that we should retry the operation.
*/
switch (*result) {
case ERESTART:
/* We get this error internally if the driver has restarted and the
* current operation may now go through. Check the retry count for
* driver restarts first, as we don't want to keep trying forever.
*/
if (++*driver_tries < DRIVER_TRIES)
return TRUE;
*result = EDEADSRCDST;
break;
case EIO:
/* The 'transfer_tries' pointer is non-NULL if this was a transfer
* request. If we get back an I/O failure, keep retrying the request
* until we hit the transfer retry limit.
*/
if (transfer_tries != NULL && ++*transfer_tries < TRANSFER_TRIES)
return TRUE;
break;
}
return FALSE;
}
static int bdev_opcl(int req, dev_t dev, int access)
{
/* Open or close the given minor device.
*/
message m;
int r, driver_tries = 0;
do {
memset(&m, 0, sizeof(m));
m.m_type = req;
m.m_lbdev_lblockdriver_msg.minor = minor(dev);
m.m_lbdev_lblockdriver_msg.access = access;
r = bdev_sendrec(dev, &m);
} while (bdev_retry(&driver_tries, NULL, &r));
return r;
}
int bdev_open(dev_t dev, int access)
{
/* Open the given minor device.
* File system usage note: typically called from mount, after bdev_driver.
*/
int r;
r = bdev_opcl(BDEV_OPEN, dev, access);
if (r == OK)
bdev_minor_add(dev, access);
return r;
}
int bdev_close(dev_t dev)
{
/* Close the given minor device.
* File system usage note: typically called from unmount.
*/
int r;
bdev_flush_asyn(dev);
r = bdev_opcl(BDEV_CLOSE, dev, 0);
if (r == OK)
bdev_minor_del(dev);
return r;
}
static int bdev_rdwt_setup(int req, dev_t dev, u64_t pos, char *buf,
size_t count, int flags, message *m)
{
/* Set up a single-buffer read/write request.
*/
endpoint_t endpt;
cp_grant_id_t grant;
int access;
assert((ssize_t) count >= 0);
if ((endpt = bdev_driver_get(dev)) == NONE)
return EDEADSRCDST;
access = (req == BDEV_READ) ? CPF_WRITE : CPF_READ;
grant = cpf_grant_direct(endpt, (vir_bytes) buf, count, access);
if (!GRANT_VALID(grant)) {
printf("bdev: unable to allocate grant!\n");
return EINVAL;
}
memset(m, 0, sizeof(*m));
m->m_type = req;
m->m_lbdev_lblockdriver_msg.minor = minor(dev);
m->m_lbdev_lblockdriver_msg.pos = pos;
m->m_lbdev_lblockdriver_msg.count = count;
m->m_lbdev_lblockdriver_msg.grant = grant;
m->m_lbdev_lblockdriver_msg.flags = flags;
return OK;
}
static void bdev_rdwt_cleanup(const message *m)
{
/* Clean up a single-buffer read/write request.
*/
cpf_revoke(m->m_lbdev_lblockdriver_msg.grant);
}
static ssize_t bdev_rdwt(int req, dev_t dev, u64_t pos, char *buf,
size_t count, int flags)
{
/* Perform a synchronous read or write call using a single buffer.
*/
message m;
int r, driver_tries = 0, transfer_tries = 0;
do {
if ((r = bdev_rdwt_setup(req, dev, pos, buf, count, flags, &m)) != OK)
break;
r = bdev_sendrec(dev, &m);
bdev_rdwt_cleanup(&m);
} while (bdev_retry(&driver_tries, &transfer_tries, &r));
return r;
}
static int bdev_vrdwt_setup(int req, dev_t dev, u64_t pos, iovec_t *vec,
int count, int flags, message *m, iovec_s_t *gvec)
{
/* Set up a vectored read/write request.
*/
ssize_t size;
endpoint_t endpt;
cp_grant_id_t grant;
int i, access;
assert(count <= NR_IOREQS);
if ((endpt = bdev_driver_get(dev)) == NONE)
return EDEADSRCDST;
access = (req == BDEV_GATHER) ? CPF_WRITE : CPF_READ;
size = 0;
for (i = 0; i < count; i++) {
grant = cpf_grant_direct(endpt, vec[i].iov_addr, vec[i].iov_size,
access);
if (!GRANT_VALID(grant)) {
printf("bdev: unable to allocate grant!\n");
for (i--; i >= 0; i--)
cpf_revoke(gvec[i].iov_grant);
return EINVAL;
}
gvec[i].iov_grant = grant;
gvec[i].iov_size = vec[i].iov_size;
assert(vec[i].iov_size > 0);
assert((ssize_t) (size + vec[i].iov_size) > size);
size += vec[i].iov_size;
}
grant = cpf_grant_direct(endpt, (vir_bytes) gvec, sizeof(gvec[0]) * count,
CPF_READ);
if (!GRANT_VALID(grant)) {
printf("bdev: unable to allocate grant!\n");
for (i = count - 1; i >= 0; i--)
cpf_revoke(gvec[i].iov_grant);
return EINVAL;
}
memset(m, 0, sizeof(*m));
m->m_type = req;
m->m_lbdev_lblockdriver_msg.minor = minor(dev);
m->m_lbdev_lblockdriver_msg.pos = pos;
m->m_lbdev_lblockdriver_msg.count = count;
m->m_lbdev_lblockdriver_msg.grant = grant;
m->m_lbdev_lblockdriver_msg.flags = flags;
return OK;
}
static void bdev_vrdwt_cleanup(const message *m, iovec_s_t *gvec)
{
/* Clean up a vectored read/write request.
*/
cp_grant_id_t grant;
int i;
grant = m->m_lbdev_lblockdriver_msg.grant;
cpf_revoke(grant);
for (i = m->m_lbdev_lblockdriver_msg.count - 1; i >= 0; i--)
cpf_revoke(gvec[i].iov_grant);
}
static ssize_t bdev_vrdwt(int req, dev_t dev, u64_t pos, iovec_t *vec,
int count, int flags)
{
/* Perform a synchronous read or write call using a vector of buffers.
*/
iovec_s_t gvec[NR_IOREQS];
message m;
int r, driver_tries = 0, transfer_tries = 0;
do {
if ((r = bdev_vrdwt_setup(req, dev, pos, vec, count, flags, &m,
gvec)) != OK)
break;
r = bdev_sendrec(dev, &m);
bdev_vrdwt_cleanup(&m, gvec);
} while (bdev_retry(&driver_tries, &transfer_tries, &r));
return r;
}
ssize_t bdev_read(dev_t dev, u64_t pos, char *buf, size_t count, int flags)
{
/* Perform a synchronous read call into a single buffer.
*/
return bdev_rdwt(BDEV_READ, dev, pos, buf, count, flags);
}
ssize_t bdev_write(dev_t dev, u64_t pos, char *buf, size_t count, int flags)
{
/* Perform a synchronous write call from a single buffer.
*/
return bdev_rdwt(BDEV_WRITE, dev, pos, buf, count, flags);
}
ssize_t bdev_gather(dev_t dev, u64_t pos, iovec_t *vec, int count, int flags)
{
/* Perform a synchronous read call into a vector of buffers.
*/
return bdev_vrdwt(BDEV_GATHER, dev, pos, vec, count, flags);
}
ssize_t bdev_scatter(dev_t dev, u64_t pos, iovec_t *vec, int count, int flags)
{
/* Perform a synchronous write call from a vector of buffers.
*/
return bdev_vrdwt(BDEV_SCATTER, dev, pos, vec, count, flags);
}
static int bdev_ioctl_setup(dev_t dev, int request, void *buf,
endpoint_t user_endpt, message *m)
{
/* Set up an I/O control request.
*/
endpoint_t endpt;
size_t size;
cp_grant_id_t grant;
int access;
if ((endpt = bdev_driver_get(dev)) == NONE)
return EDEADSRCDST;
if (_MINIX_IOCTL_BIG(request))
size = _MINIX_IOCTL_SIZE_BIG(request);
else
size = _MINIX_IOCTL_SIZE(request);
access = 0;
if (_MINIX_IOCTL_IOR(request)) access |= CPF_WRITE;
if (_MINIX_IOCTL_IOW(request)) access |= CPF_READ;
/* The size may be 0, in which case 'buf' need not be a valid pointer. */
grant = cpf_grant_direct(endpt, (vir_bytes) buf, size, access);
if (!GRANT_VALID(grant)) {
printf("bdev: unable to allocate grant!\n");
return EINVAL;
}
memset(m, 0, sizeof(*m));
m->m_type = BDEV_IOCTL;
m->m_lbdev_lblockdriver_msg.minor = minor(dev);
m->m_lbdev_lblockdriver_msg.request = request;
m->m_lbdev_lblockdriver_msg.grant = grant;
m->m_lbdev_lblockdriver_msg.user = user_endpt;
return OK;
}
static void bdev_ioctl_cleanup(const message *m)
{
/* Clean up an I/O control request.
*/
cpf_revoke(m->m_lbdev_lblockdriver_msg.grant);
}
int bdev_ioctl(dev_t dev, int request, void *buf, endpoint_t user_endpt)
{
/* Perform a synchronous I/O control request.
*/
message m;
int r, driver_tries = 0;
do {
if ((r = bdev_ioctl_setup(dev, request, buf, user_endpt, &m)) != OK)
break;
r = bdev_sendrec(dev, &m);
bdev_ioctl_cleanup(&m);
} while (bdev_retry(&driver_tries, NULL, &r));
return r;
}
void bdev_flush_asyn(dev_t dev)
{
/* Flush all ongoing asynchronous requests to the given minor device. This
* involves blocking until all I/O for it has completed.
* File system usage note: typically called from flush.
*/
bdev_call_t *call;
while ((call = bdev_call_find(dev)) != NULL)
(void) bdev_wait_asyn(call->id);
}
static bdev_id_t bdev_rdwt_asyn(int req, dev_t dev, u64_t pos, char *buf,
size_t count, int flags, bdev_callback_t callback, bdev_param_t param)
{
/* Perform an asynchronous read or write call using a single buffer.
*/
bdev_call_t *call;
int r;
if ((call = bdev_call_alloc(1)) == NULL)
return ENOMEM;
if ((r = bdev_rdwt_setup(req, dev, pos, buf, count, flags, &call->msg)) !=
OK) {
bdev_call_free(call);
return r;
}
if ((r = bdev_senda(dev, &call->msg, call->id)) != OK) {
bdev_rdwt_cleanup(&call->msg);
bdev_call_free(call);
return r;
}
call->dev = dev;
call->callback = callback;
call->param = param;
call->driver_tries = 0;
call->transfer_tries = 0;
call->vec[0].iov_addr = (vir_bytes) buf;
call->vec[0].iov_size = count;
return call->id;
}
static bdev_id_t bdev_vrdwt_asyn(int req, dev_t dev, u64_t pos, iovec_t *vec,
int count, int flags, bdev_callback_t callback, bdev_param_t param)
{
/* Perform an asynchronous read or write call using a vector of buffers.
*/
bdev_call_t *call;
int r;
if ((call = bdev_call_alloc(count)) == NULL)
return ENOMEM;
if ((r = bdev_vrdwt_setup(req, dev, pos, vec, count, flags, &call->msg,
call->gvec)) != OK) {
bdev_call_free(call);
return r;
}
if ((r = bdev_senda(dev, &call->msg, call->id)) != OK) {
bdev_vrdwt_cleanup(&call->msg, call->gvec);
bdev_call_free(call);
return r;
}
call->dev = dev;
call->callback = callback;
call->param = param;
call->driver_tries = 0;
call->transfer_tries = 0;
memcpy(call->vec, vec, sizeof(vec[0]) * count);
return call->id;
}
bdev_id_t bdev_read_asyn(dev_t dev, u64_t pos, char *buf, size_t count,
int flags, bdev_callback_t callback, bdev_param_t param)
{
/* Perform an asynchronous read call into a single buffer.
*/
return bdev_rdwt_asyn(BDEV_READ, dev, pos, buf, count, flags, callback,
param);
}
bdev_id_t bdev_write_asyn(dev_t dev, u64_t pos, char *buf, size_t count,
int flags, bdev_callback_t callback, bdev_param_t param)
{
/* Perform an asynchronous write call from a single buffer.
*/
return bdev_rdwt_asyn(BDEV_WRITE, dev, pos, buf, count, flags, callback,
param);
}
bdev_id_t bdev_gather_asyn(dev_t dev, u64_t pos, iovec_t *vec, int count,
int flags, bdev_callback_t callback, bdev_param_t param)
{
/* Perform an asynchronous read call into a vector of buffers.
*/
return bdev_vrdwt_asyn(BDEV_GATHER, dev, pos, vec, count, flags, callback,
param);
}
bdev_id_t bdev_scatter_asyn(dev_t dev, u64_t pos, iovec_t *vec, int count,
int flags, bdev_callback_t callback, bdev_param_t param)
{
/* Perform an asynchronous write call from a vector of buffers.
*/
return bdev_vrdwt_asyn(BDEV_SCATTER, dev, pos, vec, count, flags, callback,
param);
}
bdev_id_t bdev_ioctl_asyn(dev_t dev, int request, void *buf,
endpoint_t user_endpt, bdev_callback_t callback, bdev_param_t param)
{
/* Perform an asynchronous I/O control request.
*/
bdev_call_t *call;
int r;
if ((call = bdev_call_alloc(1)) == NULL)
return ENOMEM;
if ((r = bdev_ioctl_setup(dev, request, buf, user_endpt,
&call->msg)) != OK) {
bdev_call_free(call);
return r;
}
if ((r = bdev_senda(dev, &call->msg, call->id)) != OK) {
bdev_ioctl_cleanup(&call->msg);
bdev_call_free(call);
return r;
}
call->dev = dev;
call->callback = callback;
call->param = param;
call->driver_tries = 0;
call->vec[0].iov_addr = (vir_bytes) buf;
return call->id;
}
void bdev_callback_asyn(bdev_call_t *call, int result)
{
/* Perform the callback for an asynchronous request, with the given result.
* Clean up the call structure afterwards.
*/
/* If this was a transfer request and the result is EIO, we may want to retry
* the request first.
*/
switch (call->msg.m_type) {
case BDEV_READ:
case BDEV_WRITE:
case BDEV_GATHER:
case BDEV_SCATTER:
if (result == EIO && ++call->transfer_tries < TRANSFER_TRIES) {
result = bdev_senda(call->dev, &call->msg, call->id);
if (result == OK)
return;
}
}
/* Clean up. */
switch (call->msg.m_type) {
case BDEV_READ:
case BDEV_WRITE:
bdev_rdwt_cleanup(&call->msg);
break;
case BDEV_GATHER:
case BDEV_SCATTER:
bdev_vrdwt_cleanup(&call->msg, call->gvec);
break;
case BDEV_IOCTL:
bdev_ioctl_cleanup(&call->msg);
break;
default:
assert(0);
}
/* Call the callback function. */
/* FIXME: we assume all reasonable ssize_t values can be stored in an int. */
call->callback(call->dev, call->id, call->param, result);
/* Free up the call structure. */
bdev_call_free(call);
}
int bdev_restart_asyn(bdev_call_t *call)
{
/* The driver for the given call has restarted, and may now have a new
* endpoint. Recreate and resend the request for the given call.
*/
int type, r = OK;
/* Update and check the retry limit for driver restarts first. */
if (++call->driver_tries >= DRIVER_TRIES)
return EDEADSRCDST;
/* Recreate all grants for the new endpoint. */
type = call->msg.m_type;
switch (type) {
case BDEV_READ:
case BDEV_WRITE:
bdev_rdwt_cleanup(&call->msg);
r = bdev_rdwt_setup(type, call->dev,
call->msg.m_lbdev_lblockdriver_msg.pos,
(char *) call->vec[0].iov_addr, call->msg.m_lbdev_lblockdriver_msg.count,
call->msg.m_lbdev_lblockdriver_msg.flags, &call->msg);
break;
case BDEV_GATHER:
case BDEV_SCATTER:
bdev_vrdwt_cleanup(&call->msg, call->gvec);
r = bdev_vrdwt_setup(type, call->dev,
call->msg.m_lbdev_lblockdriver_msg.pos,
call->vec, call->msg.m_lbdev_lblockdriver_msg.count, call->msg.m_lbdev_lblockdriver_msg.flags,
&call->msg, call->gvec);
break;
case BDEV_IOCTL:
bdev_ioctl_cleanup(&call->msg);
r = bdev_ioctl_setup(call->dev, call->msg.m_lbdev_lblockdriver_msg.request,
(char *) call->vec[0].iov_addr, call->msg.m_lbdev_lblockdriver_msg.user,
&call->msg);
break;
default:
assert(0);
}
if (r != OK)
return r;
/* Try to resend the request. */
return bdev_senda(call->dev, &call->msg, call->id);
}

View File

@@ -1,118 +0,0 @@
/* libbdev - asynchronous call structure management */
#include <minix/drivers.h>
#include <minix/bdev.h>
#include <assert.h>
#include "const.h"
#include "type.h"
#include "proto.h"
static bdev_call_t *calls[NR_CALLS];
bdev_call_t *bdev_call_alloc(int count)
{
/* Allocate a call structure.
*/
bdev_call_t *call;
bdev_id_t id;
for (id = 0; id < NR_CALLS; id++)
if (calls[id] == NULL)
break;
if (id == NR_CALLS)
return NULL;
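	/* A single allocation holds the call structure (which already contains
	 * one grant-vector element), the remaining count-1 grant-vector
	 * elements, and a count-element copy of the caller's I/O vector;
	 * 'vec' is made to point just past the grant vector below.
	 */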
call = malloc(sizeof(bdev_call_t) +
sizeof(call->gvec[0]) * (count - 1) +
sizeof(call->vec[0]) * count);
if (call == NULL)
return NULL;
call->id = id;
call->vec = (iovec_t *) &call->gvec[count];
calls[id] = call;
return call;
}
void bdev_call_free(bdev_call_t *call)
{
/* Free a call structure.
*/
assert(calls[call->id] == call);
calls[call->id] = NULL;
free(call);
}
bdev_call_t *bdev_call_get(bdev_id_t id)
{
/* Retrieve a call structure by request number.
*/
if (id < 0 || id >= NR_CALLS)
return NULL;
return calls[id];
}
bdev_call_t *bdev_call_find(dev_t dev)
{
/* Find the first asynchronous request for the given device, if any.
*/
bdev_id_t id;
for (id = 0; id < NR_CALLS; id++)
if (calls[id] != NULL && calls[id]->dev == dev)
return calls[id];
return NULL;
}
bdev_call_t *bdev_call_iter_maj(dev_t dev, bdev_call_t *call,
bdev_call_t **next)
{
/* Iterate over all asynchronous requests for a major device. This function
* must be safe even if the returned call structure is freed.
*/
bdev_id_t id;
int major;
major = major(dev);
/* If this is the first invocation, find the first match. Otherwise, take the
* call we found to be next in the last invocation, which may be NULL.
*/
if (call == NULL) {
for (id = 0; id < NR_CALLS; id++)
if (calls[id] != NULL && major(calls[id]->dev) == major)
break;
if (id == NR_CALLS)
return NULL;
call = calls[id];
} else {
if ((call = *next) == NULL)
return NULL;
}
/* Look for the next match, if any. */
*next = NULL;
for (id = call->id + 1; id < NR_CALLS; id++) {
if (calls[id] != NULL && major(calls[id]->dev) == major) {
*next = calls[id];
break;
}
}
return call;
}

View File

@@ -1,17 +0,0 @@
#ifndef _BDEV_CONST_H
#define _BDEV_CONST_H
#define NR_CALLS 256 /* maximum number of concurrent async calls */
#define NO_ID (-1) /* ID for synchronous requests */
#define DS_NR_TRIES 100 /* number of times to check endpoint in DS */
#define DS_DELAY 50000 /* delay time (us) between DS checks */
#define DRIVER_TRIES 10 /* after so many tries, give up on a driver */
#define RECOVER_TRIES 2 /* tolerated nr of restarts during recovery */
#define TRANSFER_TRIES 5 /* number of times to try transfers on EIO */
#define NR_OPEN_DEVS 4 /* maximum different opened minor devices */
#endif /* _BDEV_CONST_H */

View File

@@ -1,122 +0,0 @@
/* libbdev - driver endpoint management */
#include <minix/drivers.h>
#include <minix/bdev.h>
#include <minix/ds.h>
#include <assert.h>
#include "const.h"
#include "type.h"
#include "proto.h"
static struct {
endpoint_t endpt;
char label[DS_MAX_KEYLEN];
} driver_tab[NR_DEVICES];
void bdev_driver_init(void)
{
/* Initialize the driver table.
*/
int i;
for (i = 0; i < NR_DEVICES; i++) {
driver_tab[i].endpt = NONE;
driver_tab[i].label[0] = '\0';
}
}
void bdev_driver_clear(dev_t dev)
{
/* Clear information about a driver.
*/
int major;
major = major(dev);
assert(major >= 0 && major < NR_DEVICES);
driver_tab[major].endpt = NONE;
driver_tab[major].label[0] = '\0';
}
endpoint_t bdev_driver_set(dev_t dev, char *label)
{
/* Set the label for a driver, and retrieve the associated endpoint.
*/
int major;
major = major(dev);
assert(major >= 0 && major < NR_DEVICES);
assert(strlen(label) < sizeof(driver_tab[major].label));
strlcpy(driver_tab[major].label, label, sizeof(driver_tab[major].label));
driver_tab[major].endpt = NONE;
return bdev_driver_update(dev);
}
endpoint_t bdev_driver_get(dev_t dev)
{
/* Return the endpoint for a driver, or NONE if we do not know its endpoint.
*/
int major;
major = major(dev);
assert(major >= 0 && major < NR_DEVICES);
return driver_tab[major].endpt;
}
endpoint_t bdev_driver_update(dev_t dev)
{
/* Update the endpoint of a driver. The caller of this function already knows
* that the current endpoint may no longer be valid, and must be updated.
* Return the new endpoint upon success, and NONE otherwise.
*/
endpoint_t endpt;
int r, major, nr_tries;
major = major(dev);
assert(major >= 0 && major < NR_DEVICES);
assert(driver_tab[major].label[0] != '\0');
/* Repeatedly retrieve the endpoint for the driver label, and see if it is a
* different, valid endpoint. If retrieval fails at first, we have to wait.
* We use polling, as opposed to a DS subscription, for a number of reasons:
* 1) DS supports only one subscription per process, and our main program may
* already have a subscription;
* 2) if we block on receiving a notification from DS, we cannot impose an
* upper bound on the retry time;
* 3) temporarily subscribing and then unsubscribing may cause leftover DS
* notifications, which the main program would then have to deal with.
* As of writing, unsubscribing from DS is not possible at all, anyway.
*
* In the normal case, the driver's label/endpoint mapping entry disappears
* completely for a short moment, before being replaced with the new mapping.
* Hence, failure to retrieve the entry at all does not constitute permanent
* failure. In fact, there is no way to determine reliably that a driver has
* failed permanently in the current approach. For this we simply rely on the
* retry limit.
*/
for (nr_tries = 0; nr_tries < DS_NR_TRIES; nr_tries++) {
r = ds_retrieve_label_endpt(driver_tab[major].label, &endpt);
if (r == OK && endpt != NONE && endpt != driver_tab[major].endpt) {
driver_tab[major].endpt = endpt;
return endpt;
}
if (nr_tries < DS_NR_TRIES - 1)
micro_delay(DS_DELAY);
}
driver_tab[major].endpt = NONE;
return NONE;
}

View File

@@ -1,346 +0,0 @@
/* libbdev - IPC and recovery functions */
#include <minix/drivers.h>
#include <minix/bdev.h>
#include <assert.h>
#include "const.h"
#include "type.h"
#include "proto.h"
static void bdev_cancel(dev_t dev)
{
/* Recovering the driver for the given device has failed repeatedly. Mark it as
* permanently unusable, and clean up any associated calls and resources.
*/
bdev_call_t *call, *next;
printf("bdev: giving up on major %d\n", major(dev));
/* Cancel all pending asynchronous requests. */
call = NULL;
while ((call = bdev_call_iter_maj(dev, call, &next)) != NULL)
bdev_callback_asyn(call, EDEADSRCDST);
/* Mark the driver as unusable. */
bdev_driver_clear(dev);
}
static int bdev_recover(dev_t dev, int update_endpt)
{
/* The IPC subsystem has signaled an error communicating to the driver
* associated with the given device. Try to recover. If 'update_endpt' is set,
* we need to find the new endpoint of the driver first. Return TRUE iff
* recovery has been successful.
*/
bdev_call_t *call, *next;
endpoint_t endpt;
int r, active, nr_tries;
/* Only print output if there is something to recover. Some drivers may be
* shut down and later restarted legitimately, and if they were not in use
* while that happened, there is no need to flood the console with messages.
*/
active = bdev_minor_is_open(dev) || bdev_call_iter_maj(dev, NULL, &next);
if (active)
printf("bdev: recovering from a driver restart on major %d\n",
major(dev));
for (nr_tries = 0; nr_tries < RECOVER_TRIES; nr_tries++) {
/* First update the endpoint, if necessary. */
if (update_endpt)
(void) bdev_driver_update(dev);
if ((endpt = bdev_driver_get(dev)) == NONE)
break;
/* If anything goes wrong, update the endpoint again next time. */
update_endpt = TRUE;
/* Reopen all minor devices on the new driver. */
if ((r = bdev_minor_reopen(dev)) != OK) {
/* If the driver died again, we may give it another try. */
if (r == EDEADSRCDST)
continue;
/* If another error occurred, we cannot continue using the
* driver as is, but we also cannot force it to restart.
*/
break;
}
/* Resend all asynchronous requests. */
call = NULL;
while ((call = bdev_call_iter_maj(dev, call, &next)) != NULL) {
/* It is not strictly necessary that we manage to reissue all
* asynchronous requests successfully. We can fail them on an
* individual basis here, without affecting the overall
* recovery. Note that we will never get new IPC failures here.
*/
if ((r = bdev_restart_asyn(call)) != OK)
bdev_callback_asyn(call, r);
}
/* Recovery seems successful. We can now reissue the current
* synchronous request (if any), and continue normal operation.
*/
if (active)
printf("bdev: recovery successful, new driver at %d\n", endpt);
return TRUE;
}
/* Recovery failed repeatedly. Give up on this driver. */
bdev_cancel(dev);
return FALSE;
}
void bdev_update(dev_t dev, char *label)
{
/* Set the endpoint for a driver. Perform recovery if necessary.
*/
endpoint_t endpt, old_endpt;
old_endpt = bdev_driver_get(dev);
endpt = bdev_driver_set(dev, label);
/* If updating the driver causes an endpoint change, we need to perform
* recovery, but not update the endpoint yet again.
*/
if (old_endpt != NONE && old_endpt != endpt)
bdev_recover(dev, FALSE /*update_endpt*/);
}
int bdev_senda(dev_t dev, const message *m_orig, bdev_id_t id)
{
/* Send an asynchronous request for the given device. This function will never
* get any new IPC errors sending to the driver. If sending an asynchronous
* request fails, we will find out through other ways later.
*/
endpoint_t endpt;
message m;
int r;
/* If we have no usable driver endpoint, fail instantly. */
if ((endpt = bdev_driver_get(dev)) == NONE)
return EDEADSRCDST;
m = *m_orig;
m.m_lbdev_lblockdriver_msg.id = id;
r = asynsend(endpt, &m);
if (r != OK)
printf("bdev: asynsend to driver (%d) failed (%d)\n", endpt, r);
return r;
}
int bdev_sendrec(dev_t dev, const message *m_orig)
{
/* Send a synchronous request for the given device, and wait for the reply.
* Return ERESTART if the caller should try to reissue the request.
*/
endpoint_t endpt;
message m;
int r;
/* If we have no usable driver endpoint, fail instantly. */
if ((endpt = bdev_driver_get(dev)) == NONE)
return EDEADSRCDST;
/* Send the request and block until we receive a reply. */
m = *m_orig;
m.m_lbdev_lblockdriver_msg.id = NO_ID;
r = ipc_sendrec(endpt, &m);
/* If communication failed, the driver has died. We assume it will be
* restarted soon after, so we attempt recovery. Upon success, we let the
* caller reissue the synchronous request.
*/
if (r == EDEADSRCDST) {
if (!bdev_recover(dev, TRUE /*update_endpt*/))
return EDEADSRCDST;
return ERESTART;
}
if (r != OK) {
printf("bdev: IPC to driver (%d) failed (%d)\n", endpt, r);
return r;
}
if (m.m_type != BDEV_REPLY) {
printf("bdev: driver (%d) sent weird response (%d)\n",
endpt, m.m_type);
return EINVAL;
}
/* The protocol contract states that no asynchronous reply can satisfy a
* synchronous SENDREC call, so we can never get an asynchronous reply here.
*/
if (m.m_lblockdriver_lbdev_reply.id != NO_ID) {
printf("bdev: driver (%d) sent invalid ID (%d)\n", endpt,
m.m_lblockdriver_lbdev_reply.id);
return EINVAL;
}
/* Unless the caller is misusing libbdev, we will only get ERESTART if we
* have managed to resend a raw block I/O request to the driver after a
* restart, but before VFS has had a chance to reopen the associated device
* first. This is highly exceptional, and hard to deal with correctly. We
* take the easiest route: sleep for a while so that VFS can reopen the
* device, and then resend the request. If the call keeps failing, the caller
* will eventually give up.
*/
if (m.m_lblockdriver_lbdev_reply.status == ERESTART) {
printf("bdev: got ERESTART from driver (%d), sleeping for reopen\n",
endpt);
micro_delay(1000);
return ERESTART;
}
/* Return the result of our request. */
return m.m_lblockdriver_lbdev_reply.status;
}
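/* Illustrative sketch only (not part of the original file): the caller-side
 * retry loop implied by the ERESTART convention above, as a synchronous
 * wrapper elsewhere in libbdev would use it. The function name is made up.
 */
static int example_sendrec_retry(dev_t dev, message *m)
{
	int r;

	/* Keep reissuing the request for as long as bdev_sendrec() reports
	 * that recovery succeeded and the call should simply be retried.
	 */
	do {
		r = bdev_sendrec(dev, m);
	} while (r == ERESTART);

	return r;
}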
static int bdev_receive(dev_t dev, message *m)
{
/* Receive one valid message.
*/
endpoint_t endpt;
int r, nr_tries = 0;
for (;;) {
/* Retrieve and check the driver endpoint on every try, as it will
* change with each driver restart.
*/
if ((endpt = bdev_driver_get(dev)) == NONE)
return EDEADSRCDST;
r = sef_receive(endpt, m);
if (r == EDEADSRCDST) {
/* If we reached the maximum number of retries, give up. */
if (++nr_tries == DRIVER_TRIES)
break;
/* Attempt recovery. If successful, all asynchronous requests
* will have been resent, and we can retry receiving a reply.
*/
if (!bdev_recover(dev, TRUE /*update_endpt*/))
return EDEADSRCDST;
continue;
}
if (r != OK) {
printf("bdev: IPC to driver (%d) failed (%d)\n", endpt, r);
return r;
}
if (m->m_type != BDEV_REPLY) {
printf("bdev: driver (%d) sent weird response (%d)\n",
endpt, m->m_type);
return EINVAL;
}
/* The caller is responsible for checking the ID and status. */
return OK;
}
/* All tries failed, even though all recovery attempts succeeded. In this
* case, we let the caller recheck whether it wants to keep calling us,
* returning ERESTART to indicate we can be called again but did not actually
* receive a message.
*/
return ERESTART;
}
void bdev_reply_asyn(message *m)
{
/* A reply has come in from a disk driver.
*/
bdev_call_t *call;
endpoint_t endpt;
bdev_id_t id;
int r;
/* This is a requirement for the caller. */
assert(m->m_type == BDEV_REPLY);
/* Get the corresponding asynchronous call structure. */
id = m->m_lblockdriver_lbdev_reply.id;
if ((call = bdev_call_get(id)) == NULL) {
printf("bdev: driver (%d) replied to unknown request (%d)\n",
m->m_source, m->m_lblockdriver_lbdev_reply.id);
return;
}
/* Make sure the reply was sent from the right endpoint. */
endpt = bdev_driver_get(call->dev);
if (m->m_source != endpt) {
/* If the endpoint is NONE, this may be a stray reply. */
if (endpt != NONE)
printf("bdev: driver (%d) replied to request not sent to it\n",
m->m_source);
return;
}
/* See the ERESTART comment in bdev_sendrec(). */
if (m->m_lblockdriver_lbdev_reply.status == ERESTART) {
printf("bdev: got ERESTART from driver (%d), sleeping for reopen\n",
endpt);
micro_delay(1000);
if ((r = bdev_restart_asyn(call)) != OK)
bdev_callback_asyn(call, r);
return;
}
bdev_callback_asyn(call, m->m_lblockdriver_lbdev_reply.status);
}
int bdev_wait_asyn(bdev_id_t id)
{
/* Wait for an asynchronous request to complete.
*/
bdev_call_t *call;
dev_t dev;
message m;
int r;
if ((call = bdev_call_get(id)) == NULL)
return ENOENT;
dev = call->dev;
do {
if ((r = bdev_receive(dev, &m)) != OK && r != ERESTART)
return r;
/* Processing the reply will free up the call structure as a side
* effect. If we repeatedly get ERESTART, we will repeatedly resend the
* asynchronous request, which will then eventually hit the retry limit
* and we will break out of the loop.
*/
if (r == OK)
bdev_reply_asyn(&m);
} while (bdev_call_get(id) != NULL);
return OK;
}

View File

@@ -1,136 +0,0 @@
/* libbdev - tracking and reopening of opened minor devices */
#include <minix/drivers.h>
#include <minix/bdev.h>
#include <assert.h>
#include "const.h"
#include "type.h"
#include "proto.h"
static struct {
dev_t dev;
int count;
int access;
} open_dev[NR_OPEN_DEVS] = { { NO_DEV, 0, 0 } };
int bdev_minor_reopen(dev_t dev)
{
/* Reopen all minor devices on a major device. This function duplicates some
* code from elsewhere, because in this case we must avoid performing recovery.
* FIXME: if reopening fails with a non-IPC error, we should attempt to close
* all minors that we did manage to reopen so far, or they might stay open
* forever.
*/
endpoint_t endpt;
message m;
int i, j, r, major;
major = major(dev);
endpt = bdev_driver_get(dev);
assert(endpt != NONE);
for (i = 0; i < NR_OPEN_DEVS; i++) {
if (major(open_dev[i].dev) != major)
continue;
/* Each minor device may have been opened multiple times. Send an open
* request for each time that it was opened before. We could reopen it
* just once, but then we'd have to keep a shadow open count as well.
*/
for (j = 0; j < open_dev[i].count; j++) {
memset(&m, 0, sizeof(m));
m.m_type = BDEV_OPEN;
m.m_lbdev_lblockdriver_msg.minor = minor(open_dev[i].dev);
m.m_lbdev_lblockdriver_msg.access = open_dev[i].access;
m.m_lbdev_lblockdriver_msg.id = NO_ID;
if ((r = ipc_sendrec(endpt, &m)) != OK) {
printf("bdev: IPC to driver (%d) failed (%d)\n",
endpt, r);
return r;
}
if (m.m_type != BDEV_REPLY) {
printf("bdev: driver (%d) sent weird response (%d)\n",
endpt, m.m_type);
return EINVAL;
}
if (m.m_lblockdriver_lbdev_reply.id != NO_ID) {
printf("bdev: driver (%d) sent invalid ID (%ld)\n",
endpt, m.m_lblockdriver_lbdev_reply.id);
return EINVAL;
}
if ((r = m.m_lblockdriver_lbdev_reply.status) != OK) {
printf("bdev: driver (%d) failed device reopen (%d)\n",
endpt, r);
return r;
}
}
}
return OK;
}
void bdev_minor_add(dev_t dev, int access)
{
/* Increase the reference count of the given minor device.
*/
int i, free = -1;
for (i = 0; i < NR_OPEN_DEVS; i++) {
if (open_dev[i].dev == dev) {
open_dev[i].count++;
open_dev[i].access |= access;
return;
}
if (free < 0 && open_dev[i].dev == NO_DEV)
free = i;
}
if (free < 0) {
printf("bdev: too many open devices, increase NR_OPEN_DEVS\n");
return;
}
open_dev[free].dev = dev;
open_dev[free].count = 1;
open_dev[free].access = access;
}
void bdev_minor_del(dev_t dev)
{
/* Decrease the reference count of the given minor device, if present.
*/
int i;
for (i = 0; i < NR_OPEN_DEVS; i++) {
if (open_dev[i].dev == dev) {
if (!--open_dev[i].count)
open_dev[i].dev = NO_DEV;
break;
}
}
}
int bdev_minor_is_open(dev_t dev)
{
/* Return whether any minor is open for the major of the given device.
*/
int i, major;
major = major(dev);
for (i = 0; i < NR_OPEN_DEVS; i++) {
if (major(open_dev[i].dev) == major)
return TRUE;
}
return FALSE;
}

View File

@@ -1,34 +0,0 @@
#ifndef _BDEV_PROTO_H
#define _BDEV_PROTO_H
/* bdev.c */
extern void bdev_callback_asyn(bdev_call_t *call, int result);
extern int bdev_restart_asyn(bdev_call_t *call);
/* driver.c */
extern void bdev_driver_init(void);
extern void bdev_driver_clear(dev_t dev);
extern endpoint_t bdev_driver_set(dev_t dev, char *label);
extern endpoint_t bdev_driver_get(dev_t dev);
extern endpoint_t bdev_driver_update(dev_t dev);
/* call.c */
extern bdev_call_t *bdev_call_alloc(int count);
extern void bdev_call_free(bdev_call_t *call);
extern bdev_call_t *bdev_call_get(bdev_id_t id);
extern bdev_call_t *bdev_call_find(dev_t dev);
extern bdev_call_t *bdev_call_iter_maj(dev_t dev, bdev_call_t *last,
bdev_call_t **next);
/* ipc.c */
extern void bdev_update(dev_t dev, char *label);
extern int bdev_senda(dev_t dev, const message *m_orig, bdev_id_t num);
extern int bdev_sendrec(dev_t dev, const message *m_orig);
/* minor.c */
extern int bdev_minor_reopen(dev_t dev);
extern void bdev_minor_add(dev_t dev, int access);
extern void bdev_minor_del(dev_t dev);
extern int bdev_minor_is_open(dev_t dev);
#endif /* _BDEV_PROTO_H */

View File

@@ -1,16 +0,0 @@
#ifndef _BDEV_TYPE_H
#define _BDEV_TYPE_H
typedef struct {
bdev_id_t id; /* call ID */
dev_t dev; /* target device number */
message msg; /* request message */
bdev_callback_t callback; /* callback function */
bdev_param_t param; /* callback parameter */
int driver_tries; /* times retried on driver restarts */
int transfer_tries; /* times retried on transfer errors */
iovec_t *vec; /* original vector */
iovec_s_t gvec[1]; /* grant vector */
} bdev_call_t;
#endif /* _BDEV_TYPE_H */

View File

@@ -1,12 +0,0 @@
NOGCCERROR=yes
NOCLANGERROR=yes
CPPFLAGS+= -D_MINIX_SYSTEM
# Makefile for libblockdriver
.include <bsd.own.mk>
LIB= blockdriver
SRCS= driver.c drvlib.c driver_st.c driver_mt.c mq.c trace.c
.include <bsd.lib.mk>

View File

@@ -1,17 +0,0 @@
#ifndef _BLOCKDRIVER_CONST_H
#define _BLOCKDRIVER_CONST_H
/* Thread stack size. */
#define STACK_SIZE 8192
/* Maximum number of devices supported. */
#define MAX_DEVICES BLOCKDRIVER_MAX_DEVICES
/* The maximum number of worker threads per device. */
#define MAX_WORKERS 32
#define MAX_THREADS (MAX_DEVICES * MAX_WORKERS) /* max nr of threads */
#define MAIN_THREAD (MAX_THREADS) /* main thread ID */
#define SINGLE_THREAD (0) /* single-thread ID */
#endif /* _BLOCKDRIVER_CONST_H */

View File

@@ -1,462 +0,0 @@
/* This file contains the device independent block driver interface.
*
* Block drivers support the following requests. Message format m10 is used.
* Field names are prefixed with BDEV_. Separate field names are used for the
* "access", "request", and "user" fields.
*
* m_type MINOR COUNT GRANT FLAGS ID REQUEST POS
* +--------------+--------+----------+-------+-------+------+---------+------+
* | BDEV_OPEN | minor | access | | | id | | |
* |--------------+--------+----------+-------+-------+------+---------+------|
* | BDEV_CLOSE | minor | | | | id | | |
* |--------------+--------+----------+-------+-------+------+---------+------|
* | BDEV_READ | minor | bytes | grant | flags | id | | pos. |
* |--------------+--------+----------+-------+-------+------+---------+------|
* | BDEV_WRITE | minor | bytes | grant | flags | id | | pos. |
* |--------------+--------+----------+-------+-------+------+---------+------|
* | BDEV_GATHER | minor | elements | grant | flags | id | | pos. |
* |--------------+--------+----------+-------+-------+------+---------+------|
* | BDEV_SCATTER | minor | elements | grant | flags | id | | pos. |
* |--------------+--------+----------+-------+-------+------+---------+------|
* | BDEV_IOCTL | minor | | grant | user | id | request | |
* ----------------------------------------------------------------------------
*
* The following reply message is used for all requests.
*
* m_type STATUS ID
* +--------------+--------+----------+-------+-------+------+---------+------+
* | BDEV_REPLY | status | | | | id | | |
* ----------------------------------------------------------------------------
*
* Changes:
* Oct 16, 2011 split character and block protocol (D.C. van Moolenbroek)
* Aug 27, 2011 move common functions into driver.c (A. Welzel)
* Jul 25, 2005 added SYS_SIG type for signals (Jorrit N. Herder)
* Sep 15, 2004 added SYN_ALARM type for timeouts (Jorrit N. Herder)
* Jul 23, 2004 removed kernel dependencies (Jorrit N. Herder)
* Apr 02, 1992 constructed from AT wini and floppy driver (Kees J. Bot)
*/
#include <minix/drivers.h>
#include <minix/blockdriver.h>
#include <minix/ds.h>
#include <sys/ioc_block.h>
#include <sys/ioc_disk.h>
#include "driver.h"
#include "mq.h"
#include "trace.h"
/* Management data for opened devices. */
static int open_devs[MAX_NR_OPEN_DEVICES];
static int next_open_devs_slot = 0;
/*===========================================================================*
* clear_open_devs *
*===========================================================================*/
static void clear_open_devs(void)
{
/* Reset the set of previously opened minor devices. */
next_open_devs_slot = 0;
}
/*===========================================================================*
* is_open_dev *
*===========================================================================*/
static int is_open_dev(int device)
{
/* Check whether the given minor device has previously been opened. */
int i;
for (i = 0; i < next_open_devs_slot; i++)
if (open_devs[i] == device)
return TRUE;
return FALSE;
}
/*===========================================================================*
* set_open_dev *
*===========================================================================*/
static void set_open_dev(int device)
{
/* Mark the given minor device as having been opened. */
if (next_open_devs_slot >= MAX_NR_OPEN_DEVICES)
panic("out of slots for open devices");
open_devs[next_open_devs_slot] = device;
next_open_devs_slot++;
}
/*===========================================================================*
* blockdriver_announce *
*===========================================================================*/
void blockdriver_announce(int type)
{
/* Announce we are up after a fresh start or a restart. */
int r;
char key[DS_MAX_KEYLEN];
char label[DS_MAX_KEYLEN];
char *driver_prefix = "drv.blk.";
/* Callers are allowed to use ipc_sendrec to communicate with drivers.
* For this reason, there may be blocked callers when a driver restarts.
* Ask the kernel to unblock them (if any). Note that most block drivers
* will not restart statefully, and thus will skip this code.
*/
if (type == SEF_INIT_RESTART) {
if ((r = sys_statectl(SYS_STATE_CLEAR_IPC_REFS)) != OK)
panic("blockdriver_init: sys_statectl failed: %d", r);
}
/* Publish a driver up event. */
if ((r = ds_retrieve_label_name(label, sef_self())) != OK)
panic("blockdriver_init: unable to get own label: %d", r);
snprintf(key, DS_MAX_KEYLEN, "%s%s", driver_prefix, label);
if ((r = ds_publish_u32(key, DS_DRIVER_UP, DSF_OVERWRITE)) != OK)
panic("blockdriver_init: unable to publish driver up event: %d", r);
/* Expect an open for any device before serving regular driver requests. */
clear_open_devs();
/* Initialize or reset the message queue. */
mq_init();
}
/*===========================================================================*
* send_reply *
*===========================================================================*/
static void send_reply(endpoint_t endpt, message *m_ptr, int ipc_status)
{
/* Send a reply message to a request. */
int r;
/* If we would block sending the message, send it asynchronously. The NOREPLY
* flag is set because the caller may also issue a SENDREC (mixing sync and
* async comm), and the asynchronous reply could otherwise end up satisfying
* the SENDREC's receive part, after which our next SENDNB call would fail.
*/
if (IPC_STATUS_CALL(ipc_status) == SENDREC)
r = ipc_sendnb(endpt, m_ptr);
else
r = asynsend3(endpt, m_ptr, AMF_NOREPLY);
if (r != OK)
printf("blockdriver: unable to send reply to %d: %d\n", endpt, r);
}
/*===========================================================================*
* blockdriver_reply *
*===========================================================================*/
void blockdriver_reply(message *m_ptr, int ipc_status, int reply)
{
/* Reply to a block request sent to the driver. */
message m_reply;
if (reply == EDONTREPLY)
return;
memset(&m_reply, 0, sizeof(m_reply));
m_reply.m_type = BDEV_REPLY;
m_reply.m_lblockdriver_lbdev_reply.status = reply;
m_reply.m_lblockdriver_lbdev_reply.id = m_ptr->m_lbdev_lblockdriver_msg.id;
send_reply(m_ptr->m_source, &m_reply, ipc_status);
}
/*===========================================================================*
* do_open *
*===========================================================================*/
static int do_open(struct blockdriver *bdp, message *mp)
{
/* Open a minor device. */
return (*bdp->bdr_open)(mp->m_lbdev_lblockdriver_msg.minor, mp->m_lbdev_lblockdriver_msg.access);
}
/*===========================================================================*
* do_close *
*===========================================================================*/
static int do_close(struct blockdriver *bdp, message *mp)
{
/* Close a minor device. */
return (*bdp->bdr_close)(mp->m_lbdev_lblockdriver_msg.minor);
}
/*===========================================================================*
* do_rdwt *
*===========================================================================*/
static int do_rdwt(struct blockdriver *bdp, message *mp)
{
/* Carry out a single read or write request. */
iovec_t iovec1;
u64_t position;
int do_write;
ssize_t r;
/* Disk address? Address and length of the user buffer? */
if (mp->m_lbdev_lblockdriver_msg.count < 0) return EINVAL;
/* Create a one element scatter/gather vector for the buffer. */
iovec1.iov_addr = mp->m_lbdev_lblockdriver_msg.grant;
iovec1.iov_size = mp->m_lbdev_lblockdriver_msg.count;
/* Transfer bytes from/to the device. */
do_write = (mp->m_type == BDEV_WRITE);
position = mp->m_lbdev_lblockdriver_msg.pos;
r = (*bdp->bdr_transfer)(mp->m_lbdev_lblockdriver_msg.minor, do_write, position, mp->m_source,
&iovec1, 1, mp->m_lbdev_lblockdriver_msg.flags);
/* Return the number of bytes transferred or an error code. */
return r;
}
/*===========================================================================*
* do_vrdwt *
*===========================================================================*/
static int do_vrdwt(struct blockdriver *bdp, message *mp, thread_id_t id)
{
/* Carry out a device read or write to/from a vector of buffers. */
iovec_t iovec[NR_IOREQS];
unsigned int nr_req;
u64_t position;
int i, do_write;
ssize_t r, size;
/* Copy the vector from the caller to kernel space. */
nr_req = mp->m_lbdev_lblockdriver_msg.count; /* Length of I/O vector */
if (nr_req > NR_IOREQS) nr_req = NR_IOREQS;
if (OK != sys_safecopyfrom(mp->m_source, (vir_bytes) mp->m_lbdev_lblockdriver_msg.grant,
0, (vir_bytes) iovec, nr_req * sizeof(iovec[0]))) {
printf("blockdriver: bad I/O vector by: %d\n", mp->m_source);
return EINVAL;
}
/* Check for overflow condition, and update the size for block tracing. */
for (i = size = 0; i < nr_req; i++) {
if ((ssize_t) (size + iovec[i].iov_size) < size) return EINVAL;
size += iovec[i].iov_size;
}
trace_setsize(id, size);
/* Transfer bytes from/to the device. */
do_write = (mp->m_type == BDEV_SCATTER);
position = mp->m_lbdev_lblockdriver_msg.pos;
r = (*bdp->bdr_transfer)(mp->m_lbdev_lblockdriver_msg.minor, do_write, position, mp->m_source,
iovec, nr_req, mp->m_lbdev_lblockdriver_msg.flags);
/* Return the number of bytes transferred or an error code. */
return r;
}
/*===========================================================================*
* do_dioctl *
*===========================================================================*/
static int do_dioctl(struct blockdriver *bdp, dev_t minor,
unsigned long request, endpoint_t endpt, cp_grant_id_t grant)
{
/* Carry out a disk-specific I/O control request. */
struct device *dv;
struct part_geom entry;
int r = EINVAL;
switch (request) {
case DIOCSETP:
/* Copy just this one partition table entry. */
r = sys_safecopyfrom(endpt, grant, 0, (vir_bytes) &entry,
sizeof(entry));
if (r != OK)
return r;
if ((dv = (*bdp->bdr_part)(minor)) == NULL)
return ENXIO;
dv->dv_base = entry.base;
dv->dv_size = entry.size;
break;
case DIOCGETP:
/* Return a partition table entry and the geometry of the drive. */
if ((dv = (*bdp->bdr_part)(minor)) == NULL)
return ENXIO;
entry.base = dv->dv_base;
entry.size = dv->dv_size;
if (bdp->bdr_geometry) {
(*bdp->bdr_geometry)(minor, &entry);
} else {
/* The driver doesn't care -- make up fake geometry. */
entry.cylinders = (unsigned long)(entry.size / SECTOR_SIZE) /
(64 * 32);
entry.heads = 64;
entry.sectors = 32;
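			/* Example: a 1 GiB device has 2097152 sectors of
			 * SECTOR_SIZE (512) bytes, which yields 1024 fake
			 * cylinders of 64 * 32 sectors each.
			 */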
}
r = sys_safecopyto(endpt, grant, 0, (vir_bytes) &entry, sizeof(entry));
break;
}
return r;
}
/*===========================================================================*
* do_ioctl *
*===========================================================================*/
static int do_ioctl(struct blockdriver *bdp, message *mp)
{
/* Carry out an I/O control request. We forward block trace control requests
* to the tracing module, and handle setting/getting partitions when the driver
* has specified that it is a disk driver.
*/
dev_t minor;
unsigned long request;
cp_grant_id_t grant;
endpoint_t user_endpt;
int r;
minor = mp->m_lbdev_lblockdriver_msg.minor;
request = mp->m_lbdev_lblockdriver_msg.request;
grant = mp->m_lbdev_lblockdriver_msg.grant;
user_endpt = mp->m_lbdev_lblockdriver_msg.user;
switch (request) {
case BIOCTRACEBUF:
case BIOCTRACECTL:
case BIOCTRACEGET:
/* Block trace control. */
r = trace_ctl(minor, request, mp->m_source, grant);
break;
case DIOCSETP:
case DIOCGETP:
/* Handle disk-specific IOCTLs only for disk-type drivers. */
if (bdp->bdr_type == BLOCKDRIVER_TYPE_DISK) {
/* Disk partition control. */
r = do_dioctl(bdp, minor, request, mp->m_source, grant);
break;
}
/* fall-through */
default:
if (bdp->bdr_ioctl)
r = (*bdp->bdr_ioctl)(minor, request, mp->m_source, grant,
user_endpt);
else
r = ENOTTY;
}
return r;
}
/*===========================================================================*
* do_char_open *
*===========================================================================*/
static void do_char_open(message *m_ptr, int ipc_status)
{
/* Reply to a character driver open request stating there is no such device. */
message m_reply;
memset(&m_reply, 0, sizeof(m_reply));
m_reply.m_type = CDEV_REPLY;
m_reply.m_lchardriver_vfs_reply.status = ENXIO;
m_reply.m_lchardriver_vfs_reply.id = m_ptr->m_vfs_lchardriver_openclose.id;
send_reply(m_ptr->m_source, &m_reply, ipc_status);
}
/*===========================================================================*
* blockdriver_process_on_thread *
*===========================================================================*/
void blockdriver_process_on_thread(struct blockdriver *bdp, message *m_ptr,
int ipc_status, thread_id_t id)
{
/* Call the appropriate driver function, based on the type of request. Send
* a result code to the caller. The call is processed in the context of the
* given thread ID, which may be SINGLE_THREAD for single-threaded callers.
*/
int r;
/* Check for notifications first. We never reply to notifications. */
if (is_ipc_notify(ipc_status)) {
switch (_ENDPOINT_P(m_ptr->m_source)) {
case HARDWARE:
if (bdp->bdr_intr)
(*bdp->bdr_intr)(m_ptr->m_notify.interrupts);
break;
case CLOCK:
if (bdp->bdr_alarm)
(*bdp->bdr_alarm)(m_ptr->m_notify.timestamp);
break;
default:
if (bdp->bdr_other)
(*bdp->bdr_other)(m_ptr, ipc_status);
}
return; /* do not send a reply */
}
/* Reply to character driver open requests with an error code. Otherwise, if
* someone creates a character device node for a block driver, opening that
* device node will cause the corresponding VFS thread to block forever.
*/
if (m_ptr->m_type == CDEV_OPEN) {
do_char_open(m_ptr, ipc_status);
return;
}
/* We might get spurious requests if the driver has been restarted. Deny any
* requests on devices that have not previously been opened, signaling the
* caller that something went wrong.
*/
if (IS_BDEV_RQ(m_ptr->m_type) && !is_open_dev(m_ptr->m_lbdev_lblockdriver_msg.minor)) {
/* Reply ERESTART to spurious requests for unopened devices. */
if (m_ptr->m_type != BDEV_OPEN) {
blockdriver_reply(m_ptr, ipc_status, ERESTART);
return;
}
/* Mark the device as opened otherwise. */
set_open_dev(m_ptr->m_lbdev_lblockdriver_msg.minor);
}
trace_start(id, m_ptr);
/* Call the appropriate function(s) for this request. */
switch (m_ptr->m_type) {
case BDEV_OPEN: r = do_open(bdp, m_ptr); break;
case BDEV_CLOSE: r = do_close(bdp, m_ptr); break;
case BDEV_READ:
case BDEV_WRITE: r = do_rdwt(bdp, m_ptr); break;
case BDEV_GATHER:
case BDEV_SCATTER: r = do_vrdwt(bdp, m_ptr, id); break;
case BDEV_IOCTL: r = do_ioctl(bdp, m_ptr); break;
default:
if (bdp->bdr_other != NULL)
(*bdp->bdr_other)(m_ptr, ipc_status);
return; /* do not send a reply */
}
/* Let the driver perform any cleanup. */
if (bdp->bdr_cleanup != NULL)
(*bdp->bdr_cleanup)();
trace_finish(id, r);
blockdriver_reply(m_ptr, ipc_status, r);
}

View File

@@ -1,8 +0,0 @@
#ifndef _BLOCKDRIVER_DRIVER_H
#define _BLOCKDRIVER_DRIVER_H
void blockdriver_process_on_thread(struct blockdriver *bdp, message *m_ptr,
int ipc_status, thread_id_t thread);
void blockdriver_reply(message *m_ptr, int ipc_status, int reply);
#endif /* _BLOCKDRIVER_DRIVER_H */

View File

@@ -1,509 +0,0 @@
/* This file contains the multithreaded driver interface.
*
* Changes:
* Aug 27, 2011 created (A. Welzel)
*
* The entry points into this file are:
* blockdriver_mt_task: the main message loop of the driver
* blockdriver_mt_terminate: break out of the main message loop
* blockdriver_mt_sleep: put the current thread to sleep
* blockdriver_mt_wakeup: wake up a sleeping thread
* blockdriver_mt_set_workers: set the number of worker threads
*/
#include <minix/blockdriver_mt.h>
#include <minix/mthread.h>
#include <assert.h>
#include "const.h"
#include "driver.h"
#include "mq.h"
/* A thread ID is composed of a device ID and a per-device worker thread ID.
* All thread IDs must be in the range 0..(MAX_THREADS-1) inclusive.
*/
#define MAKE_TID(did, wid) ((did) * MAX_WORKERS + (wid))
#define TID_DEVICE(tid) ((tid) / MAX_WORKERS)
#define TID_WORKER(tid) ((tid) % MAX_WORKERS)
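/* For example, with MAX_WORKERS == 32, worker 5 of device 2 gets thread ID
 * MAKE_TID(2, 5) == 69, and TID_DEVICE(69) == 2, TID_WORKER(69) == 5.
 */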
typedef int worker_id_t;
typedef enum {
STATE_DEAD,
STATE_RUNNING,
STATE_BUSY,
STATE_EXITED
} worker_state;
/* Structure with information about a worker thread. */
typedef struct {
device_id_t device_id;
worker_id_t worker_id;
worker_state state;
mthread_thread_t mthread;
mthread_event_t sleep_event;
} worker_t;
/* Structure with information about a device. */
typedef struct {
device_id_t id;
unsigned int workers;
worker_t worker[MAX_WORKERS];
mthread_event_t queue_event;
mthread_rwlock_t barrier;
} device_t;
static struct blockdriver *bdtab;
static int running = FALSE;
static mthread_key_t worker_key;
static device_t device[MAX_DEVICES];
static worker_t *exited[MAX_THREADS];
static int num_exited = 0;
/*===========================================================================*
* enqueue *
*===========================================================================*/
static void enqueue(device_t *dp, const message *m_src, int ipc_status)
{
/* Enqueue a message into the device's queue, and signal the event.
* Must be called from the master thread.
*/
if (!mq_enqueue(dp->id, m_src, ipc_status))
panic("blockdriver_mt: enqueue failed (message queue full)");
mthread_event_fire(&dp->queue_event);
}
/*===========================================================================*
* try_dequeue *
*===========================================================================*/
static int try_dequeue(device_t *dp, message *m_dst, int *ipc_status)
{
/* See if a message can be dequeued from the current worker thread's device
* queue. If so, dequeue the message and return TRUE. If not, return FALSE.
* Must be called from a worker thread. Does not block.
*/
return mq_dequeue(dp->id, m_dst, ipc_status);
}
/*===========================================================================*
* dequeue *
*===========================================================================*/
static int dequeue(device_t *dp, worker_t *wp, message *m_dst,
int *ipc_status)
{
/* Dequeue a message from the current worker thread's device queue. Block the
* current thread if necessary. Must be called from a worker thread. Either
* succeeds with a message (TRUE) or indicates that the thread should be
* terminated (FALSE).
*/
do {
mthread_event_wait(&dp->queue_event);
/* If we were woken up as a result of terminate or set_workers, break
* out of the loop and terminate the thread.
*/
if (!running || wp->worker_id >= dp->workers)
return FALSE;
} while (!try_dequeue(dp, m_dst, ipc_status));
return TRUE;
}
/*===========================================================================*
* is_transfer_req *
*===========================================================================*/
static int is_transfer_req(int type)
{
/* Return whether the given block device request is a transfer request.
*/
switch (type) {
case BDEV_READ:
case BDEV_WRITE:
case BDEV_GATHER:
case BDEV_SCATTER:
return TRUE;
default:
return FALSE;
}
}
/*===========================================================================*
* worker_thread *
*===========================================================================*/
static void *worker_thread(void *param)
{
/* The worker thread loop. Set up the thread-specific reference to itself and
* start looping. The loop dequeues messages, blocking when necessary, and handles them.
* After handling a message, the thread might have been stopped, so we check
* for this condition and exit if so.
*/
worker_t *wp;
device_t *dp;
thread_id_t tid;
message m;
int ipc_status, r;
wp = (worker_t *) param;
assert(wp != NULL);
dp = &device[wp->device_id];
tid = MAKE_TID(wp->device_id, wp->worker_id);
if (mthread_setspecific(worker_key, wp))
panic("blockdriver_mt: could not save local thread pointer");
while (running && wp->worker_id < dp->workers) {
/* See if a new message is available right away. */
if (!try_dequeue(dp, &m, &ipc_status)) {
/* If not, block waiting for a new message or a thread
* termination event.
*/
if (!dequeue(dp, wp, &m, &ipc_status))
break;
}
/* Even if the thread was stopped before, a new message resumes it. */
wp->state = STATE_BUSY;
/* If the request is a transfer request, we acquire the read barrier
* lock. Otherwise, we acquire the write lock.
*/
if (is_transfer_req(m.m_type))
mthread_rwlock_rdlock(&dp->barrier);
else
mthread_rwlock_wrlock(&dp->barrier);
/* Handle the request and send a reply. */
blockdriver_process_on_thread(bdtab, &m, ipc_status, tid);
/* Switch the thread back to running state, and unlock the barrier. */
wp->state = STATE_RUNNING;
mthread_rwlock_unlock(&dp->barrier);
}
/* Clean up and terminate this thread. */
if (mthread_setspecific(worker_key, NULL))
panic("blockdriver_mt: could not delete local thread pointer");
wp->state = STATE_EXITED;
exited[num_exited++] = wp;
return NULL;
}
/*===========================================================================*
* master_create_worker *
*===========================================================================*/
static void master_create_worker(worker_t *wp, worker_id_t worker_id,
device_id_t device_id)
{
/* Start a new worker thread.
*/
mthread_attr_t attr;
int r;
wp->device_id = device_id;
wp->worker_id = worker_id;
wp->state = STATE_RUNNING;
/* Initialize synchronization primitives. */
mthread_event_init(&wp->sleep_event);
r = mthread_attr_init(&attr);
if (r != 0)
panic("blockdriver_mt: could not initialize attributes (%d)", r);
r = mthread_attr_setstacksize(&attr, STACK_SIZE);
if (r != 0)
panic("blockdriver_mt: could not set stack size (%d)", r);
r = mthread_create(&wp->mthread, &attr, worker_thread, (void *) wp);
if (r != 0)
panic("blockdriver_mt: could not start thread %d (%d)", worker_id, r);
mthread_attr_destroy(&attr);
}
/*===========================================================================*
* master_destroy_worker *
*===========================================================================*/
static void master_destroy_worker(worker_t *wp)
{
/* Clean up resources used by an exited worker thread.
*/
assert(wp != NULL);
assert(wp->state == STATE_EXITED);
/* Join the thread. */
if (mthread_join(wp->mthread, NULL))
panic("blockdriver_mt: could not join thread %d", wp->worker_id);
/* Destroy resources. */
mthread_event_destroy(&wp->sleep_event);
wp->state = STATE_DEAD;
}
/*===========================================================================*
* master_handle_exits *
*===========================================================================*/
static void master_handle_exits(void)
{
/* Destroy the remains of all exited threads.
*/
int i;
for (i = 0; i < num_exited; i++)
master_destroy_worker(exited[i]);
num_exited = 0;
}
/*===========================================================================*
* master_handle_message *
*===========================================================================*/
static void master_handle_message(message *m_ptr, int ipc_status)
{
/* For real request messages, query the device ID, start a thread if none is
* free and the maximum number of threads for that device has not yet been
* reached, and enqueue the message in the device's message queue. All other
* messages are handled immediately from the main thread.
*/
device_id_t id;
worker_t *wp;
device_t *dp;
int r, wid;
/* If this is not a block driver request, we cannot get the minor device
* associated with it, and thus we cannot tell which thread should process
* it either. In that case, the master thread has to handle it instead.
*/
if (is_ipc_notify(ipc_status) || !IS_BDEV_RQ(m_ptr->m_type)) {
/* Process as 'other' message. */
blockdriver_process_on_thread(bdtab, m_ptr, ipc_status, MAIN_THREAD);
return;
}
/* Query the device ID. Upon failure, send the error code to the caller. */
r = (*bdtab->bdr_device)(m_ptr->m_lbdev_lblockdriver_msg.minor, &id);
if (r != OK) {
blockdriver_reply(m_ptr, ipc_status, r);
return;
}
/* Look up the device control block. */
assert(id >= 0 && id < MAX_DEVICES);
dp = &device[id];
/* Find the first non-busy worker thread. */
for (wid = 0; wid < dp->workers; wid++)
if (dp->worker[wid].state != STATE_BUSY)
break;
/* If the worker thread is dead, start a thread now, unless we have already
* reached the maximum number of threads.
*/
if (wid < dp->workers) {
wp = &dp->worker[wid];
assert(wp->state != STATE_EXITED);
/* If the non-busy thread has not yet been created, create one now. */
if (wp->state == STATE_DEAD)
master_create_worker(wp, wid, dp->id);
}
/* Enqueue the message at the device queue. */
enqueue(dp, m_ptr, ipc_status);
}
/*===========================================================================*
* master_init *
*===========================================================================*/
static void master_init(struct blockdriver *bdp)
{
/* Initialize the state of the master thread.
*/
int i, j;
assert(bdp != NULL);
assert(bdp->bdr_device != NULL);
bdtab = bdp;
/* Initialize device-specific data structures. */
for (i = 0; i < MAX_DEVICES; i++) {
device[i].id = i;
device[i].workers = 1;
mthread_event_init(&device[i].queue_event);
mthread_rwlock_init(&device[i].barrier);
for (j = 0; j < MAX_WORKERS; j++)
device[i].worker[j].state = STATE_DEAD;
}
/* Initialize a per-thread key, where each worker thread stores its own
* reference to the worker structure.
*/
if (mthread_key_create(&worker_key, NULL))
panic("blockdriver_mt: error initializing worker key");
}
/*===========================================================================*
* blockdriver_mt_get_tid *
*===========================================================================*/
thread_id_t blockdriver_mt_get_tid(void)
{
/* Return the ID of this thread.
*/
worker_t *wp;
wp = (worker_t *) mthread_getspecific(worker_key);
if (wp == NULL)
panic("blockdriver_mt: master thread cannot query thread ID\n");
return MAKE_TID(wp->device_id, wp->worker_id);
}
/*===========================================================================*
* blockdriver_mt_receive *
*===========================================================================*/
static void blockdriver_mt_receive(message *m_ptr, int *ipc_status)
{
/* Receive a message.
*/
int r;
r = sef_receive_status(ANY, m_ptr, ipc_status);
if (r != OK)
panic("blockdriver_mt: sef_receive_status() returned %d", r);
}
/*===========================================================================*
* blockdriver_mt_task *
*===========================================================================*/
void blockdriver_mt_task(struct blockdriver *driver_tab)
{
/* The multithreaded driver task.
*/
int ipc_status, i;
message mess;
/* Initialize first if necessary. */
if (!running) {
master_init(driver_tab);
running = TRUE;
}
/* The main message loop. */
while (running) {
/* Receive a message. */
blockdriver_mt_receive(&mess, &ipc_status);
/* Dispatch the message. */
master_handle_message(&mess, ipc_status);
/* Let other threads run. */
mthread_yield_all();
/* Clean up any exited threads. */
if (num_exited > 0)
master_handle_exits();
}
/* Free up resources. */
for (i = 0; i < MAX_DEVICES; i++)
mthread_event_destroy(&device[i].queue_event);
}
/*===========================================================================*
* blockdriver_mt_terminate *
*===========================================================================*/
void blockdriver_mt_terminate(void)
{
/* Instruct libblockdriver to shut down.
*/
running = FALSE;
}
/*===========================================================================*
* blockdriver_mt_sleep *
*===========================================================================*/
void blockdriver_mt_sleep(void)
{
/* Let the current thread sleep until it gets woken up by the master thread.
*/
worker_t *wp;
wp = (worker_t *) mthread_getspecific(worker_key);
if (wp == NULL)
panic("blockdriver_mt: master thread cannot sleep");
mthread_event_wait(&wp->sleep_event);
}
/*===========================================================================*
* blockdriver_mt_wakeup *
*===========================================================================*/
void blockdriver_mt_wakeup(thread_id_t id)
{
/* Wake up a sleeping worker thread from the master thread.
*/
worker_t *wp;
device_id_t device_id;
worker_id_t worker_id;
device_id = TID_DEVICE(id);
worker_id = TID_WORKER(id);
assert(device_id >= 0 && device_id < MAX_DEVICES);
assert(worker_id >= 0 && worker_id < MAX_WORKERS);
wp = &device[device_id].worker[worker_id];
assert(wp->state == STATE_RUNNING || wp->state == STATE_BUSY);
mthread_event_fire(&wp->sleep_event);
}
/*===========================================================================*
* blockdriver_mt_set_workers *
*===========================================================================*/
void blockdriver_mt_set_workers(device_id_t id, int workers)
{
/* Set the number of worker threads for the given device.
*/
device_t *dp;
assert(id >= 0 && id < MAX_DEVICES);
if (workers > MAX_WORKERS)
workers = MAX_WORKERS;
dp = &device[id];
/* If we are cleaning up, wake up all threads waiting on a queue event. */
if (workers == 1 && dp->workers > workers)
mthread_event_fire_all(&dp->queue_event);
dp->workers = workers;
}
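/* Illustrative sketch only (not part of the original file): roughly how a
 * multithreaded block driver's main function might drive this interface. The
 * driver table 'example_dtab' (which must at least set bdr_device and the
 * request hooks) and the SEF startup step are assumptions provided by the
 * actual driver; request handlers can additionally call
 * blockdriver_mt_set_workers() per device, typically from the open handler,
 * to allow concurrent requests.
 */
static struct blockdriver example_dtab;	/* filled in by the driver itself */

static int example_main(void)
{
	/* ... SEF initialization and blockdriver_announce() happen here ... */

	/* Enter the message loop; this only returns after some request
	 * handler has called blockdriver_mt_terminate().
	 */
	blockdriver_mt_task(&example_dtab);

	return 0;
}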

View File

@@ -1,94 +0,0 @@
/* This file contains the singlethreaded device driver interface.
*
* Changes:
* Aug 27, 2011 extracted from driver.c (A. Welzel)
*
* The entry points into this file are:
* blockdriver_task: the main message loop of the driver
* blockdriver_terminate: break out of the main message loop
* blockdriver_receive_mq: message receive interface with message queueing
* blockdriver_mq_queue: queue an incoming message for later processing
*/
#include <minix/drivers.h>
#include <minix/blockdriver.h>
#include "const.h"
#include "driver.h"
#include "mq.h"
static int running;
/*===========================================================================*
* blockdriver_receive_mq *
*===========================================================================*/
int blockdriver_receive_mq(message *m_ptr, int *status_ptr)
{
/* receive() interface for drivers with message queueing. */
/* Any queued messages? */
if (mq_dequeue(SINGLE_THREAD, m_ptr, status_ptr))
return OK;
/* Fall back to standard receive() interface otherwise. */
return driver_receive(ANY, m_ptr, status_ptr);
}
/*===========================================================================*
* blockdriver_terminate *
*===========================================================================*/
void blockdriver_terminate(void)
{
/* Break out of the main driver loop after finishing the current request. */
running = FALSE;
sef_cancel();
}
/*===========================================================================*
* blockdriver_task *
*===========================================================================*/
void blockdriver_task(struct blockdriver *bdp)
{
/* Main program of any block device driver task. */
int r, ipc_status;
message mess;
running = TRUE;
/* Here is the main loop of the disk task. It waits for a message, carries
* it out, and sends a reply.
*/
while (running) {
if ((r = blockdriver_receive_mq(&mess, &ipc_status)) != OK) {
if (r == EINTR && !running)
break;
panic("blockdriver_receive_mq failed: %d", r);
}
blockdriver_process(bdp, &mess, ipc_status);
}
}
/*===========================================================================*
* blockdriver_process *
*===========================================================================*/
void blockdriver_process(struct blockdriver *bdp, message *m_ptr,
int ipc_status)
{
/* Handle the given received message. */
blockdriver_process_on_thread(bdp, m_ptr, ipc_status, SINGLE_THREAD);
}
/*===========================================================================*
* blockdriver_mq_queue *
*===========================================================================*/
int blockdriver_mq_queue(message *m, int status)
{
/* Queue a message for later processing. */
return mq_enqueue(SINGLE_THREAD, m, status);
}

View File

@@ -1,234 +0,0 @@
/* IBM device driver utility functions. Author: Kees J. Bot
* 7 Dec 1995
* Entry point:
* partition: partition a disk according to the partition table(s) on it.
*/
#include <minix/blockdriver.h>
#include <minix/drvlib.h>
#include <unistd.h>
/* Extended partition? */
#define ext_part(s) ((s) == 0x05 || (s) == 0x0F)
static void parse_part_table(struct blockdriver *bdp, int device,
int style, int atapi, u8_t *tmp_buf);
static void extpartition(struct blockdriver *bdp, int extdev,
unsigned long extbase, u8_t *tmp_buf);
static int get_part_table(struct blockdriver *bdp, int device,
unsigned long offset, struct part_entry *table, u8_t *tmp_buf);
static void sort(struct part_entry *table);
/*============================================================================*
* partition *
*============================================================================*/
void partition(
struct blockdriver *bdp, /* device dependent entry points */
int device, /* device to partition */
int style, /* partitioning style: floppy, primary, sub. */
int atapi /* atapi device */
)
{
/* This routine is called on first open to initialize the partition tables
* of a device.
*/
u8_t *tmp_buf;
if ((*bdp->bdr_part)(device) == NULL)
return;
/* For multithreaded drivers, multiple partition() calls may be made on
* different devices in parallel. Hence we need a separate temporary buffer
* for each request.
*/
if (!(tmp_buf = alloc_contig(CD_SECTOR_SIZE, AC_ALIGN4K, NULL)))
panic("partition: unable to allocate temporary buffer");
parse_part_table(bdp, device, style, atapi, tmp_buf);
free_contig(tmp_buf, CD_SECTOR_SIZE);
}
/*============================================================================*
* parse_part_table *
*============================================================================*/
static void parse_part_table(
struct blockdriver *bdp, /* device dependent entry points */
int device, /* device to partition */
int style, /* partitioning style: floppy, primary, sub. */
int atapi, /* atapi device */
u8_t *tmp_buf /* temporary buffer */
)
{
/* This routine reads and parses a partition table. It may be called
* recursively. It makes sure that each partition falls safely within the
* device's limits. Depending on the partition style we are either making
* floppy partitions, primary partitions or subpartitions. Only primary
* partitions are sorted, because they are shared with other operating
* systems that expect this.
*/
struct part_entry table[NR_PARTITIONS], *pe;
int disk, par;
struct device *dv;
unsigned long base, limit, part_limit;
/* Get the geometry of the device to partition */
if ((dv = (*bdp->bdr_part)(device)) == NULL
|| dv->dv_size == 0) return;
base = (unsigned long)(dv->dv_base / SECTOR_SIZE);
limit = base + (unsigned long)(dv->dv_size / SECTOR_SIZE);
/* Read the partition table for the device. */
if(!get_part_table(bdp, device, 0L, table, tmp_buf)) {
return;
}
/* Compute the device number of the first partition. */
switch (style) {
case P_FLOPPY:
device += MINOR_fd0p0;
break;
case P_PRIMARY:
sort(table); /* sort a primary partition table */
device += 1;
break;
case P_SUB:
disk = device / DEV_PER_DRIVE;
par = device % DEV_PER_DRIVE - 1;
device = MINOR_d0p0s0 + (disk * NR_PARTITIONS + par) * NR_PARTITIONS;
}
/* Find an array of devices. */
if ((dv = (*bdp->bdr_part)(device)) == NULL) return;
/* Set the geometry of the partitions from the partition table. */
for (par = 0; par < NR_PARTITIONS; par++, dv++) {
/* Shrink the partition to fit within the device. */
pe = &table[par];
part_limit = pe->lowsec + pe->size;
if (part_limit < pe->lowsec) part_limit = limit;
if (part_limit > limit) part_limit = limit;
if (pe->lowsec < base) pe->lowsec = base;
if (part_limit < pe->lowsec) part_limit = pe->lowsec;
dv->dv_base = (u64_t)pe->lowsec * SECTOR_SIZE;
dv->dv_size = (u64_t)(part_limit - pe->lowsec) * SECTOR_SIZE;
if (style == P_PRIMARY) {
/* Each Minix primary partition can be subpartitioned. */
if (pe->sysind == MINIX_PART)
parse_part_table(bdp, device + par, P_SUB, atapi,
tmp_buf);
/* An extended partition has logical partitions. */
if (ext_part(pe->sysind))
extpartition(bdp, device + par, pe->lowsec, tmp_buf);
}
}
}
/*============================================================================*
* extpartition *
*============================================================================*/
static void extpartition(
struct blockdriver *bdp, /* device dependent entry points */
int extdev, /* extended partition to scan */
unsigned long extbase, /* sector offset of the base ext. partition */
u8_t *tmp_buf /* temporary buffer */
)
{
/* Extended partitions cannot be ignored, alas, because people like to move
* files to and from DOS partitions. Avoid reading this code, it's no fun.
*/
struct part_entry table[NR_PARTITIONS], *pe;
int subdev, disk, par;
struct device *dv;
unsigned long offset, nextoffset;
disk = extdev / DEV_PER_DRIVE;
par = extdev % DEV_PER_DRIVE - 1;
subdev = MINOR_d0p0s0 + (disk * NR_PARTITIONS + par) * NR_PARTITIONS;
offset = 0;
do {
if (!get_part_table(bdp, extdev, offset, table, tmp_buf)) return;
sort(table);
/* The table should contain one logical partition and optionally
* another extended partition. (It's a linked list.)
*/
nextoffset = 0;
for (par = 0; par < NR_PARTITIONS; par++) {
pe = &table[par];
if (ext_part(pe->sysind)) {
nextoffset = pe->lowsec;
} else
if (pe->sysind != NO_PART) {
if ((dv = (*bdp->bdr_part)(subdev)) == NULL) return;
dv->dv_base = (u64_t)(extbase + offset + pe->lowsec) *
SECTOR_SIZE;
dv->dv_size = (u64_t)pe->size * SECTOR_SIZE;
/* Out of devices? */
if (++subdev % NR_PARTITIONS == 0) return;
}
}
} while ((offset = nextoffset) != 0);
}
/*============================================================================*
* get_part_table *
*============================================================================*/
static int get_part_table(
struct blockdriver *bdp,
int device,
unsigned long offset, /* sector offset to the table */
struct part_entry *table, /* four entries */
u8_t *tmp_buf) /* temporary buffer */
{
/* Read the partition table for the device, return true iff there were no
* errors.
*/
iovec_t iovec1;
u64_t position;
int r;
position = (u64_t)offset * SECTOR_SIZE;
iovec1.iov_addr = (vir_bytes) tmp_buf;
iovec1.iov_size = CD_SECTOR_SIZE;
r = (*bdp->bdr_transfer)(device, FALSE /*do_write*/, position, SELF,
&iovec1, 1, BDEV_NOFLAGS);
if (r != CD_SECTOR_SIZE) {
return 0;
}
if (tmp_buf[510] != 0x55 || tmp_buf[511] != 0xAA) {
/* Invalid partition table. */
return 0;
}
memcpy(table, (tmp_buf + PART_TABLE_OFF), NR_PARTITIONS * sizeof(table[0]));
return 1;
}
/*===========================================================================*
* sort *
*===========================================================================*/
static void sort(struct part_entry *table)
{
/* Sort a partition table. */
struct part_entry *pe, tmp;
int n = NR_PARTITIONS;
do {
for (pe = table; pe < table + NR_PARTITIONS-1; pe++) {
if (pe[0].sysind == NO_PART
|| (pe[0].lowsec > pe[1].lowsec
&& pe[1].sysind != NO_PART)) {
tmp = pe[0]; pe[0] = pe[1]; pe[1] = tmp;
}
}
} while (--n > 0);
}
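/* Illustrative sketch, not part of the original file: a block driver would
 * typically call partition() once, from its open handler, so that the
 * partition and subpartition geometry is known before any transfer takes
 * place.  The names my_driver_tab and my_first_open() are hypothetical;
 * only the partition() entry point defined above is real.
 */
#include <minix/blockdriver.h>
#include <minix/drvlib.h>

extern struct blockdriver my_driver_tab;	/* hypothetical driver table */

static void my_first_open(int device, int is_atapi)
{
	/* P_PRIMARY: treat the device as a partitioned hard disk. */
	partition(&my_driver_tab, device, P_PRIMARY, is_atapi);
}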

View File

@@ -1,97 +0,0 @@
/* This file contains a simple message queue implementation to support both
* the single-threaded and the multithreaded driver implementations.
*
* Changes:
* Oct 27, 2011 rewritten to use sys/queue.h (D.C. van Moolenbroek)
* Aug 27, 2011 integrated into libblockdriver (A. Welzel)
*/
#include <minix/blockdriver_mt.h>
#include <sys/queue.h>
#include <assert.h>
#include "const.h"
#include "mq.h"
#define MQ_SIZE 128
struct mq_cell {
message mess;
int ipc_status;
STAILQ_ENTRY(mq_cell) next;
};
static struct mq_cell pool[MQ_SIZE];
static STAILQ_HEAD(queue, mq_cell) queue[MAX_DEVICES];
static STAILQ_HEAD(free_list, mq_cell) free_list;
/*===========================================================================*
* mq_init *
*===========================================================================*/
void mq_init(void)
{
/* Initialize the message queues and message cells.
*/
int i;
STAILQ_INIT(&free_list);
for (i = 0; i < MAX_DEVICES; i++)
STAILQ_INIT(&queue[i]);
for (i = 0; i < MQ_SIZE; i++)
STAILQ_INSERT_HEAD(&free_list, &pool[i], next);
}
/*===========================================================================*
* mq_enqueue *
*===========================================================================*/
int mq_enqueue(device_id_t device_id, const message *mess,
int ipc_status)
{
/* Add a message, including its IPC status, to the message queue of a device.
* Return TRUE iff the message was added successfully.
*/
struct mq_cell *cell;
assert(device_id >= 0 && device_id < MAX_DEVICES);
if (STAILQ_EMPTY(&free_list))
return FALSE;
cell = STAILQ_FIRST(&free_list);
STAILQ_REMOVE_HEAD(&free_list, next);
cell->mess = *mess;
cell->ipc_status = ipc_status;
STAILQ_INSERT_TAIL(&queue[device_id], cell, next);
return TRUE;
}
/*===========================================================================*
* mq_dequeue *
*===========================================================================*/
int mq_dequeue(device_id_t device_id, message *mess, int *ipc_status)
{
/* Return and remove a message, including its IPC status, from the message
* queue of a thread. Return TRUE iff a message was available.
*/
struct mq_cell *cell;
assert(device_id >= 0 && device_id < MAX_DEVICES);
if (STAILQ_EMPTY(&queue[device_id]))
return FALSE;
cell = STAILQ_FIRST(&queue[device_id]);
STAILQ_REMOVE_HEAD(&queue[device_id], next);
*mess = cell->mess;
*ipc_status = cell->ipc_status;
STAILQ_INSERT_HEAD(&free_list, cell, next);
return TRUE;
}
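/* Illustrative sketch, not part of the original file: the usage pattern
 * implied by the functions above.  A driver parks a request when its worker
 * thread is busy and replays parked requests once the worker is idle again.
 * The busy() and handle_request() routines are hypothetical placeholders.
 */
#include <minix/blockdriver_mt.h>
#include "mq.h"

extern int busy(device_id_t id);			/* hypothetical */
extern void handle_request(device_id_t id, const message *m_ptr,
	int ipc_status);				/* hypothetical */

static void example_receive(device_id_t id, const message *m_ptr,
	int ipc_status)
{
	message m;
	int status;

	if (busy(id)) {
		/* Worker busy: park the request; FALSE means the cell pool is full. */
		(void) mq_enqueue(id, m_ptr, ipc_status);
		return;
	}

	handle_request(id, m_ptr, ipc_status);

	/* Worker idle again: drain any requests parked in the meantime. */
	while (mq_dequeue(id, &m, &status))
		handle_request(id, &m, status);
}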

View File

@@ -1,9 +0,0 @@
#ifndef _BLOCKDRIVER_MQ_H
#define _BLOCKDRIVER_MQ_H
void mq_init(void);
int mq_enqueue(device_id_t device_id, const message *mess, int
ipc_status);
int mq_dequeue(device_id_t device_id, message *mess, int *ipc_status);
#endif /* _BLOCKDRIVER_MQ_H */

View File

@@ -1,284 +0,0 @@
/* This file implements block level tracing support. */
#include <minix/drivers.h>
#include <minix/blockdriver_mt.h>
#include <minix/btrace.h>
#include <sys/ioc_block.h>
#include <minix/minlib.h>
#include <assert.h>
#include "const.h"
#include "trace.h"
#define NO_TRACEDEV ((dev_t) -1)
#define NO_TIME ((u32_t) -1)
static int trace_enabled = FALSE;
static dev_t trace_dev = NO_TRACEDEV;
static btrace_entry *trace_buf = NULL;
static size_t trace_size = 0;
static size_t trace_pos;
static size_t trace_next;
static u64_t trace_tsc;
/* Pointers to in-progress trace entries for each thread (all worker threads,
* plus one for the main thread). Each pointer is set to NULL whenever no
* operation is currently being traced for that thread, for whatever reason.
*/
static btrace_entry *trace_ptr[MAX_THREADS + 1] = { NULL };
/*===========================================================================*
* trace_gettime *
*===========================================================================*/
static u32_t trace_gettime(void)
{
/* Return the current time, in microseconds since the start of the trace.
*/
u64_t tsc;
assert(trace_enabled);
read_tsc_64(&tsc);
tsc -= trace_tsc;
return tsc_64_to_micros(tsc);
}
/*===========================================================================*
* trace_ctl *
*===========================================================================*/
int trace_ctl(dev_t minor, unsigned long request, endpoint_t endpt,
cp_grant_id_t grant)
{
/* Process a block trace control request.
*/
size_t size;
int r, ctl, entries;
switch (request) {
case BIOCTRACEBUF:
/* The size cannot be changed when tracing is enabled. */
if (trace_enabled) return EBUSY;
/* Copy in the requested size. */
if ((r = sys_safecopyfrom(endpt, grant, 0, (vir_bytes) &size,
sizeof(size))) != OK)
return r;
if (size >= INT_MAX / sizeof(btrace_entry)) return EINVAL;
/* The size can only be set or reset, not changed. */
if (size > 0 && trace_size > 0) return EBUSY;
/* Allocate or free a buffer for tracing data. For future multi-device
* tracing support, the buffer is associated with a minor device.
*/
if (size == 0) {
if (trace_dev == NO_TRACEDEV) return OK;
if (trace_dev != minor) return EINVAL;
free(trace_buf);
trace_dev = NO_TRACEDEV;
} else {
if ((trace_buf = malloc(size * sizeof(btrace_entry))) == NULL)
return errno;
trace_dev = minor;
}
trace_size = size;
trace_pos = 0;
trace_next = 0;
return OK;
case BIOCTRACECTL:
/* We can only start/stop tracing if the given device has a trace
* buffer associated with it.
*/
if (trace_dev != minor) return EINVAL;
/* Copy in the request code. */
if ((r = sys_safecopyfrom(endpt, grant, 0, (vir_bytes) &ctl,
sizeof(ctl))) != OK)
return r;
/* Start or stop tracing. */
switch (ctl) {
case BTCTL_START:
if (trace_enabled) return EBUSY;
read_tsc_64(&trace_tsc);
trace_enabled = TRUE;
break;
case BTCTL_STOP:
if (!trace_enabled) return EINVAL;
trace_enabled = FALSE;
/* Cancel all ongoing trace operations. */
memset(trace_ptr, 0, sizeof(trace_ptr));
break;
default:
return EINVAL;
}
return OK;
case BIOCTRACEGET:
/* We can only retrieve tracing entries if the given device has a trace
* buffer associated with it.
*/
if (trace_dev != minor) return EINVAL;
if (trace_enabled) return EBUSY;
/* How much can we copy out? */
entries = MIN(trace_pos - trace_next,
_MINIX_IOCTL_SIZE_BIG(request) / sizeof(btrace_entry));
if (entries == 0)
return 0;
if ((r = sys_safecopyto(endpt, grant, 0,
(vir_bytes) &trace_buf[trace_next],
entries * sizeof(btrace_entry))) != OK)
return r;
trace_next += entries;
return entries;
}
return EINVAL;
}
/*===========================================================================*
* trace_start *
*===========================================================================*/
void trace_start(thread_id_t id, message *m_ptr)
{
/* Start creating a trace entry.
*/
btrace_entry *entry;
int req;
u64_t pos;
size_t size;
int flags;
if (!trace_enabled || trace_dev != m_ptr->m_lbdev_lblockdriver_msg.minor) return;
assert(id >= 0 && id < MAX_THREADS + 1);
if (trace_pos == trace_size)
return;
switch (m_ptr->m_type) {
case BDEV_OPEN: req = BTREQ_OPEN; break;
case BDEV_CLOSE: req = BTREQ_CLOSE; break;
case BDEV_READ: req = BTREQ_READ; break;
case BDEV_WRITE: req = BTREQ_WRITE; break;
case BDEV_GATHER: req = BTREQ_GATHER; break;
case BDEV_SCATTER: req = BTREQ_SCATTER; break;
case BDEV_IOCTL: req = BTREQ_IOCTL; break;
default: return;
}
switch (m_ptr->m_type) {
case BDEV_OPEN:
case BDEV_CLOSE:
pos = 0;
size = m_ptr->m_lbdev_lblockdriver_msg.access;
flags = 0;
break;
case BDEV_READ:
case BDEV_WRITE:
case BDEV_GATHER:
case BDEV_SCATTER:
pos = m_ptr->m_lbdev_lblockdriver_msg.pos;
size = m_ptr->m_lbdev_lblockdriver_msg.count;
flags = m_ptr->m_lbdev_lblockdriver_msg.flags;
break;
case BDEV_IOCTL:
pos = 0;
size = m_ptr->m_lbdev_lblockdriver_msg.request;
flags = 0;
/* Do not log trace control requests. */
switch (size) {
case BIOCTRACEBUF:
case BIOCTRACECTL:
case BIOCTRACEGET:
return;
}
break;
default:
/* Do not log any other messages. */
return;
}
entry = &trace_buf[trace_pos];
entry->request = req;
entry->size = size;
entry->position = pos;
entry->flags = flags;
entry->result = BTRES_INPROGRESS;
entry->start_time = trace_gettime();
entry->finish_time = NO_TIME;
trace_ptr[id] = entry;
trace_pos++;
}
/*===========================================================================*
* trace_setsize *
*===========================================================================*/
void trace_setsize(thread_id_t id, size_t size)
{
/* Set the current trace entry's actual (byte) size, for vector requests.
*/
btrace_entry *entry;
if (!trace_enabled) return;
assert(id >= 0 && id < MAX_THREADS + 1);
if ((entry = trace_ptr[id]) == NULL) return;
entry->size = size;
}
/*===========================================================================*
* trace_finish *
*===========================================================================*/
void trace_finish(thread_id_t id, int result)
{
/* Finish a trace entry.
*/
btrace_entry *entry;
if (!trace_enabled) return;
assert(id >= 0 && id < MAX_THREADS + 1);
if ((entry = trace_ptr[id]) == NULL) return;
entry->result = result;
entry->finish_time = trace_gettime();
trace_ptr[id] = NULL;
}
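/* Illustrative sketch, not part of the original file: the life cycle of one
 * trace entry as implied by the functions above.  The do_transfer() routine
 * is a hypothetical placeholder for the actual I/O work; only trace_start(),
 * trace_setsize() and trace_finish() are real.
 */
#include <minix/blockdriver_mt.h>
#include <stddef.h>
#include "trace.h"

extern int do_transfer(message *m_ptr, size_t *bytesp);	/* hypothetical */

static void example_handle(thread_id_t id, message *m_ptr)
{
	size_t bytes;
	int r;

	trace_start(id, m_ptr);		/* record request type, position, size */

	r = do_transfer(m_ptr, &bytes);	/* the actual work being traced */

	trace_setsize(id, bytes);	/* actual byte count, for vector requests */
	trace_finish(id, r);		/* result code and finish timestamp */
}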

View File

@@ -1,11 +0,0 @@
#ifndef _BLOCKDRIVER_TRACE_H
#define _BLOCKDRIVER_TRACE_H
int trace_ctl(dev_t minor, unsigned long request, endpoint_t endpt,
cp_grant_id_t grant);
void trace_start(thread_id_t thread_id, message *m_ptr);
void trace_setsize(thread_id_t thread_id, size_t size);
void trace_finish(thread_id_t thread_id, int r);
#endif /* _BLOCKDRIVER_TRACE_H */

View File

@@ -111,7 +111,7 @@ SUBDIR+= pkgconfig
.include "${.CURDIR}/thread-stub/Makefile.inc"
.include "${.CURDIR}/time/Makefile.inc"
.if defined(__MINIX)
.include "${.CURDIR}/sys-minix/Makefile.inc"
.include "${NETBSDSRCDIR}/minix/lib/libc/sys/Makefile.inc"
.else
.include "${.CURDIR}/tls/Makefile.inc"
.endif

View File

@@ -27,7 +27,7 @@ SRCS+= _setjmp.S
SRCS+= sigsetjmp.S
.if defined(__MINIX)
# Already defined in sys-minix
# Already defined in minix/lib/libc/arch/arm/sys
.else
SRCS+= makecontext.c resumecontext.c swapcontext.S

View File

@@ -1,28 +0,0 @@
# rts sources
HERE=${ARCHDIR}/sys-minix
.PATH: ${HERE}
TMP=ucontextoffsets.h.tmp
CF=${HERE}/ucontextoffsets.cf
INCS+=ucontextoffsets.h
ucontext.o: ucontextoffsets.h
SRCS+= \
__sigreturn.S \
_do_kernel_call_intr.S \
_ipc.S \
brksize.S \
get_minix_kerninfo.S \
ucontext.S
ucontextoffsets.h: ${CF}
ucontextoffsets.h: ${NETBSDSRCDIR}/sys/sys/ucontext.h
ucontextoffsets.h: ${NETBSDSRCDIR}/include/arch/${MACHINE_ARCH}/include/stackframe.h
ucontextoffsets.h:
${_MKTARGET_CREATE}
cat ${CF} | \
${TOOL_GENASSYM} -- ${CC} ${CFLAGS} ${CPPFLAGS} ${PROF} >$TMP && \
mv -f $TMP $@

View File

@@ -1,9 +0,0 @@
/* This routine is the low-level code for returning from signals. */
/* It calls _sigreturn, which is the normal "system call" routine. */
/* Both __sigreturn and _sigreturn are needed. */
#include <machine/asm.h>
IMPORT(sigreturn)
ENTRY(__sigreturn)
pop {r0} /* load sigframe.sf_scp into r0 as parameter */
b _C_LABEL(sigreturn) /* _sigreturn(struct sigcontext *sf_scpcopy) */

View File

@@ -1,8 +0,0 @@
#include <minix/ipcconst.h>
#include <machine/asm.h>
ENTRY(_do_kernel_call_intr)
/* r0 already holds msg ptr */
mov r3, #KERVEC_INTR /* r3 determines the SVC type */
svc #0 /* trap to kernel */
bx lr

View File

@@ -1,74 +0,0 @@
#include <minix/ipcconst.h>
#include <machine/asm.h>
/*===========================================================================*
* IPC assembly routines *
*===========================================================================*/
ENTRY(_ipc_send_intr)
push {fp}
mov fp, sp
mov r2, r1 /* r2 = msg ptr */
mov r1, r0 /* r1 = src_dest */
mov r0, #SEND /* _ipc_send(dest, ptr) */
mov r3, #IPCVEC_INTR /* r3 determines the SVC type */
svc #0 /* trap to kernel */
pop {fp}
bx lr
ENTRY(_ipc_receive_intr)
push {fp}
mov fp, sp
push {r2} /* save status ptr */
mov r2, r1 /* r2 = msg ptr */
mov r1, r0 /* r1 = src_dest */
mov r0, #RECEIVE /* _ipc_receive(src, ptr) */
mov r3, #IPCVEC_INTR /* r3 determines the SVC type */
svc #0 /* trap to kernel */
pop {r2} /* restore status ptr */
str r1, [r2]
pop {fp}
bx lr
ENTRY(_ipc_sendrec_intr)
push {fp}
mov fp, sp
mov r2, r1 /* r2 = msg ptr */
mov r1, r0 /* r1 = src_dest */
mov r0, #SENDREC /* _ipc_sendrec(srcdest, ptr) */
mov r3, #IPCVEC_INTR /* r3 determines the SVC type */
svc #0 /* trap to kernel */
pop {fp}
bx lr
ENTRY(_ipc_notify_intr)
push {fp}
mov fp, sp
mov r1, r0 /* r1 = src_dest */
mov r0, #NOTIFY /* _ipc_notify(srcdst) */
mov r3, #IPCVEC_INTR /* r3 determines the SVC type */
svc #0 /* trap to kernel */
pop {fp}
bx lr
ENTRY(_ipc_sendnb_intr)
push {fp}
mov fp, sp
mov r2, r1 /* r2 = msg ptr */
mov r1, r0 /* r1 = src_dest */
mov r0, #SENDNB /* _ipc_sendnb(dest, ptr) */
mov r3, #IPCVEC_INTR /* r3 determines the SVC type */
svc #0 /* trap to kernel */
pop {fp}
bx lr
ENTRY(_ipc_senda_intr)
push {fp}
mov fp, sp
mov r2, r0 /* r2 = table */
/* r1 already holds count */
mov r0, #SENDA /* _ipc_senda(table, count) */
mov r3, #IPCVEC_INTR /* r3 determines the SVC type */
svc #0 /* trap to kernel */
pop {fp}
bx lr

View File

@@ -1,5 +0,0 @@
.globl _end
.globl _brksize
.data
_brksize: .long _end

View File

@@ -1,17 +0,0 @@
#include <minix/ipcconst.h>
#include <machine/asm.h>
ENTRY(get_minix_kerninfo)
push {fp}
mov fp, sp
push {r0}
mov r1, #0
mov r2, #0
mov r0, #MINIX_KERNINFO /* _get_minix_kerninfo() */
mov r3, #IPCVEC_INTR /* r3 determines the SVC type */
svc #0 /* trap to kernel */
pop {r2} /* r2 = return struct ptr (was r0) */
str r1, [r2]
pop {fp}
bx lr

View File

@@ -1,143 +0,0 @@
#include <machine/asm.h>
#include <ucontextoffsets.h>
IMPORT(getuctx)
IMPORT(setuctx)
IMPORT(resumecontext)
/* int getcontext(ucontext_t *ucp)
* Initialise the structure pointed to by ucp to the current user context
* of the calling thread. */
ENTRY(getcontext)
ENTRY(_getcontext)
/* If a process does not use the FPU and does not care about saving its
* signal mask either, we can skip the context switch to PM and the kernel
* altogether and only save general-purpose registers. */
mov r3, lr /* Save return address:
* When setcontext or swapcontext is called,
* we jump to this address and continue
* running. */
/* r0 = ucp */
/* Check null pointer */
cmp r0, #0 /* ucp == NULL? */
bne 3f /* Not null, continue */
mov r1, #EFAULT
ldr r2, =_C_LABEL(errno)
str r1, [r2] /* errno = EFAULT */
mov r0, #-1 /* return -1 */
bx lr
3: /* Check flags */
ldr r1, [r0, #UC_FLAGS] /* r1 = ucp->uc_flags */
and r1, r1, #[_UC_IGNFPU|_UC_IGNSIGM]
cmp r1, #[_UC_IGNFPU|_UC_IGNSIGM] /* Allowed to ignore both? */
beq 1f /* If so, skip getuctx */
0:
push {r0, r3}
bl _C_LABEL(getuctx) /* getuctx(ucp) */
pop {r0, r3}
1:
/* Save the context */
mov lr, r3 /* Restore lr */
str lr, [r0, #LRREG] /* Save lr */
str lr, [r0, #PCREG] /* Save real RTA in mcp struct */
str sp, [r0, #SPREG] /* Save stack pointer */
str fp, [r0, #FPREG] /* Save fp */
str r4, [r0, #REG4] /* Save r4 */
str r5, [r0, #REG5] /* Save r5 */
str r6, [r0, #REG6] /* Save r6 */
str r7, [r0, #REG7] /* Save r7 */
str r8, [r0, #REG8] /* Save r8 */
str r9, [r0, #REG9] /* Save r9 */
str r10, [r0, #REG10] /* Save r10 */
ldr r1, =MCF_MAGIC
str r1, [r0, #MAGIC] /* Set magic value */
mov r1, #0
str r1, [r0, #REG0] /* Return 0 */
mov r0, #0 /* Return 0 */
2:
bx lr /* Restore return address */
/* int setcontext(const ucontext_t *ucp)
* Restore the user context pointed to by ucp. A successful call to
* setcontext does not return; program execution resumes at the point
* specified by the ucp argument. If ucp was created with getcontext(),
* program execution continues as if the corresponding call of getcontext()
* had just returned. If ucp was created with makecontext(), program
* execution continues with the function passed to makecontext(). */
ENTRY(setcontext)
/* If a process does not use the FPU and does not care about restoring its
* signal mask either, we can skip the context switch to PM and the kernel
* altogether and restore state here. */
/* r0 = ucp */
/* Check null pointer */
cmp r0, #0 /* ucp == NULL? */
bne 3f /* Not null, continue */
mov r1, #EFAULT
ldr r2, =_C_LABEL(errno)
str r1, [r2] /* errno = EFAULT */
mov r0, #-1 /* return -1 */
bx lr
3: /* Check flags */
ldr r1, [r0, #MAGIC] /* r1 = ucp->mc_context.mc_magic */
ldr r2, =MCF_MAGIC
cmp r1, r2 /* is the magic value set (is context valid)?*/
beq 4f /* is set, proceed */
mov r1, #EINVAL /* not set, return error code */
ldr r2, =_C_LABEL(errno)
str r1, [r2] /* errno = EINVAL */
mov r0, #-1 /* return -1 */
bx lr
4: ldr r1, [r0, #UC_FLAGS] /* r1 = ucp->uc_flags */
and r1, r1, #[_UC_IGNFPU|_UC_IGNSIGM]
cmp r1, #[_UC_IGNFPU|_UC_IGNSIGM] /* Allowed to ignore both? */
beq 1f /* Neither are set, so don't bother restoring FPU
* state and signal mask */
push {r0, r3}
0: bl _C_LABEL(setuctx) /* setuctx(ucp) */
pop {r0, r3}
1: /* Restore the registers */
ldr r4, [r0, #REG4] /* Restore r4 */
ldr r5, [r0, #REG5] /* Restore r5 */
ldr r6, [r0, #REG6] /* Restore r6 */
ldr r7, [r0, #REG7] /* Restore r7 */
ldr r8, [r0, #REG8] /* Restore r8 */
ldr r9, [r0, #REG9] /* Restore r9 */
ldr r10, [r0, #REG10] /* Restore r10 */
ldr r12, [r0, #REG12] /* Restore r12 */
ldr fp, [r0, #FPREG] /* Restore fp */
ldr sp, [r0, #SPREG] /* Restore sp */
ldr lr, [r0, #LRREG] /* Restore lr */
mov r3, r0
ldr r0, [r3, #REG0] /* Restore r0 */
2:
ldr pc, [r3, #PCREG] /* Restore pc */
/* void ctx_start()
* A wrapper to call resumecontext. Makecontext puts the ucp in r4.
* This function moves the ucp into r0 so that the ucp is the first
* parameter for resumecontext. The call to resumecontext will start
* the next context in the linked list (or exit the program if there
* is no context). */
ENTRY(ctx_start)
mov r0, r4
b _C_LABEL(resumecontext)

View File

@@ -1,31 +0,0 @@
include <minix/type.h>
include <sys/ucontext.h>
include <sys/errno.h>
include <machine/mcontext.h>
struct __ucontext
member UC_FLAGS uc_flags
member UC_LINK uc_link
member MAGIC uc_mcontext.mc_magic
member REG0 uc_mcontext.__gregs[_REG_R0]
member REG1 uc_mcontext.__gregs[_REG_R1]
member REG2 uc_mcontext.__gregs[_REG_R2]
member REG3 uc_mcontext.__gregs[_REG_R3]
member REG4 uc_mcontext.__gregs[_REG_R4]
member REG5 uc_mcontext.__gregs[_REG_R5]
member REG6 uc_mcontext.__gregs[_REG_R6]
member REG7 uc_mcontext.__gregs[_REG_R7]
member REG8 uc_mcontext.__gregs[_REG_R8]
member REG9 uc_mcontext.__gregs[_REG_R9]
member REG10 uc_mcontext.__gregs[_REG_R10]
member FPREG uc_mcontext.__gregs[_REG_FP]
member REG12 uc_mcontext.__gregs[_REG_R12]
member SPREG uc_mcontext.__gregs[_REG_SP]
member LRREG uc_mcontext.__gregs[_REG_LR]
member PCREG uc_mcontext.__gregs[_REG_PC]
define EFAULT EFAULT
define EINVAL EINVAL
define MCF_MAGIC MCF_MAGIC
define _UC_IGNFPU _UC_IGNFPU
define _UC_IGNSIGM _UC_IGNSIGM

View File

@@ -9,7 +9,7 @@ SRCS+= alloca.S byte_swap_2.S byte_swap_4.S fabs.S \
SRCS+= setjmp.S _setjmp.S sigsetjmp.S
.if defined(__MINIX)
# Already defined in sys-minix
# Already defined in minix/lib/libc/arch/i386/sys
.else
SRCS+= resumecontext.S swapcontext.S
.endif

View File

@@ -1,28 +0,0 @@
# rts sources
HERE=${ARCHDIR}/sys-minix
.PATH: ${HERE}
TMP=ucontextoffsets.h.tmp
CF=${HERE}/ucontextoffsets.cf
INCS+=ucontextoffsets.h
ucontext.o: ucontextoffsets.h
SRCS+= \
__sigreturn.S \
_do_kernel_call_intr.S \
_ipc.S \
brksize.S \
get_minix_kerninfo.S \
ucontext.S
ucontextoffsets.h: ${CF}
ucontextoffsets.h: ${NETBSDSRCDIR}/sys/sys/ucontext.h
ucontextoffsets.h: ${NETBSDSRCDIR}/include/arch/${MACHINE_ARCH}/include/stackframe.h
ucontextoffsets.h:
${_MKTARGET_CREATE}
cat ${CF} | \
${TOOL_GENASSYM} -- ${CC} ${CFLAGS} ${CPPFLAGS} ${PROF} >$TMP && \
mv -f $TMP $@

View File

@@ -1,15 +0,0 @@
/* This routine is the low-level code for returning from signals. */
/* It calls _sigreturn, which is the normal "system call" routine. */
/* Both __sigreturn and _sigreturn are needed. */
#include <machine/asm.h>
IMPORT(sigreturn)
ENTRY(__sigreturn)
addl $16, %esp
#ifndef __PIC__
jmp _C_LABEL(sigreturn)
#else
PIC_PROLOGUE /* push %ebx, but we do not care */
pop %eax /* special knowledge of how PIC works: discard pushed EBX */
jmp PIC_PLT(_C_LABEL(sigreturn))
#endif /* PIC */

View File

@@ -1,8 +0,0 @@
#include <minix/ipcconst.h>
#include <machine/asm.h>
ENTRY(_do_kernel_call_intr)
/* pass the message pointer to kernel in the %eax register */
movl 4(%esp), %eax
int $KERVEC_INTR
ret

View File

@@ -1,88 +0,0 @@
#include <minix/ipcconst.h>
#include <machine/asm.h>
SRC_DST = 8 /* source/ destination process */
MESSAGE = 12 /* message pointer */
STATUS = 16 /* status pointer */
/* For _ipc_senda() */
MSGTAB = 8 /* message table */
TABCOUNT = 12 /* number of entries in message table */
/*===========================================================================*
* IPC assembly routines *
*===========================================================================*/
/* all message passing routines save ebx, but destroy eax and ecx. */
ENTRY(_ipc_send_intr)
push %ebp
movl %esp, %ebp
push %ebx
movl SRC_DST(%ebp), %eax /* eax = dest-src */
movl MESSAGE(%ebp), %ebx /* ebx = message pointer */
movl $SEND, %ecx /* _ipc_send(dest, ptr) */
int $IPCVEC_INTR /* trap to the kernel */
pop %ebx
pop %ebp
ret
ENTRY(_ipc_receive_intr)
push %ebp
movl %esp, %ebp
push %ebx
movl SRC_DST(%ebp), %eax /* eax = dest-src */
movl MESSAGE(%ebp), %ebx /* ebx = message pointer */
movl $RECEIVE, %ecx /* _ipc_receive(src, ptr) */
int $IPCVEC_INTR /* trap to the kernel */
movl STATUS(%ebp), %ecx /* ecx = status pointer */
movl %ebx, (%ecx)
pop %ebx
pop %ebp
ret
ENTRY(_ipc_sendrec_intr)
push %ebp
movl %esp, %ebp
push %ebx
movl SRC_DST(%ebp), %eax /* eax = dest-src */
movl MESSAGE(%ebp), %ebx /* ebx = message pointer */
movl $SENDREC, %ecx /* _ipc_sendrec(srcdest, ptr) */
int $IPCVEC_INTR /* trap to the kernel */
pop %ebx
pop %ebp
ret
ENTRY(_ipc_notify_intr)
push %ebp
movl %esp, %ebp
push %ebx
movl SRC_DST(%ebp), %eax /* eax = destination */
movl $NOTIFY, %ecx /* _ipc_notify(srcdst) */
int $IPCVEC_INTR /* trap to the kernel */
pop %ebx
pop %ebp
ret
ENTRY(_ipc_sendnb_intr)
push %ebp
movl %esp, %ebp
push %ebx
movl SRC_DST(%ebp), %eax /* eax = dest-src */
movl MESSAGE(%ebp), %ebx /* ebx = message pointer */
movl $SENDNB, %ecx /* _ipc_sendnb(dest, ptr) */
int $IPCVEC_INTR /* trap to the kernel */
pop %ebx
pop %ebp
ret
ENTRY(_ipc_senda_intr)
push %ebp
movl %esp, %ebp
push %ebx
movl TABCOUNT(%ebp), %eax /* eax = count */
movl MSGTAB(%ebp), %ebx /* ebx = table */
movl $SENDA, %ecx /* _ipc_senda(table, count) */
int $IPCVEC_INTR /* trap to the kernel */
pop %ebx
pop %ebp
ret

View File

@@ -1,5 +0,0 @@
.globl _end
.globl _brksize
.data
_brksize: .long _end

View File

@@ -1,17 +0,0 @@
#include <minix/ipcconst.h>
#include <machine/asm.h>
ENTRY(get_minix_kerninfo)
push %ebp
movl %esp, %ebp
push %ebx
movl $0, %eax
movl $0, %ebx
movl $MINIX_KERNINFO, %ecx
int $IPCVEC_INTR /* trap to the kernel */
movl 8(%ebp), %ecx /* ecx = return struct ptr */
movl %ebx, (%ecx)
pop %ebx
pop %ebp
ret

View File

@@ -1,144 +0,0 @@
#include <machine/asm.h>
#include <ucontextoffsets.h>
IMPORT(getuctx)
IMPORT(setuctx)
IMPORT(resumecontext)
.globl _C_LABEL(__errno)
/* int getcontext(ucontext_t *ucp)
* Initialise the structure pointed to by ucp to the current user context
* of the calling thread. */
ENTRY(getcontext)
ENTRY(_getcontext)
/* If a process does not use the FPU and does not care about saving its
* signal mask either, we can skip the context switch to PM and the kernel
* altogether and only save general-purpose registers. */
mov 4(%esp), %edx /* edx = ucp */
/* Check null pointer */
cmp $0, %edx /* edx == NULL? */
jne 3f /* Not null, continue */
PIC_PROLOGUE
call PIC_PLT(_C_LABEL(__errno))
PIC_EPILOGUE
movl $EFAULT, (%eax)
xor %eax, %eax
dec %eax /* return -1 */
ret
3: /* Check flags */
mov UC_FLAGS(%edx), %eax /* eax = ucp->uc_flags */
and $[_UC_IGNFPU|_UC_IGNSIGM], %eax
cmp $[_UC_IGNFPU|_UC_IGNSIGM], %eax
jz 5f /* Ignore both, skip getuctx */
PIC_PROLOGUE
push %edx /* push a copy for us */
push %edx /* push a copy as function argument */
call PIC_PLT(_C_LABEL(getuctx)) /* getuctx(ucp) */
pop %edx /* clean up stack */
pop %edx /* clean up stack and restore edx */
PIC_EPILOGUE
5:
/* Save the context */
pop PC(%edx) /* Save real RTA in mcp struct */
mov %esp, SP(%edx) /* Save stack pointer (now pointing to ucp) */
/* Save GP registers (except EAX and EDX) */
mov %ebp, BP(%edx) /* Save EBP */
mov %esi, SI(%edx) /* Save ESI */
mov %edi, DI(%edx) /* Save EDI */
mov %ebx, BX(%edx) /* Save EBX */
mov %ecx, CX(%edx) /* Save ECX */
movl $MCF_MAGIC, MAGIC(%edx) /* Set magic value */
xor %eax, %eax /* Return 0 */
jmp *PC(%edx) /* Return return address */
/* int setcontext(const ucontext_t *ucp)
* Restore the user context pointed to by ucp. A successful call to
* setcontext does not return; program execution resumes at the point
* specified by the ucp argument. If ucp was created with getcontext(),
* program execution continues as if the corresponding call of getcontext()
* had just returned. If ucp was created with makecontext(), program
* execution continues with the function passed to makecontext(). */
ENTRY(setcontext)
/* If a process does not use the FPU and does not care about restoring its
* signal mask either, we can skip the context switch to PM and the kernel
* altogether and restore state here. */
mov 4(%esp), %edx /* edx = ucp */
/* Check null pointer */
cmp $0, %edx /* edx == NULL? */
jnz 3f /* Not null, continue */
movl $EFAULT, %edx
0: push %edx /* preserve errno */
PIC_PROLOGUE
call PIC_PLT(_C_LABEL(__errno))
PIC_EPILOGUE
pop %edx
movl %edx, (%eax)
xor %eax, %eax
dec %eax /* return -1 */
ret
3: /* Check flags */
cmpl $MCF_MAGIC, MAGIC(%edx) /* is the magic value set (is context valid)?*/
jz 4f /* is set, proceed */
movl $EINVAL, %edx /* not set, return error code */
jmp 0b
4: mov UC_FLAGS(%edx), %eax /* eax = ucp->uc_flags */
and $[_UC_IGNFPU|_UC_IGNSIGM], %eax
cmp $[_UC_IGNFPU|_UC_IGNSIGM], %eax
jz 5f /* Ignore both, so don't bother restoring FPU
* state and signal mask */
PIC_PROLOGUE
push %edx /* push a copy for us */
push %edx /* push a copy as function argument */
call PIC_PLT(_C_LABEL(setuctx)) /* setuctx(ucp) */
pop %edx /* clean up stack */
pop %edx /* clean up stack and restore edx */
PIC_EPILOGUE
5: /* Restore the registers (except EAX and EDX) */
mov CX(%edx), %ecx /* Restore ECX */
mov BX(%edx), %ebx /* Restore EBX */
mov DI(%edx), %edi /* Restore EDI */
mov SI(%edx), %esi /* Restore ESI */
mov BP(%edx), %ebp /* Restore EBP */
mov SP(%edx), %esp /* Restore stack pointer */
xor %eax, %eax /* Return 0 */
jmp *PC(%edx) /* Return to RTA */
/* void ctx_start((void *func)(int arg1, ..., argn), arg1, ..., argn,
* ucontext_t *ucp)
* A wrapper to start function `func'. ESI register will contain a pointer
* to ucp on the stack. By setting ESP to ESI, we effectively 'remove' all
* arguments to `func' from the stack. Finally, a call to resumecontext
* will start the next context in the linked list (or exit the program if
* there is no context).
*
* Since PIC needs the EBX register, which is pushed on the stack by
* PIC_PROLOGUE, we need an extra bit of salsa here.
*/
ENTRY(ctx_start)
/* 0(esp) -> func
* 4(esp) -> arg1
* ...
* 4*n(esp) -> argn
* 4*(n+1)(esp) -> ucp */
pop %eax /* eax = func */
call *%eax /* func(arg1, ..., argn) */
PIC_PROLOGUE /* may push %ebx, but we do not care */
mov %esi, %esp /* Clean up stack, keep %ebx = &GOT */
/* ucp is now at the top of the stack again */
call PIC_PLT(_C_LABEL(resumecontext)) /* resumecontext(ucp) */
ret /* never reached */

View File

@@ -1,24 +0,0 @@
include <minix/type.h>
include <sys/ucontext.h>
include <sys/errno.h>
include <machine/mcontext.h>
struct __ucontext
member UC_FLAGS uc_flags
member UC_LINK uc_link
member MAGIC uc_mcontext.mc_magic
member DI uc_mcontext.__gregs[_REG_EDI]
member SI uc_mcontext.__gregs[_REG_ESI]
member BP uc_mcontext.__gregs[_REG_EBP]
member AX uc_mcontext.__gregs[_REG_EAX]
member BX uc_mcontext.__gregs[_REG_EBX]
member CX uc_mcontext.__gregs[_REG_ECX]
member DX uc_mcontext.__gregs[_REG_EDX]
member PC uc_mcontext.__gregs[_REG_EIP]
member SP uc_mcontext.__gregs[_REG_ESP]
define EFAULT EFAULT
define EINVAL EINVAL
define MCF_MAGIC MCF_MAGIC
define _UC_IGNFPU _UC_IGNFPU
define _UC_IGNSIGM _UC_IGNSIGM

View File

@@ -3,7 +3,7 @@
# gen sources
.if defined(__MINIX)
.PATH: ${.CURDIR}/gen/minix
.PATH: ${NETBSDSRCDIR}/minix/lib/libc/gen
.endif # defined(__MINIX)
.PATH: ${ARCHDIR}/gen ${.CURDIR}/gen

View File

@@ -1,16 +0,0 @@
/*
* clock - determine the processor time used
*/
#include <sys/cdefs.h>
#include "namespace.h"
#include <time.h>
#include <sys/times.h>
clock_t clock(void)
{
struct tms tms;
times(&tms);
return tms.tms_utime;
}

View File

@@ -1,29 +0,0 @@
/* getdomainname() Author: Kees J. Bot
* 2 Dec 1994
*/
#define nil 0
#include "namespace.h"
#include <sys/types.h>
#include <unistd.h>
#include <string.h>
#ifdef __weak_alias
__weak_alias(getdomainname, _getdomainname)
#endif
int getdomainname(char *result, size_t size)
{
char nodename[256];
char *domain;
if (gethostname(nodename, sizeof(nodename)) < 0)
return -1;
nodename[sizeof(nodename)-1]= 0;
if ((domain = strchr(nodename, '.')) != NULL)
strncpy(result, domain+1, size);
else
result[0] = '\0';
if (size > 0) result[size-1]= 0;
return 0;
}
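/* Illustrative usage, not part of the original file: with a node name of
 * "boson.example.org" the code above yields the domain "example.org"; for a
 * plain "boson" (no dot) it yields an empty string.
 */
#include <unistd.h>
#include <stdio.h>

static void example_print_domain(void)
{
	char domain[256];

	if (getdomainname(domain, sizeof(domain)) == 0)
		printf("domain: '%s'\n", domain);
}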

View File

@@ -1,31 +0,0 @@
/* gethostname(2) system call emulation */
#include <sys/cdefs.h>
#include "namespace.h"
#include <sys/types.h>
#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <minix/paths.h>
#ifdef __weak_alias
__weak_alias(gethostname, _gethostname)
#endif
int gethostname(char *buf, size_t len)
{
int fd;
int r;
char *nl;
if ((fd= open(_PATH_HOSTNAME_FILE, O_RDONLY)) < 0) return -1;
r= read(fd, buf, len);
close(fd);
if (r == -1) return -1;
buf[len-1]= '\0';
if ((nl= strchr(buf, '\n')) != NULL) *nl= '\0';
return 0;
}

View File

@@ -1,41 +0,0 @@
#include <sys/cdefs.h>
#include "namespace.h"
#include <sys/types.h>
#include <minix/paths.h>
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <lib.h>
#ifdef __weak_alias
__weak_alias(getloadavg, _getloadavg)
#endif
/* Retrieve system load average information. */
int getloadavg(double *loadavg, int nelem)
{
FILE *fp;
int i;
if(nelem < 1) {
errno = ENOSPC;
return -1;
}
if((fp = fopen(_PATH_PROC "loadavg", "r")) == NULL)
return -1;
for(i = 0; i < nelem; i++)
if(fscanf(fp, "%lf", &loadavg[i]) != 1)
break;
fclose(fp);
if (i == 0) {
errno = ENOENT;
return -1;
}
return i;
}
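/* Illustrative usage, not part of the original file: fetch up to three
 * load-average samples and print however many entries were available.
 */
#include <stdio.h>
#include <stdlib.h>

static void example_print_load(void)
{
	double loads[3];
	int i, n;

	if ((n = getloadavg(loads, 3)) == -1)
		return;
	for (i = 0; i < n; i++)
		printf("load[%d] = %.2f\n", i, loads[i]);
}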

View File

@@ -1,20 +0,0 @@
/*
getpagesize.c
*/
#include <sys/cdefs.h>
#include <sys/types.h>
#include "namespace.h"
#include <machine/param.h>
#include <machine/vmparam.h>
#include <unistd.h>
#ifdef __weak_alias
__weak_alias(getpagesize, _getpagesize)
#endif
int getpagesize(void)
{
return PAGE_SIZE;
}

View File

@@ -1,74 +0,0 @@
/* getpass() - read a password Author: Kees J. Bot
* Feb 16 1993
*/
#include <sys/cdefs.h>
#include "namespace.h"
#include <sys/types.h>
#include <fcntl.h>
#include <unistd.h>
#include <signal.h>
#include <termios.h>
#include <string.h>
#ifdef __weak_alias
__weak_alias(getpass, _getpass)
#endif
static int intr;
static void catch(int sig)
{
intr= 1;
}
char *getpass(const char *prompt)
{
struct sigaction osa, sa;
struct termios cooked, raw;
static char password[32+1];
int fd, n= 0;
/* Try to open the controlling terminal. */
if ((fd= open("/dev/tty", O_RDONLY)) < 0) return NULL;
/* Trap interrupts unless ignored. */
intr= 0;
sigaction(SIGINT, NULL, &osa);
if (osa.sa_handler != SIG_IGN) {
sigemptyset(&sa.sa_mask);
sa.sa_flags= 0;
sa.sa_handler= catch;
sigaction(SIGINT, &sa, &osa);
}
/* Set the terminal to non-echo mode. */
tcgetattr(fd, &cooked);
raw= cooked;
raw.c_iflag|= ICRNL;
raw.c_lflag&= ~ECHO;
raw.c_lflag|= ECHONL;
raw.c_oflag|= OPOST | ONLCR;
tcsetattr(fd, TCSANOW, &raw);
/* Print the prompt. (After setting non-echo!) */
write(2, prompt, strlen(prompt));
/* Read the password, 32 characters max. */
while (read(fd, password+n, 1) > 0) {
if (password[n] == '\n') break;
if (n < 32) n++;
}
password[n]= 0;
/* Terminal back to cooked mode. */
tcsetattr(fd, TCSANOW, &cooked);
close(fd);
/* Interrupt? */
sigaction(SIGINT, &osa, NULL);
if (intr) raise(SIGINT);
return password;
}

View File

@@ -1,21 +0,0 @@
/*
* (c) copyright 1987 by the Vrije Universiteit, Amsterdam, The Netherlands.
* See the copyright notice in the ACK home directory, in the file "Copyright".
*/
/* $Header$ */
#if defined(_POSIX_SOURCE)
#include <sys/types.h>
#endif
#include <signal.h>
int _kill(int pid, int sig);
pid_t getpid(void);
int
raise(int sig)
{
if (sig < 0 || sig >= _NSIG)
return -1;
return _kill(getpid(), sig);
}

View File

@@ -1,64 +0,0 @@
/* sethostname(2) system call emulation */
#include <sys/cdefs.h>
#include "namespace.h"
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#include <minix/paths.h>
#ifdef __weak_alias
__weak_alias(sethostname, _sethostname)
#endif
int sethostname(const char *buf, size_t len)
{
int fd;
int r;
int tmperr;
char name[20];
strlcpy(name, "/tmp/hostname.XXXXX",sizeof(name));
fd = mkstemp(name);
if (fd == -1)
return -1;
r = fchmod(fd, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
if (r == -1) {
tmperr = errno;
close(fd);
unlink(name);
errno = tmperr;
return -1;
}
r = write(fd, buf, len);
tmperr = errno;
close(fd);
if (r == -1) {
unlink(name);
errno = tmperr;
return -1;
}
if (r < len) {
unlink(name);
errno = ENOSPC;
return -1;
}
r = rename(name, _PATH_HOSTNAME_FILE);
if (r == -1) {
tmperr = errno;
unlink(name);
errno = tmperr;
}
return 0;
}

View File

@@ -1,85 +0,0 @@
/* sysconf.c POSIX 4.8.1
* long int sysconf(int name);
*
* POSIX allows some of the values in <limits.h> to be increased at
* run time. The sysconf() function allows such values to be checked
* at run time. MINIX does not use this facility - the run time
* limits are those given in <limits.h>.
*/
#include <sys/cdefs.h>
#include "namespace.h"
#include <lib.h>
#include <unistd.h>
#include <errno.h>
#include <limits.h>
#include <time.h>
#include <stdio.h>
#include <minix/paths.h>
#ifdef __weak_alias
__weak_alias(sysconf, __sysconf)
#endif
static u32_t get_hz(void)
{
FILE *fp;
u32_t hz;
int r;
if ((fp = fopen(_PATH_PROC "hz", "r")) != NULL)
{
r = fscanf(fp, "%u", &hz);
fclose(fp);
if (r == 1)
return hz;
}
return DEFAULT_HZ;
}
long int sysconf(int name)	/* property being inspected */
{
switch(name) {
case _SC_ARG_MAX:
return (long) ARG_MAX;
case _SC_CHILD_MAX:
return (long) CHILD_MAX;
case _SC_CLK_TCK:
return (long) get_hz();
case _SC_NGROUPS_MAX:
return (long) NGROUPS_MAX;
case _SC_OPEN_MAX:
return (long) OPEN_MAX;
case _SC_JOB_CONTROL:
return -1L; /* no job control */
case _SC_SAVED_IDS:
return -1L; /* no saved uid/gid */
case _SC_VERSION:
return (long) _POSIX_VERSION;
case _SC_STREAM_MAX:
return (long) _POSIX_STREAM_MAX;
case _SC_TZNAME_MAX:
return (long) _POSIX_TZNAME_MAX;
case _SC_PAGESIZE:
return getpagesize();
case _SC_LINE_MAX:
return (long) LINE_MAX;
default:
errno = EINVAL;
return -1L;
}
}
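/* Illustrative usage, not part of the original file: converting the clock
 * ticks reported by times(2) into seconds with the _SC_CLK_TCK value, which
 * the code above reads from the hz file under _PATH_PROC.
 */
#include <sys/times.h>
#include <unistd.h>
#include <stdio.h>

static void example_report_cpu_seconds(void)
{
	struct tms t;
	long hz;

	if ((hz = sysconf(_SC_CLK_TCK)) > 0 && times(&t) != (clock_t)-1)
		printf("user CPU: %.2f s\n", (double)t.tms_utime / (double)hz);
}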

View File

@@ -1,66 +0,0 @@
/* uname(3) - describe the machine. Author: Kees J. Bot
* 5 Dec 1992
*/
#include <sys/cdefs.h>
#include "namespace.h"
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <string.h>
#include <sys/types.h>
#include <sys/utsname.h>
#ifdef __weak_alias
__weak_alias(uname, _uname)
#endif
#define uts_get(field, string) \
if (sysuname(_UTS_GET, field, name->string, sizeof(name->string)) < 0) \
return -1; \
name->string[sizeof(name->string)-1]= 0;
int uname(struct utsname *name)
{
int hf, n, err;
char *nl;
/* Get each of the strings with a sysuname call. Null terminate them,
* because the buffers in the kernel may grow before this and the
* programs are recompiled.
*/
uts_get(_UTS_SYSNAME, sysname);
uts_get(_UTS_NODENAME, nodename);
uts_get(_UTS_RELEASE, release);
uts_get(_UTS_VERSION, version);
uts_get(_UTS_MACHINE, machine);
uts_get(_UTS_ARCH, arch);
#if 0
uts_get(_UTS_KERNEL, kernel);
uts_get(_UTS_HOSTNAME, hostname);
uts_get(_UTS_BUS, bus);
#endif
/* Try to read the node name from /etc/hostname.file. This information
* should be stored in the kernel.
*/
if ((hf = open("/etc/hostname.file", O_RDONLY)) < 0) {
if (errno != ENOENT) return(-1);
} else {
n = read(hf, name->nodename, sizeof(name->nodename) - 1);
err = errno;
close(hf);
errno = err;
if (n < 0) return(-1);
name->nodename[n] = 0;
if ((nl = strchr(name->nodename, '\n')) != NULL) {
memset(nl, 0, (name->nodename +
sizeof(name->nodename)) - nl);
}
}
return 0;
}
/*
* $PchId: _uname.c,v 1.4 1995/11/27 20:09:08 philip Exp $
*/

View File

@@ -1,22 +0,0 @@
#include <sys/cdefs.h>
#include <lib.h>
#include "namespace.h"
#include <string.h>
#include <sys/wait.h>
#ifdef __weak_alias
__weak_alias(wait, _wait)
#endif
pid_t wait(int * status)
{
message m;
memset(&m, 0, sizeof(m));
m.m_lc_pm_waitpid.pid = -1;
m.m_lc_pm_waitpid.options = 0;
if (_syscall(PM_PROC_NR, PM_WAITPID, &m) < 0) return(-1);
if (status != 0) *status = m.m_pm_lc_waitpid.status;
return(m.m_type);
}

View File

@@ -1,22 +0,0 @@
#include <sys/cdefs.h>
#include <lib.h>
#include "namespace.h"
#include <string.h>
#include <sys/wait.h>
#ifdef __weak_alias
__weak_alias(waitpid, _waitpid)
#endif
pid_t waitpid(pid_t pid, int *status, int options)
{
message m;
memset(&m, 0, sizeof(m));
m.m_lc_pm_waitpid.pid = pid;
m.m_lc_pm_waitpid.options = options;
if (_syscall(PM_PROC_NR, PM_WAITPID, &m) < 0) return(-1);
if (status != 0) *status = m.m_pm_lc_waitpid.status;
return m.m_type;
}

View File

@@ -1,6 +0,0 @@
# MINIX-specific sources
.PATH: ${.CURDIR}/minix
SRCS+= minix-malloc.c minix-calloc.c
SRCS+= minix-malloc-debug.c

View File

@@ -1,8 +0,0 @@
#include <minix/u64.h>
#include <sys/types.h>
/* malloc-debug.c */
void *_dbg_malloc(size_t size);
void *_dbg_realloc(void *oldp, size_t size);
void _dbg_free(void *ptr);

View File

@@ -1,28 +0,0 @@
/* $Header$ */
#include <stdlib.h>
/* replace undef by define */
#define ALIGN_EIGHT_BYTES /* Use 8-byte alignment. */
#ifdef ALIGN_EIGHT_BYTES
#define ALIGN_SIZE 8
#else
#define ALIGN_SIZE sizeof(size_t)
#endif
#define ALIGN(x) (((x) + (ALIGN_SIZE - 1)) & ~(ALIGN_SIZE - 1))
void *
calloc(size_t nelem, size_t elsize)
{
register char *p;
register size_t *q;
size_t size = ALIGN(nelem * elsize);
p = malloc(size);
if (p == NULL) return NULL;
q = (size_t *) (p + size);
while ((char *) q > p) *--q = 0;
return p;
}

View File

@@ -1,245 +0,0 @@
/* pointless without assertions */
#ifdef NDEBUG
#undef NDEBUG
#endif
#include <assert.h>
#include <machine/vm.h>
#include <machine/vmparam.h>
#include <minix/minlib.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>
#include "malloc-debug.h"
#if 0
#include <stdio.h>
static int reenter;
#define LOG(args) if (!reenter) { reenter++; printf args; reenter--; }
#else
#define LOG(args)
#endif
struct block {
size_t size;
unsigned magic;
};
static u8_t *ptr_min, *ptr_max;
static unsigned long page_round_down(unsigned long x)
{
return x - x % PAGE_SIZE;
}
static unsigned long page_round_up(unsigned long x)
{
unsigned long rem;
rem = x % PAGE_SIZE;
if (rem)
x += PAGE_SIZE - rem;
return x;
}
#define page_round_down_ptr(x) ((u8_t *) page_round_down((unsigned long) (x)))
#define page_round_up_ptr(x) ((u8_t *) page_round_up((unsigned long) (x)))
static unsigned long block_compute_magic(struct block *block)
{
return (unsigned long) block + block->size + 0xDEADBEEFUL;
}
static size_t block_get_totalsize(size_t size)
{
return page_round_up(sizeof(struct block) + size);
}
static u8_t *block_get_endptr(struct block *block)
{
return (u8_t *) block + block_get_totalsize(block->size);
}
static u8_t *block_get_dataptr(struct block *block)
{
return block_get_endptr(block) - block->size;
}
static void block_check(struct block *block)
{
u8_t *dataptr, *p;
/* check location */
assert(block);
assert(!((unsigned long) block % PAGE_SIZE));
assert((u8_t *) block >= ptr_min);
assert((u8_t *) block <= ptr_max);
/* check size */
assert(block->size > 0);
/* check fillers */
assert(block->magic == block_compute_magic(block));
dataptr = block_get_dataptr(block);
for (p = (u8_t *) (block + 1); p < dataptr; p++)
assert(*p == ((unsigned long) p & 0xff));
}
static struct block *block_alloc(size_t size)
{
struct block *block;
u8_t *dataptr, *p, *ptr;
unsigned page_index, page_index_max;
size_t sizerem, totalsize;
u64_t tsc;
LOG(("block_alloc; size=0x%x\n", size));
assert(size > 0);
/* round size up to machine word size */
sizerem = size % sizeof(long);
if (sizerem)
size += sizeof(long) - sizerem;
/* initialize address range */
if (!ptr_min && !ptr_max) {
/* keep a safe distance from areas that are in use:
* - 4MB from the break (should not change if traditional
* malloc is not used so a small margin is sufficient
* - 256MB from the stack (big margin because memory beyond
* this may be allocated by mmap when the address space
* starts to fill up)
*/
ptr_min = page_round_up_ptr((u8_t *) sbrk(0) + 0x400000);
ptr_max = page_round_down_ptr((u8_t *) &size - 0x10000000);
}
assert(ptr_min);
assert(ptr_max);
assert(ptr_min < ptr_max);
/* select address at random */
tsc = 0;
/* LSC FIXME Broken for now... */
/* read_tsc_64(&tsc); */
totalsize = block_get_totalsize(size);
page_index_max = (ptr_max - ptr_min - totalsize) / PAGE_SIZE;
page_index = (page_index_max > 0) ? (ex64lo(tsc) % page_index_max) : 0;
ptr = ptr_min + page_index * PAGE_SIZE;
/* allocate block */
block = (struct block *) mmap(
ptr, /* addr */
totalsize, /* len */
PROT_READ|PROT_WRITE, /* prot */
MAP_PREALLOC, /* flags */
-1, /* fd */
0); /* offset */
if (block == MAP_FAILED) {
/* mmap call failed */
abort();
}
/* block may not be at the requested location if that is in use */
if (ptr_min > (u8_t *) block)
ptr_min = (u8_t *) block;
if (ptr_max < (u8_t *) block)
ptr_max = (u8_t *) block;
/* initialize block, including fillers */
block->size = size;
block->magic = block_compute_magic(block);
dataptr = block_get_dataptr(block);
for (p = (u8_t *) (block + 1); p < dataptr; p++)
*p = ((unsigned long) p & 0xff);
LOG(("block_alloc; block=0x%x\n", block));
return block;
}
static struct block *block_find(const void *ptr)
{
struct block *block;
LOG(("block_find; ptr=0x%x\n", ptr));
assert(ptr);
/* locate block based on pointer, then check whether it is valid */
block = (struct block *) page_round_down(
(unsigned long) ((struct block *) __UNCONST(ptr) - 1));
block_check(block);
LOG(("block_find; block=0x%x\n", block));
return block;
}
static void block_free(struct block *block)
{
LOG(("block_free; block=0x%x\n", block));
assert(block);
/* simply unmap the block */
if (munmap(block, block_get_totalsize(block->size)) < 0) {
/* munmap call failed */
abort();
}
}
void *_dbg_malloc(size_t size)
{
struct block *newblock;
u8_t *ptr;
LOG(("_dbg_malloc; size=0x%x\n", size));
assert(size > 0); /* enforced by regular malloc */
newblock = block_alloc(size);
if (!newblock)
return NULL;
ptr = block_get_dataptr(newblock);
LOG(("_dbg_malloc; ptr=0x%x\n", ptr));
return ptr;
}
void *_dbg_realloc(void *oldp, size_t size)
{
u8_t *newp;
struct block *oldblock, *newblock;
LOG(("_dbg_realloc; oldp=0x%x; size=0x%x\n", oldp, size));
assert(oldp); /* enforced by regular realloc */
assert(size > 0); /* enforced by regular realloc */
/* always allocate new block */
newblock = block_alloc(size);
if (!newblock)
return NULL;
/* copy the data */
oldblock = block_find(oldp);
memcpy(block_get_dataptr(newblock),
block_get_dataptr(oldblock),
MIN(newblock->size, oldblock->size));
/* deallocate old block */
block_free(oldblock);
newp = block_get_dataptr(newblock);
LOG(("_dbg_realloc; newp=0x%x\n", newp));
return newp;
}
void _dbg_free(void *ptr)
{
LOG(("_dbg_free; ptr=0x%x\n", ptr));
assert(ptr); /* enforced by regular free */
/* find the block and free it */
block_free(block_find(ptr));
LOG(("_dbg_free done\n"));
}

View File

@@ -1,250 +0,0 @@
/* $Header$ */
/* replace undef by define */
#define ALIGN_EIGHT_BYTES /* Use 8-byte alignment. */
#define DEBUG /* check assertions */
#undef SLOWDEBUG /* some extra test loops (requires DEBUG) */
#ifndef DEBUG
#define NDEBUG
#endif
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <assert.h>
#include "malloc-debug.h"
static int no_debug = -1;
#define CHECK_DBG(statement) \
if (no_debug <= 0) { \
if (no_debug < 0) no_debug = getenv("MALLOC_DEBUG") ? 0 : 1; \
if (no_debug == 0) { statement; } \
}
#define ptrint int
#define BRKSIZE 4096
#ifdef ALIGN_EIGHT_BYTES
#define PTRSIZE 8
#else
#define PTRSIZE ((int) sizeof(void *))
#endif
#define Align(x,a) (((x) + (a - 1)) & ~(a - 1))
#define NextSlot(p) (* (void **) ((p) - PTRSIZE))
#define NextFree(p) (* (void **) (p))
/*
* A short explanation of the data structure and algorithms.
* An area returned by malloc() is called a slot. Each slot
* contains the number of bytes requested, but preceded by
* an extra pointer to the next slot in memory.
* '_bottom' and '_top' point to the first/last slot.
* More memory is asked for using brk() and appended to top.
* The list of free slots is maintained to keep malloc() fast.
* '_empty' points to the first free slot. Free slots are
* linked together by a pointer at the start of the
* user visible part, so just after the next-slot pointer.
* Free slots are merged together by free().
*
* Since modern processors prefer 8-byte alignment, we now pretend
* our pointers are 8 bytes wide.
*/
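/* Added illustrative note: with 8-byte alignment a malloc(20) request is
 * rounded up to Align(20, 8) = 24 user bytes plus the hidden PTRSIZE (8)
 * next-slot pointer, so the slot occupies 32 bytes in total.  NextSlot(p)
 * reads the pointer stored just below p; NextFree(p) reads the pointer kept
 * in the first word of the user-visible part of a free slot.
 */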
extern void *_sbrk(int);
extern int _brk(void *);
static void *_bottom, *_top, *_empty;
static int grow(size_t len)
{
register char *p;
assert(NextSlot((char *)_top) == 0);
if ((char *) _top + len < (char *) _top
|| (p = (char *)Align((ptrint)_top + len, BRKSIZE)) < (char *) _top ) {
errno = ENOMEM;
return(0);
}
if (_brk(p) != 0)
return(0);
NextSlot((char *)_top) = p;
NextSlot(p) = 0;
free(_top);
_top = p;
return 1;
}
void *
malloc(const size_t size)
{
register char *prev, *p, *next, *new;
unsigned ntries;
if (size == 0)
return NULL;
CHECK_DBG(return _dbg_malloc(size));
for (ntries = 0; ntries < 2; ntries++) {
unsigned len = Align(size, PTRSIZE) + PTRSIZE;
if (len < 2 * PTRSIZE) {
errno = ENOMEM;
return NULL;
}
if (_bottom == 0) {
if ((p = _sbrk(2 * PTRSIZE)) == (char *) -1)
return NULL;
p = (char *) Align((ptrint)p, PTRSIZE);
p += PTRSIZE;
_top = _bottom = p;
NextSlot(p) = 0;
}
#ifdef SLOWDEBUG
for (p = _bottom; (next = NextSlot(p)) != 0; p = next)
assert(next > p);
assert(p == _top);
#endif
for (prev = 0, p = _empty; p != 0; prev = p, p = NextFree(p)) {
next = NextSlot(p);
new = p + len; /* easily overflows!! */
if (new > next || new <= p)
continue; /* too small */
if (new + PTRSIZE < next) { /* too big, so split */
/* + PTRSIZE avoids tiny slots on free list */
NextSlot(new) = next;
NextSlot(p) = new;
NextFree(new) = NextFree(p);
NextFree(p) = new;
}
if (prev)
NextFree(prev) = NextFree(p);
else
_empty = NextFree(p);
return p;
}
if (grow(len) == 0)
break;
}
assert(ntries != 2);
return NULL;
}
void *
realloc(void *oldp, size_t size)
{
register char *prev, *p, *next, *new;
char *old = oldp;
register size_t len, n;
if (old == 0)
return malloc(size);
if (size == 0) {
free(old);
return NULL;
}
CHECK_DBG(return _dbg_realloc(oldp, size));
len = Align(size, PTRSIZE) + PTRSIZE;
next = NextSlot(old);
n = (int)(next - old); /* old length */
/*
* extend old if there is any free space just behind it
*/
for (prev = 0, p = _empty; p != 0; prev = p, p = NextFree(p)) {
if (p > next)
break;
if (p == next) { /* 'next' is a free slot: merge */
NextSlot(old) = NextSlot(p);
if (prev)
NextFree(prev) = NextFree(p);
else
_empty = NextFree(p);
next = NextSlot(old);
break;
}
}
new = old + len;
/*
* Can we use the old, possibly extended slot?
*/
if (new <= next && new >= old) { /* it does fit */
if (new + PTRSIZE < next) { /* too big, so split */
/* + PTRSIZE avoids tiny slots on free list */
NextSlot(new) = next;
NextSlot(old) = new;
free(new);
}
return old;
}
if ((new = malloc(size)) == NULL) /* it didn't fit */
return NULL;
memcpy(new, old, n); /* n < size */
free(old);
return new;
}
void
free(void *ptr)
{
register char *prev, *next;
char *p = ptr;
if (p == 0)
return;
CHECK_DBG(_dbg_free(ptr); return);
#ifdef SLOWDEBUG
{
int found;
char *curr;
/* block must be in block list */
assert(_bottom);
found = 0;
for (curr = _bottom; (next = NextSlot(curr)) != 0; curr = next) {
assert(next > curr);
if (curr == p) found = 1;
}
if (curr == p) found = 1;
assert(found);
/* block must not be in free list */
if (_empty) {
found = 0;
for (curr = _empty; (next = NextFree(curr)) != 0; curr = next) {
assert(next > curr);
if (curr == p) found = 1;
}
if (curr == p) found = 1;
assert(!found);
}
}
#endif
assert((char *) NextSlot(p) > p);
for (prev = 0, next = _empty; next != 0; prev = next, next = NextFree(next))
if (p < next)
break;
NextFree(p) = next;
if (prev)
NextFree(prev) = p;
else
_empty = p;
if (next) {
assert((char *) NextSlot(p) <= next);
if (NextSlot(p) == next) { /* merge p and next */
NextSlot(p) = NextSlot(next);
NextFree(p) = NextFree(next);
}
}
if (prev) {
assert((char *) NextSlot(prev) <= p);
if (NextSlot(prev) == p) { /* merge prev and p */
NextSlot(prev) = NextSlot(p);
NextFree(prev) = NextFree(p);
}
}
}

View File

@@ -3,7 +3,7 @@
# net sources
.if defined(__MINIX)
.PATH: ${.CURDIR}/net/minix
.PATH: ${NETBSDSRCDIR}/minix/lib/libc/net
CPPFLAGS.getpeereid.c+= -D_MINIX_SYSTEM=1
CPPFLAGS.getsockopt.c+= -D_MINIX_SYSTEM=1

View File

@@ -1,81 +0,0 @@
#include <namespace.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <net/if.h>
#include <net/gen/in.h>
#include <net/gen/ip_io.h>
#include <net/gen/tcp.h>
#include <net/gen/udp.h>
#include <netinet/in.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <ifaddrs.h>
#if defined(__weak_alias)
__weak_alias(getifaddrs,_getifaddrs)
__weak_alias(freeifaddrs,_freeifaddrs)
#endif
int
getifaddrs(struct ifaddrs **ifap)
{
static int fd = -1;
nwio_ipconf_t ipconf;
int flags;
static struct ifaddrs ifa;
static struct sockaddr_in addr, netmask;
memset(&ifa, 0, sizeof(ifa));
memset(&addr, 0, sizeof(addr));
memset(&netmask, 0, sizeof(netmask));
ifa.ifa_next = NULL;
ifa.ifa_name = __UNCONST("ip");
addr.sin_family = netmask.sin_family = AF_INET;
ifa.ifa_addr = (struct sockaddr *) &addr;
ifa.ifa_netmask = (struct sockaddr *) &netmask;
addr.sin_addr.s_addr = 0;
netmask.sin_addr.s_addr = 0;
*ifap = NULL;
if(fd < 0) {
char *ipd;
if(!(ipd = getenv("IP_DEVICE")))
ipd = __UNCONST("/dev/ip");
if((fd = open(ipd, O_RDWR)) < 0)
return -1;
}
/* Code taken from commands/simple/ifconfig.c. */
if((flags = fcntl(fd, F_GETFL)) < 0 ||
fcntl(fd, F_SETFL, flags | O_NONBLOCK) < 0 ||
ioctl(fd, NWIOGIPCONF, &ipconf))
return 0; /* Report interface as down. */
addr.sin_addr.s_addr = ipconf.nwic_ipaddr;
netmask.sin_addr.s_addr = ipconf.nwic_netmask;
if(addr.sin_addr.s_addr) ifa.ifa_flags = IFF_UP;
/* Just report on this interface. */
*ifap = &ifa;
return 0;
}
void
freeifaddrs(struct ifaddrs *ifp)
{
/* getifaddrs points to static data, so no need to free. */
;
}
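/* Illustrative usage, not part of the original file: walk the (single-entry)
 * list produced above and print the IPv4 address of every interface that is
 * reported as being up.
 */
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <net/if.h>
#include <ifaddrs.h>
#include <stdio.h>

static void example_list_interfaces(void)
{
	struct ifaddrs *ifa, *p;

	if (getifaddrs(&ifa) == -1)
		return;
	for (p = ifa; p != NULL; p = p->ifa_next) {
		if ((p->ifa_flags & IFF_UP) && p->ifa_addr != NULL)
			printf("%s: %s\n", p->ifa_name,
			    inet_ntoa(((struct sockaddr_in *)p->ifa_addr)->sin_addr));
	}
	freeifaddrs(ifa);
}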

View File

@@ -1,37 +0,0 @@
#include <errno.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/ucred.h>
/*
* get the effective user ID and effective group ID of a peer
* connected through a Unix domain socket.
*/
int getpeereid(int sd, uid_t *euid, gid_t *egid) {
int rc;
struct uucred cred;
socklen_t ucred_length;
/* Initialize Data Structures */
ucred_length = sizeof(struct uucred);
memset(&cred, '\0', ucred_length);
/* Validate Input Parameters */
if (euid == NULL || egid == NULL) {
errno = EFAULT;
return -1;
} /* getsockopt will handle validating 'sd' */
/* Get the credentials of the peer at the other end of 'sd' */
rc = getsockopt(sd, SOL_SOCKET, SO_PEERCRED, &cred, &ucred_length);
if (rc == 0) {
/* Success - return the results */
*euid = cred.cr_uid;
*egid = cred.cr_gid;
return 0;
} else {
/* Failure - getsockopt takes care of setting errno */
return -1;
}
}
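/* Illustrative usage, not part of the original file: a server accepting a
 * connection on a UNIX domain socket and checking which user is on the other
 * end.  The listen_fd argument is assumed to be a bound, listening AF_UNIX
 * socket.
 */
#include <sys/types.h>
#include <sys/socket.h>
#include <stdio.h>
#include <unistd.h>

static void example_check_peer(int listen_fd)
{
	uid_t uid;
	gid_t gid;
	int sd;

	if ((sd = accept(listen_fd, NULL, NULL)) < 0)
		return;
	if (getpeereid(sd, &uid, &gid) == 0)
		printf("peer euid=%d egid=%d\n", (int) uid, (int) gid);
	close(sd);
}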

View File

@@ -28,7 +28,7 @@ SRCS+= erand48_ieee754.c
.if (${USE_JEMALLOC} != "no")
SRCS+= jemalloc.c
.elif (${USE_MINIXMALLOC:Uno} != "no")
.include "../minix/Makefile.inc"
.include "${NETBSDSRCDIR}/minix/lib/libc/Makefile.inc"
.else
SRCS+= malloc.c
.endif

View File

@@ -1,73 +0,0 @@
_lwp_*
acct
lchmod
lchown
clone
extattr_*
fhopen
fhstat
fhstatvfs
fsync_range
getfh
__setlogin
getpgid
setrlimit
getrusage
getsid
issetugid /* WARNING: Always returns 0 in this impl. */
kevent
kqueue
ktrace
lfs_*
madvise
mincore
minherit
mlock
mlockall
munlock
munlockall
modctl
mprotect
mq_timedreceive
mq_timedsend
mremap
msgctl
msgget
msgrcv
msgsnd
msync
nfs_svc
pmc_*
pollts
posix_fadvise
posix_madvise
pselect /* Implementable as select wrapper */
preadv
pwritev
quotactl
rasctl
sa_*
_sched_*
semconfig
setpgid
setpgrp
setregid
setreuid
sigaltstack
sigqueue
sigqueueinfo
sigstack
sigtimedwait
sigwait
sigwaitinfo
swapctl
swapon
sysarch
timer_create
timer_delete
timer_gettime
timer_settime
undelete
utrace
uuidgen
vadvise

View File

@@ -1,35 +0,0 @@
.PATH: ${.CURDIR}/sys-minix
SRCS+= accept.c access.c adjtime.c bind.c brk.c sbrk.c m_closefrom.c getsid.c \
chdir.c chmod.c fchmod.c chown.c fchown.c chroot.c close.c \
clock_getres.c clock_gettime.c clock_settime.c \
connect.c dup.c dup2.c execve.c fcntl.c flock.c fpathconf.c fork.c \
fstatfs.c fstatvfs.c fsync.c ftruncate.c gcov_flush.c getdents.c \
getegid.c getgid.c \
getgroups.c getitimer.c setitimer.c __getlogin.c getpeername.c \
getpgrp.c getpid.c getppid.c priority.c getrlimit.c getsockname.c \
getsockopt.c setsockopt.c gettimeofday.c geteuid.c getuid.c \
getvfsstat.c \
ioctl.c issetugid.c kill.c link.c listen.c loadname.c lseek.c \
minix_rs.c mkdir.c mkfifo.c mknod.c mmap.c mount.c nanosleep.c \
open.c pathconf.c pipe.c poll.c pread.c ptrace.c pwrite.c \
read.c readlink.c reboot.c recvfrom.c recvmsg.c rename.c \
rmdir.c select.c sem.c sendmsg.c sendto.c setgroups.c setsid.c \
setgid.c settimeofday.c setuid.c shmat.c shmctl.c shmget.c stime.c \
vectorio.c shutdown.c sigaction.c sigpending.c sigreturn.c sigsuspend.c\
sigprocmask.c socket.c socketpair.c stat.c statvfs.c svrctl.c \
symlink.c \
sync.c syscall.c sysuname.c truncate.c umask.c unlink.c write.c \
utimensat.c utimes.c futimes.c lutimes.c futimens.c \
_exit.c _ucontext.c environ.c __getcwd.c vfork.c sizeup.c init.c \
getrusage.c setrlimit.c setpgid.c
# Minix specific syscalls / utils.
SRCS+= cprofile.c sprofile.c stack_utils.c _mcontext.c
# Emulation for missing lchown/lchmod
OBJS+= lchown.o lchmod.o
lchown.o lchown.pico lchown.bc: ${NETBSDSRCDIR}/tools/compat/lchown.c
lchmod.o lchmod.pico lchmod.bc: ${NETBSDSRCDIR}/tools/compat/lchmod.c
.include "${ARCHDIR}/sys-minix/Makefile.inc"

View File

@@ -1,142 +0,0 @@
/* getcwd() - get the name of the current working directory.
* Author: Kees J. Bot
* 30 Apr 1989
*/
#include <sys/cdefs.h>
#include "namespace.h"
#include <sys/types.h>
#include <sys/stat.h>
#include <errno.h>
#include <unistd.h>
#include <dirent.h>
#include <limits.h>
#include <string.h>
/* libc-private interface */
int __getcwd(char *, size_t);
static int addpath(const char *path, char **ap, const char *entry)
/* Add the name of a directory entry at the front of the path being built.
* Note that the result always starts with a slash.
*/
{
const char *e= entry;
char *p= *ap;
while (*e != 0) e++;
while (e > entry && p > path) *--p = *--e;
if (p == path) return -1;
*--p = '/';
*ap= p;
return 0;
}
static int recover(char *p)
/* Undo all those chdir("..")'s that have been recorded by addpath. This
* has to be done entry by entry, because the whole pathname may be too long.
*/
{
int e= errno, slash;
char *p0;
while (*p != 0) {
p0= ++p;
do p++; while (*p != 0 && *p != '/');
slash= *p; *p= 0;
if (chdir(p0) < 0) return -1;
*p= slash;
}
errno= e;
return 0;
}
int __getcwd(char *path, size_t size)
{
struct stat above, current, tmp;
struct dirent *entry;
DIR *d;
char *p, *up;
const char *dotdot = "..";
int cycle;
if (path == NULL || size <= 1) { errno= EINVAL; return -1; }
p= path + size;
*--p = 0;
if (stat(".", &current) < 0) return -1;
while (1) {
if (stat(dotdot, &above) < 0) { recover(p); return -1; }
if (above.st_dev == current.st_dev
&& above.st_ino == current.st_ino)
break; /* Root dir found */
if ((d= opendir(dotdot)) == NULL) { recover(p); return -1; }
/* Cycle is 0 for a simple inode nr search, or 1 for a search
* for inode *and* device nr.
*/
cycle= above.st_dev == current.st_dev ? 0 : 1;
do {
char name[3 + NAME_MAX + 1];
tmp.st_ino= 0;
if ((entry= readdir(d)) == NULL) {
switch (++cycle) {
case 1:
rewinddir(d);
continue;
case 2:
closedir(d);
errno= ENOENT;
recover(p);
return -1;
}
}
if (strcmp(entry->d_name, ".") == 0) continue;
if (strcmp(entry->d_name, "..") == 0) continue;
switch (cycle) {
case 0:
/* Simple test on inode nr. */
if (entry->d_ino != current.st_ino) continue;
/*FALL THROUGH*/
case 1:
/* Current is mounted. */
strcpy(name, "../");
strcpy(name+3, entry->d_name);
if (stat(name, &tmp) < 0) continue;
break;
}
} while (tmp.st_ino != current.st_ino
|| tmp.st_dev != current.st_dev);
up= p;
if (addpath(path, &up, entry->d_name) < 0) {
closedir(d);
errno = ERANGE;
recover(p);
return -1;
}
closedir(d);
if (chdir(dotdot) < 0) { recover(p); return -1; }
p= up;
current= above;
}
if (recover(p) < 0) return -1; /* Undo all those chdir("..")'s. */
if (*p == 0) *--p = '/'; /* Cwd is "/" if nothing added */
if (p > path) strcpy(path, p); /* Move string to start of path. */
return 0;
}
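/* A minimal caller sketch (not from the original file). Applications use the
 * public getcwd(3), which libc layers on top of the __getcwd() shown above.
 */
#include <stdio.h>
#include <limits.h>
#include <unistd.h>

int main(void)
{
	char buf[PATH_MAX];

	if (getcwd(buf, sizeof(buf)) == NULL) {
		perror("getcwd");
		return 1;
	}
	printf("cwd: %s\n", buf);
	return 0;
}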

View File

@@ -1,29 +0,0 @@
/* getlogin(3)
*
* Author: Terrence W. Holm Aug. 1988
*/
#include <sys/cdefs.h>
#include "namespace.h"
#include <lib.h>
#include <pwd.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>
#include "extern.h"
int __getlogin(char *logname, size_t sz)
{
struct passwd *pw_entry;
pw_entry = getpwuid(getuid());
if (pw_entry == (struct passwd *)NULL)
return 0;
strncpy(logname, pw_entry->pw_name, sz);
return sz;
}

View File

@@ -1,31 +0,0 @@
#include <sys/cdefs.h>
#include "namespace.h"
#include <lib.h>
#include <string.h>
#include <unistd.h>
#ifdef __weak_alias
__weak_alias(_Exit, _exit)
#endif
__dead void _exit(status)
int status;
{
void (*suicide)(void);
message m;
memset(&m, 0, sizeof(m));
m.m_lc_pm_exit.status = status;
_syscall(PM_PROC_NR, PM_EXIT, &m);
/* If exiting nicely through PM fails for some reason, try to
* commit suicide. E.g., message to PM might fail due to deadlock.
*/
suicide = (void (*)(void)) -1;
suicide();
/* If committing suicide fails for some reason, hang. */
for(;;) { }
}

View File

@@ -1,32 +0,0 @@
/*
* mcontext.c
*/
#include <sys/cdefs.h>
#include <lib.h>
#include <namespace.h>
#include <string.h>
#include <ucontext.h>
#include <unistd.h>
int setmcontext(const mcontext_t *mcp)
{
message m;
memset(&m, 0, sizeof(m));
m.m_lc_pm_mcontext.ctx = (vir_bytes)mcp;
return(_syscall(PM_PROC_NR, PM_SETMCONTEXT, &m));
}
int getmcontext(mcontext_t *mcp)
{
message m;
memset(&m, 0, sizeof(m));
m.m_lc_pm_mcontext.ctx = (vir_bytes)mcp;
return(_syscall(PM_PROC_NR, PM_GETMCONTEXT, &m));
}

View File

@@ -1,261 +0,0 @@
#include <sys/cdefs.h>
#include <namespace.h>
#include <lib.h>
#include <machine/stackframe.h>
#include <sys/cdefs.h>
#include <ucontext.h>
#include <signal.h>
#include <stdarg.h>
#include <stdlib.h>
#include <unistd.h>
#include <stdint.h>
#include <stdio.h>
void ctx_start(void (*)(void), int, ...);
/*===========================================================================*
* setuctx *
*===========================================================================*/
int setuctx(const ucontext_t *ucp)
{
int r;
if (ucp == NULL) {
errno = EFAULT;
return(-1);
}
if (!(ucp->uc_flags & _UC_IGNSIGM)) {
/* Set signal mask */
if ((r = sigprocmask(SIG_SETMASK, &ucp->uc_sigmask, NULL)) == -1)
return(r);
}
if (!(ucp->uc_flags & _UC_IGNFPU)) {
if ((r = setmcontext(&(ucp->uc_mcontext))) == -1)
return(r);
}
return(0);
}
/*===========================================================================*
* getuctx *
*===========================================================================*/
int getuctx(ucontext_t *ucp)
{
int r;
if (ucp == NULL) {
errno = EFAULT;
return(-1);
}
if (!(ucp->uc_flags & _UC_IGNSIGM)) {
/* Get signal mask */
if ((r = sigprocmask(0, NULL, &ucp->uc_sigmask)) == -1)
return(r);
}
if (!(ucp->uc_flags & _UC_IGNFPU)) {
if ((r = getmcontext(&(ucp->uc_mcontext))) != 0)
return(r);
}
return(0);
}
/*===========================================================================*
* makecontext *
*===========================================================================*/
void makecontext(ucontext_t *ucp, void (*func)(void), int argc, ...)
{
va_list ap;
unsigned int *stack_top;
/* There are a number of situations that are erroneous, but we can't actually
tell the caller something is wrong, because this is a void function.
Instead, mcontext_t contains a magic field that has to be set
properly before it can be used. */
if (ucp == NULL) {
return;
} else if ((ucp->uc_stack.ss_sp == NULL) ||
(ucp->uc_stack.ss_size < MINSIGSTKSZ)) {
ucp->uc_mcontext.mc_magic = 0;
_UC_MACHINE_SET_STACK(ucp, 0);
return;
}
if (ucp->uc_mcontext.mc_magic == MCF_MAGIC) {
#if defined(__i386__)
/* The caller provides a pointer to a stack that we can use to run our
context on. When the context starts, control is given to a wrapped
start routine, which calls a function and cleans up the stack
afterwards. The wrapper needs the address of that function on the
stack.
The stack will be prepared as follows:
func() - start routine
arg1 - first argument
...
argn - last argument
ucp - context, esp points here when `func' returns
_ctx_start pops the address of `func' from the stack and calls it.
The stack will then be set up with all arguments for `func'. When
`func' returns, _ctx_start cleans up the stack such that ucp is at
the top of the stack, ready to be used by resumecontext.
Resumecontext, in turn, checks whether another context is ready to
be executed (i.e., uc_link != NULL) or exit(2)s the process. */
/* Find the top of the stack from which we grow downwards. */
stack_top = (unsigned int *) ((uintptr_t ) ucp->uc_stack.ss_sp +
ucp->uc_stack.ss_size);
/* Align the arguments to 16 bytes (we might lose a few bytes of stack
space here).*/
stack_top = (unsigned int *) ((uintptr_t) stack_top & ~0xf);
/* Make room for 'func', the `func' routine arguments, and ucp. */
stack_top -= (1 + argc + 1);
/* Adjust the machine context to point to the top of this stack and the
program counter to the context start wrapper. */
_UC_MACHINE_SET_EBP(ucp, 0); /* Clear frame pointer */
_UC_MACHINE_SET_STACK(ucp, (reg_t) stack_top);
_UC_MACHINE_SET_PC(ucp, (reg_t) ctx_start);
*stack_top++ = (uintptr_t) func;
/* Copy arguments to the stack. */
va_start(ap, argc);
while (argc-- > 0) {
*stack_top++ = va_arg(ap, uintptr_t);
}
va_end(ap);
/* Store ucp on the stack */
*stack_top = (uintptr_t) ucp;
/* Set ESI to point to the base of the stack where ucp is stored, so
that the wrapper function knows how to clean up the stack after
calling `func' (i.e., how to adjust ESP). */
_UC_MACHINE_SET_ESI(ucp, (reg_t) stack_top);
/* If we ran out of stack space, invalidate stack pointer. Eventually,
swapcontext will choke on this and return ENOMEM. */
if (stack_top == ucp->uc_stack.ss_sp) {
_UC_MACHINE_SET_STACK(ucp, 0);
}
#elif defined(__arm__)
/* The caller provides a pointer to a stack that we can use to run our
context on. When the context starts, control is given to the
requested function. When the function finishes, it returns to the
_ctx_start wrapper that calls resumecontext (after setting up
resumecontext's parameter).
The first four arguments for the function will be passed in
regs r0-r3 as specified by the ABI, and the rest will go on
the stack. The ucp is saved in r4 so that we can
eventually pass it to resumecontext. The r4 register is
callee-preserved, so the ucp will remain valid in r4 when
_ctx_start runs. _ctx_start will move the ucp from r4 into
r0, so that the ucp is the first parameter for resumecontext.
Then, _ctx_start will call resumecontext. Resumecontext, in turn,
checks whether another context is ready to be executed
(i.e., uc_link != NULL) or exit(2)s the process. */
/* Find the top of the stack from which we grow downwards. */
stack_top = (unsigned int *) ((uintptr_t ) ucp->uc_stack.ss_sp +
ucp->uc_stack.ss_size);
/* Align the arguments to 16 bytes (we might lose a few bytes of stack
space here).*/
stack_top = (unsigned int *) ((uintptr_t) stack_top & ~0xf);
/* Make room for `func' routine arguments that don't fit in r0-r3 */
if (argc > 4)
stack_top -= argc - 4;
/* Adjust the machine context to point to the top of this stack and the
program counter to the 'func' entry point. Set lr to ctx_start, so
ctx_start runs after 'func'. Save ucp in r4 */
_UC_MACHINE_SET_FP(ucp, 0); /* Clear frame pointer */
_UC_MACHINE_SET_STACK(ucp, (reg_t) stack_top);
_UC_MACHINE_SET_PC(ucp, (reg_t) func);
_UC_MACHINE_SET_LR(ucp, (reg_t) ctx_start);
_UC_MACHINE_SET_R4(ucp, (reg_t) ucp);
/* Copy arguments to r0-r3 and stack. */
va_start(ap, argc);
/* Pass up to four arguments in registers. */
if (argc-- > 0)
_UC_MACHINE_SET_R0(ucp, va_arg(ap, uintptr_t));
if (argc-- > 0)
_UC_MACHINE_SET_R1(ucp, va_arg(ap, uintptr_t));
if (argc-- > 0)
_UC_MACHINE_SET_R2(ucp, va_arg(ap, uintptr_t));
if (argc-- > 0)
_UC_MACHINE_SET_R3(ucp, va_arg(ap, uintptr_t));
/* Pass the rest on the stack. */
while (argc-- > 0) {
*stack_top++ = va_arg(ap, uintptr_t);
}
va_end(ap);
/* If we ran out of stack space, invalidate stack pointer. Eventually,
swapcontext will choke on this and return ENOMEM. */
if (stack_top == ucp->uc_stack.ss_sp) {
_UC_MACHINE_SET_STACK(ucp, 0);
}
#else
# error "Unsupported platform"
#endif
}
}
/*===========================================================================*
* swapcontext *
*===========================================================================*/
int swapcontext(ucontext_t *oucp, const ucontext_t *ucp)
{
int r;
if ((oucp == NULL) || (ucp == NULL)) {
errno = EFAULT;
return(-1);
}
if (_UC_MACHINE_STACK(ucp) == 0) {
/* No stack space. Bail out. */
errno = ENOMEM;
return(-1);
}
oucp->uc_flags &= ~_UC_SWAPPED;
r = getcontext(oucp);
if ((r == 0) && !(oucp->uc_flags & _UC_SWAPPED)) {
oucp->uc_flags |= _UC_SWAPPED;
r = setcontext(ucp);
}
return(r);
}
/*===========================================================================*
* resumecontext *
*===========================================================================*/
__dead
void resumecontext(ucontext_t *ucp)
{
if (ucp->uc_link == NULL) exit(0);
/* Error handling? Where should the error go to? */
(void) setcontext((const ucontext_t *) ucp->uc_link);
exit(1); /* Never reached */
}
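/* A minimal usage sketch (not from the original file) exercising the
 * makecontext()/swapcontext() path above: run a function with two arguments
 * on its own stack, then return to main through uc_link (via resumecontext).
 */
#include <stdio.h>
#include <ucontext.h>

static ucontext_t main_ctx, task_ctx;

static void task(int a, int b)
{
	printf("task: %d + %d = %d\n", a, b, a + b);
	/* Returning here resumes main_ctx through task_ctx.uc_link. */
}

int main(void)
{
	static char stack[64 * 1024];	/* comfortably above MINSIGSTKSZ */

	if (getcontext(&task_ctx) == -1)	/* initializes mc_magic etc. */
		return 1;
	task_ctx.uc_stack.ss_sp = stack;
	task_ctx.uc_stack.ss_size = sizeof(stack);
	task_ctx.uc_link = &main_ctx;
	makecontext(&task_ctx, (void (*)(void)) task, 2, 3, 4);

	if (swapcontext(&main_ctx, &task_ctx) == -1)
		return 1;
	printf("back in main\n");
	return 0;
}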

View File

@@ -1,136 +0,0 @@
#include <sys/cdefs.h>
#include "namespace.h"
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <net/netlib.h>
#include <net/gen/in.h>
#include <net/gen/tcp.h>
#include <net/gen/tcp_io.h>
#include <net/gen/udp.h>
#include <net/gen/udp_io.h>
#define DEBUG 0
static int _tcp_accept(int sock, struct sockaddr *__restrict address,
socklen_t *__restrict address_len);
static int _uds_accept(int sock, struct sockaddr *__restrict address,
socklen_t *__restrict address_len);
int accept(int sock, struct sockaddr *__restrict address,
socklen_t *__restrict address_len)
{
int r;
nwio_udpopt_t udpopt;
r= _tcp_accept(sock, address, address_len);
if (r != -1 || errno != ENOTTY)
return r;
r= _uds_accept(sock, address, address_len);
if (r != -1 || errno != ENOTTY)
return r;
/* Unfortunately, we have to return EOPNOTSUPP for a socket that
* does not support accept (such as a UDP socket) and ENOTSOCK for
 * file descriptors that do not refer to a socket.
*/
r= ioctl(sock, NWIOGUDPOPT, &udpopt);
if (r == 0)
{
/* UDP socket */
errno= EOPNOTSUPP;
return -1;
}
if (errno == ENOTTY)
{
errno= ENOTSOCK;
return -1;
}
return r;
}
static int _tcp_accept(int sock, struct sockaddr *__restrict address,
socklen_t *__restrict address_len)
{
int r, s1, t_errno;
tcp_cookie_t cookie;
s1= open(TCP_DEVICE, O_RDWR);
if (s1 == -1)
return s1;
r= ioctl(s1, NWIOGTCPCOOKIE, &cookie);
if (r == -1)
{
t_errno= errno;
close(s1);
errno= t_errno;
return -1;
}
r= ioctl(sock, NWIOTCPACCEPTTO, &cookie);
if (r == -1)
{
t_errno= errno;
close(s1);
errno= t_errno;
return -1;
}
if (address != NULL)
getpeername(s1, address, address_len);
return s1;
}
static int _uds_accept(int sock, struct sockaddr *__restrict address,
socklen_t *__restrict address_len)
{
int s1;
int r;
struct sockaddr_un uds_addr;
socklen_t len;
memset(&uds_addr, '\0', sizeof(struct sockaddr_un));
r= ioctl(sock, NWIOGUDSADDR, &uds_addr);
if (r == -1) {
return r;
}
if (uds_addr.sun_family != AF_UNIX) {
errno= EINVAL;
return -1;
}
len= *address_len;
if (len > sizeof(struct sockaddr_un))
len = sizeof(struct sockaddr_un);
memcpy(address, &uds_addr, len);
*address_len= len;
s1= open(UDS_DEVICE, O_RDWR);
if (s1 == -1)
return s1;
/* Copy the file status flags (e.g. O_NONBLOCK) from the listening socket. */
fcntl(s1, F_SETFL, fcntl(sock, F_GETFL));
r= ioctl(s1, NWIOSUDSACCEPT, address);
if (r == -1) {
int ioctl_errno = errno;
close(s1);
errno = ioctl_errno;
return r;
}
return s1;
}
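/* A minimal usage sketch (not from the original file): accepting one
 * connection on an already listening TCP socket 'lsock' and printing the
 * peer address that _tcp_accept() fills in via getpeername().
 */
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>

int accept_one(int lsock)
{
	struct sockaddr_in peer;
	socklen_t len = sizeof(peer);
	int conn;

	memset(&peer, 0, sizeof(peer));
	if ((conn = accept(lsock, (struct sockaddr *) &peer, &len)) == -1) {
		perror("accept");
		return -1;
	}
	printf("connection from %s:%u\n",
	    inet_ntoa(peer.sin_addr), (unsigned) ntohs(peer.sin_port));
	return conn;	/* the caller reads, writes and close()s it */
}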

View File

@@ -1,18 +0,0 @@
#include <sys/cdefs.h>
#include "namespace.h"
#include <lib.h>
#include <string.h>
#include <unistd.h>
int access(name, mode)
const char *name;
int mode;
{
message m;
memset(&m, 0, sizeof(m));
m.m_lc_vfs_path.mode = mode;
_loadname(name, &m);
return(_syscall(VFS_PROC_NR, VFS_ACCESS, &m));
}

View File

@@ -1,38 +0,0 @@
#include <sys/cdefs.h>
#include <lib.h>
#include "namespace.h"
#include <string.h>
#include <sys/time.h>
#include <time.h>
#ifdef __weak_alias
__weak_alias(adjtime, __adjtime50);
#endif
int adjtime(const struct timeval *delta, struct timeval *olddelta)
{
message m;
memset(&m, 0, sizeof(m));
m.m_lc_pm_time.clk_id = CLOCK_REALTIME;
m.m_lc_pm_time.now = 0; /* use adjtime() method to slowly adjust the clock. */
m.m_lc_pm_time.sec = delta->tv_sec;
m.m_lc_pm_time.nsec = delta->tv_usec * 1000; /* convert usec to nsec */
if (_syscall(PM_PROC_NR, PM_CLOCK_SETTIME, &m) < 0)
return -1;
if (olddelta != NULL) {
/* the kernel returns immediately and the adjustment happens in the
* background. Also, any currently running adjustment is stopped by
* another call to adjtime(2), so the only values possible on Minix
* for olddelta are those of delta.
*/
olddelta->tv_sec = delta->tv_sec;
olddelta->tv_usec = delta->tv_usec;
}
return 0;
}
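/* A hedged usage sketch (not from the original file): slewing the clock
 * forward by 250 ms and reading back olddelta, which on Minix mirrors the
 * delta just passed in (see the comment above). Needs superuser privileges.
 */
#include <stdio.h>
#include <sys/time.h>

int main(void)
{
	struct timeval delta = { 0, 250000 };	/* +250 ms, applied gradually */
	struct timeval old;

	if (adjtime(&delta, &old) == -1) {
		perror("adjtime");
		return 1;
	}
	printf("olddelta: %ld.%06ld s\n", (long) old.tv_sec, (long) old.tv_usec);
	return 0;
}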

View File

@@ -1,213 +0,0 @@
#include <sys/cdefs.h>
#include "namespace.h"
#include <unistd.h>
#include <stdint.h>
#include <stdlib.h>
#include <limits.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <netinet/in.h>
#include <net/gen/in.h>
#include <net/gen/tcp.h>
#include <net/gen/tcp_io.h>
#include <net/gen/udp.h>
#include <net/gen/udp_io.h>
#include <sys/un.h>
#include <minix/config.h>
#include <minix/const.h>
#define DEBUG 0
static int _tcp_bind(int sock, const struct sockaddr *address,
socklen_t address_len, nwio_tcpconf_t *tcpconfp);
static int _udp_bind(int sock, const struct sockaddr *address,
socklen_t address_len, nwio_udpopt_t *udpoptp);
static int _uds_bind(int sock, const struct sockaddr *address,
socklen_t address_len, struct sockaddr_un *uds_addr);
int bind(int sock, const struct sockaddr *address, socklen_t address_len)
{
int r;
nwio_tcpconf_t tcpconf;
nwio_udpopt_t udpopt;
struct sockaddr_un uds_addr;
r= ioctl(sock, NWIOGTCPCONF, &tcpconf);
if (r != -1 || errno != ENOTTY)
{
if (r == -1)
return r;
r= _tcp_bind(sock, address, address_len, &tcpconf);
#if DEBUG
if (r == -1)
{
int t_errno= errno;
fprintf(stderr, "bind(tcp) failed: %s\n",
strerror(errno));
errno= t_errno;
}
#endif
return r;
}
r= ioctl(sock, NWIOGUDPOPT, &udpopt);
if (r != -1 || errno != ENOTTY)
{
if (r == -1)
return r;
return _udp_bind(sock, address, address_len, &udpopt);
}
r= ioctl(sock, NWIOGUDSADDR, &uds_addr);
if (r != -1 || errno != ENOTTY)
{
if (r == -1)
return r;
return _uds_bind(sock, address, address_len, &uds_addr);
}
#if DEBUG
fprintf(stderr, "bind: not implemented for fd %d\n", sock);
#endif
errno= ENOSYS;
return -1;
}
static int _tcp_bind(int sock, const struct sockaddr *address,
socklen_t address_len, nwio_tcpconf_t *tcpconfp)
{
int r;
nwio_tcpconf_t tcpconf;
struct sockaddr_in *sinp;
sinp= (struct sockaddr_in *) __UNCONST(address);
if (sinp->sin_family != AF_INET || address_len < sizeof(*sinp))
{
#if DEBUG
fprintf(stderr, "bind(tcp): sin_family = %d, len = %d\n",
sinp->sin_family, address_len);
#endif
errno= EAFNOSUPPORT;
return -1;
}
if (sinp->sin_addr.s_addr != INADDR_ANY &&
sinp->sin_addr.s_addr != tcpconfp->nwtc_locaddr)
{
errno= EADDRNOTAVAIL;
return -1;
}
tcpconf.nwtc_flags= 0;
if (sinp->sin_port == 0)
tcpconf.nwtc_flags |= NWTC_LP_SEL;
else
{
tcpconf.nwtc_flags |= NWTC_LP_SET;
tcpconf.nwtc_locport= sinp->sin_port;
}
r= ioctl(sock, NWIOSTCPCONF, &tcpconf);
return r;
}
static int _udp_bind(int sock, const struct sockaddr *address,
socklen_t address_len, nwio_udpopt_t *udpoptp)
{
int r;
unsigned long curr_flags;
nwio_udpopt_t udpopt;
struct sockaddr_in *sinp;
sinp= (struct sockaddr_in *) __UNCONST(address);
if (sinp->sin_family != AF_INET || address_len < sizeof(*sinp))
{
#if DEBUG
fprintf(stderr, "bind(udp): sin_family = %d, len = %d\n",
sinp->sin_family, address_len);
#endif
errno= EAFNOSUPPORT;
return -1;
}
if (sinp->sin_addr.s_addr != INADDR_ANY &&
sinp->sin_addr.s_addr != udpoptp->nwuo_locaddr)
{
errno= EADDRNOTAVAIL;
return -1;
}
udpopt.nwuo_flags= 0;
if (sinp->sin_port == 0)
udpopt.nwuo_flags |= NWUO_LP_SEL;
else
{
udpopt.nwuo_flags |= NWUO_LP_SET;
udpopt.nwuo_locport= sinp->sin_port;
}
curr_flags= udpoptp->nwuo_flags;
if (!(curr_flags & NWUO_ACC_MASK))
udpopt.nwuo_flags |= NWUO_EXCL;
if (!(curr_flags & (NWUO_EN_LOC|NWUO_DI_LOC)))
udpopt.nwuo_flags |= NWUO_EN_LOC;
if (!(curr_flags & (NWUO_EN_BROAD|NWUO_DI_BROAD)))
udpopt.nwuo_flags |= NWUO_EN_BROAD;
if (!(curr_flags & (NWUO_RP_SET|NWUO_RP_ANY)))
udpopt.nwuo_flags |= NWUO_RP_ANY;
if (!(curr_flags & (NWUO_RA_SET|NWUO_RA_ANY)))
udpopt.nwuo_flags |= NWUO_RA_ANY;
if (!(curr_flags & (NWUO_RWDATONLY|NWUO_RWDATALL)))
udpopt.nwuo_flags |= NWUO_RWDATALL;
if (!(curr_flags & (NWUO_EN_IPOPT|NWUO_DI_IPOPT)))
udpopt.nwuo_flags |= NWUO_DI_IPOPT;
r= ioctl(sock, NWIOSUDPOPT, &udpopt);
return r;
}
static int _uds_bind(int sock, const struct sockaddr *address,
socklen_t address_len, struct sockaddr_un *uds_addr)
{
int r;
int did_mknod;
if (address == NULL) {
errno = EFAULT;
return -1;
}
did_mknod = 0;
r = mknod(((struct sockaddr_un *) __UNCONST(address))->sun_path,
S_IFSOCK|S_IRUSR|S_IWUSR|S_IRGRP|S_IWGRP|S_IROTH|S_IWOTH, 0);
if (r == -1 && errno != EEXIST) {
return -1;
} else if (r == 0) {
did_mknod = 1;
}
/* perform the bind */
r= ioctl(sock, NWIOSUDSADDR, (void *) __UNCONST(address));
if (r == -1 && did_mknod) {
/* bind() failed in pfs, so we roll back the
* file system change
*/
unlink(((struct sockaddr_un *) __UNCONST(address))->sun_path);
}
return r;
}
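/* A hedged usage sketch (not from the original file): binding a Unix-domain
 * socket. _uds_bind() above creates the socket node with mknod(), so stale
 * nodes from earlier runs are removed first (the usual unlink-before-bind).
 * The path is purely illustrative.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/un.h>

int bind_uds(const char *path)	/* e.g. bind_uds("/tmp/demo.sock") */
{
	struct sockaddr_un sun;
	int sock;

	if ((sock = socket(AF_UNIX, SOCK_STREAM, 0)) == -1)
		return -1;
	memset(&sun, 0, sizeof(sun));
	sun.sun_family = AF_UNIX;
	strncpy(sun.sun_path, path, sizeof(sun.sun_path) - 1);
	unlink(path);	/* remove a leftover node from a previous run */
	if (bind(sock, (struct sockaddr *) &sun, sizeof(sun)) == -1) {
		perror("bind");
		close(sock);
		return -1;
	}
	return sock;	/* ready for listen()/accept() */
}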

View File

@@ -1,35 +0,0 @@
#include <sys/cdefs.h>
#include "namespace.h"
#include <lib.h>
#include <string.h>
#include <unistd.h>
#ifdef __weak_alias
__weak_alias(brk, _brk)
#endif
extern char *_brksize;
/* Both OSF/1 and SYSVR4 man pages specify that brk(2) returns int.
* However, BSD4.3 specifies that brk() returns char*. POSIX omits
* brk() on the grounds that it imposes a memory model on an architecture.
* On the other hand, they are so crucial to correct operation of so many
* parts of the system, that we have chosen to hide the name brk using _brk,
* as with system calls. In this way, if a user inadvertently defines a
* procedure brk, MINIX may continue to work because the true call is _brk.
*/
int brk(addr)
void *addr;
{
message m;
if (addr != _brksize) {
memset(&m, 0, sizeof(m));
m.m_lc_vm_brk.addr = addr;
if (_syscall(VM_PROC_NR, VM_BRK, &m) < 0) return(-1);
_brksize = addr;
}
return(0);
}
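/* A minimal sketch (not from the original file): moving the program break
 * with brk(), using sbrk(0) only to read its current position.
 */
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	void *start = sbrk(0);			/* current break */

	if (brk((char *) start + 4096) == -1) {	/* grow the data segment */
		perror("brk");
		return 1;
	}
	printf("break moved from %p to %p\n", start, sbrk(0));
	if (brk(start) == -1)			/* shrink it back */
		return 1;
	return 0;
}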

View File

@@ -1,26 +0,0 @@
#include <sys/cdefs.h>
#include "namespace.h"
#include <lib.h>
#include <string.h>
#include <unistd.h>
int chdir(name)
const char *name;
{
message m;
memset(&m, 0, sizeof(m));
_loadname(name, &m);
return(_syscall(VFS_PROC_NR, VFS_CHDIR, &m));
}
int fchdir(fd)
int fd;
{
message m;
memset(&m, 0, sizeof(m));
m.m_lc_vfs_fchdir.fd = fd;
return(_syscall(VFS_PROC_NR, VFS_FCHDIR, &m));
}

View File

@@ -1,16 +0,0 @@
#include <sys/cdefs.h>
#include "namespace.h"
#include <lib.h>
#include <string.h>
#include <sys/stat.h>
int chmod(const char *name, mode_t mode)
{
message m;
memset(&m, 0, sizeof(m));
m.m_lc_vfs_path.mode = mode;
_loadname(name, &m);
return(_syscall(VFS_PROC_NR, VFS_CHMOD, &m));
}

View File

@@ -1,22 +0,0 @@
#include <sys/cdefs.h>
#include "namespace.h"
#include <lib.h>
#include <string.h>
#include <unistd.h>
#ifdef __weak_alias
__weak_alias(__posix_chown, chown)
#endif
int chown(const char *name, uid_t owner, gid_t grp)
{
message m;
memset(&m, 0, sizeof(m));
m.m_lc_vfs_chown.len = strlen(name) + 1;
m.m_lc_vfs_chown.owner = owner;
m.m_lc_vfs_chown.group = grp;
m.m_lc_vfs_chown.name = (vir_bytes)name;
return(_syscall(VFS_PROC_NR, VFS_CHOWN, &m));
}

View File

@@ -1,16 +0,0 @@
#include <sys/cdefs.h>
#include "namespace.h"
#include <lib.h>
#include <string.h>
#include <unistd.h>
int chroot(name)
const char *name;
{
message m;
memset(&m, 0, sizeof(m));
_loadname(name, &m);
return(_syscall(VFS_PROC_NR, VFS_CHROOT, &m));
}

View File

@@ -1,27 +0,0 @@
#include <sys/cdefs.h>
#include <lib.h>
#include "namespace.h"
#include <string.h>
#include <sys/time.h>
#ifdef __weak_alias
__weak_alias(clock_getres, __clock_getres50);
#endif
int clock_getres(clockid_t clock_id, struct timespec *res)
{
message m;
memset(&m, 0, sizeof(m));
m.m_lc_pm_time.clk_id = clock_id;
if (_syscall(PM_PROC_NR, PM_CLOCK_GETRES, &m) < 0)
return -1;
res->tv_sec = m.m_pm_lc_time.sec;
res->tv_nsec = m.m_pm_lc_time.nsec;
return 0;
}

View File

@@ -1,27 +0,0 @@
#include <sys/cdefs.h>
#include <lib.h>
#include "namespace.h"
#include <string.h>
#include <sys/time.h>
#ifdef __weak_alias
__weak_alias(clock_gettime, __clock_gettime50);
#endif
int clock_gettime(clockid_t clock_id, struct timespec *res)
{
message m;
memset(&m, 0, sizeof(m));
m.m_lc_pm_time.clk_id = clock_id;
if (_syscall(PM_PROC_NR, PM_CLOCK_GETTIME, &m) < 0)
return -1;
res->tv_sec = m.m_pm_lc_time.sec;
res->tv_nsec = m.m_pm_lc_time.nsec;
return 0;
}
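/* A minimal usage sketch (not from the original file): reading the realtime
 * clock through the PM_CLOCK_GETTIME wrapper above.
 */
#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec ts;

	if (clock_gettime(CLOCK_REALTIME, &ts) == -1) {
		perror("clock_gettime");
		return 1;
	}
	printf("now: %lld.%09ld\n", (long long) ts.tv_sec, ts.tv_nsec);
	return 0;
}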

View File

@@ -1,27 +0,0 @@
#include <sys/cdefs.h>
#include <lib.h>
#include "namespace.h"
#include <string.h>
#include <time.h>
#ifdef __weak_alias
__weak_alias(clock_settime, __clock_settime50);
#endif
int clock_settime(clockid_t clock_id, const struct timespec *ts)
{
message m;
memset(&m, 0, sizeof(m));
m.m_lc_pm_time.clk_id = clock_id;
m.m_lc_pm_time.now = 1; /* set time immediately. don't use adjtime() method. */
m.m_lc_pm_time.sec = ts->tv_sec;
m.m_lc_pm_time.nsec = ts->tv_nsec;
if (_syscall(PM_PROC_NR, PM_CLOCK_SETTIME, &m) < 0)
return -1;
return 0;
}

View File

@@ -1,16 +0,0 @@
#include <sys/cdefs.h>
#include "namespace.h"
#include <lib.h>
#include <string.h>
#include <unistd.h>
int close(fd)
int fd;
{
message m;
memset(&m, 0, sizeof(m));
m.m_lc_vfs_close.fd = fd;
return(_syscall(VFS_PROC_NR, VFS_CLOSE, &m));
}

View File

@@ -1,176 +0,0 @@
#include <sys/cdefs.h>
#include "namespace.h"
#include <minix/config.h>
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <netinet/in.h>
#include <net/gen/in.h>
#include <net/gen/tcp.h>
#include <net/gen/tcp_io.h>
#include <net/gen/udp.h>
#include <net/gen/udp_io.h>
#include <minix/const.h>
#define DEBUG 0
static int _tcp_connect(int sock, const struct sockaddr *address,
socklen_t address_len, nwio_tcpconf_t *tcpconfp);
static int _udp_connect(int sock, const struct sockaddr *address,
socklen_t address_len, nwio_udpopt_t *udpoptp);
static int _uds_connect(int sock, const struct sockaddr *address,
socklen_t address_len);
int connect(int sock, const struct sockaddr *address,
socklen_t address_len)
{
int r;
nwio_tcpconf_t tcpconf;
nwio_udpopt_t udpopt;
r= ioctl(sock, NWIOGTCPCONF, &tcpconf);
if (r != -1 || errno != ENOTTY)
{
if (r == -1)
{
/* Bad file descriptor */
return -1;
}
return _tcp_connect(sock, address, address_len, &tcpconf);
}
r= ioctl(sock, NWIOGUDPOPT, &udpopt);
if (r != -1 || errno != ENOTTY)
{
if (r == -1)
{
/* Bad file descriptor */
return -1;
}
return _udp_connect(sock, address, address_len, &udpopt);
}
r= _uds_connect(sock, address, address_len);
if (r != -1 || (errno != ENOTTY && errno != EAFNOSUPPORT))
{
if (r == -1)
{
/* Bad file descriptor */
return -1;
}
return r;
}
#if DEBUG
fprintf(stderr, "connect: not implemented for fd %d\n", sock);
#endif
errno= ENOSYS;
return -1;
}
static int _tcp_connect(int sock, const struct sockaddr *address,
socklen_t address_len, nwio_tcpconf_t *tcpconfp)
{
int r;
struct sockaddr_in *sinp;
nwio_tcpconf_t tcpconf;
nwio_tcpcl_t tcpcl;
if (address_len != sizeof(*sinp))
{
errno= EINVAL;
return -1;
}
sinp= (struct sockaddr_in *) __UNCONST(address);
if (sinp->sin_family != AF_INET)
{
errno= EINVAL;
return -1;
}
tcpconf.nwtc_flags= NWTC_SET_RA | NWTC_SET_RP;
if ((tcpconfp->nwtc_flags & NWTC_LOCPORT_MASK) == NWTC_LP_UNSET)
tcpconf.nwtc_flags |= NWTC_LP_SEL;
tcpconf.nwtc_remaddr= sinp->sin_addr.s_addr;
tcpconf.nwtc_remport= sinp->sin_port;
if (ioctl(sock, NWIOSTCPCONF, &tcpconf) == -1)
{
/* Ignore EISCONN error. The NWIOTCPCONN ioctl will get the
* right error.
*/
if (errno != EISCONN)
return -1;
}
tcpcl.nwtcl_flags= TCF_DEFAULT;
r= fcntl(sock, F_GETFL);
if (r == -1)
return -1;
if (r & O_NONBLOCK)
tcpcl.nwtcl_flags |= TCF_ASYNCH;
r= ioctl(sock, NWIOTCPCONN, &tcpcl);
return r;
}
static int _udp_connect(int sock, const struct sockaddr *address,
socklen_t address_len, nwio_udpopt_t *udpoptp)
{
int r;
struct sockaddr_in *sinp;
nwio_udpopt_t udpopt;
if (address == NULL)
{
/* Unset remote address */
udpopt.nwuo_flags= NWUO_RP_ANY | NWUO_RA_ANY | NWUO_RWDATALL;
r= ioctl(sock, NWIOSUDPOPT, &udpopt);
return r;
}
if (address_len != sizeof(*sinp))
{
errno= EINVAL;
return -1;
}
sinp= (struct sockaddr_in *) __UNCONST(address);
if (sinp->sin_family != AF_INET)
{
errno= EINVAL;
return -1;
}
udpopt.nwuo_flags= NWUO_RP_SET | NWUO_RA_SET | NWUO_RWDATONLY;
if ((udpoptp->nwuo_flags & NWUO_LOCPORT_MASK) == NWUO_LP_ANY)
udpopt.nwuo_flags |= NWUO_LP_SEL;
udpopt.nwuo_remaddr= sinp->sin_addr.s_addr;
udpopt.nwuo_remport= sinp->sin_port;
r= ioctl(sock, NWIOSUDPOPT, &udpopt);
return r;
}
static int _uds_connect(int sock, const struct sockaddr *address,
socklen_t address_len)
{
if (address == NULL) {
errno = EFAULT;
return -1;
}
/* perform the connect */
return ioctl(sock, NWIOSUDSCONN, (void *) __UNCONST(address));
}
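/* A minimal usage sketch (not from the original file): a blocking TCP
 * connect to an illustrative address. With O_NONBLOCK set on the socket, the
 * code above would start an asynchronous connect (TCF_ASYNCH) instead.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>

int connect_tcp(const char *ip, unsigned short port)	/* e.g. "10.0.0.1", 80 */
{
	struct sockaddr_in sin;
	int sock;

	if ((sock = socket(AF_INET, SOCK_STREAM, 0)) == -1)
		return -1;
	memset(&sin, 0, sizeof(sin));
	sin.sin_family = AF_INET;
	sin.sin_port = htons(port);
	sin.sin_addr.s_addr = inet_addr(ip);
	if (connect(sock, (struct sockaddr *) &sin, sizeof(sin)) == -1) {
		perror("connect");
		close(sock);
		return -1;
	}
	return sock;
}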

Some files were not shown because too many files have changed in this diff.