/* This file contains essentially all of the process and message handling.
 * Together with "mpx.s" it forms the lowest layer of the MINIX kernel.
 * There is one entry point from the outside:
 *
 *   sys_call:	      a system call, i.e., the kernel is trapped with an INT
 *
 * As well as several entry points used from the interrupt and task level:
 *
 *   lock_notify:     notify a process of a system event
 *   lock_send:	      send a message to a process
 *   lock_enqueue:    put a process on one of the scheduling queues
 *   lock_dequeue:    remove a process from the scheduling queues
 *
 * Changes:
 *   Aug 19, 2005     rewrote scheduling code  (Jorrit N. Herder)
 *   Jul 25, 2005     rewrote system call handling  (Jorrit N. Herder)
 *   May 26, 2005     rewrote message passing functions  (Jorrit N. Herder)
 *   May 24, 2005     new notification system call  (Jorrit N. Herder)
 *   Oct 28, 2004     nonblocking send and receive calls  (Jorrit N. Herder)
 *
 * The code here is critical to make everything work and is important for the
 * overall performance of the system. A large fraction of the code deals with
 * list manipulation. To make this both easy to understand and fast to execute
 * pointer pointers are used throughout the code. Pointer pointers prevent
 * exceptions for the head or tail of a linked list.
 *
 *  node_t *queue, *new_node;	// assume these as global variables
 *  node_t **xpp = &queue; 	// get pointer pointer to head of queue
 *  while (*xpp != NULL) 	// find last pointer of the linked list
 *      xpp = &(*xpp)->next;	// get pointer to next pointer
 *  *xpp = new_node;		// now replace the end (the NULL pointer)
 *  new_node->next = NULL;	// and mark the new end of the list
 *
 * For example, when adding a new node to the end of the list, one normally
 * makes an exception for an empty list and looks up the end of the list for
 * nonempty lists. As shown above, this is not required with pointer pointers.
 */
| 38 |
|
---|
| 39 | #include <minix/com.h>
|
---|
| 40 | #include <minix/callnr.h>
|
---|
| 41 | #include "kernel.h"
|
---|
| 42 | #include "proc.h"
|
---|
| 43 |
|
---|
/* Scheduling and message passing functions. The functions are available to
 * other parts of the kernel through lock_...(). The lock temporarily disables
 * interrupts to prevent race conditions.
 */
/* IPC primitives: each implements one half of the system call interface. */
FORWARD _PROTOTYPE( int mini_send, (struct proc *caller_ptr, int dst,
		message *m_ptr, unsigned flags) );
FORWARD _PROTOTYPE( int mini_receive, (struct proc *caller_ptr, int src,
		message *m_ptr, unsigned flags) );
FORWARD _PROTOTYPE( int mini_notify, (struct proc *caller_ptr, int dst) );

/* Scheduling mechanism (enqueue/dequeue) and policy (sched/pick_proc). */
FORWARD _PROTOTYPE( void enqueue, (struct proc *rp) );
FORWARD _PROTOTYPE( void dequeue, (struct proc *rp) );
FORWARD _PROTOTYPE( void sched, (struct proc *rp, int *queue, int *front) );
FORWARD _PROTOTYPE( void pick_proc, (void) );
/* Assemble a notification message in place. 'm_ptr' points at a message in
 * the kernel's own address space; it is stamped with the source, the NOTIFY
 * message type, and the current uptime. For the pseudo-sources HARDWARE and
 * SYSTEM the destination's pending interrupt or signal bits are passed as
 * the notification argument and cleared in the same step.
 * Wrapped in do { ... } while(0) so that an invocation followed by ';'
 * behaves as one statement, even in an unbraced if/else arm (the original
 * expansion was multiple statements, so only the first was conditional).
 */
#define BuildMess(m_ptr, src, dst_ptr) \
	do { \
	(m_ptr)->m_source = (src); 				\
	(m_ptr)->m_type = NOTIFY_FROM(src);			\
	(m_ptr)->NOTIFY_TIMESTAMP = get_uptime();		\
	switch (src) {						\
	case HARDWARE:						\
		(m_ptr)->NOTIFY_ARG = priv(dst_ptr)->s_int_pending;	\
		priv(dst_ptr)->s_int_pending = 0;		\
		break;						\
	case SYSTEM:						\
		(m_ptr)->NOTIFY_ARG = priv(dst_ptr)->s_sig_pending;	\
		priv(dst_ptr)->s_sig_pending = 0;		\
		break;						\
	}							\
	} while(0)
| 73 |
|
---|
/* Copy a message between address spaces. cp_mess() takes the sender's
 * process number plus the physical base of each party's data segment, so
 * the copy works no matter which address space the kernel runs in; 'sm'
 * and 'dm' are message pointers, virtual within those segments.
 */
#define CopyMess(s,sp,sm,dp,dm) \
	cp_mess(s, (sp)->p_memmap[D].mem_phys,	\
		 (vir_bytes)sm, (dp)->p_memmap[D].mem_phys, (vir_bytes)dm)
| 77 |
|
---|
/*===========================================================================*
 *				sys_call				     * 
 *===========================================================================*/
PUBLIC int sys_call(call_nr, src_dst, m_ptr)
int call_nr;			/* system call number and flags */
int src_dst;			/* src to receive from or dst to send to */
message *m_ptr;			/* pointer to message in the caller's space */
{
/* System calls are done by trapping to the kernel with an INT instruction.
 * The trap is caught and sys_call() is called to send or receive a message
 * (or both). The caller is always given by 'proc_ptr'.
 * Returns OK on success or a negative error code (ECALLDENIED, EBADSRCDST,
 * EFAULT, EDEADDST, EBADCALL, or an error from the mini_* primitives).
 */
  register struct proc *caller_ptr = proc_ptr;	/* get pointer to caller */
  int function = call_nr & SYSCALL_FUNC;	/* get system call function */
  unsigned flags = call_nr & SYSCALL_FLAGS;	/* get flags */
  int mask_entry;				/* bit to check in send mask */
  int result;					/* the system call's result */
  vir_clicks vlo, vhi;		/* virtual clicks containing message to send */

  /* Check if the process has privileges for the requested call. Calls to the 
   * kernel may only be SENDREC, because tasks always reply and may not block 
   * if the caller doesn't do receive(). 
   */
  if (! (priv(caller_ptr)->s_trap_mask & (1 << function)) || 
          (iskerneln(src_dst) && function != SENDREC
           && function != RECEIVE)) { 
      kprintf("sys_call: trap %d not allowed, caller %d, src_dst %d\n", 
          function, proc_nr(caller_ptr), src_dst);
      return(ECALLDENIED);		/* trap denied by mask or kernel */
  }

  /* Require a valid source and/or destination process, unless echoing. */
  if (! (isokprocn(src_dst) || src_dst == ANY || function == ECHO)) { 
      kprintf("sys_call: invalid src_dst, src_dst %d, caller %d\n", 
          src_dst, proc_nr(caller_ptr));
      return(EBADSRCDST);		/* invalid process number */
  }

  /* If the call involves a message buffer, i.e., for SEND, RECEIVE, SENDREC, 
   * or ECHO, check the message pointer. This check allows a message to be 
   * anywhere in data or stack or gap. It will have to be made more elaborate 
   * for machines which don't have the gap mapped. 
   */
  if (function & CHECK_PTR) {	
      vlo = (vir_bytes) m_ptr >> CLICK_SHIFT;		
      vhi = ((vir_bytes) m_ptr + MESS_SIZE - 1) >> CLICK_SHIFT;
      /* The whole message must lie between the start of the data segment
       * and the end of the stack segment ('vlo > vhi' catches wraparound).
       */
      if (vlo < caller_ptr->p_memmap[D].mem_vir || vlo > vhi ||
              vhi >= caller_ptr->p_memmap[S].mem_vir + 
              caller_ptr->p_memmap[S].mem_len) {
          kprintf("sys_call: invalid message pointer, trap %d, caller %d\n",
          	function, proc_nr(caller_ptr));
          return(EFAULT); 		/* invalid message pointer */
      }
  }

  /* If the call is to send to a process, i.e., for SEND, SENDREC or NOTIFY,
   * verify that the caller is allowed to send to the given destination and
   * that the destination is still alive. 
   */
  if (function & CHECK_DST) {	
      if (! get_sys_bit(priv(caller_ptr)->s_ipc_to, nr_to_id(src_dst))) {
          kprintf("sys_call: ipc mask denied %d sending to %d\n",
          	proc_nr(caller_ptr), src_dst);
          return(ECALLDENIED);		/* call denied by ipc mask */
      }

      /* Dead destinations are tolerated during shutdown so that the last
       * messages of a dying system don't produce spurious errors.
       */
      if (isemptyn(src_dst) && !shutdown_started) {
          kprintf("sys_call: dead dest; %d, %d, %d\n", 
              function, proc_nr(caller_ptr), src_dst);
          return(EDEADDST); 		/* cannot send to the dead */
      }
  }

  /* Now check if the call is known and try to perform the request. The only
   * system calls that exist in MINIX are sending and receiving messages.
   *   - SENDREC: combines SEND and RECEIVE in a single system call
   *   - SEND:    sender blocks until its message has been delivered
   *   - RECEIVE: receiver blocks until an acceptable message has arrived
   *   - NOTIFY:  nonblocking call; deliver notification or mark pending
   *   - ECHO:    nonblocking call; directly echo back the message
   */
  switch(function) {
  case SENDREC:
      /* A flag is set so that notifications cannot interrupt SENDREC. */
      priv(caller_ptr)->s_flags |= SENDREC_BUSY;
      /* fall through */
  case SEND:			
      result = mini_send(caller_ptr, src_dst, m_ptr, flags);
      if (function == SEND || result != OK) {	
          break;				/* done, or SEND failed */
      }						/* fall through for SENDREC */
  case RECEIVE:			
      /* Only a pure RECEIVE clears SENDREC_BUSY; the receive phase of a
       * SENDREC keeps it set so pending notifications stay blocked.
       */
      if (function == RECEIVE)
          priv(caller_ptr)->s_flags &= ~SENDREC_BUSY;
      result = mini_receive(caller_ptr, src_dst, m_ptr, flags);
      break;
  case NOTIFY:
      result = mini_notify(caller_ptr, src_dst);
      break;
  case ECHO:
      /* Copy the message back to the caller; source and destination are
       * the caller itself, so no permission checks are needed.
       */
      CopyMess(caller_ptr->p_nr, caller_ptr, m_ptr, caller_ptr, m_ptr);
      result = OK;
      break;
  default:
      result = EBADCALL;			/* illegal system call */
  }

  /* Now, return the result of the system call to the caller. */
  return(result);
}
| 188 |
|
---|
/*===========================================================================*
 *				mini_send				     * 
 *===========================================================================*/
PRIVATE int mini_send(caller_ptr, dst, m_ptr, flags)
register struct proc *caller_ptr;	/* who is trying to send a message? */
int dst;				/* to whom is message being sent? */
message *m_ptr;				/* pointer to message buffer */
unsigned flags;				/* system call flags */
{
/* Send a message from 'caller_ptr' to 'dst'. If 'dst' is blocked waiting
 * for this message, copy the message to it and unblock 'dst'. If 'dst' is
 * not waiting at all, or is waiting for another source, queue 'caller_ptr'.
 * Returns OK, ELOCKED on a send-cycle deadlock, or ENOTREADY when the
 * destination is not waiting and NON_BLOCKING was requested.
 */
  register struct proc *dst_ptr = proc_addr(dst);
  register struct proc **xpp;
  register struct proc *xp;

  /* Check for deadlock by 'caller_ptr' and 'dst' sending to each other.
   * Follow the chain of blocked senders starting at the destination; if it
   * leads back to the caller, blocking here would deadlock the cycle.
   */
  xp = dst_ptr;
  while (xp->p_rts_flags & SENDING) {		/* check while sending */
  	xp = proc_addr(xp->p_sendto);		/* get xp's destination */
  	if (xp == caller_ptr) return(ELOCKED);	/* deadlock if cyclic */
  }

  /* Check if 'dst' is blocked waiting for this message. The destination's 
   * SENDING flag may be set when its SENDREC call blocked while sending.  
   */
  if ( (dst_ptr->p_rts_flags & (RECEIVING | SENDING)) == RECEIVING &&
       (dst_ptr->p_getfrom == ANY || dst_ptr->p_getfrom == caller_ptr->p_nr)) {
	/* Destination is indeed waiting for this message. */
	CopyMess(caller_ptr->p_nr, caller_ptr, m_ptr, dst_ptr,
		 dst_ptr->p_messbuf);
	/* Clear RECEIVING; if no other blocking condition remains, the
	 * destination becomes runnable again.
	 */
	if ((dst_ptr->p_rts_flags &= ~RECEIVING) == 0) enqueue(dst_ptr);
  } else if ( ! (flags & NON_BLOCKING)) {
	/* Destination is not waiting.  Block and dequeue caller. */
	caller_ptr->p_messbuf = m_ptr;
	if (caller_ptr->p_rts_flags == 0) dequeue(caller_ptr);
	caller_ptr->p_rts_flags |= SENDING;
	caller_ptr->p_sendto = dst;

	/* Process is now blocked.  Put in on the destination's queue.
	 * The pointer-pointer walk handles the empty-queue case uniformly.
	 */
	xpp = &dst_ptr->p_caller_q;		/* find end of list */
	while (*xpp != NIL_PROC) xpp = &(*xpp)->p_q_link;	
	*xpp = caller_ptr;			/* add caller to end */
	caller_ptr->p_q_link = NIL_PROC;	/* mark new end of list */
  } else {
	return(ENOTREADY);
  }
  return(OK);
}
| 239 |
|
---|
/*===========================================================================*
 *				mini_receive				     * 
 *===========================================================================*/
PRIVATE int mini_receive(caller_ptr, src, m_ptr, flags)
register struct proc *caller_ptr;	/* process trying to get message */
int src;				/* which message source is wanted */
message *m_ptr;				/* pointer to message buffer */
unsigned flags;				/* system call flags */
{
/* A process or task wants to get a message.  If a message is already queued,
 * acquire it and deblock the sender.  If no message from the desired source
 * is available block the caller, unless the flags don't allow blocking.
 * Delivery order: pending notifications first (unless in SENDREC), then
 * queued senders.  Returns OK, or ENOTREADY if nothing is available and
 * NON_BLOCKING was requested.
 */
  register struct proc **xpp;
  register struct notification **ntf_q_pp;
  message m;
  int bit_nr;
  sys_map_t *map;
  bitchunk_t *chunk;
  int i, src_id, src_proc_nr;

  /* Check to see if a message from desired source is already available.
   * The caller's SENDING flag may be set if SENDREC couldn't send. If it is
   * set, the process should be blocked.
   */
  if (!(caller_ptr->p_rts_flags & SENDING)) {

    /* Check if there are pending notifications, except for SENDREC. */
    if (! (priv(caller_ptr)->s_flags & SENDREC_BUSY)) {

        map = &priv(caller_ptr)->s_notify_pending;
        for (chunk=&map->chunk[0]; chunk<&map->chunk[NR_SYS_CHUNKS]; chunk++) {

            /* Find a pending notification from the requested source. */ 
            if (! *chunk) continue; 			/* no bits in chunk */
            for (i=0; ! (*chunk & (1<<i)); ++i) {} 	/* look up the bit */
            src_id = (chunk - &map->chunk[0]) * BITCHUNK_BITS + i;
            if (src_id >= NR_SYS_PROCS) break;		/* out of range */
            src_proc_nr = id_to_nr(src_id);		/* get source proc */
            if (src!=ANY && src!=src_proc_nr) continue;	/* source not ok */
            *chunk &= ~(1 << i);			/* no longer pending */

            /* Found a suitable source, deliver the notification message.
             * The message is built in kernel space, so the copy uses the
             * pseudo-source HARDWARE for the kernel's address space.
             */
            BuildMess(&m, src_proc_nr, caller_ptr);	/* assemble message */
            CopyMess(src_proc_nr, proc_addr(HARDWARE), &m, caller_ptr, m_ptr);
            return(OK);					/* report success */
        }
    }

    /* Check caller queue. Use pointer pointers to keep code simple. */
    xpp = &caller_ptr->p_caller_q;
    while (*xpp != NIL_PROC) {
        if (src == ANY || src == proc_nr(*xpp)) {
	    /* Found acceptable message. Copy it and update status. */
	    CopyMess((*xpp)->p_nr, *xpp, (*xpp)->p_messbuf, caller_ptr, m_ptr);
	    /* Clear the sender's SENDING flag; it becomes runnable again
	     * if no other blocking condition remains.
	     */
            if (((*xpp)->p_rts_flags &= ~SENDING) == 0) enqueue(*xpp);
            *xpp = (*xpp)->p_q_link;		/* remove from queue */
            return(OK);				/* report success */
	}
	xpp = &(*xpp)->p_q_link;		/* proceed to next */
    }
  }

  /* No suitable message is available or the caller couldn't send in SENDREC. 
   * Block the process trying to receive, unless the flags tell otherwise.
   */
  if ( ! (flags & NON_BLOCKING)) {
      caller_ptr->p_getfrom = src;		
      caller_ptr->p_messbuf = m_ptr;
      if (caller_ptr->p_rts_flags == 0) dequeue(caller_ptr);
      caller_ptr->p_rts_flags |= RECEIVING;		
      return(OK);
  } else {
      return(ENOTREADY);
  }
}
| 316 |
|
---|
/*===========================================================================*
 *				mini_notify				     * 
 *===========================================================================*/
PRIVATE int mini_notify(caller_ptr, dst)
register struct proc *caller_ptr;	/* sender of the notification */
int dst;				/* which process to notify */
{
/* Nonblocking notify: deliver a notification message to 'dst' if it is
 * waiting for one, otherwise record it in the destination's pending bit
 * map for later delivery by mini_receive().  Always returns OK.
 */
  register struct proc *dst_ptr = proc_addr(dst);
  int src_id;				/* source id for late delivery */
  message m;				/* the notification message */

  /* Check to see if target is blocked waiting for this message. A process 
   * can be both sending and receiving during a SENDREC system call.
   * A destination busy in SENDREC must not be interrupted by notifications.
   */
  if ((dst_ptr->p_rts_flags & (RECEIVING|SENDING)) == RECEIVING &&
      ! (priv(dst_ptr)->s_flags & SENDREC_BUSY) &&
      (dst_ptr->p_getfrom == ANY || dst_ptr->p_getfrom == caller_ptr->p_nr)) {

      /* Destination is indeed waiting for a message. Assemble a notification 
       * message and deliver it. Copy from pseudo-source HARDWARE, since the
       * message is in the kernel's address space.
       */ 
      BuildMess(&m, proc_nr(caller_ptr), dst_ptr);
      CopyMess(proc_nr(caller_ptr), proc_addr(HARDWARE), &m, 
          dst_ptr, dst_ptr->p_messbuf);
      dst_ptr->p_rts_flags &= ~RECEIVING;	/* deblock destination */
      if (dst_ptr->p_rts_flags == 0) enqueue(dst_ptr);
      return(OK);
  } 

  /* Destination is not ready to receive the notification. Add it to the 
   * bit map with pending notifications. Note the indirectness: the system id 
   * instead of the process number is used in the pending bit map.
   */ 
  src_id = priv(caller_ptr)->s_id;
  set_sys_bit(priv(dst_ptr)->s_notify_pending, src_id); 
  return(OK);
}
| 355 |
|
---|
| 356 | /*===========================================================================*
|
---|
| 357 | * lock_notify *
|
---|
| 358 | *===========================================================================*/
|
---|
| 359 | PUBLIC int lock_notify(src, dst)
|
---|
| 360 | int src; /* sender of the notification */
|
---|
| 361 | int dst; /* who is to be notified */
|
---|
| 362 | {
|
---|
| 363 | /* Safe gateway to mini_notify() for tasks and interrupt handlers. The sender
|
---|
| 364 | * is explicitly given to prevent confusion where the call comes from. MINIX
|
---|
| 365 | * kernel is not reentrant, which means to interrupts are disabled after
|
---|
| 366 | * the first kernel entry (hardware interrupt, trap, or exception). Locking
|
---|
| 367 | * is done by temporarily disabling interrupts.
|
---|
| 368 | */
|
---|
| 369 | int result;
|
---|
| 370 |
|
---|
| 371 | /* Exception or interrupt occurred, thus already locked. */
|
---|
| 372 | if (k_reenter >= 0) {
|
---|
| 373 | result = mini_notify(proc_addr(src), dst);
|
---|
| 374 | }
|
---|
| 375 |
|
---|
| 376 | /* Call from task level, locking is required. */
|
---|
| 377 | else {
|
---|
| 378 | lock(0, "notify");
|
---|
| 379 | result = mini_notify(proc_addr(src), dst);
|
---|
| 380 | unlock(0);
|
---|
| 381 | }
|
---|
| 382 | return(result);
|
---|
| 383 | }
|
---|
| 384 |
|
---|
/*===========================================================================*
 *				enqueue					     * 
 *===========================================================================*/
PRIVATE void enqueue(rp)
register struct proc *rp;	/* this process is now runnable */
{
/* Add 'rp' to one of the queues of runnable processes.  This function is 
 * responsible for inserting a process into one of the scheduling queues. 
 * The mechanism is implemented here.   The actual scheduling policy is
 * defined in sched() and pick_proc().
 */
  int q;	 				/* scheduling queue to use */
  int front;					/* add to front or back */

  /* Determine where to insert to process. */
  sched(rp, &q, &front);

  /* Now add the process to the queue. Three cases: empty queue, insert at
   * head (process still has quantum left), or append at tail.
   */
  if (rdy_head[q] == NIL_PROC) {		/* add to empty queue */
      rdy_head[q] = rdy_tail[q] = rp; 		/* create a new queue */
      rp->p_nextready = NIL_PROC;		/* mark new end */
  } 
  else if (front) {				/* add to head of queue */
      rp->p_nextready = rdy_head[q];		/* chain head of queue */
      rdy_head[q] = rp;				/* set new queue head */
  } 
  else {					/* add to tail of queue */
      rdy_tail[q]->p_nextready = rp;		/* chain tail of queue */	
      rdy_tail[q] = rp;				/* set new queue tail */
      rp->p_nextready = NIL_PROC;		/* mark new end */
  }

  /* Now select the next process to run. The new process may preempt the
   * current choice if it landed in a higher-priority queue.
   */
  pick_proc();			
}
| 420 |
|
---|
/*===========================================================================*
 *				dequeue					     * 
 *===========================================================================*/
PRIVATE void dequeue(rp)
register struct proc *rp;	/* this process is no longer runnable */
{
/* A process must be removed from the scheduling queues, for example, because
 * it has blocked.  If the currently active process is removed, a new process
 * is picked to run by calling pick_proc().
 */
  register int q = rp->p_priority;		/* queue to use */
  register struct proc **xpp;			/* iterate over queue */
  register struct proc *prev_xp;

  /* Side-effect for kernel: check if the task's stack still is ok?
   * The guard word at the stack limit must be intact; if a kernel task
   * overran its stack, panic before the damage spreads.
   */
  if (iskernelp(rp)) { 				
	if (*priv(rp)->s_stack_guard != STACK_GUARD)
		panic("stack overrun by task", proc_nr(rp));
  }

  /* Now make sure that the process is not in its ready queue. Remove the 
   * process if it is found. A process can be made unready even if it is not 
   * running by being sent a signal that kills it.
   */
  prev_xp = NIL_PROC;				
  for (xpp = &rdy_head[q]; *xpp != NIL_PROC; xpp = &(*xpp)->p_nextready) {

      if (*xpp == rp) {				/* found process to remove */
          *xpp = (*xpp)->p_nextready;		/* replace with next chain */
          if (rp == rdy_tail[q])		/* queue tail removed */
              rdy_tail[q] = prev_xp;		/* set new tail */
          if (rp == proc_ptr || rp == next_ptr)	/* active process removed */
              pick_proc();			/* pick new process to run */
          break;
      }
      prev_xp = *xpp;				/* save previous in chain */
  }
}
| 459 |
|
---|
/*===========================================================================*
 *				sched					     * 
 *===========================================================================*/
PRIVATE void sched(rp, queue, front)
register struct proc *rp;			/* process to be scheduled */
int *queue;					/* return: queue to use */
int *front;					/* return: front or back */
{
/* This function determines the scheduling policy.  It is called whenever a
 * process must be added to one of the scheduling queues to decide where to
 * insert it.  As a side-effect the process' priority may be updated.  
 */
  static struct proc *prev_ptr = NIL_PROC;	/* previous without time */
  int time_left = (rp->p_ticks_left > 0);	/* nonzero if quantum not
						 * yet fully consumed */
  int penalty = 0;				/* change in priority */

  /* Check whether the process has time left. Otherwise give a new quantum 
   * and possibly raise the priority.  Processes using multiple quantums 
   * in a row get a lower priority to catch infinite loops in high priority
   * processes (system servers and drivers). 
   */
  if ( ! time_left) {				/* quantum consumed ? */
      rp->p_ticks_left = rp->p_quantum_size; 	/* give new quantum */
      if (prev_ptr == rp) penalty ++;		/* catch infinite loops */
      else penalty --; 				/* give slow way back */
      prev_ptr = rp;				/* store ptr for next */
  }

  /* Determine the new priority of this process. The bounds are determined
   * by IDLE's queue and the maximum priority of this process. Kernel tasks 
   * and the idle process are never changed in priority.
   * Note: a smaller queue number means a higher priority, so a positive
   * penalty lowers priority and a negative one raises it.
   */
  if (penalty != 0 && ! iskernelp(rp)) {
      rp->p_priority += penalty;		/* update with penalty */
      if (rp->p_priority < rp->p_max_priority)  /* check upper bound */ 
          rp->p_priority=rp->p_max_priority;
      else if (rp->p_priority > IDLE_Q-1)   	/* check lower bound */
      	  rp->p_priority = IDLE_Q-1;
  }

  /* If there is time left, the process is added to the front of its queue, 
   * so that it can immediately run. The queue to use simply is always the
   * process' current priority. 
   */
  *queue = rp->p_priority;
  *front = time_left;
}
| 507 |
|
---|
| 508 | /*===========================================================================*
|
---|
| 509 | * pick_proc *
|
---|
| 510 | *===========================================================================*/
|
---|
| 511 | PRIVATE void pick_proc()
|
---|
| 512 | {
|
---|
| 513 | /* Decide who to run now. A new process is selected by setting 'next_ptr'.
|
---|
| 514 | * When a billable process is selected, record it in 'bill_ptr', so that the
|
---|
| 515 | * clock task can tell who to bill for system time.
|
---|
| 516 | */
|
---|
| 517 | register struct proc *rp; /* process to run */
|
---|
| 518 | int q; /* iterate over queues */
|
---|
| 519 |
|
---|
| 520 | /* Check each of the scheduling queues for ready processes. The number of
|
---|
| 521 | * queues is defined in proc.h, and priorities are set in the image table.
|
---|
| 522 | * The lowest queue contains IDLE, which is always ready.
|
---|
| 523 | */
|
---|
| 524 | for (q=0; q < NR_SCHED_QUEUES; q++) {
|
---|
| 525 | if ( (rp = rdy_head[q]) != NIL_PROC) {
|
---|
| 526 | next_ptr = rp; /* run process 'rp' next */
|
---|
| 527 | if (priv(rp)->s_flags & BILLABLE)
|
---|
| 528 | bill_ptr = rp; /* bill for system time */
|
---|
| 529 | return;
|
---|
| 530 | }
|
---|
| 531 | }
|
---|
| 532 | }
|
---|
| 533 |
|
---|
| 534 | /*===========================================================================*
|
---|
| 535 | * lock_send *
|
---|
| 536 | *===========================================================================*/
|
---|
| 537 | PUBLIC int lock_send(dst, m_ptr)
|
---|
| 538 | int dst; /* to whom is message being sent? */
|
---|
| 539 | message *m_ptr; /* pointer to message buffer */
|
---|
| 540 | {
|
---|
| 541 | /* Safe gateway to mini_send() for tasks. */
|
---|
| 542 | int result;
|
---|
| 543 | lock(2, "send");
|
---|
| 544 | result = mini_send(proc_ptr, dst, m_ptr, NON_BLOCKING);
|
---|
| 545 | unlock(2);
|
---|
| 546 | return(result);
|
---|
| 547 | }
|
---|
| 548 |
|
---|
/*===========================================================================*
 *				lock_enqueue				     *
 *===========================================================================*/
PUBLIC void lock_enqueue(rp)
struct proc *rp;		/* this process is now runnable */
{
/* Safe gateway to enqueue() for tasks: disables interrupts around the
 * ready-queue update to prevent race conditions.
 */
  lock(3, "enqueue");
  enqueue(rp);
  unlock(3);
}
| 560 |
|
---|
/*===========================================================================*
 *				lock_dequeue				     *
 *===========================================================================*/
PUBLIC void lock_dequeue(rp)
struct proc *rp;		/* this process is no longer runnable */
{
/* Safe gateway to dequeue() for tasks: disables interrupts around the
 * ready-queue update to prevent race conditions.
 */
  lock(4, "dequeue");
  dequeue(rp);
  unlock(4);
}
| 572 |
|
---|