source: trunk/minix/kernel/proc.c@9 (Minix 3.1.2a)
/* This file contains essentially all of the process and message handling.
 * Together with "mpx.s" it forms the lowest layer of the MINIX kernel.
 * There is one entry point from the outside:
 *
 *   sys_call:	a system call, i.e., the kernel is trapped with an INT
 *
 * As well as several entry points used from the interrupt and task level:
 *
 *   lock_notify:	notify a process of a system event
 *   lock_send:		send a message to a process
 *   lock_enqueue:	put a process on one of the scheduling queues
 *   lock_dequeue:	remove a process from the scheduling queues
 *
 * Changes:
 *   Aug 19, 2005	rewrote scheduling code  (Jorrit N. Herder)
 *   Jul 25, 2005	rewrote system call handling  (Jorrit N. Herder)
 *   May 26, 2005	rewrote message passing functions  (Jorrit N. Herder)
 *   May 24, 2005	new notification system call  (Jorrit N. Herder)
 *   Oct 28, 2004	nonblocking send and receive calls  (Jorrit N. Herder)
 *
 * The code here is critical to make everything work and is important for the
 * overall performance of the system. A large fraction of the code deals with
 * list manipulation. To make this both easy to understand and fast to
 * execute, pointer pointers are used throughout the code. Pointer pointers
 * avoid special cases for the head or tail of a linked list.
 *
 *   node_t *queue, *new_node;	// assume these as global variables
 *   node_t **xpp = &queue;	// get pointer pointer to head of queue
 *   while (*xpp != NULL)	// find last pointer of the linked list
 *       xpp = &(*xpp)->next;	// get pointer to next pointer
 *   *xpp = new_node;		// now replace the end (the NULL pointer)
 *   new_node->next = NULL;	// and mark the new end of the list
 *
 * For example, when adding a new node to the end of the list, one normally
 * makes an exception for an empty list and looks up the end of the list for
 * nonempty lists. As shown above, this is not required with pointer pointers.
 */
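
/* The same technique removes a node without special-casing the head of the
 * list. A minimal sketch (illustrative, not part of the original source;
 * 'victim' is a hypothetical node assumed to be somewhere in the queue):
 *
 *   node_t **xpp = &queue;		// start at the head pointer itself
 *   while (*xpp != NULL) {
 *       if (*xpp == victim) {		// found the node to unlink
 *           *xpp = victim->next;	// bypass it, even if it is the head
 *           break;
 *       }
 *       xpp = &(*xpp)->next;		// advance to the next 'next' pointer
 *   }
 *
 * This is the pattern used by mini_receive() and dequeue() below.
 */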

#include <minix/com.h>
#include <minix/callnr.h>
#include <minix/endpoint.h>
#include "debug.h"
#include "kernel.h"
#include "proc.h"
#include <signal.h>

/* Scheduling and message passing functions. The functions are available to
 * other parts of the kernel through lock_...(). The lock temporarily disables
 * interrupts to prevent race conditions.
 */
FORWARD _PROTOTYPE( int mini_send, (struct proc *caller_ptr, int dst_e,
		message *m_ptr, unsigned flags));
FORWARD _PROTOTYPE( int mini_receive, (struct proc *caller_ptr, int src,
		message *m_ptr, unsigned flags));
FORWARD _PROTOTYPE( int mini_notify, (struct proc *caller_ptr, int dst));
FORWARD _PROTOTYPE( int deadlock, (int function,
		register struct proc *caller, int src_dst));
FORWARD _PROTOTYPE( void enqueue, (struct proc *rp));
FORWARD _PROTOTYPE( void dequeue, (struct proc *rp));
FORWARD _PROTOTYPE( void sched, (struct proc *rp, int *queue, int *front));
FORWARD _PROTOTYPE( void pick_proc, (void));

#define BuildMess(m_ptr, src, dst_ptr) \
	(m_ptr)->m_source = proc_addr(src)->p_endpoint;	\
	(m_ptr)->m_type = NOTIFY_FROM(src);		\
	(m_ptr)->NOTIFY_TIMESTAMP = get_uptime();	\
	switch (src) {					\
	case HARDWARE:					\
		(m_ptr)->NOTIFY_ARG = priv(dst_ptr)->s_int_pending;	\
		priv(dst_ptr)->s_int_pending = 0;	\
		break;					\
	case SYSTEM:					\
		(m_ptr)->NOTIFY_ARG = priv(dst_ptr)->s_sig_pending;	\
		priv(dst_ptr)->s_sig_pending = 0;	\
		break;					\
	}
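
/* Illustrative reading of BuildMess (not part of the original source): for a
 * notification from the pseudo-source HARDWARE to a driver, the macro fills
 * the message roughly as
 *
 *   m.m_source         = HARDWARE endpoint
 *   m.m_type           = NOTIFY_FROM(HARDWARE)
 *   m.NOTIFY_TIMESTAMP = get_uptime()	(uptime at delivery)
 *   m.NOTIFY_ARG       = the pending interrupt bits, which the macro
 *                        clears as it delivers them
 *
 * so a driver can see which interrupts fired even if several arrived while
 * it was not receiving.
 */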

#if (CHIP == INTEL)
#define CopyMess(s,sp,sm,dp,dm) \
	cp_mess(proc_addr(s)->p_endpoint, \
		(sp)->p_memmap[D].mem_phys, \
		(vir_bytes)sm, (dp)->p_memmap[D].mem_phys, (vir_bytes)dm)
#endif /* (CHIP == INTEL) */

#if (CHIP == M68000)
/* M68000 does not have cp_mess() in assembly like INTEL. Declare prototype
 * for cp_mess() here and define the function below. Also define CopyMess.
 */
#endif /* (CHIP == M68000) */

/*===========================================================================*
 *				sys_call				     *
 *===========================================================================*/
PUBLIC int sys_call(call_nr, src_dst_e, m_ptr, bit_map)
int call_nr;			/* system call number and flags */
int src_dst_e;			/* src to receive from or dst to send to */
message *m_ptr;			/* pointer to message in the caller's space */
long bit_map;			/* notification event set or flags */
{
/* System calls are done by trapping to the kernel with an INT instruction.
 * The trap is caught and sys_call() is called to send or receive a message
 * (or both). The caller is always given by 'proc_ptr'.
 */
  register struct proc *caller_ptr = proc_ptr;	/* get pointer to caller */
  int function = call_nr & SYSCALL_FUNC;	/* get system call function */
  unsigned flags = call_nr & SYSCALL_FLAGS;	/* get flags */
  int mask_entry;				/* bit to check in send mask */
  int group_size;				/* used for deadlock check */
  int result;					/* the system call's result */
  int src_dst;
  vir_clicks vlo, vhi;		/* virtual clicks containing message to send */

#if 0
  if (caller_ptr->p_rts_flags & SLOT_FREE) {
	kprintf("called by the dead?!?\n");
	return EINVAL;
  }
#endif

  /* Require a valid source and/or destination process, unless echoing. */
  if (src_dst_e != ANY && function != ECHO) {
	if (!isokendpt(src_dst_e, &src_dst)) {
#if DEBUG_ENABLE_IPC_WARNINGS
		kprintf("sys_call: trap %d by %d with bad endpoint %d\n",
			function, proc_nr(caller_ptr), src_dst_e);
#endif
		return EDEADSRCDST;
	}
  } else src_dst = src_dst_e;

  /* Check if the process has privileges for the requested call. Calls to the
   * kernel may only be SENDREC, because tasks always reply and may not block
   * if the caller doesn't do receive().
   */
  if (! (priv(caller_ptr)->s_trap_mask & (1 << function)) ||
	(iskerneln(src_dst) && function != SENDREC
	 && function != RECEIVE)) {
#if DEBUG_ENABLE_IPC_WARNINGS
	kprintf("sys_call: trap %d not allowed, caller %d, src_dst %d\n",
		function, proc_nr(caller_ptr), src_dst);
#endif
	return(ETRAPDENIED);		/* trap denied by mask or kernel */
  }

  /* If the call involves a message buffer, i.e., for SEND, RECEIVE, SENDREC,
   * or ECHO, check the message pointer. This check allows a message to be
   * anywhere in data or stack or gap. It will have to be made more elaborate
   * for machines which don't have the gap mapped.
   */
  if (function & CHECK_PTR) {
	vlo = (vir_bytes) m_ptr >> CLICK_SHIFT;
	vhi = ((vir_bytes) m_ptr + MESS_SIZE - 1) >> CLICK_SHIFT;
	if (vlo < caller_ptr->p_memmap[D].mem_vir || vlo > vhi ||
		vhi >= caller_ptr->p_memmap[S].mem_vir +
		caller_ptr->p_memmap[S].mem_len) {
#if DEBUG_ENABLE_IPC_WARNINGS
		kprintf("sys_call: invalid message pointer, trap %d, caller %d\n",
			function, proc_nr(caller_ptr));
#endif
		return(EFAULT); 	/* invalid message pointer */
	}
  }
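
  /* Illustrative arithmetic (not part of the original source; assumes 4 KB
   * clicks, i.e. CLICK_SHIFT == 12, and a 36-byte message): for m_ptr ==
   * 0x2FFA, vlo = 0x2FFA >> 12 = 2 and vhi = (0x2FFA + 36 - 1) >> 12 = 3,
   * so a message straddling a click boundary is checked against both clicks.
   * The whole range [vlo, vhi] must fall between the first click of the data
   * segment and the last click of the stack segment, or EFAULT is returned.
   */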

  /* If the call is to send to a process, i.e., for SEND, SENDREC or NOTIFY,
   * verify that the caller is allowed to send to the given destination.
   */
  if (function & CHECK_DST) {
	if (! get_sys_bit(priv(caller_ptr)->s_ipc_to, nr_to_id(src_dst))) {
#if DEBUG_ENABLE_IPC_WARNINGS
		kprintf("sys_call: ipc mask denied trap %d from %d to %d\n",
			function, proc_nr(caller_ptr), src_dst);
#endif
		return(ECALLDENIED);	/* call denied by ipc mask */
	}
  }

  /* Check for a possible deadlock for blocking SEND(REC) and RECEIVE. */
  if (function & CHECK_DEADLOCK) {
	if ((group_size = deadlock(function, caller_ptr, src_dst))) {
#if DEBUG_ENABLE_IPC_WARNINGS
		kprintf("sys_call: trap %d from %d to %d deadlocked, group size %d\n",
			function, proc_nr(caller_ptr), src_dst, group_size);
#endif
		return(ELOCKED);
	}
  }

  /* Now check if the call is known and try to perform the request. The only
   * system calls that exist in MINIX are sending and receiving messages.
   *   - SENDREC: combines SEND and RECEIVE in a single system call
   *   - SEND:    sender blocks until its message has been delivered
   *   - RECEIVE: receiver blocks until an acceptable message has arrived
   *   - NOTIFY:  nonblocking call; deliver notification or mark pending
   *   - ECHO:    nonblocking call; directly echo back the message
   */
  switch(function) {
  case SENDREC:
	/* A flag is set so that notifications cannot interrupt SENDREC. */
	caller_ptr->p_misc_flags |= REPLY_PENDING;
	/* fall through */
  case SEND:
	result = mini_send(caller_ptr, src_dst_e, m_ptr, flags);
	if (function == SEND || result != OK) {
		break;			/* done, or SEND failed */
	}				/* fall through for SENDREC */
  case RECEIVE:
	if (function == RECEIVE)
		caller_ptr->p_misc_flags &= ~REPLY_PENDING;
	result = mini_receive(caller_ptr, src_dst_e, m_ptr, flags);
	break;
  case NOTIFY:
	result = mini_notify(caller_ptr, src_dst);
	break;
  case ECHO:
	CopyMess(caller_ptr->p_nr, caller_ptr, m_ptr, caller_ptr, m_ptr);
	result = OK;
	break;
  default:
	result = EBADCALL;		/* illegal system call */
  }

  /* Now, return the result of the system call to the caller. */
  return(result);
}

/*===========================================================================*
 *				deadlock				     *
 *===========================================================================*/
PRIVATE int deadlock(function, cp, src_dst)
int function;				/* trap number */
register struct proc *cp;		/* pointer to caller */
int src_dst;				/* src or dst process */
{
/* Check for deadlock. This can happen if 'caller_ptr' and 'src_dst' have
 * a cyclic dependency of blocking send and receive calls. The only cyclic
 * dependency that is not fatal is if the caller and target directly SEND(REC)
 * and RECEIVE to each other. If a deadlock is found, the group size is
 * returned. Otherwise zero is returned.
 */
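/* Illustrative scenario (not part of the original source): if A blocks in
 * send(B) and B blocks in send(C), then when C calls send(A) the loop below
 * follows C's target A, then A's target B, sees that B's target is the
 * caller C itself, and returns group size 3: a genuine deadlock. By
 * contrast, A blocked in sendrec(B) while B is blocked in receive(A) closes
 * a group of size 2 with opposite directions, the normal request/reply
 * pattern, and is not flagged.
 */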
  register struct proc *xp;			/* process pointer */
  int group_size = 1;				/* start with only caller */
  int trap_flags;

  while (src_dst != ANY) {		/* loop while src_dst is a process nr */
	int src_dst_e;
	xp = proc_addr(src_dst);	/* follow chain of processes */
	group_size++;			/* extra process in group */

	/* Check whether the last process in the chain has a dependency. If it
	 * has not, the cycle cannot be closed and we are done.
	 */
	if (xp->p_rts_flags & RECEIVING) {	/* xp has dependency */
		if (xp->p_getfrom_e == ANY) src_dst = ANY;
		else okendpt(xp->p_getfrom_e, &src_dst);
	} else if (xp->p_rts_flags & SENDING) {	/* xp has dependency */
		okendpt(xp->p_sendto_e, &src_dst);
	} else {
		return(0);			/* not a deadlock */
	}

	/* Now check if there is a cyclic dependency. For group sizes of two,
	 * a combination of SEND(REC) and RECEIVE is not fatal. Larger groups
	 * or other combinations indicate a deadlock.
	 */
	if (src_dst == proc_nr(cp)) {		/* possible deadlock */
		if (group_size == 2) {		/* caller and src_dst */
			/* The function number is magically converted to flags. */
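			/* Assuming the conventional MINIX values (SEND == 1,
			 * RECEIVE == 2, SENDING == 0x04, RECEIVING == 0x08),
			 * 'function << 2' maps the trap number onto the
			 * corresponding p_rts_flags bit; the XOR below then
			 * leaves the SENDING bit zero exactly when both
			 * parties block in the same direction (a true
			 * deadlock), and sets it for the harmless
			 * send/receive pairing.
			 */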
			if ((xp->p_rts_flags ^ (function << 2)) & SENDING) {
				return(0);	/* not a deadlock */
			}
		}
		return(group_size);		/* deadlock found */
	}
  }
  return(0);					/* not a deadlock */
}

/*===========================================================================*
 *				mini_send				     *
 *===========================================================================*/
PRIVATE int mini_send(caller_ptr, dst_e, m_ptr, flags)
register struct proc *caller_ptr;	/* who is trying to send a message? */
int dst_e;				/* to whom is message being sent? */
message *m_ptr;				/* pointer to message buffer */
unsigned flags;				/* system call flags */
{
/* Send a message from 'caller_ptr' to 'dst'. If 'dst' is blocked waiting
 * for this message, copy the message to it and unblock 'dst'. If 'dst' is
 * not waiting at all, or is waiting for another source, queue 'caller_ptr'.
 */
  register struct proc *dst_ptr;
  register struct proc **xpp;
  int dst_p;

  dst_p = _ENDPOINT_P(dst_e);
  dst_ptr = proc_addr(dst_p);

  if (dst_ptr->p_rts_flags & NO_ENDPOINT) return EDSTDIED;

  /* Check if 'dst' is blocked waiting for this message. The destination's
   * SENDING flag may be set when its SENDREC call blocked while sending.
   */
  if ( (dst_ptr->p_rts_flags & (RECEIVING | SENDING)) == RECEIVING &&
	(dst_ptr->p_getfrom_e == ANY
	 || dst_ptr->p_getfrom_e == caller_ptr->p_endpoint)) {
	/* Destination is indeed waiting for this message. */
	CopyMess(caller_ptr->p_nr, caller_ptr, m_ptr, dst_ptr,
		 dst_ptr->p_messbuf);
	if ((dst_ptr->p_rts_flags &= ~RECEIVING) == 0) enqueue(dst_ptr);
  } else if ( ! (flags & NON_BLOCKING)) {
	/* Destination is not waiting. Block and dequeue caller. */
	caller_ptr->p_messbuf = m_ptr;
	if (caller_ptr->p_rts_flags == 0) dequeue(caller_ptr);
	caller_ptr->p_rts_flags |= SENDING;
	caller_ptr->p_sendto_e = dst_e;

	/* Process is now blocked. Put it on the destination's queue. */
	xpp = &dst_ptr->p_caller_q;		/* find end of list */
	while (*xpp != NIL_PROC) xpp = &(*xpp)->p_q_link;
	*xpp = caller_ptr;			/* add caller to end */
	caller_ptr->p_q_link = NIL_PROC;	/* mark new end of list */
  } else {
	return(ENOTREADY);
  }
  return(OK);
}

/*===========================================================================*
 *				mini_receive				     *
 *===========================================================================*/
PRIVATE int mini_receive(caller_ptr, src_e, m_ptr, flags)
register struct proc *caller_ptr;	/* process trying to get message */
int src_e;				/* which message source is wanted */
message *m_ptr;				/* pointer to message buffer */
unsigned flags;				/* system call flags */
{
/* A process or task wants to get a message. If a message is already queued,
 * acquire it and deblock the sender. If no message from the desired source
 * is available, block the caller, unless the flags don't allow blocking.
 */
  register struct proc **xpp;
  register struct notification **ntf_q_pp;
  message m;
  int bit_nr;
  sys_map_t *map;
  bitchunk_t *chunk;
  int i, src_id, src_proc_nr, src_p;

  if (src_e == ANY) src_p = ANY;
  else {
	okendpt(src_e, &src_p);
	if (proc_addr(src_p)->p_rts_flags & NO_ENDPOINT) return ESRCDIED;
  }

  /* Check to see if a message from desired source is already available.
   * The caller's SENDING flag may be set if SENDREC couldn't send. If it is
   * set, the process should be blocked.
   */
  if (!(caller_ptr->p_rts_flags & SENDING)) {

	/* Check if there are pending notifications, except for SENDREC. */
	if (! (caller_ptr->p_misc_flags & REPLY_PENDING)) {

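		/* The pending set is a bitmap indexed by system id, split
		 * into chunks. Illustrative example (not part of the
		 * original source; assumes 16-bit chunks): a bit at
		 * position 3 of chunk 1 denotes src_id 1 * 16 + 3 = 19,
		 * which id_to_nr() maps back to a process number. The scan
		 * below finds the lowest pending bit in the first nonzero
		 * chunk.
		 */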
		map = &priv(caller_ptr)->s_notify_pending;
		for (chunk=&map->chunk[0]; chunk<&map->chunk[NR_SYS_CHUNKS]; chunk++) {

			/* Find a pending notification from the requested source. */
			if (! *chunk) continue;			/* no bits in chunk */
			for (i=0; ! (*chunk & (1<<i)); ++i) {}	/* look up the bit */
			src_id = (chunk - &map->chunk[0]) * BITCHUNK_BITS + i;
			if (src_id >= NR_SYS_PROCS) break;	/* out of range */
			src_proc_nr = id_to_nr(src_id);		/* get source proc */
#if DEBUG_ENABLE_IPC_WARNINGS
			if (src_proc_nr == NONE) {
				kprintf("mini_receive: sending notify from NONE\n");
			}
#endif
			if (src_e!=ANY && src_p != src_proc_nr) continue;  /* source not ok */
			*chunk &= ~(1 << i);			/* no longer pending */

			/* Found a suitable source, deliver the notification message. */
			BuildMess(&m, src_proc_nr, caller_ptr);	/* assemble message */
			CopyMess(src_proc_nr, proc_addr(HARDWARE), &m, caller_ptr, m_ptr);
			return(OK);				/* report success */
		}
	}

	/* Check caller queue. Use pointer pointers to keep code simple. */
	xpp = &caller_ptr->p_caller_q;
	while (*xpp != NIL_PROC) {
		if (src_e == ANY || src_p == proc_nr(*xpp)) {
#if 0
			if ((*xpp)->p_rts_flags & SLOT_FREE) {
				kprintf("listening to the dead?!?\n");
				return EINVAL;
			}
#endif

			/* Found acceptable message. Copy it and update status. */
			CopyMess((*xpp)->p_nr, *xpp, (*xpp)->p_messbuf, caller_ptr, m_ptr);
			if (((*xpp)->p_rts_flags &= ~SENDING) == 0) enqueue(*xpp);
			*xpp = (*xpp)->p_q_link;	/* remove from queue */
			return(OK);			/* report success */
		}
		xpp = &(*xpp)->p_q_link;		/* proceed to next */
	}
  }

  /* No suitable message is available or the caller couldn't send in SENDREC.
   * Block the process trying to receive, unless the flags tell otherwise.
   */
  if ( ! (flags & NON_BLOCKING)) {
	caller_ptr->p_getfrom_e = src_e;
	caller_ptr->p_messbuf = m_ptr;
	if (caller_ptr->p_rts_flags == 0) dequeue(caller_ptr);
	caller_ptr->p_rts_flags |= RECEIVING;
	return(OK);
  } else {
	return(ENOTREADY);
  }
}

/*===========================================================================*
 *				mini_notify				     *
 *===========================================================================*/
PRIVATE int mini_notify(caller_ptr, dst)
register struct proc *caller_ptr;	/* sender of the notification */
int dst;				/* which process to notify */
{
  register struct proc *dst_ptr = proc_addr(dst);
  int src_id;				/* source id for late delivery */
  message m;				/* the notification message */

  /* Check to see if target is blocked waiting for this message. A process
   * can be both sending and receiving during a SENDREC system call.
   */
  if ((dst_ptr->p_rts_flags & (RECEIVING|SENDING)) == RECEIVING &&
	! (dst_ptr->p_misc_flags & REPLY_PENDING) &&
	(dst_ptr->p_getfrom_e == ANY ||
	 dst_ptr->p_getfrom_e == caller_ptr->p_endpoint)) {

	/* Destination is indeed waiting for a message. Assemble a notification
	 * message and deliver it. Copy from pseudo-source HARDWARE, since the
	 * message is in the kernel's address space.
	 */
	BuildMess(&m, proc_nr(caller_ptr), dst_ptr);
	CopyMess(proc_nr(caller_ptr), proc_addr(HARDWARE), &m,
		 dst_ptr, dst_ptr->p_messbuf);
	dst_ptr->p_rts_flags &= ~RECEIVING;	/* deblock destination */
	if (dst_ptr->p_rts_flags == 0) enqueue(dst_ptr);
	return(OK);
  }

  /* Destination is not ready to receive the notification. Add it to the
   * bit map with pending notifications. Note the indirectness: the system id
   * instead of the process number is used in the pending bit map.
   */
  src_id = priv(caller_ptr)->s_id;
  set_sys_bit(priv(dst_ptr)->s_notify_pending, src_id);
  return(OK);
}

/*===========================================================================*
 *				lock_notify				     *
 *===========================================================================*/
PUBLIC int lock_notify(src_e, dst_e)
int src_e;			/* (endpoint) sender of the notification */
int dst_e;			/* (endpoint) who is to be notified */
{
/* Safe gateway to mini_notify() for tasks and interrupt handlers. The sender
 * is explicitly given to prevent confusion about where the call comes from.
 * The MINIX kernel is not reentrant, which means that interrupts are disabled
 * after the first kernel entry (hardware interrupt, trap, or exception).
 * Locking is done by temporarily disabling interrupts.
 */
  int result, src, dst;

  if (!isokendpt(src_e, &src) || !isokendpt(dst_e, &dst))
	return EDEADSRCDST;

  /* Exception or interrupt occurred, thus already locked. */
  if (k_reenter >= 0) {
	result = mini_notify(proc_addr(src), dst);
  }

  /* Call from task level, locking is required. */
  else {
	lock(0, "notify");
	result = mini_notify(proc_addr(src), dst);
	unlock(0);
  }
  return(result);
}

/*===========================================================================*
 *				enqueue					     *
 *===========================================================================*/
PRIVATE void enqueue(rp)
register struct proc *rp;	/* this process is now runnable */
{
/* Add 'rp' to one of the queues of runnable processes. This function is
 * responsible for inserting a process into one of the scheduling queues.
 * The mechanism is implemented here. The actual scheduling policy is
 * defined in sched() and pick_proc().
 */
  int q;			/* scheduling queue to use */
  int front;			/* add to front or back */

#if DEBUG_SCHED_CHECK
  check_runqueues("enqueue");
  if (rp->p_ready) kprintf("enqueue() called on an already ready process\n");
#endif

  /* Determine where to insert the process. */
  sched(rp, &q, &front);

  /* Now add the process to the queue. */
  if (rdy_head[q] == NIL_PROC) {		/* add to empty queue */
	rdy_head[q] = rdy_tail[q] = rp;		/* create a new queue */
	rp->p_nextready = NIL_PROC;		/* mark new end */
  }
  else if (front) {				/* add to head of queue */
	rp->p_nextready = rdy_head[q];		/* chain head of queue */
	rdy_head[q] = rp;			/* set new queue head */
  }
  else {					/* add to tail of queue */
	rdy_tail[q]->p_nextready = rp;		/* chain tail of queue */
	rdy_tail[q] = rp;			/* set new queue tail */
	rp->p_nextready = NIL_PROC;		/* mark new end */
  }

  /* Now select the next process to run. */
  pick_proc();

#if DEBUG_SCHED_CHECK
  rp->p_ready = 1;
  check_runqueues("enqueue");
#endif
}

/*===========================================================================*
 *				dequeue					     *
 *===========================================================================*/
PRIVATE void dequeue(rp)
register struct proc *rp;	/* this process is no longer runnable */
{
/* A process must be removed from the scheduling queues, for example, because
 * it has blocked. If the currently active process is removed, a new process
 * is picked to run by calling pick_proc().
 */
  register int q = rp->p_priority;	/* queue to use */
  register struct proc **xpp;		/* iterate over queue */
  register struct proc *prev_xp;

  /* Side-effect for kernel: check if the task's stack is still intact. */
  if (iskernelp(rp)) {
	if (*priv(rp)->s_stack_guard != STACK_GUARD)
		panic("stack overrun by task", proc_nr(rp));
  }

#if DEBUG_SCHED_CHECK
  check_runqueues("dequeue");
  if (! rp->p_ready) kprintf("dequeue() called on an already unready process\n");
#endif

  /* Now make sure that the process is not in its ready queue. Remove the
   * process if it is found. A process can be made unready even if it is not
   * running by being sent a signal that kills it.
   */
  prev_xp = NIL_PROC;
  for (xpp = &rdy_head[q]; *xpp != NIL_PROC; xpp = &(*xpp)->p_nextready) {

	if (*xpp == rp) {			/* found process to remove */
		*xpp = (*xpp)->p_nextready;	/* replace with next chain */
		if (rp == rdy_tail[q])		/* queue tail removed */
			rdy_tail[q] = prev_xp;	/* set new tail */
		if (rp == proc_ptr || rp == next_ptr)	/* active process removed */
			pick_proc();		/* pick new process to run */
		break;
	}
	prev_xp = *xpp;				/* save previous in chain */
  }

#if DEBUG_SCHED_CHECK
  rp->p_ready = 0;
  check_runqueues("dequeue");
#endif
}

/*===========================================================================*
 *				sched					     *
 *===========================================================================*/
PRIVATE void sched(rp, queue, front)
register struct proc *rp;	/* process to be scheduled */
int *queue;			/* return: queue to use */
int *front;			/* return: front or back */
{
/* This function determines the scheduling policy. It is called whenever a
 * process must be added to one of the scheduling queues to decide where to
 * insert it. As a side-effect the process' priority may be updated.
 */
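/* Worked example (illustrative, not part of the original source): a process
 * at priority 3 whose quantum is used up receives a fresh quantum, is moved
 * one queue toward IDLE_Q (priority 4), and is placed at the back of that
 * queue. A process preempted with ticks remaining keeps its priority and is
 * placed at the front, so it resumes before its queue peers.
 */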
  int time_left = (rp->p_ticks_left > 0);  /* nonzero if quantum not consumed */

  /* Check whether the process has time left. Otherwise give a new quantum
   * and lower the process' priority, unless the process already is in the
   * lowest queue.
   */
  if (! time_left) {				/* quantum consumed ? */
	rp->p_ticks_left = rp->p_quantum_size;	/* give new quantum */
	if (rp->p_priority < (IDLE_Q-1)) {
		rp->p_priority += 1;		/* lower priority */
	}
  }

  /* If there is time left, the process is added to the front of its queue,
   * so that it can immediately run. The queue to use is simply the process'
   * current priority.
   */
  *queue = rp->p_priority;
  *front = time_left;
}

/*===========================================================================*
 *				pick_proc				     *
 *===========================================================================*/
PRIVATE void pick_proc()
{
/* Decide who to run now. A new process is selected by setting 'next_ptr'.
 * When a billable process is selected, record it in 'bill_ptr', so that the
 * clock task can tell who to bill for system time.
 */
  register struct proc *rp;			/* process to run */
  int q;					/* iterate over queues */

  /* Check each of the scheduling queues for ready processes. The number of
   * queues is defined in proc.h, and priorities are set in the task table.
   * The lowest queue contains IDLE, which is always ready.
   */
  for (q=0; q < NR_SCHED_QUEUES; q++) {
	if ( (rp = rdy_head[q]) != NIL_PROC) {
		next_ptr = rp;			/* run process 'rp' next */
		if (priv(rp)->s_flags & BILLABLE)
			bill_ptr = rp;		/* bill for system time */
		return;
	}
  }
}

/*===========================================================================*
 *				balance_queues				     *
 *===========================================================================*/
#define Q_BALANCE_TICKS	 100
PUBLIC void balance_queues(tp)
timer_t *tp;					/* watchdog timer pointer */
{
/* Check the entire process table and give all processes a higher priority.
 * This effectively means giving a new quantum. If a process already is at
 * its maximum priority, its quantum will be renewed.
 */
  static timer_t queue_timer;			/* timer structure to use */
  register struct proc* rp;			/* process table pointer */
  clock_t next_period;				/* time of next period */
  int ticks_added = 0;				/* total time added */

  for (rp=BEG_PROC_ADDR; rp<END_PROC_ADDR; rp++) {
	if (! isemptyp(rp)) {			/* check slot use */
		lock(5, "balance_queues");
		if (rp->p_priority > rp->p_max_priority) {	/* update priority? */
			if (rp->p_rts_flags == 0) dequeue(rp);	/* take off queue */
			ticks_added += rp->p_quantum_size;	/* do accounting */
			rp->p_priority -= 1;			/* raise priority */
			if (rp->p_rts_flags == 0) enqueue(rp);	/* put on queue */
		}
		else {
			ticks_added += rp->p_quantum_size - rp->p_ticks_left;
			rp->p_ticks_left = rp->p_quantum_size;	/* give new quantum */
		}
		unlock(5);
	}
  }
#if DEBUG
  kprintf("ticks_added: %d\n", ticks_added);
#endif

  /* Now schedule a new watchdog timer to balance the queues again. The
   * period depends on the total amount of quantum ticks added.
   */
  next_period = MAX(Q_BALANCE_TICKS, ticks_added);	/* calculate next */
  set_timer(&queue_timer, get_uptime() + next_period, balance_queues);
}

/*===========================================================================*
 *				lock_send				     *
 *===========================================================================*/
PUBLIC int lock_send(dst_e, m_ptr)
int dst_e;			/* to whom is message being sent? */
message *m_ptr;			/* pointer to message buffer */
{
/* Safe gateway to mini_send() for tasks. */
  int result;
  lock(2, "send");
  result = mini_send(proc_ptr, dst_e, m_ptr, NON_BLOCKING);
  unlock(2);
  return(result);
}

/*===========================================================================*
 *				lock_enqueue				     *
 *===========================================================================*/
PUBLIC void lock_enqueue(rp)
struct proc *rp;		/* this process is now runnable */
{
/* Safe gateway to enqueue() for tasks. */
  lock(3, "enqueue");
  enqueue(rp);
  unlock(3);
}

/*===========================================================================*
 *				lock_dequeue				     *
 *===========================================================================*/
PUBLIC void lock_dequeue(rp)
struct proc *rp;		/* this process is no longer runnable */
{
/* Safe gateway to dequeue() for tasks. */
  if (k_reenter >= 0) {
	/* We're in an exception or interrupt, so don't lock (and don't
	 * unlock).
	 */
	dequeue(rp);
  } else {
	lock(4, "dequeue");
	dequeue(rp);
	unlock(4);
  }
}

/*===========================================================================*
 *				isokendpt_f				     *
 *===========================================================================*/
#if DEBUG_ENABLE_IPC_WARNINGS
PUBLIC int isokendpt_f(file, line, e, p, fatalflag)
char *file;
int line;
#else
PUBLIC int isokendpt_f(e, p, fatalflag)
#endif
int e, *p, fatalflag;
{
  int ok = 0;
  /* Convert an endpoint number into a process number.
   * Return nonzero if the process is alive with the corresponding
   * generation number, zero otherwise.
   *
   * This function is called with file and line number by the
   * isokendpt_d macro if DEBUG_ENABLE_IPC_WARNINGS is defined,
   * otherwise without. This allows us to print where the conversion
   * was attempted, making the errors verbose without adding code
   * for that at every call.
   *
   * If fatalflag is nonzero, we must panic if the conversion doesn't
   * succeed.
   */
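  /* Illustrative reading (an assumption for exposition; the authoritative
   * macros are defined in <minix/endpoint.h>): an endpoint packs a
   * generation count g together with a process slot p, so that
   * _ENDPOINT_P(e) recovers p and _ENDPOINT_G(e) recovers g. A stale
   * endpoint for a reused slot therefore fails the p_endpoint comparison
   * below, because the generations differ even though the slots match.
   */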
  *p = _ENDPOINT_P(e);
  if (!isokprocn(*p)) {
#if DEBUG_ENABLE_IPC_WARNINGS
	kprintf("kernel:%s:%d: bad endpoint %d: proc %d out of range\n",
		file, line, e, *p);
#endif
  } else if (isemptyn(*p)) {
#if DEBUG_ENABLE_IPC_WARNINGS
	kprintf("kernel:%s:%d: bad endpoint %d: proc %d empty\n",
		file, line, e, *p);
#endif
  } else if (proc_addr(*p)->p_endpoint != e) {
#if DEBUG_ENABLE_IPC_WARNINGS
	kprintf("kernel:%s:%d: bad endpoint %d: proc %d has ept %d (generation %d vs. %d)\n",
		file, line, e, *p, proc_addr(*p)->p_endpoint,
		_ENDPOINT_G(e), _ENDPOINT_G(proc_addr(*p)->p_endpoint));
#endif
  } else ok = 1;
  if (!ok && fatalflag) {
	panic("invalid endpoint ", e);
  }
  return ok;
}