/* This file is concerned with allocating and freeing arbitrary-size blocks of
 * physical memory on behalf of the FORK and EXEC system calls.  The key data
 * structure used is the hole table, which maintains a list of holes in memory.
 * It is kept sorted in order of increasing memory address.  The addresses
 * it contains refer to physical memory, starting at absolute address 0
 * (i.e., they are not relative to the start of PM).  During system
 * initialization, the parts of memory containing the interrupt vectors,
 * kernel, and PM are "allocated" to mark them as not available and to
 * remove them from the hole list.
 *
 * The entry points into this file are:
 *   alloc_mem:  allocate a chunk of memory of a given size
 *   free_mem:   release a previously allocated chunk of memory
 *   mem_init:   initialize the tables when PM starts up
 *   max_hole:   returns the largest hole currently available
 *   mem_holes_copy:  give outsiders a copy of the hole list
 */
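
/* Illustrative sketch (hypothetical numbers, not taken from a real boot): right
 * after mem_init() the hole list typically holds one entry per chunk of free
 * physical memory reported by the boot monitor, e.g.
 *
 *	hole_head --> [ h_base=0x120, h_len=0x6e0 ] --> NIL_HOLE
 *
 * Repeated alloc_mem()/free_mem() calls then split such entries apart and
 * merge them back together.
 */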

#include "pm.h"
#include <minix/com.h>
#include <minix/callnr.h>
#include <minix/type.h>
#include <minix/config.h>
#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include "mproc.h"
#include "../../kernel/const.h"
#include "../../kernel/config.h"
#include "../../kernel/type.h"

#define NIL_HOLE (struct hole *) 0

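/* The hole table itself (statically allocated) and the high watermark: the
 * highest end address, in clicks, of any block handed out by alloc_mem().
 * Both are reported to interested callers through mem_holes_copy() below.
 */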
PRIVATE struct hole hole[_NR_HOLES];
PRIVATE u32_t high_watermark = 0;

PRIVATE struct hole *hole_head;	/* pointer to first hole */
PRIVATE struct hole *free_slots;/* ptr to list of unused table slots */
#if ENABLE_SWAP
PRIVATE int swap_fd = -1;	/* file descriptor of open swap file/device */
PRIVATE u32_t swap_offset;	/* offset to start of swap area on swap file */
PRIVATE phys_clicks swap_base;	/* memory offset chosen as swap base */
PRIVATE phys_clicks swap_maxsize;/* maximum amount of swap "memory" possible */
PRIVATE struct mproc *in_queue;	/* queue of processes wanting to swap in */
PRIVATE struct mproc *outswap = &mproc[0];	/* outswap candidate? */
#else /* ! ENABLE_SWAP */
#define swap_base ((phys_clicks) -1)
#endif /* ENABLE_SWAP */

FORWARD _PROTOTYPE( void del_slot, (struct hole *prev_ptr, struct hole *hp) );
FORWARD _PROTOTYPE( void merge, (struct hole *hp) );
#if ENABLE_SWAP
FORWARD _PROTOTYPE( int swap_out, (void) );
#else
#define swap_out()	(0)
#endif

/*===========================================================================*
 *				alloc_mem				     *
 *===========================================================================*/
PUBLIC phys_clicks alloc_mem(clicks)
phys_clicks clicks;		/* amount of memory requested */
{
/* Allocate a block of memory from the free list using first fit.  The block
 * consists of a sequence of contiguous bytes, whose length in clicks is
 * given by 'clicks'.  The base address of the block, in clicks, is returned.
 * The block is always on a click boundary.  This procedure is called when
 * memory is needed for FORK or EXEC.  Swap other processes out if needed.
 */
  register struct hole *hp, *prev_ptr;
  phys_clicks old_base;

  do {
	prev_ptr = NIL_HOLE;
	hp = hole_head;
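	/* Scan only the holes that lie in real memory: with swapping enabled,
	 * entries at or above swap_base describe swap-file space and must not
	 * be handed out here.  Without ENABLE_SWAP, swap_base is the highest
	 * possible click value, so in practice every hole qualifies.
	 */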
	while (hp != NIL_HOLE && hp->h_base < swap_base) {
		if (hp->h_len >= clicks) {
			/* We found a hole that is big enough.  Use it. */
			old_base = hp->h_base;	/* remember where it started */
			hp->h_base += clicks;	/* bite a piece off */
			hp->h_len -= clicks;	/* ditto */

			/* Remember new high watermark of used memory. */
			if (hp->h_base > high_watermark)
				high_watermark = hp->h_base;

			/* Delete the hole if used up completely. */
			if (hp->h_len == 0) del_slot(prev_ptr, hp);

			/* Return the start address of the acquired block. */
			return(old_base);
		}

		prev_ptr = hp;
		hp = hp->h_next;
	}
  } while (swap_out());		/* try to swap some other process out */
  return(NO_MEM);
}
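
/* Minimal usage sketch (illustrative only; the real callers live in PM's FORK
 * and EXEC paths):
 *
 *	phys_clicks base;
 *
 *	if ((base = alloc_mem(clicks)) == NO_MEM) return(ENOMEM);
 *	... use the block ...
 *	free_mem(base, clicks);
 *
 * alloc_mem() returns NO_MEM rather than panicking, so every caller has to
 * check for that value.
 */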

/*===========================================================================*
 *				free_mem				     *
 *===========================================================================*/
PUBLIC void free_mem(base, clicks)
phys_clicks base;		/* base address of block to free */
phys_clicks clicks;		/* number of clicks to free */
{
/* Return a block of free memory to the hole list.  The parameters tell where
 * the block starts in physical memory and how big it is.  The block is added
 * to the hole list.  If it is contiguous with an existing hole on either end,
 * it is merged with the hole or holes.
 */
  register struct hole *hp, *new_ptr, *prev_ptr;

  if (clicks == 0) return;
  if ( (new_ptr = free_slots) == NIL_HOLE)
	panic(__FILE__, "hole table full", NO_NUM);
  new_ptr->h_base = base;
  new_ptr->h_len = clicks;
  free_slots = new_ptr->h_next;
  hp = hole_head;

  /* If this block's address is numerically less than the lowest hole currently
   * available, or if no holes are currently available, put this hole on the
   * front of the hole list.
   */
  if (hp == NIL_HOLE || base <= hp->h_base) {
	/* Block to be freed goes on front of the hole list. */
	new_ptr->h_next = hp;
	hole_head = new_ptr;
	merge(new_ptr);
	return;
  }

  /* Block to be returned does not go on front of hole list. */
  prev_ptr = NIL_HOLE;
  while (hp != NIL_HOLE && base > hp->h_base) {
	prev_ptr = hp;
	hp = hp->h_next;
  }

  /* We found where it goes.  Insert block after 'prev_ptr'. */
  new_ptr->h_next = prev_ptr->h_next;
  prev_ptr->h_next = new_ptr;
  merge(prev_ptr);		/* sequence is 'prev_ptr', 'new_ptr', 'hp' */
}

/*===========================================================================*
 *				del_slot				     *
 *===========================================================================*/
PRIVATE void del_slot(prev_ptr, hp)
/* pointer to hole entry just ahead of 'hp' */
register struct hole *prev_ptr;
/* pointer to hole entry to be removed */
register struct hole *hp;
{
/* Remove an entry from the hole list.  This procedure is called when a
 * request to allocate memory removes a hole in its entirety, thus reducing
 * the number of holes in memory, and requiring the elimination of one
 * entry in the hole list.
 */
  if (hp == hole_head)
	hole_head = hp->h_next;
  else
	prev_ptr->h_next = hp->h_next;

  hp->h_next = free_slots;
  hp->h_base = hp->h_len = 0;
  free_slots = hp;
}

/*===========================================================================*
 *				merge					     *
 *===========================================================================*/
PRIVATE void merge(hp)
register struct hole *hp;	/* ptr to hole to merge with its successors */
{
/* Check for contiguous holes and merge any found.  Contiguous holes can occur
 * when a block of memory is freed, and it happens to abut another hole on
 * either or both ends.  The pointer 'hp' points to the first of a series of
 * three holes that can potentially all be merged together.
 */
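/* Worked example (hypothetical click numbers): if the list contains holes
 * [base 10, len 10] and [base 30, len 10], and free_mem() links a new hole
 * [base 20, len 10] in between and calls merge() on the first one, the first
 * step below absorbs the new hole, giving [base 10, len 20]; the second step
 * then absorbs [base 30, len 10], leaving a single hole [base 10, len 30].
 */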
  register struct hole *next_ptr;

  /* If 'hp' points to the last hole, no merging is possible.  If it does not,
   * try to absorb its successor into it and free the successor's table entry.
   */
  if ( (next_ptr = hp->h_next) == NIL_HOLE) return;
  if (hp->h_base + hp->h_len == next_ptr->h_base) {
	hp->h_len += next_ptr->h_len;	/* first one gets second one's mem */
	del_slot(hp, next_ptr);
  } else {
	hp = next_ptr;
  }

  /* If 'hp' now points to the last hole, return; otherwise, try to absorb its
   * successor into it.
   */
  if ( (next_ptr = hp->h_next) == NIL_HOLE) return;
  if (hp->h_base + hp->h_len == next_ptr->h_base) {
	hp->h_len += next_ptr->h_len;
	del_slot(hp, next_ptr);
  }
}

/*===========================================================================*
 *				mem_init				     *
 *===========================================================================*/
PUBLIC void mem_init(chunks, free)
struct memory *chunks;		/* list of free memory chunks */
phys_clicks *free;		/* memory size summaries */
{
/* Initialize hole lists.  There are two lists: 'hole_head' points to a linked
 * list of all the holes (unused memory) in the system; 'free_slots' points to
 * a linked list of table entries that are not in use.  Initially, the former
 * list has one entry for each chunk of physical memory, and the second
 * list links together the remaining table slots.  As memory becomes more
 * fragmented in the course of time (i.e., the initial big holes break up into
 * smaller holes), new table slots are needed to represent them.  These slots
 * are taken from the list headed by 'free_slots'.
 */
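/* Illustrative example (made-up chunk values): given two boot-monitor chunks
 * { base=0x008, size=0x0f0 } and { base=0x100, size=0x700 }, in clicks, the
 * loop below frees both into the hole list, so that afterwards
 *
 *	hole_head --> [0x008, 0x0f0] --> [0x100, 0x700] --> NIL_HOLE
 *
 * and *free = 0x7f0 clicks.  The entries stay separate because these chunks
 * are not physically contiguous; adjacent chunks would be merged by free_mem().
 */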
  int i;
  register struct hole *hp;

  /* Put all holes on the free list. */
  for (hp = &hole[0]; hp < &hole[_NR_HOLES]; hp++) {
	hp->h_next = hp + 1;
	hp->h_base = hp->h_len = 0;
  }
  hole[_NR_HOLES-1].h_next = NIL_HOLE;
  hole_head = NIL_HOLE;
  free_slots = &hole[0];

  /* Use the chunks of physical memory to allocate holes. */
  *free = 0;
  for (i = NR_MEMS-1; i >= 0; i--) {
	if (chunks[i].size > 0) {
		free_mem(chunks[i].base, chunks[i].size);
		*free += chunks[i].size;
#if ENABLE_SWAP
		if (swap_base < chunks[i].base + chunks[i].size)
			swap_base = chunks[i].base + chunks[i].size;
#endif
	}
  }

#if ENABLE_SWAP
  /* The swap area is represented as a hole above and separate from regular
   * memory.  A hole the size of the swap file is allocated on "swapon".
   */
  swap_base++;			/* make separate */
  /* Unsigned wraparound: all clicks from swap_base up to the top of the
   * phys_clicks range, i.e. the maximum amount of swap we can possibly use.
   */
  swap_maxsize = 0 - swap_base;
#endif
}

/*===========================================================================*
 *				mem_holes_copy				     *
 *===========================================================================*/
PUBLIC int mem_holes_copy(struct hole *holecopies, size_t *bytes, u32_t *hi)
{
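/* Give an interested outside caller a snapshot of the entire hole table plus
 * the high watermark of used memory.  On entry *bytes holds the size of the
 * caller's buffer; if it is smaller than sizeof(hole), ENOSPC is returned.
 * On success *bytes is set to the number of bytes copied and *hi to the
 * high watermark.
 */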
  if (*bytes < sizeof(hole)) return ENOSPC;
  memcpy(holecopies, hole, sizeof(hole));
  *bytes = sizeof(hole);
  *hi = high_watermark;
  return OK;
}

#if ENABLE_SWAP
/*===========================================================================*
 *				swap_on					     *
 *===========================================================================*/
PUBLIC int swap_on(file, offset, size)
char *file;			/* file to swap on */
u32_t offset, size;		/* area on swap file to use */
{
/* Turn swapping on. */

  if (swap_fd != -1) return(EBUSY);	/* already have swap? */

  tell_fs(CHDIR, who_e, FALSE, 0);	/* be like the caller for open() */
  if ((swap_fd = open(file, O_RDWR)) < 0) return(-errno);
  swap_offset = offset;
  size >>= CLICK_SHIFT;			/* convert bytes to clicks */
  if (size > swap_maxsize) size = swap_maxsize;
  if (size > 0) free_mem(swap_base, (phys_clicks) size);	/* the swap hole */
  return(OK);
}

/*===========================================================================*
 *				swap_off				     *
 *===========================================================================*/
PUBLIC int swap_off()
{
/* Turn swapping off. */
  struct mproc *rmp;
  struct hole *hp, *prev_ptr;

  if (swap_fd == -1) return(OK);	/* can't turn off what isn't on */

  /* Put all swapped out processes on the inswap queue and swap in. */
  for (rmp = &mproc[0]; rmp < &mproc[NR_PROCS]; rmp++) {
	if (rmp->mp_flags & ONSWAP) swap_inqueue(rmp);
  }
  swap_in();

  /* All in memory? */
  for (rmp = &mproc[0]; rmp < &mproc[NR_PROCS]; rmp++) {
	if (rmp->mp_flags & ONSWAP) return(ENOMEM);
  }

  /* Yes.  Remove the swap hole and close the swap file descriptor. */
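  /* After del_slot() the entry that 'hp' pointed to is gone, so the scan
   * restarts from the current head of the list rather than continuing from
   * the deleted position.
   */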
  for (hp = hole_head; hp != NIL_HOLE; prev_ptr = hp, hp = hp->h_next) {
	if (hp->h_base >= swap_base) {
		del_slot(prev_ptr, hp);
		hp = hole_head;
	}
  }
  close(swap_fd);
  swap_fd = -1;
  return(OK);
}

/*===========================================================================*
 *				swap_inqueue				     *
 *===========================================================================*/
PUBLIC void swap_inqueue(rmp)
register struct mproc *rmp;	/* process to add to the queue */
{
/* Put a swapped out process on the queue of processes to be swapped in.  This
 * happens when such a process gets a signal, or if a reply message must be
 * sent, like when a process doing a wait() has a child that exits.
 */
  struct mproc **pmp;

  if (rmp->mp_flags & SWAPIN) return;	/* already queued */

  /* Append the process to the tail of the inswap queue. */
  for (pmp = &in_queue; *pmp != NULL; pmp = &(*pmp)->mp_swapq) {}
  *pmp = rmp;
  rmp->mp_swapq = NULL;
  rmp->mp_flags |= SWAPIN;
}

/*===========================================================================*
 *				swap_in					     *
 *===========================================================================*/
PUBLIC void swap_in()
{
/* Try to swap in the processes on the inswap queue; they were queued there
 * because a signal or a reply message must be delivered to them.
 */
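/* The queue is walked via a pointer to the link field ('pmp') instead of a
 * pointer to the element itself, so that an entry can be unlinked in place
 * with '*pmp = rmp->mp_swapq', whether it is swapped in or found to be dead.
 */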
  struct mproc **pmp, *rmp;
  phys_clicks old_base, new_base, size;
  off_t off;
  int proc_nr;

  pmp = &in_queue;
  while ((rmp = *pmp) != NULL) {
	proc_nr = (rmp - mproc);
	size = rmp->mp_seg[S].mem_vir + rmp->mp_seg[S].mem_len
		- rmp->mp_seg[D].mem_vir;	/* data + gap + stack, in clicks */

	if (!(rmp->mp_flags & SWAPIN)) {
		/* Guess it got killed.  (Queue is cleaned here.) */
		*pmp = rmp->mp_swapq;
		continue;
	} else
	if ((new_base = alloc_mem(size)) == NO_MEM) {
		/* No memory for this one, try the next. */
		pmp = &rmp->mp_swapq;
	} else {
		/* We've found memory.  Update map and swap in. */
		old_base = rmp->mp_seg[D].mem_phys;
		rmp->mp_seg[D].mem_phys = new_base;
		rmp->mp_seg[S].mem_phys = rmp->mp_seg[D].mem_phys +
			(rmp->mp_seg[S].mem_vir - rmp->mp_seg[D].mem_vir);
		sys_newmap(rmp->mp_endpoint, rmp->mp_seg);
		off = swap_offset + ((off_t) (old_base - swap_base) << CLICK_SHIFT);
		lseek(swap_fd, off, SEEK_SET);
		rw_seg(0, swap_fd, rmp->mp_endpoint, D, (phys_bytes) size << CLICK_SHIFT);
		free_mem(old_base, size);
		rmp->mp_flags &= ~(ONSWAP|SWAPIN);
		*pmp = rmp->mp_swapq;
		check_pending(rmp);	/* a signal may have waked this one */
	}
  }
}

/*===========================================================================*
 *				swap_out				     *
 *===========================================================================*/
PRIVATE int swap_out()
{
/* Try to find a process that can be swapped out.  Candidates are those blocked
 * on a system call that PM handles, like wait(), pause() or sigsuspend().
 */
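/* The scan is round-robin: it starts just after the process chosen last time
 * ('outswap') and wraps around, so successive calls do not keep picking the
 * same victim.
 */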
  struct mproc *rmp;
  struct hole *hp, *prev_ptr;
  phys_clicks old_base, new_base, size;
  off_t off;
  int proc_nr;

  rmp = outswap;
  do {
	if (++rmp == &mproc[NR_PROCS]) rmp = &mproc[0];

	/* A candidate? */
	if (!(rmp->mp_flags & (PAUSED | WAITING | SIGSUSPENDED))) continue;

	/* Already on swap or otherwise to be avoided? */
	if (rmp->mp_flags & (DONT_SWAP | TRACED | REPLY | ONSWAP)) continue;

	/* Got one, find a swap hole and swap it out. */
	proc_nr = (rmp - mproc);
	size = rmp->mp_seg[S].mem_vir + rmp->mp_seg[S].mem_len
		- rmp->mp_seg[D].mem_vir;	/* data + gap + stack, in clicks */

	prev_ptr = NIL_HOLE;
	for (hp = hole_head; hp != NIL_HOLE; prev_ptr = hp, hp = hp->h_next) {
		if (hp->h_base >= swap_base && hp->h_len >= size) break;
	}
	if (hp == NIL_HOLE) continue;	/* oops, not enough swapspace */
	new_base = hp->h_base;
	hp->h_base += size;
	hp->h_len -= size;
	if (hp->h_len == 0) del_slot(prev_ptr, hp);

	off = swap_offset + ((off_t) (new_base - swap_base) << CLICK_SHIFT);
	lseek(swap_fd, off, SEEK_SET);
	rw_seg(1, swap_fd, rmp->mp_endpoint, D, (phys_bytes) size << CLICK_SHIFT);
	old_base = rmp->mp_seg[D].mem_phys;
	rmp->mp_seg[D].mem_phys = new_base;
	rmp->mp_seg[S].mem_phys = rmp->mp_seg[D].mem_phys +
		(rmp->mp_seg[S].mem_vir - rmp->mp_seg[D].mem_vir);
	sys_newmap(rmp->mp_endpoint, rmp->mp_seg);
	free_mem(old_base, size);
	rmp->mp_flags |= ONSWAP;

	outswap = rmp;		/* next time start here */
	return(TRUE);
  } while (rmp != outswap);

  return(FALSE);	/* no candidate found */
}
#endif /* ENABLE_SWAP */