source: tags/minix3.1.2a-orig/minix/servers/pm/alloc.c@14

Last change on this file since 14 was 14, checked in by Mattia Monga, 13 years ago

Best fit

File size: 16.5 KB
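This revision (the "Best fit" change noted above) replaces PM's first-fit hole scan in alloc_mem() with a best-fit policy: a hole of exactly the requested size is used at once, and otherwise the hole that would leave the smallest remainder is split. As a rough illustration only, with simplified hypothetical types rather than PM's own struct hole, the selection rule amounts to:

#include <stddef.h>

/* Simplified, hypothetical hole descriptor, for illustration only. */
struct h {
	unsigned base, len;
	struct h *next;
};

/* Return the hole leaving the smallest remainder after carving off 'want'
 * units, or NULL if no hole is large enough.  An exact fit wins immediately.
 */
static struct h *best_fit(struct h *head, unsigned want)
{
	struct h *best = NULL;
	struct h *hp;

	for (hp = head; hp != NULL; hp = hp->next) {
		if (hp->len < want) continue;		/* too small */
		if (hp->len == want) return hp;		/* perfect fit */
		if (best == NULL || hp->len - want < best->len - want)
			best = hp;			/* smallest leftover so far */
	}
	return best;
}

The file below applies the same idea directly to PM's hole list, splitting the chosen hole in place and falling back to swapping out other processes when nothing fits.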
/* This file is concerned with allocating and freeing arbitrary-size blocks of
 * physical memory on behalf of the FORK and EXEC system calls.  The key data
 * structure used is the hole table, which maintains a list of holes in memory.
 * It is kept sorted in order of increasing memory address.  The addresses
 * it contains refer to physical memory, starting at absolute address 0
 * (i.e., they are not relative to the start of PM).  During system
 * initialization, the parts of memory containing the interrupt vectors,
 * kernel, and PM are "allocated" to mark them as not available and to
 * remove them from the hole list.
 *
 * The entry points into this file are:
 *   alloc_mem:      allocate a given sized chunk of memory
 *   free_mem:       release a previously allocated chunk of memory
 *   mem_init:       initialize the tables when PM starts up
 *   max_hole:       returns the largest hole currently available
 *   mem_holes_copy: for outsiders who want a copy of the hole-list
 */

#include "pm.h"
#include <minix/com.h>
#include <minix/callnr.h>
#include <minix/type.h>
#include <minix/config.h>
#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include "mproc.h"
#include "../../kernel/const.h"
#include "../../kernel/config.h"
#include "../../kernel/type.h"

#define NIL_HOLE (struct hole *) 0

PRIVATE struct hole hole[_NR_HOLES];
PRIVATE u32_t high_watermark = 0;

PRIVATE struct hole *hole_head;		/* pointer to first hole */
PRIVATE struct hole *free_slots;	/* ptr to list of unused table slots */
#if ENABLE_SWAP
PRIVATE int swap_fd = -1;		/* file descriptor of open swap file/device */
PRIVATE u32_t swap_offset;		/* offset to start of swap area on swap file */
PRIVATE phys_clicks swap_base;		/* memory offset chosen as swap base */
PRIVATE phys_clicks swap_maxsize;	/* maximum amount of swap "memory" possible */
PRIVATE struct mproc *in_queue;		/* queue of processes wanting to swap in */
PRIVATE struct mproc *outswap = &mproc[0];	/* outswap candidate? */
#else /* ! ENABLE_SWAP */
#define swap_base ((phys_clicks) -1)
#endif /* ENABLE_SWAP */

FORWARD _PROTOTYPE( void del_slot, (struct hole *prev_ptr, struct hole *hp) );
FORWARD _PROTOTYPE( void merge, (struct hole *hp) );
#if ENABLE_SWAP
FORWARD _PROTOTYPE( int swap_out, (void) );
#else
#define swap_out()	(0)
#endif

/*===========================================================================*
 *				alloc_mem				     *
 *===========================================================================*/
PUBLIC phys_clicks alloc_mem(clicks)
phys_clicks clicks;		/* amount of memory requested */
{
/* Allocate a block of memory from the free list using best fit.  The block
 * consists of a sequence of contiguous bytes, whose length in clicks is
 * given by 'clicks'.  A pointer to the block is returned.  The block is
 * always on a click boundary.  This procedure is called when memory is
 * needed for FORK or EXEC.  Swap other processes out if needed.
 */
  register struct hole *hp, *prev_ptr, *best, *prev_best;
  int delta = -1;
  phys_clicks old_base;

  do {
	prev_ptr = NIL_HOLE;
	hp = hole_head;
	while (hp != NIL_HOLE && hp->h_base < swap_base) {
		printf("base:%d len:%d (req:%d)\n", hp->h_base, hp->h_len, clicks);
		if (hp->h_len == clicks) {
			/* We found a hole of exactly the right size.  Use it. */
			old_base = hp->h_base;	/* remember where it started */
			hp->h_base += clicks;	/* bite a piece off */
			hp->h_len -= clicks;	/* ditto */

			/* Remember new high watermark of used memory. */
			if (hp->h_base > high_watermark)
				high_watermark = hp->h_base;

			/* Delete the hole if used up completely. */
			if (hp->h_len == 0) del_slot(prev_ptr, hp);

			/* Return the start address of the acquired block. */
			printf("Allocazione 'perfetta'\n");	/* "perfect" allocation */
			return(old_base);
		}
		if (hp->h_len > clicks) {
			/* Hole is big enough; remember it if it leaves the
			 * smallest remainder seen so far.
			 */
			if (hp->h_len - clicks < delta || delta == -1) {
				delta = hp->h_len - clicks;
				best = hp;
				prev_best = prev_ptr;
			}
		}
		prev_ptr = hp;
		hp = hp->h_next;
	}
	if (delta > -1) {
		printf("Allocazione con delta=%d\n", delta);	/* allocation with leftover delta */
		old_base = best->h_base;	/* remember where it started */
		best->h_base += clicks;		/* bite a piece off */
		best->h_len -= clicks;		/* i.e., delta */

		/* Remember new high watermark of used memory. */
		if (best->h_base > high_watermark)
			high_watermark = best->h_base;

		/* Delete the hole if used up completely. */
		if (best->h_len == 0) printf("Non dovrebbe mai succedere\n");	/* should never happen */

		/* Return the start address of the acquired block. */
		return(old_base);
	}
  } while (swap_out());		/* try to swap some other process out */
  return(NO_MEM);
}
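
/* Note on the best-fit scan above: an exact-size hole is taken as soon as it
 * is found; otherwise 'best' remembers the hole that leaves the smallest
 * remainder ('delta').  Because a split hole always keeps delta > 0 clicks,
 * best->h_len cannot reach zero in that branch, which is why del_slot() is
 * never needed there; 'prev_best' is recorded but currently unused.
 */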

/*===========================================================================*
 *				free_mem				     *
 *===========================================================================*/
PUBLIC void free_mem(base, clicks)
phys_clicks base;		/* base address of block to free */
phys_clicks clicks;		/* number of clicks to free */
{
/* Return a block of free memory to the hole list.  The parameters tell where
 * the block starts in physical memory and how big it is.  The block is added
 * to the hole list.  If it is contiguous with an existing hole on either end,
 * it is merged with the hole or holes.
 */
  register struct hole *hp, *new_ptr, *prev_ptr;

  if (clicks == 0) return;
  if ( (new_ptr = free_slots) == NIL_HOLE)
	panic(__FILE__, "hole table full", NO_NUM);
  new_ptr->h_base = base;
  new_ptr->h_len = clicks;
  free_slots = new_ptr->h_next;
  hp = hole_head;

  /* If this block's address is numerically less than the lowest hole currently
   * available, or if no holes are currently available, put this hole on the
   * front of the hole list.
   */
  if (hp == NIL_HOLE || base <= hp->h_base) {
	/* Block to be freed goes on front of the hole list. */
	new_ptr->h_next = hp;
	hole_head = new_ptr;
	merge(new_ptr);
	return;
  }

  /* Block to be returned does not go on front of hole list. */
  prev_ptr = NIL_HOLE;
  while (hp != NIL_HOLE && base > hp->h_base) {
	prev_ptr = hp;
	hp = hp->h_next;
  }

  /* We found where it goes.  Insert block after 'prev_ptr'. */
  new_ptr->h_next = prev_ptr->h_next;
  prev_ptr->h_next = new_ptr;
  merge(prev_ptr);		/* sequence is 'prev_ptr', 'new_ptr', 'hp' */
}

/*===========================================================================*
 *				del_slot				     *
 *===========================================================================*/
PRIVATE void del_slot(prev_ptr, hp)
register struct hole *prev_ptr;	/* pointer to hole entry just ahead of 'hp' */
register struct hole *hp;	/* pointer to hole entry to be removed */
{
/* Remove an entry from the hole list.  This procedure is called when a
 * request to allocate memory removes a hole in its entirety, thus reducing
 * the number of holes in memory, and requiring the elimination of one
 * entry in the hole list.
 */
  if (hp == hole_head)
	hole_head = hp->h_next;
  else
	prev_ptr->h_next = hp->h_next;

  hp->h_next = free_slots;
  hp->h_base = hp->h_len = 0;
  free_slots = hp;
}

/*===========================================================================*
 *				merge					     *
 *===========================================================================*/
PRIVATE void merge(hp)
register struct hole *hp;	/* ptr to hole to merge with its successors */
{
/* Check for contiguous holes and merge any found.  Contiguous holes can occur
 * when a block of memory is freed, and it happens to abut another hole on
 * either or both ends.  The pointer 'hp' points to the first of a series of
 * three holes that can potentially all be merged together.
 */
  register struct hole *next_ptr;

  /* If 'hp' points to the last hole, no merging is possible.  If it does not,
   * try to absorb its successor into it and free the successor's table entry.
   */
  if ( (next_ptr = hp->h_next) == NIL_HOLE) return;
  if (hp->h_base + hp->h_len == next_ptr->h_base) {
	hp->h_len += next_ptr->h_len;	/* first one gets second one's mem */
	del_slot(hp, next_ptr);
  } else {
	hp = next_ptr;
  }

  /* If 'hp' now points to the last hole, return; otherwise, try to absorb its
   * successor into it.
   */
  if ( (next_ptr = hp->h_next) == NIL_HOLE) return;
  if (hp->h_base + hp->h_len == next_ptr->h_base) {
	hp->h_len += next_ptr->h_len;
	del_slot(hp, next_ptr);
  }
}

/*===========================================================================*
 *				mem_init				     *
 *===========================================================================*/
PUBLIC void mem_init(chunks, free)
struct memory *chunks;		/* list of free memory chunks */
phys_clicks *free;		/* memory size summaries */
{
/* Initialize hole lists.  There are two lists: 'hole_head' points to a linked
 * list of all the holes (unused memory) in the system; 'free_slots' points to
 * a linked list of table entries that are not in use.  Initially, the former
 * list has one entry for each chunk of physical memory, and the second
 * list links together the remaining table slots.  As memory becomes more
 * fragmented in the course of time (i.e., the initial big holes break up into
 * smaller holes), new table slots are needed to represent them.  These slots
 * are taken from the list headed by 'free_slots'.
 */
  int i;
  register struct hole *hp;

  /* Put all holes on the free list. */
  for (hp = &hole[0]; hp < &hole[_NR_HOLES]; hp++) {
	hp->h_next = hp + 1;
	hp->h_base = hp->h_len = 0;
  }
  hole[_NR_HOLES-1].h_next = NIL_HOLE;
  hole_head = NIL_HOLE;
  free_slots = &hole[0];

  /* Use the chunks of physical memory to allocate holes. */
  *free = 0;
  for (i = NR_MEMS-1; i >= 0; i--) {
	if (chunks[i].size > 0) {
		free_mem(chunks[i].base, chunks[i].size);
		*free += chunks[i].size;
#if ENABLE_SWAP
		if (swap_base < chunks[i].base + chunks[i].size)
			swap_base = chunks[i].base + chunks[i].size;
#endif
	}
  }

#if ENABLE_SWAP
  /* The swap area is represented as a hole above and separate from regular
   * memory.  A hole of the size of the swap file is allocated on "swapon".
   */
  swap_base++;			/* make separate */
  swap_maxsize = 0 - swap_base;	/* maximum we can possibly use */
#endif
}

/*===========================================================================*
 *				mem_holes_copy				     *
 *===========================================================================*/
PUBLIC int mem_holes_copy(struct hole *holecopies, size_t *bytes, u32_t *hi)
{
  if (*bytes < sizeof(hole)) return ENOSPC;
  memcpy(holecopies, hole, sizeof(hole));
  *bytes = sizeof(hole);
  *hi = high_watermark;
  return OK;
}

#if ENABLE_SWAP
/*===========================================================================*
 *				swap_on					     *
 *===========================================================================*/
PUBLIC int swap_on(file, offset, size)
char *file;			/* file to swap on */
u32_t offset, size;		/* area on swap file to use */
{
/* Turn swapping on. */

  if (swap_fd != -1) return(EBUSY);	/* already have swap? */

  tell_fs(CHDIR, who_e, FALSE, 0);	/* be like the caller for open() */
  if ((swap_fd = open(file, O_RDWR)) < 0) return(-errno);
  swap_offset = offset;
  size >>= CLICK_SHIFT;
  if (size > swap_maxsize) size = swap_maxsize;
  if (size > 0) free_mem(swap_base, (phys_clicks) size);
  return(OK);
}

/*===========================================================================*
 *				swap_off				     *
 *===========================================================================*/
PUBLIC int swap_off()
{
/* Turn swapping off. */
  struct mproc *rmp;
  struct hole *hp, *prev_ptr;

  if (swap_fd == -1) return(OK);	/* can't turn off what isn't on */

  /* Put all swapped out processes on the inswap queue and swap in. */
  for (rmp = &mproc[0]; rmp < &mproc[NR_PROCS]; rmp++) {
	if (rmp->mp_flags & ONSWAP) swap_inqueue(rmp);
  }
  swap_in();

  /* All in memory? */
  for (rmp = &mproc[0]; rmp < &mproc[NR_PROCS]; rmp++) {
	if (rmp->mp_flags & ONSWAP) return(ENOMEM);
  }

  /* Yes.  Remove the swap hole and close the swap file descriptor. */
  for (hp = hole_head; hp != NIL_HOLE; prev_ptr = hp, hp = hp->h_next) {
	if (hp->h_base >= swap_base) {
		del_slot(prev_ptr, hp);
		hp = hole_head;
	}
  }
  close(swap_fd);
  swap_fd = -1;
  return(OK);
}

/*===========================================================================*
 *				swap_inqueue				     *
 *===========================================================================*/
PUBLIC void swap_inqueue(rmp)
register struct mproc *rmp;	/* process to add to the queue */
{
/* Put a swapped out process on the queue of processes to be swapped in.  This
 * happens when such a process gets a signal, or if a reply message must be
 * sent, like when a process doing a wait() has a child that exits.
 */
  struct mproc **pmp;

  if (rmp->mp_flags & SWAPIN) return;	/* already queued */

  for (pmp = &in_queue; *pmp != NULL; pmp = &(*pmp)->mp_swapq) {}
  *pmp = rmp;
  rmp->mp_swapq = NULL;
  rmp->mp_flags |= SWAPIN;
}

/*===========================================================================*
 *				swap_in					     *
 *===========================================================================*/
PUBLIC void swap_in()
{
/* Try to swap in a process on the inswap queue.  We want to send it a message,
 * interrupt it, or something.
 */
  struct mproc **pmp, *rmp;
  phys_clicks old_base, new_base, size;
  off_t off;
  int proc_nr;

  pmp = &in_queue;
  while ((rmp = *pmp) != NULL) {
	proc_nr = (rmp - mproc);
	size = rmp->mp_seg[S].mem_vir + rmp->mp_seg[S].mem_len
		- rmp->mp_seg[D].mem_vir;

	if (!(rmp->mp_flags & SWAPIN)) {
		/* Guess it got killed.  (Queue is cleaned here.) */
		*pmp = rmp->mp_swapq;
		continue;
	} else
	if ((new_base = alloc_mem(size)) == NO_MEM) {
		/* No memory for this one, try the next. */
		pmp = &rmp->mp_swapq;
	} else {
		/* We've found memory.  Update map and swap in. */
		old_base = rmp->mp_seg[D].mem_phys;
		rmp->mp_seg[D].mem_phys = new_base;
		rmp->mp_seg[S].mem_phys = rmp->mp_seg[D].mem_phys +
			(rmp->mp_seg[S].mem_vir - rmp->mp_seg[D].mem_vir);
		sys_newmap(rmp->mp_endpoint, rmp->mp_seg);
		off = swap_offset + ((off_t) (old_base - swap_base) << CLICK_SHIFT);
		lseek(swap_fd, off, SEEK_SET);
		rw_seg(0, swap_fd, rmp->mp_endpoint, D, (phys_bytes)size << CLICK_SHIFT);
		free_mem(old_base, size);
		rmp->mp_flags &= ~(ONSWAP|SWAPIN);
		*pmp = rmp->mp_swapq;
		check_pending(rmp);	/* a signal may have waked this one */
	}
  }
}

/*===========================================================================*
 *				swap_out				     *
 *===========================================================================*/
PRIVATE int swap_out()
{
/* Try to find a process that can be swapped out.  Candidates are those blocked
 * on a system call that PM handles, like wait(), pause() or sigsuspend().
 */
  struct mproc *rmp;
  struct hole *hp, *prev_ptr;
  phys_clicks old_base, new_base, size;
  off_t off;
  int proc_nr;

  rmp = outswap;
  do {
	if (++rmp == &mproc[NR_PROCS]) rmp = &mproc[0];

	/* A candidate? */
	if (!(rmp->mp_flags & (PAUSED | WAITING | SIGSUSPENDED))) continue;

	/* Already on swap or otherwise to be avoided? */
	if (rmp->mp_flags & (DONT_SWAP | TRACED | REPLY | ONSWAP)) continue;

	/* Got one, find a swap hole and swap it out. */
	proc_nr = (rmp - mproc);
	size = rmp->mp_seg[S].mem_vir + rmp->mp_seg[S].mem_len
		- rmp->mp_seg[D].mem_vir;

	prev_ptr = NIL_HOLE;
	for (hp = hole_head; hp != NIL_HOLE; prev_ptr = hp, hp = hp->h_next) {
		if (hp->h_base >= swap_base && hp->h_len >= size) break;
	}
	if (hp == NIL_HOLE) continue;	/* oops, not enough swapspace */
	new_base = hp->h_base;
	hp->h_base += size;
	hp->h_len -= size;
	if (hp->h_len == 0) del_slot(prev_ptr, hp);

	off = swap_offset + ((off_t) (new_base - swap_base) << CLICK_SHIFT);
	lseek(swap_fd, off, SEEK_SET);
	rw_seg(1, swap_fd, rmp->mp_endpoint, D, (phys_bytes)size << CLICK_SHIFT);
	old_base = rmp->mp_seg[D].mem_phys;
	rmp->mp_seg[D].mem_phys = new_base;
	rmp->mp_seg[S].mem_phys = rmp->mp_seg[D].mem_phys +
		(rmp->mp_seg[S].mem_vir - rmp->mp_seg[D].mem_vir);
	sys_newmap(rmp->mp_endpoint, rmp->mp_seg);
	free_mem(old_base, size);
	rmp->mp_flags |= ONSWAP;

	outswap = rmp;		/* next time start here */
	return(TRUE);
  } while (rmp != outswap);

  return(FALSE);	/* no candidate found */
}
#endif /* SWAP */