/* SB - Copyright 1982 by Ken Harrenstien, SRI International
 * This software is quasi-public; it may be used freely with
 * like software, but may NOT be sold or made part of licensed
 * products without permission of the author.  In all cases
 * the source code and any modifications thereto must remain
 * available to any user.
 *
 * This is part of the SB library package.
 * Any software using the SB library must likewise be made
 * quasi-public, with freely available sources.
 */

#if 0
        This file contains the low-level memory allocation
subroutines which are used by the SBLK routines.  The code here
is quite machine-dependent, and the definitions in "sb.h" should be
carefully checked to verify that they are correct for the target
machine.

        The ultimate low-level routine is "sbrk()" which must be
provided by the system's C library.  SBM expects that successive calls
to sbrk() will return contiguous areas of memory with progressively
higher addresses.  Also, the very first call to sbrk() is assumed to
return a word-aligned address.
#endif /*COMMENT*/
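
/* A minimal sketch (not part of the library) of the sbrk() contract
 * assumed above; "a" and "b" are illustrative names only:
 */
#if 0
        char *a = sbrk(64);
        char *b = sbrk(64);
        /* Expect: (unsigned)a % WDSIZE == 0   (first break word-aligned)
         *         b == a + 64                 (contiguous, ascending)
         */
#endif /*EXAMPLE*/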

#include "sb.h"

#define FUDGE (sizeof(struct smblk))    /* Allow this much fudge in
                                         * allocation, to prevent undue
                                         * fragmentation */

char *(*sbm_debug)();   /* Debug switch - user-furnished routine */

struct smblk *sbm_nfl;          /* Pointer to node freelist */
struct smblk *sbm_nxtra;        /* Reserved extra free node */
struct smblk *sbm_list;         /* Pointer to smblk memory alloc list.
                                 * ALL smblks are strung onto this list
                                 * except for the freelist!
                                 */
SBMA sbm_lowaddr;       /* Lowest word-aligned address we know about. */

/* If compiling with debug switch set, use special routine in place of
 * sbrk so we can pretend we have a very limited area of free memory.
 */
#ifdef DBG_SIZE
#define SBM_SBRK sbm_brk
char *sbm_brk();
#else
#define SBM_SBRK sbrk
#endif /*DBG_SIZE*/

/* Forward routine declarations */
char *sbrk();
struct smblk *sbm_nmak(), *sbm_nget(), *sbm_mget(), *sbm_split();
struct smblk *sbm_lmak(), *sbm_err();


/* SBM_INIT - Initialize storage management.
 *      If args are zero, normal initialization is done.  Otherwise,
 * args are understood to be pointers to an area of memory allocated
 * on the stack (eg by an "int mem[2000]" declaration in MAIN) and
 * initialization will include this area in addition to the
 * unused space between "_end" and the start of the stack segment.
 * This is mostly of use for PDP11s which would otherwise waste a lot
 * of address space.
 *      Maybe should have a SBM_RESET() function?
 */

struct smblk *
sbm_init(xaddr,xlen)
SBMA xaddr;     /* Address of allocated stack area if any */
SBMO xlen;      /* Size of this area */
{       register struct smblk *sm, *sml;
        register char *cp;

        /* Get initial chunk of memory from standard system rtn */
        if((cp = SBM_SBRK(SMNODES*sizeof(struct smblk))) == 0
          || (int) cp == -1)
                return(sbm_err(0,"Can't sbrk"));
        sm = (struct smblk *)cp;        /* Better be word-aligned! */
        sbm_lmak(sm,(SBMO)sizeof(struct smblk),SMNODES); /* Make list */
        sbm_nfl = sm;                   /* Point freelist at it */
        sbm_lowaddr = (SBMA)sm;         /* Remember lowest addr seen */

        /* Set up 1st node pointing to all memory from here on up.
         * We don't know exactly how much will be available at this point,
         * so we just pretend we have the maximum possible.
         */
        sbm_list = sml = sbm_nget();
        sml->smforw = sml->smback = 0;
        sml->smflags = SM_USE|SM_NID;   /* Initial flags */
        sml->smaddr = (SBMA) sml;
        sml->smlen = MAXSBMO;           /* Pretend we have lots */
        sml->smuse = (SMNODES * sizeof(struct smblk));

        /* Now split off everything above initial allocation as NXM. */
        sm = sbm_split(sml, sm->smuse);
        sml->smflags |= SM_MNODS;       /* Mark 1st node as having SM nodes */
        sm->smflags |= SM_NXM;          /* Mark 2nd node as NXM */

        /* Now possibly set up extra nodes, if stack mem is being allocated
         * (From our viewpoint it looks as if a chunk in the middle of
         * the initial NXM section has been declared usable)
         */
        if(xlen)
        {       /* Allow for "extra" static stack memory */
                /* Will lose if xaddr <= 1st NXM! */
                sml = sbm_split(sm, (SBMO)(xaddr - sm->smaddr));
                sbm_split(sml, xlen);   /* Split off following NXM */
                sml->smflags &= ~(SM_USE|SM_NXM);  /* This node is free mem! */
        }

        /* Now set up a small additional node which points to the NXM
         * that we cannot get from SBRK.  At this stage, this is just
         * a place-holder, to reserve the node so we don't have to
         * worry about running out of nodes at the same time sbrk stops
         * returning memory.
         * SM points to the NXM that we expect SBRK to dig into.
         */
        sbm_split(sm, sm->smlen - WDSIZE);      /* Chop off teensy bit */
        sm->smflags &= ~SM_USE;         /* Now mark NXM "free"!! */

        /* Finally, reserve an "extra" SM node for use by sbm_nget
         * when it is allocating more freelist node chunks.
         */
        sbm_nxtra = sbm_nget();

        return(sbm_list);
}
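
/* Illustrative call using the optional stack-area arguments described
 * above (a sketch, not part of the library):
 */
#if 0
        int mem[2000];          /* Scratch area declared in MAIN */
        struct smblk *sml;

        sml = sbm_init((SBMA)mem, (SBMO)sizeof(mem));
        if(sml == 0)
                /* Initialization failed */ ;
#endif /*EXAMPLE*/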


/* SBM_NGET() - Get a free SM node.
 *      Note the hair to provide a spare SM node when
 * we are allocating memory for more SM nodes.  This is necessary
 * because sbm_mget and sbm_nget call each other recursively and
 * sbm_mget cannot create any new memory without a SM node to point
 * at the allocated chunk.
 */
struct smblk *
sbm_nget()
{       register struct smblk *sm, *sml;

        if(!(sm = sbm_nfl))     /* Get a node from freelist */
        {       /* Freelist is empty, try to allocate more nodes. */

                /* Put our "spare" smblk on freelist temporarily so that
                 * sbm_mget has a chance of winning.
                 * Infinite recursion is avoided by a test
                 * in sbm_mget which checks sbm_nfl and sbm_nxtra.
                 */
                if(!(sm = sbm_nxtra))
                        return(sbm_err(0,"Zero sbm_nxtra!"));
                sm->smforw = 0;
                sbm_nfl = sm;
                sbm_nxtra = 0;

                /* Try to allocate another chunk of SM nodes. */
                sml = sbm_nmak(sizeof(struct smblk),SM_MNODS);

                /* Put the new free nodes (if any) on freelist.
                 * Done this way because freelist may have had one or two
                 * nodes added to it by sbm_mget, so can't just stick
                 * a new pointer in sbm_nfl.
                 */
                while(sm = sml)
                {       sml = sm->smforw;
                        sbm_nfre(sm);
                }

                /* Now reserve an extra node again.
                 * It is an error if there is nothing on freelist here,
                 * because even if sbm_mget failed the "extra node" should
                 * still be on freelist.  The check for a zero sbm_nxtra
                 * above will catch such an error.
                 */
                sbm_nxtra = sbm_nget();

                /* Now see if anything to return */
                if(!(sm = sbm_nfl))     /* If freelist empty again, */
                        return(0);      /* give up. */
        }
        sbm_nfl = sm->smforw;   /* If win, take it off freelist */
        return(sm);             /* Return ptr or 0 if none */
}

/* SBM_NFRE(sm) - Return a SM node to the SM freelist.
 */
sbm_nfre(smp)
struct smblk *smp;
{       register struct smblk *sm;
        (sm = smp)->smflags = 0;
        sm->smforw = sbm_nfl;
        sbm_nfl = sm;
}

/* SBM_NMAK(elsize, flag) - Make (allocate & build) a typeless node freelist.
 */
struct smblk *
sbm_nmak(elsize, flag)
SBMO elsize;
unsigned flag;
{       register struct smblk *sm, *smp;
        register int cnt;

        if((sm = sbm_mget(SMNODES*elsize,SMNODES*elsize)) == 0)
                return(0);

        sm->smflags |= flag;            /* Indicate type of nodes */
        cnt = sm->smlen/elsize;         /* Find # nodes that will fit */
        sm->smuse = cnt * elsize;       /* Actual size used */
        smp = (struct smblk *)(sm->smaddr);     /* Ptr to 1st loc of mem */
        sbm_lmak(smp, (SBMO)elsize, cnt);       /* Build freelist */
        return(smp);            /* Return 1st free node.  Caller is */
                                /* responsible for setting freelist ptr. */
}

/* SBM_LMAK - Build freelist of typeless nodes.
 *      Note this does not allocate memory, it just converts an already
 * allocated memory area.
 */
struct smblk *
sbm_lmak(addr, elsize, num)
SBMA addr;
SBMO elsize;
int num;
{       register struct smblk *sm, *smp;
        register int cnt;

        smp = (struct smblk *) addr;
        if((cnt = num) <= 0)
                return(0);
        do {    sm = smp;       /* Save ptr */
                sm->smforw = (smp = (struct smblk *) ((SBMA)smp + elsize));
                sm->smflags = 0;
        } while(--cnt);
        sm->smforw = 0;         /* Last node points to nothing */
        return(sm);             /* Return ptr to last node */
}

/* SBM_NMOV(sm1, sm2, begp, elsize) - Move a typeless node.
 *      Copy sm1 to sm2, adjust ptrs, leave sm1 free.
 */
sbm_nmov(smp1,smp2,begp,elsize)
struct smblk *smp1, *smp2, **begp;
int elsize;
{       register struct smblk *sm;

        bcopy((SBMA)smp1,(SBMA)(sm = smp2), elsize);    /* Copy the stuff */
        if(sm->smforw) sm->smforw->smback = sm;         /* Fix up links */
        if(sm->smback) sm->smback->smforw = sm;
        else *begp = sm;
}


/* SBM_MGET(min,max) - Get a SMBLK with specified amount of memory.
 *      Returns 0 if none available.
 *      Memory is guaranteed to start on word boundary, but may not
 * end on one.  Note that sbm_mfree is responsible for
 * ensuring that free mem starts word-aligned.
 *      A subtle but major concern of this code is the number of freelist
 * nodes gobbled by a single call.  If the freelist happens to not have
 * enough nodes, then a recursive call to sbm_mget is made (via sbm_nget)
 * in order to allocate a new batch of freelist nodes!  sbm_nget will
 * always provide a single "spare" node during such an allocation, but
 * there is only one and it is essential that sbm_mget gobble only ONE
 * (if any) during such a call, which is indicated by sbm_nxtra==0.
 *      The maximum # of freelist nodes that sbm_mget can gobble is
 * 2, when (1) NXM memory is obtained, and a SM is needed to point at
 * the new free mem, plus (2) the resulting SM is too big, and has to
 * be split up, which requires another SM for the remainder.
 *      The "used-NXM" smblk is set up at init time precisely in order to
 * avoid the necessity of creating it here when sbrk stops winning, since
 * that would require yet another freelist node and make it possible for
 * sbm_mget to gobble 3 during one call -- too many.
 *      Further note: the sbm_nfl checks are necessary in order
 * to ensure that a SM node is available for use by sbm_split.  Otherwise
 * the calls to sbm_split might create a new SM freelist by gobbling the
 * very memory which we are hoping to return!
 */
SBMO sbm_chksiz = SMCHUNKSIZ;   /* Current chunk size to feed sbrk */

struct smblk *
sbm_mget(cmin,cmax)
SBMO cmin,cmax;
{       register struct smblk *sm, *sml;
        register SBMO csiz;
        register SBMA addr, xaddr;

        if((sm = sbm_list) == 0         /* If never done, */
          && (sm = sbm_init((SBMA)0,(SBMO)0)) == 0) /* initialize mem alloc stuff. */
                return(0);              /* Can't init??? */

        /* Round up sizes to word boundary */
        if(rndrem(cmin)) cmin = rndup(cmin);
        if(rndrem(cmax)) cmax = rndup(cmax);

        /* Search for a free block having enough memory.
         * If run into a free-NXM block, always "win", since there may be
         * a combination of preceding free-mem and new mem which will satisfy
         * the request.  If it turns out this didn't work, we'll just fail
         * a little farther on.
         */
retry:  csiz = cmin;            /* Set size that will satisfy us */
        do {
                if( ((sm->smflags&SM_USE) == 0)
                  && ((sm->smlen >= csiz) || (sm->smflags&SM_NXM)) )
                        break;
        } while(sm = sm->smforw);
        if(sm == 0)
                return(0);      /* Found none that minimum would fit */

        if(sm->smflags&SM_NXM)
        {       /* Found free area, but it's marked NXM and the system
                 * must be persuaded (via sbrk) to let us use that portion
                 * of our address space.  Grab a good-sized chunk.
                 */
                if(sbm_nfl == 0)        /* Verify a spare SM node is avail */
                        goto getnod;    /* Nope, must get one. */

                /* Decide amount of mem to ask system for, via sbrk.
                 * The fine point here is the check of sbm_nxtra to make sure
                 * that, when building more freelist nodes, we don't have
                 * to use more than one SM node in the process.  If we
                 * asked for too much mem, we'd have to use a SM node
                 * to hold the excess after splitting.
                 */
                csiz = cmax;
                if(sbm_nxtra            /* If normal then try for big chunk */
                  && csiz < sbm_chksiz) csiz = sbm_chksiz;      /* Max */
                if (csiz > sm->smlen) csiz = sm->smlen;         /* Min */

                /* Get the NXM mem */
                if((addr = (SBMA)SBM_SBRK(csiz)) != sm->smaddr)
                {       /* Unexpected value returned from SBRK! */

                        if((int)addr != 0 && (int)addr != -1)
                        {       return(sbm_err(0,"SBRK %o != %o", addr,
                                                sm->smaddr));
#if 0
                        /* If value indicates couldn't get the stuff, then
                         * we have probably hit our limit and the rest of
                         * NXM should be declared "used" to prevent further
                         * hopeless sbrk calls.  We split off the portion
                         * of NXM that is known for sure to be unavailable,
                         * and mark it "used".  If a "used NXM" area already
                         * exists following this one, the two are merged.
                         * The chunk size is then reduced by half, so
                         * only log2(SMCHUNKSIZ) attempts will be made, and
                         * we try again.
                         */
                        /* If returned some mem which starts outside
                         * the NXM then something is screwed up. */
                        if(addr < sm->smaddr
                          || (addr >= sm->smaddr+sm->smlen))
                                return(sbm_err(0,"SBRK %o != %o",
                                                addr, sm->smaddr));
                        /* Got some mem, falls within NXM.
                         * Presumably someone else has called sbrk
                         * since last time, so we need to fence off
                         * the intervening area. */
                        sm = sbm_split((sml=sm),(addr - sm->smaddr));
                        sml->smflags |= SM_USE|SM_EXT;
                        return(sbm_mget(cmin,cmax));
#endif /*COMMENT*/
                        }

                        /* Handle case of SBRK claiming no more memory.
                         * Gobble as much as we can, and then turn this NXM
                         * block into a free-mem block, and leave the
                         * remainder in the used-NXM block (which should
                         * immediately follow this free-NXM block!)
                         */
                        if(!(sml = sm->smforw)  /* Ensure have used-NXM blk */
                          || (sml->smflags&(SM_USE|SM_NXM))
                                != (SM_USE|SM_NXM))
                                return(sbm_err(0,"No uNXM node!"));
                        xaddr = sm->smaddr;     /* Use this for checking */
                        sm->smuse = 0;          /* Use this for sum */
                        for(csiz = sm->smlen; csiz > 0;)
                        {       addr = SBM_SBRK(csiz);
                                if((int)addr == 0 || (int)addr == -1)
                                {       csiz >>= 1;
                                        continue;
                                }
                                if(addr != xaddr)
                                        return(sbm_err(0,"SBRK %o != %o", addr,
                                                        xaddr));
                                sm->smuse += csiz;
                                xaddr += csiz;
                        }

                        /* Have gobbled as much from SBRK as we could.
                         * Turn the free-NXM block into a free-mem block,
                         * unless we got nothing, in which case just merge
                         * it into the used-NXM block and continue
                         * searching from this point.
                         */
                        if(!(csiz = sm->smuse)) /* Get total added */
                        {       sm->smflags = sml->smflags;     /* Ugh. */
                                sbm_mmrg(sm);
                                goto retry;     /* Keep looking */
                        }
                        else
                        {       sml->smaddr = sm->smaddr + csiz;
                                sml->smlen += sm->smlen - csiz;
                                sm->smlen = csiz;
                                sm->smflags &= ~SM_NXM; /* No longer NXM */
                        }
                }

                /* Here when we've acquired CSIZ more memory from sbrk.
                 * If preceding mem area is not in use, merge new mem
                 * into it.
                 */
                if((sml = sm->smback) &&
                  (sml->smflags&(SM_USE|SM_NXM))==0)    /* Previous free? */
                {       sml->smlen += csiz;             /* Yes, simple! */
                        sm->smaddr += csiz;             /* Fix up */
                        if((sm->smlen -= csiz) == 0)    /* If no NXM left,*/
                                sbm_mmrg(sml);  /* Merge NXM node w/prev */
                        sm = sml;               /* Prev is now winning node */
                }
                else
                {       /* Prev node isn't a free area.  Split up the NXM
                         * node to account for acquired mem, unless we
                         * gobbled all the mem available.
                         */
                        if(sm->smlen > csiz     /* Split unless all used */
                          && !sbm_split(sm,csiz))  /* Call shd always win */
                                return(sbm_err(0,"getsplit err: %o",sm));
                        sm->smflags &= ~SM_NXM; /* Node is now real mem */
                }

                /* Now make a final check that we have enough memory.
                 * This can fail because SBRK may not have been able
                 * to gobble enough memory, either because (1) not
                 * as much NXM was available as we thought,
                 * or (2) we noticed the free-NXM area and immediately
                 * gambled on trying it without checking any lengths.
                 * In any case, we try again starting from the current SM
                 * because there may be more free mem higher up (eg on
                 * stack).
                 */
                if(sm->smlen < cmin)
                        goto retry;
        }

        /* Check to see if node has too much mem.  This is especially true
         * for memory just acquired via sbrk, which gobbles a huge chunk each
         * time.  If there's too much, we split up the area.
         */
        if(sm->smlen > cmax+FUDGE)      /* Got too much? (Allow some fudge)*/
                /* Yes, split up so don't gobble too much. */
                if(sbm_nfl)                     /* If success guaranteed, */
                        sbm_split(sm,cmax);     /* split it, all's well. */
                else goto getnod;

        sm->smuse = 0;
        sm->smflags |= SM_USE;  /* Finally seize it by marking "in-use". */
        return(sm);

        /* Come here when we will need to get another SM node but the
         * SM freelist is empty.  We have to forget about using the area
         * we just found, because sbm_nget may gobble it for the
         * freelist.  So, we first force a refill of the freelist, and then
         * invoke ourselves again on what's left.
         */
getnod:
        if(sml = sbm_nget())    /* Try to build freelist */
        {       sbm_nfre(sml);  /* Won, give node back, */
                sm = sbm_list;  /* and retry, starting over! */
                goto retry;
        }
        /* Failed.  Not enough memory for both this request
         * and one more block of SM nodes.  Since such a SM_MNODS
         * block isn't very big, we are so close to the limits that it
         * isn't worth trying to do something fancy here to satisfy the
         * original request.  So we just fail.
         */
        return(0);
}

#ifdef DBG_SIZE
/* Code for debugging stuff by imposing an artificial limitation on size
 * of available memory.
 */
SBMO sbm_dlim = MAXSBMO;        /* Amount of mem to allow (default is max) */

char *
sbm_brk(size)
unsigned size;
{       register char *addr;

        if(size > sbm_dlim) return(0);
        addr = sbrk(size);
        if((int)addr == 0 || (int)addr == -1)
                return(0);
        sbm_dlim -= size;
        return(addr);
}
#endif /*DBG_SIZE*/
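
/* A sketch of exercising the hook above (illustrative only): compile
 * with DBG_SIZE defined, then shrink sbm_dlim in a test driver to
 * provoke allocation failures on purpose.
 */
#if 0
        extern SBMO sbm_dlim;

        sbm_dlim = 4096;        /* Pretend only 4K bytes remain */
        /* From here on, sbm_mget fails as soon as sbm_brk refuses */
#endif /*EXAMPLE*/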


/* SBM_MFREE(sm) - Free up an allocated memory area.
 */
sbm_mfree(sm)
register struct smblk *sm;
{       register struct smblk *smx;
        register SBMO crem;

        sm->smflags &= ~SM_USE;         /* Say mem is free */
        if((smx = sm->smback)           /* Check preceding mem */
          && (smx->smflags&(SM_USE|SM_NXM))==0) /* If it's free, */
                sbm_mmrg(sm = smx);             /* then merge 'em. */
        if((smx = sm->smforw)           /* Check following mem */
          && (smx->smflags&(SM_USE|SM_NXM))==0) /* Again, if free, */
                sbm_mmrg(sm);                   /* merge them. */

        if(sm->smlen == 0)              /* Just in case, chk for null blk */
        {       if(smx = sm->smback)    /* If pred exists, */
                        sbm_mmrg(smx);  /* merge quietly. */
                else {
                        sbm_list = sm->smforw;  /* 1st node on list, so */
                        sbm_nfre(sm);           /* simply flush it. */
                }
                return;
        }

        /* This code is slightly over-general for some machines.
         * The pointer subtraction is done in order to get a valid integer
         * offset value regardless of the internal representation of a
         * pointer.  We cannot reliably force alignment via casts; some C
         * implementations treat that as a no-op.
         */
        if(crem = rndrem(sm->smaddr - sbm_lowaddr))     /* On word bndry? */
        {       /* No -- must adjust.  All free mem blks MUST, by fiat,
                 * start on word boundary.  Here we fix things by
                 * making the leftover bytes belong to the previous blk,
                 * no matter what it is used for.  Prev blk is guaranteed to
                 * (1) Exist (this cannot be 1st blk since 1st is known to
                 * start on wd boundary) and to be (2) Non-free (else it would
                 * have been merged).
                 */
                if((smx = sm->smback) == 0)     /* Get ptr to prev blk */
                {       sbm_err(0,"Align err"); /* Catch screws */
                        return;
                }
                crem = WDSIZE - crem;   /* Find # bytes to flush */
                if(crem >= sm->smlen)   /* Make sure node has that many */
                {       sbm_mmrg(smx);  /* Flush node to avoid zero length */
                        return;
                }
                smx->smlen += crem;     /* Make stray bytes part of prev */
                sm->smaddr += crem;     /* And flush from current. */
                sm->smlen -= crem;
        }
}


/* SBM_EXP - Expand (or shrink) size of an allocated memory chunk.
 *      "size" is the desired new size; it may be larger or smaller
 * than the current size.
 */
struct smblk *
sbm_exp(sm,size)
register struct smblk *sm;
register SBMO size;
{       register struct smblk *smf;
        register SBMO mexp, pred, succ;

        if(sm->smlen >= size)           /* Do we want truncation? */
                goto realo2;            /* Yup, go split block */

        /* Block is expanding. */
        mexp = size - sm->smlen;        /* Get # bytes to expand by */
        pred = succ = 0;
        if((smf = sm->smforw)           /* See if free mem follows */
          && (smf->smflags&(SM_USE|SM_NXM)) == 0)
                if((succ = smf->smlen) >= mexp)
                        goto realo1;    /* Quick stuff if succ OK */

        if((smf = sm->smback)           /* See if free mem precedes */
          && (smf->smflags&(SM_USE|SM_NXM)) == 0)
                pred = smf->smlen;

        /* If not enough free space combined on both sides of this chunk,
         * we have to look for a completely new block.
         */
        if(pred+succ < mexp)
        {       if((smf = sbm_mget(size,size)) == 0)
                        return(0);      /* Couldn't find one */
                else pred = 0;          /* Won, indicate new block */
        }

        /* OK, must copy either into new block or down into predecessor
         * (overlap is OK as long as bcopy moves 1st byte first)
         */
        bcopy(sm->smaddr, smf->smaddr, sm->smlen);
        smf->smflags = sm->smflags;     /* Copy extra attribs */
        smf->smuse = sm->smuse;
        if(!pred)                       /* If invoked sbm_mget */
        {       sbm_mfree(sm);          /* then must free up old area */
                return(smf);            /* and can return immediately. */
        }
        sbm_mmrg(smf);                  /* Merge current into pred blk */
        sm = smf;                       /* Now pred is current blk. */

        if(succ)
realo1:         sbm_mmrg(sm);           /* Merge succ into current blk */
realo2: if(sm->smlen > size             /* If now have too much, */
          && sbm_split(sm, size))       /* split up and possibly */
                sbm_mfree(sm->smforw);  /* free up unused space. */
        return(sm);

        /* Note that sbm_split can fail if it can't get a free node,
         * which is only possible if we are reducing the size of an area.
         * If it fails, we just return anyway without truncating the area.
         */
}
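
/* Typical call pattern for the routine above (a sketch, not library
 * code): try to grow an area; on failure the old block is untouched.
 */
#if 0
        struct smblk *new;

        if((new = sbm_exp(sm, sm->smlen + 512)) == 0)
                /* Expansion failed; sm still describes the old area */ ;
        else sm = new;          /* May have moved to a new block */
#endif /*EXAMPLE*/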


/* SBM_MMRG(sm) - Merge a memory area with the area following it.
 *      The node (and memory area) following the SM pointed to are
 * merged in and the successor node freed up.  The flags
 * and smuse of the current SM (which is not moved or anything)
 * remain the same.
 */
sbm_mmrg(smp)
struct smblk *smp;
{       register struct smblk *sm, *sm2;

        sm = smp;
        sm->smlen += (sm2 = sm->smforw)->smlen; /* Add succ's len */
        if(sm->smforw = sm2->smforw)            /* and fix linkages */
                sm->smforw->smback = sm;
        sbm_nfre(sm2);                  /* now can flush succ node */
}

/* SBM_SPLIT - Split up an area (gets a new smblk to point to split-off
 *      portion.)
 * Note returned value is ptr to 2nd smblk, since this is a new one.
 * Ptr to 1st remains valid since original smblk stays where it is.
 * NOTE: Beware of splitting up free mem (SM_USE == 0) since sbm_nget may
 * steal it out from under unless precautions are taken!  See comments
 * at sbm_mget related to this.
 */
struct smblk *
sbm_split(smp,coff)
struct smblk *smp;
SBMO coff;
{       register struct smblk *sm, *smx;
        register SBMO csiz;

        if((sm = smp)->smlen <= (csiz = coff))
                return(0);
        if((smx = sbm_nget()) == 0)
                return(0);
        smx->smlen = sm->smlen - csiz;          /* Set 2nd size */
        smx->smaddr = sm->smaddr + csiz;        /* Set 2nd addr */
        sm->smlen = csiz;                       /* Take from 1st size */
        smx->smflags = sm->smflags;             /* Copy flags */
        if(smx->smforw = sm->smforw)            /* Splice 2nd after 1 */
                smx->smforw->smback = smx;
        smx->smback = sm;
        sm->smforw = smx;               /* Put 2nd into chain */
        return(smx);                    /* Return ptr to 2nd smblk */
}
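
/* Worked example of the split semantics above (a sketch; the numbers
 * are arbitrary): given an smblk SM describing a 100-byte area,
 */
#if 0
        smx = sbm_split(sm, 60);
        /* Now: sm->smlen == 60, smx->smaddr == sm->smaddr + 60,
         *      smx->smlen == 40, sm->smforw == smx, smx->smback == sm.
         */
#endif /*EXAMPLE*/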

#if 0   /* Replaced by "bcopy" for system-dep efficiency */
/* SBM_SCPY - Copy string of bytes.  Somewhat machine-dependent;
 *      Tries to be clever about using word moves instead of byte moves.
 */
sbm_scpy(from, to, count)       /* Copy count bytes from -> to */
char *from, *to;
unsigned count;
{       register char *s1, *s2;
        register unsigned cnt;
        int tmp;

        if((cnt = count) == 0)
                return;
        s1 = from;
        s2 = to;
        while(rndrem((int)s1))          /* Get 1st ptr aligned */
        {       *s2++ = *s1++;
                if(--cnt == 0) return;
        }
        if(rndrem((int)s2) == 0)        /* Do wd move if ptr 2 now aligned */
        {
#ifdef DUMBPCC  /* Code for dumber (Portable C type) compiler */
                register WORD *ap, *bp;
                tmp = cnt;
                ap = (WORD *) s1;
                bp = (WORD *) s2;
                if(cnt = rnddiv(cnt))
                        do { *bp++ = *ap++; }
                        while(--cnt);
                if((cnt = rndrem(tmp)) == 0)
                        return;
                s1 = (char *) ap;
                s2 = (char *) bp;
#else
                /* Tight loop for efficient copying on 11s */
                tmp = cnt;
                if(cnt = rnddiv(cnt))
                        do { *((WORD *)s2)++ = *((WORD *)s1)++; }
                        while(--cnt);
                if((cnt = rndrem(tmp)) == 0)
                        return;
#endif /*-DUMBPCC*/
        }
        do { *s2++ = *s1++; }           /* Finish up with byte loop */
        while(--cnt);
}
#endif /*COMMENT*/

struct smblk *          /* If it returns at all, this is most common type */
sbm_err(val,str,a0,a1,a2,a3)
char *str;
struct smblk *val;
{       int *sptr;

        sptr = (int *) &sptr;   /* Point to self on stack */
        sptr += 5;              /* Point to return addr */
        if((int)sbm_debug==1)
                abort();
        if(sbm_debug)
                (*sbm_debug)(0,*sptr,str,a0,a1,a2,a3);
        return(val);
}


/* These routines correspond to the V7 LIBC routines as described
 * in the V7 UPM (3).  They should provide satisfactory emulation
 * if the documentation is correct.  Replacement is necessary since
 * the SBM routines are jealous and cannot tolerate competition for
 * calls of SBRK; i.e. the memory being managed must be contiguous.
 */

/* Guaranteed to return word-aligned pointer to area of AT LEAST
 * requested size.  Area size is rounded up to word boundary.
 */

char *
malloc(size)
unsigned size;
{       register struct smblk *sm, **sma;
        register SBMO siz;

        siz = rndup(size + sizeof (struct smblk *));    /* Make room for ptr */
        if((sm = sbm_mget(siz,siz)) == 0)
                return(0);
        *(sma = (struct smblk **)sm->smaddr) = sm;      /* Store ptr in addr-1 */
        return((char *)++sma);
}
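
/* Layout sketch for the emulation above (illustrative, not library
 * code): the word just below the returned pointer holds the owning
 * smblk, which free() and realloc() recover and validate:
 */
#if 0
        char *p = malloc(100);
        struct smblk *sm = ((struct smblk **)p)[-1];
        /* sm->smaddr points at that hidden word, i.e.
         * sm->smaddr == (SBMA)(p - sizeof(struct smblk *))
         */
        free(p);
#endif /*EXAMPLE*/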

char *
alloc(size)     /* For V6 programs - note different failure value! */
unsigned size;
{       register char *addr;
        return((addr = malloc(size)) ? addr : (char *) -1);
}

free(ptr)
char *ptr;
{       register struct smblk *sm, **smp;

        smp = &((struct smblk **)ptr)[-1];      /* Point to addr-1 */
        sm = *smp;                              /* Pluck SM ptr therefrom */
        if(((sm->smflags&0377) != SM_NID) || sm->smaddr != (SBMA)smp)
                return((int)sbm_err(0,"free: bad arg %o", ptr));
        sbm_mfree(sm);
        return(1);
}

char *
realloc(ptr,size)
char *ptr;
unsigned size;
{       register struct smblk *sm, **smp;

        smp = &((struct smblk **)ptr)[-1];      /* Point to addr-1 */
        sm = *smp;                              /* Pluck SM ptr therefrom */
        if(((sm->smflags&0377) != SM_NID) || (sm->smaddr != (SBMA)smp))
                return((char *)sbm_err(0,"realloc: bad arg %o",ptr));
        if((sm = sbm_exp(sm, rndup(size+(sizeof(struct smblk *))))) == 0)
                return(0);
        *(smp = (struct smblk **)sm->smaddr) = sm;      /* Save smblk ptr */
        return((char *)++smp);
}

char *
calloc(nelem,elsize)
unsigned nelem, elsize;
{       register SBMO cmin;
        register WORD *ip;      /* Clear in units of words */
        register char *addr;

        if((cmin = nelem*elsize) == 0   /* Find # bytes to get */
          || (addr = malloc(cmin)) == 0)        /* Get it */
                return(0);
        ip = (WORD *) addr;             /* Set up ptr to area */
        cmin = rnddiv(cmin+WDSIZE-1);   /* Find # words to clear */
        do { *ip++ = 0; } while (--cmin);       /* Zap the area */
        return(addr);
}


/* SBM_NGC() - Specific routine for GC'ing SMBLK nodes.
 *
 * SBM_XNGC(begp, elsize, type) - Compact nodes of specified type.
 *      Scans allocated mem from low to high to find chunks with nodes of
 * the specified type.
 *      Flushes current freelist and rebuilds it as scan progresses,
 * such that 1st thing on list is lowest-addr node.  When a node is
 * seen that can be moved, new node is acquired from freelist if
 * it exists, otherwise no move is made.  If a chunk has been scanned
 * and no active nodes remain, it is flushed and freelist updated.
 * NOTE: This has not yet been verified to work with nodes of any
 * type other than SMBLK.
 */

sbm_ngc()
{       register struct smblk *sm;
        if(!(sm = sbm_nxtra))
                return((int)sbm_err(0,"Zero sbm_nxtra"));
        sm->smflags |= SM_USE;  /* Ensure this one isn't GC'd */
        sbm_xngc(&sbm_nfl, sizeof(struct smblk), SM_MNODS);
        sm->smflags = 0;        /* Flush temporary crock */
}

sbm_xngc(begp, elsize, flag)
struct smblk **begp;
unsigned elsize, flag;
{       register struct smblk *sm, *chk, *smf;
        struct smblk *ftail, *savtail;
        int cnt, inuse;

        *begp = ftail = 0;              /* Flush node freelist */
        for(chk = sbm_list; chk; chk = chk->smforw)
          if(chk->smflags&flag)
          {     sm = (struct smblk *) chk->smaddr;
                cnt = (chk->smuse)/elsize;
                savtail = ftail;
                inuse = 0;
                smf = *begp;
                /* Set up ptr to 1st freelist node */
                while(--cnt >= 0)
                {       /* Here decide if movable */
                        if(sm->smflags && smf   /* Live and have copy place */
                          && (
                                (sm->smflags&SM_USE) == 0       /* Free mem? */
                                || (sm->smflags&(SM_MNODS|SM_DNODS))
                             )
                          && sm->smback)        /* has backptr (see ncpy) */
                        {       /* Move the node */
                                *begp = smf->smforw;    /* Get free node */
                                if(smf == ftail)
                                        ftail = 0;
                                if(smf == savtail)
                                        savtail = 0;
                                /* Move node.  Already checked for back ptr
                                 * of 0 since no obvious way to tell where
                                 * the ptr to list is kept.  Sigh.
                                 */
                                sbm_nmov(sm,smf,(struct smblk **)0,elsize);
                                /* Get ptr to new freelist node.  Note
                                 * check to ensure that it is not in this
                                 * same chunk (if it is, no point in moving
                                 * any nodes!)
                                 */
                                if((smf = *begp) >= chk)
                                        smf = 0;        /* Zero if same chk */
                                sm->smflags = 0;        /* Make node free */
                        }
                        /* At this point, not movable */
                        if(sm->smflags == 0)    /* Free node? */
                        {       if(ftail)       /* Add to freelist */
                                        ftail->smforw = sm;
                                ftail = sm;
                                if(*begp == 0)
                                        *begp = sm;
                                sm->smforw = 0;
                        }
                        else inuse++;
                        sm = (struct smblk *)((SBMA)sm + elsize);
                }
                if(inuse == 0                   /* All free? */
                  && (sm = chk->smback))        /* & not 1st? */
                {       if(savtail)             /* Edit freelist */
                                (ftail = savtail)->smforw = 0;
                        else *begp = ftail = 0;
                        sbm_mfree(chk);
                        chk = sm;
                }
          }
}

/*
 * Note that proc must return zero for the scan to continue; a nonzero
 * return aborts the loop, and that selfsame value is returned as the
 * value of sbm_nfor.
 */
sbm_nfor(flag,nodsiz,proc,arg)
int flag;
int (*proc)();
int nodsiz;
struct sbfile *arg;
{       register struct smblk *sm, *np;
        register int cnt;
        int res;

        for(sm = sbm_list; sm; sm = sm->smforw)
          if(sm->smflags&flag)
          {     np = (struct smblk *) sm->smaddr;
                cnt = sm->smuse/nodsiz;
                do {
                        if(np->smflags)
                                if(res = (*proc)(np,arg))
                                        return(res);
                        np = (struct smblk *)((SBMA)np + nodsiz);
                } while(--cnt);
          }
        return(0);
}
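
/* Hypothetical use of the iterator above (a sketch, not part of the
 * library; "nlive" and "cntone" are invented names): count live nodes
 * in all SM_MNODS chunks.
 */
#if 0
int nlive;                      /* Count of live nodes seen */

int cntone(np,arg)
struct smblk *np;
struct sbfile *arg;
{       nlive++;
        return(0);              /* Zero => keep scanning */
}

        /* ... then, from the caller: */
        nlive = 0;
        sbm_nfor(SM_MNODS, sizeof(struct smblk), cntone, (struct sbfile *)0);
#endif /*EXAMPLE*/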