source: branches/minix3-book/servers/fs/cache.c@4

Last change on this file since 4 was 4, checked in by Mattia Monga, 13 years ago

Import of the book sources

File size: 14.3 KB
/* The file system maintains a buffer cache to reduce the number of disk
 * accesses needed. Whenever a read or write to the disk is done, a check is
 * first made to see if the block is in the cache. This file manages the
 * cache.
 *
 * The entry points into this file are:
 *   get_block:    request to fetch a block for reading or writing from cache
 *   put_block:    return a block previously requested with get_block
 *   alloc_zone:   allocate a new zone (to increase the length of a file)
 *   free_zone:    release a zone (when a file is removed)
 *   rw_block:     read or write a block from the disk itself
 *   invalidate:   remove all the cache blocks on some device
 *   flushall:     flush all dirty blocks for one device
 *   rw_scattered: read or write multiple blocks from a device in one request
 */

#include "fs.h"
#include <minix/com.h>
#include "buf.h"
#include "file.h"
#include "fproc.h"
#include "super.h"

FORWARD _PROTOTYPE( void rm_lru, (struct buf *bp) );

/*===========================================================================*
 *                              get_block                                    *
 *===========================================================================*/
PUBLIC struct buf *get_block(dev, block, only_search)
register dev_t dev;             /* on which device is the block? */
register block_t block;         /* which block is wanted? */
int only_search;                /* if NO_READ, don't read, else act normal */
{
/* Check to see if the requested block is in the block cache. If so, return
 * a pointer to it. If not, evict some other block and fetch it (unless
 * 'only_search' is NO_READ). All the blocks in the cache that are not in use
 * are linked together in a chain, with 'front' pointing to the least recently
 * used block and 'rear' to the most recently used block. If 'only_search' is
 * NO_READ, the block being requested will be overwritten in its entirety, so
 * it is only necessary to see if it is in the cache; if it is not, any free
 * buffer will do and there is no need to read the block in from disk.
 * If 'only_search' is PREFETCH, the block need not be read from the disk,
 * and the device is not to be marked on the block, so callers can tell if
 * the block returned is valid.
 * In addition to the LRU chain, there is also a hash chain to link together
 * blocks whose block numbers end with the same bit strings, for fast lookup.
 */
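
  /* Hash sketch (illustrative, assuming the definitions in buf.h): buf_hash[]
   * has NR_BUF_HASH buckets and HASH_MASK is NR_BUF_HASH - 1, a power of two
   * minus one. With 128 buckets, blocks 5, 133 and 261 all hash to bucket 5,
   * since only the low seven bits of the block number are kept.
   */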

  int b;
  register struct buf *bp, *prev_ptr;

  /* Search the hash chain for (dev, block). Do_read() can use
   * get_block(NO_DEV ...) to get an unnamed block to fill with zeros when
   * someone wants to read from a hole in a file, in which case this search
   * is skipped.
   */
  if (dev != NO_DEV) {
        b = (int) block & HASH_MASK;
        bp = buf_hash[b];
        while (bp != NIL_BUF) {
                if (bp->b_blocknr == block && bp->b_dev == dev) {
                        /* Block needed has been found. */
                        if (bp->b_count == 0) rm_lru(bp);
                        bp->b_count++;  /* record that block is in use */

                        return(bp);
                } else {
                        /* This block is not the one sought. */
                        bp = bp->b_hash; /* move to next block on hash chain */
                }
        }
  }

  /* Desired block is not on available chain. Take oldest block ('front'). */
  if ((bp = front) == NIL_BUF) panic(__FILE__,"all buffers in use", NR_BUFS);
  rm_lru(bp);

  /* Remove the block that was just taken from its hash chain. */
  b = (int) bp->b_blocknr & HASH_MASK;
  prev_ptr = buf_hash[b];
  if (prev_ptr == bp) {
        buf_hash[b] = bp->b_hash;
  } else {
        /* The block just taken is not on the front of its hash chain. */
        while (prev_ptr->b_hash != NIL_BUF)
                if (prev_ptr->b_hash == bp) {
                        prev_ptr->b_hash = bp->b_hash;  /* found it */
                        break;
                } else {
                        prev_ptr = prev_ptr->b_hash;    /* keep looking */
                }
  }

  /* If the block taken is dirty, make it clean by writing it to the disk.
   * Avoid hysteresis by flushing all other dirty blocks for the same device.
   */
  if (bp->b_dev != NO_DEV) {
        if (bp->b_dirt == DIRTY) flushall(bp->b_dev);
  }

  /* Fill in block's parameters and add it to the hash chain where it goes. */
  bp->b_dev = dev;              /* fill in device number */
  bp->b_blocknr = block;        /* fill in block number */
  bp->b_count++;                /* record that block is being used */
  b = (int) bp->b_blocknr & HASH_MASK;
  bp->b_hash = buf_hash[b];
  buf_hash[b] = bp;             /* add to hash list */

  /* Go get the requested block unless searching or prefetching. */
  if (dev != NO_DEV) {
        if (only_search == PREFETCH) bp->b_dev = NO_DEV;
        else
        if (only_search == NORMAL) {
                rw_block(bp, READING);
        }
  }
  return(bp);                   /* return the newly acquired block */
}
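
/* Usage sketch (illustrative, not part of the original source): a typical
 * caller pairs get_block() with put_block(). Assuming a valid device 'dev',
 * a block number 'blk', and hypothetical 'data'/'chunk' variables, updating
 * part of one data block might look like:
 *
 *      struct buf *bp;
 *      bp = get_block(dev, blk, NORMAL);       (read from cache or disk)
 *      memcpy(bp->b_data, data, chunk);        (modify the cached copy)
 *      bp->b_dirt = DIRTY;                     (mark it for write-back)
 *      put_block(bp, PARTIAL_DATA_BLOCK);      (release to rear of LRU)
 *
 * Passing NO_READ instead of NORMAL skips the disk read, which is safe only
 * when the caller will overwrite the block in its entirety.
 */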

/*===========================================================================*
 *                              put_block                                    *
 *===========================================================================*/
PUBLIC void put_block(bp, block_type)
register struct buf *bp;        /* pointer to the buffer to be released */
int block_type;                 /* INODE_BLOCK, DIRECTORY_BLOCK, or whatever */
{
/* Return a block to the list of available blocks. Depending on 'block_type'
 * it may be put on the front or rear of the LRU chain. Blocks that are
 * expected to be needed again shortly (e.g., partially full data blocks)
 * go on the rear; blocks that are unlikely to be needed again shortly
 * (e.g., full data blocks) go on the front. Blocks whose loss can hurt
 * the integrity of the file system (e.g., inode blocks) are written to
 * disk immediately if they are dirty.
 */
  if (bp == NIL_BUF) return;    /* it is easier to check here than in caller */

  bp->b_count--;                /* there is one use fewer now */
  if (bp->b_count != 0) return; /* block is still in use */

  bufs_in_use--;                /* one fewer block buffer in use */

  /* Put this block back on the LRU chain. If the ONE_SHOT bit is set in
   * 'block_type', the block is not likely to be needed again shortly, so put
   * it on the front of the LRU chain where it will be the first one to be
   * taken when a free buffer is needed later.
   */
  if (bp->b_dev == DEV_RAM || block_type & ONE_SHOT) {
        /* Block probably won't be needed quickly. Put it on front of chain.
         * It will be the next block to be evicted from the cache.
         */
        bp->b_prev = NIL_BUF;
        bp->b_next = front;
        if (front == NIL_BUF)
                rear = bp;      /* LRU chain was empty */
        else
                front->b_prev = bp;
        front = bp;
  } else {
        /* Block probably will be needed quickly. Put it on rear of chain.
         * It will not be evicted from the cache for a long time.
         */
        bp->b_prev = rear;
        bp->b_next = NIL_BUF;
        if (rear == NIL_BUF)
                front = bp;
        else
                rear->b_next = bp;
        rear = bp;
  }

  /* Some blocks are so important (e.g., inodes, indirect blocks) that they
   * should be written to the disk immediately to avoid messing up the file
   * system in the event of a crash.
   */
  if ((block_type & WRITE_IMMED) && bp->b_dirt==DIRTY && bp->b_dev != NO_DEV) {
        rw_block(bp, WRITING);
  }
}
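
/* A note on 'block_type' (a sketch, assuming the flag layout in buf.h): the
 * block types are small integers, while WRITE_IMMED and ONE_SHOT are single
 * bits that can be OR'ed into the type (in some MINIX versions the named
 * constants already include them). Releasing a freshly modified inode block
 * could then look like:
 *
 *      put_block(bp, INODE_BLOCK | WRITE_IMMED);       (write to disk now)
 *
 * while FULL_DATA_BLOCK | ONE_SHOT would release a block straight to the
 * front of the LRU chain, making it the next eviction candidate.
 */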

/*===========================================================================*
 *                              alloc_zone                                   *
 *===========================================================================*/
PUBLIC zone_t alloc_zone(dev, z)
dev_t dev;                      /* device where zone wanted */
zone_t z;                       /* try to allocate new zone near this one */
{
/* Allocate a new zone on the indicated device and return its number. */

  int major, minor;
  bit_t b, bit;
  struct super_block *sp;

  /* Note that the routine alloc_bit() returns 1 for the lowest possible
   * zone, which corresponds to sp->s_firstdatazone. To convert a value
   * between the bit number, 'b', used by alloc_bit() and the zone number, 'z',
   * stored in the inode, use the formula:
   *     z = b + sp->s_firstdatazone - 1
   * Alloc_bit() never returns 0, since this is used for NO_BIT (failure).
   */
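  /* Worked example (illustrative): if sp->s_firstdatazone is 8, then bit
   * b = 1 names zone z = 1 + 8 - 1 = 8, the first data zone, and bit b = 4
   * names zone 11. The reverse mapping, used below and in free_zone(), is
   * b = z - (sp->s_firstdatazone - 1).
   */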
  sp = get_super(dev);

  /* If the caller gave no hint (z is the first data zone), skip the initial
   * part of the map known to be fully in use.
   */
  if (z == sp->s_firstdatazone) {
        bit = sp->s_zsearch;
  } else {
        bit = (bit_t) z - (sp->s_firstdatazone - 1);
  }
  b = alloc_bit(sp, ZMAP, bit);
  if (b == NO_BIT) {
        err_code = ENOSPC;
        major = (int) (sp->s_dev >> MAJOR) & BYTE;
        minor = (int) (sp->s_dev >> MINOR) & BYTE;
        printf("No space on %sdevice %d/%d\n",
                sp->s_dev == root_dev ? "root " : "", major, minor);
        return(NO_ZONE);
  }
  if (z == sp->s_firstdatazone) sp->s_zsearch = b;      /* for next time */
  return(sp->s_firstdatazone - 1 + (zone_t) b);
}

/*===========================================================================*
 *                              free_zone                                    *
 *===========================================================================*/
PUBLIC void free_zone(dev, numb)
dev_t dev;                      /* device where zone located */
zone_t numb;                    /* zone to be returned */
{
/* Return a zone. */

  register struct super_block *sp;
  bit_t bit;

  /* Locate the appropriate super_block and return bit. */
  sp = get_super(dev);
  if (numb < sp->s_firstdatazone || numb >= sp->s_zones) return;
  bit = (bit_t) (numb - (sp->s_firstdatazone - 1));
  free_bit(sp, ZMAP, bit);
  if (bit < sp->s_zsearch) sp->s_zsearch = bit;
}

/*===========================================================================*
 *                              rw_block                                     *
 *===========================================================================*/
PUBLIC void rw_block(bp, rw_flag)
register struct buf *bp;        /* buffer pointer */
int rw_flag;                    /* READING or WRITING */
{
/* Read or write a disk block. This is the only routine in which actual disk
 * I/O is invoked. If an error occurs, a message is printed here, but the error
 * is not reported to the caller. If the error occurred while purging a block
 * from the cache, it is not clear what the caller could do about it anyway.
 */

  int r, op;
  off_t pos;
  dev_t dev;
  int block_size;

  block_size = get_block_size(bp->b_dev);

  if ( (dev = bp->b_dev) != NO_DEV) {
        pos = (off_t) bp->b_blocknr * block_size;
        op = (rw_flag == READING ? DEV_READ : DEV_WRITE);
        r = dev_io(op, dev, FS_PROC_NR, bp->b_data, pos, block_size, 0);
        if (r != block_size) {
                if (r >= 0) r = END_OF_FILE;
                if (r != END_OF_FILE)
                  printf("Unrecoverable disk error on device %d/%d, block %ld\n",
                        (dev>>MAJOR)&BYTE, (dev>>MINOR)&BYTE, bp->b_blocknr);
                bp->b_dev = NO_DEV;     /* invalidate block */

                /* Report read errors to interested parties. */
                if (rw_flag == READING) rdwt_err = r;
        }
  }

  bp->b_dirt = CLEAN;
}
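
/* Offset arithmetic (illustrative): the byte position passed to dev_io()
 * is simply block number times block size. With a 4096-byte block size,
 * block 100 starts at byte offset 100 * 4096 = 409600 on the device.
 */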

/*===========================================================================*
 *                              invalidate                                   *
 *===========================================================================*/
PUBLIC void invalidate(device)
dev_t device;                   /* device whose blocks are to be purged */
{
/* Remove all the blocks belonging to some device from the cache. */

  register struct buf *bp;

  for (bp = &buf[0]; bp < &buf[NR_BUFS]; bp++)
        if (bp->b_dev == device) bp->b_dev = NO_DEV;
}

/*===========================================================================*
 *                              flushall                                     *
 *===========================================================================*/
PUBLIC void flushall(dev)
dev_t dev;                      /* device to flush */
{
/* Flush all dirty blocks for one device. */

  register struct buf *bp;
  static struct buf *dirty[NR_BUFS];    /* static so it isn't on stack */
  int ndirty;

  for (bp = &buf[0], ndirty = 0; bp < &buf[NR_BUFS]; bp++)
        if (bp->b_dirt == DIRTY && bp->b_dev == dev) dirty[ndirty++] = bp;
  rw_scattered(dev, dirty, ndirty, WRITING);
}

/*===========================================================================*
 *                              rw_scattered                                 *
 *===========================================================================*/
PUBLIC void rw_scattered(dev, bufq, bufqsize, rw_flag)
dev_t dev;                      /* major-minor device number */
struct buf **bufq;              /* pointer to array of buffers */
int bufqsize;                   /* number of buffers */
int rw_flag;                    /* READING or WRITING */
{
/* Read or write scattered data from a device. */

  register struct buf *bp;
  int gap;
  register int i;
  register iovec_t *iop;
  static iovec_t iovec[NR_IOREQS];      /* static so it isn't on stack */
  int j, r;
  int block_size;

  block_size = get_block_size(dev);

  /* (Shell) sort buffers on b_blocknr. */
  gap = 1;
  do
        gap = 3 * gap + 1;
  while (gap <= bufqsize);
  while (gap != 1) {
        gap /= 3;
        for (j = gap; j < bufqsize; j++) {
                for (i = j - gap;
                     i >= 0 && bufq[i]->b_blocknr > bufq[i + gap]->b_blocknr;
                     i -= gap) {
                        bp = bufq[i];
                        bufq[i] = bufq[i + gap];
                        bufq[i + gap] = bp;
                }
        }
  }
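
  /* The gap sequence above is Knuth's 3x+1 series (1, 4, 13, 40, ...).
   * Worked example (illustrative): for bufqsize = 10 the do-while leaves
   * gap = 13, so the outer loop makes a gap-4 pass and then a gap-1 pass;
   * the final gap-1 pass is a plain insertion sort on a nearly sorted array.
   */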

  /* Set up I/O vector and do I/O. The result of dev_io is OK if everything
   * went fine, otherwise the error code for the first failed transfer.
   */
  while (bufqsize > 0) {
        for (j = 0, iop = iovec; j < NR_IOREQS && j < bufqsize; j++, iop++) {
                bp = bufq[j];
                if (bp->b_blocknr != bufq[0]->b_blocknr + j) break;
                iop->iov_addr = (vir_bytes) bp->b_data;
                iop->iov_size = block_size;
        }
        r = dev_io(rw_flag == WRITING ? DEV_SCATTER : DEV_GATHER,
                dev, FS_PROC_NR, iovec,
                (off_t) bufq[0]->b_blocknr * block_size, j, 0);
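
        /* Contiguity check (illustrative): the 'break' above stops the
         * vector at the first gap in block numbers, so with sorted block
         * numbers 7, 8, 9, 12 the first dev_io() call transfers blocks
         * 7-9 in one request and block 12 waits for the next pass.
         */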

        /* Harvest the results. Dev_io reports the first error it may have
         * encountered, but we only care if it's the first block that failed.
         */
        for (i = 0, iop = iovec; i < j; i++, iop++) {
                bp = bufq[i];
                if (iop->iov_size != 0) {
                        /* Transfer failed. An error? Do we care? */
                        if (r != OK && i == 0) {
                                printf(
                                "fs: I/O error on device %d/%d, block %lu\n",
                                        (dev>>MAJOR)&BYTE, (dev>>MINOR)&BYTE,
                                        bp->b_blocknr);
                                bp->b_dev = NO_DEV;     /* invalidate block */
                        }
                        break;
                }
                if (rw_flag == READING) {
                        bp->b_dev = dev;        /* validate block */
                        put_block(bp, PARTIAL_DATA_BLOCK);
                } else {
                        bp->b_dirt = CLEAN;
                }
        }
        bufq += i;
        bufqsize -= i;
        if (rw_flag == READING) {
                /* Don't bother reading more than the device is willing to
                 * give at this time. Don't forget to release those extras.
                 */
                while (bufqsize > 0) {
                        put_block(*bufq++, PARTIAL_DATA_BLOCK);
                        bufqsize--;
                }
        }
        if (rw_flag == WRITING && i == 0) {
                /* We're not making progress; we might keep looping forever.
                 * Buffers remain dirty if un-written. Buffers are lost if
                 * invalidate()d or LRU-removed while dirty. This is better
                 * than keeping unwritable blocks around forever.
                 */
                break;
        }
  }
}

/*===========================================================================*
 *                              rm_lru                                       *
 *===========================================================================*/
PRIVATE void rm_lru(bp)
struct buf *bp;
{
/* Remove a block from its LRU chain. */
  struct buf *next_ptr, *prev_ptr;

  bufs_in_use++;
  next_ptr = bp->b_next;        /* successor on LRU chain */
  prev_ptr = bp->b_prev;        /* predecessor on LRU chain */
  if (prev_ptr != NIL_BUF)
        prev_ptr->b_next = next_ptr;
  else
        front = next_ptr;       /* this block was at front of chain */

  if (next_ptr != NIL_BUF)
        next_ptr->b_prev = prev_ptr;
  else
        rear = prev_ptr;        /* this block was at rear of chain */
}