
root/fs/buffer.c


DEFINITIONS

This source file includes the following definitions.
  1. insert_to_hash
  2. remove_from_hash
  3. insert_on_dirty_list
  4. remove_from_dirty_list
  5. insert_on_free_list
  6. remove_from_free_list
  7. buffer_wait
  8. get_free_buffer
  9. sync_one_buffer
  10. search_buffer_hash
  11. getblk
  12. bread
  13. bwrite
  14. brelse
  15. sync_buffers
  16. invalidate_buffers
  17. reclaim_buffers
  18. buffer_init

   1 /*
   2  * fiwix/fs/buffer.c
   3  *
   4  * Copyright 2018-2021, Jordi Sanfeliu. All rights reserved.
   5  * Distributed under the terms of the Fiwix License.
   6  */
   7 
   8 /*
   9  * buffer.c implements a buffer cache using the LRU (Least Recently Used)
  10  * algorithm, with a free list kept as a circular doubly linked list and a
  11  * chained hash table whose buckets are doubly linked lists.
  12  *
  13  * hash table
  14  * +--------+  +--------------+  +--------------+  +--------------+
  15  * | index  |  |prev|data|next|  |prev|data|next|  |prev|data|next|
  16  * |   0   --> | /  |    | --->  <--- |    | --->  <--- |    |  / |
  17  * +--------+  +--------------+  +--------------+  +--------------+
  18  * +--------+  +--------------+  +--------------+  +--------------+
  19  * | index  |  |prev|data|next|  |prev|data|next|  |prev|data|next|
  20  * |   1   --> | /  |    | --->  <--- |    | --->  <--- |    |  / |
  21  * +--------+  +--------------+  +--------------+  +--------------+
  22  *              (buffer)          (buffer)          (buffer)
  23  *    ...
  24  */
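The structure above can be made concrete with a minimal, stand-alone sketch (plain user-space C, not part of this file; the names NBUCKETS, hashtab, hash(), insert() and lookup() are invented for the illustration). A (dev, block) pair is folded into a bucket index and the chain is walked linearly, which is the same scheme BUFFER_HASH and search_buffer_hash() implement further below:

        #include <stdio.h>

        #define NBUCKETS 8

        struct buf {
                int dev, block;
                struct buf *next_hash;  /* hash chain (singly linked for brevity) */
        };

        static struct buf *hashtab[NBUCKETS];

        /* same folding idea as the BUFFER_HASH macro */
        static int hash(int dev, int block)
        {
                return (dev ^ block) % NBUCKETS;
        }

        /* insert at the head of the bucket, as insert_to_hash() does */
        static void insert(struct buf *b)
        {
                int i = hash(b->dev, b->block);

                b->next_hash = hashtab[i];
                hashtab[i] = b;
        }

        /* walk the chain, as search_buffer_hash() does */
        static struct buf *lookup(int dev, int block)
        {
                struct buf *b;

                for(b = hashtab[hash(dev, block)]; b; b = b->next_hash) {
                        if(b->dev == dev && b->block == block) {
                                return b;
                        }
                }
                return NULL;
        }

        int main(void)
        {
                struct buf a = { 0x0301, 100, NULL };
                struct buf b = { 0x0301, 200, NULL };

                insert(&a);
                insert(&b);
                printf("block 100: %s\n", lookup(0x0301, 100) ? "hit" : "miss");
                printf("block 300: %s\n", lookup(0x0301, 300) ? "hit" : "miss");
                return 0;
        }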
  25 
  26 #include <fiwix/asm.h>
  27 #include <fiwix/kernel.h>
  28 #include <fiwix/sleep.h>
  29 #include <fiwix/sched.h>
  30 #include <fiwix/buffer.h>
  31 #include <fiwix/devices.h>
  32 #include <fiwix/fs.h>
  33 #include <fiwix/mm.h>
  34 #include <fiwix/errno.h>
  35 #include <fiwix/stdio.h>
  36 #include <fiwix/string.h>
  37 #include <fiwix/stat.h>
  38 
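/* fold the device and block numbers into a hash bucket index (XOR, then modulo) */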
  39 #define BUFFER_HASH(dev, block) (((__dev_t)(dev) ^ (__blk_t)(block)) % (NR_BUF_HASH))
  40 #define NR_BUFFERS      (buffer_table_size / sizeof(struct buffer))
  41 #define NR_BUF_HASH     (buffer_hash_table_size / sizeof(unsigned int))
  42 
  43 struct buffer *buffer_table;            /* buffer pool */
  44 struct buffer *buffer_head;             /* buffer pool head */
  45 struct buffer *buffer_dirty_head;
  46 struct buffer **buffer_hash_table;
  47 
  48 static struct resource sync_resource = { NULL, NULL };
  49 
  50 static void insert_to_hash(struct buffer *buf)
  51 {
  52         struct buffer **h;
  53         int i;
  54 
  55         i = BUFFER_HASH(buf->dev, buf->block);
  56         h = &buffer_hash_table[i];
  57 
  58         if(!*h) {
  59                 *h = buf;
  60                 (*h)->prev_hash = (*h)->next_hash = NULL;
  61         } else {
  62                 buf->prev_hash = NULL;
  63                 buf->next_hash = *h;
  64                 (*h)->prev_hash = buf;
  65                 *h = buf;
  66         }
  67 }
  68 
  69 static void remove_from_hash(struct buffer *buf)
  70 {
  71         struct buffer **h;
  72         int i;
  73 
  74         i = BUFFER_HASH(buf->dev, buf->block);
  75         h = &buffer_hash_table[i];
  76 
  77         while(*h) {
  78                 if(*h == buf) {
  79                         if((*h)->next_hash) {
  80                                 (*h)->next_hash->prev_hash = (*h)->prev_hash;
  81                         }
  82                         if((*h)->prev_hash) {
  83                                 (*h)->prev_hash->next_hash = (*h)->next_hash;
  84                         }
  85                         if(h == &buffer_hash_table[i]) {
  86                                 *h = (*h)->next_hash;
  87                         }
  88                         break;
  89                 }
  90                 h = &(*h)->next_hash;
  91         }
  92 }
  93 
  94 static void insert_on_dirty_list(struct buffer *buf)
  95 {
  96         if(buf->prev_dirty || buf->next_dirty || buf == buffer_dirty_head) {
  97                 return;
  98         }
  99 
 100         if(buffer_dirty_head) {
 101                 buf->next_dirty = buffer_dirty_head;
 102                 buffer_dirty_head->prev_dirty = buf;
 103         }
 104         buffer_dirty_head = buf;
 105         kstat.dirty += (PAGE_SIZE / 1024);
 106 }
 107 
 108 static void remove_from_dirty_list(struct buffer *buf)
 109 {
 110         if(buf->next_dirty) {
 111                 buf->next_dirty->prev_dirty = buf->prev_dirty;
 112         }
 113         if(buf->prev_dirty) {
 114                 buf->prev_dirty->next_dirty = buf->next_dirty;
 115         }
 116         if(buf == buffer_dirty_head) {
 117                 buffer_dirty_head = buf->next_dirty;
 118         }
 119         buf->prev_dirty = buf->next_dirty = NULL;
 120         buf->flags &= ~BUFFER_DIRTY;
 121         kstat.dirty -= (PAGE_SIZE / 1024);
 122 }
 123 
 124 static void insert_on_free_list(struct buffer *buf)
 125 {
 126         if(!buffer_head) {
 127                 buf->prev_free = buf->next_free = buf;
 128                 buffer_head = buf;
 129         } else {
 130                 buf->next_free = buffer_head;
 131                 buf->prev_free = buffer_head->prev_free;
 132                 buffer_head->prev_free->next_free = buf;
 133                 buffer_head->prev_free = buf;
 134 
 135                 /*
 136                  * If the buffer is marked as not valid, it is
 137                  * placed at the beginning of the free list.
 138                  */
 139                 if(!(buf->flags & BUFFER_VALID)) {
 140                         buffer_head = buf;
 141                 }
 142         }
 143 }
 144 
 145 static void remove_from_free_list(struct buffer *buf)
 146 {
 147         if(!buffer_head) {
 148                 return;
 149         }
 150 
 151         buf->prev_free->next_free = buf->next_free;
 152         buf->next_free->prev_free = buf->prev_free;
 153         if(buf == buffer_head) {
 154                 buffer_head = buf->next_free;
 155         }
 156 
 157         if(buffer_head == buffer_head->next_free) {
 158                 buffer_head = NULL;
 159         }
 160 }
 161 
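/* lock a buffer, sleeping uninterruptibly until any previous holder releases it */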
 162 static void buffer_wait(struct buffer *buf)
 163 {
 164         unsigned long int flags;
 165 
 166         for(;;) {
 167                 SAVE_FLAGS(flags); CLI();
 168                 if(buf->flags & BUFFER_LOCKED) {
 169                         RESTORE_FLAGS(flags);
 170                         sleep(&buffer_wait, PROC_UNINTERRUPTIBLE);
 171                 } else {
 172                         break;
 173                 }
 174         }
 175         buf->flags |= BUFFER_LOCKED;
 176         RESTORE_FLAGS(flags);
 177 }
 178 
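/* take the least recently used buffer off the free list and return it locked */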
 179 static struct buffer * get_free_buffer(void)
 180 {
 181         unsigned long int flags;
 182         struct buffer *buf;
 183 
 184         /* no more buffers on free list */
 185         if(!buffer_head) {
 186                 return NULL;
 187         }
 188 
 189         for(;;) {
 190                 SAVE_FLAGS(flags); CLI();
 191                 buf = buffer_head;
 192                 if(buf->flags & BUFFER_LOCKED) {
 193                         RESTORE_FLAGS(flags);
 194                         sleep(&buffer_wait, PROC_UNINTERRUPTIBLE);
 195                 } else {
 196                         break;
 197                 }
 198         }
 199 
 200         remove_from_free_list(buf);
 201         buf->flags |= BUFFER_LOCKED;
 202 
 203         RESTORE_FLAGS(flags);
 204         return buf;
 205 }
 206 
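/* write a single buffer to its block device and remove it from the dirty list */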
 207 static void sync_one_buffer(struct buffer *buf)
 208 {
 209         struct device *d;
 210         int errno;
 211 
 212         if(!(d = get_device(BLK_DEV, buf->dev))) {
 213                 printk("WARNING: %s(): block device %d,%d not registered!\n", __FUNCTION__, MAJOR(buf->dev), MINOR(buf->dev));
 214                 return;
 215         }
 216 
 217         /* this shouldn't happen */
 218         if(!buf->data) {
 219                 printk("WARNING: %s(): buffer (dev=%x, block=%d, size=%d) has no data!\n", __FUNCTION__, buf->dev, buf->block, buf->size);
 220                 return;
 221         }
 222 
 223         if(d->fsop && d->fsop->write_block) {
 224                 errno = d->fsop->write_block(buf->dev, buf->block, buf->data, buf->size);
 225                 if(errno < 0) {
 226                         if(errno == -EROFS) {
 227                                 printk("WARNING: %s(): write protection on device %d,%d (block %d).\n", __FUNCTION__, MAJOR(buf->dev), MINOR(buf->dev), buf->block);
 228                         } else {
 229                                 printk("WARNING: %s(): I/O error on device %d,%d (block %d).\n", __FUNCTION__, MAJOR(buf->dev), MINOR(buf->dev), buf->block);
 230                         }
 231                         return;
 232                 }
 233                 remove_from_dirty_list(buf);
 234         } else {
 235                 printk("WARNING: %s(): device %d,%d does not have the write_block() method!\n", __FUNCTION__, MAJOR(buf->dev), MINOR(buf->dev));
 236         }
 237 }
 238 
 239 static struct buffer * search_buffer_hash(__dev_t dev, __blk_t block, int size)
 240 {
 241         struct buffer *buf;
 242         int i;
 243 
 244         i = BUFFER_HASH(dev, block);
 245         buf = buffer_hash_table[i];
 246 
 247         while(buf) {
 248                 if(buf->dev == dev && buf->block == block && buf->size == size) {
 249                         return buf;
 250                 }
 251                 buf = buf->next_hash;
 252         }
 253 
 254         return NULL;
 255 }
 256 
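/*
 * Return a locked buffer for (dev, block): either found in the hash table,
 * or recycled from the head of the free list (synced first if it was dirty).
 */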
 257 static struct buffer * getblk(__dev_t dev, __blk_t block, int size)
 258 {
 259         unsigned long int flags;
 260         struct buffer *buf;
 261 
 262         for(;;) {
 263                 if((buf = search_buffer_hash(dev, block, size))) {
 264                         SAVE_FLAGS(flags); CLI();
 265                         if(buf->flags & BUFFER_LOCKED) {
 266                                 RESTORE_FLAGS(flags);
 267                                 sleep(&buffer_wait, PROC_UNINTERRUPTIBLE);
 268                                 continue;
 269                         }
 270                         buf->flags |= BUFFER_LOCKED;
 271                         remove_from_free_list(buf);
 272                         RESTORE_FLAGS(flags);
 273                         return buf;
 274                 }
 275 
 276                 if(!(buf = get_free_buffer())) {
 277                         printk("WARNING: %s(): no more buffers on free list!\n", __FUNCTION__);
 278                         sleep(&get_free_buffer, PROC_UNINTERRUPTIBLE);
 279                         continue;
 280                 }
 281 
 282                 if(buf->flags & BUFFER_DIRTY) {
 283                         sync_one_buffer(buf);
 284                 } else {
 285                         if(!buf->data) {
 286                                 if(!(buf->data = (char *)kmalloc())) {
 287                                         brelse(buf);
 288                                         printk("%s(): returning NULL\n", __FUNCTION__);
 289                                         return NULL;
 290                                 }
 291                                 kstat.buffers += (PAGE_SIZE / 1024);
 292                         }
 293                 }
 294 
 295                 SAVE_FLAGS(flags); CLI();
 296                 remove_from_hash(buf);  /* remove it from old hash */
 297                 buf->dev = dev;
 298                 buf->block = block;
 299                 buf->size = size;
 300                 insert_to_hash(buf);
 301                 buf->flags &= ~BUFFER_VALID;
 302                 RESTORE_FLAGS(flags);
 303                 return buf;
 304         }
 305 }
 306 
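/* read a block through the cache; returns a locked, valid buffer, or NULL on error */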
 307 struct buffer * bread(__dev_t dev, __blk_t block, int size)
 308 {
 309         struct buffer *buf;
 310         struct device *d;
 311 
 312         if(!(d = get_device(BLK_DEV, dev))) {
 313                 printk("WARNING: %s(): device major %d not found!\n", __FUNCTION__, MAJOR(dev));
 314                 return NULL;
 315         }
 316 
 317         if((buf = getblk(dev, block, size))) {
 318                 if(buf->flags & BUFFER_VALID) {
 319                         return buf;
 320                 }
 321                 if(d->fsop && d->fsop->read_block) {
 322                         if(d->fsop->read_block(dev, block, buf->data, size) >= 0) {
 323                                 buf->flags |= BUFFER_VALID;
 324                         }
 325                 }
 326                 if(buf->flags & BUFFER_VALID) {
 327                         return buf;
 328                 }
 329                 brelse(buf);
 330         }
 331         
 332         printk("WARNING: %s(): returning NULL!\n", __FUNCTION__);
 333         return NULL;
 334 }
 335 
 336 void bwrite(struct buffer *buf)
 337 {
 338         buf->flags |= (BUFFER_DIRTY | BUFFER_VALID);
 339         brelse(buf);
 340 }
 341 
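/*
 * Release a buffer: queue it on the dirty list if needed, put it back on
 * the free list, unlock it and wake up any waiters.
 */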
 342 void brelse(struct buffer *buf)
 343 {
 344         unsigned long int flags;
 345 
 346         SAVE_FLAGS(flags); CLI();
 347 
 348         if(buf->flags & BUFFER_DIRTY) {
 349                 insert_on_dirty_list(buf);
 350         }
 351 
 352         insert_on_free_list(buf);
 353         buf->flags &= ~BUFFER_LOCKED;
 354 
 355         RESTORE_FLAGS(flags);
 356 
 357         wakeup(&get_free_buffer);
 358         wakeup(&buffer_wait);
 359 }
 360 
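/* write all dirty buffers to disk (only those of 'dev', if it is nonzero) */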
 361 void sync_buffers(__dev_t dev)
 362 {
 363         struct buffer *buf, *next;
 364 
 365         buf = buffer_dirty_head;
 366 
 367         lock_resource(&sync_resource);
 368         while(buf) {
 369                 next = buf->next_dirty;
 370                 if(!dev || buf->dev == dev) {
 371                         buffer_wait(buf);
 372                         sync_one_buffer(buf);
 373                         buf->flags &= ~BUFFER_LOCKED;
 374                         wakeup(&buffer_wait);
 375                 }
 376                 buf = next;
 377         }
 378         unlock_resource(&sync_resource);
 379 }
 380 
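/* remove from the hash table, and mark as not valid, the (unlocked) buffers of a given device */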
 381 void invalidate_buffers(__dev_t dev)
 382 {
 383         unsigned long int flags;
 384         unsigned int n;
 385         struct buffer *buf;
 386 
 387         buf = &buffer_table[0];
 388         SAVE_FLAGS(flags); CLI();
 389 
 390         for(n = 0; n < NR_BUFFERS; n++) {
 391                 if(!(buf->flags & BUFFER_LOCKED) && buf->dev == dev) {
 392                         buffer_wait(buf);
 393                         remove_from_hash(buf);
 394                         buf->flags &= ~(BUFFER_VALID | BUFFER_LOCKED);
 395                         wakeup(&buffer_wait);
 396                 }
 397                 buf++;
 398         }
 399 
 400         RESTORE_FLAGS(flags);
 401         /* FIXME: invalidate_pages(dev); */
 402 }
 403 
 404 /*
 405  * When the kernel runs out of pages, kswapd is awakened and calls this
 406  * function, which goes through the buffer cache freeing up to NR_BUF_RECLAIM buffers.
 407  */
 408 int reclaim_buffers(void)
 409 {
 410         struct buffer *buf, *first;
 411         int reclaimed;
 412 
 413         reclaimed = 0;
 414         first = NULL;
 415 
 416         for(;;) {
 417                 if(!(buf = get_free_buffer())) {
 418                         printk("WARNING: %s(): no more buffers on free list!\n", __FUNCTION__);
 419                         sleep(&get_free_buffer, PROC_UNINTERRUPTIBLE);
 420                         continue;
 421                 }
 422 
 423                 if(buf->flags & BUFFER_DIRTY) {
 424                         sync_one_buffer(buf);
 425                 }
 426 
 427                 /* this ensures the buffer will go to the tail */
 428                 buf->flags |= BUFFER_VALID;
 429 
 430                 if(first) {
 431                         if(first == buf) {
 432                                 brelse(buf);
 433                                 break;
 434                         }
 435                 } else {
 436                         first = buf;
 437                 }
 438                 if(buf->data) {
 439                         kfree((unsigned int)buf->data);
 440                         buf->data = NULL;
 441                         remove_from_hash(buf);
 442                         kstat.buffers -= (PAGE_SIZE / 1024);
 443                         reclaimed++;
 444                         if(reclaimed == NR_BUF_RECLAIM) {
 445                                 brelse(buf);
 446                                 break;
 447                         }
 448                 }
 449                 brelse(buf);
 450         }
 451 
 452         wakeup(&buffer_wait);
 453 
 454         /*
 455          * If the total number of buffers reclaimed was less than or equal to
 456          * NR_BUF_RECLAIM, then wake up any process waiting for a new page,
 457          * because release_page() won't do it.
 458          */
 459         if(reclaimed && reclaimed <= NR_BUF_RECLAIM) {
 460                 wakeup(&get_free_page);
 461         }
 462 
 463         return reclaimed;
 464 }
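A hypothetical caller sketch (the actual kswapd loop lives elsewhere in the kernel and is not shown in this file; reclaim_until() and pages_wanted are invented names): it simply repeats the call until enough buffers were freed or a full pass reclaims nothing.

        static void reclaim_until(int pages_wanted)
        {
                int freed = 0, n;

                while(freed < pages_wanted) {
                        /* each reclaimed buffer frees one page of data */
                        if(!(n = reclaim_buffers())) {
                                break;
                        }
                        freed += n;
                }
        }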
 465 
 466 void buffer_init(void)
 467 {
 468         struct buffer *buf;
 469         unsigned int n;
 470 
 471         memset_b(buffer_table, 0, buffer_table_size);
 472         memset_b(buffer_hash_table, 0, buffer_hash_table_size);
 473 
 474         for(n = 0; n < NR_BUFFERS; n++) {
 475                 buf = &buffer_table[n];
 476                 insert_on_free_list(buf);
 477         }
 478 }
