
root/fs/buffer.c


DEFINITIONS

This source file includes the following definitions.
  1. insert_to_hash
  2. remove_from_hash
  3. insert_on_dirty_list
  4. remove_from_dirty_list
  5. insert_on_free_list
  6. remove_from_free_list
  7. buffer_wait
  8. get_free_buffer
  9. sync_one_buffer
  10. search_buffer_hash
  11. getblk
  12. bread
  13. bwrite
  14. brelse
  15. sync_buffers
  16. invalidate_buffers
  17. reclaim_buffers
  18. buffer_init

/*
 * fiwix/fs/buffer.c
 *
 * Copyright 2018-2022, Jordi Sanfeliu. All rights reserved.
 * Distributed under the terms of the Fiwix License.
 */

/*
 * buffer.c implements a cache using the LRU (Least Recently Used) algorithm,
 * with a free list as a doubly circular linked list and a chained hash table
 * with doubly linked lists.
 *
 * hash table
 * +--------+  +--------------+  +--------------+  +--------------+
 * | index  |  |prev|data|next|  |prev|data|next|  |prev|data|next|
 * |   0   --> | /  |    | --->  <--- |    | --->  <--- |    |  / |
 * +--------+  +--------------+  +--------------+  +--------------+
 * +--------+  +--------------+  +--------------+  +--------------+
 * | index  |  |prev|data|next|  |prev|data|next|  |prev|data|next|
 * |   1   --> | /  |    | --->  <--- |    | --->  <--- |    |  / |
 * +--------+  +--------------+  +--------------+  +--------------+
 *              (buffer)          (buffer)          (buffer)
 *    ...
 */
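
To make the diagram concrete: every buffer carries independent sets of links, so the same buffer can sit on a hash chain (for lookup by device and block), on the free list (for LRU reuse) and on the dirty list (for write-back) at the same time. Below is a simplified sketch of the link fields, inferred from how they are used in this file; it is illustrative only, and the field widths of the types are assumptions. The authoritative declaration of struct buffer lives in <fiwix/buffer.h>:

        typedef unsigned short int __dev_t;     /* assumed width, for the sketch */
        typedef unsigned int __blk_t;           /* assumed width, for the sketch */

        /* illustrative only: not the real declaration from <fiwix/buffer.h> */
        struct buffer_sketch {
                __dev_t dev;            /* device that owns the block */
                __blk_t block;          /* block number on that device */
                int size;               /* block size in bytes */
                int flags;              /* BUFFER_VALID | BUFFER_LOCKED | BUFFER_DIRTY */
                char *data;             /* page holding the cached data */
                struct buffer_sketch *prev_hash, *next_hash;    /* hash chain */
                struct buffer_sketch *prev_free, *next_free;    /* free list (circular) */
                struct buffer_sketch *prev_dirty, *next_dirty;  /* dirty list */
        };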

#include <fiwix/asm.h>
#include <fiwix/kernel.h>
#include <fiwix/sleep.h>
#include <fiwix/sched.h>
#include <fiwix/buffer.h>
#include <fiwix/devices.h>
#include <fiwix/fs.h>
#include <fiwix/mm.h>
#include <fiwix/errno.h>
#include <fiwix/stdio.h>
#include <fiwix/string.h>
#include <fiwix/stat.h>

#define BUFFER_HASH(dev, block) (((__dev_t)(dev) ^ (__blk_t)(block)) % (NR_BUF_HASH))
#define NR_BUFFERS      (buffer_table_size / sizeof(struct buffer))
#define NR_BUF_HASH     (buffer_hash_table_size / sizeof(unsigned int))

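A quick worked example of BUFFER_HASH: suppose the hash table came out to 997 slots (the real NR_BUF_HASH depends on buffer_hash_table_size) and we cache block 1234 of device 0x0301. Then (0x301 ^ 1234) % 997 = (769 ^ 1234) % 997 = 2003 % 997 = 9, so that buffer hangs off chain 9 of buffer_hash_table. A standalone check, with the table size and the device encoding as assumptions:

        #include <stdio.h>

        #define NR_BUF_HASH 997         /* assumed value, for illustration only */
        #define BUFFER_HASH(dev, block) (((unsigned int)(dev) ^ (unsigned int)(block)) % (NR_BUF_HASH))

        int main(void)
        {
                /* 0x0301 is an assumed numeric encoding of device 3,1 */
                printf("chain %u\n", BUFFER_HASH(0x0301, 1234));        /* prints "chain 9" */
                return 0;
        }
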
struct buffer *buffer_table;            /* buffer pool */
struct buffer *buffer_head;             /* buffer pool head */
struct buffer *buffer_dirty_head;
struct buffer **buffer_hash_table;

static struct resource sync_resource = { 0, 0 };

static void insert_to_hash(struct buffer *buf)
{
        struct buffer **h;
        int i;

        i = BUFFER_HASH(buf->dev, buf->block);
        h = &buffer_hash_table[i];

        if(!*h) {
                *h = buf;
                (*h)->prev_hash = (*h)->next_hash = NULL;
        } else {
                buf->prev_hash = NULL;
                buf->next_hash = *h;
                (*h)->prev_hash = buf;
                *h = buf;
        }
}

static void remove_from_hash(struct buffer *buf)
{
        struct buffer **h;
        int i;

        i = BUFFER_HASH(buf->dev, buf->block);
        h = &buffer_hash_table[i];

        while(*h) {
                if(*h == buf) {
                        if((*h)->next_hash) {
                                (*h)->next_hash->prev_hash = (*h)->prev_hash;
                        }
                        if((*h)->prev_hash) {
                                (*h)->prev_hash->next_hash = (*h)->next_hash;
                        }
                        if(h == &buffer_hash_table[i]) {
                                *h = (*h)->next_hash;
                        }
                        break;
                }
                h = &(*h)->next_hash;
        }
}

static void insert_on_dirty_list(struct buffer *buf)
{
        /* return if it's already on the dirty list; a lone head has
         * both links NULL, so it must be checked explicitly */
        if(buf->prev_dirty || buf->next_dirty || buf == buffer_dirty_head) {
                return;
        }

        if(buffer_dirty_head) {
                buf->next_dirty = buffer_dirty_head;
                buffer_dirty_head->prev_dirty = buf;
        }
        buffer_dirty_head = buf;
        kstat.dirty += (PAGE_SIZE / 1024);
}

static void remove_from_dirty_list(struct buffer *buf)
{
        if(buf->next_dirty) {
                buf->next_dirty->prev_dirty = buf->prev_dirty;
        }
        if(buf->prev_dirty) {
                buf->prev_dirty->next_dirty = buf->next_dirty;
        }
        if(buf == buffer_dirty_head) {
                buffer_dirty_head = buf->next_dirty;
        }
        buf->prev_dirty = buf->next_dirty = NULL;
        buf->flags &= ~BUFFER_DIRTY;
        kstat.dirty -= (PAGE_SIZE / 1024);
}

static void insert_on_free_list(struct buffer *buf)
{
        if(!buffer_head) {
                buf->prev_free = buf->next_free = buf;
                buffer_head = buf;
        } else {
                buf->next_free = buffer_head;
                buf->prev_free = buffer_head->prev_free;
                buffer_head->prev_free->next_free = buf;
                buffer_head->prev_free = buf;

                /*
                 * If it is marked as not valid, the buffer is
                 * placed at the beginning of the free list.
                 */
                if(!(buf->flags & BUFFER_VALID)) {
                        buffer_head = buf;
                }
        }
}

static void remove_from_free_list(struct buffer *buf)
{
        if(!buffer_head) {
                return;
        }

        buf->prev_free->next_free = buf->next_free;
        buf->next_free->prev_free = buf->prev_free;
        if(buf == buffer_head) {
                buffer_head = buf->next_free;
        }

        /* the list becomes empty only if 'buf' was its sole element;
         * testing buffer_head against its own next pointer would also
         * (wrongly) empty a 2-element list when the non-head is removed */
        if(buf == buf->next_free) {
                buffer_head = NULL;
        }
}

static void buffer_wait(struct buffer *buf)
{
        unsigned long int flags;

        for(;;) {
                SAVE_FLAGS(flags); CLI();
                if(buf->flags & BUFFER_LOCKED) {
                        RESTORE_FLAGS(flags);
                        sleep(&buffer_wait, PROC_UNINTERRUPTIBLE);
                } else {
                        break;
                }
        }
        buf->flags |= BUFFER_LOCKED;
        RESTORE_FLAGS(flags);
}

static struct buffer *get_free_buffer(void)
{
        unsigned long int flags;
        struct buffer *buf;

        /* no more buffers on free list */
        if(!buffer_head) {
                return NULL;
        }

        for(;;) {
                SAVE_FLAGS(flags); CLI();
                buf = buffer_head;
                if(buf->flags & BUFFER_LOCKED) {
                        RESTORE_FLAGS(flags);
                        sleep(&buffer_wait, PROC_UNINTERRUPTIBLE);
                } else {
                        break;
                }
        }

        remove_from_free_list(buf);
        buf->flags |= BUFFER_LOCKED;

        RESTORE_FLAGS(flags);
        return buf;
}

static void sync_one_buffer(struct buffer *buf)
{
        struct device *d;
        int errno;

        if(!(d = get_device(BLK_DEV, buf->dev))) {
                printk("WARNING: %s(): block device %d,%d not registered!\n", __FUNCTION__, MAJOR(buf->dev), MINOR(buf->dev));
                return;
        }

        /* this shouldn't happen */
        if(!buf->data) {
                printk("WARNING: %s(): buffer (dev=%x, block=%d, size=%d) has no data!\n", __FUNCTION__, buf->dev, buf->block, buf->size);
                remove_from_dirty_list(buf);
                return;
        }

        if(d->fsop && d->fsop->write_block) {
                errno = d->fsop->write_block(buf->dev, buf->block, buf->data, buf->size);
                if(errno < 0) {
                        if(errno == -EROFS) {
                                printk("WARNING: %s(): write protection on device %d,%d (block %d).\n", __FUNCTION__, MAJOR(buf->dev), MINOR(buf->dev), buf->block);
                        } else {
                                printk("WARNING: %s(): I/O error on device %d,%d (block %d).\n", __FUNCTION__, MAJOR(buf->dev), MINOR(buf->dev), buf->block);
                        }
                        return;
                }
                remove_from_dirty_list(buf);
        } else {
                printk("WARNING: %s(): device %d,%d does not have the write_block() method!\n", __FUNCTION__, MAJOR(buf->dev), MINOR(buf->dev));
        }
}

static struct buffer *search_buffer_hash(__dev_t dev, __blk_t block, int size)
{
        struct buffer *buf;
        int i;

        i = BUFFER_HASH(dev, block);
        buf = buffer_hash_table[i];

        while(buf) {
                if(buf->dev == dev && buf->block == block && buf->size == size) {
                        return buf;
                }
                buf = buf->next_hash;
        }

        return NULL;
}

static struct buffer *getblk(__dev_t dev, __blk_t block, int size)
{
        unsigned long int flags;
        struct buffer *buf;

        for(;;) {
                if((buf = search_buffer_hash(dev, block, size))) {
                        SAVE_FLAGS(flags); CLI();
                        if(buf->flags & BUFFER_LOCKED) {
                                RESTORE_FLAGS(flags);
                                sleep(&buffer_wait, PROC_UNINTERRUPTIBLE);
                                continue;
                        }
                        buf->flags |= BUFFER_LOCKED;
                        remove_from_free_list(buf);
                        RESTORE_FLAGS(flags);
                        return buf;
                }

                if(!(buf = get_free_buffer())) {
                        printk("WARNING: %s(): no more buffers on free list!\n", __FUNCTION__);
                        sleep(&get_free_buffer, PROC_UNINTERRUPTIBLE);
                        continue;
                }

                if(buf->flags & BUFFER_DIRTY) {
                        sync_one_buffer(buf);
                } else {
                        if(!buf->data) {
                                if(!(buf->data = (char *)kmalloc())) {
                                        brelse(buf);
                                        printk("%s(): returning NULL\n", __FUNCTION__);
                                        return NULL;
                                }
                                kstat.buffers += (PAGE_SIZE / 1024);
                        }
                }

                SAVE_FLAGS(flags); CLI();
                remove_from_hash(buf);  /* remove it from old hash */
                buf->dev = dev;
                buf->block = block;
                buf->size = size;
                insert_to_hash(buf);
                buf->flags &= ~BUFFER_VALID;
                RESTORE_FLAGS(flags);
                return buf;
        }
}

struct buffer *bread(__dev_t dev, __blk_t block, int size)
{
        struct buffer *buf;
        struct device *d;

        if(!(d = get_device(BLK_DEV, dev))) {
                printk("WARNING: %s(): device major %d not found!\n", __FUNCTION__, MAJOR(dev));
                return NULL;
        }

        if((buf = getblk(dev, block, size))) {
                if(buf->flags & BUFFER_VALID) {
                        return buf;
                }
                if(d->fsop && d->fsop->read_block) {
                        if(d->fsop->read_block(dev, block, buf->data, size) >= 0) {
                                buf->flags |= BUFFER_VALID;
                        }
                }
                if(buf->flags & BUFFER_VALID) {
                        return buf;
                }
                brelse(buf);
        }

        printk("WARNING: %s(): returning NULL!\n", __FUNCTION__);
        return NULL;
}

void bwrite(struct buffer *buf)
{
        buf->flags |= (BUFFER_DIRTY | BUFFER_VALID);
        brelse(buf);
}

void brelse(struct buffer *buf)
{
        unsigned long int flags;

        SAVE_FLAGS(flags); CLI();

        if(buf->flags & BUFFER_DIRTY) {
                insert_on_dirty_list(buf);
        }

        insert_on_free_list(buf);
        buf->flags &= ~BUFFER_LOCKED;

        RESTORE_FLAGS(flags);

        wakeup(&get_free_buffer);
        wakeup(&buffer_wait);
}
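
With bread(), bwrite() and brelse() in place, the typical lifecycle seen from filesystem code is: read a block (returned locked and valid), touch its data, then either release it read-only or mark it for write-back. A minimal sketch of that lifecycle; poke_block_byte() and its dev, block, blksize and offset parameters are hypothetical, not part of this file:

        /* hypothetical helper: overwrite one byte of a cached block */
        static int poke_block_byte(__dev_t dev, __blk_t block, int blksize, int offset)
        {
                struct buffer *buf;

                if(!(buf = bread(dev, block, blksize))) {
                        return -EIO;    /* device missing or read error */
                }
                /* bread() returns the buffer locked, so buf->data is stable */
                buf->data[offset] = 0xFF;       /* example in-place modification */
                bwrite(buf);    /* sets DIRTY|VALID and releases the buffer */
                return 0;
        }

For read-only access the caller would release with brelse(buf) instead; the block stays cached and any dirty data is flushed later by sync_buffers().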

void sync_buffers(__dev_t dev)
{
        struct buffer *buf, *next;

        lock_resource(&sync_resource);

        /* read the dirty list head only once the resource is held */
        buf = buffer_dirty_head;
        while(buf) {
                next = buf->next_dirty;
                if(!dev || buf->dev == dev) {
                        buffer_wait(buf);
                        sync_one_buffer(buf);
                        buf->flags &= ~BUFFER_LOCKED;
                        wakeup(&buffer_wait);
                }
                buf = next;
        }
        unlock_resource(&sync_resource);
}

void invalidate_buffers(__dev_t dev)
{
        unsigned long int flags;
        unsigned int n;
        struct buffer *buf;

        buf = &buffer_table[0];
        SAVE_FLAGS(flags); CLI();

        for(n = 0; n < NR_BUFFERS; n++) {
                if(!(buf->flags & BUFFER_LOCKED) && buf->dev == dev) {
                        buffer_wait(buf);
                        remove_from_hash(buf);
                        buf->flags &= ~(BUFFER_VALID | BUFFER_LOCKED);
                        wakeup(&buffer_wait);
                }
                buf++;
        }

        RESTORE_FLAGS(flags);
        /* FIXME: invalidate_pages(dev); */
}

/*
 * When the kernel runs out of pages, kswapd is awakened and calls this
 * function, which goes through the buffer cache freeing up to
 * NR_BUF_RECLAIM buffers.
 */
int reclaim_buffers(void)
{
        struct buffer *buf, *first;
        int reclaimed;

        reclaimed = 0;
        first = NULL;

        for(;;) {
                if(!(buf = get_free_buffer())) {
                        printk("WARNING: %s(): no more buffers on free list!\n", __FUNCTION__);
                        sleep(&get_free_buffer, PROC_UNINTERRUPTIBLE);
                        continue;
                }

                if(buf->flags & BUFFER_DIRTY) {
                        sync_one_buffer(buf);
                }

                /* this ensures the buffer will go to the tail */
                buf->flags |= BUFFER_VALID;

                if(first) {
                        if(first == buf) {
                                brelse(buf);
                                break;
                        }
                } else {
                        first = buf;
                }
                if(buf->data) {
                        kfree((unsigned int)buf->data);
                        buf->data = NULL;
                        remove_from_hash(buf);
                        kstat.buffers -= (PAGE_SIZE / 1024);
                        reclaimed++;
                        if(reclaimed == NR_BUF_RECLAIM) {
                                brelse(buf);
                                break;
                        }
                }
                brelse(buf);
        }

        wakeup(&buffer_wait);

        /*
         * If the total number of buffers reclaimed was less than or equal to
         * NR_BUF_RECLAIM, then wake up any process waiting for a new page
         * because release_page() won't do it.
         */
        if(reclaimed && reclaimed <= NR_BUF_RECLAIM) {
                wakeup(&get_free_page);
        }

        return reclaimed;
}
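
As the comment above notes, the intended caller is kswapd, which lives elsewhere in the kernel and is not shown in this file. A minimal sketch of a kswapd-style loop; reclaim_loop() and the wait channel are assumptions, with whoever detects the page shortage expected to issue the matching wakeup():

        /* hypothetical page-shortage path, woken when free pages run out */
        static void reclaim_loop(void)
        {
                for(;;) {
                        sleep(&reclaim_loop, PROC_UNINTERRUPTIBLE);     /* assumed channel */
                        if(!reclaim_buffers()) {
                                printk("WARNING: %s(): nothing left to reclaim!\n", __FUNCTION__);
                        }
                }
        }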

void buffer_init(void)
{
        struct buffer *buf;
        unsigned int n;

        memset_b(buffer_table, 0, buffer_table_size);
        memset_b(buffer_hash_table, 0, buffer_hash_table_size);

        for(n = 0; n < NR_BUFFERS; n++) {
                buf = &buffer_table[n];
                insert_on_free_list(buf);
        }
}
