
root/mm/memory.c


DEFINITIONS

This source file includes the following definitions:
  1. map_kaddr
  2. bss_init
  3. setup_minmem
  4. get_mapped_addr
  5. clone_pages
  6. free_page_tables
  7. map_page
  8. unmap_page
  9. mem_init
  10. mem_stats

/*
 * fiwix/mm/memory.c
 *
 * Copyright 2018-2022, Jordi Sanfeliu. All rights reserved.
 * Distributed under the terms of the Fiwix License.
 */

#include <fiwix/kernel.h>
#include <fiwix/asm.h>
#include <fiwix/mm.h>
#include <fiwix/mman.h>
#include <fiwix/bios.h>
#include <fiwix/ramdisk.h>
#include <fiwix/process.h>
#include <fiwix/buffer.h>
#include <fiwix/fs.h>
#include <fiwix/filesystems.h>
#include <fiwix/stdio.h>
#include <fiwix/string.h>

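/*
 * _etext, _edata and _end are symbols defined by the linker script; they
 * mark the end of the kernel's text, initialized data and BSS sections,
 * respectively. The section sizes below are derived from them.
 */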
#define KERNEL_TEXT_SIZE        ((int)_etext - (PAGE_OFFSET + KERNEL_ENTRY_ADDR))
#define KERNEL_DATA_SIZE        ((int)_edata - (int)_etext)
#define KERNEL_BSS_SIZE         ((int)_end - (int)_edata)

#define PGDIR_4MB_ADDR          0x50000

unsigned int *kpage_dir;
unsigned int *kpage_table;

unsigned int _last_data_addr;

unsigned int proc_table_size = 0;
struct proc *proc_table;

unsigned int buffer_table_size = 0;
unsigned int buffer_hash_table_size = 0;
struct buffer *buffer_table;
struct buffer **buffer_hash_table;

unsigned int inode_table_size = 0;
unsigned int inode_hash_table_size = 0;
struct inode *inode_table;
struct inode **inode_hash_table;

unsigned int fd_table_size = 0;
struct fd *fd_table;

unsigned int mount_table_size = 0;
struct mount *mount_table;

unsigned int page_table_size = 0;
unsigned int page_hash_table_size = 0;
struct page *page_table;
struct page **page_hash_table;

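/*
 * Maps the physical address range 'from'..'to' 1:1 into the kernel page
 * tables, allocating any missing page table from _last_data_addr. As a
 * sketch (assuming 4KB pages, i.e. PAGE_SHIFT == 12), a call such as:
 *
 *     map_kaddr(0xA0000, 0xC0000, PAGE_PRESENT | PAGE_RW);
 *
 * walks page frames 0xA0-0xBF and installs identity mappings for them.
 */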
static void map_kaddr(unsigned int from, unsigned int to, int flags)
{
        unsigned int n;
        unsigned int *pgtbl;
        unsigned int pde, pte;

        for(n = from >> PAGE_SHIFT; n < (to >> PAGE_SHIFT); n++) {
                pde = GET_PGDIR(n << PAGE_SHIFT);
                pte = GET_PGTBL(n << PAGE_SHIFT);
                if(!(kpage_dir[pde] & ~PAGE_MASK)) {
                        unsigned int addr;
                        addr = _last_data_addr;
                        _last_data_addr += PAGE_SIZE;
                        kpage_dir[pde] = addr | flags;
                        memset_b((void *)addr, 0, PAGE_SIZE);
                }
                pgtbl = (unsigned int *)(kpage_dir[pde] & PAGE_MASK);
                pgtbl[pte] = (n << PAGE_SHIFT) | flags;
        }
}

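/* clears the kernel's BSS segment (everything between _edata and _end) */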
void bss_init(void)
{
        memset_b((void *)((int)_edata), 0, KERNEL_BSS_SIZE);
}

/*
 * This function creates a minimal Page Directory covering only the first
 * 4MB of physical memory; just enough to boot the kernel.
 * It returns the physical address to be loaded into the CR3 register.
 */
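/*
 * A note on the arithmetic below (a sketch, assuming the usual Fiwix
 * PAGE_OFFSET of 0xC0000000): adding 0x40000000 to a kernel virtual
 * address wraps around the 32bit address space and yields its physical
 * address, i.e. addr + 0x40000000 == addr - PAGE_OFFSET (modulo 2^32).
 * Each page table is installed twice: once at entry 0 (an identity
 * mapping, needed until execution jumps to kernel space) and once at
 * GET_PGDIR(PAGE_OFFSET) (the kernel-space mapping at 3GB).
 */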
unsigned int setup_minmem(void)
{
        int n;
        unsigned int addr;
        short int pd, mb4;

        mb4 = 1;        /* 4MB units */
        addr = PAGE_OFFSET + PGDIR_4MB_ADDR;

        kpage_dir = (unsigned int *)addr;
        memset_b(kpage_dir, 0, PAGE_SIZE);

        addr += PAGE_SIZE;
        kpage_table = (unsigned int *)addr;
        memset_b(kpage_table, 0, PAGE_SIZE * mb4);

        for(n = 0; n < (1024 * mb4); n++) {
                kpage_table[n] = (n << PAGE_SHIFT) | PAGE_PRESENT | PAGE_RW;
                if(!(n % 1024)) {
                        pd = n / 1024;
                        kpage_dir[pd] = (unsigned int)(addr + (PAGE_SIZE * pd) + 0x40000000) | PAGE_PRESENT | PAGE_RW;
                        kpage_dir[GET_PGDIR(PAGE_OFFSET) + pd] = (unsigned int)(addr + (PAGE_SIZE * pd) + 0x40000000) | PAGE_PRESENT | PAGE_RW;
                }
        }
        return (unsigned int)kpage_dir + 0x40000000;
}

/* returns the page table entry (physical address plus flags) that maps a virtual address */
unsigned int get_mapped_addr(struct proc *p, unsigned int addr)
{
        unsigned int *pgdir, *pgtbl;
        unsigned int pde, pte;

        pgdir = (unsigned int *)P2V(p->tss.cr3);
        pde = GET_PGDIR(addr);
        pte = GET_PGTBL(addr);
        pgtbl = (unsigned int *)P2V((pgdir[pde] & PAGE_MASK));
        return pgtbl[pte];
}

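/*
 * Clones the page mappings of the current process into 'child' during
 * fork(). MAP_SHARED regions and reserved pages are skipped; every other
 * present page is write-protected in both address spaces and, if its
 * region is writable, flagged PAGE_COW so the actual copy is deferred to
 * the first write. Returns the number of page tables allocated for the
 * child, or 0 on failure.
 */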
int clone_pages(struct proc *child)
{
        unsigned int *src_pgdir, *dst_pgdir;
        unsigned int *src_pgtbl, *dst_pgtbl;
        unsigned int pde, pte;
        unsigned int p_addr, c_addr;
        unsigned int n, n2, pages;
        struct page *pg;
        struct vma *vma;

        src_pgdir = (unsigned int *)P2V(current->tss.cr3);
        dst_pgdir = (unsigned int *)P2V(child->tss.cr3);
        vma = current->vma;

        for(n = 0, pages = 0; n < VMA_REGIONS && vma->start; n++, vma++) {
                for(n2 = vma->start; n2 < vma->end; n2 += PAGE_SIZE) {
                        if(vma->flags & MAP_SHARED) {
                                continue;
                        }
                        pde = GET_PGDIR(n2);
                        pte = GET_PGTBL(n2);
                        if(src_pgdir[pde] & PAGE_PRESENT) {
                                src_pgtbl = (unsigned int *)P2V((src_pgdir[pde] & PAGE_MASK));
                                if(!(dst_pgdir[pde] & PAGE_PRESENT)) {
                                        if(!(c_addr = kmalloc())) {
                                                printk("%s(): returning 0!\n", __FUNCTION__);
                                                return 0;
                                        }
                                        current->rss++;
                                        pages++;
                                        dst_pgdir[pde] = V2P(c_addr) | PAGE_PRESENT | PAGE_RW | PAGE_USER;
                                        memset_b((void *)c_addr, 0, PAGE_SIZE);
                                }
                                dst_pgtbl = (unsigned int *)P2V((dst_pgdir[pde] & PAGE_MASK));
                                if(src_pgtbl[pte] & PAGE_PRESENT) {
                                        p_addr = src_pgtbl[pte] >> PAGE_SHIFT;
                                        pg = &page_table[p_addr];
                                        if(pg->flags & PAGE_RESERVED) {
                                                continue;
                                        }
                                        src_pgtbl[pte] &= ~PAGE_RW;
                                        /* mark writable pages as copy-on-write */
                                        if(vma->prot & PROT_WRITE) {
                                                pg->flags |= PAGE_COW;
                                        }
                                        dst_pgtbl[pte] = src_pgtbl[pte];
                                        if(!is_valid_page((dst_pgtbl[pte] & PAGE_MASK) >> PAGE_SHIFT)) {
                                                PANIC("%s: missing page %d during copy-on-write process.\n", __FUNCTION__, (dst_pgtbl[pte] & PAGE_MASK) >> PAGE_SHIFT);
                                        }
                                        pg = &page_table[(dst_pgtbl[pte] & PAGE_MASK) >> PAGE_SHIFT];
                                        pg->count++;
                                }
                        }
                }
        }
        return pages;
}

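/*
 * Releases the page tables of process 'p'. Only page directory entries
 * with PRESENT, RW and USER all set are user page tables; this is what
 * distinguishes them from the kernel mappings. Returns the number of
 * page tables freed.
 */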
int free_page_tables(struct proc *p)
{
        unsigned int *pgdir;
        int n, count;

        pgdir = (unsigned int *)P2V(p->tss.cr3);
        for(n = 0, count = 0; n < PD_ENTRIES; n++) {
                if((pgdir[n] & (PAGE_PRESENT | PAGE_RW | PAGE_USER)) == (PAGE_PRESENT | PAGE_RW | PAGE_USER)) {
                        kfree(P2V(pgdir[n]) & PAGE_MASK);
                        pgdir[n] = 0;
                        count++;
                }
        }
        return count;
}

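/*
 * Maps the virtual address 'vaddr' into the address space of process 'p',
 * allocating a page table and, if 'addr' is 0, a fresh physical page as
 * well; a non-zero 'addr' is taken as the physical page to map. Returns
 * the kernel virtual address of the mapped page, or 0 on failure.
 *
 * A usage sketch (hypothetical values): map_page(p, 0x08048000, 0, PROT_READ)
 * backs the first page of a classic i386 ELF text segment with a newly
 * allocated frame, mapped read-only for the user process.
 */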
unsigned int map_page(struct proc *p, unsigned int vaddr, unsigned int addr, unsigned int prot)
{
        unsigned int *pgdir, *pgtbl;
        unsigned int newaddr;
        int pde, pte;

        pgdir = (unsigned int *)P2V(p->tss.cr3);
        pde = GET_PGDIR(vaddr);
        pte = GET_PGTBL(vaddr);

        if(!(pgdir[pde] & PAGE_PRESENT)) {      /* allocating page table */
                if(!(newaddr = kmalloc())) {
                        return 0;
                }
                p->rss++;
                pgdir[pde] = V2P(newaddr) | PAGE_PRESENT | PAGE_RW | PAGE_USER;
                memset_b((void *)newaddr, 0, PAGE_SIZE);
        }
        pgtbl = (unsigned int *)P2V((pgdir[pde] & PAGE_MASK));
        if(!(pgtbl[pte] & PAGE_PRESENT)) {      /* allocating page */
                if(!addr) {
                        if(!(addr = kmalloc())) {
                                return 0;
                        }
                        addr = V2P(addr);
                        p->rss++;
                }
                pgtbl[pte] = addr | PAGE_PRESENT | PAGE_USER;
        }
        if(prot & PROT_WRITE) {
                pgtbl[pte] |= PAGE_RW;
        }
        return P2V(addr);
}

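/*
 * Unmaps the page at the virtual address 'vaddr' in the current process
 * and frees the underlying physical page. Returns 0 on success, or 1 if
 * no page was mapped there.
 */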
int unmap_page(unsigned int vaddr)
{
        unsigned int *pgdir, *pgtbl;
        unsigned int addr;
        int pde, pte;

        pgdir = (unsigned int *)P2V(current->tss.cr3);
        pde = GET_PGDIR(vaddr);
        pte = GET_PGTBL(vaddr);
        if(!(pgdir[pde] & PAGE_PRESENT)) {
                printk("WARNING: %s(): trying to unmap an unallocated pde '0x%08x'\n", __FUNCTION__, vaddr);
                return 1;
        }

        pgtbl = (unsigned int *)P2V((pgdir[pde] & PAGE_MASK));
        if(!(pgtbl[pte] & PAGE_PRESENT)) {
                printk("WARNING: %s(): trying to unmap an unallocated page '0x%08x'\n", __FUNCTION__, vaddr);
                return 1;
        }

        addr = pgtbl[pte] & PAGE_MASK;
        pgtbl[pte] = 0;
        kfree(P2V(addr));
        current->rss--;
        return 0;
}

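/*
 * Builds the definitive kernel page directory and page tables covering
 * all of physical memory, switches to them, and then carves the permanent
 * kernel tables (proc, buffer, inode, fd and mount tables, their hash
 * tables, RAMdisks, console buffers, and finally page_hash_table and
 * page_table) out of the memory right after the kernel image, advancing
 * _last_data_addr as it goes.
 */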
void mem_init(void)
{
        unsigned int sizek;
        unsigned int physical_page_tables;
        unsigned int physical_memory;
        int n, pages;

        physical_page_tables = (kstat.physical_pages / 1024) + ((kstat.physical_pages % 1024) ? 1 : 0);
        physical_memory = (kstat.physical_pages << PAGE_SHIFT); /* in bytes */

        /* Page Directory will be aligned to the next page */
        _last_data_addr = PAGE_ALIGN(_last_data_addr);
        kpage_dir = (unsigned int *)_last_data_addr;
        memset_b(kpage_dir, 0, PAGE_SIZE);
        _last_data_addr += PAGE_SIZE;

        /* Page Tables */
        kpage_table = (unsigned int *)_last_data_addr;
        memset_b(kpage_table, 0, physical_page_tables * PAGE_SIZE);
        _last_data_addr += physical_page_tables * PAGE_SIZE;

        /* Page Directory and Page Tables initialization */
        for(n = 0; n < kstat.physical_pages; n++) {
                kpage_table[n] = (n << PAGE_SHIFT) | PAGE_PRESENT | PAGE_RW;
                if(!(n % 1024)) {
                        kpage_dir[GET_PGDIR(PAGE_OFFSET) + (n / 1024)] = (unsigned int)&kpage_table[n] | PAGE_PRESENT | PAGE_RW;
                }
        }

        map_kaddr(KERNEL_ENTRY_ADDR, _last_data_addr, PAGE_PRESENT | PAGE_RW);

        /*
         * FIXME: this is ugly!
         * It should go in console_init() once we have a proper kernel memory/page management.
         * Then map_kaddr will be a public function (not static).
         */
        if(video.flags & VPF_VGA) {
                map_kaddr(0xA0000, 0xA0000 + video.memsize, PAGE_PRESENT | PAGE_RW);
        }
        if(video.flags & VPF_VESAFB) {
                map_kaddr((unsigned int)video.address, (unsigned int)video.address + video.memsize, PAGE_PRESENT | PAGE_RW);
        }
/*      printk("_last_data_addr = 0x%08x-0x%08x (kernel)\n", KERNEL_ENTRY_ADDR, _last_data_addr); */
        activate_kpage_dir();

        /* since the Page Directory is now active we can use virtual addresses */
        _last_data_addr = P2V(_last_data_addr);


        /* reserve memory space for proc_table[NR_PROCS] */
        proc_table_size = PAGE_ALIGN(sizeof(struct proc) * NR_PROCS);
        if(!addr_in_bios_map(V2P(_last_data_addr) + proc_table_size)) {
                PANIC("Not enough memory for proc_table.\n");
        }
/*      printk("_last_data_addr = 0x%08x-0x%08x (proc_table)\n", _last_data_addr, _last_data_addr + proc_table_size); */
        proc_table = (struct proc *)_last_data_addr;
        _last_data_addr += proc_table_size;


        /* reserve memory space for buffer_table */
        buffer_table_size = (kstat.physical_pages * BUFFER_PERCENTAGE) / 100;
        buffer_table_size *= sizeof(struct buffer);
        pages = buffer_table_size >> PAGE_SHIFT;
        buffer_table_size = !pages ? 4096 : pages << PAGE_SHIFT;
/*      printk("_last_data_addr = 0x%08x-0x%08x (buffer_table)\n", _last_data_addr, _last_data_addr + buffer_table_size); */
        if(!addr_in_bios_map(V2P(_last_data_addr) + buffer_table_size)) {
                PANIC("Not enough memory for buffer_table.\n");
        }
        buffer_table = (struct buffer *)_last_data_addr;
        _last_data_addr += buffer_table_size;


        /* reserve memory space for buffer_hash_table */
        n = (buffer_table_size / sizeof(struct buffer) * BUFFER_HASH_PERCENTAGE) / 100;
        n = MAX(n, 10); /* 10 buffer hash buckets as a minimum */
        /* buffer_hash_table is an array of pointers */
        pages = ((n * sizeof(unsigned int)) / PAGE_SIZE) + 1;
        buffer_hash_table_size = pages << PAGE_SHIFT;
/*      printk("_last_data_addr = 0x%08x-0x%08x (buffer_hash_table)\n", _last_data_addr, _last_data_addr + buffer_hash_table_size); */
        if(!addr_in_bios_map(V2P(_last_data_addr) + buffer_hash_table_size)) {
                PANIC("Not enough memory for buffer_hash_table.\n");
        }
        buffer_hash_table = (struct buffer **)_last_data_addr;
        _last_data_addr += buffer_hash_table_size;


        /* reserve memory space for inode_table */
        sizek = physical_memory / 1024; /* this helps to avoid overflow */
        inode_table_size = (sizek * INODE_PERCENTAGE) / 100;
        inode_table_size *= 1024;
        pages = inode_table_size >> PAGE_SHIFT;
        inode_table_size = pages << PAGE_SHIFT;
/*      printk("_last_data_addr = 0x%08x-0x%08x (inode_table)\n", _last_data_addr, _last_data_addr + inode_table_size); */
        if(!addr_in_bios_map(V2P(_last_data_addr) + inode_table_size)) {
                PANIC("Not enough memory for inode_table.\n");
        }
        inode_table = (struct inode *)_last_data_addr;
        _last_data_addr += inode_table_size;


        /* reserve memory space for inode_hash_table */
        n = ((inode_table_size / sizeof(struct inode)) * INODE_HASH_PERCENTAGE) / 100;
        n = MAX(n, 10); /* 10 inode hash buckets as a minimum */
        /* inode_hash_table is an array of pointers */
        pages = ((n * sizeof(unsigned int)) / PAGE_SIZE) + 1;
        inode_hash_table_size = pages << PAGE_SHIFT;
/*      printk("_last_data_addr = 0x%08x-0x%08x (inode_hash_table)\n", _last_data_addr, _last_data_addr + inode_hash_table_size); */
        if(!addr_in_bios_map(V2P(_last_data_addr) + inode_hash_table_size)) {
                PANIC("Not enough memory for inode_hash_table.\n");
        }
        inode_hash_table = (struct inode **)_last_data_addr;
        _last_data_addr += inode_hash_table_size;


        /* reserve memory space for fd_table[NR_OPENS] */
        fd_table_size = PAGE_ALIGN(sizeof(struct fd) * NR_OPENS);
/*      printk("_last_data_addr = 0x%08x-0x%08x (fd_table)\n", _last_data_addr, _last_data_addr + fd_table_size); */
        if(!addr_in_bios_map(V2P(_last_data_addr) + fd_table_size)) {
                PANIC("Not enough memory for fd_table.\n");
        }
        fd_table = (struct fd *)_last_data_addr;
        _last_data_addr += fd_table_size;


        /* reserve memory space for mount_table[NR_MOUNT_POINTS] */
        mount_table_size = PAGE_ALIGN(sizeof(struct mount) * NR_MOUNT_POINTS);
/*      printk("_last_data_addr = 0x%08x-0x%08x (mount_table)\n", _last_data_addr, _last_data_addr + mount_table_size); */
        if(!addr_in_bios_map(V2P(_last_data_addr) + mount_table_size)) {
                PANIC("Not enough memory for mount_table.\n");
        }
        mount_table = (struct mount *)_last_data_addr;
        _last_data_addr += mount_table_size;


        /* reserve memory space for RAMdisk(s) */
        if(_ramdisksize > 0) {
                /*
                 * If the 'initrd=' parameter was supplied, then the first
                 * ramdisk device was already assigned to the initial ramdisk
                 * image.
                 */
                if(ramdisk_table[0].addr) {
                        n = 1;
                } else {
                        n = 0;
                }
                for(; n < RAMDISK_MINORS; n++) {
                        if(!addr_in_bios_map(V2P(_last_data_addr) + (_ramdisksize * 1024))) {
                                printk("WARNING: RAMdisk device disabled (not enough physical memory).\n");
                                break;
                        }
/*                      printk("_last_data_addr = 0x%08x-0x%08x (/dev/ram%d)\n", _last_data_addr, _last_data_addr + (_ramdisksize * 1024), n); */
                        ramdisk_table[n].addr = (char *)_last_data_addr;
                        _last_data_addr += _ramdisksize * 1024;
                }
        }

        /*
         * FIXME: this is ugly!
         * It should go in console_init() once we have a proper kernel memory/page management.
         */
        #include <fiwix/console.h>
        for(n = 1; n <= NR_VCONSOLES; n++) {
                vc_screen[n] = (short int *)_last_data_addr;
                _last_data_addr += (video.columns * video.lines * 2);
        }
        /*
         * FIXME: this is ugly!
         * It should go in console_init() once we have a proper kernel memory/page management.
         */
        vcbuf = (short int *)_last_data_addr;
        _last_data_addr += (video.columns * video.lines * SCREENS_LOG * 2 * sizeof(short int));


        /* the last one must be the page_table structure */
        page_hash_table_size = 1 * PAGE_SIZE;   /* just one page */
        if(!addr_in_bios_map(V2P(_last_data_addr) + page_hash_table_size)) {
                PANIC("Not enough memory for page_hash_table.\n");
        }
        page_hash_table = (struct page **)_last_data_addr;
/*      printk("_last_data_addr = 0x%08x-0x%08x (page_hash_table)\n", _last_data_addr, _last_data_addr + page_hash_table_size); */
        _last_data_addr += page_hash_table_size;

        page_table_size = PAGE_ALIGN(kstat.physical_pages * sizeof(struct page));
        if(!addr_in_bios_map(V2P(_last_data_addr) + page_table_size)) {
                PANIC("Not enough memory for page_table.\n");
        }
        page_table = (struct page *)_last_data_addr;
/*      printk("page_table_size = %d\n", page_table_size); */
/*      printk("_last_data_addr = 0x%08x-0x%08x (page_table)\n", _last_data_addr, _last_data_addr + page_table_size); */
        _last_data_addr += page_table_size;

        page_init(kstat.physical_pages);
}

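/* prints a summary of how physical memory has been laid out */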
void mem_stats(void)
{
        printk("\n");
        printk("memory: total=%dKB, user=%dKB, kernel=%dKB, reserved=%dKB\n", kstat.physical_pages << 2, kstat.total_mem_pages << 2, kstat.kernel_reserved, kstat.physical_reserved);
        printk("kernel: text=%dKB, data=%dKB, bss=%dKB, i/o buffers=%d (%dKB)\n", KERNEL_TEXT_SIZE / 1024, KERNEL_DATA_SIZE / 1024, KERNEL_BSS_SIZE / 1024, buffer_table_size / sizeof(struct buffer), (buffer_table_size + buffer_hash_table_size) / 1024);
        printk("\tinodes=%d (%dKB)\n\n", inode_table_size / sizeof(struct inode), (inode_table_size + inode_hash_table_size) / 1024);
}
