
root/mm/mmap.c


DEFINITIONS

This source file includes the following definitions; a brief usage sketch of do_mmap() follows the list.
  1. show_vma_regions
  2. get_new_vma_region
  3. sort_vma
  4. optimize_vma
  5. get_unmapped_vma_region
  6. free_vma_pages
  7. free_vma_region
  8. release_binary
  9. find_vma_region
  10. expand_heap
  11. do_mmap
  12. do_munmap
  13. do_mprotect
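
A brief usage sketch (hypothetical, not part of this file): do_mmap(), defined
below, creates an anonymous mapping when it is passed a NULL inode; the
wrapper name map_anon_example() is illustrative only.

     /* hypothetical helper: map an anonymous, private, read/write region */
     static int map_anon_example(unsigned int length)
     {
             /*
              * A NULL inode selects the anonymous branch of do_mmap(), which
              * adds ZERO_PAGE to the flags; without MAP_FIXED the address is
              * chosen by get_unmapped_vma_region().  The return value is the
              * start address of the mapping or a negative errno.
              */
             return do_mmap(NULL, 0, length, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE, 0, P_MMAP, 0);
     }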

   1 /*
   2  * fiwix/mm/mmap.c
   3  *
   4  * Copyright 2018-2021, Jordi Sanfeliu. All rights reserved.
   5  * Distributed under the terms of the Fiwix License.
   6  */
   7 
   8 #include <fiwix/asm.h>
   9 #include <fiwix/mm.h>
  10 #include <fiwix/fs.h>
  11 #include <fiwix/fcntl.h>
  12 #include <fiwix/stat.h>
  13 #include <fiwix/process.h>
  14 #include <fiwix/mman.h>
  15 #include <fiwix/errno.h>
  16 #include <fiwix/stdio.h>
  17 #include <fiwix/string.h>
  18 
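     /* print the vma region table of the process 'p' */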
  19 void show_vma_regions(struct proc *p)
  20 {
  21         __ino_t inode;
  22         int major, minor;
  23         char *section;
  24         char r, w, x, f;
  25         struct vma *vma;
  26         unsigned int n;
  27         int count;
  28 
  29         vma = p->vma;
  30         printk("num  address range         flag offset     dev   inode      mod section cnt\n");
  31         printk("---- --------------------- ---- ---------- ----- ---------- --- ------- ----\n");
  32         for(n = 0; n < VMA_REGIONS && vma->start; n++, vma++) {
  33                 r = vma->prot & PROT_READ ? 'r' : '-';
  34                 w = vma->prot & PROT_WRITE ? 'w' : '-';
  35                 x = vma->prot & PROT_EXEC ? 'x' : '-';
  36                 if(vma->flags & MAP_SHARED) {
  37                         f = 's';
  38                 } else if(vma->flags & MAP_PRIVATE) {
  39                         f = 'p';
  40                 } else {
  41                         f = '-';
  42                 }
  43                 switch(vma->s_type) {
  44                         case P_TEXT:    section = "text ";
  45                                         break;
  46                         case P_DATA:    section = "data ";
  47                                         break;
  48                         case P_BSS:     section = "bss  ";
  49                                         break;
  50                         case P_HEAP:    section = "heap ";
  51                                         break;
  52                         case P_STACK:   section = "stack";
  53                                         break;
  54                         case P_MMAP:    section = "mmap ";
  55                                         break;
  56                         default:
  57                                 section = NULL;
  58                                 break;
  59                 }
  60                 inode = major = minor = count = 0;
  61                 if(vma->inode) {
  62                         inode = vma->inode->inode;
  63                         major = MAJOR(vma->inode->dev);
  64                         minor = MINOR(vma->inode->dev);
  65                         count = vma->inode->count;
  66                 }
  67                 printk("[%02d] 0x%08x-0x%08x %c%c%c%c 0x%08x %02d:%02d %- 10u <%d> [%s]  (%d)\n", n, vma->start, vma->end, r, w, x, f, vma->offset, major, minor, inode, vma->o_mode, section, count);
  68         }
  69         if(!n) {
  70                 printk("[no vma regions]\n");
  71         }
  72 }
  73 
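     /* return the first unused slot of the vma array, or NULL if it is full */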
  74 static struct vma * get_new_vma_region(void)
  75 {
  76         unsigned int n;
  77         struct vma *vma;
  78 
  79         vma = current->vma;
  80 
  81         for(n = 0; n < VMA_REGIONS; n++, vma++) {
  82                 if(!vma->start && !vma->end) {
  83                         return vma;
  84                 }
  85         }
  86         return NULL;
  87 }
  88 
  89 /*
  90  * This sorts regions in ascending order of start address, merging contiguous
  91  * regions with identical attributes and keeping unused entries at the end.
  92  */
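     /*
      * Merge illustration (hypothetical values): two contiguous anonymous
      * entries such as
      *   [01] 0x40000000-0x40002000 rw-p 0x00000000 (inode 0)
      *   [02] 0x40002000-0x40004000 rw-p 0x00000000 (inode 0)
      * share the same prot, flags, offset, s_type and inode, so they are
      * collapsed into a single 0x40000000-0x40004000 entry and the freed
      * slot is zeroed and moved to the end of the array.
      */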
  93 static void sort_vma(void)
  94 {
  95         unsigned int n, n2, needs_sort;
  96         struct vma *vma, tmp;
  97 
  98         vma = current->vma;
  99 
 100         do {
 101                 needs_sort = 0;
 102                 for(n = 0, n2 = 1; n2 < VMA_REGIONS; n++, n2++) {
 103                         if(vma[n].end && vma[n2].start) {
 104                                 if((vma[n].end == vma[n2].start) &&
 105                                   (vma[n].prot == vma[n2].prot) &&
 106                                   (vma[n].flags == vma[n2].flags) &&
 107                                   (vma[n].offset == vma[n2].offset) &&
 108                                   (vma[n].s_type == vma[n2].s_type) &&
 109                                   (vma[n].inode == vma[n2].inode)) {
 110                                         vma[n].end = vma[n2].end;
 111                                         memset_b(&vma[n2], NULL, sizeof(struct vma));
 112                                         needs_sort++;
 113                                 }
 114                         }
 115                         if((vma[n2].start && (vma[n].start > vma[n2].start)) || (!vma[n].start && vma[n2].start)) {
 116                                 memcpy_b(&tmp, &vma[n], sizeof(struct vma));
 117                                 memcpy_b(&vma[n], &vma[n2], sizeof(struct vma));
 118                                 memcpy_b(&vma[n2], &tmp, sizeof(struct vma));
 119                                 needs_sort++;
 120                         }
 121                 }
 122         } while(needs_sort);
 123 }
 124 
 125 /*
 126  * This function resolves overlapping regions by splitting them up.
 127  *
 128  * For example, if for any reason the map looks like this:
 129  * [01] 0x0808e984-0x08092000 rw-p 0x00000000 0
 130  * [02] 0x0808f000-0x0808ffff rw-p 0x000c0000 4066
 131  *
 132  * this function converts it to this:
 133  * [01] 0x0808e984-0x0808f000 rw-p 0x00000000 0
 134  * [02] 0x0808f000-0x0808ffff rw-p 0x000c0000 4066
 135  * [03] 0x08090000-0x08092000 rw-p 0x00000000 0
 136  */
 137 static int optimize_vma(void)
 138 {
 139         unsigned int n, needs_sort;
 140         struct vma *vma, *prev, *new;
 141 
 142         for(;;) {
 143                 needs_sort = 0;
 144                 prev = new = NULL;
 145                 vma = current->vma;
 146                 for(n = 0; n < VMA_REGIONS && vma->start; n++, vma++) {
 147                         if(!prev) {
 148                                 prev = vma;
 149                                 continue;
 150                         }
 151                         if((vma->start < prev->end)) {
 152                                 if(!(new = get_new_vma_region())) {
 153                                         printk("WARNING: %s(): unable to get a free vma region.\n", __FUNCTION__);
 154                                         return -ENOMEM;
 155                                 }
 156                                 new->start = vma->end;
 157                                 new->end = prev->end;
 158                                 new->prot = prev->prot;
 159                                 new->flags = prev->flags;
 160                                 new->offset = prev->offset;
 161                                 new->s_type = prev->s_type;
 162                                 new->inode = prev->inode;
 163                                 new->o_mode = prev->o_mode;
 164                                 prev->end = vma->start;
 165                                 needs_sort++;
 166                                 if(prev->start == prev->end) {
 167                                         memset_b(prev, NULL, sizeof(struct vma));
 168                                 }
 169                                 if(new->start == new->end) {
 170                                         memset_b(new, NULL, sizeof(struct vma));
 171                                 }
 172                                 break;
 173                         }
 174                         prev = vma;
 175                 }
 176                 if(!needs_sort) {
 177                         break;
 178                 }
 179                 sort_vma();
 180         }
 181 
 182         return 0;
 183 }
 184 
 185 /* return the first free address with enough room for 'length' bytes */
 186 static unsigned int get_unmapped_vma_region(unsigned int length)
 187 {
 188         unsigned int n, addr;
 189         struct vma *vma;
 190 
 191         if(!length) {
 192                 return 0;
 193         }
 194 
 195         addr = MMAP_START;
 196         vma = current->vma;
 197 
 198         for(n = 0; n < VMA_REGIONS && vma->start; n++, vma++) {
 199                 if(vma->start < MMAP_START) {
 200                         continue;
 201                 }
 202                 if(vma->start - addr >= length) {
 203                         return PAGE_ALIGN(addr);
 204                 }
 205                 addr = PAGE_ALIGN(vma->end);
 206         }
 207         return 0;
 208 }
 209 
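     /*
      * Unmap and free the physical pages backing [start, start + length) of
      * the given region.  Reserved pages are skipped, pages of shared
      * writable file mappings are written back to the file first, and page
      * tables left completely empty are freed as well.
      */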
 210 static void free_vma_pages(unsigned int start, __size_t length, struct vma *vma)
 211 {
 212         unsigned int n, addr;
 213         unsigned int *pgdir, *pgtbl;
 214         unsigned int pde, pte;
 215         struct page *pg;
 216         int page;
 217 
 218         pgdir = (unsigned int *)P2V(current->tss.cr3);
 219         pgtbl = NULL;
 220 
 221         for(n = 0; n < (length / PAGE_SIZE); n++) {
 222                 pde = GET_PGDIR(start + (n * PAGE_SIZE));
 223                 pte = GET_PGTBL(start + (n * PAGE_SIZE));
 224                 if(pgdir[pde] & PAGE_PRESENT) {
 225                         pgtbl = (unsigned int *)P2V((pgdir[pde] & PAGE_MASK));
 226                         if(pgtbl[pte] & PAGE_PRESENT) {
 227                                 /* make sure to not free reserved pages */
 228                                 page = pgtbl[pte] >> PAGE_SHIFT;
 229                                 pg = &page_table[page];
 230                                 if(pg->flags & PAGE_RESERVED) {
 231                                         continue;
 232                                 }
 233 
 234                                 if(vma->prot & PROT_WRITE && vma->flags & MAP_SHARED) {
 235                                         addr = start - vma->start + vma->offset;
 236                                         write_page(pg, vma->inode, addr, length);
 237                                 }
 238 
 239                                 kfree(P2V(pgtbl[pte]) & PAGE_MASK);
 240                                 current->rss--;
 241                                 pgtbl[pte] = NULL;
 242 
 243                                 /* check if a page table can be freed */
 244                                 for(pte = 0; pte < PT_ENTRIES; pte++) {
 245                                         if(pgtbl[pte] & PAGE_MASK) {
 246                                                 break;
 247                                         }
 248                                 }
 249                                 if(pte == PT_ENTRIES) {
 250                                         kfree((unsigned int)pgtbl & PAGE_MASK);
 251                                         current->rss--;
 252                                         pgdir[pde] = NULL;
 253                                 }
 254                         }
 255                 }
 256         }
 257 }
 258 
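     /*
      * Shrink 'vma' so that it no longer covers [start, start + length); any
      * remainder above that range is kept in a newly allocated region.  If
      * the original region becomes empty, its inode is released and the
      * entry is cleared.
      */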
 259 static int free_vma_region(struct vma *vma, unsigned int start, __ssize_t length)
 260 {
 261         struct vma *new;
 262 
 263         if(!(new = get_new_vma_region())) {
 264                 printk("WARNING: %s(): unable to get a free vma region.\n", __FUNCTION__);
 265                 return -ENOMEM;
 266         }
 267 
 268         new->start = start + length;
 269         new->end = vma->end;
 270         new->prot = vma->prot;
 271         new->flags = vma->flags;
 272         new->offset = vma->offset;
 273         new->s_type = vma->s_type;
 274         new->inode = vma->inode;
 275         new->o_mode = vma->o_mode;
 276 
 277         vma->end = start;
 278 
 279         if(vma->start == vma->end) {
 280                 if(vma->inode) {
 281                         iput(vma->inode);
 282                 }
 283                 memset_b(vma, NULL, sizeof(struct vma));
 284         }
 285         if(new->start == new->end) {
 286                 memset_b(new, NULL, sizeof(struct vma));
 287         }
 288         return 0;
 289 }
 290 
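     /* release all the memory regions (pages and vma entries) of the current process */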
 291 void release_binary(void)
 292 {
 293         unsigned int n;
 294         struct vma *vma;
 295 
 296         vma = current->vma;
 297 
 298         for(n = 0; n < VMA_REGIONS && vma->start; n++, vma++) {
 299                 free_vma_pages(vma->start, vma->end - vma->start, vma);
 300                 free_vma_region(vma, vma->start, vma->end - vma->start);
 301         }
 302         sort_vma();
 303         optimize_vma();
 304         invalidate_tlb();
 305 }
 306 
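     /* return the vma region containing 'addr', or NULL if it is not mapped */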
 307 struct vma * find_vma_region(unsigned int addr)
 308 {
 309         unsigned int n;
 310         struct vma *vma;
 311 
 312         if(!addr) {
 313                 return NULL;
 314         }
 315 
 316         addr &= PAGE_MASK;
 317         vma = current->vma;
 318 
 319         for(n = 0; n < VMA_REGIONS && vma->start; n++, vma++) {
 320                 if((addr >= vma->start) && (addr < vma->end)) {
 321                         return vma;
 322                 }
 323         }
 324         return NULL;
 325 }
 326 
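     /*
      * Try to extend the heap region up to the new break address 'new',
      * making sure it does not overlap the next region.  Returns 0 on
      * success and 1 on failure (out of memory).
      */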
 327 int expand_heap(unsigned int new)
 328 {
 329         unsigned int n;
 330         struct vma *vma, *heap;
 331 
 332         vma = current->vma;
 333         heap = NULL;
 334 
 335         for(n = 0; n < VMA_REGIONS && vma->start; n++, vma++) {
 336                 /* make sure the new heap won't overlap the next region */
 337                 if(heap && new < vma->start) {
 338                         heap->end = new;
 339                         return 0;
 340                 } else {
 341                         heap = NULL;    /* was a bad candidate */
 342                 }
 343                 if(!heap && vma->s_type == P_HEAP) {
 344                         heap = vma;     /* possible candidate */
 345                         continue;
 346                 }
 347         }
 348 
 349         /* out of memory! */
 350         return 1;
 351 }
 352 
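     /*
      * Create a new memory region, either file-backed (inode 'i') or
      * anonymous.  Returns the start address of the mapping on success, or
      * a negative errno value on failure.
      */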
 353 int do_mmap(struct inode *i, unsigned int start, unsigned int length, unsigned int prot, unsigned int flags, unsigned int offset, char type, char mode)
 354 {
 355         struct vma *vma;
 356         int errno;
 357 
 358         if(!(length = PAGE_ALIGN(length))) {
 359                 return start;
 360         }
 361 
 362         if(start > KERNEL_BASE_ADDR || start + length > KERNEL_BASE_ADDR) {
 363                 return -EINVAL;
 364         }
 365 
 366         /* file mapping */
 367         if(i) {
 368                 if(!S_ISREG(i->i_mode) && !S_ISCHR(i->i_mode)) {
 369                         return -ENODEV;
 370                 }
 371 
 372                 /* 
 373                  * The file shall have been opened with read permission,
 374                  * regardless of the protection options specified.
 375                  * IEEE Std 1003.1, 2004 Edition.
 376                  */
 377                 if(mode == O_WRONLY) {
 378                         return -EACCES;
 379                 }
 380                 switch(flags & MAP_TYPE) {
 381                         case MAP_SHARED:
 382                                 if(prot & PROT_WRITE) {
 383                                         if(!(mode & (O_WRONLY | O_RDWR))) {
 384                                                 return -EACCES;
 385                                         }
 386                                 }
 387                                 break;
 388                         case MAP_PRIVATE:
 389                                 break;
 390                         default:
 391                                 return -EINVAL;
 392                 }
 393                 i->count++;
 394 
 395         /* anonymous mapping */
 396         } else {
 397                 if((flags & MAP_TYPE) != MAP_PRIVATE) {
 398                         return -EINVAL;
 399                 }
 400 
 401                 /* anonymous objects must be filled with zeros */
 402                 flags |= ZERO_PAGE;
 403         }
 404 
 405         if(flags & MAP_FIXED) {
 406                 if(start & ~PAGE_MASK) {
 407                         return -EINVAL;
 408                 }
 409         } else {
 410                 start = get_unmapped_vma_region(length);
 411                 if(!start) {
 412                         printk("WARNING: %s(): unable to get an unmapped vma region.\n", __FUNCTION__);
 413                         return -ENOMEM;
 414                 }
 415         }
 416 
 417         if(!(vma = get_new_vma_region())) {
 418                 printk("WARNING: %s(): unable to get a free vma region.\n", __FUNCTION__);
 419                 return -ENOMEM;
 420         }
 421 
 422         vma->start = start;
 423         vma->end = start + length;
 424         vma->prot = prot;
 425         vma->flags = flags;
 426         vma->offset = offset;
 427         vma->s_type = type;
 428         vma->inode = i;
 429         vma->o_mode = mode;
 430 
 431         if(i && i->fsop->mmap) {
 432                 if((errno = i->fsop->mmap(i, vma))) {
 433                         int errno2;
 434 
 435                         if((errno2 = free_vma_region(vma, start, length))) {
 436                                 return errno2;
 437                         }
 438                         sort_vma();
 439                         if((errno2 = optimize_vma())) {
 440                                 return errno2;
 441                         }
 442                         return errno;
 443                 }
 444         }
 445 
 446         sort_vma();
 447         if((errno = optimize_vma())) {
 448                 return errno;
 449         }
 450         return start;
 451 }
 452 
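     /*
      * Unmap the address range [addr, addr + length), region by region,
      * freeing both the pages and the vma entries involved.
      */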
 453 int do_munmap(unsigned int addr, __size_t length)
 454 {
 455         struct vma *vma;
 456         unsigned int size;
 457         int errno;
 458 
 459         if((addr & ~PAGE_MASK) || length < 0) {
 460                 return -EINVAL;
 461         }
 462 
 463         length = PAGE_ALIGN(length);
 464 
 465         while(length) {
 466                 if((vma = find_vma_region(addr))) {
 467                         if((addr + length) > vma->end) {
 468                                 size = vma->end - addr;
 469                         } else {
 470                                 size = length;
 471                         }
 472 
 473                         free_vma_pages(addr, size, vma);
 474                         invalidate_tlb();
 475                         if((errno = free_vma_region(vma, addr, size))) {
 476                                 return errno;
 477                         }
 478                         sort_vma();
 479                         if((errno = optimize_vma())) {
 480                                 return errno;
 481                         }
 482                         length -= size;
 483                         addr += size;
 484                 } else {
 485                         break;
 486                 }
 487         }
 488 
 489         return 0;
 490 }
 491 
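     /*
      * Change the protection of [addr, addr + length) inside 'vma' by adding
      * an overlapping region with the new 'prot'; sort_vma() and
      * optimize_vma() then resolve the overlap.
      */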
 492 int do_mprotect(struct vma *vma, unsigned int addr, __size_t length, int prot)
 493 {
 494         struct vma *new;
 495         int errno;
 496 
 497         if(!(new = get_new_vma_region())) {
 498                 printk("WARNING: %s(): unable to get a free vma region.\n", __FUNCTION__);
 499                 return -ENOMEM;
 500         }
 501 
 502         new->start = addr;
 503         new->end = addr + length;
 504         new->prot = prot;
 505         new->flags = vma->flags;
 506         new->offset = vma->offset;
 507         new->s_type = vma->s_type;
 508         new->inode = vma->inode;
 509         new->o_mode = vma->o_mode;
 510 
 511         sort_vma();
 512         if((errno = optimize_vma())) {
 513                 return errno;
 514         }
 515         return 0;
 516 }
