/*
 *	linux/mm/mremap.c
 *
 *	(C) Copyright 1996 Linus Torvalds
 */
      
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/swap.h>

#include <asm/uaccess.h>
#include <asm/pgalloc.h>

extern int vm_enough_memory(long pages);
      
static inline pte_t *get_one_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t * pgd;
	pmd_t * pmd;
	pte_t * pte = NULL;

	pgd = pgd_offset(mm, addr);
	if (pgd_none(*pgd))
		goto end;
	if (pgd_bad(*pgd)) {
		pgd_ERROR(*pgd);
		pgd_clear(pgd);
		goto end;
	}

	pmd = pmd_offset(pgd, addr);
	if (pmd_none(*pmd))
		goto end;
	if (pmd_bad(*pmd)) {
		pmd_ERROR(*pmd);
		pmd_clear(pmd);
		goto end;
	}

	pte = pte_offset(pmd, addr);
	if (pte_none(*pte))
		pte = NULL;
end:
	return pte;
}
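
/*
 * Illustrative sketch of the 3-level split the walk above relies on
 * (i386 without PAE, where the pmd level is folded into the pgd; the
 * real index calculations come from pgd_offset()/pmd_offset()/
 * pte_offset(), not from open-coded shifts like these):
 *
 *	pgd index:  (addr >> 22) & 0x3ff
 *	pte index:  (addr >> 12) & 0x3ff
 *	offset:      addr & 0xfff
 */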
      
static inline pte_t *alloc_one_pte(struct mm_struct *mm, unsigned long addr)
{
	pmd_t * pmd;
	pte_t * pte = NULL;

	pmd = pmd_alloc(pgd_offset(mm, addr), addr);
	if (pmd)
		pte = pte_alloc(pmd, addr);
	return pte;
}
      
static inline int copy_one_pte(struct mm_struct *mm, pte_t * src, pte_t * dst)
{
	int error = 0;
	pte_t pte;

	spin_lock(&mm->page_table_lock);
	if (!pte_none(*src)) {
		pte = ptep_get_and_clear(src);
		if (!dst) {
			/* No dest?  We must put it back. */
			dst = src;
			error++;
		}
		set_pte(dst, pte);
	}
	spin_unlock(&mm->page_table_lock);
	return error;
}
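
/*
 * Note the recovery trick above: when no destination pte could be
 * allocated, the entry just cleared from 'src' is written straight back
 * into 'src', so no page is ever lost -- the caller merely sees a
 * nonzero error count.
 */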
      
static int move_one_page(struct mm_struct *mm, unsigned long old_addr, unsigned long new_addr)
{
	int error = 0;
	pte_t * src;

	src = get_one_pte(mm, old_addr);
	if (src)
		error = copy_one_pte(mm, src, alloc_one_pte(mm, new_addr));
	return error;
}
      
static int move_page_tables(struct mm_struct * mm,
	unsigned long new_addr, unsigned long old_addr, unsigned long len)
{
	unsigned long offset = len;

	flush_cache_range(mm, old_addr, old_addr + len);

	/*
	 * This is not the clever way to do this, but we're taking the
	 * easy way out on the assumption that most remappings will be
	 * only a few pages.. This also makes error recovery easier.
	 */
	while (offset) {
		offset -= PAGE_SIZE;
		if (move_one_page(mm, old_addr + offset, new_addr + offset))
			goto oops_we_failed;
	}
	flush_tlb_range(mm, old_addr, old_addr + len);
	return 0;

	/*
	 * Ok, the move failed because we didn't have enough pages for
	 * the new page table tree. This is unlikely, but we have to
	 * take the possibility into account. In that case we just move
	 * all the pages back (this will work, because we still have
	 * the old page tables)
	 */
oops_we_failed:
	flush_cache_range(mm, new_addr, new_addr + len);
	while ((offset += PAGE_SIZE) < len)
		move_one_page(mm, new_addr + offset, old_addr + offset);
	zap_page_range(mm, new_addr, len);
	flush_tlb_range(mm, new_addr, new_addr + len);
	return -1;
}
      
static inline unsigned long move_vma(struct vm_area_struct * vma,
	unsigned long addr, unsigned long old_len, unsigned long new_len,
	unsigned long new_addr)
{
	struct vm_area_struct * new_vma;

	new_vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
	if (new_vma) {
		if (!move_page_tables(current->mm, new_addr, addr, old_len)) {
			*new_vma = *vma;
			new_vma->vm_start = new_addr;
			new_vma->vm_end = new_addr+new_len;
			/* Keep the file offset in sync when addr is not
			 * the start of the old vma. */
			new_vma->vm_pgoff += (addr - vma->vm_start) >> PAGE_SHIFT;
			new_vma->vm_raend = 0;
			if (new_vma->vm_file)
				get_file(new_vma->vm_file);
			if (new_vma->vm_ops && new_vma->vm_ops->open)
				new_vma->vm_ops->open(new_vma);
			insert_vm_struct(current->mm, new_vma);
			do_munmap(current->mm, addr, old_len);
			current->mm->total_vm += new_len >> PAGE_SHIFT;
			if (new_vma->vm_flags & VM_LOCKED) {
				current->mm->locked_vm += new_len >> PAGE_SHIFT;
				make_pages_present(new_vma->vm_start,
						   new_vma->vm_end);
			}
			return new_addr;
		}
		kmem_cache_free(vm_area_cachep, new_vma);
	}
	return -ENOMEM;
}
      
/*
 * Expand (or shrink) an existing mapping, potentially moving it at the
 * same time (controlled by the MREMAP_MAYMOVE flag and available VM space)
 *
 * MREMAP_FIXED option added 5-Dec-1999 by Benjamin LaHaise
 * This option implies MREMAP_MAYMOVE.
 */
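/*
 * Typical call from userspace (illustrative sketch only, via the C
 * library's mremap() wrapper from <sys/mman.h>):
 *
 *	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	p = mremap(p, 4096, 8192, MREMAP_MAYMOVE);
 *	if (p == MAP_FAILED)
 *		perror("mremap");
 */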
unsigned long do_mremap(unsigned long addr,
	unsigned long old_len, unsigned long new_len,
	unsigned long flags, unsigned long new_addr)
{
	struct vm_area_struct *vma;
	unsigned long ret = -EINVAL;

	if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
		goto out;

	if (addr & ~PAGE_MASK)
		goto out;

	old_len = PAGE_ALIGN(old_len);
	new_len = PAGE_ALIGN(new_len);

	/* new_addr is only valid if MREMAP_FIXED is specified */
	if (flags & MREMAP_FIXED) {
		if (new_addr & ~PAGE_MASK)
			goto out;
		if (!(flags & MREMAP_MAYMOVE))
			goto out;

		if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
			goto out;

		/* Check if the location we're moving into overlaps the
		 * old location at all, and fail if it does.
		 */
		if ((new_addr <= addr) && (new_addr+new_len) > addr)
			goto out;

		if ((addr <= new_addr) && (addr+old_len) > new_addr)
			goto out;
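		/*
		 * Worked example of the two tests above: addr = 0x8000,
		 * old_len = 0x2000 covers [0x8000, 0xa000).  new_addr =
		 * 0x9000 fails the second test (0x8000 <= 0x9000 and
		 * 0x8000 + 0x2000 > 0x9000), so the overlap is rejected.
		 */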
      
		do_munmap(current->mm, new_addr, new_len);
	}

	/*
	 * Always allow a shrinking remap: that just unmaps
	 * the unnecessary pages..
	 */
	ret = addr;
	if (old_len >= new_len) {
		do_munmap(current->mm, addr+new_len, old_len - new_len);
		if (!(flags & MREMAP_FIXED) || (new_addr == addr))
			goto out;
	}
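	/* (Concrete case: old_len = 3 pages, new_len = 1 page -- the two
	 *  trailing pages were unmapped above, and unless an MREMAP_FIXED
	 *  move is still pending we have already returned addr via out.) */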
      
	/*
	 * Ok, we need to grow..  or relocate.
	 */
	ret = -EFAULT;
	vma = find_vma(current->mm, addr);
	if (!vma || vma->vm_start > addr)
		goto out;
	/* We can't remap across vm area boundaries */
	if (old_len > vma->vm_end - addr)
		goto out;
	if (vma->vm_flags & VM_DONTEXPAND) {
		if (new_len > old_len)
			goto out;
	}
	if (vma->vm_flags & VM_LOCKED) {
		unsigned long locked = current->mm->locked_vm << PAGE_SHIFT;
		locked += new_len - old_len;
		ret = -EAGAIN;
		if (locked > current->rlim[RLIMIT_MEMLOCK].rlim_cur)
			goto out;
	}
	ret = -ENOMEM;
	if ((current->mm->total_vm << PAGE_SHIFT) + (new_len - old_len)
	    > current->rlim[RLIMIT_AS].rlim_cur)
		goto out;
	/* Private writable mapping? Check memory availability.. */
	if ((vma->vm_flags & (VM_SHARED | VM_WRITE)) == VM_WRITE &&
	    !(flags & MAP_NORESERVE)				 &&
	    !vm_enough_memory((new_len - old_len) >> PAGE_SHIFT))
		goto out;
      
	/* old_len exactly to the end of the area..
	 * And we're not relocating the area.
	 */
	if (old_len == vma->vm_end - addr &&
	    !((flags & MREMAP_FIXED) && (addr != new_addr)) &&
	    (old_len != new_len || !(flags & MREMAP_MAYMOVE))) {
		unsigned long max_addr = TASK_SIZE;
		if (vma->vm_next)
			max_addr = vma->vm_next->vm_start;
		/* can we just expand the current mapping? */
		if (max_addr - addr >= new_len) {
			int pages = (new_len - old_len) >> PAGE_SHIFT;
			spin_lock(&vma->vm_mm->page_table_lock);
			vma->vm_end = addr + new_len;
			spin_unlock(&vma->vm_mm->page_table_lock);
			current->mm->total_vm += pages;
			if (vma->vm_flags & VM_LOCKED) {
				current->mm->locked_vm += pages;
				make_pages_present(addr + old_len,
						   addr + new_len);
			}
			ret = addr;
			goto out;
		}
	}
      
	/*
	 * We weren't able to just expand or shrink the area,
	 * we need to create a new one and move it..
	 */
	ret = -ENOMEM;
	if (flags & MREMAP_MAYMOVE) {
		if (!(flags & MREMAP_FIXED)) {
			new_addr = get_unmapped_area(0, new_len);
			if (!new_addr)
				goto out;
		}
		ret = move_vma(vma, addr, old_len, new_len, new_addr);
	}
out:
	return ret;
}
      
asmlinkage unsigned long sys_mremap(unsigned long addr,
	unsigned long old_len, unsigned long new_len,
	unsigned long flags, unsigned long new_addr)
{
	unsigned long ret;

	down(&current->mm->mmap_sem);
	ret = do_mremap(addr, old_len, new_len, flags, new_addr);
	up(&current->mm->mmap_sem);
	return ret;
}