/*
 *	linux/mm/mprotect.c
 *
 *  (C) Copyright 1994 Linus Torvalds
 */
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/shm.h>
#include <linux/mman.h>

#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
      
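/*
 * Update every present pte under one pmd for the range
 * [address, address+size), clipped to PMD_SIZE, giving each
 * entry the new protection.
 */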
static inline void change_pte_range(pmd_t * pmd, unsigned long address,
	unsigned long size, pgprot_t newprot)
{
	pte_t * pte;
	unsigned long end;

	if (pmd_none(*pmd))
		return;
	if (pmd_bad(*pmd)) {
		pmd_ERROR(*pmd);
		pmd_clear(pmd);
		return;
	}
	pte = pte_offset(pmd, address);
	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		if (pte_present(*pte)) {
			pte_t entry;

			/* Avoid an SMP race with hardware updated dirty/clean
			 * bits by wiping the pte and then setting the new pte
			 * into place.
			 */
			entry = ptep_get_and_clear(pte);
			set_pte(pte, pte_modify(entry, newprot));
		}
		address += PAGE_SIZE;
		pte++;
	} while (address && (address < end));
}
      
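/*
 * Walk the pmds under one pgd entry, clipping the range to
 * PGDIR_SIZE, and update the ptes below each of them.
 */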
static inline void change_pmd_range(pgd_t * pgd, unsigned long address,
	unsigned long size, pgprot_t newprot)
{
	pmd_t * pmd;
	unsigned long end;

	if (pgd_none(*pgd))
		return;
	if (pgd_bad(*pgd)) {
		pgd_ERROR(*pgd);
		pgd_clear(pgd);
		return;
	}
	pmd = pmd_offset(pgd, address);
	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	do {
		change_pte_range(pmd, address, end - address, newprot);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address && (address < end));
}
      
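/*
 * Rewrite the page table entries of the current process for
 * [start, end) with the new protection, flushing the caches
 * first and the TLB afterwards.
 */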
static void change_protection(unsigned long start, unsigned long end, pgprot_t newprot)
{
	pgd_t *dir;
	unsigned long beg = start;

	dir = pgd_offset(current->mm, start);
	flush_cache_range(current->mm, beg, end);
	if (start >= end)
		BUG();
	spin_lock(&current->mm->page_table_lock);
	do {
		change_pmd_range(dir, start, end - start, newprot);
		start = (start + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (start && (start < end));
	spin_unlock(&current->mm->page_table_lock);
	flush_tlb_range(current->mm, beg, end);
	return;
}
      
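/*
 * The change covers the whole vma: just update its flags and
 * default page protection in place.
 */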
static inline int mprotect_fixup_all(struct vm_area_struct * vma,
	int newflags, pgprot_t prot)
{
	spin_lock(&vma->vm_mm->page_table_lock);
	vma->vm_flags = newflags;
	vma->vm_page_prot = prot;
	spin_unlock(&vma->vm_mm->page_table_lock);
	return 0;
}
      
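/*
 * The change starts at the beginning of the vma: split off
 * [vm_start, end) into a new vma carrying the new flags and
 * shrink the original vma to begin at 'end'.
 */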
static inline int mprotect_fixup_start(struct vm_area_struct * vma,
	unsigned long end,
	int newflags, pgprot_t prot)
{
	struct vm_area_struct * n;

	n = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
	if (!n)
		return -ENOMEM;
	*n = *vma;
	n->vm_end = end;
	n->vm_flags = newflags;
	n->vm_raend = 0;
	n->vm_page_prot = prot;
	if (n->vm_file)
		get_file(n->vm_file);
	if (n->vm_ops && n->vm_ops->open)
		n->vm_ops->open(n);
	lock_vma_mappings(vma);
	spin_lock(&vma->vm_mm->page_table_lock);
	vma->vm_pgoff += (end - vma->vm_start) >> PAGE_SHIFT;
	vma->vm_start = end;
	__insert_vm_struct(current->mm, n);
	spin_unlock(&vma->vm_mm->page_table_lock);
	unlock_vma_mappings(vma);
	return 0;
}
      
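/*
 * The change reaches the end of the vma: split off [start, vm_end)
 * into a new vma carrying the new flags and shrink the original
 * vma to end at 'start'.
 */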
static inline int mprotect_fixup_end(struct vm_area_struct * vma,
	unsigned long start,
	int newflags, pgprot_t prot)
{
	struct vm_area_struct * n;

	n = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
	if (!n)
		return -ENOMEM;
	*n = *vma;
	n->vm_start = start;
	n->vm_pgoff += (n->vm_start - vma->vm_start) >> PAGE_SHIFT;
	n->vm_flags = newflags;
	n->vm_raend = 0;
	n->vm_page_prot = prot;
	if (n->vm_file)
		get_file(n->vm_file);
	if (n->vm_ops && n->vm_ops->open)
		n->vm_ops->open(n);
	lock_vma_mappings(vma);
	spin_lock(&vma->vm_mm->page_table_lock);
	vma->vm_end = start;
	__insert_vm_struct(current->mm, n);
	spin_unlock(&vma->vm_mm->page_table_lock);
	unlock_vma_mappings(vma);
	return 0;
}
      
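/*
 * The change lies strictly inside the vma: split it into three.
 * The original vma is reused for the middle part [start, end) and
 * gets the new flags; 'left' and 'right' keep the old protection.
 */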
static inline int mprotect_fixup_middle(struct vm_area_struct * vma,
	unsigned long start, unsigned long end,
	int newflags, pgprot_t prot)
{
	struct vm_area_struct * left, * right;

	left = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
	if (!left)
		return -ENOMEM;
	right = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
	if (!right) {
		kmem_cache_free(vm_area_cachep, left);
		return -ENOMEM;
	}
	*left = *vma;
	*right = *vma;
	left->vm_end = start;
	right->vm_start = end;
	right->vm_pgoff += (right->vm_start - left->vm_start) >> PAGE_SHIFT;
	left->vm_raend = 0;
	right->vm_raend = 0;
	if (vma->vm_file)
		atomic_add(2, &vma->vm_file->f_count);
	if (vma->vm_ops && vma->vm_ops->open) {
		vma->vm_ops->open(left);
		vma->vm_ops->open(right);
	}
	lock_vma_mappings(vma);
	spin_lock(&vma->vm_mm->page_table_lock);
	vma->vm_pgoff += (start - vma->vm_start) >> PAGE_SHIFT;
	vma->vm_start = start;
	vma->vm_end = end;
	vma->vm_flags = newflags;
	vma->vm_raend = 0;
	vma->vm_page_prot = prot;
	__insert_vm_struct(current->mm, left);
	__insert_vm_struct(current->mm, right);
	spin_unlock(&vma->vm_mm->page_table_lock);
	unlock_vma_mappings(vma);
	return 0;
}
      
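/*
 * Choose the right fixup depending on how [start, end) lines up
 * with the vma boundaries, then rewrite the affected page table
 * entries.
 */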
static int mprotect_fixup(struct vm_area_struct * vma,
	unsigned long start, unsigned long end, unsigned int newflags)
{
	pgprot_t newprot;
	int error;

	if (newflags == vma->vm_flags)
		return 0;
	newprot = protection_map[newflags & 0xf];
	if (start == vma->vm_start) {
		if (end == vma->vm_end)
			error = mprotect_fixup_all(vma, newflags, newprot);
		else
			error = mprotect_fixup_start(vma, end, newflags, newprot);
	} else if (end == vma->vm_end)
		error = mprotect_fixup_end(vma, start, newflags, newprot);
	else
		error = mprotect_fixup_middle(vma, start, end, newflags, newprot);

	if (error)
		return error;

	change_protection(start, end, newprot);
	return 0;
}
      
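/*
 * mprotect() system call: validate the arguments, then walk the
 * vmas covering [start, start+len) and change the protection of
 * each, rejecting requests the mapping does not allow (the VM_MAY*
 * bits checked via newflags >> 4).
 */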
asmlinkage long sys_mprotect(unsigned long start, size_t len, unsigned long prot)
{
	unsigned long nstart, end, tmp;
	struct vm_area_struct * vma, * next;
	int error = -EINVAL;

	if (start & ~PAGE_MASK)
		return -EINVAL;
	len = PAGE_ALIGN(len);
	end = start + len;
	if (end < start)
		return -EINVAL;
	if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC))
		return -EINVAL;
	if (end == start)
		return 0;

	down(&current->mm->mmap_sem);

	vma = find_vma(current->mm, start);
	error = -EFAULT;
	if (!vma || vma->vm_start > start)
		goto out;

	for (nstart = start ; ; ) {
		unsigned int newflags;

		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */

		newflags = prot | (vma->vm_flags & ~(PROT_READ | PROT_WRITE | PROT_EXEC));
		if ((newflags & ~(newflags >> 4)) & 0xf) {
			error = -EACCES;
			break;
		}

		if (vma->vm_end >= end) {
			error = mprotect_fixup(vma, nstart, end, newflags);
			break;
		}

		tmp = vma->vm_end;
		next = vma->vm_next;
		error = mprotect_fixup(vma, nstart, tmp, newflags);
		if (error)
			break;
		nstart = tmp;
		vma = next;
		if (!vma || vma->vm_start != nstart) {
			error = -EFAULT;
			break;
		}
	}
out:
	up(&current->mm->mmap_sem);
	return error;
}