/*
 * arch/i386/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PCs.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/vmalloc.h>
#include <asm/io.h>
#include <asm/pgalloc.h>
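
/*
 * Illustrative usage sketch (not part of the original file): a driver
 * would typically map a device's MMIO range, access it with the read*()
 * and write*() accessors, and tear the mapping down again.  The names
 * bar_phys, bar_len and STATUS_REG below are hypothetical:
 *
 *	void *regs = ioremap(bar_phys, bar_len);
 *	if (!regs)
 *		return -ENOMEM;
 *	status = readl(regs + STATUS_REG);
 *	iounmap(regs);
 *
 * ioremap() is the <asm/io.h> wrapper that calls __ioremap() below with
 * flags == 0.
 */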
      
static inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned long size,
	unsigned long phys_addr, unsigned long flags)
{
	unsigned long end;

	/* Keep only the offset within the PMD-sized region covered by this
	 * page table, and clamp the end so we never run past it. */
	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	if (address >= end)
		BUG();
	do {
		if (!pte_none(*pte)) {
			printk("remap_area_pte: page already exists\n");
			BUG();
		}
		set_pte(pte, mk_pte_phys(phys_addr, __pgprot(_PAGE_PRESENT | _PAGE_RW |
					_PAGE_DIRTY | _PAGE_ACCESSED | flags)));
		address += PAGE_SIZE;
		phys_addr += PAGE_SIZE;
		pte++;
	} while (address && (address < end));
}
      
static inline int remap_area_pmd(pmd_t * pmd, unsigned long address, unsigned long size,
	unsigned long phys_addr, unsigned long flags)
{
	unsigned long end;

	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	/* Keep phys_addr as a delta, so that "address + phys_addr" below
	 * yields the right physical address as "address" advances. */
	phys_addr -= address;
	if (address >= end)
		BUG();
	do {
		pte_t * pte = pte_alloc_kernel(pmd, address);
		if (!pte)
			return -ENOMEM;
		remap_area_pte(pte, address, end - address, address + phys_addr, flags);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address && (address < end));
	return 0;
}
      
static int remap_area_pages(unsigned long address, unsigned long phys_addr,
				 unsigned long size, unsigned long flags)
{
	pgd_t * dir;
	unsigned long end = address + size;

	phys_addr -= address;
	dir = pgd_offset(&init_mm, address);
	flush_cache_all();
	if (address >= end)
		BUG();
	/* Allocate any missing pmd and pte levels and fill in the ptes,
	 * one pgd entry at a time. */
	do {
		pmd_t *pmd;
		pmd = pmd_alloc_kernel(dir, address);
		if (!pmd)
			return -ENOMEM;
		if (remap_area_pmd(pmd, address, end - address,
					 phys_addr + address, flags))
			return -ENOMEM;
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (address && (address < end));
	flush_tlb_all();
	return 0;
}
      
/*
 * Generic mapping function (not visible outside):
 */

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
{
	void * addr;
	struct vm_struct * area;
	unsigned long offset, last_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (phys_addr >= 0xA0000 && last_addr < 0x100000)
		return phys_to_virt(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (phys_addr < virt_to_phys(high_memory)) {
		char *t_addr, *t_end;
		struct page *page;

		t_addr = __va(phys_addr);
		t_end = t_addr + (size - 1);

		/* Only reserved pages (e.g. a hole punched into RAM) may be
		 * remapped; anything else is in normal use. */
		for(page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
			if(!PageReserved(page))
				return NULL;
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	/* last_addr is inclusive, so round up from last_addr + 1 */
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	addr = area->addr;
	if (remap_area_pages(VMALLOC_VMADDR(addr), phys_addr, size, flags)) {
		vfree(addr);
		return NULL;
	}
	/* Hand the caller's original sub-page offset back within the mapping */
	return (void *) (offset + (char *)addr);
}
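
/*
 * Worked example of the alignment handling above (illustrative only,
 * with a hypothetical register at physical address 0xfebf0004):
 * __ioremap(0xfebf0004, 8, 0) computes last_addr = 0xfebf000b and
 * offset = 0x4, rounds phys_addr down to 0xfebf0000 and size up to one
 * page (0x1000), maps that whole page, and returns area->addr + 0x4 so
 * the caller's pointer lands exactly on the requested byte.  iounmap()
 * below masks that sub-page offset off again before freeing the area.
 */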
      
void iounmap(void *addr)
{
	/* A pointer below high_memory is a direct-mapped address returned by
	 * __ioremap() for the low PCI/ISA range; there is nothing to free. */
	if (addr > high_memory)
		return vfree((void *) (PAGE_MASK & (unsigned long) addr));
}