/*
       * fs/dcache.c
       *
       * Complete reimplementation
       * (C) 1997 Thomas Schoebel-Theuer,
       * with heavy changes by Linus Torvalds
       */
      
      /*
       * Notes on the allocation strategy:
       *
       * The dcache is a master of the icache - whenever a dcache entry
       * exists, the inode will always exist. "iput()" is done either when
       * the dcache entry is deleted or garbage collected.
       */
      
      #include <linux/config.h>
      #include <linux/string.h>
      #include <linux/mm.h>
      #include <linux/fs.h>
      #include <linux/malloc.h>
      #include <linux/slab.h>
      #include <linux/init.h>
      #include <linux/smp_lock.h>
      #include <linux/cache.h>
      
      #include <asm/uaccess.h>
      
      #define DCACHE_PARANOIA 1
      /* #define DCACHE_DEBUG 1 */
      
      spinlock_t dcache_lock = SPIN_LOCK_UNLOCKED;
      
      /* Right now the dcache depends on the kernel lock */
      #define check_lock()	if (!kernel_locked()) BUG()
      
      static kmem_cache_t *dentry_cache; 
      
      /*
       * This is the single most critical data structure when it comes
       * to the dcache: the hashtable for lookups. Somebody should try
       * to make this good - I've just made it work.
       *
       * This hash-function tries to avoid losing too many bits of hash
       * information, yet avoid using a prime hash-size or similar.
       */
      #define D_HASHBITS     d_hash_shift
      #define D_HASHMASK     d_hash_mask
      
      static unsigned int d_hash_mask;
      static unsigned int d_hash_shift;
      static struct list_head *dentry_hashtable;
      static LIST_HEAD(dentry_unused);
      
      struct {
      	int nr_dentry;
      	int nr_unused;
      	int age_limit;		/* age in seconds */
      	int want_pages;		/* pages requested by system */
      	int dummy[2];
      } dentry_stat = {0, 0, 45, 0,};
      
      /* no dcache_lock, please */
static inline void d_free(struct dentry *dentry)
{
	if (dentry->d_op && dentry->d_op->d_release)
		dentry->d_op->d_release(dentry);
	if (dname_external(dentry))
      		kfree(dentry->d_name.name);
      	kmem_cache_free(dentry_cache, dentry); 
      	dentry_stat.nr_dentry--;
      }
      
      /*
 * Release the dentry's inode, using the filesystem
       * d_iput() operation if defined.
       * Called with dcache_lock held, drops it.
       */
static inline void dentry_iput(struct dentry * dentry)
{
	struct inode *inode = dentry->d_inode;
	if (inode) {
		dentry->d_inode = NULL;
		list_del_init(&dentry->d_alias);
		spin_unlock(&dcache_lock);
		if (dentry->d_op && dentry->d_op->d_iput)
			dentry->d_op->d_iput(dentry, inode);
		else
			iput(inode);
	} else
		spin_unlock(&dcache_lock);
      }
      
      /* 
       * This is dput
       *
       * This is complicated by the fact that we do not want to put
       * dentries that are no longer on any hash chain on the unused
       * list: we'd much rather just get rid of them immediately.
       *
       * However, that implies that we have to traverse the dentry
       * tree upwards to the parents which might _also_ now be
       * scheduled for deletion (it may have been only waiting for
       * its last child to go away).
       *
       * This tail recursion is done by hand as we don't want to depend
       * on the compiler to always get this right (gcc generally doesn't).
       * Real recursion would eat up our stack space.
       */
      
/**
       * dput - release a dentry
       * @dentry: dentry to release 
       *
       * Release a dentry. This will drop the usage count and if appropriate
       * call the dentry unlink method as well as removing it from the queues and
       * releasing its resources. If the parent dentries were scheduled for release
       * they too may now get deleted.
       *
       * no dcache lock, please.
       */
      
void dput(struct dentry *dentry)
{
	if (!dentry)
		return;

repeat:
	if (!atomic_dec_and_lock(&dentry->d_count, &dcache_lock))
		return;

	/* dput on a free dentry? */
	if (!list_empty(&dentry->d_lru))
		BUG();
	/*
	 * AV: ->d_delete() is _NOT_ allowed to block now.
	 */
	if (dentry->d_op && dentry->d_op->d_delete) {
		if (dentry->d_op->d_delete(dentry))
			goto unhash_it;
	}
	/* Unreachable? Get rid of it */
	if (list_empty(&dentry->d_hash))
		goto kill_it;
	list_add(&dentry->d_lru, &dentry_unused);
	dentry_stat.nr_unused++;
	/*
	 * Update the timestamp
	 */
	dentry->d_reftime = jiffies;
	spin_unlock(&dcache_lock);
	return;
      
      unhash_it:
      	list_del_init(&dentry->d_hash);
      
      kill_it: {
      		struct dentry *parent;
      		list_del(&dentry->d_child);
      		/* drops the lock, at that point nobody can reach this dentry */
      		dentry_iput(dentry);
      		parent = dentry->d_parent;
      		d_free(dentry);
		if (dentry == parent)
			return;
		dentry = parent;
		goto repeat;
      	}
      }
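
/*
 * Usage sketch (not from this file): every reference obtained with
 * dget(), or returned counted by d_lookup(), must be balanced by
 * exactly one dput(). A hypothetical caller ("name" being a filled-in
 * qstr) would look like:
 *
 *	struct dentry *de = d_lookup(dir, &name);
 *	if (de) {
 *		... use de->d_inode, de->d_name ...
 *		dput(de);
 *	}
 *
 * The final dput() either frees the dentry at once (if it is
 * unhashed) or parks it on dentry_unused for later pruning.
 */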
      
      /**
       * d_invalidate - invalidate a dentry
       * @dentry: dentry to invalidate
       *
       * Try to invalidate the dentry if it turns out to be
       * possible. If there are other dentries that can be
       * reached through this one we can't delete it and we
       * return -EBUSY. On success we return 0.
       *
       * no dcache lock.
       */
       
int d_invalidate(struct dentry * dentry)
{
	/*
	 * If it's already been dropped, return OK.
	 */
	spin_lock(&dcache_lock);
	if (list_empty(&dentry->d_hash)) {
		spin_unlock(&dcache_lock);
		return 0;
	}
	/*
	 * Check whether to do a partial shrink_dcache
	 * to get rid of unused child entries.
	 */
	if (!list_empty(&dentry->d_subdirs)) {
		spin_unlock(&dcache_lock);
		shrink_dcache_parent(dentry);
		spin_lock(&dcache_lock);
	}

	/*
	 * Somebody else still using it?
	 *
	 * If it's a directory, we can't drop it
	 * for fear of somebody re-populating it
	 * with children (even though dropping it
	 * would make it unreachable from the root,
	 * we might still populate it if it was a
	 * working directory or similar).
	 */
	if (atomic_read(&dentry->d_count) > 1) {
		if (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode)) {
			spin_unlock(&dcache_lock);
			return -EBUSY;
		}
	}

	list_del_init(&dentry->d_hash);
	spin_unlock(&dcache_lock);
	return 0;
      }
      
      /* This should be called _only_ with dcache_lock held */
      
static inline struct dentry * __dget_locked(struct dentry *dentry)
{
	atomic_inc(&dentry->d_count);
	if (atomic_read(&dentry->d_count) == 1) {
		dentry_stat.nr_unused--;
		list_del(&dentry->d_lru);
		INIT_LIST_HEAD(&dentry->d_lru);		/* make "list_empty()" work */
	}
	return dentry;
      }
      
struct dentry * dget_locked(struct dentry *dentry)
{
	return __dget_locked(dentry);
      }
      
      /**
       * d_find_alias - grab a hashed alias of inode
       * @inode: inode in question
       *
 * If the inode has a hashed alias, acquire a reference to that alias
 * and return it. Otherwise return %NULL. Note that if the inode is a
 * directory there can be only one alias, and it can be unhashed only
 * if it has no children.
       */
      
struct dentry * d_find_alias(struct inode *inode)
{
	struct list_head *head, *next, *tmp;
	struct dentry *alias;

	spin_lock(&dcache_lock);
	head = &inode->i_dentry;
	next = inode->i_dentry.next;
	while (next != head) {
		tmp = next;
		next = tmp->next;
		alias = list_entry(tmp, struct dentry, d_alias);
		if (!list_empty(&alias->d_hash)) {
			__dget_locked(alias);
			spin_unlock(&dcache_lock);
			return alias;
		}
	}
	spin_unlock(&dcache_lock);
	return NULL;
      }
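
/*
 * A hedged example of the intended calling pattern, e.g. a filesystem
 * that wants a connected name for an inode it already holds:
 *
 *	struct dentry *alias = d_find_alias(inode);
 *	if (alias) {
 *		... alias is hashed and its count is raised ...
 *		dput(alias);
 *	}
 */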
      
      /*
 * Try to kill dentries associated with this inode.
       * WARNING: you must own a reference to inode.
       */
void d_prune_aliases(struct inode *inode)
{
	struct list_head *tmp, *head = &inode->i_dentry;
restart:
	spin_lock(&dcache_lock);
	tmp = head;
	while ((tmp = tmp->next) != head) {
		struct dentry *dentry = list_entry(tmp, struct dentry, d_alias);
		if (!atomic_read(&dentry->d_count)) {
			__dget_locked(dentry);
			spin_unlock(&dcache_lock);
			d_drop(dentry);
			dput(dentry);
			goto restart;
		}
	}
	spin_unlock(&dcache_lock);
      }
      
      /*
       * Throw away a dentry - free the inode, dput the parent.
       * This requires that the LRU list has already been
       * removed.
 * Called with dcache_lock held; drops it, then reacquires it.
       */
static inline void prune_one_dentry(struct dentry * dentry)
{
	struct dentry * parent;

	list_del_init(&dentry->d_hash);
	list_del(&dentry->d_child);
	dentry_iput(dentry);
	parent = dentry->d_parent;
	d_free(dentry);
	if (parent != dentry)
      		dput(parent);
      	spin_lock(&dcache_lock);
      }
      
      /**
       * prune_dcache - shrink the dcache
 * @count: number of entries to try to free
       *
       * Shrink the dcache. This is done when we need
       * more memory, or simply when we need to unmount
       * something (at which point we need to unuse
       * all dentries).
       *
       * This function may fail to free any resources if
       * all the dentries are in use.
       */
       
void prune_dcache(int count)
{
	spin_lock(&dcache_lock);
	for (;;) {
		struct dentry *dentry;
		struct list_head *tmp;

		tmp = dentry_unused.prev;

		if (tmp == &dentry_unused)
			break;
		list_del_init(tmp);
		dentry = list_entry(tmp, struct dentry, d_lru);

		/* If the dentry was recently referenced, don't free it. */
		if (dentry->d_flags & DCACHE_REFERENCED) {
			dentry->d_flags &= ~DCACHE_REFERENCED;
			list_add(&dentry->d_lru, &dentry_unused);
			count--;
			continue;
		}
		dentry_stat.nr_unused--;

		/* Unused dentry with a count? */
		if (atomic_read(&dentry->d_count))
			BUG();

		prune_one_dentry(dentry);
		if (!--count)
			break;
	}
	spin_unlock(&dcache_lock);
      }
      
      /*
       * Shrink the dcache for the specified super block.
       * This allows us to unmount a device without disturbing
       * the dcache for the other devices.
       *
       * This implementation makes just two traversals of the
       * unused list.  On the first pass we move the selected
       * dentries to the most recent end, and on the second
       * pass we free them.  The second pass must restart after
       * each dput(), but since the target dentries are all at
       * the end, it's really just a single traversal.
       */
      
      /**
       * shrink_dcache_sb - shrink dcache for a superblock
       * @sb: superblock
       *
       * Shrink the dcache for the specified super block. This
       * is used to free the dcache before unmounting a file
 * system.
       */
      
void shrink_dcache_sb(struct super_block * sb)
{
	struct list_head *tmp, *next;
	struct dentry *dentry;

	/*
	 * Pass one ... move the dentries for the specified
	 * superblock to the most recent end of the unused list.
	 */
	spin_lock(&dcache_lock);
	next = dentry_unused.next;
	while (next != &dentry_unused) {
		tmp = next;
		next = tmp->next;
		dentry = list_entry(tmp, struct dentry, d_lru);
		if (dentry->d_sb != sb)
			continue;
		list_del(tmp);
		list_add(tmp, &dentry_unused);
	}

	/*
	 * Pass two ... free the dentries for this superblock.
	 */
repeat:
	next = dentry_unused.next;
	while (next != &dentry_unused) {
		tmp = next;
		next = tmp->next;
		dentry = list_entry(tmp, struct dentry, d_lru);
		if (dentry->d_sb != sb)
			continue;
		if (atomic_read(&dentry->d_count))
			continue;
		dentry_stat.nr_unused--;
		list_del(tmp);
		INIT_LIST_HEAD(tmp);
		prune_one_dentry(dentry);
		goto repeat;
	}
	spin_unlock(&dcache_lock);
      }
      
      /*
       * Search for at least 1 mount point in the dentry's subdirs.
       * We descend to the next level whenever the d_subdirs
       * list is non-empty and continue searching.
       */
       
      /**
       * have_submounts - check for mounts over a dentry
       * @parent: dentry to check.
       *
       * Return true if the parent or its subdirectories contain
 * a mount point.
       */
       
int have_submounts(struct dentry *parent)
{
	struct dentry *this_parent = parent;
	struct list_head *next;

	spin_lock(&dcache_lock);
	if (d_mountpoint(parent))
		goto positive;
repeat:
	next = this_parent->d_subdirs.next;
resume:
	while (next != &this_parent->d_subdirs) {
		struct list_head *tmp = next;
		struct dentry *dentry = list_entry(tmp, struct dentry, d_child);
		next = tmp->next;
		/* Have we found a mount point? */
		if (d_mountpoint(dentry))
			goto positive;
		if (!list_empty(&dentry->d_subdirs)) {
			this_parent = dentry;
			goto repeat;
		}
	}
	/*
	 * All done at this level ... ascend and resume the search.
	 */
	if (this_parent != parent) {
		next = this_parent->d_child.next;
		this_parent = this_parent->d_parent;
		goto resume;
	}
	spin_unlock(&dcache_lock);
	return 0; /* No mount points found in tree */
positive:
	spin_unlock(&dcache_lock);
	return 1;
      }
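
/*
 * Illustrative (hypothetical) caller - say, an automounter deciding
 * whether a subtree can safely be expired:
 *
 *	if (have_submounts(dentry))
 *		return -EBUSY;
 */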
      
      /*
       * Search the dentry child list for the specified parent,
       * and move any unused dentries to the end of the unused
       * list for prune_dcache(). We descend to the next level
       * whenever the d_subdirs list is non-empty and continue
       * searching.
       */
static int select_parent(struct dentry * parent)
{
	struct dentry *this_parent = parent;
	struct list_head *next;
	int found = 0;

	spin_lock(&dcache_lock);
repeat:
	next = this_parent->d_subdirs.next;
resume:
	while (next != &this_parent->d_subdirs) {
		struct list_head *tmp = next;
		struct dentry *dentry = list_entry(tmp, struct dentry, d_child);
		next = tmp->next;
		if (!atomic_read(&dentry->d_count)) {
			list_del(&dentry->d_lru);
			list_add(&dentry->d_lru, dentry_unused.prev);
			found++;
		}
		/*
		 * Descend a level if the d_subdirs list is non-empty.
		 */
		if (!list_empty(&dentry->d_subdirs)) {
			this_parent = dentry;
#ifdef DCACHE_DEBUG
printk(KERN_DEBUG "select_parent: descending to %s/%s, found=%d\n",
dentry->d_parent->d_name.name, dentry->d_name.name, found);
#endif
			goto repeat;
		}
	}
	/*
	 * All done at this level ... ascend and resume the search.
	 */
	if (this_parent != parent) {
		next = this_parent->d_child.next;
		this_parent = this_parent->d_parent;
#ifdef DCACHE_DEBUG
printk(KERN_DEBUG "select_parent: ascending to %s/%s, found=%d\n",
this_parent->d_parent->d_name.name, this_parent->d_name.name, found);
#endif
		goto resume;
	}
	spin_unlock(&dcache_lock);
	return found;
      }
      
      /**
       * shrink_dcache_parent - prune dcache
       * @parent: parent of entries to prune
       *
       * Prune the dcache to remove unused children of the parent dentry.
       */
       
void shrink_dcache_parent(struct dentry * parent)
{
	int found;

	while ((found = select_parent(parent)) != 0)
      		prune_dcache(found);
      }
      
      /*
       * This is called from kswapd when we think we need some
       * more memory, but aren't really sure how much. So we
       * carefully try to free a _bit_ of our dcache, but not
       * too much.
       *
       * Priority:
       *   0 - very urgent: shrink everything
       *  ...
       *   6 - base-level: try to shrink a bit.
       */
void shrink_dcache_memory(int priority, unsigned int gfp_mask)
{
	int count = 0;

	/*
	 * Nasty deadlock avoidance.
	 *
	 * ext2_new_block->getblk->GFP->shrink_dcache_memory->prune_dcache->
	 * prune_one_dentry->dput->dentry_iput->iput->inode->i_sb->s_op->
	 * put_inode->ext2_discard_prealloc->ext2_free_blocks->lock_super->
	 * DEADLOCK.
	 *
	 * We should make sure we don't hold the superblock lock over
	 * block allocations, but for now:
	 */
	if (!(gfp_mask & __GFP_IO))
		return;

	if (priority)
      		count = dentry_stat.nr_unused / priority;
      
      	prune_dcache(count);
      	kmem_cache_shrink(dentry_cache);
      }
      
      #define NAME_ALLOC_LEN(len)	((len+16) & ~15)
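
/*
 * The macro above rounds len+1 (the name plus its trailing NUL) up to
 * a multiple of 16 bytes: e.g. NAME_ALLOC_LEN(13) == 16 and
 * NAME_ALLOC_LEN(20) == 32.
 */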
      
      /**
       * d_alloc	-	allocate a dcache entry
       * @parent: parent of entry to allocate
       * @name: qstr of the name
       *
 * Allocates a dentry. It returns %NULL if there is insufficient memory
 * available. On success the dentry is returned. The name is copied, so
 * the buffer passed in may be reused after this call.
       */
       
struct dentry * d_alloc(struct dentry * parent, const struct qstr *name)
{
	char * str;
	struct dentry *dentry;

	dentry = kmem_cache_alloc(dentry_cache, GFP_KERNEL);
	if (!dentry)
		return NULL;

	if (name->len > DNAME_INLINE_LEN-1) {
		str = kmalloc(NAME_ALLOC_LEN(name->len), GFP_KERNEL);
		if (!str) {
			kmem_cache_free(dentry_cache, dentry);
			return NULL;
		}
	} else
		str = dentry->d_iname;

	memcpy(str, name->name, name->len);
	str[name->len] = 0;

	atomic_set(&dentry->d_count, 1);
	dentry->d_flags = 0;
	dentry->d_inode = NULL;
	dentry->d_parent = NULL;
	dentry->d_sb = NULL;
	dentry->d_name.name = str;
	dentry->d_name.len = name->len;
	dentry->d_name.hash = name->hash;
	dentry->d_op = NULL;
	dentry->d_fsdata = NULL;
	INIT_LIST_HEAD(&dentry->d_vfsmnt);
	INIT_LIST_HEAD(&dentry->d_hash);
	INIT_LIST_HEAD(&dentry->d_lru);
	INIT_LIST_HEAD(&dentry->d_subdirs);
	INIT_LIST_HEAD(&dentry->d_alias);
	if (parent) {
		dentry->d_parent = dget(parent);
		dentry->d_sb = parent->d_sb;
		spin_lock(&dcache_lock);
		list_add(&dentry->d_child, &parent->d_subdirs);
		spin_unlock(&dcache_lock);
	} else
		INIT_LIST_HEAD(&dentry->d_child);

	dentry_stat.nr_dentry++;
	return dentry;
      }
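
/*
 * Minimal sketch of a caller, building the qstr by hand the way the
 * path-walking code does (the name "foo" is, of course, made up):
 *
 *	struct qstr this;
 *	this.name = "foo";
 *	this.len = 3;
 *	this.hash = full_name_hash(this.name, this.len);
 *	dentry = d_alloc(dir, &this);
 *	if (!dentry)
 *		return -ENOMEM;
 */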
      
      /**
       * d_instantiate - fill in inode information for a dentry
       * @entry: dentry to complete
       * @inode: inode to attach to this dentry
       *
       * Fill in inode information in the entry.
       *
       * This turns negative dentries into productive full members
       * of society.
       *
       * NOTE! This assumes that the inode count has been incremented
       * (or otherwise set) by the caller to indicate that it is now
       * in use by the dcache.
       */
       
void d_instantiate(struct dentry *entry, struct inode * inode)
{
	spin_lock(&dcache_lock);
	if (inode)
		list_add(&entry->d_alias, &inode->i_dentry);
	entry->d_inode = inode;
	spin_unlock(&dcache_lock);
      }
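
/*
 * Sketch of the usual hand-over in a filesystem lookup method (many
 * callers use the d_add() wrapper from dcache.h, which also rehashes):
 *
 *	inode = iget(dir->i_sb, ino);
 *	if (!inode)
 *		return ERR_PTR(-EACCES);
 *	d_instantiate(dentry, inode);
 *
 * Note that the iget() reference is not dropped here: it now belongs
 * to the dcache, exactly as the NOTE above demands.
 */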
      
      /**
       * d_alloc_root - allocate root dentry
       * @root_inode: inode to allocate the root for
       *
 * Allocate a root ("/") dentry for the inode given. The inode is
 * instantiated into the dentry and the dentry is returned. %NULL is
 * returned if there is insufficient memory or the inode passed is %NULL.
       */
       
struct dentry * d_alloc_root(struct inode * root_inode)
{
	struct dentry *res = NULL;

	if (root_inode) {
		res = d_alloc(NULL, &(const struct qstr) { "/", 1, 0 });
		if (res) {
			res->d_sb = root_inode->i_sb;
			res->d_parent = res;
			d_instantiate(res, root_inode);
		}
	}
	return res;
      }
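
/*
 * Hedged sketch of the usual read_super()-time call (ROOT_INO standing
 * in for the filesystem's real root inode number):
 *
 *	root_inode = iget(sb, ROOT_INO);
 *	sb->s_root = d_alloc_root(root_inode);
 *	if (!sb->s_root) {
 *		iput(root_inode);
 *		return NULL;
 *	}
 *
 * On failure the iput() is the caller's job, since d_alloc_root() only
 * consumes the inode reference when it succeeds.
 */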
      
static inline struct list_head * d_hash(struct dentry * parent, unsigned long hash)
{
	hash += (unsigned long) parent / L1_CACHE_BYTES;
	hash = hash ^ (hash >> D_HASHBITS) ^ (hash >> D_HASHBITS*2);
	return dentry_hashtable + (hash & D_HASHMASK);
      }
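
/*
 * For illustration: the parent pointer is scaled down by L1_CACHE_BYTES
 * (its low bits are always zero, since dentries come from a
 * cache-aligned slab) and added to the name hash; the two xor-folds
 * then let the high bits influence the bucket index that survives the
 * final masking.
 */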
      
      /**
       * d_lookup - search for a dentry
       * @parent: parent dentry
       * @name: qstr of name we wish to find
       *
 * Searches the children of the parent dentry for the name in question. If
 * the dentry is found its reference count is incremented and the dentry
 * is returned. The caller must use dput() to release the entry when it has
 * finished using it. %NULL is returned on failure.
       */
       
struct dentry * d_lookup(struct dentry * parent, struct qstr * name)
{
	unsigned int len = name->len;
	unsigned int hash = name->hash;
	const unsigned char *str = name->name;
	struct list_head *head = d_hash(parent,hash);
	struct list_head *tmp;

	spin_lock(&dcache_lock);
	tmp = head->next;
	for (;;) {
		struct dentry * dentry = list_entry(tmp, struct dentry, d_hash);
		if (tmp == head)
			break;
		tmp = tmp->next;
		if (dentry->d_name.hash != hash)
			continue;
		if (dentry->d_parent != parent)
			continue;
		if (parent->d_op && parent->d_op->d_compare) {
			if (parent->d_op->d_compare(parent, &dentry->d_name, name))
				continue;
		} else {
			if (dentry->d_name.len != len)
				continue;
			if (memcmp(dentry->d_name.name, str, len))
				continue;
		}
		__dget_locked(dentry);
		dentry->d_flags |= DCACHE_REFERENCED;
		spin_unlock(&dcache_lock);
		return dentry;
	}
	spin_unlock(&dcache_lock);
	return NULL;
      }
      
      /**
       * d_validate - verify dentry provided from insecure source
       * @dentry: The dentry alleged to be valid
       * @dparent: The parent dentry
       * @hash: Hash of the dentry
       * @len: Length of the name
       *
 * An insecure source has sent us a dentry; here we verify it and dget() it.
 * This is used by ncpfs in its readdir implementation.
 * Zero is returned if the dentry is invalid.
 *
 * NOTE: This function does _not_ dereference the pointers before we have
 * validated them. We can test the pointer values, but we
 * must not actually use them until we have found a valid
 * copy of the pointer in kernel space.
       */
       
int d_validate(struct dentry *dentry, struct dentry *dparent,
	       unsigned int hash, unsigned int len)
{
	struct list_head *base, *lhp;
	int valid = 1;

	spin_lock(&dcache_lock);
	if (dentry != dparent) {
		base = d_hash(dparent, hash);
		lhp = base;
		while ((lhp = lhp->next) != base) {
			if (dentry == list_entry(lhp, struct dentry, d_hash)) {
				__dget_locked(dentry);
				goto out;
			}
		}
	} else {
		/*
		 * Special case: local mount points don't live in
		 * the hashes, so we search the super blocks.
		 */
		struct super_block *sb = sb_entry(super_blocks.next);

		for (; sb != sb_entry(&super_blocks);
		     sb = sb_entry(sb->s_list.next)) {
			if (!sb->s_dev)
				continue;
			if (sb->s_root == dentry) {
				__dget_locked(dentry);
				goto out;
			}
		}
	}
	valid = 0;
out:
	spin_unlock(&dcache_lock);
	return valid;
      }
      
      /*
       * When a file is deleted, we have two options:
       * - turn this dentry into a negative dentry
       * - unhash this dentry and free it.
       *
       * Usually, we want to just turn this into
       * a negative dentry, but if anybody else is
       * currently using the dentry or the inode
       * we can't do that and we fall back on removing
       * it from the hash queues and waiting for
 * it to be deleted later when it has no users.
       */
       
      /**
       * d_delete - delete a dentry
       * @dentry: The dentry to delete
       *
       * Turn the dentry into a negative dentry if possible, otherwise
 * remove it from the hash queues so it can be deleted later.
       */
       
void d_delete(struct dentry * dentry)
{
	/*
	 * Are we the only user?
	 */
	spin_lock(&dcache_lock);
	if (atomic_read(&dentry->d_count) == 1) {
		dentry_iput(dentry);
		return;
	}
	spin_unlock(&dcache_lock);
      
      	/*
      	 * If not, just drop the dentry and let dput
      	 * pick up the tab..
      	 */
      	d_drop(dentry);
      }
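
/*
 * Sketch of where this sits in the VFS (simplified from the unlink
 * path):
 *
 *	error = dir->i_op->unlink(dir, dentry);
 *	if (!error)
 *		d_delete(dentry);
 *
 * If nobody else holds the dentry it simply goes negative; otherwise
 * it is unhashed and dies with its last user.
 */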
      
      /**
       * d_rehash	- add an entry back to the hash
       * @entry: dentry to add to the hash
       *
       * Adds a dentry to the hash according to its name.
       */
       
void d_rehash(struct dentry * entry)
{
	struct list_head *list = d_hash(entry->d_parent, entry->d_name.hash);
	spin_lock(&dcache_lock);
	list_add(&entry->d_hash, list);
	spin_unlock(&dcache_lock);
      }
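
/*
 * d_drop() (in dcache.h) is the inverse operation. A revalidating
 * filesystem might do, roughly (entry_is_stale() being a stand-in for
 * the filesystem's own staleness check):
 *
 *	if (entry_is_stale(dentry))
 *		d_drop(dentry);
 *	else if (d_unhashed(dentry))
 *		d_rehash(dentry);
 */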
      
      #define do_switch(x,y) do { \
      	__typeof__ (x) __tmp = x; \
      	x = y; y = __tmp; } while (0)
      
      /*
       * When switching names, the actual string doesn't strictly have to
       * be preserved in the target - because we're dropping the target
       * anyway. As such, we can just do a simple memcpy() to copy over
       * the new name before we switch.
       *
       * Note that we have to be a lot more careful about getting the hash
       * switched - we have to switch the hash value properly even if it
       * then no longer matches the actual (corrupted) string of the target.
 * The hash value has to match the hash queue that the dentry is on.
       */
static inline void switch_names(struct dentry * dentry, struct dentry * target)
{
	const unsigned char *old_name, *new_name;

	check_lock();
	memcpy(dentry->d_iname, target->d_iname, DNAME_INLINE_LEN);
	old_name = target->d_name.name;
	new_name = dentry->d_name.name;
	if (old_name == target->d_iname)
		old_name = dentry->d_iname;
	if (new_name == dentry->d_iname)
      		new_name = target->d_iname;
      	target->d_name.name = new_name;
      	dentry->d_name.name = old_name;
      }
      
      /*
       * We cannibalize "target" when moving dentry on top of it,
       * because it's going to be thrown away anyway. We could be more
       * polite about it, though.
       *
       * This forceful removal will result in ugly /proc output if
       * somebody holds a file open that got deleted due to a rename.
       * We could be nicer about the deleted file, and let it show
       * up under the name it got deleted rather than the name that
       * deleted it.
       *
       * Careful with the hash switch. The hash switch depends on
       * the fact that any list-entry can be a head of the list.
       * Think about it.
       */
       
      /**
       * d_move - move a dentry
       * @dentry: entry to move
       * @target: new dentry
       *
       * Update the dcache to reflect the move of a file name. Negative
       * dcache entries should not be moved in this way.
       */
        
void d_move(struct dentry * dentry, struct dentry * target)
{
	check_lock();

	if (!dentry->d_inode)
		printk(KERN_WARNING "VFS: moving negative dcache entry\n");

	spin_lock(&dcache_lock);
	/* Move the dentry to the target hash queue */
	list_del(&dentry->d_hash);
	list_add(&dentry->d_hash, &target->d_hash);

	/* Unhash the target: dput() will then get rid of it */
	list_del(&target->d_hash);
	INIT_LIST_HEAD(&target->d_hash);

	list_del(&dentry->d_child);
	list_del(&target->d_child);

	/* Switch the parents and the names.. */
	switch_names(dentry, target);
	do_switch(dentry->d_parent, target->d_parent);
	do_switch(dentry->d_name.len, target->d_name.len);
	do_switch(dentry->d_name.hash, target->d_name.hash);

	/* And add them back to the (new) parent lists */
	list_add(&target->d_child, &target->d_parent->d_subdirs);
	list_add(&dentry->d_child, &dentry->d_parent->d_subdirs);
	spin_unlock(&dcache_lock);
      }
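
/*
 * Sketch of the caller's side, simplified from the VFS rename path:
 *
 *	error = old_dir->i_op->rename(old_dir, old_dentry,
 *				      new_dir, new_dentry);
 *	if (!error)
 *		d_move(old_dentry, new_dentry);
 */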
      
      /**
       * d_path - return the path of a dentry
       * @dentry: dentry to report
       * @vfsmnt: vfsmnt to which the dentry belongs
       * @root: root dentry
       * @rootmnt: vfsmnt to which the root dentry belongs
       * @buffer: buffer to return value in
       * @buflen: buffer length
       *
 * Convert a dentry into an ASCII path name. If the entry has been deleted
 * the string " (deleted)" is appended. Note that this is ambiguous: a name
 * that genuinely ends in " (deleted)" looks the same. Returns a pointer to
 * the start of the path within the buffer.
 *
 * "buflen" should be %PAGE_SIZE or more. Caller holds the dcache_lock.
       */
char * __d_path(struct dentry *dentry, struct vfsmount *vfsmnt,
		struct dentry *root, struct vfsmount *rootmnt,
		char *buffer, int buflen)
{
	char * end = buffer+buflen;
	char * retval;
	int namelen;

	*--end = '\0';
	buflen--;
	if (!IS_ROOT(dentry) && list_empty(&dentry->d_hash)) {
		buflen -= 10;
		end -= 10;
		memcpy(end, " (deleted)", 10);
	}

	/* Get '/' right */
	retval = end-1;
	*retval = '/';

	for (;;) {
		struct dentry * parent;

		if (dentry == root && vfsmnt == rootmnt)
			break;
		if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
			/* Global root? */
			if (vfsmnt->mnt_parent == vfsmnt)
				goto global_root;
			dentry = vfsmnt->mnt_mountpoint;
			vfsmnt = vfsmnt->mnt_parent;
			continue;
		}
		parent = dentry->d_parent;
		namelen = dentry->d_name.len;
		buflen -= namelen + 1;
		if (buflen < 0)
			break;
		end -= namelen;
		memcpy(end, dentry->d_name.name, namelen);
		*--end = '/';
		retval = end;
		dentry = parent;
	}
	return retval;
global_root:
	namelen = dentry->d_name.len;
	buflen -= namelen;
	if (buflen >= 0) {
		retval -= namelen-1;	/* hit the slash */
		memcpy(retval, dentry->d_name.name, namelen);
	}
	return retval;
      }
      
      /*
       * NOTE! The user-level library version returns a
       * character pointer. The kernel system call just
       * returns the length of the buffer filled (which
       * includes the ending '\0' character), or a negative
       * error value. So libc would do something like
       *
       *	char *getcwd(char * buf, size_t size)
       *	{
       *		int retval;
       *
       *		retval = sys_getcwd(buf, size);
       *		if (retval >= 0)
       *			return buf;
       *		errno = -retval;
       *		return NULL;
       *	}
       */
asmlinkage long sys_getcwd(char *buf, unsigned long size)
{
	int error;
	struct vfsmount *pwdmnt, *rootmnt;
	struct dentry *pwd, *root;
	char *page = (char *) __get_free_page(GFP_USER);

	if (!page)
		return -ENOMEM;

	read_lock(&current->fs->lock);
	pwdmnt = mntget(current->fs->pwdmnt);
	pwd = dget(current->fs->pwd);
	rootmnt = mntget(current->fs->rootmnt);
	root = dget(current->fs->root);
	read_unlock(&current->fs->lock);

	error = -ENOENT;
	/* Has the current directory been unlinked? */
	spin_lock(&dcache_lock);
	if (pwd->d_parent == pwd || !list_empty(&pwd->d_hash)) {
		unsigned long len;
		char * cwd;

		cwd = __d_path(pwd, pwdmnt, root, rootmnt, page, PAGE_SIZE);
		spin_unlock(&dcache_lock);

		error = -ERANGE;
		len = PAGE_SIZE + page - cwd;
		if (len <= size) {
			error = len;
			if (copy_to_user(buf, cwd, len))
				error = -EFAULT;
		}
	} else
		spin_unlock(&dcache_lock);
	dput(pwd);
	mntput(pwdmnt);
	dput(root);
	mntput(rootmnt);
	free_page((unsigned long) page);
	return error;
      }
      
      /*
       * Test whether new_dentry is a subdirectory of old_dentry.
       *
 * Trivially implemented using the dcache structure.
       */
      
      /**
       * is_subdir - is new dentry a subdirectory of old_dentry
       * @new_dentry: new dentry
       * @old_dentry: old dentry
       *
 * Returns 1 if new_dentry is old_dentry or is found beneath old_dentry
 * at any depth. Returns 0 otherwise.
       */
        
int is_subdir(struct dentry * new_dentry, struct dentry * old_dentry)
{
	int result;

	result = 0;
	for (;;) {
		if (new_dentry != old_dentry) {
			struct dentry * parent = new_dentry->d_parent;
			if (parent == new_dentry)
				break;
			new_dentry = parent;
			continue;
		}
		result = 1;
		break;
	}
	return result;
      }
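
/*
 * A hypothetical caller that must refuse to move a directory into its
 * own subtree could check:
 *
 *	if (is_subdir(new_dentry, old_dentry))
 *		return -EINVAL;
 */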
      
void d_genocide(struct dentry *root)
{
	struct dentry *this_parent = root;
	struct list_head *next;

	spin_lock(&dcache_lock);
repeat:
	next = this_parent->d_subdirs.next;
resume:
	while (next != &this_parent->d_subdirs) {
		struct list_head *tmp = next;
		struct dentry *dentry = list_entry(tmp, struct dentry, d_child);
		next = tmp->next;
		if (d_unhashed(dentry) || !dentry->d_inode)
			continue;
		if (!list_empty(&dentry->d_subdirs)) {
			this_parent = dentry;
			goto repeat;
		}
		atomic_dec(&dentry->d_count);
	}
	if (this_parent != root) {
		next = this_parent->d_child.next;
		atomic_dec(&this_parent->d_count);
		this_parent = this_parent->d_parent;
		goto resume;
	}
	spin_unlock(&dcache_lock);
      }
      
      /**
       * find_inode_number - check for dentry with name
       * @dir: directory to check
       * @name: Name to find.
       *
       * Check whether a dentry already exists for the given name,
       * and return the inode number if it has an inode. Otherwise
       * 0 is returned.
       *
       * This routine is used to post-process directory listings for
       * filesystems using synthetic inode numbers, and is necessary
       * to keep getcwd() working.
       */
       
ino_t find_inode_number(struct dentry *dir, struct qstr *name)
{
	struct dentry * dentry;
	ino_t ino = 0;

	/*
	 * Check for a fs-specific hash function. Note that we must
	 * calculate the standard hash first, as the d_op->d_hash()
	 * routine may choose to leave the hash value unchanged.
	 */
	name->hash = full_name_hash(name->name, name->len);
	if (dir->d_op && dir->d_op->d_hash) {
		if (dir->d_op->d_hash(dir, name) != 0)
			goto out;
	}

	dentry = d_lookup(dir, name);
	if (dentry) {
		if (dentry->d_inode)
			ino = dentry->d_inode->i_ino;
		dput(dentry);
	}
out:
	return ino;
      }
      
static void __init dcache_init(unsigned long mempages)
{
	struct list_head *d;
	long order;
	unsigned int nr_hash;
	int i;

	/*
	 * A constructor could be added for stable state like the lists,
	 * but it is probably not worth it because of the cache nature
	 * of the dcache.
	 * If fragmentation is too bad then the SLAB_HWCACHE_ALIGN
	 * flag could be removed here, to hint to the allocator that
	 * it should not try to get multiple page regions.
	 */
	dentry_cache = kmem_cache_create("dentry_cache",
					 sizeof(struct dentry),
					 0,
					 SLAB_HWCACHE_ALIGN,
					 NULL, NULL);
	if (!dentry_cache)
		panic("Cannot create dentry cache");

#if PAGE_SHIFT < 13
	mempages >>= (13 - PAGE_SHIFT);
#endif
	mempages *= sizeof(struct list_head);
	for (order = 0; ((1UL << order) << PAGE_SHIFT) < mempages; order++)
		;
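
	/*
	 * Worked example (hypothetical 64MB i386 box, 4K pages, 8-byte
	 * list heads): 16384 pages are scaled down to 8192 and multiplied
	 * by 8, giving 64KB, i.e. order 4; the loop below then yields
	 * nr_hash = 8192 buckets, d_hash_mask = 8191, d_hash_shift = 13.
	 */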
      
	do {
		unsigned long tmp;

		nr_hash = (1UL << order) * PAGE_SIZE /
			sizeof(struct list_head);
		d_hash_mask = (nr_hash - 1);

		tmp = nr_hash;
		d_hash_shift = 0;
		while ((tmp >>= 1UL) != 0UL)
			d_hash_shift++;

		dentry_hashtable = (struct list_head *)
			__get_free_pages(GFP_ATOMIC, order);
	} while (dentry_hashtable == NULL && --order >= 0);

	printk("Dentry-cache hash table entries: %d (order: %ld, %ld bytes)\n",
			nr_hash, order, (PAGE_SIZE << order));

	if (!dentry_hashtable)
		panic("Failed to allocate dcache hash table\n");

	d = dentry_hashtable;
	i = nr_hash;
	do {
		INIT_LIST_HEAD(d);
		d++;
		i--;
	} while (i);
      }
      
      /* SLAB cache for __getname() consumers */
      kmem_cache_t *names_cachep;
      
      /* SLAB cache for file structures */
      kmem_cache_t *filp_cachep;
      
      /* SLAB cache for dquot structures */
      kmem_cache_t *dquot_cachep;
      
      /* SLAB cache for buffer_head structures */
      kmem_cache_t *bh_cachep;
      
void __init vfs_caches_init(unsigned long mempages)
{
	bh_cachep = kmem_cache_create("buffer_head",
			sizeof(struct buffer_head), 0,
			SLAB_HWCACHE_ALIGN, NULL, NULL);
	if (!bh_cachep)
		panic("Cannot create buffer head SLAB cache");

	names_cachep = kmem_cache_create("names_cache",
			PATH_MAX + 1, 0,
			SLAB_HWCACHE_ALIGN, NULL, NULL);
	if (!names_cachep)
		panic("Cannot create names SLAB cache");

	filp_cachep = kmem_cache_create("filp",
			sizeof(struct file), 0,
			SLAB_HWCACHE_ALIGN, NULL, NULL);
	if (!filp_cachep)
		panic("Cannot create filp SLAB cache");
      		panic("Cannot create filp SLAB cache");
      
      #if defined (CONFIG_QUOTA)
      	dquot_cachep = kmem_cache_create("dquot", 
      			sizeof(struct dquot), sizeof(unsigned long) * 4,
      			SLAB_HWCACHE_ALIGN, NULL, NULL);
      	if (!dquot_cachep)
      		panic("Cannot create dquot SLAB cache");
      #endif
      
      	dcache_init(mempages);
      }