inode.c 43.4 KB
Newer Older
L
Linus Torvalds 已提交
1 2
/*
 * (C) 1997 Linus Torvalds
 * (C) 1999 Andrea Arcangeli <andrea@suse.de> (dynamic inode allocation)
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/dcache.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/writeback.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/wait.h>
N
Nick Piggin 已提交
14
#include <linux/rwsem.h>
L
Linus Torvalds 已提交
15 16 17 18 19 20
#include <linux/hash.h>
#include <linux/swap.h>
#include <linux/security.h>
#include <linux/pagemap.h>
#include <linux/cdev.h>
#include <linux/bootmem.h>
21
#include <linux/fsnotify.h>
22
#include <linux/mount.h>
23
#include <linux/async.h>
A
Al Viro 已提交
24
#include <linux/posix_acl.h>
25
#include <linux/prefetch.h>
26
#include <linux/ima.h>
27
#include <linux/cred.h>
28
#include <linux/buffer_head.h> /* for inode_has_buffers */
29
#include "internal.h"
L
Linus Torvalds 已提交
30

31
/*
 * Inode locking rules:
 *
 * inode->i_lock protects:
 *   inode->i_state, inode->i_hash, __iget()
 * inode_lru_lock protects:
 *   inode_lru, inode->i_lru
 * inode_sb_list_lock protects:
 *   sb->s_inodes, inode->i_sb_list
 * inode_wb_list_lock protects:
 *   bdi->wb.b_{dirty,io,more_io}, inode->i_wb_list
 * inode_hash_lock protects:
 *   inode_hashtable, inode->i_hash
 *
 * Lock ordering:
 *
 * inode_sb_list_lock
 *   inode->i_lock
 *     inode_lru_lock
 *
 * inode_wb_list_lock
 *   inode->i_lock
 *
 * inode_hash_lock
 *   inode_sb_list_lock
 *   inode->i_lock
 *
 * iunique_lock
 *   inode_hash_lock
 */

62 63
static unsigned int i_hash_mask __read_mostly;
static unsigned int i_hash_shift __read_mostly;
64 65
static struct hlist_head *inode_hashtable __read_mostly;
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_hash_lock);
L
Linus Torvalds 已提交
66

N
Nick Piggin 已提交
67
static LIST_HEAD(inode_lru);
68
static DEFINE_SPINLOCK(inode_lru_lock);
L
Linus Torvalds 已提交
69

70
__cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_sb_list_lock);
71
__cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_wb_list_lock);
72

L
Linus Torvalds 已提交
73
/*
 * iprune_sem provides exclusion between the icache shrinking and the
 * umount path.
 *
 * We don't actually need it to protect anything in the umount path,
 * but only need to cycle through it to make sure any inode that
 * prune_icache took off the LRU list has been fully torn down by the
 * time we are past evict_inodes.
 */
N
Nick Piggin 已提交
82
static DECLARE_RWSEM(iprune_sem);
L
Linus Torvalds 已提交
83

J
Jens Axboe 已提交
84 85 86 87 88 89 90 91
/*
 * Empty aops. Can be used for the cases where the user does not
 * define any of the address_space operations.
 */
const struct address_space_operations empty_aops = {
};
EXPORT_SYMBOL(empty_aops);

L
Linus Torvalds 已提交
92 93 94 95 96
/*
 * Statistics gathering..
 */
struct inodes_stat_t inodes_stat;

97
static DEFINE_PER_CPU(unsigned int, nr_inodes);
98

99
static struct kmem_cache *inode_cachep __read_mostly;
L
Linus Torvalds 已提交
100

101
static int get_nr_inodes(void)
102
{
103 104 105 106 107
	int i;
	int sum = 0;
	for_each_possible_cpu(i)
		sum += per_cpu(nr_inodes, i);
	return sum < 0 ? 0 : sum;
108 109 110 111
}

static inline int get_nr_inodes_unused(void)
{
112
	return inodes_stat.nr_unused;
113 114 115 116
}

/*
 * Rough estimate only: everything allocated minus everything idle on the
 * LRU.  Not an exact count of dirty inodes.
 */
int get_nr_dirty_inodes(void)
{
	int dirty = get_nr_inodes() - get_nr_inodes_unused();

	return dirty > 0 ? dirty : 0;
}

/*
 * sysctl handler for the inode counters: refresh the aggregate count
 * before deferring to the generic integer-vector handler.
 */
#ifdef CONFIG_SYSCTL
int proc_nr_inodes(ctl_table *table, int write,
		   void __user *buffer, size_t *lenp, loff_t *ppos)
{
	inodes_stat.nr_inodes = get_nr_inodes();
	return proc_dointvec(table, write, buffer, lenp, ppos);
}
#endif

D
David Chinner 已提交
134 135
/**
 * inode_init_always - perform inode structure intialisation
R
Randy Dunlap 已提交
136 137
 * @sb: superblock inode belongs to
 * @inode: inode to initialise
D
David Chinner 已提交
138 139 140 141
 *
 * These are initializations that need to be done on every inode
 * allocation as the fields are not initialised by slab allocation.
 */
142
int inode_init_always(struct super_block *sb, struct inode *inode)
L
Linus Torvalds 已提交
143
{
144
	static const struct inode_operations empty_iops;
145
	static const struct file_operations empty_fops;
146
	struct address_space *const mapping = &inode->i_data;
D
David Chinner 已提交
147 148 149 150 151 152 153 154

	inode->i_sb = sb;
	inode->i_blkbits = sb->s_blocksize_bits;
	inode->i_flags = 0;
	atomic_set(&inode->i_count, 1);
	inode->i_op = &empty_iops;
	inode->i_fop = &empty_fops;
	inode->i_nlink = 1;
A
Al Viro 已提交
155 156
	inode->i_uid = 0;
	inode->i_gid = 0;
D
David Chinner 已提交
157 158 159 160 161
	atomic_set(&inode->i_writecount, 0);
	inode->i_size = 0;
	inode->i_blocks = 0;
	inode->i_bytes = 0;
	inode->i_generation = 0;
L
Linus Torvalds 已提交
162
#ifdef CONFIG_QUOTA
D
David Chinner 已提交
163
	memset(&inode->i_dquot, 0, sizeof(inode->i_dquot));
L
Linus Torvalds 已提交
164
#endif
D
David Chinner 已提交
165 166 167 168 169
	inode->i_pipe = NULL;
	inode->i_bdev = NULL;
	inode->i_cdev = NULL;
	inode->i_rdev = 0;
	inode->dirtied_when = 0;
M
Mimi Zohar 已提交
170 171

	if (security_inode_alloc(inode))
172
		goto out;
D
David Chinner 已提交
173 174 175 176 177 178 179 180 181 182 183 184
	spin_lock_init(&inode->i_lock);
	lockdep_set_class(&inode->i_lock, &sb->s_type->i_lock_key);

	mutex_init(&inode->i_mutex);
	lockdep_set_class(&inode->i_mutex, &sb->s_type->i_mutex_key);

	init_rwsem(&inode->i_alloc_sem);
	lockdep_set_class(&inode->i_alloc_sem, &sb->s_type->i_alloc_sem_key);

	mapping->a_ops = &empty_aops;
	mapping->host = inode;
	mapping->flags = 0;
H
Hugh Dickins 已提交
185
	mapping_set_gfp_mask(mapping, GFP_HIGHUSER_MOVABLE);
D
David Chinner 已提交
186 187 188 189 190 191 192 193 194 195 196 197
	mapping->assoc_mapping = NULL;
	mapping->backing_dev_info = &default_backing_dev_info;
	mapping->writeback_index = 0;

	/*
	 * If the block_device provides a backing_dev_info for client
	 * inodes then use that.  Otherwise the inode share the bdev's
	 * backing_dev_info.
	 */
	if (sb->s_bdev) {
		struct backing_dev_info *bdi;

198
		bdi = sb->s_bdev->bd_inode->i_mapping->backing_dev_info;
D
David Chinner 已提交
199 200 201 202
		mapping->backing_dev_info = bdi;
	}
	inode->i_private = NULL;
	inode->i_mapping = mapping;
A
Al Viro 已提交
203 204 205
#ifdef CONFIG_FS_POSIX_ACL
	inode->i_acl = inode->i_default_acl = ACL_NOT_CACHED;
#endif
D
David Chinner 已提交
206

207 208 209 210
#ifdef CONFIG_FSNOTIFY
	inode->i_fsnotify_mask = 0;
#endif

211
	this_cpu_inc(nr_inodes);
212

213 214 215
	return 0;
out:
	return -ENOMEM;
L
Linus Torvalds 已提交
216
}
D
David Chinner 已提交
217 218 219 220 221 222 223 224 225 226 227
EXPORT_SYMBOL(inode_init_always);

/*
 * Allocate an inode, preferring the filesystem's own allocator, then run
 * the per-allocation initialisation.  On init failure the half-built
 * inode is released through the matching destructor and NULL is returned.
 */
static struct inode *alloc_inode(struct super_block *sb)
{
	struct inode *inode;

	inode = sb->s_op->alloc_inode ? sb->s_op->alloc_inode(sb) :
		kmem_cache_alloc(inode_cachep, GFP_KERNEL);
	if (!inode)
		return NULL;

	if (likely(!inode_init_always(sb, inode)))
		return inode;

	/* tear down the partially initialised inode */
	if (inode->i_sb->s_op->destroy_inode)
		inode->i_sb->s_op->destroy_inode(inode);
	else
		kmem_cache_free(inode_cachep, inode);
	return NULL;
}
L
Linus Torvalds 已提交
241

242 243 244 245 246 247
/*
 * Return an inode straight to the slab cache without waiting for an RCU
 * grace period; for callers that know no RCU walker can see the inode.
 */
void free_inode_nonrcu(struct inode *inode)
{
	kmem_cache_free(inode_cachep, inode);
}
EXPORT_SYMBOL(free_inode_nonrcu);

C
Christoph Hellwig 已提交
248
void __destroy_inode(struct inode *inode)
L
Linus Torvalds 已提交
249
{
250
	BUG_ON(inode_has_buffers(inode));
L
Linus Torvalds 已提交
251
	security_inode_free(inode);
252
	fsnotify_inode_delete(inode);
A
Al Viro 已提交
253 254 255 256 257 258
#ifdef CONFIG_FS_POSIX_ACL
	if (inode->i_acl && inode->i_acl != ACL_NOT_CACHED)
		posix_acl_release(inode->i_acl);
	if (inode->i_default_acl && inode->i_default_acl != ACL_NOT_CACHED)
		posix_acl_release(inode->i_default_acl);
#endif
259
	this_cpu_dec(nr_inodes);
C
Christoph Hellwig 已提交
260 261 262
}
EXPORT_SYMBOL(__destroy_inode);

N
Nick Piggin 已提交
263 264 265 266 267 268 269
/* RCU callback: free the inode once the grace period has elapsed. */
static void i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);

	INIT_LIST_HEAD(&inode->i_dentry);
	kmem_cache_free(inode_cachep, inode);
}

C
Christoph Hellwig 已提交
270
static void destroy_inode(struct inode *inode)
C
Christoph Hellwig 已提交
271
{
N
Nick Piggin 已提交
272
	BUG_ON(!list_empty(&inode->i_lru));
C
Christoph Hellwig 已提交
273
	__destroy_inode(inode);
L
Linus Torvalds 已提交
274 275 276
	if (inode->i_sb->s_op->destroy_inode)
		inode->i_sb->s_op->destroy_inode(inode);
	else
N
Nick Piggin 已提交
277
		call_rcu(&inode->i_rcu, i_callback);
L
Linus Torvalds 已提交
278 279
}

280 281 282 283 284
/*
 * One-time initialisation of an address_space; used by the inode slab
 * constructor and by callers embedding their own address_space.
 */
void address_space_init_once(struct address_space *mapping)
{
	memset(mapping, 0, sizeof(*mapping));
	INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC);
	spin_lock_init(&mapping->tree_lock);
	mutex_init(&mapping->i_mmap_mutex);
	INIT_LIST_HEAD(&mapping->private_list);
	spin_lock_init(&mapping->private_lock);
	INIT_RAW_PRIO_TREE_ROOT(&mapping->i_mmap);
	INIT_LIST_HEAD(&mapping->i_mmap_nonlinear);
}
EXPORT_SYMBOL(address_space_init_once);

L
Linus Torvalds 已提交
293 294 295 296 297 298 299 300 301 302 303
/*
 * Slab-constructor style initialisation: the fields set up here remain
 * valid across reuse of the object, so they only need to be done once.
 */
void inode_init_once(struct inode *inode)
{
	memset(inode, 0, sizeof(*inode));
	INIT_HLIST_NODE(&inode->i_hash);
	INIT_LIST_HEAD(&inode->i_dentry);
	INIT_LIST_HEAD(&inode->i_devices);
	INIT_LIST_HEAD(&inode->i_wb_list);
	INIT_LIST_HEAD(&inode->i_lru);
	address_space_init_once(&inode->i_data);
	i_size_ordered_init(inode);
#ifdef CONFIG_FSNOTIFY
	INIT_HLIST_HEAD(&inode->i_fsnotify_marks);
#endif
}
EXPORT_SYMBOL(inode_init_once);

314
/* kmem_cache constructor trampoline for the inode slab. */
static void init_once(void *foo)
{
	inode_init_once((struct inode *)foo);
}

/*
322
 * inode->i_lock must be held
L
Linus Torvalds 已提交
323
 */
324
void __iget(struct inode *inode)
L
Linus Torvalds 已提交
325
{
326 327
	atomic_inc(&inode->i_count);
}
328

A
Al Viro 已提交
329 330 331 332 333 334 335 336 337
/*
 * Take an additional reference to an inode the caller already holds one
 * on; warn if that precondition is visibly violated.
 */
void ihold(struct inode *inode)
{
	WARN_ON(atomic_inc_return(&inode->i_count) < 2);
}
EXPORT_SYMBOL(ihold);

338 339
static void inode_lru_list_add(struct inode *inode)
{
340
	spin_lock(&inode_lru_lock);
N
Nick Piggin 已提交
341 342
	if (list_empty(&inode->i_lru)) {
		list_add(&inode->i_lru, &inode_lru);
343
		inodes_stat.nr_unused++;
344
	}
345
	spin_unlock(&inode_lru_lock);
346
}
347

348 349
static void inode_lru_list_del(struct inode *inode)
{
350
	spin_lock(&inode_lru_lock);
N
Nick Piggin 已提交
351 352
	if (!list_empty(&inode->i_lru)) {
		list_del_init(&inode->i_lru);
353
		inodes_stat.nr_unused--;
354
	}
355
	spin_unlock(&inode_lru_lock);
L
Linus Torvalds 已提交
356 357
}

358 359 360 361 362 363
/**
 * inode_sb_list_add - add inode to the superblock list of inodes
 * @inode: inode to add
 */
void inode_sb_list_add(struct inode *inode)
{
	spin_lock(&inode_sb_list_lock);
	list_add(&inode->i_sb_list, &inode->i_sb->s_inodes);
	spin_unlock(&inode_sb_list_lock);
}
EXPORT_SYMBOL_GPL(inode_sb_list_add);

370
static inline void inode_sb_list_del(struct inode *inode)
371
{
372
	spin_lock(&inode_sb_list_lock);
373
	list_del_init(&inode->i_sb_list);
374
	spin_unlock(&inode_sb_list_lock);
375 376
}

377 378 379 380 381 382
static unsigned long hash(struct super_block *sb, unsigned long hashval)
{
	unsigned long tmp;

	tmp = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) /
			L1_CACHE_BYTES;
383 384
	tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> i_hash_shift);
	return tmp & i_hash_mask;
385 386 387 388 389 390 391 392 393 394 395 396
}

/**
 *	__insert_inode_hash - hash an inode
 *	@inode: unhashed inode
 *	@hashval: unsigned long value used to locate this object in the
 *		inode_hashtable.
 *
 *	Add an inode to the inode hash for this superblock.
 */
void __insert_inode_hash(struct inode *inode, unsigned long hashval)
{
397 398
	struct hlist_head *b = inode_hashtable + hash(inode->i_sb, hashval);

399
	spin_lock(&inode_hash_lock);
400
	spin_lock(&inode->i_lock);
401
	hlist_add_head(&inode->i_hash, b);
402
	spin_unlock(&inode->i_lock);
403
	spin_unlock(&inode_hash_lock);
404 405 406 407 408 409 410 411 412 413 414
}
EXPORT_SYMBOL(__insert_inode_hash);

/**
 *	remove_inode_hash - remove an inode from the hash
 *	@inode: inode to unhash
 *
 *	Remove an inode from the superblock.
 */
void remove_inode_hash(struct inode *inode)
{
415
	spin_lock(&inode_hash_lock);
416
	spin_lock(&inode->i_lock);
417
	hlist_del_init(&inode->i_hash);
418
	spin_unlock(&inode->i_lock);
419
	spin_unlock(&inode_hash_lock);
420 421 422
}
EXPORT_SYMBOL(remove_inode_hash);

A
Al Viro 已提交
423 424 425
void end_writeback(struct inode *inode)
{
	might_sleep();
	/*
	 * Cycle tree_lock: reclaim may still be removing the last page (in
	 * __delete_from_page_cache()) and the mapping must not be freed
	 * underneath it.
	 */
	spin_lock_irq(&inode->i_data.tree_lock);
	BUG_ON(inode->i_data.nrpages);
	spin_unlock_irq(&inode->i_data.tree_lock);
	BUG_ON(!list_empty(&inode->i_data.private_list));
	BUG_ON(!(inode->i_state & I_FREEING));
	BUG_ON(inode->i_state & I_CLEAR);
	inode_sync_wait(inode);
	/* no i_lock needed: nothing else can modify i_state at this point */
	inode->i_state = I_FREEING | I_CLEAR;
}
EXPORT_SYMBOL(end_writeback);

D
Dave Chinner 已提交
443 444 445 446 447 448 449 450 451 452 453 454 455
/*
 * Free the inode passed in, removing it from the lists it is still connected
 * to. We remove any pages still attached to the inode and wait for any IO that
 * is still in progress before finally destroying the inode.
 *
 * An inode must already be marked I_FREEING so that we avoid the inode being
 * moved back onto lists if we race with other code that manipulates the lists
 * (e.g. writeback_single_inode). The caller is responsible for setting this.
 *
 * An inode must already be removed from the LRU list before being evicted from
 * the cache. This should occur atomically with setting the I_FREEING state
 * flag, so no inodes here should ever be on the LRU when being evicted.
 */
456
static void evict(struct inode *inode)
457 458 459
{
	const struct super_operations *op = inode->i_sb->s_op;

D
Dave Chinner 已提交
460 461 462
	BUG_ON(!(inode->i_state & I_FREEING));
	BUG_ON(!list_empty(&inode->i_lru));

463
	inode_wb_list_del(inode);
464 465
	inode_sb_list_del(inode);

A
Al Viro 已提交
466 467
	if (op->evict_inode) {
		op->evict_inode(inode);
468 469 470
	} else {
		if (inode->i_data.nrpages)
			truncate_inode_pages(&inode->i_data, 0);
A
Al Viro 已提交
471
		end_writeback(inode);
472
	}
473 474 475 476
	if (S_ISBLK(inode->i_mode) && inode->i_bdev)
		bd_forget(inode);
	if (S_ISCHR(inode->i_mode) && inode->i_cdev)
		cd_forget(inode);
D
Dave Chinner 已提交
477 478 479 480 481 482 483 484 485

	remove_inode_hash(inode);

	spin_lock(&inode->i_lock);
	wake_up_bit(&inode->i_state, __I_NEW);
	BUG_ON(inode->i_state != (I_FREEING | I_CLEAR));
	spin_unlock(&inode->i_lock);

	destroy_inode(inode);
486 487
}

L
Linus Torvalds 已提交
488 489 490 491 492 493 494 495 496 497 498 499
/*
 * dispose_list - dispose of the contents of a local list
 * @head: the head of the list to free
 *
 * Dispose-list gets a local list with local inodes in it, so it doesn't
 * need to worry about list corruption and SMP locks.
 */
static void dispose_list(struct list_head *head)
{
	while (!list_empty(head)) {
		struct inode *inode;

N
Nick Piggin 已提交
500 501
		inode = list_first_entry(head, struct inode, i_lru);
		list_del_init(&inode->i_lru);
L
Linus Torvalds 已提交
502

503
		evict(inode);
L
Linus Torvalds 已提交
504 505 506
	}
}

A
Al Viro 已提交
507 508 509 510 511 512 513 514
/**
 * evict_inodes	- evict all evictable inodes for a superblock
 * @sb:		superblock to operate on
 *
 * Make sure that no inodes with zero refcount are retained.  This is
 * called by superblock shutdown after having MS_ACTIVE flag removed,
 * so any inode reaching zero refcount during or after that call will
 * be immediately evicted.
L
Linus Torvalds 已提交
515
 */
A
Al Viro 已提交
516
void evict_inodes(struct super_block *sb)
L
Linus Torvalds 已提交
517
{
A
Al Viro 已提交
518 519
	struct inode *inode, *next;
	LIST_HEAD(dispose);
L
Linus Torvalds 已提交
520

521
	spin_lock(&inode_sb_list_lock);
A
Al Viro 已提交
522 523
	list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
		if (atomic_read(&inode->i_count))
N
Nick Piggin 已提交
524
			continue;
525 526 527 528

		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
			spin_unlock(&inode->i_lock);
L
Linus Torvalds 已提交
529
			continue;
530
		}
A
Al Viro 已提交
531 532

		inode->i_state |= I_FREEING;
533
		inode_lru_list_del(inode);
534
		spin_unlock(&inode->i_lock);
535
		list_add(&inode->i_lru, &dispose);
L
Linus Torvalds 已提交
536
	}
537
	spin_unlock(&inode_sb_list_lock);
A
Al Viro 已提交
538 539

	dispose_list(&dispose);
C
Christoph Hellwig 已提交
540 541 542 543 544 545 546

	/*
	 * Cycle through iprune_sem to make sure any inode that prune_icache
	 * moved off the list before we took the lock has been fully torn
	 * down.
	 */
	down_write(&iprune_sem);
A
Al Viro 已提交
547
	up_write(&iprune_sem);
L
Linus Torvalds 已提交
548 549 550
}

/**
551 552
 * invalidate_inodes	- attempt to free all inodes on a superblock
 * @sb:		superblock to operate on
553
 * @kill_dirty: flag to guide handling of dirty inodes
L
Linus Torvalds 已提交
554
 *
555 556
 * Attempts to free all inodes for a given superblock.  If there were any
 * busy inodes return a non-zero value, else zero.
557 558
 * If @kill_dirty is set, discard dirty inodes too, otherwise treat
 * them as busy.
L
Linus Torvalds 已提交
559
 */
560
int invalidate_inodes(struct super_block *sb, bool kill_dirty)
L
Linus Torvalds 已提交
561
{
562
	int busy = 0;
563 564
	struct inode *inode, *next;
	LIST_HEAD(dispose);
L
Linus Torvalds 已提交
565

566
	spin_lock(&inode_sb_list_lock);
567
	list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
568 569 570
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
			spin_unlock(&inode->i_lock);
N
Nick Piggin 已提交
571
			continue;
572
		}
573
		if (inode->i_state & I_DIRTY && !kill_dirty) {
574
			spin_unlock(&inode->i_lock);
575 576 577
			busy = 1;
			continue;
		}
578
		if (atomic_read(&inode->i_count)) {
579
			spin_unlock(&inode->i_lock);
580
			busy = 1;
L
Linus Torvalds 已提交
581 582
			continue;
		}
583 584

		inode->i_state |= I_FREEING;
585
		inode_lru_list_del(inode);
586
		spin_unlock(&inode->i_lock);
587
		list_add(&inode->i_lru, &dispose);
L
Linus Torvalds 已提交
588
	}
589
	spin_unlock(&inode_sb_list_lock);
L
Linus Torvalds 已提交
590

591
	dispose_list(&dispose);
L
Linus Torvalds 已提交
592 593 594 595 596 597

	return busy;
}

static int can_unuse(struct inode *inode)
{
598
	if (inode->i_state & ~I_REFERENCED)
L
Linus Torvalds 已提交
599 600 601 602 603 604 605 606 607 608 609
		return 0;
	if (inode_has_buffers(inode))
		return 0;
	if (atomic_read(&inode->i_count))
		return 0;
	if (inode->i_data.nrpages)
		return 0;
	return 1;
}

/*
610
 * Scan `goal' inodes on the unused list for freeable ones. They are moved to a
611
 * temporary list and then are freed outside inode_lru_lock by dispose_list().
L
Linus Torvalds 已提交
612 613
 *
 * Any inodes which are pinned purely because of attached pagecache have their
614 615
 * pagecache removed.  If the inode has metadata buffers attached to
 * mapping->private_list then try to remove them.
L
Linus Torvalds 已提交
616
 *
617 618 619 620 621 622 623
 * If the inode has the I_REFERENCED flag set, then it means that it has been
 * used recently - the flag is set in iput_final(). When we encounter such an
 * inode, clear the flag and move it to the back of the LRU so it gets another
 * pass through the LRU before it gets reclaimed. This is necessary because of
 * the fact we are doing lazy LRU updates to minimise lock contention so the
 * LRU does not have strict ordering. Hence we don't want to reclaim inodes
 * with this flag set because they are the inodes that are out of order.
L
Linus Torvalds 已提交
624 625 626 627 628 629 630
 */
static void prune_icache(int nr_to_scan)
{
	LIST_HEAD(freeable);
	int nr_scanned;
	unsigned long reap = 0;

N
Nick Piggin 已提交
631
	down_read(&iprune_sem);
632
	spin_lock(&inode_lru_lock);
L
Linus Torvalds 已提交
633 634 635
	for (nr_scanned = 0; nr_scanned < nr_to_scan; nr_scanned++) {
		struct inode *inode;

N
Nick Piggin 已提交
636
		if (list_empty(&inode_lru))
L
Linus Torvalds 已提交
637 638
			break;

N
Nick Piggin 已提交
639
		inode = list_entry(inode_lru.prev, struct inode, i_lru);
L
Linus Torvalds 已提交
640

641 642 643 644 645 646 647 648 649 650
		/*
		 * we are inverting the inode_lru_lock/inode->i_lock here,
		 * so use a trylock. If we fail to get the lock, just move the
		 * inode to the back of the list so we don't spin on it.
		 */
		if (!spin_trylock(&inode->i_lock)) {
			list_move(&inode->i_lru, &inode_lru);
			continue;
		}

651 652 653 654 655 656
		/*
		 * Referenced or dirty inodes are still in use. Give them
		 * another pass through the LRU as we canot reclaim them now.
		 */
		if (atomic_read(&inode->i_count) ||
		    (inode->i_state & ~I_REFERENCED)) {
N
Nick Piggin 已提交
657
			list_del_init(&inode->i_lru);
658
			spin_unlock(&inode->i_lock);
659
			inodes_stat.nr_unused--;
660 661 662 663 664 665
			continue;
		}

		/* recently referenced inodes get one more pass */
		if (inode->i_state & I_REFERENCED) {
			inode->i_state &= ~I_REFERENCED;
666
			list_move(&inode->i_lru, &inode_lru);
667
			spin_unlock(&inode->i_lock);
L
Linus Torvalds 已提交
668 669 670 671
			continue;
		}
		if (inode_has_buffers(inode) || inode->i_data.nrpages) {
			__iget(inode);
672
			spin_unlock(&inode->i_lock);
673
			spin_unlock(&inode_lru_lock);
L
Linus Torvalds 已提交
674
			if (remove_inode_buffers(inode))
675 676
				reap += invalidate_mapping_pages(&inode->i_data,
								0, -1);
L
Linus Torvalds 已提交
677
			iput(inode);
678
			spin_lock(&inode_lru_lock);
L
Linus Torvalds 已提交
679

N
Nick Piggin 已提交
680 681
			if (inode != list_entry(inode_lru.next,
						struct inode, i_lru))
L
Linus Torvalds 已提交
682
				continue;	/* wrong inode or list_empty */
683 684 685
			/* avoid lock inversions with trylock */
			if (!spin_trylock(&inode->i_lock))
				continue;
686 687
			if (!can_unuse(inode)) {
				spin_unlock(&inode->i_lock);
L
Linus Torvalds 已提交
688
				continue;
689
			}
L
Linus Torvalds 已提交
690
		}
691
		WARN_ON(inode->i_state & I_NEW);
L
Linus Torvalds 已提交
692
		inode->i_state |= I_FREEING;
693
		spin_unlock(&inode->i_lock);
N
Nick Piggin 已提交
694 695

		list_move(&inode->i_lru, &freeable);
696
		inodes_stat.nr_unused--;
L
Linus Torvalds 已提交
697
	}
698 699 700 701
	if (current_is_kswapd())
		__count_vm_events(KSWAPD_INODESTEAL, reap);
	else
		__count_vm_events(PGINODESTEAL, reap);
702
	spin_unlock(&inode_lru_lock);
L
Linus Torvalds 已提交
703 704

	dispose_list(&freeable);
N
Nick Piggin 已提交
705
	up_read(&iprune_sem);
L
Linus Torvalds 已提交
706 707 708 709 710 711 712 713 714 715 716
}

/*
 * shrink_icache_memory() will attempt to reclaim some unused inodes.  Here,
 * "unused" means that no dentries are referring to the inodes: the files are
 * not open and the dcache references to those inodes have already been
 * reclaimed.
 *
 * This function is passed the number of inodes to scan, and it returns the
 * total number of remaining possibly-reclaimable inodes.
 */
717 718
static int shrink_icache_memory(struct shrinker *shrink,
				struct shrink_control *sc)
L
Linus Torvalds 已提交
719
{
720 721 722
	int nr = sc->nr_to_scan;
	gfp_t gfp_mask = sc->gfp_mask;

L
Linus Torvalds 已提交
723 724 725 726 727
	if (nr) {
		/*
		 * Nasty deadlock avoidance.  We may hold various FS locks,
		 * and we don't want to recurse into the FS that called us
		 * in clear_inode() and friends..
728
		 */
L
Linus Torvalds 已提交
729 730 731 732
		if (!(gfp_mask & __GFP_FS))
			return -1;
		prune_icache(nr);
	}
733
	return (get_nr_inodes_unused() / 100) * sysctl_vfs_cache_pressure;
L
Linus Torvalds 已提交
734 735
}

736 737 738 739 740
/*
 * Shrinker registration: lets the VM reclaim unused inodes via
 * shrink_icache_memory() under memory pressure.
 */
static struct shrinker icache_shrinker = {
	.shrink = shrink_icache_memory,
	.seeks = DEFAULT_SEEKS,
};

L
Linus Torvalds 已提交
741 742 743 744
static void __wait_on_freeing_inode(struct inode *inode);
/*
 * Called with the inode lock held.
 */
745 746 747 748
static struct inode *find_inode(struct super_block *sb,
				struct hlist_head *head,
				int (*test)(struct inode *, void *),
				void *data)
L
Linus Torvalds 已提交
749 750
{
	struct hlist_node *node;
751
	struct inode *inode = NULL;
L
Linus Torvalds 已提交
752 753

repeat:
754
	hlist_for_each_entry(inode, node, head, i_hash) {
755 756 757
		spin_lock(&inode->i_lock);
		if (inode->i_sb != sb) {
			spin_unlock(&inode->i_lock);
L
Linus Torvalds 已提交
758
			continue;
759 760 761
		}
		if (!test(inode, data)) {
			spin_unlock(&inode->i_lock);
L
Linus Torvalds 已提交
762
			continue;
763
		}
A
Al Viro 已提交
764
		if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
L
Linus Torvalds 已提交
765 766 767
			__wait_on_freeing_inode(inode);
			goto repeat;
		}
768
		__iget(inode);
769
		spin_unlock(&inode->i_lock);
770
		return inode;
L
Linus Torvalds 已提交
771
	}
772
	return NULL;
L
Linus Torvalds 已提交
773 774 775 776 777 778
}

/*
 * find_inode_fast is the fast path version of find_inode, see the comment at
 * iget_locked for details.
 */
779 780
static struct inode *find_inode_fast(struct super_block *sb,
				struct hlist_head *head, unsigned long ino)
L
Linus Torvalds 已提交
781 782
{
	struct hlist_node *node;
783
	struct inode *inode = NULL;
L
Linus Torvalds 已提交
784 785

repeat:
786
	hlist_for_each_entry(inode, node, head, i_hash) {
787 788 789
		spin_lock(&inode->i_lock);
		if (inode->i_ino != ino) {
			spin_unlock(&inode->i_lock);
L
Linus Torvalds 已提交
790
			continue;
791 792 793
		}
		if (inode->i_sb != sb) {
			spin_unlock(&inode->i_lock);
L
Linus Torvalds 已提交
794
			continue;
795
		}
A
Al Viro 已提交
796
		if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
L
Linus Torvalds 已提交
797 798 799
			__wait_on_freeing_inode(inode);
			goto repeat;
		}
800
		__iget(inode);
801
		spin_unlock(&inode->i_lock);
802
		return inode;
L
Linus Torvalds 已提交
803
	}
804
	return NULL;
805 806
}

807 808 809 810
/*
 * Each cpu owns a range of LAST_INO_BATCH numbers.
 * 'shared_last_ino' is dirtied only once out of LAST_INO_BATCH allocations,
 * to renew the exhausted range.
811
 *
812 813 814 815 816 817 818 819 820
 * This does not significantly increase overflow rate because every CPU can
 * consume at most LAST_INO_BATCH-1 unused inode numbers. So there is
 * NR_CPUS*(LAST_INO_BATCH-1) wastage. At 4096 and 1024, this is ~0.1% of the
 * 2^32 range, and is a worst-case. Even a 50% wastage would only increase
 * overflow rate by 2x, which does not seem too significant.
 *
 * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
 * error if st_ino won't fit in target struct field. Use 32bit counter
 * here to attempt to avoid that.
821
 */
822 823 824
#define LAST_INO_BATCH 1024
static DEFINE_PER_CPU(unsigned int, last_ino);

825
unsigned int get_next_ino(void)
826
{
827 828
	unsigned int *p = &get_cpu_var(last_ino);
	unsigned int res = *p;
829

830 831 832 833 834 835 836 837 838 839 840 841
#ifdef CONFIG_SMP
	if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
		static atomic_t shared_last_ino;
		int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);

		res = next - LAST_INO_BATCH;
	}
#endif

	*p = ++res;
	put_cpu_var(last_ino);
	return res;
842
}
843
EXPORT_SYMBOL(get_next_ino);
844

L
Linus Torvalds 已提交
845 846 847 848
/**
 *	new_inode 	- obtain an inode
 *	@sb: superblock
 *
849
 *	Allocates a new inode for given superblock. The default gfp_mask
H
Hugh Dickins 已提交
850
 *	for allocations related to inode->i_mapping is GFP_HIGHUSER_MOVABLE.
851 852 853 854 855
 *	If HIGHMEM pages are unsuitable or it is known that pages allocated
 *	for the page cache are not reclaimable or migratable,
 *	mapping_set_gfp_mask() must be called with suitable flags on the
 *	newly created inode's mapping
 *
L
Linus Torvalds 已提交
856 857 858
 */
struct inode *new_inode(struct super_block *sb)
{
859
	struct inode *inode;
L
Linus Torvalds 已提交
860

861
	spin_lock_prefetch(&inode_sb_list_lock);
862

L
Linus Torvalds 已提交
863 864
	inode = alloc_inode(sb);
	if (inode) {
865
		spin_lock(&inode->i_lock);
L
Linus Torvalds 已提交
866
		inode->i_state = 0;
867
		spin_unlock(&inode->i_lock);
868
		inode_sb_list_add(inode);
L
Linus Torvalds 已提交
869 870 871 872 873
	}
	return inode;
}
EXPORT_SYMBOL(new_inode);

874 875 876 877 878 879 880
/**
 * unlock_new_inode - clear the I_NEW state and wake up any waiters
 * @inode:	new inode to unlock
 *
 * Called when the inode is fully initialised to clear the new state of the
 * inode and wake up anyone waiting for the inode to finish initialisation.
 */
L
Linus Torvalds 已提交
881 882
void unlock_new_inode(struct inode *inode)
{
883
#ifdef CONFIG_DEBUG_LOCK_ALLOC
884
	if (S_ISDIR(inode->i_mode)) {
885 886
		struct file_system_type *type = inode->i_sb->s_type;

887 888 889 890 891 892 893 894 895 896 897
		/* Set new key only if filesystem hasn't already changed it */
		if (!lockdep_match_class(&inode->i_mutex,
		    &type->i_mutex_key)) {
			/*
			 * ensure nobody is actually holding i_mutex
			 */
			mutex_destroy(&inode->i_mutex);
			mutex_init(&inode->i_mutex);
			lockdep_set_class(&inode->i_mutex,
					  &type->i_mutex_dir_key);
		}
898
	}
899
#endif
900
	spin_lock(&inode->i_lock);
C
Christoph Hellwig 已提交
901 902
	WARN_ON(!(inode->i_state & I_NEW));
	inode->i_state &= ~I_NEW;
903 904
	wake_up_bit(&inode->i_state, __I_NEW);
	spin_unlock(&inode->i_lock);
L
Linus Torvalds 已提交
905 906 907
}
EXPORT_SYMBOL(unlock_new_inode);

C
Christoph Hellwig 已提交
908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923
/**
 * iget5_locked - obtain an inode from a mounted file system
 * @sb:		super block of file system
 * @hashval:	hash value (usually inode number) to get
 * @test:	callback used for comparisons between inodes
 * @set:	callback used to initialize a new struct inode
 * @data:	opaque data pointer to pass to @test and @set
 *
 * Search for the inode specified by @hashval and @data in the inode cache,
 * and if present it is returned with an increased reference count. This is
 * a generalized version of iget_locked() for file systems where the inode
 * number is not sufficient for unique identification of an inode.
 *
 * If the inode is not in cache, allocate a new inode and return it locked,
 * hashed, and with the I_NEW flag set. The file system gets to fill it in
 * before unlocking it via unlock_new_inode().
 *
 * Note both @test and @set are called with the inode_hash_lock held, so can't
 * sleep.
 */
struct inode *iget5_locked(struct super_block *sb, unsigned long hashval,
		int (*test)(struct inode *, void *),
		int (*set)(struct inode *, void *), void *data)
{
	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
	struct inode *inode;

	/* Fast path: look for an already-cached inode first. */
	spin_lock(&inode_hash_lock);
	inode = find_inode(sb, head, test, data);
	spin_unlock(&inode_hash_lock);

	if (inode) {
		/* wait until a concurrent creator clears I_NEW */
		wait_on_inode(inode);
		return inode;
	}

	inode = alloc_inode(sb);
	if (inode) {
		struct inode *old;

		spin_lock(&inode_hash_lock);
		/* We released the lock, so.. */
		old = find_inode(sb, head, test, data);
		if (!old) {
			if (set(inode, data))
				goto set_failed;

			/*
			 * Publish the inode in the hash with I_NEW set so
			 * other lookups wait until we unlock_new_inode().
			 */
			spin_lock(&inode->i_lock);
			inode->i_state = I_NEW;
			hlist_add_head(&inode->i_hash, head);
			spin_unlock(&inode->i_lock);
			inode_sb_list_add(inode);
			spin_unlock(&inode_hash_lock);

			/* Return the locked inode with I_NEW set, the
			 * caller is responsible for filling in the contents
			 */
			return inode;
		}

		/*
		 * Uhhuh, somebody else created the same inode under
		 * us. Use the old inode instead of the one we just
		 * allocated.
		 */
		spin_unlock(&inode_hash_lock);
		destroy_inode(inode);
		inode = old;
		wait_on_inode(inode);
	}
	return inode;

set_failed:
	/* @set rejected the inode: throw away our unhashed allocation */
	spin_unlock(&inode_hash_lock);
	destroy_inode(inode);
	return NULL;
}
EXPORT_SYMBOL(iget5_locked);
L
Linus Torvalds 已提交
986

C
Christoph Hellwig 已提交
987 988 989 990 991 992 993 994 995 996 997 998
/**
 * iget_locked - obtain an inode from a mounted file system
 * @sb:		super block of file system
 * @ino:	inode number to get
 *
 * Search for the inode specified by @ino in the inode cache and if present
 * return it with an increased reference count. This is for file systems
 * where the inode number is sufficient for unique identification of an inode.
 *
 * If the inode is not in cache, allocate a new inode and return it locked,
 * hashed, and with the I_NEW flag set.  The file system gets to fill it in
 * before unlocking it via unlock_new_inode().
 */
struct inode *iget_locked(struct super_block *sb, unsigned long ino)
{
	struct hlist_head *head = inode_hashtable + hash(sb, ino);
	struct inode *inode;

	/* Fast path: look for an already-cached inode first. */
	spin_lock(&inode_hash_lock);
	inode = find_inode_fast(sb, head, ino);
	spin_unlock(&inode_hash_lock);
	if (inode) {
		/* wait until a concurrent creator clears I_NEW */
		wait_on_inode(inode);
		return inode;
	}

	inode = alloc_inode(sb);
	if (inode) {
		struct inode *old;

		spin_lock(&inode_hash_lock);
		/* We released the lock, so.. */
		old = find_inode_fast(sb, head, ino);
		if (!old) {
			inode->i_ino = ino;
			/*
			 * Publish the inode in the hash with I_NEW set so
			 * other lookups wait until we unlock_new_inode().
			 */
			spin_lock(&inode->i_lock);
			inode->i_state = I_NEW;
			hlist_add_head(&inode->i_hash, head);
			spin_unlock(&inode->i_lock);
			inode_sb_list_add(inode);
			spin_unlock(&inode_hash_lock);

			/* Return the locked inode with I_NEW set, the
			 * caller is responsible for filling in the contents
			 */
			return inode;
		}

		/*
		 * Uhhuh, somebody else created the same inode under
		 * us. Use the old inode instead of the one we just
		 * allocated.
		 */
		spin_unlock(&inode_hash_lock);
		destroy_inode(inode);
		inode = old;
		wait_on_inode(inode);
	}
	return inode;
}
EXPORT_SYMBOL(iget_locked);
L
Linus Torvalds 已提交
1048

1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061
/*
 * search the inode cache for a matching inode number.
 * If we find one, then the inode number we are trying to
 * allocate is not unique and so we should not use it.
 *
 * Returns 1 if the inode number is unique, 0 if it is not.
 */
static int test_inode_iunique(struct super_block *sb, unsigned long ino)
{
	struct hlist_head *b = inode_hashtable + hash(sb, ino);
	struct hlist_node *node;
	struct inode *inode;

1062
	spin_lock(&inode_hash_lock);
1063
	hlist_for_each_entry(inode, node, b, i_hash) {
1064 1065
		if (inode->i_ino == ino && inode->i_sb == sb) {
			spin_unlock(&inode_hash_lock);
1066
			return 0;
1067
		}
1068
	}
1069
	spin_unlock(&inode_hash_lock);
1070 1071 1072 1073

	return 1;
}

L
Linus Torvalds 已提交
1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089
/**
 *	iunique - get a unique inode number
 *	@sb: superblock
 *	@max_reserved: highest reserved inode number
 *
 *	Obtain an inode number that is unique on the system for a given
 *	superblock. This is used by file systems that have no natural
 *	permanent inode numbering system. An inode number is returned that
 *	is higher than the reserved limit but unique.
 *
 *	BUGS:
 *	With a large number of inodes live on the file system this function
 *	currently becomes quite slow.
 */
ino_t iunique(struct super_block *sb, ino_t max_reserved)
{
	/*
	 * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
	 * error if st_ino won't fit in target struct field. Use 32bit counter
	 * here to attempt to avoid that.
	 */
	static DEFINE_SPINLOCK(iunique_lock);
	static unsigned int counter;
	ino_t res;

	spin_lock(&iunique_lock);
	for (;;) {
		/* never hand out anything at or below the reserved range */
		if (counter <= max_reserved)
			counter = max_reserved + 1;
		res = counter++;
		/* keep going until the candidate is not hashed anywhere */
		if (test_inode_iunique(sb, res))
			break;
	}
	spin_unlock(&iunique_lock);

	return res;
}
EXPORT_SYMBOL(iunique);

struct inode *igrab(struct inode *inode)
{
1113 1114
	spin_lock(&inode->i_lock);
	if (!(inode->i_state & (I_FREEING|I_WILL_FREE))) {
L
Linus Torvalds 已提交
1115
		__iget(inode);
1116 1117 1118
		spin_unlock(&inode->i_lock);
	} else {
		spin_unlock(&inode->i_lock);
L
Linus Torvalds 已提交
1119 1120 1121 1122 1123 1124
		/*
		 * Handle the case where s_op->clear_inode is not been
		 * called yet, and somebody is calling igrab
		 * while the inode is getting freed.
		 */
		inode = NULL;
1125
	}
L
Linus Torvalds 已提交
1126 1127 1128 1129 1130
	return inode;
}
EXPORT_SYMBOL(igrab);

/**
C
Christoph Hellwig 已提交
1131
 * ilookup5_nowait - search for an inode in the inode cache
L
Linus Torvalds 已提交
1132
 * @sb:		super block of file system to search
C
Christoph Hellwig 已提交
1133
 * @hashval:	hash value (usually inode number) to search for
L
Linus Torvalds 已提交
1134 1135 1136
 * @test:	callback used for comparisons between inodes
 * @data:	opaque data pointer to pass to @test
 *
C
Christoph Hellwig 已提交
1137
 * Search for the inode specified by @hashval and @data in the inode cache.
L
Linus Torvalds 已提交
1138 1139 1140
 * If the inode is in the cache, the inode is returned with an incremented
 * reference count.
 *
C
Christoph Hellwig 已提交
1141 1142
 * Note: I_NEW is not waited upon so you have to be very careful what you do
 * with the returned inode.  You probably should be using ilookup5() instead.
L
Linus Torvalds 已提交
1143
 *
1144
 * Note2: @test is called with the inode_hash_lock held, so can't sleep.
L
Linus Torvalds 已提交
1145
 */
C
Christoph Hellwig 已提交
1146 1147
struct inode *ilookup5_nowait(struct super_block *sb, unsigned long hashval,
		int (*test)(struct inode *, void *), void *data)
L
Linus Torvalds 已提交
1148
{
C
Christoph Hellwig 已提交
1149
	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
L
Linus Torvalds 已提交
1150 1151
	struct inode *inode;

1152
	spin_lock(&inode_hash_lock);
L
Linus Torvalds 已提交
1153
	inode = find_inode(sb, head, test, data);
1154
	spin_unlock(&inode_hash_lock);
1155

C
Christoph Hellwig 已提交
1156
	return inode;
1157 1158 1159 1160 1161 1162 1163 1164 1165 1166
}
EXPORT_SYMBOL(ilookup5_nowait);

/**
 * ilookup5 - search for an inode in the inode cache
 * @sb:		super block of file system to search
 * @hashval:	hash value (usually inode number) to search for
 * @test:	callback used for comparisons between inodes
 * @data:	opaque data pointer to pass to @test
 *
C
Christoph Hellwig 已提交
1167 1168 1169
 * Search for the inode specified by @hashval and @data in the inode cache,
 * and if the inode is in the cache, return the inode with an incremented
 * reference count.  Waits on I_NEW before returning the inode.
1170
 * returned with an incremented reference count.
L
Linus Torvalds 已提交
1171
 *
C
Christoph Hellwig 已提交
1172 1173
 * This is a generalized version of ilookup() for file systems where the
 * inode number is not sufficient for unique identification of an inode.
L
Linus Torvalds 已提交
1174
 *
C
Christoph Hellwig 已提交
1175
 * Note: @test is called with the inode_hash_lock held, so can't sleep.
L
Linus Torvalds 已提交
1176 1177 1178 1179
 */
struct inode *ilookup5(struct super_block *sb, unsigned long hashval,
		int (*test)(struct inode *, void *), void *data)
{
C
Christoph Hellwig 已提交
1180
	struct inode *inode = ilookup5_nowait(sb, hashval, test, data);
L
Linus Torvalds 已提交
1181

C
Christoph Hellwig 已提交
1182 1183 1184
	if (inode)
		wait_on_inode(inode);
	return inode;
L
Linus Torvalds 已提交
1185 1186 1187 1188 1189 1190 1191 1192
}
EXPORT_SYMBOL(ilookup5);

/**
 * ilookup - search for an inode in the inode cache
 * @sb:		super block of file system to search
 * @ino:	inode number to search for
 *
C
Christoph Hellwig 已提交
1193 1194
 * Search for the inode @ino in the inode cache, and if the inode is in the
 * cache, the inode is returned with an incremented reference count.
L
Linus Torvalds 已提交
1195 1196 1197 1198 1199 1200
 */
struct inode *ilookup(struct super_block *sb, unsigned long ino)
{
	struct hlist_head *head = inode_hashtable + hash(sb, ino);
	struct inode *inode;

C
Christoph Hellwig 已提交
1201 1202 1203
	spin_lock(&inode_hash_lock);
	inode = find_inode_fast(sb, head, ino);
	spin_unlock(&inode_hash_lock);
L
Linus Torvalds 已提交
1204 1205

	if (inode)
C
Christoph Hellwig 已提交
1206 1207
		wait_on_inode(inode);
	return inode;
L
Linus Torvalds 已提交
1208
}
C
Christoph Hellwig 已提交
1209
EXPORT_SYMBOL(ilookup);
L
Linus Torvalds 已提交
1210

A
Al Viro 已提交
1211 1212 1213 1214 1215 1216 1217
/*
 * insert_inode_locked - hash a new inode, failing if its number is taken
 * @inode: unhashed inode whose i_ino is already set
 *
 * Atomically checks the hash for a live inode with the same i_ino on the
 * same superblock.  If none exists, @inode is hashed with I_NEW set and 0
 * is returned.  If a live duplicate exists, pin it, wait for it to settle,
 * and either retry (it was unhashed/deleted) or return -EBUSY (the number
 * is genuinely in use).
 */
int insert_inode_locked(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	ino_t ino = inode->i_ino;
	struct hlist_head *head = inode_hashtable + hash(sb, ino);

	while (1) {
		struct hlist_node *node;
		struct inode *old = NULL;
		spin_lock(&inode_hash_lock);
		hlist_for_each_entry(old, node, head, i_hash) {
			if (old->i_ino != ino)
				continue;
			if (old->i_sb != sb)
				continue;
			/* skip duplicates that are already being freed */
			spin_lock(&old->i_lock);
			if (old->i_state & (I_FREEING|I_WILL_FREE)) {
				spin_unlock(&old->i_lock);
				continue;
			}
			/* live duplicate found - leave old->i_lock held */
			break;
		}
		if (likely(!node)) {
			/* no live duplicate: publish ourselves as I_NEW */
			spin_lock(&inode->i_lock);
			inode->i_state |= I_NEW;
			hlist_add_head(&inode->i_hash, head);
			spin_unlock(&inode->i_lock);
			spin_unlock(&inode_hash_lock);
			return 0;
		}
		/* pin the duplicate so we can drop the locks and wait on it */
		__iget(old);
		spin_unlock(&old->i_lock);
		spin_unlock(&inode_hash_lock);
		wait_on_inode(old);
		if (unlikely(!inode_unhashed(old))) {
			/* still hashed after the wait: number is in use */
			iput(old);
			return -EBUSY;
		}
		/* the duplicate was unhashed while we waited - retry */
		iput(old);
	}
}
EXPORT_SYMBOL(insert_inode_locked);

/*
 * insert_inode_locked4 - hash a new inode, using a comparison callback
 * @inode:   unhashed inode to insert
 * @hashval: hash value (usually inode number) to hash under
 * @test:    callback used for comparisons between inodes
 * @data:    opaque data pointer to pass to @test
 *
 * Like insert_inode_locked(), but identity is decided by @test rather
 * than by i_ino, for file systems where the inode number alone is not
 * sufficient.  @test is called with inode_hash_lock held, so it cannot
 * sleep.  Returns 0 on success or -EBUSY if a live match exists.
 */
int insert_inode_locked4(struct inode *inode, unsigned long hashval,
		int (*test)(struct inode *, void *), void *data)
{
	struct super_block *sb = inode->i_sb;
	struct hlist_head *head = inode_hashtable + hash(sb, hashval);

	while (1) {
		struct hlist_node *node;
		struct inode *old = NULL;

		spin_lock(&inode_hash_lock);
		hlist_for_each_entry(old, node, head, i_hash) {
			if (old->i_sb != sb)
				continue;
			if (!test(old, data))
				continue;
			/* skip matches that are already being freed */
			spin_lock(&old->i_lock);
			if (old->i_state & (I_FREEING|I_WILL_FREE)) {
				spin_unlock(&old->i_lock);
				continue;
			}
			/* live match found - leave old->i_lock held */
			break;
		}
		if (likely(!node)) {
			/* no live match: publish ourselves as I_NEW */
			spin_lock(&inode->i_lock);
			inode->i_state |= I_NEW;
			hlist_add_head(&inode->i_hash, head);
			spin_unlock(&inode->i_lock);
			spin_unlock(&inode_hash_lock);
			return 0;
		}
		/* pin the match so we can drop the locks and wait on it */
		__iget(old);
		spin_unlock(&old->i_lock);
		spin_unlock(&inode_hash_lock);
		wait_on_inode(old);
		if (unlikely(!inode_unhashed(old))) {
			/* still hashed after the wait: identity is in use */
			iput(old);
			return -EBUSY;
		}
		/* the match was unhashed while we waited - retry */
		iput(old);
	}
}
EXPORT_SYMBOL(insert_inode_locked4);

L
Linus Torvalds 已提交
1298

1299 1300 1301 1302 1303 1304
/*
 * Helper for ->drop_inode(): always report "drop", i.e. tell
 * iput_final() to evict the inode as soon as the last reference
 * goes away instead of caching it.
 */
int generic_delete_inode(struct inode *inode)
{
	return 1;
}
EXPORT_SYMBOL(generic_delete_inode);

L
Linus Torvalds 已提交
1305
/*
1306 1307 1308
 * Normal UNIX filesystem behaviour: delete the
 * inode when the usage count drops to zero, and
 * i_nlink is zero.
L
Linus Torvalds 已提交
1309
 */
1310
int generic_drop_inode(struct inode *inode)
L
Linus Torvalds 已提交
1311
{
A
Al Viro 已提交
1312
	return !inode->i_nlink || inode_unhashed(inode);
L
Linus Torvalds 已提交
1313
}
1314
EXPORT_SYMBOL_GPL(generic_drop_inode);
L
Linus Torvalds 已提交
1315

1316 1317 1318
/*
 * Called when we're dropping the last reference
 * to an inode.
 *
 * Call the FS "drop_inode()" function, defaulting to
 * the legacy UNIX filesystem behaviour.  If it tells
 * us to evict inode, do so.  Otherwise, retain inode
 * in cache if fs is alive, sync and evict if fs is
 * shutting down.
 *
 * Entered with inode->i_lock held (taken by iput() via
 * atomic_dec_and_lock()); the lock is released on all paths.
 */
static void iput_final(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	const struct super_operations *op = inode->i_sb->s_op;
	int drop;

	WARN_ON(inode->i_state & I_NEW);

	if (op && op->drop_inode)
		drop = op->drop_inode(inode);
	else
		drop = generic_drop_inode(inode);

	/* fs is alive and wants the inode cached: park it on the LRU */
	if (!drop && (sb->s_flags & MS_ACTIVE)) {
		inode->i_state |= I_REFERENCED;
		if (!(inode->i_state & (I_DIRTY|I_SYNC)))
			inode_lru_list_add(inode);
		spin_unlock(&inode->i_lock);
		return;
	}

	/* fs is shutting down: write the inode back before evicting it */
	if (!drop) {
		/* I_WILL_FREE keeps lookups off while we drop i_lock to sync */
		inode->i_state |= I_WILL_FREE;
		spin_unlock(&inode->i_lock);
		write_inode_now(inode, 1);
		spin_lock(&inode->i_lock);
		WARN_ON(inode->i_state & I_NEW);
		inode->i_state &= ~I_WILL_FREE;
	}

	inode->i_state |= I_FREEING;
	inode_lru_list_del(inode);
	spin_unlock(&inode->i_lock);

	evict(inode);
}

/**
1364
 *	iput	- put an inode
L
Linus Torvalds 已提交
1365 1366 1367 1368 1369 1370 1371 1372 1373 1374
 *	@inode: inode to put
 *
 *	Puts an inode, dropping its usage count. If the inode use count hits
 *	zero, the inode is then freed and may also be destroyed.
 *
 *	Consequently, iput() can sleep.
 */
void iput(struct inode *inode)
{
	if (inode) {
A
Al Viro 已提交
1375
		BUG_ON(inode->i_state & I_CLEAR);
L
Linus Torvalds 已提交
1376

1377
		if (atomic_dec_and_lock(&inode->i_count, &inode->i_lock))
L
Linus Torvalds 已提交
1378 1379 1380 1381 1382 1383 1384 1385 1386 1387 1388 1389 1390
			iput_final(inode);
	}
}
EXPORT_SYMBOL(iput);

/**
 *	bmap	- find a block number in a file
 *	@inode: inode of file
 *	@block: block to find
 *
 *	Returns the block number on the device holding the inode that
 *	is the disk block number for the block of the file requested.
 *	That is, asked for block 4 of inode 1 the function will return the
1391
 *	disk block relative to the disk start that holds that block of the
L
Linus Torvalds 已提交
1392 1393
 *	file.
 */
1394
sector_t bmap(struct inode *inode, sector_t block)
L
Linus Torvalds 已提交
1395 1396 1397 1398 1399 1400 1401 1402
{
	sector_t res = 0;
	if (inode->i_mapping->a_ops->bmap)
		res = inode->i_mapping->a_ops->bmap(inode->i_mapping, block);
	return res;
}
EXPORT_SYMBOL(bmap);

1403 1404 1405 1406 1407 1408 1409 1410 1411 1412 1413 1414 1415 1416 1417 1418 1419 1420 1421 1422 1423 1424 1425 1426 1427 1428 1429 1430 1431 1432 1433 1434 1435 1436
/*
 * With relative atime, only update atime if the previous atime is
 * earlier than either the ctime or mtime or if at least a day has
 * passed since the last atime update.
 */
static int relatime_need_update(struct vfsmount *mnt, struct inode *inode,
			     struct timespec now)
{
	if (!(mnt->mnt_flags & MNT_RELATIME))
		return 1;

	/*
	 * Update when mtime or ctime is not older than the stored atime
	 * (short-circuit order matches the original checks).
	 */
	if (timespec_compare(&inode->i_mtime, &inode->i_atime) >= 0 ||
	    timespec_compare(&inode->i_ctime, &inode->i_atime) >= 0)
		return 1;

	/* Update when the stored atime is at least a day old. */
	if ((long)(now.tv_sec - inode->i_atime.tv_sec) >= 24*60*60)
		return 1;

	/* Otherwise the atime update can be skipped. */
	return 0;
}

L
Linus Torvalds 已提交
1437
/**
C
Christoph Hellwig 已提交
1438 1439
 *	touch_atime	-	update the access time
 *	@mnt: mount the inode is accessed on
1440
 *	@dentry: dentry accessed
L
Linus Torvalds 已提交
1441 1442 1443 1444 1445
 *
 *	Update the accessed time on an inode and mark it for writeback.
 *	This function automatically handles read only file systems and media,
 *	as well as the "noatime" flag and inode specific "noatime" markers.
 */
C
Christoph Hellwig 已提交
1446
void touch_atime(struct vfsmount *mnt, struct dentry *dentry)
L
Linus Torvalds 已提交
1447
{
C
Christoph Hellwig 已提交
1448
	struct inode *inode = dentry->d_inode;
L
Linus Torvalds 已提交
1449 1450
	struct timespec now;

1451
	if (inode->i_flags & S_NOATIME)
A
Andi Kleen 已提交
1452
		return;
1453
	if (IS_NOATIME(inode))
A
Andi Kleen 已提交
1454
		return;
A
Andrew Morton 已提交
1455
	if ((inode->i_sb->s_flags & MS_NODIRATIME) && S_ISDIR(inode->i_mode))
A
Andi Kleen 已提交
1456
		return;
V
Valerie Henson 已提交
1457

1458
	if (mnt->mnt_flags & MNT_NOATIME)
A
Andi Kleen 已提交
1459
		return;
1460
	if ((mnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode))
A
Andi Kleen 已提交
1461
		return;
L
Linus Torvalds 已提交
1462 1463

	now = current_fs_time(inode->i_sb);
1464 1465

	if (!relatime_need_update(mnt, inode, now))
A
Andi Kleen 已提交
1466
		return;
1467

V
Valerie Henson 已提交
1468
	if (timespec_equal(&inode->i_atime, &now))
A
Andi Kleen 已提交
1469 1470 1471 1472
		return;

	if (mnt_want_write(mnt))
		return;
V
Valerie Henson 已提交
1473 1474 1475

	inode->i_atime = now;
	mark_inode_dirty_sync(inode);
1476
	mnt_drop_write(mnt);
L
Linus Torvalds 已提交
1477
}
C
Christoph Hellwig 已提交
1478
EXPORT_SYMBOL(touch_atime);
L
Linus Torvalds 已提交
1479 1480

/**
1481 1482
 *	file_update_time	-	update mtime and ctime time
 *	@file: file accessed
L
Linus Torvalds 已提交
1483
 *
1484 1485 1486 1487
 *	Update the mtime and ctime members of an inode and mark the inode
 *	for writeback.  Note that this function is meant exclusively for
 *	usage in the file write path of filesystems, and filesystems may
 *	choose to explicitly ignore update via this function with the
1488
 *	S_NOCMTIME inode flag, e.g. for network filesystem where these
1489
 *	timestamps are handled by the server.
L
Linus Torvalds 已提交
1490 1491
 */

1492
void file_update_time(struct file *file)
L
Linus Torvalds 已提交
1493
{
1494
	struct inode *inode = file->f_path.dentry->d_inode;
L
Linus Torvalds 已提交
1495
	struct timespec now;
A
Andi Kleen 已提交
1496
	enum { S_MTIME = 1, S_CTIME = 2, S_VERSION = 4 } sync_it = 0;
L
Linus Torvalds 已提交
1497

A
Andi Kleen 已提交
1498
	/* First try to exhaust all avenues to not sync */
L
Linus Torvalds 已提交
1499 1500
	if (IS_NOCMTIME(inode))
		return;
1501

L
Linus Torvalds 已提交
1502
	now = current_fs_time(inode->i_sb);
A
Andi Kleen 已提交
1503 1504
	if (!timespec_equal(&inode->i_mtime, &now))
		sync_it = S_MTIME;
L
Linus Torvalds 已提交
1505

A
Andi Kleen 已提交
1506 1507
	if (!timespec_equal(&inode->i_ctime, &now))
		sync_it |= S_CTIME;
1508

A
Andi Kleen 已提交
1509 1510
	if (IS_I_VERSION(inode))
		sync_it |= S_VERSION;
1511

A
Andi Kleen 已提交
1512 1513 1514 1515 1516 1517 1518 1519 1520 1521 1522 1523 1524 1525 1526
	if (!sync_it)
		return;

	/* Finally allowed to write? Takes lock. */
	if (mnt_want_write_file(file))
		return;

	/* Only change inode inside the lock region */
	if (sync_it & S_VERSION)
		inode_inc_iversion(inode);
	if (sync_it & S_CTIME)
		inode->i_ctime = now;
	if (sync_it & S_MTIME)
		inode->i_mtime = now;
	mark_inode_dirty_sync(inode);
1527
	mnt_drop_write(file->f_path.mnt);
L
Linus Torvalds 已提交
1528
}
1529
EXPORT_SYMBOL(file_update_time);
L
Linus Torvalds 已提交
1530 1531 1532 1533 1534 1535 1536 1537 1538 1539 1540 1541 1542 1543 1544 1545

/*
 * Does this inode require synchronous writes?  True for sync-mounted
 * inodes and for directories on dirsync mounts.
 */
int inode_needs_sync(struct inode *inode)
{
	return IS_SYNC(inode) ||
	       (S_ISDIR(inode->i_mode) && IS_DIRSYNC(inode));
}
EXPORT_SYMBOL(inode_needs_sync);

/*
 * Wait-bit action routine: just give up the CPU.  The @word argument
 * (the bit being waited on) is unused here.  Always returns 0 so the
 * wait is treated as uninterrupted.
 */
int inode_wait(void *word)
{
	schedule();
	return 0;
}
EXPORT_SYMBOL(inode_wait);
L
Linus Torvalds 已提交
1547 1548

/*
 * If we try to find an inode in the inode hash while it is being
 * deleted, we have to wait until the filesystem completes its
 * deletion before reporting that it isn't found.  This function waits
 * until the deletion _might_ have completed.  Callers are responsible
 * to recheck inode state.
 *
 * It doesn't matter if I_NEW is not set initially, a call to
 * wake_up_bit(&inode->i_state, __I_NEW) after removing from the hash list
 * will DTRT.
 *
 * Called with both inode->i_lock and inode_hash_lock held; both are
 * dropped before sleeping and inode_hash_lock is re-taken afterwards.
 */
static void __wait_on_freeing_inode(struct inode *inode)
{
	wait_queue_head_t *wq;
	DEFINE_WAIT_BIT(wait, &inode->i_state, __I_NEW);
	wq = bit_waitqueue(&inode->i_state, __I_NEW);
	/* queue ourselves before dropping the locks to avoid missed wakeups */
	prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
	spin_unlock(&inode->i_lock);
	spin_unlock(&inode_hash_lock);
	schedule();
	finish_wait(wq, &wait.wait);
	/* i_lock is NOT re-taken: the inode may be gone by now */
	spin_lock(&inode_hash_lock);
}

static __initdata unsigned long ihash_entries;

/* Parse the "ihash_entries=" kernel command line option. */
static int __init set_ihash_entries(char *str)
{
	if (str)
		ihash_entries = simple_strtoul(str, &str, 0);
	return str ? 1 : 0;
}
__setup("ihash_entries=", set_ihash_entries);

/*
 * Initialize the waitqueues and inode hash table.
 *
 * Early (boot-time) variant: allocates the hash with HASH_EARLY from
 * bootmem, unless hashdist defers allocation to inode_init().
 */
void __init inode_init_early(void)
{
	int loop;

	/* If hashes are distributed across NUMA nodes, defer
	 * hash allocation until vmalloc space is available.
	 */
	if (hashdist)
		return;

	inode_hashtable =
		alloc_large_system_hash("Inode-cache",
					sizeof(struct hlist_head),
					ihash_entries,
					14,
					HASH_EARLY,
					&i_hash_shift,
					&i_hash_mask,
					0);

	/* every bucket starts out empty */
	for (loop = 0; loop < (1 << i_hash_shift); loop++)
		INIT_HLIST_HEAD(&inode_hashtable[loop]);
}

1609
/*
 * Set up the inode slab cache, register the inode cache shrinker, and
 * allocate the hash table if inode_init_early() deferred it (hashdist).
 */
void __init inode_init(void)
{
	int loop;

	/* inode slab cache */
	inode_cachep = kmem_cache_create("inode_cache",
					 sizeof(struct inode),
					 0,
					 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
					 SLAB_MEM_SPREAD),
					 init_once);
	register_shrinker(&icache_shrinker);

	/* Hash may have been set up in inode_init_early */
	if (!hashdist)
		return;

	inode_hashtable =
		alloc_large_system_hash("Inode-cache",
					sizeof(struct hlist_head),
					ihash_entries,
					14,
					0,
					&i_hash_shift,
					&i_hash_mask,
					0);

	/* every bucket starts out empty */
	for (loop = 0; loop < (1 << i_hash_shift); loop++)
		INIT_HLIST_HEAD(&inode_hashtable[loop]);
}

/*
 * Set up an inode for a special file (device node, FIFO or socket)
 * according to @mode; @rdev is only meaningful for device nodes.
 */
void init_special_inode(struct inode *inode, umode_t mode, dev_t rdev)
{
	inode->i_mode = mode;

	switch (mode & S_IFMT) {
	case S_IFCHR:
		inode->i_fop = &def_chr_fops;
		inode->i_rdev = rdev;
		break;
	case S_IFBLK:
		inode->i_fop = &def_blk_fops;
		inode->i_rdev = rdev;
		break;
	case S_IFIFO:
		inode->i_fop = &def_fifo_fops;
		break;
	case S_IFSOCK:
		inode->i_fop = &bad_sock_fops;
		break;
	default:
		printk(KERN_DEBUG "init_special_inode: bogus i_mode (%o) for"
				  " inode %s:%lu\n", mode, inode->i_sb->s_id,
				  inode->i_ino);
	}
}
EXPORT_SYMBOL(init_special_inode);
1659 1660

/**
 * inode_init_owner - Init uid,gid,mode for new inode according to posix standards
 * @inode: New inode
 * @dir: Directory inode
 * @mode: mode of the new inode
 */
void inode_init_owner(struct inode *inode, const struct inode *dir,
			mode_t mode)
{
	/* does the parent directory propagate its group via setgid? */
	int inherit_gid = dir && (dir->i_mode & S_ISGID);

	inode->i_uid = current_fsuid();
	inode->i_gid = inherit_gid ? dir->i_gid : current_fsgid();

	/* new subdirectories of a setgid directory are setgid themselves */
	if (inherit_gid && S_ISDIR(mode))
		mode |= S_ISGID;

	inode->i_mode = mode;
}
EXPORT_SYMBOL(inode_init_owner);
1679

1680 1681 1682 1683 1684 1685
/**
 * inode_owner_or_capable - check current task permissions to inode
 * @inode: inode being checked
 *
 * Return true if current either has CAP_FOWNER to the inode, or
 * owns the file.
1686
 */
1687
bool inode_owner_or_capable(const struct inode *inode)
1688 1689 1690 1691 1692 1693 1694 1695 1696
{
	struct user_namespace *ns = inode_userns(inode);

	if (current_user_ns() == ns && current_fsuid() == inode->i_uid)
		return true;
	if (ns_capable(ns, CAP_FOWNER))
		return true;
	return false;
}
1697
EXPORT_SYMBOL(inode_owner_or_capable);