/*
 *  linux/fs/ext4/namei.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/namei.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 *  Directory entry file type support and forward compatibility hooks
 *	for B-tree directories by Theodore Ts'o (tytso@mit.edu), 1998
 *  Hash Tree Directory indexing (c)
 *	Daniel Phillips, 2001
 *  Hash Tree Directory indexing porting
 *	Christopher Li, 2002
 *  Hash Tree Directory indexing cleanup
 *	Theodore Ts'o, 2002
 */

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/time.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include <linux/bio.h>
#include "ext4.h"
#include "ext4_jbd2.h"

#include "xattr.h"
#include "acl.h"

#include <trace/events/ext4.h>
/*
 * define how far ahead to read directories while searching them.
 */
#define NAMEI_RA_CHUNKS  2
#define NAMEI_RA_BLOCKS  4
#define NAMEI_RA_SIZE	     (NAMEI_RA_CHUNKS * NAMEI_RA_BLOCKS)

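/*
 * Append a newly allocated block to directory @inode: bump i_size and
 * i_disksize, and return the new block's buffer with journal write
 * access already obtained.  Fails with -ENOSPC once the directory
 * would exceed the s_max_dir_size_kb limit.
 */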
static struct buffer_head *ext4_append(handle_t *handle,
					struct inode *inode,
					ext4_lblk_t *block)
{
	struct buffer_head *bh;
	int err;

	if (unlikely(EXT4_SB(inode->i_sb)->s_max_dir_size_kb &&
		     ((inode->i_size >> 10) >=
		      EXT4_SB(inode->i_sb)->s_max_dir_size_kb)))
		return ERR_PTR(-ENOSPC);

	*block = inode->i_size >> inode->i_sb->s_blocksize_bits;

	bh = ext4_bread(handle, inode, *block, 1);
	if (IS_ERR(bh))
		return bh;
	inode->i_size += inode->i_sb->s_blocksize;
	EXT4_I(inode)->i_disksize = inode->i_size;
	BUFFER_TRACE(bh, "get_write_access");
	err = ext4_journal_get_write_access(handle, bh);
	if (err) {
		brelse(bh);
		ext4_std_error(inode->i_sb, err);
		return ERR_PTR(err);
	}
	return bh;
}

static int ext4_dx_csum_verify(struct inode *inode,
			       struct ext4_dir_entry *dirent);

typedef enum {
	EITHER, INDEX, DIRENT
} dirblock_type_t;

#define ext4_read_dirblock(inode, block, type) \
	__ext4_read_dirblock((inode), (block), (type), __LINE__)
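/*
 * Read directory block @block of @inode and sanity-check it.  @type says
 * whether the caller expects an htree index block, a dirent leaf block,
 * or either; the matching checksum is verified (when the filesystem
 * carries metadata checksums) before the buffer is returned.
 */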

static struct buffer_head *__ext4_read_dirblock(struct inode *inode,
					      ext4_lblk_t block,
					      dirblock_type_t type,
					      unsigned int line)
{
	struct buffer_head *bh;
	struct ext4_dir_entry *dirent;
	int is_dx_block = 0;

	bh = ext4_bread(NULL, inode, block, 0);
	if (IS_ERR(bh)) {
		__ext4_warning(inode->i_sb, __func__, line,
			       "error %ld reading directory block "
			       "(ino %lu, block %lu)", PTR_ERR(bh), inode->i_ino,
			       (unsigned long) block);

		return bh;
	}
	if (!bh) {
		ext4_error_inode(inode, __func__, line, block, "Directory hole found");
		return ERR_PTR(-EIO);
	}
	dirent = (struct ext4_dir_entry *) bh->b_data;
	/* Determine whether or not we have an index block */
	if (is_dx(inode)) {
		if (block == 0)
			is_dx_block = 1;
		else if (ext4_rec_len_from_disk(dirent->rec_len,
						inode->i_sb->s_blocksize) ==
			 inode->i_sb->s_blocksize)
			is_dx_block = 1;
	}
	if (!is_dx_block && type == INDEX) {
		ext4_error_inode(inode, __func__, line, block,
		       "directory leaf block found instead of index block");
		return ERR_PTR(-EIO);
	}
	if (!ext4_has_metadata_csum(inode->i_sb) ||
	    buffer_verified(bh))
		return bh;

	/*
	 * An empty leaf block can get mistaken for an index block; for
	 * this reason, we can only check the index checksum when the
	 * caller is sure it should be an index block.
	 */
	if (is_dx_block && type == INDEX) {
		if (ext4_dx_csum_verify(inode, dirent))
			set_buffer_verified(bh);
		else {
			ext4_error_inode(inode, __func__, line, block,
				"Directory index failed checksum");
			brelse(bh);
			return ERR_PTR(-EIO);
		}
	}
	if (!is_dx_block) {
		if (ext4_dirent_csum_verify(inode, dirent))
			set_buffer_verified(bh);
		else {
			ext4_error_inode(inode, __func__, line, block,
				"Directory block failed checksum");
			brelse(bh);
			return ERR_PTR(-EIO);
		}
	}
	return bh;
}

#ifndef assert
#define assert(test) J_ASSERT(test)
#endif

#ifdef DX_DEBUG
#define dxtrace(command) command
#else
#define dxtrace(command)
#endif

struct fake_dirent
{
	__le32 inode;
	__le16 rec_len;
	u8 name_len;
	u8 file_type;
};

struct dx_countlimit
{
	__le16 limit;
	__le16 count;
};

struct dx_entry
{
	__le32 hash;
	__le32 block;
};

/*
 * dx_root_info is laid out so that if it should somehow get overlaid by a
 * dirent the two low bits of the hash version will be zero.  Therefore, the
 * hash version mod 4 should never be 0.  Sincerely, the paranoia department.
 */

struct dx_root
{
	struct fake_dirent dot;
	char dot_name[4];
	struct fake_dirent dotdot;
	char dotdot_name[4];
	struct dx_root_info
	{
		__le32 reserved_zero;
		u8 hash_version;
		u8 info_length; /* 8 */
		u8 indirect_levels;
		u8 unused_flags;
	}
	info;
	struct dx_entry	entries[0];
};

struct dx_node
{
	struct fake_dirent fake;
	struct dx_entry	entries[0];
};


struct dx_frame
{
	struct buffer_head *bh;
	struct dx_entry *entries;
	struct dx_entry *at;
};

struct dx_map_entry
{
	u32 hash;
	u16 offs;
	u16 size;
};

/*
 * This goes at the end of each htree block.
 */
struct dx_tail {
	u32 dt_reserved;
	__le32 dt_checksum;	/* crc32c(uuid+inum+dirblock) */
};

static inline ext4_lblk_t dx_get_block(struct dx_entry *entry);
static void dx_set_block(struct dx_entry *entry, ext4_lblk_t value);
static inline unsigned dx_get_hash(struct dx_entry *entry);
static void dx_set_hash(struct dx_entry *entry, unsigned value);
static unsigned dx_get_count(struct dx_entry *entries);
static unsigned dx_get_limit(struct dx_entry *entries);
static void dx_set_count(struct dx_entry *entries, unsigned value);
static void dx_set_limit(struct dx_entry *entries, unsigned value);
static unsigned dx_root_limit(struct inode *dir, unsigned infosize);
static unsigned dx_node_limit(struct inode *dir);
static struct dx_frame *dx_probe(const struct qstr *d_name,
				 struct inode *dir,
				 struct dx_hash_info *hinfo,
				 struct dx_frame *frame);
static void dx_release(struct dx_frame *frames);
static int dx_make_map(struct ext4_dir_entry_2 *de, unsigned blocksize,
		       struct dx_hash_info *hinfo, struct dx_map_entry map[]);
static void dx_sort_map(struct dx_map_entry *map, unsigned count);
static struct ext4_dir_entry_2 *dx_move_dirents(char *from, char *to,
		struct dx_map_entry *offsets, int count, unsigned blocksize);
static struct ext4_dir_entry_2* dx_pack_dirents(char *base, unsigned blocksize);
static void dx_insert_block(struct dx_frame *frame,
					u32 hash, ext4_lblk_t block);
static int ext4_htree_next_block(struct inode *dir, __u32 hash,
				 struct dx_frame *frame,
				 struct dx_frame *frames,
				 __u32 *start_hash);
static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
		const struct qstr *d_name,
		struct ext4_dir_entry_2 **res_dir);
static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry,
			     struct inode *inode);

/* checksumming functions */
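/* Set up the fake dirent at the end of a leaf block that carries the checksum */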
void initialize_dirent_tail(struct ext4_dir_entry_tail *t,
			    unsigned int blocksize)
{
	memset(t, 0, sizeof(struct ext4_dir_entry_tail));
	t->det_rec_len = ext4_rec_len_to_disk(
			sizeof(struct ext4_dir_entry_tail), blocksize);
	t->det_reserved_ft = EXT4_FT_DIR_CSUM;
}

/* Walk through a dirent block to find a checksum "dirent" at the tail */
static struct ext4_dir_entry_tail *get_dirent_tail(struct inode *inode,
						   struct ext4_dir_entry *de)
{
	struct ext4_dir_entry_tail *t;

#ifdef PARANOID
	struct ext4_dir_entry *d, *top;

	d = de;
	top = (struct ext4_dir_entry *)(((void *)de) +
		(EXT4_BLOCK_SIZE(inode->i_sb) -
		sizeof(struct ext4_dir_entry_tail)));
	while (d < top && d->rec_len)
		d = (struct ext4_dir_entry *)(((void *)d) +
		    le16_to_cpu(d->rec_len));

	if (d != top)
		return NULL;

	t = (struct ext4_dir_entry_tail *)d;
#else
	t = EXT4_DIRENT_TAIL(de, EXT4_BLOCK_SIZE(inode->i_sb));
#endif

	if (t->det_reserved_zero1 ||
	    le16_to_cpu(t->det_rec_len) != sizeof(struct ext4_dir_entry_tail) ||
	    t->det_reserved_zero2 ||
	    t->det_reserved_ft != EXT4_FT_DIR_CSUM)
		return NULL;

	return t;
}

static __le32 ext4_dirent_csum(struct inode *inode,
			       struct ext4_dir_entry *dirent, int size)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);
	__u32 csum;

	csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)dirent, size);
	return cpu_to_le32(csum);
}

static void warn_no_space_for_csum(struct inode *inode)
{
	ext4_warning(inode->i_sb, "no space in directory inode %lu leaf for "
		     "checksum.  Please run e2fsck -D.", inode->i_ino);
}

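/*
 * Return 1 if the checksum stored in the dirent block tail is correct
 * (or if metadata checksums are not in use), 0 otherwise.
 */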
int ext4_dirent_csum_verify(struct inode *inode, struct ext4_dir_entry *dirent)
{
	struct ext4_dir_entry_tail *t;

	if (!ext4_has_metadata_csum(inode->i_sb))
		return 1;

	t = get_dirent_tail(inode, dirent);
	if (!t) {
		warn_no_space_for_csum(inode);
		return 0;
	}

	if (t->det_checksum != ext4_dirent_csum(inode, dirent,
						(void *)t - (void *)dirent))
		return 0;

	return 1;
}

static void ext4_dirent_csum_set(struct inode *inode,
				 struct ext4_dir_entry *dirent)
{
	struct ext4_dir_entry_tail *t;

	if (!ext4_has_metadata_csum(inode->i_sb))
		return;

	t = get_dirent_tail(inode, dirent);
	if (!t) {
		warn_no_space_for_csum(inode);
		return;
	}

	t->det_checksum = ext4_dirent_csum(inode, dirent,
					   (void *)t - (void *)dirent);
}

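/*
 * Recompute the checksum of a dirent leaf block and hand the buffer to
 * the journal as dirty metadata.
 */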
int ext4_handle_dirty_dirent_node(handle_t *handle,
				  struct inode *inode,
				  struct buffer_head *bh)
{
	ext4_dirent_csum_set(inode, (struct ext4_dir_entry *)bh->b_data);
	return ext4_handle_dirty_metadata(handle, inode, bh);
}

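/*
 * Locate the dx_countlimit header inside an htree block.  Handles both
 * the root block (dot and dotdot entries followed by dx_root_info) and
 * interior node blocks (a single fake dirent spanning the block);
 * returns NULL if the block looks like neither.  The byte offset of the
 * header is stored through @offset when it is non-NULL.
 */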
static struct dx_countlimit *get_dx_countlimit(struct inode *inode,
					       struct ext4_dir_entry *dirent,
					       int *offset)
{
	struct ext4_dir_entry *dp;
	struct dx_root_info *root;
	int count_offset;

	if (le16_to_cpu(dirent->rec_len) == EXT4_BLOCK_SIZE(inode->i_sb))
		count_offset = 8;
	else if (le16_to_cpu(dirent->rec_len) == 12) {
		dp = (struct ext4_dir_entry *)(((void *)dirent) + 12);
		if (le16_to_cpu(dp->rec_len) !=
		    EXT4_BLOCK_SIZE(inode->i_sb) - 12)
			return NULL;
		root = (struct dx_root_info *)(((void *)dp + 12));
		if (root->reserved_zero ||
		    root->info_length != sizeof(struct dx_root_info))
			return NULL;
		count_offset = 32;
	} else
		return NULL;

	if (offset)
		*offset = count_offset;
	return (struct dx_countlimit *)(((void *)dirent) + count_offset);
}

static __le32 ext4_dx_csum(struct inode *inode, struct ext4_dir_entry *dirent,
			   int count_offset, int count, struct dx_tail *t)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);
	__u32 csum;
	__le32 save_csum;
	int size;

	size = count_offset + (count * sizeof(struct dx_entry));
	save_csum = t->dt_checksum;
	t->dt_checksum = 0;
	csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)dirent, size);
	csum = ext4_chksum(sbi, csum, (__u8 *)t, sizeof(struct dx_tail));
	t->dt_checksum = save_csum;

	return cpu_to_le32(csum);
}

static int ext4_dx_csum_verify(struct inode *inode,
			       struct ext4_dir_entry *dirent)
{
	struct dx_countlimit *c;
	struct dx_tail *t;
	int count_offset, limit, count;

	if (!ext4_has_metadata_csum(inode->i_sb))
		return 1;

	c = get_dx_countlimit(inode, dirent, &count_offset);
	if (!c) {
		EXT4_ERROR_INODE(inode, "dir seems corrupt?  Run e2fsck -D.");
		return 1;
	}
	limit = le16_to_cpu(c->limit);
	count = le16_to_cpu(c->count);
	if (count_offset + (limit * sizeof(struct dx_entry)) >
	    EXT4_BLOCK_SIZE(inode->i_sb) - sizeof(struct dx_tail)) {
		warn_no_space_for_csum(inode);
		return 1;
	}
	t = (struct dx_tail *)(((struct dx_entry *)c) + limit);

	if (t->dt_checksum != ext4_dx_csum(inode, dirent, count_offset,
					    count, t))
		return 0;
	return 1;
}

static void ext4_dx_csum_set(struct inode *inode, struct ext4_dir_entry *dirent)
{
	struct dx_countlimit *c;
	struct dx_tail *t;
	int count_offset, limit, count;

	if (!ext4_has_metadata_csum(inode->i_sb))
		return;

	c = get_dx_countlimit(inode, dirent, &count_offset);
	if (!c) {
		EXT4_ERROR_INODE(inode, "dir seems corrupt?  Run e2fsck -D.");
		return;
	}
	limit = le16_to_cpu(c->limit);
	count = le16_to_cpu(c->count);
	if (count_offset + (limit * sizeof(struct dx_entry)) >
	    EXT4_BLOCK_SIZE(inode->i_sb) - sizeof(struct dx_tail)) {
		warn_no_space_for_csum(inode);
		return;
	}
	t = (struct dx_tail *)(((struct dx_entry *)c) + limit);

	t->dt_checksum = ext4_dx_csum(inode, dirent, count_offset, count, t);
}

static inline int ext4_handle_dirty_dx_node(handle_t *handle,
					    struct inode *inode,
					    struct buffer_head *bh)
{
	ext4_dx_csum_set(inode, (struct ext4_dir_entry *)bh->b_data);
	return ext4_handle_dirty_metadata(handle, inode, bh);
}

/*
 * p is at least 6 bytes before the end of page
 */
static inline struct ext4_dir_entry_2 *
ext4_next_entry(struct ext4_dir_entry_2 *p, unsigned long blocksize)
{
	return (struct ext4_dir_entry_2 *)((char *)p +
		ext4_rec_len_from_disk(p->rec_len, blocksize));
}

/*
 * Future: use high four bits of block for coalesce-on-delete flags
 * Mask them off for now.
 */

static inline ext4_lblk_t dx_get_block(struct dx_entry *entry)
{
	return le32_to_cpu(entry->block) & 0x00ffffff;
}

static inline void dx_set_block(struct dx_entry *entry, ext4_lblk_t value)
{
	entry->block = cpu_to_le32(value);
}

static inline unsigned dx_get_hash(struct dx_entry *entry)
{
	return le32_to_cpu(entry->hash);
}

static inline void dx_set_hash(struct dx_entry *entry, unsigned value)
{
	entry->hash = cpu_to_le32(value);
}

static inline unsigned dx_get_count(struct dx_entry *entries)
{
	return le16_to_cpu(((struct dx_countlimit *) entries)->count);
}

static inline unsigned dx_get_limit(struct dx_entry *entries)
{
	return le16_to_cpu(((struct dx_countlimit *) entries)->limit);
}

static inline void dx_set_count(struct dx_entry *entries, unsigned value)
{
	((struct dx_countlimit *) entries)->count = cpu_to_le16(value);
}

static inline void dx_set_limit(struct dx_entry *entries, unsigned value)
{
	((struct dx_countlimit *) entries)->limit = cpu_to_le16(value);
}

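/*
 * Number of dx_entries that fit in the htree root block, after the dot
 * and dotdot dirents, the dx_root_info header and, when metadata
 * checksums are enabled, the dx_tail.
 */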
static inline unsigned dx_root_limit(struct inode *dir, unsigned infosize)
{
	unsigned entry_space = dir->i_sb->s_blocksize - EXT4_DIR_REC_LEN(1) -
		EXT4_DIR_REC_LEN(2) - infosize;

	if (ext4_has_metadata_csum(dir->i_sb))
		entry_space -= sizeof(struct dx_tail);
	return entry_space / sizeof(struct dx_entry);
}

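/* Number of dx_entries that fit in an interior htree node block */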
static inline unsigned dx_node_limit(struct inode *dir)
{
	unsigned entry_space = dir->i_sb->s_blocksize - EXT4_DIR_REC_LEN(0);

	if (ext4_has_metadata_csum(dir->i_sb))
		entry_space -= sizeof(struct dx_tail);
	return entry_space / sizeof(struct dx_entry);
}

/*
 * Debug
 */
#ifdef DX_DEBUG
static void dx_show_index(char * label, struct dx_entry *entries)
{
	int i, n = dx_get_count (entries);
	printk(KERN_DEBUG "%s index ", label);
	for (i = 0; i < n; i++) {
		printk("%x->%lu ", i ? dx_get_hash(entries + i) :
				0, (unsigned long)dx_get_block(entries + i));
	}
	printk("\n");
}

struct stats
{
	unsigned names;
	unsigned space;
	unsigned bcount;
};

static struct stats dx_show_leaf(struct dx_hash_info *hinfo, struct ext4_dir_entry_2 *de,
				 int size, int show_names)
{
	unsigned names = 0, space = 0;
	char *base = (char *) de;
	struct dx_hash_info h = *hinfo;

	printk("names: ");
	while ((char *) de < base + size)
	{
		if (de->inode)
		{
			if (show_names)
			{
				int len = de->name_len;
				char *name = de->name;
				while (len--) printk("%c", *name++);
				ext4fs_dirhash(de->name, de->name_len, &h);
				printk(":%x.%u ", h.hash,
				       (unsigned) ((char *) de - base));
			}
			space += EXT4_DIR_REC_LEN(de->name_len);
			names++;
		}
		de = ext4_next_entry(de, size);
	}
	printk("(%i)\n", names);
	return (struct stats) { names, space, 1 };
}

struct stats dx_show_entries(struct dx_hash_info *hinfo, struct inode *dir,
			     struct dx_entry *entries, int levels)
{
	unsigned blocksize = dir->i_sb->s_blocksize;
	unsigned count = dx_get_count(entries), names = 0, space = 0, i;
	unsigned bcount = 0;
	struct buffer_head *bh;
	int err;
	printk("%i indexed blocks...\n", count);
	for (i = 0; i < count; i++, entries++)
	{
		ext4_lblk_t block = dx_get_block(entries);
		ext4_lblk_t hash  = i ? dx_get_hash(entries): 0;
		u32 range = i < count - 1? (dx_get_hash(entries + 1) - hash): ~hash;
		struct stats stats;
		printk("%s%3u:%03u hash %8x/%8x ",levels?"":"   ", i, block, hash, range);
		bh = ext4_bread(NULL,dir, block, 0);
		if (!bh || IS_ERR(bh))
			continue;
		stats = levels?
		   dx_show_entries(hinfo, dir, ((struct dx_node *) bh->b_data)->entries, levels - 1):
		   dx_show_leaf(hinfo, (struct ext4_dir_entry_2 *) bh->b_data, blocksize, 0);
		names += stats.names;
		space += stats.space;
		bcount += stats.bcount;
		brelse(bh);
	}
	if (bcount)
		printk(KERN_DEBUG "%snames %u, fullness %u (%u%%)\n",
		       levels ? "" : "   ", names, space/bcount,
		       (space/bcount)*100/blocksize);
	return (struct stats) { names, space, bcount};
}
#endif /* DX_DEBUG */

/*
 * Probe for a directory leaf block to search.
 *
 * dx_probe can return ERR_BAD_DX_DIR, which means there was a format
 * error in the directory index, and the caller should fall back to
 * searching the directory normally.  The callers of dx_probe **MUST**
 * check for this error code, and make sure it never gets reflected
 * back to userspace.
 */
static struct dx_frame *
dx_probe(const struct qstr *d_name, struct inode *dir,
	 struct dx_hash_info *hinfo, struct dx_frame *frame_in)
{
	unsigned count, indirect;
	struct dx_entry *at, *entries, *p, *q, *m;
	struct dx_root *root;
	struct dx_frame *frame = frame_in;
	struct dx_frame *ret_err = ERR_PTR(ERR_BAD_DX_DIR);
	u32 hash;

	frame->bh = ext4_read_dirblock(dir, 0, INDEX);
	if (IS_ERR(frame->bh))
		return (struct dx_frame *) frame->bh;

	root = (struct dx_root *) frame->bh->b_data;
	if (root->info.hash_version != DX_HASH_TEA &&
	    root->info.hash_version != DX_HASH_HALF_MD4 &&
	    root->info.hash_version != DX_HASH_LEGACY) {
		ext4_warning(dir->i_sb, "Unrecognised inode hash code %d",
			     root->info.hash_version);
		goto fail;
	}
	hinfo->hash_version = root->info.hash_version;
	if (hinfo->hash_version <= DX_HASH_TEA)
		hinfo->hash_version += EXT4_SB(dir->i_sb)->s_hash_unsigned;
	hinfo->seed = EXT4_SB(dir->i_sb)->s_hash_seed;
	if (d_name)
		ext4fs_dirhash(d_name->name, d_name->len, hinfo);
	hash = hinfo->hash;

	if (root->info.unused_flags & 1) {
		ext4_warning(dir->i_sb, "Unimplemented inode hash flags: %#06x",
			     root->info.unused_flags);
		goto fail;
	}

	if ((indirect = root->info.indirect_levels) > 1) {
		ext4_warning(dir->i_sb, "Unimplemented inode hash depth: %#06x",
			     root->info.indirect_levels);
		goto fail;
	}

	entries = (struct dx_entry *) (((char *)&root->info) +
				       root->info.info_length);

	if (dx_get_limit(entries) != dx_root_limit(dir,
						   root->info.info_length)) {
		ext4_warning(dir->i_sb, "dx entry: limit != root limit");
		goto fail;
	}

	dxtrace(printk("Look up %x", hash));
	while (1) {
		count = dx_get_count(entries);
		if (!count || count > dx_get_limit(entries)) {
			ext4_warning(dir->i_sb,
				     "dx entry: no count or count > limit");
			goto fail;
		}

		p = entries + 1;
		q = entries + count - 1;
		while (p <= q) {
			m = p + (q - p)/2;
			dxtrace(printk("."));
			if (dx_get_hash(m) > hash)
				q = m - 1;
			else
				p = m + 1;
		}

		if (0) { // linear search cross check
			unsigned n = count - 1;
			at = entries;
			while (n--)
			{
				dxtrace(printk(","));
				if (dx_get_hash(++at) > hash)
				{
					at--;
					break;
				}
			}
			assert (at == p - 1);
		}

		at = p - 1;
		dxtrace(printk(" %x->%u\n", at == entries? 0: dx_get_hash(at), dx_get_block(at)));
		frame->entries = entries;
		frame->at = at;
		if (!indirect--)
			return frame;
		frame++;
		frame->bh = ext4_read_dirblock(dir, dx_get_block(at), INDEX);
		if (IS_ERR(frame->bh)) {
			ret_err = (struct dx_frame *) frame->bh;
			frame->bh = NULL;
			goto fail;
		}
		entries = ((struct dx_node *) frame->bh->b_data)->entries;

		if (dx_get_limit(entries) != dx_node_limit (dir)) {
			ext4_warning(dir->i_sb,
				     "dx entry: limit != node limit");
			goto fail;
		}
	}
fail:
	while (frame >= frame_in) {
		brelse(frame->bh);
		frame--;
	}
	if (ret_err == ERR_PTR(ERR_BAD_DX_DIR))
		ext4_warning(dir->i_sb,
			     "Corrupt dir inode %lu, running e2fsck is "
			     "recommended.", dir->i_ino);
	return ret_err;
}

static void dx_release (struct dx_frame *frames)
{
	if (frames[0].bh == NULL)
		return;

	if (((struct dx_root *) frames[0].bh->b_data)->info.indirect_levels)
		brelse(frames[1].bh);
	brelse(frames[0].bh);
}

/*
 * This function increments the frame pointer to search the next leaf
 * block, and reads in the necessary intervening nodes if the search
 * should be necessary.  Whether or not the search is necessary is
 * controlled by the hash parameter.  If the hash value is even, then
 * the search is only continued if the next block starts with that
 * hash value.  This is used if we are searching for a specific file.
 *
 * If the hash value is HASH_NB_ALWAYS, then always go to the next block.
 *
 * This function returns 1 if the caller should continue to search,
 * or 0 if it should not.  If there is an error reading one of the
 * index blocks, it will return a negative error code.
 *
 * If start_hash is non-null, it will be filled in with the starting
 * hash of the next page.
 */
static int ext4_htree_next_block(struct inode *dir, __u32 hash,
				 struct dx_frame *frame,
				 struct dx_frame *frames,
				 __u32 *start_hash)
{
	struct dx_frame *p;
	struct buffer_head *bh;
	int num_frames = 0;
	__u32 bhash;

	p = frame;
	/*
	 * Find the next leaf page by incrementing the frame pointer.
	 * If we run out of entries in the interior node, loop around and
	 * increment pointer in the parent node.  When we break out of
	 * this loop, num_frames indicates the number of interior
	 * nodes need to be read.
	 */
	while (1) {
		if (++(p->at) < p->entries + dx_get_count(p->entries))
			break;
		if (p == frames)
			return 0;
		num_frames++;
		p--;
	}

	/*
	 * If the hash is 1, then continue only if the next page has a
	 * continuation hash of any value.  This is used for readdir
	 * handling.  Otherwise, check to see if the hash matches the
	 * desired continuation hash.  If it doesn't, return since
	 * there's no point to read in the successive index pages.
	 */
	bhash = dx_get_hash(p->at);
	if (start_hash)
		*start_hash = bhash;
	if ((hash & 1) == 0) {
		if ((bhash & ~1) != hash)
			return 0;
	}
	/*
	 * If the hash is HASH_NB_ALWAYS, we always go to the next
	 * block so no check is necessary
	 */
	while (num_frames--) {
		bh = ext4_read_dirblock(dir, dx_get_block(p->at), INDEX);
		if (IS_ERR(bh))
			return PTR_ERR(bh);
		p++;
		brelse(p->bh);
		p->bh = bh;
		p->at = p->entries = ((struct dx_node *) bh->b_data)->entries;
	}
	return 1;
}


/*
 * This function fills a red-black tree with information from a
 * directory block.  It returns the number of directory entries loaded
 * into the tree, or a negative error code.
 */
static int htree_dirblock_to_tree(struct file *dir_file,
				  struct inode *dir, ext4_lblk_t block,
				  struct dx_hash_info *hinfo,
				  __u32 start_hash, __u32 start_minor_hash)
{
	struct buffer_head *bh;
	struct ext4_dir_entry_2 *de, *top;
	int err = 0, count = 0;

	dxtrace(printk(KERN_INFO "In htree dirblock_to_tree: block %lu\n",
							(unsigned long)block));
	bh = ext4_read_dirblock(dir, block, DIRENT);
	if (IS_ERR(bh))
		return PTR_ERR(bh);

	de = (struct ext4_dir_entry_2 *) bh->b_data;
	top = (struct ext4_dir_entry_2 *) ((char *) de +
					   dir->i_sb->s_blocksize -
					   EXT4_DIR_REC_LEN(0));
	for (; de < top; de = ext4_next_entry(de, dir->i_sb->s_blocksize)) {
		if (ext4_check_dir_entry(dir, NULL, de, bh,
				bh->b_data, bh->b_size,
				(block<<EXT4_BLOCK_SIZE_BITS(dir->i_sb))
					 + ((char *)de - bh->b_data))) {
			/* silently ignore the rest of the block */
			break;
		}
		ext4fs_dirhash(de->name, de->name_len, hinfo);
		if ((hinfo->hash < start_hash) ||
		    ((hinfo->hash == start_hash) &&
		     (hinfo->minor_hash < start_minor_hash)))
			continue;
		if (de->inode == 0)
			continue;
		if ((err = ext4_htree_store_dirent(dir_file,
				   hinfo->hash, hinfo->minor_hash, de)) != 0) {
			brelse(bh);
			return err;
		}
		count++;
	}
	brelse(bh);
	return count;
}


/*
 * This function fills a red-black tree with information from a
 * directory.  We start scanning the directory in hash order, starting
 * at start_hash and start_minor_hash.
 *
 * This function returns the number of entries inserted into the tree,
 * or a negative error code.
 */
int ext4_htree_fill_tree(struct file *dir_file, __u32 start_hash,
			 __u32 start_minor_hash, __u32 *next_hash)
{
	struct dx_hash_info hinfo;
	struct ext4_dir_entry_2 *de;
	struct dx_frame frames[2], *frame;
	struct inode *dir;
	ext4_lblk_t block;
	int count = 0;
	int ret, err;
	__u32 hashval;

	dxtrace(printk(KERN_DEBUG "In htree_fill_tree, start hash: %x:%x\n",
		       start_hash, start_minor_hash));
	dir = file_inode(dir_file);
	if (!(ext4_test_inode_flag(dir, EXT4_INODE_INDEX))) {
		hinfo.hash_version = EXT4_SB(dir->i_sb)->s_def_hash_version;
		if (hinfo.hash_version <= DX_HASH_TEA)
			hinfo.hash_version +=
				EXT4_SB(dir->i_sb)->s_hash_unsigned;
		hinfo.seed = EXT4_SB(dir->i_sb)->s_hash_seed;
		if (ext4_has_inline_data(dir)) {
			int has_inline_data = 1;
			count = htree_inlinedir_to_tree(dir_file, dir, 0,
							&hinfo, start_hash,
							start_minor_hash,
							&has_inline_data);
			if (has_inline_data) {
				*next_hash = ~0;
				return count;
			}
		}
		count = htree_dirblock_to_tree(dir_file, dir, 0, &hinfo,
					       start_hash, start_minor_hash);
		*next_hash = ~0;
		return count;
	}
	hinfo.hash = start_hash;
	hinfo.minor_hash = 0;
	frame = dx_probe(NULL, dir, &hinfo, frames);
	if (IS_ERR(frame))
		return PTR_ERR(frame);

	/* Add '.' and '..' from the htree header */
	if (!start_hash && !start_minor_hash) {
		de = (struct ext4_dir_entry_2 *) frames[0].bh->b_data;
		if ((err = ext4_htree_store_dirent(dir_file, 0, 0, de)) != 0)
			goto errout;
		count++;
	}
	if (start_hash < 2 || (start_hash ==2 && start_minor_hash==0)) {
		de = (struct ext4_dir_entry_2 *) frames[0].bh->b_data;
		de = ext4_next_entry(de, dir->i_sb->s_blocksize);
		if ((err = ext4_htree_store_dirent(dir_file, 2, 0, de)) != 0)
			goto errout;
		count++;
	}

	while (1) {
		block = dx_get_block(frame->at);
		ret = htree_dirblock_to_tree(dir_file, dir, block, &hinfo,
					     start_hash, start_minor_hash);
		if (ret < 0) {
			err = ret;
			goto errout;
		}
		count += ret;
		hashval = ~0;
		ret = ext4_htree_next_block(dir, HASH_NB_ALWAYS,
					    frame, frames, &hashval);
		*next_hash = hashval;
		if (ret < 0) {
			err = ret;
			goto errout;
		}
		/*
		 * Stop if:  (a) there are no more entries, or
		 * (b) we have inserted at least one entry and the
		 * next hash value is not a continuation
		 */
		if ((ret == 0) ||
		    (count && ((hashval & 1) == 0)))
			break;
	}
	dx_release(frames);
	dxtrace(printk(KERN_DEBUG "Fill tree: returned %d entries, "
		       "next hash: %x\n", count, *next_hash));
	return count;
errout:
	dx_release(frames);
	return (err);
}

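/*
 * Search a single directory leaf block for @d_name; thin wrapper around
 * search_dir() that uses the whole block as the search buffer.
 */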
static inline int search_dirblock(struct buffer_head *bh,
				  struct inode *dir,
				  const struct qstr *d_name,
				  unsigned int offset,
				  struct ext4_dir_entry_2 **res_dir)
{
	return search_dir(bh, bh->b_data, dir->i_sb->s_blocksize, dir,
			  d_name, offset, res_dir);
}

/*
 * Directory block splitting, compacting
 */

/*
 * Create map of hash values, offsets, and sizes, stored at end of block.
 * Returns number of entries mapped.
 */
static int dx_make_map(struct ext4_dir_entry_2 *de, unsigned blocksize,
		       struct dx_hash_info *hinfo,
		       struct dx_map_entry *map_tail)
{
	int count = 0;
	char *base = (char *) de;
	struct dx_hash_info h = *hinfo;

	while ((char *) de < base + blocksize) {
		if (de->name_len && de->inode) {
			ext4fs_dirhash(de->name, de->name_len, &h);
			map_tail--;
			map_tail->hash = h.hash;
			map_tail->offs = ((char *) de - base)>>2;
			map_tail->size = le16_to_cpu(de->rec_len);
			count++;
			cond_resched();
		}
		/* XXX: do we need to check rec_len == 0 case? -Chris */
		de = ext4_next_entry(de, blocksize);
	}
	return count;
}

/* Sort map by hash value */
static void dx_sort_map (struct dx_map_entry *map, unsigned count)
{
	struct dx_map_entry *p, *q, *top = map + count - 1;
	int more;
	/* Combsort until bubble sort doesn't suck */
	while (count > 2) {
		count = count*10/13;
		if (count - 9 < 2) /* 9, 10 -> 11 */
			count = 11;
		for (p = top, q = p - count; q >= map; p--, q--)
			if (p->hash < q->hash)
				swap(*p, *q);
	}
	/* Garden variety bubble sort */
	do {
		more = 0;
		q = top;
		while (q-- > map) {
			if (q[1].hash >= q[0].hash)
				continue;
			swap(*(q+1), *q);
			more = 1;
		}
	} while(more);
}

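/*
 * Insert a new (hash, block) pair into the index node of @frame, right
 * after the entry the frame currently points at.
 */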
static void dx_insert_block(struct dx_frame *frame, u32 hash, ext4_lblk_t block)
{
	struct dx_entry *entries = frame->entries;
	struct dx_entry *old = frame->at, *new = old + 1;
	int count = dx_get_count(entries);

	assert(count < dx_get_limit(entries));
	assert(old < entries + count);
	memmove(new + 1, new, (char *)(entries + count) - (char *)(new));
	dx_set_hash(new, hash);
	dx_set_block(new, block);
	dx_set_count(entries, count + 1);
}

/*
 * NOTE! unlike strncmp, ext4_match returns 1 for success, 0 for failure.
 *
 * `len <= EXT4_NAME_LEN' is guaranteed by caller.
 * `de != NULL' is guaranteed by caller.
 */
static inline int ext4_match (int len, const char * const name,
			      struct ext4_dir_entry_2 * de)
{
	if (len != de->name_len)
		return 0;
	if (!de->inode)
		return 0;
	return !memcmp(name, de->name, len);
}

/*
 * Returns 0 if not found, -1 on failure, and 1 on success
 */
int search_dir(struct buffer_head *bh,
	       char *search_buf,
	       int buf_size,
	       struct inode *dir,
	       const struct qstr *d_name,
	       unsigned int offset,
	       struct ext4_dir_entry_2 **res_dir)
{
	struct ext4_dir_entry_2 * de;
	char * dlimit;
	int de_len;
	const char *name = d_name->name;
	int namelen = d_name->len;

	de = (struct ext4_dir_entry_2 *)search_buf;
	dlimit = search_buf + buf_size;
	while ((char *) de < dlimit) {
		/* this code is executed quadratically often */
		/* do minimal checking `by hand' */

		if ((char *) de + namelen <= dlimit &&
		    ext4_match (namelen, name, de)) {
			/* found a match - just to be sure, do a full check */
			if (ext4_check_dir_entry(dir, NULL, de, bh, bh->b_data,
						 bh->b_size, offset))
				return -1;
			*res_dir = de;
			return 1;
		}
		/* prevent looping on a bad block */
		de_len = ext4_rec_len_from_disk(de->rec_len,
						dir->i_sb->s_blocksize);
		if (de_len <= 0)
			return -1;
		offset += de_len;
		de = (struct ext4_dir_entry_2 *) ((char *) de + de_len);
	}
	return 0;
}

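/*
 * Return 1 if this block is an htree internal (index) node: either
 * block 0 of an indexed directory, or a block whose first entry is an
 * unused dirent that spans the whole block.
 */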
static int is_dx_internal_node(struct inode *dir, ext4_lblk_t block,
			       struct ext4_dir_entry *de)
{
	struct super_block *sb = dir->i_sb;

	if (!is_dx(dir))
		return 0;
	if (block == 0)
		return 1;
	if (de->inode == 0 &&
	    ext4_rec_len_from_disk(de->rec_len, sb->s_blocksize) ==
			sb->s_blocksize)
		return 1;
	return 0;
}

/*
 *	ext4_find_entry()
 *
 * finds an entry in the specified directory with the wanted name. It
 * returns the cache buffer in which the entry was found, and the entry
 * itself (as a parameter - res_dir). It does NOT read the inode of the
 * entry - you'll have to do that yourself if you want to.
 *
 * The returned buffer_head has ->b_count elevated.  The caller is expected
 * to brelse() it when appropriate.
 */
static struct buffer_head * ext4_find_entry (struct inode *dir,
					const struct qstr *d_name,
					struct ext4_dir_entry_2 **res_dir,
					int *inlined)
{
	struct super_block *sb;
	struct buffer_head *bh_use[NAMEI_RA_SIZE];
	struct buffer_head *bh, *ret = NULL;
	ext4_lblk_t start, block, b;
	const u8 *name = d_name->name;
	int ra_max = 0;		/* Number of bh's in the readahead
				   buffer, bh_use[] */
	int ra_ptr = 0;		/* Current index into readahead
				   buffer */
	int num = 0;
	ext4_lblk_t  nblocks;
	int i, namelen;

	*res_dir = NULL;
	sb = dir->i_sb;
	namelen = d_name->len;
	if (namelen > EXT4_NAME_LEN)
		return NULL;

	if (ext4_has_inline_data(dir)) {
		int has_inline_data = 1;
		ret = ext4_find_inline_entry(dir, d_name, res_dir,
					     &has_inline_data);
		if (has_inline_data) {
			if (inlined)
				*inlined = 1;
			return ret;
		}
	}

	if ((namelen <= 2) && (name[0] == '.') &&
	    (name[1] == '.' || name[1] == '\0')) {
		/*
		 * "." or ".." will only be in the first block
		 * NFS may look up ".."; "." should be handled by the VFS
		 */
		block = start = 0;
		nblocks = 1;
		goto restart;
	}
	if (is_dx(dir)) {
		bh = ext4_dx_find_entry(dir, d_name, res_dir);
		/*
		 * On success, or if the error was file not found,
		 * return.  Otherwise, fall back to doing a search the
		 * old fashioned way.
		 */
		if (!IS_ERR(bh) || PTR_ERR(bh) != ERR_BAD_DX_DIR)
			return bh;
		dxtrace(printk(KERN_DEBUG "ext4_find_entry: dx failed, "
			       "falling back\n"));
	}
	nblocks = dir->i_size >> EXT4_BLOCK_SIZE_BITS(sb);
	start = EXT4_I(dir)->i_dir_start_lookup;
	if (start >= nblocks)
		start = 0;
	block = start;
restart:
	do {
		/*
		 * We deal with the read-ahead logic here.
		 */
		if (ra_ptr >= ra_max) {
			/* Refill the readahead buffer */
			ra_ptr = 0;
			b = block;
			for (ra_max = 0; ra_max < NAMEI_RA_SIZE; ra_max++) {
				/*
				 * Terminate if we reach the end of the
				 * directory and must wrap, or if our
				 * search has finished at this block.
				 */
				if (b >= nblocks || (num && block == start)) {
					bh_use[ra_max] = NULL;
					break;
				}
				num++;
				bh = ext4_getblk(NULL, dir, b++, 0);
				if (unlikely(IS_ERR(bh))) {
					if (ra_max == 0)
						return bh;
					break;
				}
				bh_use[ra_max] = bh;
				if (bh)
					ll_rw_block(READ | REQ_META | REQ_PRIO,
						    1, &bh);
			}
		}
		if ((bh = bh_use[ra_ptr++]) == NULL)
			goto next;
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh)) {
			/* read error, skip block & hope for the best */
			EXT4_ERROR_INODE(dir, "reading directory lblock %lu",
					 (unsigned long) block);
			brelse(bh);
			goto next;
		}
		if (!buffer_verified(bh) &&
		    !is_dx_internal_node(dir, block,
					 (struct ext4_dir_entry *)bh->b_data) &&
		    !ext4_dirent_csum_verify(dir,
				(struct ext4_dir_entry *)bh->b_data)) {
			EXT4_ERROR_INODE(dir, "checksumming directory "
					 "block %lu", (unsigned long)block);
			brelse(bh);
			goto next;
		}
		set_buffer_verified(bh);
		i = search_dirblock(bh, dir, d_name,
			    block << EXT4_BLOCK_SIZE_BITS(sb), res_dir);
		if (i == 1) {
			EXT4_I(dir)->i_dir_start_lookup = block;
			ret = bh;
			goto cleanup_and_exit;
		} else {
			brelse(bh);
			if (i < 0)
				goto cleanup_and_exit;
		}
	next:
		if (++block >= nblocks)
			block = 0;
	} while (block != start);

	/*
	 * If the directory has grown while we were searching, then
	 * search the last part of the directory before giving up.
	 */
	block = nblocks;
	nblocks = dir->i_size >> EXT4_BLOCK_SIZE_BITS(sb);
	if (block < nblocks) {
		start = 0;
		goto restart;
	}

cleanup_and_exit:
	/* Clean up the read-ahead blocks */
	for (; ra_ptr < ra_max; ra_ptr++)
		brelse(bh_use[ra_ptr]);
	return ret;
}

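/*
 * Look up @d_name using the htree index: probe the index for the
 * candidate leaf block, search it, and keep walking to the next leaf
 * while the hash may continue there.  Returns the buffer containing the
 * entry, NULL if it is not present, or an ERR_PTR on error.
 */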
static struct buffer_head * ext4_dx_find_entry(struct inode *dir, const struct qstr *d_name,
		       struct ext4_dir_entry_2 **res_dir)
{
	struct super_block * sb = dir->i_sb;
	struct dx_hash_info	hinfo;
	struct dx_frame frames[2], *frame;
	struct buffer_head *bh;
	ext4_lblk_t block;
	int retval;

	frame = dx_probe(d_name, dir, &hinfo, frames);
	if (IS_ERR(frame))
		return (struct buffer_head *) frame;
	do {
		block = dx_get_block(frame->at);
		bh = ext4_read_dirblock(dir, block, DIRENT);
		if (IS_ERR(bh))
			goto errout;

		retval = search_dirblock(bh, dir, d_name,
					 block << EXT4_BLOCK_SIZE_BITS(sb),
					 res_dir);
		if (retval == 1)
			goto success;
		brelse(bh);
		if (retval == -1) {
			bh = ERR_PTR(ERR_BAD_DX_DIR);
			goto errout;
		}

		/* Check to see if we should continue to search */
		retval = ext4_htree_next_block(dir, hinfo.hash, frame,
					       frames, NULL);
		if (retval < 0) {
			ext4_warning(sb,
			     "error %d reading index page in directory #%lu",
			     retval, dir->i_ino);
			bh = ERR_PTR(retval);
			goto errout;
		}
	} while (retval == 1);

	bh = NULL;
errout:
	dxtrace(printk(KERN_DEBUG "%s not found\n", d_name->name));
success:
	dx_release(frames);
	return bh;
}

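/*
 * Lookup method for ext4 directories: find the named entry, read its
 * inode and splice it into the dcache.  Corrupt or inconsistent entries
 * are rejected with an error rather than handed back to the VFS.
 */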
static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
{
	struct inode *inode;
	struct ext4_dir_entry_2 *de;
	struct buffer_head *bh;

	if (dentry->d_name.len > EXT4_NAME_LEN)
		return ERR_PTR(-ENAMETOOLONG);

	bh = ext4_find_entry(dir, &dentry->d_name, &de, NULL);
	if (IS_ERR(bh))
		return (struct dentry *) bh;
	inode = NULL;
	if (bh) {
		__u32 ino = le32_to_cpu(de->inode);
		brelse(bh);
		if (!ext4_valid_inum(dir->i_sb, ino)) {
			EXT4_ERROR_INODE(dir, "bad inode number: %u", ino);
			return ERR_PTR(-EIO);
		}
		if (unlikely(ino == dir->i_ino)) {
			EXT4_ERROR_INODE(dir, "'%pd' linked to parent dir",
					 dentry);
			return ERR_PTR(-EIO);
		}
		inode = ext4_iget_normal(dir->i_sb, ino);
		if (inode == ERR_PTR(-ESTALE)) {
			EXT4_ERROR_INODE(dir,
					 "deleted inode referenced: %u",
					 ino);
			return ERR_PTR(-EIO);
		}
		if (!IS_ERR(inode) && ext4_encrypted_inode(dir) &&
		    (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
		     S_ISLNK(inode->i_mode)) &&
		    !ext4_is_child_context_consistent_with_parent(dir,
								  inode)) {
			iput(inode);
			ext4_warning(inode->i_sb,
				     "Inconsistent encryption contexts: %lu/%lu\n",
				     (unsigned long) dir->i_ino,
				     (unsigned long) inode->i_ino);
			return ERR_PTR(-EPERM);
		}
	}
	return d_splice_alias(inode, dentry);
}


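/*
 * Export-operations helper: look up the ".." entry of @child and return
 * a dentry for the parent inode.
 */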
struct dentry *ext4_get_parent(struct dentry *child)
{
	__u32 ino;
	static const struct qstr dotdot = QSTR_INIT("..", 2);
	struct ext4_dir_entry_2 * de;
	struct buffer_head *bh;

	bh = ext4_find_entry(child->d_inode, &dotdot, &de, NULL);
	if (IS_ERR(bh))
		return (struct dentry *) bh;
	if (!bh)
		return ERR_PTR(-ENOENT);
	ino = le32_to_cpu(de->inode);
	brelse(bh);

	if (!ext4_valid_inum(child->d_inode->i_sb, ino)) {
		EXT4_ERROR_INODE(child->d_inode,
				 "bad parent inode number: %u", ino);
		return ERR_PTR(-EIO);
	}

	return d_obtain_alias(ext4_iget_normal(child->d_inode->i_sb, ino));
}

/*
 * Move count entries from end of map between two memory locations.
 * Returns pointer to last entry moved.
 */
static struct ext4_dir_entry_2 *
dx_move_dirents(char *from, char *to, struct dx_map_entry *map, int count,
		unsigned blocksize)
{
	unsigned rec_len = 0;

	while (count--) {
		struct ext4_dir_entry_2 *de = (struct ext4_dir_entry_2 *)
						(from + (map->offs<<2));
		rec_len = EXT4_DIR_REC_LEN(de->name_len);
		memcpy (to, de, rec_len);
		((struct ext4_dir_entry_2 *) to)->rec_len =
				ext4_rec_len_to_disk(rec_len, blocksize);
		de->inode = 0;
		map++;
		to += rec_len;
	}
	return (struct ext4_dir_entry_2 *) (to - rec_len);
}

/*
 * Compact each dir entry in the range to the minimal rec_len.
 * Returns pointer to last entry in range.
 */
static struct ext4_dir_entry_2* dx_pack_dirents(char *base, unsigned blocksize)
{
	struct ext4_dir_entry_2 *next, *to, *prev, *de = (struct ext4_dir_entry_2 *) base;
	unsigned rec_len = 0;

	prev = to = de;
	while ((char*)de < base + blocksize) {
		next = ext4_next_entry(de, blocksize);
		if (de->inode && de->name_len) {
			rec_len = EXT4_DIR_REC_LEN(de->name_len);
			if (de > to)
				memmove(to, de, rec_len);
			to->rec_len = ext4_rec_len_to_disk(rec_len, blocksize);
			prev = to;
			to = (struct ext4_dir_entry_2 *) (((char *) to) + rec_len);
		}
		de = next;
	}
	return prev;
}

/*
 * Split a full leaf block to make room for a new dir entry.
 * Allocate a new block, and move entries so that they are approx. equally full.
 * Returns pointer to de in block into which the new entry will be inserted.
 */
static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
			struct buffer_head **bh,struct dx_frame *frame,
			struct dx_hash_info *hinfo)
{
	unsigned blocksize = dir->i_sb->s_blocksize;
	unsigned count, continued;
	struct buffer_head *bh2;
	ext4_lblk_t newblock;
	u32 hash2;
	struct dx_map_entry *map;
	char *data1 = (*bh)->b_data, *data2;
	unsigned split, move, size;
	struct ext4_dir_entry_2 *de = NULL, *de2;
	struct ext4_dir_entry_tail *t;
	int	csum_size = 0;
	int	err = 0, i;

	if (ext4_has_metadata_csum(dir->i_sb))
		csum_size = sizeof(struct ext4_dir_entry_tail);

	bh2 = ext4_append(handle, dir, &newblock);
	if (IS_ERR(bh2)) {
		brelse(*bh);
		*bh = NULL;
		return (struct ext4_dir_entry_2 *) bh2;
	}

	BUFFER_TRACE(*bh, "get_write_access");
	err = ext4_journal_get_write_access(handle, *bh);
	if (err)
		goto journal_error;

	BUFFER_TRACE(frame->bh, "get_write_access");
	err = ext4_journal_get_write_access(handle, frame->bh);
	if (err)
		goto journal_error;

	data2 = bh2->b_data;

	/* create map in the end of data2 block */
	map = (struct dx_map_entry *) (data2 + blocksize);
	count = dx_make_map((struct ext4_dir_entry_2 *) data1,
			     blocksize, hinfo, map);
	map -= count;
	dx_sort_map(map, count);
	/* Split the existing block in the middle, size-wise */
	size = 0;
	move = 0;
	for (i = count-1; i >= 0; i--) {
		/* is more than half of this entry in 2nd half of the block? */
		if (size + map[i].size/2 > blocksize/2)
			break;
		size += map[i].size;
		move++;
	}
	/* map index at which we will split */
	split = count - move;
	hash2 = map[split].hash;
	continued = hash2 == map[split - 1].hash;
	dxtrace(printk(KERN_INFO "Split block %lu at %x, %i/%i\n",
			(unsigned long)dx_get_block(frame->at),
					hash2, split, count-split));

	/* Fancy dance to stay within two buffers */
	de2 = dx_move_dirents(data1, data2, map + split, count - split, blocksize);
	de = dx_pack_dirents(data1, blocksize);
	de->rec_len = ext4_rec_len_to_disk(data1 + (blocksize - csum_size) -
					   (char *) de,
					   blocksize);
	de2->rec_len = ext4_rec_len_to_disk(data2 + (blocksize - csum_size) -
					    (char *) de2,
					    blocksize);
	if (csum_size) {
		t = EXT4_DIRENT_TAIL(data2, blocksize);
		initialize_dirent_tail(t, blocksize);

		t = EXT4_DIRENT_TAIL(data1, blocksize);
		initialize_dirent_tail(t, blocksize);
	}

	dxtrace(dx_show_leaf (hinfo, (struct ext4_dir_entry_2 *) data1, blocksize, 1));
	dxtrace(dx_show_leaf (hinfo, (struct ext4_dir_entry_2 *) data2, blocksize, 1));

	/* Which block gets the new entry? */
	if (hinfo->hash >= hash2) {
		swap(*bh, bh2);
		de = de2;
	}
	dx_insert_block(frame, hash2 + continued, newblock);
	err = ext4_handle_dirty_dirent_node(handle, dir, bh2);
	if (err)
		goto journal_error;
	err = ext4_handle_dirty_dx_node(handle, dir, frame->bh);
	if (err)
		goto journal_error;
	brelse(bh2);
	dxtrace(dx_show_index("frame", frame->entries));
	return de;

journal_error:
	brelse(*bh);
	brelse(bh2);
	*bh = NULL;
	ext4_std_error(dir->i_sb, err);
	return ERR_PTR(err);
}

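/*
 * Scan a dirent buffer for a slot big enough to hold a new entry of
 * @namelen bytes.  Returns 0 with the slot in *dest_de on success,
 * -EEXIST if the name is already present, -EIO if a corrupt entry is
 * found, or -ENOSPC if no slot is large enough.
 */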
int ext4_find_dest_de(struct inode *dir, struct inode *inode,
		      struct buffer_head *bh,
		      void *buf, int buf_size,
		      const char *name, int namelen,
		      struct ext4_dir_entry_2 **dest_de)
{
	struct ext4_dir_entry_2 *de;
	unsigned short reclen = EXT4_DIR_REC_LEN(namelen);
	int nlen, rlen;
	unsigned int offset = 0;
	char *top;

	de = (struct ext4_dir_entry_2 *)buf;
	top = buf + buf_size - reclen;
	while ((char *) de <= top) {
		if (ext4_check_dir_entry(dir, NULL, de, bh,
					 buf, buf_size, offset))
			return -EIO;
		if (ext4_match(namelen, name, de))
			return -EEXIST;
		nlen = EXT4_DIR_REC_LEN(de->name_len);
		rlen = ext4_rec_len_from_disk(de->rec_len, buf_size);
		if ((de->inode ? rlen - nlen : rlen) >= reclen)
			break;
		de = (struct ext4_dir_entry_2 *)((char *)de + rlen);
		offset += rlen;
	}
	if ((char *) de > top)
		return -ENOSPC;

	*dest_de = de;
	return 0;
}

void ext4_insert_dentry(struct inode *inode,
			struct ext4_dir_entry_2 *de,
			int buf_size,
			const char *name, int namelen)
{

	int nlen, rlen;

	nlen = EXT4_DIR_REC_LEN(de->name_len);
	rlen = ext4_rec_len_from_disk(de->rec_len, buf_size);
	if (de->inode) {
		struct ext4_dir_entry_2 *de1 =
				(struct ext4_dir_entry_2 *)((char *)de + nlen);
		de1->rec_len = ext4_rec_len_to_disk(rlen - nlen, buf_size);
		de->rec_len = ext4_rec_len_to_disk(nlen, buf_size);
		de = de1;
	}
	de->file_type = EXT4_FT_UNKNOWN;
	de->inode = cpu_to_le32(inode->i_ino);
	ext4_set_de_type(inode->i_sb, de, inode->i_mode);
	de->name_len = namelen;
	memcpy(de->name, name, namelen);
}
/*
 * Add a new entry into a directory (leaf) block.  If de is non-NULL,
 * it points to a directory entry which is guaranteed to be large
 * enough for new directory entry.  If de is NULL, then
 * add_dirent_to_buf will attempt to search the directory block for
 * space.  It will return -ENOSPC if no space is available, -EIO if the
 * directory block is corrupted, and -EEXIST if the entry already exists.
 */
static int add_dirent_to_buf(handle_t *handle, struct dentry *dentry,
			     struct inode *inode, struct ext4_dir_entry_2 *de,
			     struct buffer_head *bh)
{
	struct inode	*dir = dentry->d_parent->d_inode;
	const char	*name = dentry->d_name.name;
	int		namelen = dentry->d_name.len;
	unsigned int	blocksize = dir->i_sb->s_blocksize;
	int		csum_size = 0;
	int		err;

	if (ext4_has_metadata_csum(inode->i_sb))
		csum_size = sizeof(struct ext4_dir_entry_tail);

	if (!de) {
		err = ext4_find_dest_de(dir, inode,
					bh, bh->b_data, blocksize - csum_size,
					name, namelen, &de);
		if (err)
			return err;
	}
	BUFFER_TRACE(bh, "get_write_access");
	err = ext4_journal_get_write_access(handle, bh);
	if (err) {
		ext4_std_error(dir->i_sb, err);
		return err;
	}

	/* By now the buffer is marked for journaling */
	ext4_insert_dentry(inode, de, blocksize, name, namelen);

	/*
	 * XXX shouldn't update any times until successful
	 * completion of syscall, but too many callers depend
	 * on this.
	 *
	 * XXX similarly, too many callers depend on
	 * ext4_new_inode() setting the times, but error
	 * recovery deletes the inode, so the worst that can
	 * happen is that the times are slightly out of date
	 * and/or different from the directory change time.
	 */
	dir->i_mtime = dir->i_ctime = ext4_current_time(dir);
	ext4_update_dx_flag(dir);
	dir->i_version++;
	ext4_mark_inode_dirty(handle, dir);
	BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
	err = ext4_handle_dirty_dirent_node(handle, dir, bh);
	if (err)
		ext4_std_error(dir->i_sb, err);
	return 0;
}

/*
 * This converts a one block unindexed directory to a 3 block indexed
 * directory, and adds the dentry to the indexed directory.
 */
static int make_indexed_dir(handle_t *handle, struct dentry *dentry,
			    struct inode *inode, struct buffer_head *bh)
{
	struct inode	*dir = dentry->d_parent->d_inode;
	const char	*name = dentry->d_name.name;
	int		namelen = dentry->d_name.len;
	struct buffer_head *bh2;
	struct dx_root	*root;
	struct dx_frame	frames[2], *frame;
	struct dx_entry *entries;
	struct ext4_dir_entry_2	*de, *de2;
	struct ext4_dir_entry_tail *t;
	char		*data1, *top;
	unsigned	len;
	int		retval;
	unsigned	blocksize;
	struct dx_hash_info hinfo;
	ext4_lblk_t  block;
	struct fake_dirent *fde;
	int		csum_size = 0;

	if (ext4_has_metadata_csum(inode->i_sb))
		csum_size = sizeof(struct ext4_dir_entry_tail);

	blocksize =  dir->i_sb->s_blocksize;
	dxtrace(printk(KERN_DEBUG "Creating index: inode %lu\n", dir->i_ino));
	BUFFER_TRACE(bh, "get_write_access");
	retval = ext4_journal_get_write_access(handle, bh);
	if (retval) {
		ext4_std_error(dir->i_sb, retval);
		brelse(bh);
		return retval;
	}
	root = (struct dx_root *) bh->b_data;

	/* The 0th block becomes the root, move the dirents out */
	fde = &root->dotdot;
	de = (struct ext4_dir_entry_2 *)((char *)fde +
		ext4_rec_len_from_disk(fde->rec_len, blocksize));
	if ((char *) de >= (((char *) root) + blocksize)) {
		EXT4_ERROR_INODE(dir, "invalid rec_len for '..'");
		brelse(bh);
		return -EIO;
	}
	len = ((char *) root) + (blocksize - csum_size) - (char *) de;
1788 1789

	/* Allocate new block for the 0th block's dirents */
1790 1791
	bh2 = ext4_append(handle, dir, &block);
	if (IS_ERR(bh2)) {
1792
		brelse(bh);
1793
		return PTR_ERR(bh2);
1794
	}
1795
	ext4_set_inode_flag(dir, EXT4_INODE_INDEX);
1796 1797 1798
	data1 = bh2->b_data;

	memcpy (data1, de, len);
1799
	de = (struct ext4_dir_entry_2 *) data1;
1800
	top = data1 + len;
1801
	while ((char *)(de2 = ext4_next_entry(de, blocksize)) < top)
1802
		de = de2;
1803 1804
	de->rec_len = ext4_rec_len_to_disk(data1 + (blocksize - csum_size) -
					   (char *) de,
1805
					   blocksize);
1806 1807 1808 1809 1810 1811

	if (csum_size) {
		t = EXT4_DIRENT_TAIL(data1, blocksize);
		initialize_dirent_tail(t, blocksize);
	}

1812
	/* Initialize the root; the dot dirents already exist */
1813
	de = (struct ext4_dir_entry_2 *) (&root->dotdot);
1814 1815
	de->rec_len = ext4_rec_len_to_disk(blocksize - EXT4_DIR_REC_LEN(2),
					   blocksize);
1816 1817
	memset (&root->info, 0, sizeof(root->info));
	root->info.info_length = sizeof(root->info);
1818
	root->info.hash_version = EXT4_SB(dir->i_sb)->s_def_hash_version;
1819
	entries = root->entries;
1820 1821 1822
	dx_set_block(entries, 1);
	dx_set_count(entries, 1);
	dx_set_limit(entries, dx_root_limit(dir, sizeof(root->info)));
1823 1824 1825

	/* Initialize as for dx_probe */
	hinfo.hash_version = root->info.hash_version;
1826 1827
	if (hinfo.hash_version <= DX_HASH_TEA)
		hinfo.hash_version += EXT4_SB(dir->i_sb)->s_hash_unsigned;
1828 1829
	hinfo.seed = EXT4_SB(dir->i_sb)->s_hash_seed;
	ext4fs_dirhash(name, namelen, &hinfo);
1830
	memset(frames, 0, sizeof(frames));
1831 1832 1833 1834 1835
	frame = frames;
	frame->entries = entries;
	frame->at = entries;
	frame->bh = bh;
	bh = bh2;
1836

1837 1838 1839 1840 1841 1842
	retval = ext4_handle_dirty_dx_node(handle, dir, frame->bh);
	if (retval)
		goto out_frames;	
	retval = ext4_handle_dirty_dirent_node(handle, dir, bh);
	if (retval)
		goto out_frames;	
1843

1844 1845
	de = do_split(handle,dir, &bh, frame, &hinfo);
	if (IS_ERR(de)) {
1846 1847
		retval = PTR_ERR(de);
		goto out_frames;
1848 1849
	}
	dx_release(frames);
1850

1851 1852 1853
	retval = add_dirent_to_buf(handle, dentry, inode, de, bh);
	brelse(bh);
	return retval;
1854 1855 1856 1857 1858 1859 1860 1861 1862
out_frames:
	/*
	 * Even if the block split failed, we have to properly write
	 * out all the changes we did so far. Otherwise we can end up
	 * with corrupted filesystem.
	 */
	ext4_mark_inode_dirty(handle, dir);
	dx_release(frames);
	return retval;
1863 1864 1865
}

/*
 *	ext4_add_entry()
 *
 * adds a file entry to the specified directory, using the same
 * semantics as ext4_find_entry(). It returns 0 on success or a
 * negative error code on failure.
 *
 * NOTE!! The inode part of 'de' is left at 0 - which means you
 * may not sleep between calling this and putting something into
 * the entry, as someone else might have used it while you slept.
 */
static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
			  struct inode *inode)
{
	struct inode *dir = dentry->d_parent->d_inode;
	struct buffer_head *bh = NULL;
	struct ext4_dir_entry_2 *de;
	struct ext4_dir_entry_tail *t;
	struct super_block *sb;
	int	retval;
	int	dx_fallback=0;
	unsigned blocksize;
	ext4_lblk_t block, blocks;
	int	csum_size = 0;

	if (ext4_has_metadata_csum(inode->i_sb))
		csum_size = sizeof(struct ext4_dir_entry_tail);

	sb = dir->i_sb;
	blocksize = sb->s_blocksize;
	if (!dentry->d_name.len)
		return -EINVAL;

	if (ext4_has_inline_data(dir)) {
		retval = ext4_try_add_inline_entry(handle, dentry, inode);
		if (retval < 0)
			return retval;
		if (retval == 1) {
			retval = 0;
			goto out;
		}
	}

	if (is_dx(dir)) {
		retval = ext4_dx_add_entry(handle, dentry, inode);
		if (!retval || (retval != ERR_BAD_DX_DIR))
			goto out;
		ext4_clear_inode_flag(dir, EXT4_INODE_INDEX);
		dx_fallback++;
		ext4_mark_inode_dirty(handle, dir);
	}
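	/*
	 * Linear (non-indexed) path: scan each existing directory block for
	 * free space; if the single first block fills up and the DIR_INDEX
	 * feature is available, convert to an indexed directory below.
	 */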
	blocks = dir->i_size >> sb->s_blocksize_bits;
	for (block = 0; block < blocks; block++) {
		bh = ext4_read_dirblock(dir, block, DIRENT);
		if (IS_ERR(bh))
			return PTR_ERR(bh);

		retval = add_dirent_to_buf(handle, dentry, inode, NULL, bh);
		if (retval != -ENOSPC)
			goto out;

		if (blocks == 1 && !dx_fallback &&
		    EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_DIR_INDEX)) {
			retval = make_indexed_dir(handle, dentry, inode, bh);
			bh = NULL; /* make_indexed_dir releases bh */
			goto out;
		}
		brelse(bh);
	}
	bh = ext4_append(handle, dir, &block);
	if (IS_ERR(bh))
		return PTR_ERR(bh);
	de = (struct ext4_dir_entry_2 *) bh->b_data;
	de->inode = 0;
	de->rec_len = ext4_rec_len_to_disk(blocksize - csum_size, blocksize);

	if (csum_size) {
		t = EXT4_DIRENT_TAIL(bh->b_data, blocksize);
		initialize_dirent_tail(t, blocksize);
	}

	retval = add_dirent_to_buf(handle, dentry, inode, de, bh);
out:
	brelse(bh);
	if (retval == 0)
		ext4_set_inode_state(inode, EXT4_STATE_NEWENTRY);
	return retval;
}

/*
 * Returns 0 for success, or a negative error value
 */
static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry,
			     struct inode *inode)
{
	struct dx_frame frames[2], *frame;
	struct dx_entry *entries, *at;
	struct dx_hash_info hinfo;
	struct buffer_head *bh;
	struct inode *dir = dentry->d_parent->d_inode;
	struct super_block *sb = dir->i_sb;
	struct ext4_dir_entry_2 *de;
	int err;

	frame = dx_probe(&dentry->d_name, dir, &hinfo, frames);
	if (IS_ERR(frame))
		return PTR_ERR(frame);
	entries = frame->entries;
	at = frame->at;
	bh = ext4_read_dirblock(dir, dx_get_block(frame->at), DIRENT);
	if (IS_ERR(bh)) {
		err = PTR_ERR(bh);
		bh = NULL;
		goto cleanup;
	}

	BUFFER_TRACE(bh, "get_write_access");
	err = ext4_journal_get_write_access(handle, bh);
	if (err)
		goto journal_error;

	err = add_dirent_to_buf(handle, dentry, inode, NULL, bh);
	if (err != -ENOSPC)
		goto cleanup;

	/* Block full, should compress but for now just split */
	dxtrace(printk(KERN_DEBUG "using %u of %u node entries\n",
		       dx_get_count(entries), dx_get_limit(entries)));
	/* Need to split index? */
	if (dx_get_count(entries) == dx_get_limit(entries)) {
		ext4_lblk_t newblock;
		unsigned icount = dx_get_count(entries);
		int levels = frame - frames;
		struct dx_entry *entries2;
		struct dx_node *node2;
		struct buffer_head *bh2;

		if (levels && (dx_get_count(frames->entries) ==
			       dx_get_limit(frames->entries))) {
			ext4_warning(sb, "Directory index full!");
			err = -ENOSPC;
			goto cleanup;
		}
		bh2 = ext4_append(handle, dir, &newblock);
		if (IS_ERR(bh2)) {
			err = PTR_ERR(bh2);
			goto cleanup;
		}
		node2 = (struct dx_node *)(bh2->b_data);
		entries2 = node2->entries;
		memset(&node2->fake, 0, sizeof(struct fake_dirent));
		node2->fake.rec_len = ext4_rec_len_to_disk(sb->s_blocksize,
							   sb->s_blocksize);
		BUFFER_TRACE(frame->bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, frame->bh);
		if (err)
			goto journal_error;
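		/*
		 * Two cases: if the index already has two levels, split the
		 * full second-level node in half; otherwise move the root's
		 * entries into the new block and grow a second level.
		 */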
		if (levels) {
			unsigned icount1 = icount/2, icount2 = icount - icount1;
			unsigned hash2 = dx_get_hash(entries + icount1);
			dxtrace(printk(KERN_DEBUG "Split index %i/%i\n",
				       icount1, icount2));

			BUFFER_TRACE(frame->bh, "get_write_access"); /* index root */
			err = ext4_journal_get_write_access(handle,
							     frames[0].bh);
			if (err)
				goto journal_error;

			memcpy((char *) entries2, (char *) (entries + icount1),
			       icount2 * sizeof(struct dx_entry));
			dx_set_count(entries, icount1);
			dx_set_count(entries2, icount2);
			dx_set_limit(entries2, dx_node_limit(dir));

			/* Which index block gets the new entry? */
			if (at - entries >= icount1) {
				frame->at = at = at - entries - icount1 + entries2;
				frame->entries = entries = entries2;
				swap(frame->bh, bh2);
			}
			dx_insert_block(frames + 0, hash2, newblock);
			dxtrace(dx_show_index("node", frames[1].entries));
			dxtrace(dx_show_index("node",
			       ((struct dx_node *) bh2->b_data)->entries));
			err = ext4_handle_dirty_dx_node(handle, dir, bh2);
			if (err)
				goto journal_error;
			brelse (bh2);
		} else {
			dxtrace(printk(KERN_DEBUG
				       "Creating second level index...\n"));
			memcpy((char *) entries2, (char *) entries,
			       icount * sizeof(struct dx_entry));
			dx_set_limit(entries2, dx_node_limit(dir));

			/* Set up root */
			dx_set_count(entries, 1);
			dx_set_block(entries + 0, newblock);
			((struct dx_root *) frames[0].bh->b_data)->info.indirect_levels = 1;

			/* Add new access path frame */
			frame = frames + 1;
			frame->at = at = at - entries + entries2;
			frame->entries = entries = entries2;
			frame->bh = bh2;
			err = ext4_journal_get_write_access(handle,
							     frame->bh);
			if (err)
				goto journal_error;
		}
		err = ext4_handle_dirty_dx_node(handle, dir, frames[0].bh);
		if (err) {
			ext4_std_error(inode->i_sb, err);
			goto cleanup;
		}
	}
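	/* The leaf dirent block is full: split it, then add the new entry */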
	de = do_split(handle, dir, &bh, frame, &hinfo);
	if (IS_ERR(de)) {
		err = PTR_ERR(de);
		goto cleanup;
	}
	err = add_dirent_to_buf(handle, dentry, inode, de, bh);
	goto cleanup;

journal_error:
	ext4_std_error(dir->i_sb, err);
cleanup:
	brelse(bh);
	dx_release(frames);
	return err;
}

/*
 * ext4_generic_delete_entry deletes a directory entry by merging it
 * with the previous entry
 */
int ext4_generic_delete_entry(handle_t *handle,
			      struct inode *dir,
			      struct ext4_dir_entry_2 *de_del,
			      struct buffer_head *bh,
			      void *entry_buf,
			      int buf_size,
			      int csum_size)
{
	struct ext4_dir_entry_2 *de, *pde;
	unsigned int blocksize = dir->i_sb->s_blocksize;
	int i;

	i = 0;
	pde = NULL;
	de = (struct ext4_dir_entry_2 *)entry_buf;
	while (i < buf_size - csum_size) {
		if (ext4_check_dir_entry(dir, NULL, de, bh,
					 bh->b_data, bh->b_size, i))
			return -EIO;
		if (de == de_del)  {
			if (pde)
				pde->rec_len = ext4_rec_len_to_disk(
					ext4_rec_len_from_disk(pde->rec_len,
							       blocksize) +
					ext4_rec_len_from_disk(de->rec_len,
							       blocksize),
					blocksize);
			else
				de->inode = 0;
			dir->i_version++;
			return 0;
		}
		i += ext4_rec_len_from_disk(de->rec_len, blocksize);
		pde = de;
		de = ext4_next_entry(de, blocksize);
	}
	return -ENOENT;
}

static int ext4_delete_entry(handle_t *handle,
			     struct inode *dir,
			     struct ext4_dir_entry_2 *de_del,
			     struct buffer_head *bh)
{
	int err, csum_size = 0;

	if (ext4_has_inline_data(dir)) {
		int has_inline_data = 1;
		err = ext4_delete_inline_entry(handle, dir, de_del, bh,
					       &has_inline_data);
		if (has_inline_data)
			return err;
	}

	if (ext4_has_metadata_csum(dir->i_sb))
		csum_size = sizeof(struct ext4_dir_entry_tail);

	BUFFER_TRACE(bh, "get_write_access");
	err = ext4_journal_get_write_access(handle, bh);
	if (unlikely(err))
		goto out;

	err = ext4_generic_delete_entry(handle, dir, de_del,
					bh, bh->b_data,
					dir->i_sb->s_blocksize, csum_size);
	if (err)
		goto out;

	BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
	err = ext4_handle_dirty_dirent_node(handle, dir, bh);
	if (unlikely(err))
		goto out;

	return 0;
out:
	if (err != -ENOENT)
		ext4_std_error(dir->i_sb, err);
	return err;
}

/*
 * DIR_NLINK feature is set if 1) nlinks > EXT4_LINK_MAX or 2) nlinks == 2,
 * since this indicates that nlinks count was previously 1.
 */
static void ext4_inc_count(handle_t *handle, struct inode *inode)
{
	inc_nlink(inode);
	if (is_dx(inode) && inode->i_nlink > 1) {
		/* limit is 16-bit i_links_count */
		if (inode->i_nlink >= EXT4_LINK_MAX || inode->i_nlink == 2) {
			set_nlink(inode, 1);
			EXT4_SET_RO_COMPAT_FEATURE(inode->i_sb,
					      EXT4_FEATURE_RO_COMPAT_DIR_NLINK);
		}
	}
}

/*
 * If a directory had nlink == 1, then we should let it be 1. This indicates
 * directory has >EXT4_LINK_MAX subdirs.
 */
static void ext4_dec_count(handle_t *handle, struct inode *inode)
{
	if (!S_ISDIR(inode->i_mode) || inode->i_nlink > 2)
		drop_nlink(inode);
}


static int ext4_add_nondir(handle_t *handle,
		struct dentry *dentry, struct inode *inode)
{
	int err = ext4_add_entry(handle, dentry, inode);
	if (!err) {
		ext4_mark_inode_dirty(handle, inode);
		unlock_new_inode(inode);
		d_instantiate(dentry, inode);
		return 0;
	}
	drop_nlink(inode);
	unlock_new_inode(inode);
	iput(inode);
	return err;
}

/*
 * By the time this is called, we already have created
 * the directory cache entry for the new file, but it
 * is so far negative - it has no inode.
 *
 * If the create succeeds, we fill in the inode information
 * with d_instantiate().
 */
static int ext4_create(struct inode *dir, struct dentry *dentry, umode_t mode,
		       bool excl)
{
	handle_t *handle;
	struct inode *inode;
	int err, credits, retries = 0;

	dquot_initialize(dir);

	credits = (EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
		   EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3);
retry:
	inode = ext4_new_inode_start_handle(dir, mode, &dentry->d_name, 0,
					    NULL, EXT4_HT_DIR, credits);
	handle = ext4_journal_current_handle();
	err = PTR_ERR(inode);
	if (!IS_ERR(inode)) {
		inode->i_op = &ext4_file_inode_operations;
		if (test_opt(inode->i_sb, DAX))
			inode->i_fop = &ext4_dax_file_operations;
		else
			inode->i_fop = &ext4_file_operations;
		ext4_set_aops(inode);
		err = 0;
#ifdef CONFIG_EXT4_FS_ENCRYPTION
		if (!err && ext4_encrypted_inode(dir)) {
			err = ext4_inherit_context(dir, inode);
			if (err) {
				clear_nlink(inode);
				unlock_new_inode(inode);
				iput(inode);
			}
		}
#endif
		if (!err)
			err = ext4_add_nondir(handle, dentry, inode);
		if (!err && IS_DIRSYNC(dir))
			ext4_handle_sync(handle);
	}
	if (handle)
		ext4_journal_stop(handle);
	if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries))
		goto retry;
	return err;
}

static int ext4_mknod(struct inode *dir, struct dentry *dentry,
		      umode_t mode, dev_t rdev)
{
	handle_t *handle;
	struct inode *inode;
	int err, credits, retries = 0;

	if (!new_valid_dev(rdev))
		return -EINVAL;

	dquot_initialize(dir);

	credits = (EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
		   EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3);
retry:
	inode = ext4_new_inode_start_handle(dir, mode, &dentry->d_name, 0,
					    NULL, EXT4_HT_DIR, credits);
	handle = ext4_journal_current_handle();
	err = PTR_ERR(inode);
	if (!IS_ERR(inode)) {
		init_special_inode(inode, inode->i_mode, rdev);
		inode->i_op = &ext4_special_inode_operations;
		err = ext4_add_nondir(handle, dentry, inode);
		if (!err && IS_DIRSYNC(dir))
			ext4_handle_sync(handle);
	}
	if (handle)
		ext4_journal_stop(handle);
	if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries))
		goto retry;
	return err;
}

static int ext4_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	handle_t *handle;
	struct inode *inode;
	int err, retries = 0;

	dquot_initialize(dir);

retry:
	inode = ext4_new_inode_start_handle(dir, mode,
					    NULL, 0, NULL,
					    EXT4_HT_DIR,
			EXT4_MAXQUOTAS_INIT_BLOCKS(dir->i_sb) +
			  4 + EXT4_XATTR_TRANS_BLOCKS);
	handle = ext4_journal_current_handle();
	err = PTR_ERR(inode);
	if (!IS_ERR(inode)) {
		inode->i_op = &ext4_file_inode_operations;
		if (test_opt(inode->i_sb, DAX))
			inode->i_fop = &ext4_dax_file_operations;
		else
			inode->i_fop = &ext4_file_operations;
		ext4_set_aops(inode);
		d_tmpfile(dentry, inode);
		err = ext4_orphan_add(handle, inode);
		if (err)
			goto err_unlock_inode;
		mark_inode_dirty(inode);
		unlock_new_inode(inode);
	}
	if (handle)
		ext4_journal_stop(handle);
	if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries))
		goto retry;
	return err;
err_unlock_inode:
	ext4_journal_stop(handle);
	unlock_new_inode(inode);
	return err;
}

struct ext4_dir_entry_2 *ext4_init_dot_dotdot(struct inode *inode,
			  struct ext4_dir_entry_2 *de,
			  int blocksize, int csum_size,
			  unsigned int parent_ino, int dotdot_real_len)
{
	de->inode = cpu_to_le32(inode->i_ino);
	de->name_len = 1;
	de->rec_len = ext4_rec_len_to_disk(EXT4_DIR_REC_LEN(de->name_len),
					   blocksize);
	strcpy(de->name, ".");
	ext4_set_de_type(inode->i_sb, de, S_IFDIR);

	de = ext4_next_entry(de, blocksize);
	de->inode = cpu_to_le32(parent_ino);
	de->name_len = 2;
	if (!dotdot_real_len)
		de->rec_len = ext4_rec_len_to_disk(blocksize -
					(csum_size + EXT4_DIR_REC_LEN(1)),
					blocksize);
	else
		de->rec_len = ext4_rec_len_to_disk(
				EXT4_DIR_REC_LEN(de->name_len), blocksize);
	strcpy(de->name, "..");
	ext4_set_de_type(inode->i_sb, de, S_IFDIR);

	return ext4_next_entry(de, blocksize);
}

static int ext4_init_new_dir(handle_t *handle, struct inode *dir,
			     struct inode *inode)
{
	struct buffer_head *dir_block = NULL;
	struct ext4_dir_entry_2 *de;
	struct ext4_dir_entry_tail *t;
	ext4_lblk_t block = 0;
	unsigned int blocksize = dir->i_sb->s_blocksize;
	int csum_size = 0;
	int err;

	if (ext4_has_metadata_csum(dir->i_sb))
		csum_size = sizeof(struct ext4_dir_entry_tail);

	if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
		err = ext4_try_create_inline_dir(handle, dir, inode);
		if (err < 0 && err != -ENOSPC)
			goto out;
		if (!err)
			goto out;
	}

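	/* Not using inline data: set up a one-block directory with "." and ".." */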
	inode->i_size = 0;
	dir_block = ext4_append(handle, inode, &block);
	if (IS_ERR(dir_block))
		return PTR_ERR(dir_block);
	de = (struct ext4_dir_entry_2 *)dir_block->b_data;
	ext4_init_dot_dotdot(inode, de, blocksize, csum_size, dir->i_ino, 0);
	set_nlink(inode, 2);
	if (csum_size) {
		t = EXT4_DIRENT_TAIL(dir_block->b_data, blocksize);
		initialize_dirent_tail(t, blocksize);
	}

	BUFFER_TRACE(dir_block, "call ext4_handle_dirty_metadata");
	err = ext4_handle_dirty_dirent_node(handle, inode, dir_block);
	if (err)
		goto out;
	set_buffer_verified(dir_block);
out:
	brelse(dir_block);
	return err;
}

static int ext4_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	handle_t *handle;
	struct inode *inode;
	int err, credits, retries = 0;

	if (EXT4_DIR_LINK_MAX(dir))
		return -EMLINK;

	dquot_initialize(dir);

	credits = (EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
		   EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3);
retry:
	inode = ext4_new_inode_start_handle(dir, S_IFDIR | mode,
					    &dentry->d_name,
					    0, NULL, EXT4_HT_DIR, credits);
	handle = ext4_journal_current_handle();
	err = PTR_ERR(inode);
	if (IS_ERR(inode))
		goto out_stop;

	inode->i_op = &ext4_dir_inode_operations;
	inode->i_fop = &ext4_dir_operations;
	err = ext4_init_new_dir(handle, dir, inode);
	if (err)
		goto out_clear_inode;
#ifdef CONFIG_EXT4_FS_ENCRYPTION
	if (ext4_encrypted_inode(dir)) {
		err = ext4_inherit_context(dir, inode);
		if (err)
			goto out_clear_inode;
	}
#endif
	err = ext4_mark_inode_dirty(handle, inode);
	if (!err)
		err = ext4_add_entry(handle, dentry, inode);
	if (err) {
out_clear_inode:
		clear_nlink(inode);
		unlock_new_inode(inode);
		ext4_mark_inode_dirty(handle, inode);
		iput(inode);
		goto out_stop;
	}
	ext4_inc_count(handle, dir);
	ext4_update_dx_flag(dir);
	err = ext4_mark_inode_dirty(handle, dir);
	if (err)
		goto out_clear_inode;
	unlock_new_inode(inode);
	d_instantiate(dentry, inode);
	if (IS_DIRSYNC(dir))
		ext4_handle_sync(handle);

out_stop:
	if (handle)
		ext4_journal_stop(handle);
	if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries))
		goto retry;
	return err;
}

/*
 * routine to check that the specified directory is empty (for rmdir)
 */
int ext4_empty_dir(struct inode *inode)
{
	unsigned int offset;
	struct buffer_head *bh;
	struct ext4_dir_entry_2 *de, *de1;
	struct super_block *sb;
	int err = 0;

	if (ext4_has_inline_data(inode)) {
		int has_inline_data = 1;

		err = empty_inline_dir(inode, &has_inline_data);
		if (has_inline_data)
			return err;
	}

	sb = inode->i_sb;
	if (inode->i_size < EXT4_DIR_REC_LEN(1) + EXT4_DIR_REC_LEN(2)) {
		EXT4_ERROR_INODE(inode, "invalid size");
		return 1;
	}
	bh = ext4_read_dirblock(inode, 0, EITHER);
	if (IS_ERR(bh))
		return 1;

	de = (struct ext4_dir_entry_2 *) bh->b_data;
	de1 = ext4_next_entry(de, sb->s_blocksize);
	if (le32_to_cpu(de->inode) != inode->i_ino ||
			!le32_to_cpu(de1->inode) ||
			strcmp(".", de->name) ||
			strcmp("..", de1->name)) {
		ext4_warning(inode->i_sb,
			     "bad directory (dir #%lu) - no `.' or `..'",
			     inode->i_ino);
		brelse(bh);
		return 1;
	}
	offset = ext4_rec_len_from_disk(de->rec_len, sb->s_blocksize) +
		 ext4_rec_len_from_disk(de1->rec_len, sb->s_blocksize);
	de = ext4_next_entry(de1, sb->s_blocksize);
	while (offset < inode->i_size) {
		if ((void *) de >= (void *) (bh->b_data+sb->s_blocksize)) {
			unsigned int lblock;
			err = 0;
			brelse(bh);
			lblock = offset >> EXT4_BLOCK_SIZE_BITS(sb);
			bh = ext4_read_dirblock(inode, lblock, EITHER);
			if (IS_ERR(bh))
				return 1;
			de = (struct ext4_dir_entry_2 *) bh->b_data;
		}
		if (ext4_check_dir_entry(inode, NULL, de, bh,
					 bh->b_data, bh->b_size, offset)) {
			de = (struct ext4_dir_entry_2 *)(bh->b_data +
							 sb->s_blocksize);
			offset = (offset | (sb->s_blocksize - 1)) + 1;
			continue;
		}
		if (le32_to_cpu(de->inode)) {
			brelse(bh);
			return 0;
		}
		offset += ext4_rec_len_from_disk(de->rec_len, sb->s_blocksize);
		de = ext4_next_entry(de, sb->s_blocksize);
	}
	brelse(bh);
	return 1;
}

/*
 * ext4_orphan_add() links an unlinked or truncated inode into a list of
 * such inodes, starting at the superblock, in case we crash before the
 * file is closed/deleted, or in case the inode truncate spans multiple
 * transactions and the last transaction is not recovered after a crash.
 *
 * At filesystem recovery time, we walk this list deleting unlinked
 * inodes and truncating linked inodes in ext4_orphan_cleanup().
 *
 * Orphan list manipulation functions must be called under i_mutex unless
 * we are just creating the inode or deleting it.
 */
int ext4_orphan_add(handle_t *handle, struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_iloc iloc;
	int err = 0, rc;
	bool dirty = false;

	if (!sbi->s_journal || is_bad_inode(inode))
		return 0;

	WARN_ON_ONCE(!(inode->i_state & (I_NEW | I_FREEING)) &&
		     !mutex_is_locked(&inode->i_mutex));
	/*
	 * Exit early if inode already is on orphan list. This is a big speedup
	 * since we don't have to contend on the global s_orphan_lock.
	 */
	if (!list_empty(&EXT4_I(inode)->i_orphan))
		return 0;

	/*
	 * Orphan handling is only valid for files with data blocks
	 * being truncated, or files being unlinked. Note that we either
	 * hold i_mutex, or the inode can not be referenced from outside,
	 * so i_nlink should not be bumped due to race
	 */
	J_ASSERT((S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
		  S_ISLNK(inode->i_mode)) || inode->i_nlink == 0);

	BUFFER_TRACE(sbi->s_sbh, "get_write_access");
	err = ext4_journal_get_write_access(handle, sbi->s_sbh);
	if (err)
		goto out;

	err = ext4_reserve_inode_write(handle, inode, &iloc);
	if (err)
		goto out;

	mutex_lock(&sbi->s_orphan_lock);
	/*
	 * Due to previous errors inode may be already a part of on-disk
	 * orphan list. If so skip on-disk list modification.
	 */
	if (!NEXT_ORPHAN(inode) || NEXT_ORPHAN(inode) >
	    (le32_to_cpu(sbi->s_es->s_inodes_count))) {
		/* Insert this inode at the head of the on-disk orphan list */
		NEXT_ORPHAN(inode) = le32_to_cpu(sbi->s_es->s_last_orphan);
		sbi->s_es->s_last_orphan = cpu_to_le32(inode->i_ino);
		dirty = true;
	}
	list_add(&EXT4_I(inode)->i_orphan, &sbi->s_orphan);
	mutex_unlock(&sbi->s_orphan_lock);

	if (dirty) {
		err = ext4_handle_dirty_super(handle, sb);
		rc = ext4_mark_iloc_dirty(handle, inode, &iloc);
		if (!err)
			err = rc;
		if (err) {
			/*
			 * We have to remove inode from in-memory list if
			 * addition to on disk orphan list failed. Stray orphan
			 * list entries can cause panics at unmount time.
			 */
			mutex_lock(&sbi->s_orphan_lock);
			list_del(&EXT4_I(inode)->i_orphan);
			mutex_unlock(&sbi->s_orphan_lock);
		}
	}
	jbd_debug(4, "superblock will point to %lu\n", inode->i_ino);
	jbd_debug(4, "orphan inode %lu will point to %d\n",
			inode->i_ino, NEXT_ORPHAN(inode));
out:
	ext4_std_error(sb, err);
	return err;
}

/*
 * ext4_orphan_del() removes an unlinked or truncated inode from the list
 * of such inodes stored on disk, because it is finally being cleaned up.
 */
int ext4_orphan_del(handle_t *handle, struct inode *inode)
{
	struct list_head *prev;
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	__u32 ino_next;
	struct ext4_iloc iloc;
	int err = 0;

	if (!sbi->s_journal && !(sbi->s_mount_state & EXT4_ORPHAN_FS))
		return 0;

	WARN_ON_ONCE(!(inode->i_state & (I_NEW | I_FREEING)) &&
		     !mutex_is_locked(&inode->i_mutex));
	/* Do this quick check before taking global s_orphan_lock. */
	if (list_empty(&ei->i_orphan))
		return 0;

	if (handle) {
		/* Grab inode buffer early before taking global s_orphan_lock */
		err = ext4_reserve_inode_write(handle, inode, &iloc);
	}

	mutex_lock(&sbi->s_orphan_lock);
	jbd_debug(4, "remove inode %lu from orphan list\n", inode->i_ino);

	prev = ei->i_orphan.prev;
	list_del_init(&ei->i_orphan);

	/* If we're on an error path, we may not have a valid
	 * transaction handle with which to update the orphan list on
	 * disk, but we still need to remove the inode from the linked
	 * list in memory. */
	if (!handle || err) {
		mutex_unlock(&sbi->s_orphan_lock);
		goto out_err;
	}

	ino_next = NEXT_ORPHAN(inode);
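	/*
	 * Unhook the inode from the singly linked on-disk orphan chain: we are
	 * pointed to either by the superblock (if we are the list head) or by
	 * the previous orphan inode.
	 */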
	if (prev == &sbi->s_orphan) {
		jbd_debug(4, "superblock will point to %u\n", ino_next);
		BUFFER_TRACE(sbi->s_sbh, "get_write_access");
		err = ext4_journal_get_write_access(handle, sbi->s_sbh);
		if (err) {
			mutex_unlock(&sbi->s_orphan_lock);
			goto out_brelse;
		}
		sbi->s_es->s_last_orphan = cpu_to_le32(ino_next);
		mutex_unlock(&sbi->s_orphan_lock);
		err = ext4_handle_dirty_super(handle, inode->i_sb);
	} else {
		struct ext4_iloc iloc2;
		struct inode *i_prev =
			&list_entry(prev, struct ext4_inode_info, i_orphan)->vfs_inode;

		jbd_debug(4, "orphan inode %lu will point to %u\n",
			  i_prev->i_ino, ino_next);
		err = ext4_reserve_inode_write(handle, i_prev, &iloc2);
		if (err) {
			mutex_unlock(&sbi->s_orphan_lock);
			goto out_brelse;
		}
		NEXT_ORPHAN(i_prev) = ino_next;
		err = ext4_mark_iloc_dirty(handle, i_prev, &iloc2);
		mutex_unlock(&sbi->s_orphan_lock);
	}
	if (err)
		goto out_brelse;
	NEXT_ORPHAN(inode) = 0;
	err = ext4_mark_iloc_dirty(handle, inode, &iloc);
out_err:
	ext4_std_error(inode->i_sb, err);
	return err;

out_brelse:
	brelse(iloc.bh);
	goto out_err;
}

static int ext4_rmdir(struct inode *dir, struct dentry *dentry)
{
	int retval;
	struct inode *inode;
	struct buffer_head *bh;
	struct ext4_dir_entry_2 *de;
	handle_t *handle = NULL;

	/* Initialize quotas before so that eventual writes go in
	 * separate transaction */
	dquot_initialize(dir);
	dquot_initialize(dentry->d_inode);

	retval = -ENOENT;
	bh = ext4_find_entry(dir, &dentry->d_name, &de, NULL);
	if (IS_ERR(bh))
		return PTR_ERR(bh);
	if (!bh)
		goto end_rmdir;

	inode = dentry->d_inode;

	retval = -EIO;
	if (le32_to_cpu(de->inode) != inode->i_ino)
		goto end_rmdir;

	retval = -ENOTEMPTY;
	if (!ext4_empty_dir(inode))
		goto end_rmdir;

	handle = ext4_journal_start(dir, EXT4_HT_DIR,
				    EXT4_DATA_TRANS_BLOCKS(dir->i_sb));
	if (IS_ERR(handle)) {
		retval = PTR_ERR(handle);
		handle = NULL;
		goto end_rmdir;
	}

	if (IS_DIRSYNC(dir))
		ext4_handle_sync(handle);

	retval = ext4_delete_entry(handle, dir, de, bh);
	if (retval)
		goto end_rmdir;
	if (!EXT4_DIR_LINK_EMPTY(inode))
		ext4_warning(inode->i_sb,
			     "empty directory has too many links (%d)",
			     inode->i_nlink);
	inode->i_version++;
	clear_nlink(inode);
	/* There's no need to set i_disksize: the fact that i_nlink is
	 * zero will ensure that the right thing happens during any
	 * recovery. */
	inode->i_size = 0;
	ext4_orphan_add(handle, inode);
	inode->i_ctime = dir->i_ctime = dir->i_mtime = ext4_current_time(inode);
	ext4_mark_inode_dirty(handle, inode);
	ext4_dec_count(handle, dir);
	ext4_update_dx_flag(dir);
	ext4_mark_inode_dirty(handle, dir);

end_rmdir:
	brelse(bh);
	if (handle)
		ext4_journal_stop(handle);
	return retval;
}

static int ext4_unlink(struct inode *dir, struct dentry *dentry)
{
	int retval;
	struct inode *inode;
	struct buffer_head *bh;
	struct ext4_dir_entry_2 *de;
	handle_t *handle = NULL;

	trace_ext4_unlink_enter(dir, dentry);
	/* Initialize quotas before so that eventual writes go
	 * in separate transaction */
	dquot_initialize(dir);
	dquot_initialize(dentry->d_inode);

	retval = -ENOENT;
	bh = ext4_find_entry(dir, &dentry->d_name, &de, NULL);
	if (IS_ERR(bh))
		return PTR_ERR(bh);
	if (!bh)
		goto end_unlink;

	inode = dentry->d_inode;

	retval = -EIO;
	if (le32_to_cpu(de->inode) != inode->i_ino)
		goto end_unlink;

	handle = ext4_journal_start(dir, EXT4_HT_DIR,
				    EXT4_DATA_TRANS_BLOCKS(dir->i_sb));
	if (IS_ERR(handle)) {
		retval = PTR_ERR(handle);
		handle = NULL;
		goto end_unlink;
	}

	if (IS_DIRSYNC(dir))
		ext4_handle_sync(handle);

	if (!inode->i_nlink) {
		ext4_warning(inode->i_sb,
			     "Deleting nonexistent file (%lu), %d",
			     inode->i_ino, inode->i_nlink);
		set_nlink(inode, 1);
	}
	retval = ext4_delete_entry(handle, dir, de, bh);
	if (retval)
		goto end_unlink;
	dir->i_ctime = dir->i_mtime = ext4_current_time(dir);
	ext4_update_dx_flag(dir);
	ext4_mark_inode_dirty(handle, dir);
	drop_nlink(inode);
	if (!inode->i_nlink)
		ext4_orphan_add(handle, inode);
	inode->i_ctime = ext4_current_time(inode);
	ext4_mark_inode_dirty(handle, inode);

end_unlink:
	brelse(bh);
	if (handle)
		ext4_journal_stop(handle);
	trace_ext4_unlink_exit(dentry, retval);
	return retval;
}

static int ext4_symlink(struct inode *dir,
			struct dentry *dentry, const char *symname)
{
	handle_t *handle;
	struct inode *inode;
	int l, err, retries = 0;
	int credits;

	l = strlen(symname)+1;
	if (l > dir->i_sb->s_blocksize)
		return -ENAMETOOLONG;

	dquot_initialize(dir);

	if (l > EXT4_N_BLOCKS * 4) {
		/*
		 * For non-fast symlinks, we just allocate inode and put it on
		 * orphan list in the first transaction => we need bitmap,
		 * group descriptor, sb, inode block, quota blocks, and
		 * possibly selinux xattr blocks.
		 */
		credits = 4 + EXT4_MAXQUOTAS_INIT_BLOCKS(dir->i_sb) +
			  EXT4_XATTR_TRANS_BLOCKS;
	} else {
		/*
		 * Fast symlink. We have to add entry to directory
		 * (EXT4_DATA_TRANS_BLOCKS + EXT4_INDEX_EXTRA_TRANS_BLOCKS),
		 * allocate new inode (bitmap, group descriptor, inode block,
		 * quota blocks, sb is already counted in previous macros).
		 */
		credits = EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
			  EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3;
	}
retry:
	inode = ext4_new_inode_start_handle(dir, S_IFLNK|S_IRWXUGO,
					    &dentry->d_name, 0, NULL,
					    EXT4_HT_DIR, credits);
	handle = ext4_journal_current_handle();
	err = PTR_ERR(inode);
	if (IS_ERR(inode))
		goto out_stop;

	if (l > EXT4_N_BLOCKS * 4) {
		inode->i_op = &ext4_symlink_inode_operations;
		ext4_set_aops(inode);
		/*
		 * We cannot call page_symlink() with transaction started
		 * because it calls into ext4_write_begin() which can wait
		 * for transaction commit if we are running out of space
		 * and thus we deadlock. So we have to stop transaction now
		 * and restart it when symlink contents is written.
		 *
		 * To keep fs consistent in case of crash, we have to put inode
		 * to orphan list in the mean time.
		 */
		drop_nlink(inode);
		err = ext4_orphan_add(handle, inode);
		ext4_journal_stop(handle);
		if (err)
			goto err_drop_inode;
		err = __page_symlink(inode, symname, l, 1);
		if (err)
			goto err_drop_inode;
		/*
		 * Now inode is being linked into dir (EXT4_DATA_TRANS_BLOCKS
		 * + EXT4_INDEX_EXTRA_TRANS_BLOCKS), inode is also modified
		 */
		handle = ext4_journal_start(dir, EXT4_HT_DIR,
				EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
				EXT4_INDEX_EXTRA_TRANS_BLOCKS + 1);
		if (IS_ERR(handle)) {
			err = PTR_ERR(handle);
			goto err_drop_inode;
		}
		set_nlink(inode, 1);
		err = ext4_orphan_del(handle, inode);
		if (err) {
			ext4_journal_stop(handle);
			clear_nlink(inode);
			goto err_drop_inode;
		}
	} else {
		/* clear the extent format for fast symlink */
		ext4_clear_inode_flag(inode, EXT4_INODE_EXTENTS);
		inode->i_op = &ext4_fast_symlink_inode_operations;
		memcpy((char *)&EXT4_I(inode)->i_data, symname, l);
		inode->i_size = l-1;
	}
	EXT4_I(inode)->i_disksize = inode->i_size;
	err = ext4_add_nondir(handle, dentry, inode);
	if (!err && IS_DIRSYNC(dir))
		ext4_handle_sync(handle);

out_stop:
	if (handle)
		ext4_journal_stop(handle);
	if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries))
		goto retry;
	return err;
err_drop_inode:
	unlock_new_inode(inode);
	iput(inode);
	return err;
}

static int ext4_link(struct dentry *old_dentry,
		     struct inode *dir, struct dentry *dentry)
{
	handle_t *handle;
	struct inode *inode = old_dentry->d_inode;
	int err, retries = 0;

	if (inode->i_nlink >= EXT4_LINK_MAX)
		return -EMLINK;
	if (ext4_encrypted_inode(dir) &&
	    !ext4_is_child_context_consistent_with_parent(dir, inode))
		return -EPERM;
	dquot_initialize(dir);

retry:
	handle = ext4_journal_start(dir, EXT4_HT_DIR,
		(EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
		 EXT4_INDEX_EXTRA_TRANS_BLOCKS) + 1);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	if (IS_DIRSYNC(dir))
		ext4_handle_sync(handle);

	inode->i_ctime = ext4_current_time(inode);
	ext4_inc_count(handle, inode);
	ihold(inode);

	err = ext4_add_entry(handle, dentry, inode);
	if (!err) {
		ext4_mark_inode_dirty(handle, inode);
		/* this can happen only for tmpfile being
		 * linked the first time
		 */
		if (inode->i_nlink == 1)
			ext4_orphan_del(handle, inode);
		d_instantiate(dentry, inode);
	} else {
		drop_nlink(inode);
		iput(inode);
	}
	ext4_journal_stop(handle);
	if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries))
		goto retry;
	return err;
}


/*
 * Try to find buffer head where contains the parent block.
 * It should be the inode block if it is inlined or the 1st block
 * if it is a normal dir.
 */
static struct buffer_head *ext4_get_first_dir_block(handle_t *handle,
					struct inode *inode,
					int *retval,
					struct ext4_dir_entry_2 **parent_de,
					int *inlined)
{
	struct buffer_head *bh;

	if (!ext4_has_inline_data(inode)) {
		bh = ext4_read_dirblock(inode, 0, EITHER);
		if (IS_ERR(bh)) {
			*retval = PTR_ERR(bh);
			return NULL;
		}
		*parent_de = ext4_next_entry(
					(struct ext4_dir_entry_2 *)bh->b_data,
					inode->i_sb->s_blocksize);
		return bh;
	}

	*inlined = 1;
	return ext4_get_first_inline_block(inode, parent_de, retval);
}

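/*
 * Per-side rename state: the directory, dentry and inode involved, plus the
 * directory entry found for it and, for directories, the ".." entry.
 */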
struct ext4_renament {
	struct inode *dir;
	struct dentry *dentry;
	struct inode *inode;
	bool is_dir;
	int dir_nlink_delta;

	/* entry for "dentry" */
	struct buffer_head *bh;
	struct ext4_dir_entry_2 *de;
	int inlined;

	/* entry for ".." in inode if it's a directory */
	struct buffer_head *dir_bh;
	struct ext4_dir_entry_2 *parent_de;
	int dir_inlined;
};

static int ext4_rename_dir_prepare(handle_t *handle, struct ext4_renament *ent)
{
	int retval;

	ent->dir_bh = ext4_get_first_dir_block(handle, ent->inode,
					      &retval, &ent->parent_de,
					      &ent->dir_inlined);
	if (!ent->dir_bh)
		return retval;
	if (le32_to_cpu(ent->parent_de->inode) != ent->dir->i_ino)
		return -EIO;
	BUFFER_TRACE(ent->dir_bh, "get_write_access");
	return ext4_journal_get_write_access(handle, ent->dir_bh);
}

static int ext4_rename_dir_finish(handle_t *handle, struct ext4_renament *ent,
				  unsigned dir_ino)
{
	int retval;

	ent->parent_de->inode = cpu_to_le32(dir_ino);
	BUFFER_TRACE(ent->dir_bh, "call ext4_handle_dirty_metadata");
	if (!ent->dir_inlined) {
		if (is_dx(ent->inode)) {
			retval = ext4_handle_dirty_dx_node(handle,
							   ent->inode,
							   ent->dir_bh);
		} else {
			retval = ext4_handle_dirty_dirent_node(handle,
							       ent->inode,
							       ent->dir_bh);
		}
	} else {
		retval = ext4_mark_inode_dirty(handle, ent->inode);
	}
	if (retval) {
		ext4_std_error(ent->dir->i_sb, retval);
		return retval;
	}
	return 0;
}

static int ext4_setent(handle_t *handle, struct ext4_renament *ent,
		       unsigned ino, unsigned file_type)
{
	int retval;

	BUFFER_TRACE(ent->bh, "get write access");
	retval = ext4_journal_get_write_access(handle, ent->bh);
	if (retval)
		return retval;
	ent->de->inode = cpu_to_le32(ino);
	if (EXT4_HAS_INCOMPAT_FEATURE(ent->dir->i_sb,
				      EXT4_FEATURE_INCOMPAT_FILETYPE))
		ent->de->file_type = file_type;
	ent->dir->i_version++;
	ent->dir->i_ctime = ent->dir->i_mtime =
		ext4_current_time(ent->dir);
	ext4_mark_inode_dirty(handle, ent->dir);
	BUFFER_TRACE(ent->bh, "call ext4_handle_dirty_metadata");
	if (!ent->inlined) {
		retval = ext4_handle_dirty_dirent_node(handle,
						       ent->dir, ent->bh);
		if (unlikely(retval)) {
			ext4_std_error(ent->dir->i_sb, retval);
			return retval;
		}
	}
	brelse(ent->bh);
	ent->bh = NULL;

	return 0;
}

static int ext4_find_delete_entry(handle_t *handle, struct inode *dir,
				  const struct qstr *d_name)
{
	int retval = -ENOENT;
	struct buffer_head *bh;
	struct ext4_dir_entry_2 *de;

	bh = ext4_find_entry(dir, d_name, &de, NULL);
	if (IS_ERR(bh))
		return PTR_ERR(bh);
	if (bh) {
		retval = ext4_delete_entry(handle, dir, de, bh);
		brelse(bh);
	}
	return retval;
}

static void ext4_rename_delete(handle_t *handle, struct ext4_renament *ent,
			       int force_reread)
{
	int retval;
	/*
	 * ent->de could have moved from under us during htree split, so make
	 * sure that we are deleting the right entry.  We might also be pointing
	 * to a stale entry in the unused part of ent->bh so just checking inum
	 * and the name isn't enough.
	 */
	if (le32_to_cpu(ent->de->inode) != ent->inode->i_ino ||
	    ent->de->name_len != ent->dentry->d_name.len ||
	    strncmp(ent->de->name, ent->dentry->d_name.name,
		    ent->de->name_len) ||
	    force_reread) {
		retval = ext4_find_delete_entry(handle, ent->dir,
						&ent->dentry->d_name);
	} else {
		retval = ext4_delete_entry(handle, ent->dir, ent->de, ent->bh);
		if (retval == -ENOENT) {
			retval = ext4_find_delete_entry(handle, ent->dir,
							&ent->dentry->d_name);
		}
	}

	if (retval) {
		ext4_warning(ent->dir->i_sb,
				"Deleting old file (%lu), %d, error=%d",
				ent->dir->i_ino, ent->dir->i_nlink, retval);
	}
}

static void ext4_update_dir_count(handle_t *handle, struct ext4_renament *ent)
{
	if (ent->dir_nlink_delta) {
		if (ent->dir_nlink_delta == -1)
			ext4_dec_count(handle, ent->dir);
		else
			ext4_inc_count(handle, ent->dir);
		ext4_mark_inode_dirty(handle, ent->dir);
	}
}

static struct inode *ext4_whiteout_for_rename(struct ext4_renament *ent,
					      int credits, handle_t **h)
{
	struct inode *wh;
	handle_t *handle;
	int retries = 0;

	/*
	 * for inode block, sb block, group summaries,
	 * and inode bitmap
	 */
	credits += (EXT4_MAXQUOTAS_TRANS_BLOCKS(ent->dir->i_sb) +
		    EXT4_XATTR_TRANS_BLOCKS + 4);
retry:
	wh = ext4_new_inode_start_handle(ent->dir, S_IFCHR | WHITEOUT_MODE,
					 &ent->dentry->d_name, 0, NULL,
					 EXT4_HT_DIR, credits);

	handle = ext4_journal_current_handle();
	if (IS_ERR(wh)) {
		if (handle)
			ext4_journal_stop(handle);
		if (PTR_ERR(wh) == -ENOSPC &&
		    ext4_should_retry_alloc(ent->dir->i_sb, &retries))
			goto retry;
	} else {
		*h = handle;
		init_special_inode(wh, wh->i_mode, WHITEOUT_DEV);
		wh->i_op = &ext4_special_inode_operations;
	}
	return wh;
}

/*
 * Anybody can rename anything with this: the permission checks are left to the
 * higher-level routines.
 *
 * n.b.  old_{dentry,inode) refers to the source dentry/inode
 * while new_{dentry,inode) refers to the destination dentry/inode
 * This comes from rename(const char *oldpath, const char *newpath)
 */
static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
		       struct inode *new_dir, struct dentry *new_dentry,
		       unsigned int flags)
{
	handle_t *handle = NULL;
	struct ext4_renament old = {
		.dir = old_dir,
		.dentry = old_dentry,
		.inode = old_dentry->d_inode,
	};
	struct ext4_renament new = {
		.dir = new_dir,
		.dentry = new_dentry,
		.inode = new_dentry->d_inode,
	};
	int force_reread;
	int retval;
	struct inode *whiteout = NULL;
	int credits;
	u8 old_file_type;

	dquot_initialize(old.dir);
	dquot_initialize(new.dir);

	/* Initialize quotas before so that eventual writes go
	 * in separate transaction */
	if (new.inode)
		dquot_initialize(new.inode);

	old.bh = ext4_find_entry(old.dir, &old.dentry->d_name, &old.de, NULL);
	if (IS_ERR(old.bh))
		return PTR_ERR(old.bh);
	/*
	 *  Check for inode number is _not_ due to possible IO errors.
	 *  We might rmdir the source, keep it as pwd of some process
	 *  and merrily kill the link to whatever was created under the
	 *  same name. Goodbye sticky bit ;-<
	 */
	retval = -ENOENT;
	if (!old.bh || le32_to_cpu(old.de->inode) != old.inode->i_ino)
		goto end_rename;

	if ((old.dir != new.dir) &&
	    ext4_encrypted_inode(new.dir) &&
	    !ext4_is_child_context_consistent_with_parent(new.dir,
							  old.inode)) {
		retval = -EPERM;
		goto end_rename;
	}

	new.bh = ext4_find_entry(new.dir, &new.dentry->d_name,
				 &new.de, &new.inlined);
	if (IS_ERR(new.bh)) {
		retval = PTR_ERR(new.bh);
		new.bh = NULL;
		goto end_rename;
	}
	if (new.bh) {
		if (!new.inode) {
			brelse(new.bh);
			new.bh = NULL;
		}
	}
	if (new.inode && !test_opt(new.dir->i_sb, NO_AUTO_DA_ALLOC))
		ext4_alloc_da_blocks(old.inode);

	credits = (2 * EXT4_DATA_TRANS_BLOCKS(old.dir->i_sb) +
		   EXT4_INDEX_EXTRA_TRANS_BLOCKS + 2);
	if (!(flags & RENAME_WHITEOUT)) {
		handle = ext4_journal_start(old.dir, EXT4_HT_DIR, credits);
		if (IS_ERR(handle)) {
			retval = PTR_ERR(handle);
			handle = NULL;
			goto end_rename;
		}
	} else {
		whiteout = ext4_whiteout_for_rename(&old, credits, &handle);
		if (IS_ERR(whiteout)) {
			retval = PTR_ERR(whiteout);
			whiteout = NULL;
			goto end_rename;
		}
	}

	if (IS_DIRSYNC(old.dir) || IS_DIRSYNC(new.dir))
		ext4_handle_sync(handle);

	if (S_ISDIR(old.inode->i_mode)) {
		if (new.inode) {
			retval = -ENOTEMPTY;
			if (!ext4_empty_dir(new.inode))
				goto end_rename;
		} else {
			retval = -EMLINK;
			if (new.dir != old.dir && EXT4_DIR_LINK_MAX(new.dir))
				goto end_rename;
		}
		retval = ext4_rename_dir_prepare(handle, &old);
		if (retval)
			goto end_rename;
	}
	/*
	 * If we're renaming a file within an inline_data dir and adding or
	 * setting the new dirent causes a conversion from inline_data to
	 * extents/blockmap, we need to force the dirent delete code to
	 * re-read the directory, or else we end up trying to delete a dirent
	 * from what is now the extent tree root (or a block map).
	 */
	force_reread = (new.dir->i_ino == old.dir->i_ino &&
			ext4_test_inode_flag(new.dir, EXT4_INODE_INLINE_DATA));
M
	old_file_type = old.de->file_type;
	if (whiteout) {
		/*
		 * Do this before adding a new entry, so the old entry is sure
		 * to be still pointing to the valid old entry.
		 */
		retval = ext4_setent(handle, &old, whiteout->i_ino,
				     EXT4_FT_CHRDEV);
		if (retval)
			goto end_rename;
		ext4_mark_inode_dirty(handle, whiteout);
	}
	if (!new.bh) {
		retval = ext4_add_entry(handle, new.dentry, old.inode);
		if (retval)
			goto end_rename;
	} else {
		retval = ext4_setent(handle, &new,
				     old.inode->i_ino, old_file_type);
		if (retval)
			goto end_rename;
	}
	if (force_reread)
		force_reread = !ext4_test_inode_flag(new.dir,
						     EXT4_INODE_INLINE_DATA);

	/*
	 * Like most other Unix systems, set the ctime for inodes on a
	 * rename.
	 */
	old.inode->i_ctime = ext4_current_time(old.inode);
	ext4_mark_inode_dirty(handle, old.inode);

	if (!whiteout) {
		/*
		 * ok, that's it
		 */
		ext4_rename_delete(handle, &old, force_reread);
	}

	if (new.inode) {
		ext4_dec_count(handle, new.inode);
		new.inode->i_ctime = ext4_current_time(new.inode);
	}
	old.dir->i_ctime = old.dir->i_mtime = ext4_current_time(old.dir);
	ext4_update_dx_flag(old.dir);
	if (old.dir_bh) {
		retval = ext4_rename_dir_finish(handle, &old, new.dir->i_ino);
		if (retval)
			goto end_rename;

		ext4_dec_count(handle, old.dir);
		if (new.inode) {
			/* checked ext4_empty_dir above, can't have another
			 * parent, ext4_dec_count() won't work for many-linked
			 * dirs */
			clear_nlink(new.inode);
		} else {
			ext4_inc_count(handle, new.dir);
			ext4_update_dx_flag(new.dir);
			ext4_mark_inode_dirty(handle, new.dir);
		}
	}
	ext4_mark_inode_dirty(handle, old.dir);
	if (new.inode) {
		ext4_mark_inode_dirty(handle, new.inode);
		if (!new.inode->i_nlink)
			ext4_orphan_add(handle, new.inode);
	}
	retval = 0;

end_rename:
	brelse(old.dir_bh);
	brelse(old.bh);
	brelse(new.bh);
	if (whiteout) {
		if (retval)
			drop_nlink(whiteout);
		unlock_new_inode(whiteout);
		iput(whiteout);
	}
	if (handle)
		ext4_journal_stop(handle);
	return retval;
}

static int ext4_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
			     struct inode *new_dir, struct dentry *new_dentry)
{
	handle_t *handle = NULL;
	struct ext4_renament old = {
		.dir = old_dir,
		.dentry = old_dentry,
		.inode = old_dentry->d_inode,
	};
	struct ext4_renament new = {
		.dir = new_dir,
		.dentry = new_dentry,
		.inode = new_dentry->d_inode,
	};
	u8 new_file_type;
	int retval;

	dquot_initialize(old.dir);
	dquot_initialize(new.dir);

	old.bh = ext4_find_entry(old.dir, &old.dentry->d_name,
				 &old.de, &old.inlined);
3458 3459
	if (IS_ERR(old.bh))
		return PTR_ERR(old.bh);
M
	 *  Check for inode number is _not_ due to possible IO errors.
	 *  We might rmdir the source, keep it as pwd of some process
	 *  and merrily kill the link to whatever was created under the
	 *  same name. Goodbye sticky bit ;-<
	 */
	retval = -ENOENT;
	if (!old.bh || le32_to_cpu(old.de->inode) != old.inode->i_ino)
		goto end_rename;

	new.bh = ext4_find_entry(new.dir, &new.dentry->d_name,
				 &new.de, &new.inlined);
3472 3473
	if (IS_ERR(new.bh)) {
		retval = PTR_ERR(new.bh);
3474
		new.bh = NULL;
3475 3476
		goto end_rename;
	}
M
	/* RENAME_EXCHANGE case: old *and* new must both exist */
	if (!new.bh || le32_to_cpu(new.de->inode) != new.inode->i_ino)
		goto end_rename;

	handle = ext4_journal_start(old.dir, EXT4_HT_DIR,
		(2 * EXT4_DATA_TRANS_BLOCKS(old.dir->i_sb) +
		 2 * EXT4_INDEX_EXTRA_TRANS_BLOCKS + 2));
	if (IS_ERR(handle)) {
		retval = PTR_ERR(handle);
		handle = NULL;
		goto end_rename;
	}

	if (IS_DIRSYNC(old.dir) || IS_DIRSYNC(new.dir))
		ext4_handle_sync(handle);

	if (S_ISDIR(old.inode->i_mode)) {
		old.is_dir = true;
		retval = ext4_rename_dir_prepare(handle, &old);
		if (retval)
			goto end_rename;
	}
	if (S_ISDIR(new.inode->i_mode)) {
		new.is_dir = true;
		retval = ext4_rename_dir_prepare(handle, &new);
		if (retval)
			goto end_rename;
	}

	/*
	 * Other than the special case of overwriting a directory, parents'
	 * nlink only needs to be modified if this is a cross directory rename.
	 */
	if (old.dir != new.dir && old.is_dir != new.is_dir) {
		old.dir_nlink_delta = old.is_dir ? -1 : 1;
		new.dir_nlink_delta = -old.dir_nlink_delta;
		retval = -EMLINK;
		if ((old.dir_nlink_delta > 0 && EXT4_DIR_LINK_MAX(old.dir)) ||
		    (new.dir_nlink_delta > 0 && EXT4_DIR_LINK_MAX(new.dir)))
			goto end_rename;
	}

	new_file_type = new.de->file_type;
	retval = ext4_setent(handle, &new, old.inode->i_ino, old.de->file_type);
	if (retval)
		goto end_rename;

	retval = ext4_setent(handle, &old, new.inode->i_ino, new_file_type);
	if (retval)
		goto end_rename;

	/*
	 * Like most other Unix systems, set the ctime for inodes on a
	 * rename.
	 */
	old.inode->i_ctime = ext4_current_time(old.inode);
	new.inode->i_ctime = ext4_current_time(new.inode);
	ext4_mark_inode_dirty(handle, old.inode);
	ext4_mark_inode_dirty(handle, new.inode);

	if (old.dir_bh) {
		retval = ext4_rename_dir_finish(handle, &old, new.dir->i_ino);
		if (retval)
			goto end_rename;
	}
	if (new.dir_bh) {
		retval = ext4_rename_dir_finish(handle, &new, old.dir->i_ino);
		if (retval)
			goto end_rename;
	}
	ext4_update_dir_count(handle, &old);
	ext4_update_dir_count(handle, &new);
	retval = 0;

end_rename:
	brelse(old.dir_bh);
	brelse(new.dir_bh);
	brelse(old.bh);
	brelse(new.bh);
	if (handle)
		ext4_journal_stop(handle);
	return retval;
}

M
static int ext4_rename2(struct inode *old_dir, struct dentry *old_dentry,
			struct inode *new_dir, struct dentry *new_dentry,
			unsigned int flags)
{
	if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
		return -EINVAL;

	if (flags & RENAME_EXCHANGE) {
		return ext4_cross_rename(old_dir, old_dentry,
					 new_dir, new_dentry);
	}

	return ext4_rename(old_dir, old_dentry, new_dir, new_dentry, flags);

/*
 * directories can handle most operations...
 */
const struct inode_operations ext4_dir_inode_operations = {
	.create		= ext4_create,
	.lookup		= ext4_lookup,
	.link		= ext4_link,
	.unlink		= ext4_unlink,
	.symlink	= ext4_symlink,
	.mkdir		= ext4_mkdir,
	.rmdir		= ext4_rmdir,
	.mknod		= ext4_mknod,
	.tmpfile	= ext4_tmpfile,
	.rename2	= ext4_rename2,
	.setattr	= ext4_setattr,
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= ext4_listxattr,
	.removexattr	= generic_removexattr,
	.get_acl	= ext4_get_acl,
	.set_acl	= ext4_set_acl,
	.fiemap         = ext4_fiemap,
};

const struct inode_operations ext4_special_inode_operations = {
	.setattr	= ext4_setattr,
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= ext4_listxattr,
	.removexattr	= generic_removexattr,
	.get_acl	= ext4_get_acl,
	.set_acl	= ext4_set_acl,
};