/*
 *  linux/fs/ext4/namei.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/namei.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 *  Directory entry file type support and forward compatibility hooks
 *	for B-tree directories by Theodore Ts'o (tytso@mit.edu), 1998
 *  Hash Tree Directory indexing (c)
 *	Daniel Phillips, 2001
 *  Hash Tree Directory indexing porting
 *	Christopher Li, 2002
 *  Hash Tree Directory indexing cleanup
 *	Theodore Ts'o, 2002
 */

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/jbd2.h>
#include <linux/time.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include <linux/bio.h>
#include "ext4.h"
#include "ext4_jbd2.h"

#include "xattr.h"
#include "acl.h"

#include <trace/events/ext4.h>
/*
 * define how far ahead to read directories while searching them.
 */
#define NAMEI_RA_CHUNKS  2
#define NAMEI_RA_BLOCKS  4
#define NAMEI_RA_SIZE	     (NAMEI_RA_CHUNKS * NAMEI_RA_BLOCKS)

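/*
 * Append a new block to the directory @inode: extend i_size, map the
 * block with ext4_bread(), and get journal write access on the new
 * buffer.  Returns ERR_PTR(-ENOSPC) once the directory would exceed
 * the s_max_dir_size_kb limit (when one is set).
 */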
static struct buffer_head *ext4_append(handle_t *handle,
					struct inode *inode,
					ext4_lblk_t *block)
{
	struct buffer_head *bh;
	int err = 0;

	if (unlikely(EXT4_SB(inode->i_sb)->s_max_dir_size_kb &&
		     ((inode->i_size >> 10) >=
		      EXT4_SB(inode->i_sb)->s_max_dir_size_kb)))
		return ERR_PTR(-ENOSPC);

	*block = inode->i_size >> inode->i_sb->s_blocksize_bits;

	bh = ext4_bread(handle, inode, *block, 1, &err);
	if (!bh)
		return ERR_PTR(err);
	inode->i_size += inode->i_sb->s_blocksize;
	EXT4_I(inode)->i_disksize = inode->i_size;
	err = ext4_journal_get_write_access(handle, bh);
	if (err) {
		brelse(bh);
		ext4_std_error(inode->i_sb, err);
		return ERR_PTR(err);
	}
	return bh;
}

static int ext4_dx_csum_verify(struct inode *inode,
			       struct ext4_dir_entry *dirent);

typedef enum {
	EITHER, INDEX, DIRENT
} dirblock_type_t;

#define ext4_read_dirblock(inode, block, type) \
	__ext4_read_dirblock((inode), (block), (type), __LINE__)

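/*
 * Read directory block @block of @inode and sanity-check it against the
 * expected @type (htree INDEX node or DIRENT leaf), verifying the
 * metadata checksum when the feature is enabled.  Returns the
 * buffer_head on success or an ERR_PTR() on failure.
 */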
static struct buffer_head *__ext4_read_dirblock(struct inode *inode,
					      ext4_lblk_t block,
					      dirblock_type_t type,
					      unsigned int line)
{
	struct buffer_head *bh;
	struct ext4_dir_entry *dirent;
	int err = 0, is_dx_block = 0;

	bh = ext4_bread(NULL, inode, block, 0, &err);
	if (!bh) {
		if (err == 0) {
			ext4_error_inode(inode, __func__, line, block,
					       "Directory hole found");
			return ERR_PTR(-EIO);
		}
		__ext4_warning(inode->i_sb, __func__, line,
			       "error reading directory block "
			       "(ino %lu, block %lu)", inode->i_ino,
			       (unsigned long) block);
		return ERR_PTR(err);
	}
	dirent = (struct ext4_dir_entry *) bh->b_data;
	/* Determine whether or not we have an index block */
	if (is_dx(inode)) {
		if (block == 0)
			is_dx_block = 1;
		else if (ext4_rec_len_from_disk(dirent->rec_len,
						inode->i_sb->s_blocksize) ==
			 inode->i_sb->s_blocksize)
			is_dx_block = 1;
	}
	if (!is_dx_block && type == INDEX) {
		ext4_error_inode(inode, __func__, line, block,
		       "directory leaf block found instead of index block");
		return ERR_PTR(-EIO);
	}
	if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
					EXT4_FEATURE_RO_COMPAT_METADATA_CSUM) ||
	    buffer_verified(bh))
		return bh;

	/*
	 * An empty leaf block can get mistaken for an index block; for
	 * this reason, we can only check the index checksum when the
	 * caller is sure it should be an index block.
	 */
	if (is_dx_block && type == INDEX) {
		if (ext4_dx_csum_verify(inode, dirent))
			set_buffer_verified(bh);
		else {
			ext4_error_inode(inode, __func__, line, block,
				"Directory index failed checksum");
			brelse(bh);
			return ERR_PTR(-EIO);
		}
	}
	if (!is_dx_block) {
		if (ext4_dirent_csum_verify(inode, dirent))
			set_buffer_verified(bh);
		else {
			ext4_error_inode(inode, __func__, line, block,
				"Directory block failed checksum");
			brelse(bh);
			return ERR_PTR(-EIO);
		}
	}
	return bh;
}

#ifndef assert
#define assert(test) J_ASSERT(test)
#endif

#ifdef DX_DEBUG
#define dxtrace(command) command
#else
#define dxtrace(command)
#endif

struct fake_dirent
{
	__le32 inode;
	__le16 rec_len;
	u8 name_len;
	u8 file_type;
};

struct dx_countlimit
{
	__le16 limit;
	__le16 count;
};

struct dx_entry
{
	__le32 hash;
	__le32 block;
};

/*
 * dx_root_info is laid out so that if it should somehow get overlaid by a
 * dirent the two low bits of the hash version will be zero.  Therefore, the
 * hash version mod 4 should never be 0.  Sincerely, the paranoia department.
 */

struct dx_root
{
	struct fake_dirent dot;
	char dot_name[4];
	struct fake_dirent dotdot;
	char dotdot_name[4];
	struct dx_root_info
	{
		__le32 reserved_zero;
		u8 hash_version;
		u8 info_length; /* 8 */
		u8 indirect_levels;
		u8 unused_flags;
	}
	info;
	struct dx_entry	entries[0];
};

struct dx_node
{
	struct fake_dirent fake;
	struct dx_entry	entries[0];
};


struct dx_frame
{
	struct buffer_head *bh;
	struct dx_entry *entries;
	struct dx_entry *at;
};

struct dx_map_entry
{
	u32 hash;
	u16 offs;
	u16 size;
};

/*
 * This goes at the end of each htree block.
 */
struct dx_tail {
	u32 dt_reserved;
	__le32 dt_checksum;	/* crc32c(uuid+inum+dirblock) */
};

static inline ext4_lblk_t dx_get_block(struct dx_entry *entry);
static void dx_set_block(struct dx_entry *entry, ext4_lblk_t value);
static inline unsigned dx_get_hash(struct dx_entry *entry);
static void dx_set_hash(struct dx_entry *entry, unsigned value);
static unsigned dx_get_count(struct dx_entry *entries);
static unsigned dx_get_limit(struct dx_entry *entries);
static void dx_set_count(struct dx_entry *entries, unsigned value);
static void dx_set_limit(struct dx_entry *entries, unsigned value);
static unsigned dx_root_limit(struct inode *dir, unsigned infosize);
static unsigned dx_node_limit(struct inode *dir);
static struct dx_frame *dx_probe(const struct qstr *d_name,
				 struct inode *dir,
				 struct dx_hash_info *hinfo,
				 struct dx_frame *frame,
				 int *err);
static void dx_release(struct dx_frame *frames);
static int dx_make_map(struct ext4_dir_entry_2 *de, unsigned blocksize,
		       struct dx_hash_info *hinfo, struct dx_map_entry map[]);
static void dx_sort_map(struct dx_map_entry *map, unsigned count);
static struct ext4_dir_entry_2 *dx_move_dirents(char *from, char *to,
		struct dx_map_entry *offsets, int count, unsigned blocksize);
static struct ext4_dir_entry_2* dx_pack_dirents(char *base, unsigned blocksize);
static void dx_insert_block(struct dx_frame *frame,
					u32 hash, ext4_lblk_t block);
static int ext4_htree_next_block(struct inode *dir, __u32 hash,
				 struct dx_frame *frame,
				 struct dx_frame *frames,
				 __u32 *start_hash);
static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
		const struct qstr *d_name,
		struct ext4_dir_entry_2 **res_dir,
		int *err);
static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry,
			     struct inode *inode);

/* checksumming functions */
void initialize_dirent_tail(struct ext4_dir_entry_tail *t,
			    unsigned int blocksize)
{
	memset(t, 0, sizeof(struct ext4_dir_entry_tail));
	t->det_rec_len = ext4_rec_len_to_disk(
			sizeof(struct ext4_dir_entry_tail), blocksize);
	t->det_reserved_ft = EXT4_FT_DIR_CSUM;
}

/* Walk through a dirent block to find a checksum "dirent" at the tail */
static struct ext4_dir_entry_tail *get_dirent_tail(struct inode *inode,
						   struct ext4_dir_entry *de)
{
	struct ext4_dir_entry_tail *t;

#ifdef PARANOID
	struct ext4_dir_entry *d, *top;

	d = de;
	top = (struct ext4_dir_entry *)(((void *)de) +
		(EXT4_BLOCK_SIZE(inode->i_sb) -
		sizeof(struct ext4_dir_entry_tail)));
	while (d < top && d->rec_len)
		d = (struct ext4_dir_entry *)(((void *)d) +
		    le16_to_cpu(d->rec_len));

	if (d != top)
		return NULL;

	t = (struct ext4_dir_entry_tail *)d;
#else
	t = EXT4_DIRENT_TAIL(de, EXT4_BLOCK_SIZE(inode->i_sb));
#endif

	if (t->det_reserved_zero1 ||
	    le16_to_cpu(t->det_rec_len) != sizeof(struct ext4_dir_entry_tail) ||
	    t->det_reserved_zero2 ||
	    t->det_reserved_ft != EXT4_FT_DIR_CSUM)
		return NULL;

	return t;
}

static __le32 ext4_dirent_csum(struct inode *inode,
			       struct ext4_dir_entry *dirent, int size)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);
	__u32 csum;

	csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)dirent, size);
	return cpu_to_le32(csum);
}

static void warn_no_space_for_csum(struct inode *inode)
{
	ext4_warning(inode->i_sb, "no space in directory inode %lu leaf for "
		     "checksum.  Please run e2fsck -D.", inode->i_ino);
}

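/*
 * Verify the crc32c checksum stored in the fake "tail" dirent of a leaf
 * block.  Returns 1 if the checksum matches (or if metadata checksums
 * are not enabled on this filesystem), 0 otherwise.
 */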
int ext4_dirent_csum_verify(struct inode *inode, struct ext4_dir_entry *dirent)
{
	struct ext4_dir_entry_tail *t;

	if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
					EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
		return 1;

	t = get_dirent_tail(inode, dirent);
	if (!t) {
		warn_no_space_for_csum(inode);
		return 0;
	}

	if (t->det_checksum != ext4_dirent_csum(inode, dirent,
						(void *)t - (void *)dirent))
		return 0;

	return 1;
}

static void ext4_dirent_csum_set(struct inode *inode,
				 struct ext4_dir_entry *dirent)
{
	struct ext4_dir_entry_tail *t;

	if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
					EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
		return;

	t = get_dirent_tail(inode, dirent);
	if (!t) {
		warn_no_space_for_csum(inode);
		return;
	}

	t->det_checksum = ext4_dirent_csum(inode, dirent,
					   (void *)t - (void *)dirent);
}

int ext4_handle_dirty_dirent_node(handle_t *handle,
				  struct inode *inode,
				  struct buffer_head *bh)
{
	ext4_dirent_csum_set(inode, (struct ext4_dir_entry *)bh->b_data);
	return ext4_handle_dirty_metadata(handle, inode, bh);
}

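/*
 * Locate the dx_countlimit header inside an htree block: offset 8 for an
 * interior node (a single fake dirent spans the block) or offset 32 for
 * the root block, where ".", ".." and dx_root_info come first.  Returns
 * NULL if the block does not look like an htree node.
 */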
static struct dx_countlimit *get_dx_countlimit(struct inode *inode,
					       struct ext4_dir_entry *dirent,
					       int *offset)
{
	struct ext4_dir_entry *dp;
	struct dx_root_info *root;
	int count_offset;

	if (le16_to_cpu(dirent->rec_len) == EXT4_BLOCK_SIZE(inode->i_sb))
		count_offset = 8;
	else if (le16_to_cpu(dirent->rec_len) == 12) {
		dp = (struct ext4_dir_entry *)(((void *)dirent) + 12);
		if (le16_to_cpu(dp->rec_len) !=
		    EXT4_BLOCK_SIZE(inode->i_sb) - 12)
			return NULL;
		root = (struct dx_root_info *)(((void *)dp + 12));
		if (root->reserved_zero ||
		    root->info_length != sizeof(struct dx_root_info))
			return NULL;
		count_offset = 32;
	} else
		return NULL;

	if (offset)
		*offset = count_offset;
	return (struct dx_countlimit *)(((void *)dirent) + count_offset);
}

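/*
 * Compute the crc32c of an htree block: the header plus the in-use
 * dx_entry array, followed by the dx_tail with its checksum field
 * temporarily zeroed.
 */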
static __le32 ext4_dx_csum(struct inode *inode, struct ext4_dir_entry *dirent,
			   int count_offset, int count, struct dx_tail *t)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);
	__u32 csum;
	__le32 save_csum;
	int size;

	size = count_offset + (count * sizeof(struct dx_entry));
	save_csum = t->dt_checksum;
	t->dt_checksum = 0;
	csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)dirent, size);
	csum = ext4_chksum(sbi, csum, (__u8 *)t, sizeof(struct dx_tail));
	t->dt_checksum = save_csum;

	return cpu_to_le32(csum);
}

static int ext4_dx_csum_verify(struct inode *inode,
			       struct ext4_dir_entry *dirent)
{
	struct dx_countlimit *c;
	struct dx_tail *t;
	int count_offset, limit, count;

	if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
					EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
		return 1;

	c = get_dx_countlimit(inode, dirent, &count_offset);
	if (!c) {
		EXT4_ERROR_INODE(inode, "dir seems corrupt?  Run e2fsck -D.");
		return 1;
	}
	limit = le16_to_cpu(c->limit);
	count = le16_to_cpu(c->count);
	if (count_offset + (limit * sizeof(struct dx_entry)) >
	    EXT4_BLOCK_SIZE(inode->i_sb) - sizeof(struct dx_tail)) {
		warn_no_space_for_csum(inode);
		return 1;
	}
	t = (struct dx_tail *)(((struct dx_entry *)c) + limit);

	if (t->dt_checksum != ext4_dx_csum(inode, dirent, count_offset,
					    count, t))
		return 0;
	return 1;
}

static void ext4_dx_csum_set(struct inode *inode, struct ext4_dir_entry *dirent)
{
	struct dx_countlimit *c;
	struct dx_tail *t;
	int count_offset, limit, count;

	if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
					EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
		return;

	c = get_dx_countlimit(inode, dirent, &count_offset);
	if (!c) {
		EXT4_ERROR_INODE(inode, "dir seems corrupt?  Run e2fsck -D.");
		return;
	}
	limit = le16_to_cpu(c->limit);
	count = le16_to_cpu(c->count);
	if (count_offset + (limit * sizeof(struct dx_entry)) >
	    EXT4_BLOCK_SIZE(inode->i_sb) - sizeof(struct dx_tail)) {
		warn_no_space_for_csum(inode);
		return;
	}
	t = (struct dx_tail *)(((struct dx_entry *)c) + limit);

	t->dt_checksum = ext4_dx_csum(inode, dirent, count_offset, count, t);
}

static inline int ext4_handle_dirty_dx_node(handle_t *handle,
					    struct inode *inode,
					    struct buffer_head *bh)
{
	ext4_dx_csum_set(inode, (struct ext4_dir_entry *)bh->b_data);
	return ext4_handle_dirty_metadata(handle, inode, bh);
}

/*
 * p is at least 6 bytes before the end of page
 */
static inline struct ext4_dir_entry_2 *
ext4_next_entry(struct ext4_dir_entry_2 *p, unsigned long blocksize)
{
	return (struct ext4_dir_entry_2 *)((char *)p +
		ext4_rec_len_from_disk(p->rec_len, blocksize));
}

/*
 * Future: use high four bits of block for coalesce-on-delete flags
 * Mask them off for now.
 */

static inline ext4_lblk_t dx_get_block(struct dx_entry *entry)
{
	return le32_to_cpu(entry->block) & 0x00ffffff;
}

static inline void dx_set_block(struct dx_entry *entry, ext4_lblk_t value)
{
	entry->block = cpu_to_le32(value);
}

static inline unsigned dx_get_hash(struct dx_entry *entry)
{
	return le32_to_cpu(entry->hash);
}

static inline void dx_set_hash(struct dx_entry *entry, unsigned value)
{
	entry->hash = cpu_to_le32(value);
}

static inline unsigned dx_get_count(struct dx_entry *entries)
{
	return le16_to_cpu(((struct dx_countlimit *) entries)->count);
}

static inline unsigned dx_get_limit(struct dx_entry *entries)
{
	return le16_to_cpu(((struct dx_countlimit *) entries)->limit);
}

static inline void dx_set_count(struct dx_entry *entries, unsigned value)
{
	((struct dx_countlimit *) entries)->count = cpu_to_le16(value);
}

static inline void dx_set_limit(struct dx_entry *entries, unsigned value)
{
	((struct dx_countlimit *) entries)->limit = cpu_to_le16(value);
}

static inline unsigned dx_root_limit(struct inode *dir, unsigned infosize)
{
	unsigned entry_space = dir->i_sb->s_blocksize - EXT4_DIR_REC_LEN(1) -
		EXT4_DIR_REC_LEN(2) - infosize;

	if (EXT4_HAS_RO_COMPAT_FEATURE(dir->i_sb,
				       EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
		entry_space -= sizeof(struct dx_tail);
	return entry_space / sizeof(struct dx_entry);
}

static inline unsigned dx_node_limit(struct inode *dir)
{
	unsigned entry_space = dir->i_sb->s_blocksize - EXT4_DIR_REC_LEN(0);

	if (EXT4_HAS_RO_COMPAT_FEATURE(dir->i_sb,
				       EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
		entry_space -= sizeof(struct dx_tail);
	return entry_space / sizeof(struct dx_entry);
}

/*
 * Debug
 */
#ifdef DX_DEBUG
static void dx_show_index(char * label, struct dx_entry *entries)
{
	int i, n = dx_get_count (entries);
	printk(KERN_DEBUG "%s index ", label);
	for (i = 0; i < n; i++) {
		printk("%x->%lu ", i ? dx_get_hash(entries + i) :
				0, (unsigned long)dx_get_block(entries + i));
	}
	printk("\n");
}

struct stats
{
	unsigned names;
	unsigned space;
	unsigned bcount;
};

static struct stats dx_show_leaf(struct dx_hash_info *hinfo, struct ext4_dir_entry_2 *de,
				 int size, int show_names)
{
	unsigned names = 0, space = 0;
	char *base = (char *) de;
	struct dx_hash_info h = *hinfo;

	printk("names: ");
	while ((char *) de < base + size)
	{
		if (de->inode)
		{
			if (show_names)
			{
				int len = de->name_len;
				char *name = de->name;
				while (len--) printk("%c", *name++);
				ext4fs_dirhash(de->name, de->name_len, &h);
				printk(":%x.%u ", h.hash,
				       (unsigned) ((char *) de - base));
			}
			space += EXT4_DIR_REC_LEN(de->name_len);
			names++;
		}
		de = ext4_next_entry(de, size);
	}
	printk("(%i)\n", names);
	return (struct stats) { names, space, 1 };
}

struct stats dx_show_entries(struct dx_hash_info *hinfo, struct inode *dir,
			     struct dx_entry *entries, int levels)
{
	unsigned blocksize = dir->i_sb->s_blocksize;
	unsigned count = dx_get_count(entries), names = 0, space = 0, i;
	unsigned bcount = 0;
	struct buffer_head *bh;
	int err;
	printk("%i indexed blocks...\n", count);
	for (i = 0; i < count; i++, entries++)
	{
		ext4_lblk_t block = dx_get_block(entries);
		ext4_lblk_t hash  = i ? dx_get_hash(entries): 0;
		u32 range = i < count - 1? (dx_get_hash(entries + 1) - hash): ~hash;
		struct stats stats;
		printk("%s%3u:%03u hash %8x/%8x ",levels?"":"   ", i, block, hash, range);
		if (!(bh = ext4_bread (NULL,dir, block, 0,&err))) continue;
		stats = levels?
		   dx_show_entries(hinfo, dir, ((struct dx_node *) bh->b_data)->entries, levels - 1):
		   dx_show_leaf(hinfo, (struct ext4_dir_entry_2 *) bh->b_data, blocksize, 0);
		names += stats.names;
		space += stats.space;
		bcount += stats.bcount;
		brelse(bh);
	}
	if (bcount)
		printk(KERN_DEBUG "%snames %u, fullness %u (%u%%)\n",
		       levels ? "" : "   ", names, space/bcount,
		       (space/bcount)*100/blocksize);
	return (struct stats) { names, space, bcount};
}
#endif /* DX_DEBUG */

/*
 * Probe for a directory leaf block to search.
 *
 * dx_probe can return ERR_BAD_DX_DIR, which means there was a format
 * error in the directory index, and the caller should fall back to
 * searching the directory normally.  The callers of dx_probe **MUST**
 * check for this error code, and make sure it never gets reflected
 * back to userspace.
 */
static struct dx_frame *
dx_probe(const struct qstr *d_name, struct inode *dir,
	 struct dx_hash_info *hinfo, struct dx_frame *frame_in, int *err)
{
	unsigned count, indirect;
	struct dx_entry *at, *entries, *p, *q, *m;
	struct dx_root *root;
	struct buffer_head *bh;
	struct dx_frame *frame = frame_in;
	u32 hash;

	frame->bh = NULL;
	bh = ext4_read_dirblock(dir, 0, INDEX);
	if (IS_ERR(bh)) {
		*err = PTR_ERR(bh);
		goto fail;
	}
	root = (struct dx_root *) bh->b_data;
	if (root->info.hash_version != DX_HASH_TEA &&
	    root->info.hash_version != DX_HASH_HALF_MD4 &&
	    root->info.hash_version != DX_HASH_LEGACY) {
		ext4_warning(dir->i_sb, "Unrecognised inode hash code %d",
			     root->info.hash_version);
		brelse(bh);
		*err = ERR_BAD_DX_DIR;
		goto fail;
	}
	hinfo->hash_version = root->info.hash_version;
	if (hinfo->hash_version <= DX_HASH_TEA)
		hinfo->hash_version += EXT4_SB(dir->i_sb)->s_hash_unsigned;
	hinfo->seed = EXT4_SB(dir->i_sb)->s_hash_seed;
	if (d_name)
		ext4fs_dirhash(d_name->name, d_name->len, hinfo);
	hash = hinfo->hash;

	if (root->info.unused_flags & 1) {
		ext4_warning(dir->i_sb, "Unimplemented inode hash flags: %#06x",
			     root->info.unused_flags);
		brelse(bh);
		*err = ERR_BAD_DX_DIR;
		goto fail;
	}

	if ((indirect = root->info.indirect_levels) > 1) {
		ext4_warning(dir->i_sb, "Unimplemented inode hash depth: %#06x",
			     root->info.indirect_levels);
		brelse(bh);
		*err = ERR_BAD_DX_DIR;
		goto fail;
	}

	entries = (struct dx_entry *) (((char *)&root->info) +
				       root->info.info_length);

	if (dx_get_limit(entries) != dx_root_limit(dir,
						   root->info.info_length)) {
		ext4_warning(dir->i_sb, "dx entry: limit != root limit");
		brelse(bh);
		*err = ERR_BAD_DX_DIR;
		goto fail;
	}

	dxtrace(printk("Look up %x", hash));
	while (1)
	{
		count = dx_get_count(entries);
		if (!count || count > dx_get_limit(entries)) {
			ext4_warning(dir->i_sb,
				     "dx entry: no count or count > limit");
			brelse(bh);
			*err = ERR_BAD_DX_DIR;
			goto fail2;
		}

		p = entries + 1;
		q = entries + count - 1;
		while (p <= q)
		{
			m = p + (q - p)/2;
			dxtrace(printk("."));
			if (dx_get_hash(m) > hash)
				q = m - 1;
			else
				p = m + 1;
		}

		if (0) // linear search cross check
		{
			unsigned n = count - 1;
			at = entries;
			while (n--)
			{
				dxtrace(printk(","));
				if (dx_get_hash(++at) > hash)
				{
					at--;
					break;
				}
			}
			assert (at == p - 1);
		}

		at = p - 1;
		dxtrace(printk(" %x->%u\n", at == entries? 0: dx_get_hash(at), dx_get_block(at)));
		frame->bh = bh;
		frame->entries = entries;
		frame->at = at;
		if (!indirect--) return frame;
		bh = ext4_read_dirblock(dir, dx_get_block(at), INDEX);
		if (IS_ERR(bh)) {
			*err = PTR_ERR(bh);
			goto fail2;
		}
		entries = ((struct dx_node *) bh->b_data)->entries;

		if (dx_get_limit(entries) != dx_node_limit (dir)) {
			ext4_warning(dir->i_sb,
				     "dx entry: limit != node limit");
			brelse(bh);
			*err = ERR_BAD_DX_DIR;
			goto fail2;
		}
		frame++;
		frame->bh = NULL;
	}
fail2:
	while (frame >= frame_in) {
		brelse(frame->bh);
		frame--;
	}
fail:
	if (*err == ERR_BAD_DX_DIR)
		ext4_warning(dir->i_sb,
			     "Corrupt dir inode %lu, running e2fsck is "
			     "recommended.", dir->i_ino);
	return NULL;
}

static void dx_release (struct dx_frame *frames)
{
	if (frames[0].bh == NULL)
		return;

	if (((struct dx_root *) frames[0].bh->b_data)->info.indirect_levels)
		brelse(frames[1].bh);
	brelse(frames[0].bh);
}

/*
 * This function increments the frame pointer to search the next leaf
 * block, and reads in the necessary intervening nodes if the search
 * should be necessary.  Whether or not the search is necessary is
 * controlled by the hash parameter.  If the hash value is even, then
 * the search is only continued if the next block starts with that
 * hash value.  This is used if we are searching for a specific file.
 *
 * If the hash value is HASH_NB_ALWAYS, then always go to the next block.
 *
 * This function returns 1 if the caller should continue to search,
 * or 0 if it should not.  If there is an error reading one of the
 * index blocks, it will return a negative error code.
 *
 * If start_hash is non-null, it will be filled in with the starting
 * hash of the next page.
 */
static int ext4_htree_next_block(struct inode *dir, __u32 hash,
				 struct dx_frame *frame,
				 struct dx_frame *frames,
				 __u32 *start_hash)
{
	struct dx_frame *p;
	struct buffer_head *bh;
	int num_frames = 0;
	__u32 bhash;

	p = frame;
	/*
	 * Find the next leaf page by incrementing the frame pointer.
	 * If we run out of entries in the interior node, loop around and
	 * increment pointer in the parent node.  When we break out of
	 * this loop, num_frames indicates the number of interior
 * nodes that need to be read.
	 */
	while (1) {
		if (++(p->at) < p->entries + dx_get_count(p->entries))
			break;
		if (p == frames)
			return 0;
		num_frames++;
		p--;
	}

	/*
	 * If the hash is 1, then continue only if the next page has a
	 * continuation hash of any value.  This is used for readdir
	 * handling.  Otherwise, check to see if the hash matches the
 * desired continuation hash.  If it doesn't, return since
	 * there's no point to read in the successive index pages.
	 */
	bhash = dx_get_hash(p->at);
	if (start_hash)
		*start_hash = bhash;
	if ((hash & 1) == 0) {
		if ((bhash & ~1) != hash)
			return 0;
	}
	/*
	 * If the hash is HASH_NB_ALWAYS, we always go to the next
	 * block so no check is necessary
	 */
	while (num_frames--) {
		bh = ext4_read_dirblock(dir, dx_get_block(p->at), INDEX);
		if (IS_ERR(bh))
			return PTR_ERR(bh);
		p++;
		brelse(p->bh);
		p->bh = bh;
		p->at = p->entries = ((struct dx_node *) bh->b_data)->entries;
	}
	return 1;
}


/*
 * This function fills a red-black tree with information from a
 * directory block.  It returns the number of directory entries loaded
 * into the tree.  If there is an error it is returned in err.
 */
static int htree_dirblock_to_tree(struct file *dir_file,
				  struct inode *dir, ext4_lblk_t block,
				  struct dx_hash_info *hinfo,
				  __u32 start_hash, __u32 start_minor_hash)
{
	struct buffer_head *bh;
	struct ext4_dir_entry_2 *de, *top;
	int err = 0, count = 0;

	dxtrace(printk(KERN_INFO "In htree dirblock_to_tree: block %lu\n",
							(unsigned long)block));
	bh = ext4_read_dirblock(dir, block, DIRENT);
	if (IS_ERR(bh))
		return PTR_ERR(bh);

	de = (struct ext4_dir_entry_2 *) bh->b_data;
	top = (struct ext4_dir_entry_2 *) ((char *) de +
					   dir->i_sb->s_blocksize -
					   EXT4_DIR_REC_LEN(0));
	for (; de < top; de = ext4_next_entry(de, dir->i_sb->s_blocksize)) {
		if (ext4_check_dir_entry(dir, NULL, de, bh,
				bh->b_data, bh->b_size,
				(block<<EXT4_BLOCK_SIZE_BITS(dir->i_sb))
					 + ((char *)de - bh->b_data))) {
			/* silently ignore the rest of the block */
			break;
		}
		ext4fs_dirhash(de->name, de->name_len, hinfo);
		if ((hinfo->hash < start_hash) ||
		    ((hinfo->hash == start_hash) &&
		     (hinfo->minor_hash < start_minor_hash)))
			continue;
		if (de->inode == 0)
			continue;
		if ((err = ext4_htree_store_dirent(dir_file,
				   hinfo->hash, hinfo->minor_hash, de)) != 0) {
			brelse(bh);
			return err;
		}
		count++;
	}
	brelse(bh);
	return count;
}


/*
 * This function fills a red-black tree with information from a
 * directory.  We start scanning the directory in hash order, starting
 * at start_hash and start_minor_hash.
 *
 * This function returns the number of entries inserted into the tree,
 * or a negative error code.
 */
int ext4_htree_fill_tree(struct file *dir_file, __u32 start_hash,
			 __u32 start_minor_hash, __u32 *next_hash)
{
	struct dx_hash_info hinfo;
	struct ext4_dir_entry_2 *de;
	struct dx_frame frames[2], *frame;
	struct inode *dir;
	ext4_lblk_t block;
	int count = 0;
	int ret, err;
	__u32 hashval;

	dxtrace(printk(KERN_DEBUG "In htree_fill_tree, start hash: %x:%x\n",
		       start_hash, start_minor_hash));
	dir = file_inode(dir_file);
	if (!(ext4_test_inode_flag(dir, EXT4_INODE_INDEX))) {
		hinfo.hash_version = EXT4_SB(dir->i_sb)->s_def_hash_version;
		if (hinfo.hash_version <= DX_HASH_TEA)
			hinfo.hash_version +=
				EXT4_SB(dir->i_sb)->s_hash_unsigned;
		hinfo.seed = EXT4_SB(dir->i_sb)->s_hash_seed;
		if (ext4_has_inline_data(dir)) {
			int has_inline_data = 1;
			count = htree_inlinedir_to_tree(dir_file, dir, 0,
							&hinfo, start_hash,
							start_minor_hash,
							&has_inline_data);
			if (has_inline_data) {
				*next_hash = ~0;
				return count;
			}
		}
		count = htree_dirblock_to_tree(dir_file, dir, 0, &hinfo,
					       start_hash, start_minor_hash);
		*next_hash = ~0;
		return count;
	}
	hinfo.hash = start_hash;
	hinfo.minor_hash = 0;
	frame = dx_probe(NULL, dir, &hinfo, frames, &err);
	if (!frame)
		return err;

	/* Add '.' and '..' from the htree header */
	if (!start_hash && !start_minor_hash) {
		de = (struct ext4_dir_entry_2 *) frames[0].bh->b_data;
		if ((err = ext4_htree_store_dirent(dir_file, 0, 0, de)) != 0)
			goto errout;
		count++;
	}
	if (start_hash < 2 || (start_hash ==2 && start_minor_hash==0)) {
		de = (struct ext4_dir_entry_2 *) frames[0].bh->b_data;
		de = ext4_next_entry(de, dir->i_sb->s_blocksize);
		if ((err = ext4_htree_store_dirent(dir_file, 2, 0, de)) != 0)
			goto errout;
		count++;
	}

	while (1) {
		block = dx_get_block(frame->at);
		ret = htree_dirblock_to_tree(dir_file, dir, block, &hinfo,
					     start_hash, start_minor_hash);
		if (ret < 0) {
			err = ret;
			goto errout;
		}
		count += ret;
		hashval = ~0;
		ret = ext4_htree_next_block(dir, HASH_NB_ALWAYS,
					    frame, frames, &hashval);
		*next_hash = hashval;
		if (ret < 0) {
			err = ret;
			goto errout;
		}
		/*
		 * Stop if:  (a) there are no more entries, or
		 * (b) we have inserted at least one entry and the
		 * next hash value is not a continuation
		 */
		if ((ret == 0) ||
		    (count && ((hashval & 1) == 0)))
			break;
	}
	dx_release(frames);
	dxtrace(printk(KERN_DEBUG "Fill tree: returned %d entries, "
		       "next hash: %x\n", count, *next_hash));
	return count;
errout:
	dx_release(frames);
	return (err);
}

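/*
 * Search a single directory leaf block for @d_name; a thin wrapper
 * around search_dir() covering the whole block.
 */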
static inline int search_dirblock(struct buffer_head *bh,
				  struct inode *dir,
				  const struct qstr *d_name,
				  unsigned int offset,
				  struct ext4_dir_entry_2 **res_dir)
{
	return search_dir(bh, bh->b_data, dir->i_sb->s_blocksize, dir,
			  d_name, offset, res_dir);
}

/*
 * Directory block splitting, compacting
 */

/*
 * Create map of hash values, offsets, and sizes, stored at end of block.
 * Returns number of entries mapped.
 */
static int dx_make_map(struct ext4_dir_entry_2 *de, unsigned blocksize,
		       struct dx_hash_info *hinfo,
		       struct dx_map_entry *map_tail)
{
	int count = 0;
	char *base = (char *) de;
	struct dx_hash_info h = *hinfo;

	while ((char *) de < base + blocksize) {
		if (de->name_len && de->inode) {
			ext4fs_dirhash(de->name, de->name_len, &h);
			map_tail--;
			map_tail->hash = h.hash;
			map_tail->offs = ((char *) de - base)>>2;
			map_tail->size = le16_to_cpu(de->rec_len);
			count++;
			cond_resched();
		}
		/* XXX: do we need to check rec_len == 0 case? -Chris */
		de = ext4_next_entry(de, blocksize);
	}
	return count;
}

/* Sort map by hash value */
static void dx_sort_map (struct dx_map_entry *map, unsigned count)
{
	struct dx_map_entry *p, *q, *top = map + count - 1;
	int more;
	/* Combsort until bubble sort doesn't suck */
	while (count > 2) {
		count = count*10/13;
		if (count - 9 < 2) /* 9, 10 -> 11 */
			count = 11;
		for (p = top, q = p - count; q >= map; p--, q--)
			if (p->hash < q->hash)
				swap(*p, *q);
	}
	/* Garden variety bubble sort */
	do {
		more = 0;
		q = top;
		while (q-- > map) {
			if (q[1].hash >= q[0].hash)
				continue;
			swap(*(q+1), *q);
			more = 1;
		}
	} while(more);
}

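/*
 * Insert a new (hash, block) pair into the index node tracked by @frame,
 * immediately after the entry the frame currently points at.
 */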
static void dx_insert_block(struct dx_frame *frame, u32 hash, ext4_lblk_t block)
{
	struct dx_entry *entries = frame->entries;
	struct dx_entry *old = frame->at, *new = old + 1;
	int count = dx_get_count(entries);

	assert(count < dx_get_limit(entries));
	assert(old < entries + count);
	memmove(new + 1, new, (char *)(entries + count) - (char *)(new));
	dx_set_hash(new, hash);
	dx_set_block(new, block);
	dx_set_count(entries, count + 1);
}

/*
 * NOTE! unlike strncmp, ext4_match returns 1 for success, 0 for failure.
 *
 * `len <= EXT4_NAME_LEN' is guaranteed by caller.
 * `de != NULL' is guaranteed by caller.
 */
static inline int ext4_match (int len, const char * const name,
			      struct ext4_dir_entry_2 * de)
{
	if (len != de->name_len)
		return 0;
	if (!de->inode)
		return 0;
	return !memcmp(name, de->name, len);
}

/*
 * Returns 0 if not found, -1 on failure, and 1 on success
 */
int search_dir(struct buffer_head *bh,
	       char *search_buf,
	       int buf_size,
	       struct inode *dir,
	       const struct qstr *d_name,
	       unsigned int offset,
	       struct ext4_dir_entry_2 **res_dir)
{
	struct ext4_dir_entry_2 * de;
	char * dlimit;
	int de_len;
	const char *name = d_name->name;
	int namelen = d_name->len;

	de = (struct ext4_dir_entry_2 *)search_buf;
	dlimit = search_buf + buf_size;
	while ((char *) de < dlimit) {
		/* this code is executed quadratically often */
		/* do minimal checking `by hand' */

		if ((char *) de + namelen <= dlimit &&
		    ext4_match (namelen, name, de)) {
			/* found a match - just to be sure, do a full check */
			if (ext4_check_dir_entry(dir, NULL, de, bh, bh->b_data,
						 bh->b_size, offset))
				return -1;
			*res_dir = de;
			return 1;
		}
		/* prevent looping on a bad block */
		de_len = ext4_rec_len_from_disk(de->rec_len,
						dir->i_sb->s_blocksize);
		if (de_len <= 0)
			return -1;
		offset += de_len;
		de = (struct ext4_dir_entry_2 *) ((char *) de + de_len);
	}
	return 0;
}

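/*
 * Return 1 if @block of @dir is an htree internal (index) node rather
 * than a dirent leaf: block 0 of an indexed directory, or a block whose
 * first entry is an empty dirent covering the whole block.
 */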
static int is_dx_internal_node(struct inode *dir, ext4_lblk_t block,
			       struct ext4_dir_entry *de)
{
	struct super_block *sb = dir->i_sb;

	if (!is_dx(dir))
		return 0;
	if (block == 0)
		return 1;
	if (de->inode == 0 &&
	    ext4_rec_len_from_disk(de->rec_len, sb->s_blocksize) ==
			sb->s_blocksize)
		return 1;
	return 0;
}

/*
 *	ext4_find_entry()
 *
 * finds an entry in the specified directory with the wanted name. It
 * returns the cache buffer in which the entry was found, and the entry
 * itself (as a parameter - res_dir). It does NOT read the inode of the
 * entry - you'll have to do that yourself if you want to.
 *
 * The returned buffer_head has ->b_count elevated.  The caller is expected
 * to brelse() it when appropriate.
 */
static struct buffer_head * ext4_find_entry (struct inode *dir,
					const struct qstr *d_name,
					struct ext4_dir_entry_2 **res_dir,
					int *inlined)
{
	struct super_block *sb;
	struct buffer_head *bh_use[NAMEI_RA_SIZE];
	struct buffer_head *bh, *ret = NULL;
	ext4_lblk_t start, block, b;
	const u8 *name = d_name->name;
	int ra_max = 0;		/* Number of bh's in the readahead
				   buffer, bh_use[] */
	int ra_ptr = 0;		/* Current index into readahead
				   buffer */
	int num = 0;
	ext4_lblk_t  nblocks;
	int i, err;
	int namelen;

	*res_dir = NULL;
	sb = dir->i_sb;
	namelen = d_name->len;
	if (namelen > EXT4_NAME_LEN)
		return NULL;

	if (ext4_has_inline_data(dir)) {
		int has_inline_data = 1;
		ret = ext4_find_inline_entry(dir, d_name, res_dir,
					     &has_inline_data);
		if (has_inline_data) {
			if (inlined)
				*inlined = 1;
			return ret;
		}
	}

	if ((namelen <= 2) && (name[0] == '.') &&
	    (name[1] == '.' || name[1] == '\0')) {
		/*
		 * "." or ".." will only be in the first block
		 * NFS may look up ".."; "." should be handled by the VFS
		 */
		block = start = 0;
		nblocks = 1;
		goto restart;
	}
	if (is_dx(dir)) {
		bh = ext4_dx_find_entry(dir, d_name, res_dir, &err);
		/*
		 * On success, or if the error was file not found,
		 * return.  Otherwise, fall back to doing a search the
		 * old fashioned way.
		 */
		if (bh || (err != ERR_BAD_DX_DIR))
			return bh;
		dxtrace(printk(KERN_DEBUG "ext4_find_entry: dx failed, "
			       "falling back\n"));
	}
	nblocks = dir->i_size >> EXT4_BLOCK_SIZE_BITS(sb);
	start = EXT4_I(dir)->i_dir_start_lookup;
	if (start >= nblocks)
		start = 0;
	block = start;
restart:
	do {
		/*
		 * We deal with the read-ahead logic here.
		 */
		if (ra_ptr >= ra_max) {
			/* Refill the readahead buffer */
			ra_ptr = 0;
			b = block;
			for (ra_max = 0; ra_max < NAMEI_RA_SIZE; ra_max++) {
				/*
				 * Terminate if we reach the end of the
				 * directory and must wrap, or if our
				 * search has finished at this block.
				 */
				if (b >= nblocks || (num && block == start)) {
					bh_use[ra_max] = NULL;
					break;
				}
				num++;
				bh = ext4_getblk(NULL, dir, b++, 0, &err);
				bh_use[ra_max] = bh;
				if (bh)
					ll_rw_block(READ | REQ_META | REQ_PRIO,
						    1, &bh);
			}
		}
		if ((bh = bh_use[ra_ptr++]) == NULL)
			goto next;
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh)) {
			/* read error, skip block & hope for the best */
			EXT4_ERROR_INODE(dir, "reading directory lblock %lu",
					 (unsigned long) block);
			brelse(bh);
			goto next;
		}
		if (!buffer_verified(bh) &&
		    !is_dx_internal_node(dir, block,
					 (struct ext4_dir_entry *)bh->b_data) &&
		    !ext4_dirent_csum_verify(dir,
				(struct ext4_dir_entry *)bh->b_data)) {
			EXT4_ERROR_INODE(dir, "checksumming directory "
					 "block %lu", (unsigned long)block);
			brelse(bh);
			goto next;
		}
		set_buffer_verified(bh);
		i = search_dirblock(bh, dir, d_name,
			    block << EXT4_BLOCK_SIZE_BITS(sb), res_dir);
		if (i == 1) {
			EXT4_I(dir)->i_dir_start_lookup = block;
			ret = bh;
			goto cleanup_and_exit;
		} else {
			brelse(bh);
			if (i < 0)
				goto cleanup_and_exit;
		}
	next:
		if (++block >= nblocks)
			block = 0;
	} while (block != start);

	/*
	 * If the directory has grown while we were searching, then
	 * search the last part of the directory before giving up.
	 */
	block = nblocks;
	nblocks = dir->i_size >> EXT4_BLOCK_SIZE_BITS(sb);
	if (block < nblocks) {
		start = 0;
		goto restart;
	}

cleanup_and_exit:
	/* Clean up the read-ahead blocks */
	for (; ra_ptr < ra_max; ra_ptr++)
		brelse(bh_use[ra_ptr]);
	return ret;
}

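/*
 * Hash-tree assisted lookup: walk the index with dx_probe(), then search
 * the candidate leaf blocks until the entry is found or the hash range
 * is exhausted.  On an index format problem *err is set to
 * ERR_BAD_DX_DIR so the caller can fall back to a linear search.
 */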
static struct buffer_head * ext4_dx_find_entry(struct inode *dir, const struct qstr *d_name,
		       struct ext4_dir_entry_2 **res_dir, int *err)
{
	struct super_block * sb = dir->i_sb;
	struct dx_hash_info	hinfo;
	struct dx_frame frames[2], *frame;
	struct buffer_head *bh;
	ext4_lblk_t block;
	int retval;

	if (!(frame = dx_probe(d_name, dir, &hinfo, frames, err)))
		return NULL;
	do {
		block = dx_get_block(frame->at);
		bh = ext4_read_dirblock(dir, block, DIRENT);
		if (IS_ERR(bh)) {
			*err = PTR_ERR(bh);
			goto errout;
		}
		retval = search_dirblock(bh, dir, d_name,
					 block << EXT4_BLOCK_SIZE_BITS(sb),
					 res_dir);
		if (retval == 1) { 	/* Success! */
			dx_release(frames);
			return bh;
		}
		brelse(bh);
		if (retval == -1) {
			*err = ERR_BAD_DX_DIR;
			goto errout;
		}

		/* Check to see if we should continue to search */
		retval = ext4_htree_next_block(dir, hinfo.hash, frame,
					       frames, NULL);
		if (retval < 0) {
			ext4_warning(sb,
			     "error reading index page in directory #%lu",
			     dir->i_ino);
			*err = retval;
			goto errout;
		}
	} while (retval == 1);

	*err = -ENOENT;
errout:
	dxtrace(printk(KERN_DEBUG "%s not found\n", d_name->name));
	dx_release (frames);
	return NULL;
}

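/*
 * Look up a dentry in @dir on behalf of the VFS: find the on-disk entry,
 * validate the inode number it references, and splice the inode into the
 * dcache.
 */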
static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
{
	struct inode *inode;
	struct ext4_dir_entry_2 *de;
	struct buffer_head *bh;

	if (dentry->d_name.len > EXT4_NAME_LEN)
		return ERR_PTR(-ENAMETOOLONG);

	bh = ext4_find_entry(dir, &dentry->d_name, &de, NULL);
	inode = NULL;
	if (bh) {
		__u32 ino = le32_to_cpu(de->inode);
		brelse(bh);
		if (!ext4_valid_inum(dir->i_sb, ino)) {
			EXT4_ERROR_INODE(dir, "bad inode number: %u", ino);
			return ERR_PTR(-EIO);
		}
		if (unlikely(ino == dir->i_ino)) {
			EXT4_ERROR_INODE(dir, "'%.*s' linked to parent dir",
					 dentry->d_name.len,
					 dentry->d_name.name);
			return ERR_PTR(-EIO);
		}
		inode = ext4_iget(dir->i_sb, ino);
		if (inode == ERR_PTR(-ESTALE)) {
			EXT4_ERROR_INODE(dir,
					 "deleted inode referenced: %u",
					 ino);
			return ERR_PTR(-EIO);
1439
		}
1440 1441 1442 1443 1444
	}
	return d_splice_alias(inode, dentry);
}


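/*
 * Resolve ".." in @child's directory and return a dentry for the parent
 * inode; used by the exportfs/NFS file handle code.
 */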
struct dentry *ext4_get_parent(struct dentry *child)
{
	__u32 ino;
	static const struct qstr dotdot = QSTR_INIT("..", 2);
	struct ext4_dir_entry_2 * de;
	struct buffer_head *bh;

	bh = ext4_find_entry(child->d_inode, &dotdot, &de, NULL);
	if (!bh)
		return ERR_PTR(-ENOENT);
	ino = le32_to_cpu(de->inode);
	brelse(bh);

	if (!ext4_valid_inum(child->d_inode->i_sb, ino)) {
		EXT4_ERROR_INODE(child->d_inode,
				 "bad parent inode number: %u", ino);
		return ERR_PTR(-EIO);
	}

	return d_obtain_alias(ext4_iget(child->d_inode->i_sb, ino));
}

/*
 * Move count entries from end of map between two memory locations.
 * Returns pointer to last entry moved.
 */
static struct ext4_dir_entry_2 *
dx_move_dirents(char *from, char *to, struct dx_map_entry *map, int count,
		unsigned blocksize)
{
	unsigned rec_len = 0;

	while (count--) {
		struct ext4_dir_entry_2 *de = (struct ext4_dir_entry_2 *)
						(from + (map->offs<<2));
		rec_len = EXT4_DIR_REC_LEN(de->name_len);
		memcpy (to, de, rec_len);
		((struct ext4_dir_entry_2 *) to)->rec_len =
				ext4_rec_len_to_disk(rec_len, blocksize);
		de->inode = 0;
		map++;
		to += rec_len;
	}
	return (struct ext4_dir_entry_2 *) (to - rec_len);
}

/*
 * Compact each dir entry in the range to the minimal rec_len.
 * Returns pointer to last entry in range.
 */
static struct ext4_dir_entry_2* dx_pack_dirents(char *base, unsigned blocksize)
{
	struct ext4_dir_entry_2 *next, *to, *prev, *de = (struct ext4_dir_entry_2 *) base;
	unsigned rec_len = 0;

	prev = to = de;
	while ((char*)de < base + blocksize) {
		next = ext4_next_entry(de, blocksize);
		if (de->inode && de->name_len) {
			rec_len = EXT4_DIR_REC_LEN(de->name_len);
			if (de > to)
				memmove(to, de, rec_len);
			to->rec_len = ext4_rec_len_to_disk(rec_len, blocksize);
			prev = to;
			to = (struct ext4_dir_entry_2 *) (((char *) to) + rec_len);
		}
		de = next;
	}
	return prev;
}

/*
 * Split a full leaf block to make room for a new dir entry.
 * Allocate a new block, and move entries so that they are approx. equally full.
 * Returns pointer to de in block into which the new entry will be inserted.
 */
static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
			struct buffer_head **bh,struct dx_frame *frame,
			struct dx_hash_info *hinfo, int *error)
{
	unsigned blocksize = dir->i_sb->s_blocksize;
	unsigned count, continued;
	struct buffer_head *bh2;
	ext4_lblk_t newblock;
	u32 hash2;
	struct dx_map_entry *map;
	char *data1 = (*bh)->b_data, *data2;
	unsigned split, move, size;
	struct ext4_dir_entry_2 *de = NULL, *de2;
	struct ext4_dir_entry_tail *t;
	int	csum_size = 0;
	int	err = 0, i;

	if (EXT4_HAS_RO_COMPAT_FEATURE(dir->i_sb,
				       EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
		csum_size = sizeof(struct ext4_dir_entry_tail);

	bh2 = ext4_append(handle, dir, &newblock);
	if (IS_ERR(bh2)) {
		brelse(*bh);
		*bh = NULL;
		*error = PTR_ERR(bh2);
		return NULL;
	}

	BUFFER_TRACE(*bh, "get_write_access");
	err = ext4_journal_get_write_access(handle, *bh);
	if (err)
		goto journal_error;

	BUFFER_TRACE(frame->bh, "get_write_access");
	err = ext4_journal_get_write_access(handle, frame->bh);
	if (err)
		goto journal_error;

	data2 = bh2->b_data;

	/* create map in the end of data2 block */
	map = (struct dx_map_entry *) (data2 + blocksize);
	count = dx_make_map((struct ext4_dir_entry_2 *) data1,
			     blocksize, hinfo, map);
	map -= count;
	dx_sort_map(map, count);
	/* Split the existing block in the middle, size-wise */
	size = 0;
	move = 0;
	for (i = count-1; i >= 0; i--) {
		/* is more than half of this entry in 2nd half of the block? */
		if (size + map[i].size/2 > blocksize/2)
			break;
		size += map[i].size;
		move++;
	}
	/* map index at which we will split */
	split = count - move;
	hash2 = map[split].hash;
	continued = hash2 == map[split - 1].hash;
	dxtrace(printk(KERN_INFO "Split block %lu at %x, %i/%i\n",
			(unsigned long)dx_get_block(frame->at),
					hash2, split, count-split));

	/* Fancy dance to stay within two buffers */
	de2 = dx_move_dirents(data1, data2, map + split, count - split, blocksize);
	de = dx_pack_dirents(data1, blocksize);
	de->rec_len = ext4_rec_len_to_disk(data1 + (blocksize - csum_size) -
					   (char *) de,
					   blocksize);
	de2->rec_len = ext4_rec_len_to_disk(data2 + (blocksize - csum_size) -
					    (char *) de2,
					    blocksize);
	if (csum_size) {
		t = EXT4_DIRENT_TAIL(data2, blocksize);
		initialize_dirent_tail(t, blocksize);

		t = EXT4_DIRENT_TAIL(data1, blocksize);
		initialize_dirent_tail(t, blocksize);
	}

	dxtrace(dx_show_leaf (hinfo, (struct ext4_dir_entry_2 *) data1, blocksize, 1));
	dxtrace(dx_show_leaf (hinfo, (struct ext4_dir_entry_2 *) data2, blocksize, 1));

	/* Which block gets the new entry? */
	if (hinfo->hash >= hash2)
	{
		swap(*bh, bh2);
		de = de2;
	}
	dx_insert_block(frame, hash2 + continued, newblock);
	err = ext4_handle_dirty_dirent_node(handle, dir, bh2);
	if (err)
		goto journal_error;
	err = ext4_handle_dirty_dx_node(handle, dir, frame->bh);
	if (err)
		goto journal_error;
	brelse(bh2);
	dxtrace(dx_show_index("frame", frame->entries));
	return de;

journal_error:
	brelse(*bh);
	brelse(bh2);
	*bh = NULL;
	ext4_std_error(dir->i_sb, err);
	*error = err;
	return NULL;
}

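/*
 * Scan the dirent buffer @buf for a slot large enough to hold a new
 * entry of @namelen bytes.  Returns 0 and sets *dest_de on success,
 * -EEXIST if the name is already present, -ENOSPC if no slot fits, or
 * -EIO on a corrupted entry.
 */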
int ext4_find_dest_de(struct inode *dir, struct inode *inode,
		      struct buffer_head *bh,
		      void *buf, int buf_size,
		      const char *name, int namelen,
		      struct ext4_dir_entry_2 **dest_de)
{
	struct ext4_dir_entry_2 *de;
	unsigned short reclen = EXT4_DIR_REC_LEN(namelen);
	int nlen, rlen;
	unsigned int offset = 0;
	char *top;

	de = (struct ext4_dir_entry_2 *)buf;
	top = buf + buf_size - reclen;
	while ((char *) de <= top) {
		if (ext4_check_dir_entry(dir, NULL, de, bh,
					 buf, buf_size, offset))
			return -EIO;
		if (ext4_match(namelen, name, de))
			return -EEXIST;
		nlen = EXT4_DIR_REC_LEN(de->name_len);
		rlen = ext4_rec_len_from_disk(de->rec_len, buf_size);
		if ((de->inode ? rlen - nlen : rlen) >= reclen)
			break;
		de = (struct ext4_dir_entry_2 *)((char *)de + rlen);
		offset += rlen;
	}
	if ((char *) de > top)
		return -ENOSPC;

	*dest_de = de;
	return 0;
}

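/*
 * Write the new entry into the slot found by ext4_find_dest_de(),
 * splitting the slot's rec_len in two if it currently holds a live
 * entry.
 */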
void ext4_insert_dentry(struct inode *inode,
			struct ext4_dir_entry_2 *de,
			int buf_size,
			const char *name, int namelen)
{

	int nlen, rlen;

	nlen = EXT4_DIR_REC_LEN(de->name_len);
	rlen = ext4_rec_len_from_disk(de->rec_len, buf_size);
	if (de->inode) {
		struct ext4_dir_entry_2 *de1 =
				(struct ext4_dir_entry_2 *)((char *)de + nlen);
		de1->rec_len = ext4_rec_len_to_disk(rlen - nlen, buf_size);
		de->rec_len = ext4_rec_len_to_disk(nlen, buf_size);
		de = de1;
	}
	de->file_type = EXT4_FT_UNKNOWN;
	de->inode = cpu_to_le32(inode->i_ino);
	ext4_set_de_type(inode->i_sb, de, inode->i_mode);
	de->name_len = namelen;
	memcpy(de->name, name, namelen);
}
/*
 * Add a new entry into a directory (leaf) block.  If de is non-NULL,
 * it points to a directory entry which is guaranteed to be large
 * enough for the new directory entry.  If de is NULL, then
 * add_dirent_to_buf will attempt to search the directory block for
 * space.  It will return -ENOSPC if no space is available, and -EIO
 * and -EEXIST if directory entry already exists.
 */
static int add_dirent_to_buf(handle_t *handle, struct dentry *dentry,
			     struct inode *inode, struct ext4_dir_entry_2 *de,
			     struct buffer_head *bh)
{
	struct inode	*dir = dentry->d_parent->d_inode;
	const char	*name = dentry->d_name.name;
	int		namelen = dentry->d_name.len;
	unsigned int	blocksize = dir->i_sb->s_blocksize;
	int		csum_size = 0;
	int		err;

	if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
				       EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
		csum_size = sizeof(struct ext4_dir_entry_tail);

	if (!de) {
		err = ext4_find_dest_de(dir, inode,
					bh, bh->b_data, blocksize - csum_size,
					name, namelen, &de);
		if (err)
			return err;
	}
	BUFFER_TRACE(bh, "get_write_access");
	err = ext4_journal_get_write_access(handle, bh);
	if (err) {
		ext4_std_error(dir->i_sb, err);
		return err;
	}

	/* By now the buffer is marked for journaling */
	ext4_insert_dentry(inode, de, blocksize, name, namelen);

	/*
	 * XXX shouldn't update any times until successful
	 * completion of syscall, but too many callers depend
	 * on this.
	 *
	 * XXX similarly, too many callers depend on
1735
	 * ext4_new_inode() setting the times, but error
1736 1737 1738 1739
	 * recovery deletes the inode, so the worst that can
	 * happen is that the times are slightly out of date
	 * and/or different from the directory change time.
	 */
	dir->i_mtime = dir->i_ctime = ext4_current_time(dir);
	ext4_update_dx_flag(dir);
	dir->i_version++;
	ext4_mark_inode_dirty(handle, dir);
	BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
	err = ext4_handle_dirty_dirent_node(handle, dir, bh);
	if (err)
		ext4_std_error(dir->i_sb, err);
	return 0;
}

/*
 * This converts a one block unindexed directory to a 3 block indexed
 * directory, and adds the dentry to the indexed directory.
 */
static int make_indexed_dir(handle_t *handle, struct dentry *dentry,
			    struct inode *inode, struct buffer_head *bh)
{
	struct inode	*dir = dentry->d_parent->d_inode;
	const char	*name = dentry->d_name.name;
	int		namelen = dentry->d_name.len;
	struct buffer_head *bh2;
	struct dx_root	*root;
	struct dx_frame	frames[2], *frame;
	struct dx_entry *entries;
	struct ext4_dir_entry_2	*de, *de2;
	struct ext4_dir_entry_tail *t;
	char		*data1, *top;
	unsigned	len;
	int		retval;
	unsigned	blocksize;
	struct dx_hash_info hinfo;
	ext4_lblk_t  block;
	struct fake_dirent *fde;
	int		csum_size = 0;

	if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
				       EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
		csum_size = sizeof(struct ext4_dir_entry_tail);

	blocksize =  dir->i_sb->s_blocksize;
	dxtrace(printk(KERN_DEBUG "Creating index: inode %lu\n", dir->i_ino));
	retval = ext4_journal_get_write_access(handle, bh);
	if (retval) {
		ext4_std_error(dir->i_sb, retval);
		brelse(bh);
		return retval;
	}
	root = (struct dx_root *) bh->b_data;

	/* The 0th block becomes the root, move the dirents out */
	fde = &root->dotdot;
	de = (struct ext4_dir_entry_2 *)((char *)fde +
		ext4_rec_len_from_disk(fde->rec_len, blocksize));
1794
	if ((char *) de >= (((char *) root) + blocksize)) {
1795
		EXT4_ERROR_INODE(dir, "invalid rec_len for '..'");
1796 1797 1798
		brelse(bh);
		return -EIO;
	}
1799
	len = ((char *) root) + (blocksize - csum_size) - (char *) de;
1800 1801

	/* Allocate new block for the 0th block's dirents */
1802 1803
	bh2 = ext4_append(handle, dir, &block);
	if (IS_ERR(bh2)) {
1804
		brelse(bh);
1805
		return PTR_ERR(bh2);
1806
	}
1807
	ext4_set_inode_flag(dir, EXT4_INODE_INDEX);
1808 1809 1810
	data1 = bh2->b_data;

	memcpy (data1, de, len);
1811
	de = (struct ext4_dir_entry_2 *) data1;
1812
	top = data1 + len;
1813
	while ((char *)(de2 = ext4_next_entry(de, blocksize)) < top)
1814
		de = de2;
1815 1816
	de->rec_len = ext4_rec_len_to_disk(data1 + (blocksize - csum_size) -
					   (char *) de,
1817
					   blocksize);
1818 1819 1820 1821 1822 1823

	if (csum_size) {
		t = EXT4_DIRENT_TAIL(data1, blocksize);
		initialize_dirent_tail(t, blocksize);
	}

1824
	/* Initialize the root; the dot dirents already exist */
1825
	de = (struct ext4_dir_entry_2 *) (&root->dotdot);
1826 1827
	de->rec_len = ext4_rec_len_to_disk(blocksize - EXT4_DIR_REC_LEN(2),
					   blocksize);
1828 1829
	memset (&root->info, 0, sizeof(root->info));
	root->info.info_length = sizeof(root->info);
1830
	root->info.hash_version = EXT4_SB(dir->i_sb)->s_def_hash_version;
1831
	entries = root->entries;
1832 1833 1834
	dx_set_block(entries, 1);
	dx_set_count(entries, 1);
	dx_set_limit(entries, dx_root_limit(dir, sizeof(root->info)));
1835 1836 1837

	/* Initialize as for dx_probe */
	hinfo.hash_version = root->info.hash_version;
1838 1839
	if (hinfo.hash_version <= DX_HASH_TEA)
		hinfo.hash_version += EXT4_SB(dir->i_sb)->s_hash_unsigned;
1840 1841
	hinfo.seed = EXT4_SB(dir->i_sb)->s_hash_seed;
	ext4fs_dirhash(name, namelen, &hinfo);
1842 1843 1844 1845 1846
	frame = frames;
	frame->entries = entries;
	frame->at = entries;
	frame->bh = bh;
	bh = bh2;
1847

1848
	ext4_handle_dirty_dx_node(handle, dir, frame->bh);
1849
	ext4_handle_dirty_dirent_node(handle, dir, bh);
1850

1851
	de = do_split(handle,dir, &bh, frame, &hinfo, &retval);
1852 1853 1854 1855 1856 1857 1858 1859
	if (!de) {
		/*
		 * Even if the block split failed, we have to properly write
		 * out all the changes we did so far. Otherwise we can end up
		 * with corrupted filesystem.
		 */
		ext4_mark_inode_dirty(handle, dir);
		dx_release(frames);
1860
		return retval;
1861 1862
	}
	dx_release(frames);
1863

1864 1865 1866
	retval = add_dirent_to_buf(handle, dentry, inode, de, bh);
	brelse(bh);
	return retval;
1867 1868 1869
}

/*
 *	ext4_add_entry()
 *
 * adds a file entry to the specified directory, using the same
 * semantics as ext4_find_entry(). It returns 0 on success, or a
 * negative error code if it failed.
 *
 * NOTE!! The inode part of 'de' is left at 0 - which means you
 * may not sleep between calling this and putting something into
 * the entry, as someone else might have used it while you slept.
 */
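/*
 * Rough order of attempts below: inline data first (while the directory
 * is still inline), then the htree path via ext4_dx_add_entry(), then a
 * linear scan of the existing blocks, and finally a new block appended
 * at the end.  A single-block directory that fills up is converted to an
 * htree by make_indexed_dir() when the DIR_INDEX feature is enabled.
 */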
static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
			  struct inode *inode)
{
	struct inode *dir = dentry->d_parent->d_inode;
	struct buffer_head *bh;
	struct ext4_dir_entry_2 *de;
	struct ext4_dir_entry_tail *t;
	struct super_block *sb;
	int	retval;
	int	dx_fallback=0;
	unsigned blocksize;
	ext4_lblk_t block, blocks;
	int	csum_size = 0;

	if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
				       EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
		csum_size = sizeof(struct ext4_dir_entry_tail);

	sb = dir->i_sb;
	blocksize = sb->s_blocksize;
	if (!dentry->d_name.len)
		return -EINVAL;

	if (ext4_has_inline_data(dir)) {
		retval = ext4_try_add_inline_entry(handle, dentry, inode);
		if (retval < 0)
			return retval;
		if (retval == 1) {
			retval = 0;
			return retval;
		}
	}

	if (is_dx(dir)) {
		retval = ext4_dx_add_entry(handle, dentry, inode);
		if (!retval || (retval != ERR_BAD_DX_DIR))
			return retval;
		ext4_clear_inode_flag(dir, EXT4_INODE_INDEX);
		dx_fallback++;
		ext4_mark_inode_dirty(handle, dir);
	}
	blocks = dir->i_size >> sb->s_blocksize_bits;
	for (block = 0; block < blocks; block++) {
		bh = ext4_read_dirblock(dir, block, DIRENT);
		if (IS_ERR(bh))
			return PTR_ERR(bh);

		retval = add_dirent_to_buf(handle, dentry, inode, NULL, bh);
		if (retval != -ENOSPC) {
			brelse(bh);
			return retval;
		}

		if (blocks == 1 && !dx_fallback &&
		    EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_DIR_INDEX))
			return make_indexed_dir(handle, dentry, inode, bh);
		brelse(bh);
	}
	bh = ext4_append(handle, dir, &block);
	if (IS_ERR(bh))
		return PTR_ERR(bh);
	de = (struct ext4_dir_entry_2 *) bh->b_data;
	de->inode = 0;
	de->rec_len = ext4_rec_len_to_disk(blocksize - csum_size, blocksize);

	if (csum_size) {
		t = EXT4_DIRENT_TAIL(bh->b_data, blocksize);
		initialize_dirent_tail(t, blocksize);
	}

	retval = add_dirent_to_buf(handle, dentry, inode, de, bh);
	brelse(bh);
	if (retval == 0)
		ext4_set_inode_state(inode, EXT4_STATE_NEWENTRY);
	return retval;
}

/*
 * Returns 0 for success, or a negative error value
 */
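/*
 * When the leaf block found by dx_probe() is full, it is split with
 * do_split().  If the index node above it is also full, either its
 * entries are divided between two nodes (when a second level already
 * exists) or a new second level is created; the tree is never grown
 * past two levels here, so a completely full root returns -ENOSPC.
 */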
static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry,
			     struct inode *inode)
{
	struct dx_frame frames[2], *frame;
	struct dx_entry *entries, *at;
	struct dx_hash_info hinfo;
	struct buffer_head *bh;
	struct inode *dir = dentry->d_parent->d_inode;
	struct super_block *sb = dir->i_sb;
	struct ext4_dir_entry_2 *de;
	int err;

	frame = dx_probe(&dentry->d_name, dir, &hinfo, frames, &err);
	if (!frame)
		return err;
	entries = frame->entries;
	at = frame->at;
	bh = ext4_read_dirblock(dir, dx_get_block(frame->at), DIRENT);
	if (IS_ERR(bh)) {
		err = PTR_ERR(bh);
		bh = NULL;
		goto cleanup;
	}

	BUFFER_TRACE(bh, "get_write_access");
	err = ext4_journal_get_write_access(handle, bh);
	if (err)
		goto journal_error;

	err = add_dirent_to_buf(handle, dentry, inode, NULL, bh);
	if (err != -ENOSPC)
		goto cleanup;

	/* Block full, should compress but for now just split */
	dxtrace(printk(KERN_DEBUG "using %u of %u node entries\n",
		       dx_get_count(entries), dx_get_limit(entries)));
	/* Need to split index? */
	if (dx_get_count(entries) == dx_get_limit(entries)) {
		ext4_lblk_t newblock;
		unsigned icount = dx_get_count(entries);
		int levels = frame - frames;
		struct dx_entry *entries2;
		struct dx_node *node2;
		struct buffer_head *bh2;

		if (levels && (dx_get_count(frames->entries) ==
			       dx_get_limit(frames->entries))) {
			ext4_warning(sb, "Directory index full!");
			err = -ENOSPC;
			goto cleanup;
		}
		bh2 = ext4_append(handle, dir, &newblock);
		if (IS_ERR(bh2)) {
			err = PTR_ERR(bh2);
			goto cleanup;
		}
		node2 = (struct dx_node *)(bh2->b_data);
		entries2 = node2->entries;
		memset(&node2->fake, 0, sizeof(struct fake_dirent));
		node2->fake.rec_len = ext4_rec_len_to_disk(sb->s_blocksize,
							   sb->s_blocksize);
		BUFFER_TRACE(frame->bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, frame->bh);
		if (err)
			goto journal_error;
		if (levels) {
			unsigned icount1 = icount/2, icount2 = icount - icount1;
			unsigned hash2 = dx_get_hash(entries + icount1);
			dxtrace(printk(KERN_DEBUG "Split index %i/%i\n",
				       icount1, icount2));

			BUFFER_TRACE(frame->bh, "get_write_access"); /* index root */
			err = ext4_journal_get_write_access(handle,
							     frames[0].bh);
			if (err)
				goto journal_error;

			memcpy((char *) entries2, (char *) (entries + icount1),
			       icount2 * sizeof(struct dx_entry));
			dx_set_count(entries, icount1);
			dx_set_count(entries2, icount2);
			dx_set_limit(entries2, dx_node_limit(dir));

			/* Which index block gets the new entry? */
			if (at - entries >= icount1) {
				frame->at = at = at - entries - icount1 + entries2;
				frame->entries = entries = entries2;
				swap(frame->bh, bh2);
			}
			dx_insert_block(frames + 0, hash2, newblock);
			dxtrace(dx_show_index("node", frames[1].entries));
			dxtrace(dx_show_index("node",
			       ((struct dx_node *) bh2->b_data)->entries));
			err = ext4_handle_dirty_dx_node(handle, dir, bh2);
			if (err)
				goto journal_error;
			brelse (bh2);
		} else {
			dxtrace(printk(KERN_DEBUG
				       "Creating second level index...\n"));
			memcpy((char *) entries2, (char *) entries,
			       icount * sizeof(struct dx_entry));
			dx_set_limit(entries2, dx_node_limit(dir));

			/* Set up root */
			dx_set_count(entries, 1);
			dx_set_block(entries + 0, newblock);
			((struct dx_root *) frames[0].bh->b_data)->info.indirect_levels = 1;

			/* Add new access path frame */
			frame = frames + 1;
			frame->at = at = at - entries + entries2;
			frame->entries = entries = entries2;
			frame->bh = bh2;
			err = ext4_journal_get_write_access(handle,
							     frame->bh);
			if (err)
				goto journal_error;
		}
		err = ext4_handle_dirty_dx_node(handle, dir, frames[0].bh);
		if (err) {
			ext4_std_error(inode->i_sb, err);
			goto cleanup;
		}
	}
	de = do_split(handle, dir, &bh, frame, &hinfo, &err);
	if (!de)
		goto cleanup;
	err = add_dirent_to_buf(handle, dentry, inode, de, bh);
	goto cleanup;

journal_error:
	ext4_std_error(dir->i_sb, err);
cleanup:
	brelse(bh);
	dx_release(frames);
	return err;
}

/*
 * ext4_generic_delete_entry deletes a directory entry by merging it
 * with the previous entry
 */
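/*
 * For example, with consecutive entries [A][B][C] in a block, deleting B
 * simply extends A's rec_len to also cover B's space; if the entry being
 * deleted is the first one in the block, its inode field is set to 0
 * instead, which marks it as unused.
 */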
int ext4_generic_delete_entry(handle_t *handle,
			      struct inode *dir,
			      struct ext4_dir_entry_2 *de_del,
			      struct buffer_head *bh,
			      void *entry_buf,
			      int buf_size,
			      int csum_size)
{
	struct ext4_dir_entry_2 *de, *pde;
	unsigned int blocksize = dir->i_sb->s_blocksize;
	int i;

	i = 0;
	pde = NULL;
	de = (struct ext4_dir_entry_2 *)entry_buf;
	while (i < buf_size - csum_size) {
		if (ext4_check_dir_entry(dir, NULL, de, bh,
					 bh->b_data, bh->b_size, i))
			return -EIO;
		if (de == de_del)  {
			if (pde)
				pde->rec_len = ext4_rec_len_to_disk(
					ext4_rec_len_from_disk(pde->rec_len,
							       blocksize) +
					ext4_rec_len_from_disk(de->rec_len,
							       blocksize),
					blocksize);
			else
				de->inode = 0;
			dir->i_version++;
			return 0;
		}
		i += ext4_rec_len_from_disk(de->rec_len, blocksize);
		pde = de;
		de = ext4_next_entry(de, blocksize);
	}
	return -ENOENT;
}

static int ext4_delete_entry(handle_t *handle,
			     struct inode *dir,
			     struct ext4_dir_entry_2 *de_del,
			     struct buffer_head *bh)
{
	int err, csum_size = 0;

	if (ext4_has_inline_data(dir)) {
		int has_inline_data = 1;
		err = ext4_delete_inline_entry(handle, dir, de_del, bh,
					       &has_inline_data);
		if (has_inline_data)
			return err;
	}

	if (EXT4_HAS_RO_COMPAT_FEATURE(dir->i_sb,
				       EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
		csum_size = sizeof(struct ext4_dir_entry_tail);

	BUFFER_TRACE(bh, "get_write_access");
	err = ext4_journal_get_write_access(handle, bh);
	if (unlikely(err))
		goto out;

	err = ext4_generic_delete_entry(handle, dir, de_del,
					bh, bh->b_data,
					dir->i_sb->s_blocksize, csum_size);
	if (err)
		goto out;

	BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
	err = ext4_handle_dirty_dirent_node(handle, dir, bh);
	if (unlikely(err))
		goto out;

	return 0;
out:
	if (err != -ENOENT)
		ext4_std_error(dir->i_sb, err);
	return err;
}

/*
 * DIR_NLINK feature is set if 1) nlinks > EXT4_LINK_MAX or 2) nlinks == 2,
 * since this indicates that nlinks count was previously 1.
 */
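/*
 * In other words: once an indexed directory's i_nlink would exceed
 * EXT4_LINK_MAX, it is pinned at 1 ("link count unknown") and the
 * RO_COMPAT_DIR_NLINK feature flag is set, so kernels that do not
 * understand the flag will only mount the filesystem read-only.
 */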
static void ext4_inc_count(handle_t *handle, struct inode *inode)
{
	inc_nlink(inode);
	if (is_dx(inode) && inode->i_nlink > 1) {
		/* limit is 16-bit i_links_count */
		if (inode->i_nlink >= EXT4_LINK_MAX || inode->i_nlink == 2) {
			set_nlink(inode, 1);
			EXT4_SET_RO_COMPAT_FEATURE(inode->i_sb,
					      EXT4_FEATURE_RO_COMPAT_DIR_NLINK);
		}
	}
}

/*
 * If a directory had nlink == 1, then we should let it be 1. This indicates
 * that the directory has more than EXT4_LINK_MAX subdirs.
 */
static void ext4_dec_count(handle_t *handle, struct inode *inode)
{
	if (!S_ISDIR(inode->i_mode) || inode->i_nlink > 2)
		drop_nlink(inode);
}


static int ext4_add_nondir(handle_t *handle,
		struct dentry *dentry, struct inode *inode)
{
	int err = ext4_add_entry(handle, dentry, inode);
	if (!err) {
		ext4_mark_inode_dirty(handle, inode);
		unlock_new_inode(inode);
		d_instantiate(dentry, inode);
		return 0;
	}
	drop_nlink(inode);
	unlock_new_inode(inode);
	iput(inode);
	return err;
}

/*
 * By the time this is called, we already have created
 * the directory cache entry for the new file, but it
 * is so far negative - it has no inode.
 *
 * If the create succeeds, we fill in the inode information
 * with d_instantiate().
 */
static int ext4_create(struct inode *dir, struct dentry *dentry, umode_t mode,
		       bool excl)
{
	handle_t *handle;
	struct inode *inode;
	int err, credits, retries = 0;

	dquot_initialize(dir);

	credits = (EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
		   EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3);
retry:
	inode = ext4_new_inode_start_handle(dir, mode, &dentry->d_name, 0,
					    NULL, EXT4_HT_DIR, credits);
	handle = ext4_journal_current_handle();
	err = PTR_ERR(inode);
	if (!IS_ERR(inode)) {
		inode->i_op = &ext4_file_inode_operations;
		inode->i_fop = &ext4_file_operations;
		ext4_set_aops(inode);
		err = ext4_add_nondir(handle, dentry, inode);
		if (!err && IS_DIRSYNC(dir))
			ext4_handle_sync(handle);
	}
	if (handle)
		ext4_journal_stop(handle);
	if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries))
		goto retry;
	return err;
}

static int ext4_mknod(struct inode *dir, struct dentry *dentry,
		      umode_t mode, dev_t rdev)
{
	handle_t *handle;
	struct inode *inode;
	int err, credits, retries = 0;

	if (!new_valid_dev(rdev))
		return -EINVAL;

	dquot_initialize(dir);

	credits = (EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
		   EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3);
retry:
	inode = ext4_new_inode_start_handle(dir, mode, &dentry->d_name, 0,
					    NULL, EXT4_HT_DIR, credits);
	handle = ext4_journal_current_handle();
	err = PTR_ERR(inode);
	if (!IS_ERR(inode)) {
		init_special_inode(inode, inode->i_mode, rdev);
		inode->i_op = &ext4_special_inode_operations;
		err = ext4_add_nondir(handle, dentry, inode);
		if (!err && IS_DIRSYNC(dir))
			ext4_handle_sync(handle);
	}
	if (handle)
		ext4_journal_stop(handle);
	if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries))
		goto retry;
	return err;
}

static int ext4_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	handle_t *handle;
	struct inode *inode;
	int err, retries = 0;

	dquot_initialize(dir);

retry:
	inode = ext4_new_inode_start_handle(dir, mode,
					    NULL, 0, NULL,
					    EXT4_HT_DIR,
			EXT4_MAXQUOTAS_INIT_BLOCKS(dir->i_sb) +
			  4 + EXT4_XATTR_TRANS_BLOCKS);
	handle = ext4_journal_current_handle();
	err = PTR_ERR(inode);
	if (!IS_ERR(inode)) {
		inode->i_op = &ext4_file_inode_operations;
		inode->i_fop = &ext4_file_operations;
		ext4_set_aops(inode);
		err = ext4_orphan_add(handle, inode);
		if (err)
			goto err_drop_inode;
		mark_inode_dirty(inode);
		d_tmpfile(dentry, inode);
		unlock_new_inode(inode);
	}
	if (handle)
		ext4_journal_stop(handle);
	if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries))
		goto retry;
	return err;
err_drop_inode:
	ext4_journal_stop(handle);
	unlock_new_inode(inode);
	iput(inode);
	return err;
}

struct ext4_dir_entry_2 *ext4_init_dot_dotdot(struct inode *inode,
			  struct ext4_dir_entry_2 *de,
			  int blocksize, int csum_size,
			  unsigned int parent_ino, int dotdot_real_len)
{
	de->inode = cpu_to_le32(inode->i_ino);
	de->name_len = 1;
	de->rec_len = ext4_rec_len_to_disk(EXT4_DIR_REC_LEN(de->name_len),
					   blocksize);
	strcpy(de->name, ".");
	ext4_set_de_type(inode->i_sb, de, S_IFDIR);

	de = ext4_next_entry(de, blocksize);
	de->inode = cpu_to_le32(parent_ino);
	de->name_len = 2;
	if (!dotdot_real_len)
		de->rec_len = ext4_rec_len_to_disk(blocksize -
					(csum_size + EXT4_DIR_REC_LEN(1)),
					blocksize);
	else
		de->rec_len = ext4_rec_len_to_disk(
				EXT4_DIR_REC_LEN(de->name_len), blocksize);
	strcpy(de->name, "..");
	ext4_set_de_type(inode->i_sb, de, S_IFDIR);

	return ext4_next_entry(de, blocksize);
}
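
/*
 * ext4_init_new_dir() below writes the initial contents of a new
 * directory: it tries an inline directory first when the inode may hold
 * inline data, otherwise it appends block 0 and fills it with the "."
 * and ".." entries (plus a checksum tail when metadata checksums are
 * enabled).
 */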

static int ext4_init_new_dir(handle_t *handle, struct inode *dir,
			     struct inode *inode)
{
	struct buffer_head *dir_block = NULL;
	struct ext4_dir_entry_2 *de;
	struct ext4_dir_entry_tail *t;
	ext4_lblk_t block = 0;
	unsigned int blocksize = dir->i_sb->s_blocksize;
	int csum_size = 0;
	int err;

	if (EXT4_HAS_RO_COMPAT_FEATURE(dir->i_sb,
				       EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
		csum_size = sizeof(struct ext4_dir_entry_tail);

	if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
		err = ext4_try_create_inline_dir(handle, dir, inode);
		if (err < 0 && err != -ENOSPC)
			goto out;
		if (!err)
			goto out;
	}

	inode->i_size = 0;
	dir_block = ext4_append(handle, inode, &block);
	if (IS_ERR(dir_block))
		return PTR_ERR(dir_block);
	BUFFER_TRACE(dir_block, "get_write_access");
	err = ext4_journal_get_write_access(handle, dir_block);
	if (err)
		goto out;
	de = (struct ext4_dir_entry_2 *)dir_block->b_data;
	ext4_init_dot_dotdot(inode, de, blocksize, csum_size, dir->i_ino, 0);
	set_nlink(inode, 2);
	if (csum_size) {
		t = EXT4_DIRENT_TAIL(dir_block->b_data, blocksize);
		initialize_dirent_tail(t, blocksize);
	}

	BUFFER_TRACE(dir_block, "call ext4_handle_dirty_metadata");
	err = ext4_handle_dirty_dirent_node(handle, inode, dir_block);
	if (err)
		goto out;
	set_buffer_verified(dir_block);
out:
	brelse(dir_block);
	return err;
}

static int ext4_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	handle_t *handle;
	struct inode *inode;
	int err, credits, retries = 0;

	if (EXT4_DIR_LINK_MAX(dir))
		return -EMLINK;

	dquot_initialize(dir);

	credits = (EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
		   EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3);
retry:
	inode = ext4_new_inode_start_handle(dir, S_IFDIR | mode,
					    &dentry->d_name,
					    0, NULL, EXT4_HT_DIR, credits);
	handle = ext4_journal_current_handle();
	err = PTR_ERR(inode);
	if (IS_ERR(inode))
		goto out_stop;

	inode->i_op = &ext4_dir_inode_operations;
	inode->i_fop = &ext4_dir_operations;
	err = ext4_init_new_dir(handle, dir, inode);
	if (err)
		goto out_clear_inode;
	err = ext4_mark_inode_dirty(handle, inode);
	if (!err)
		err = ext4_add_entry(handle, dentry, inode);
	if (err) {
out_clear_inode:
		clear_nlink(inode);
		unlock_new_inode(inode);
		ext4_mark_inode_dirty(handle, inode);
		iput(inode);
		goto out_stop;
	}
	ext4_inc_count(handle, dir);
	ext4_update_dx_flag(dir);
	err = ext4_mark_inode_dirty(handle, dir);
	if (err)
		goto out_clear_inode;
	unlock_new_inode(inode);
	d_instantiate(dentry, inode);
	if (IS_DIRSYNC(dir))
		ext4_handle_sync(handle);

out_stop:
	if (handle)
		ext4_journal_stop(handle);
	if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries))
		goto retry;
	return err;
}

/*
 * routine to check that the specified directory is empty (for rmdir)
 */
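/*
 * Returns 1 ("empty") when only "." and ".." are present, and also when
 * the directory is too corrupted to walk; returns 0 as soon as any other
 * entry with a nonzero inode number is found.  Inline directories are
 * handled separately via empty_inline_dir().
 */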
static int empty_dir(struct inode *inode)
{
	unsigned int offset;
	struct buffer_head *bh;
	struct ext4_dir_entry_2 *de, *de1;
	struct super_block *sb;
	int err = 0;

	if (ext4_has_inline_data(inode)) {
		int has_inline_data = 1;

		err = empty_inline_dir(inode, &has_inline_data);
		if (has_inline_data)
			return err;
	}

	sb = inode->i_sb;
	if (inode->i_size < EXT4_DIR_REC_LEN(1) + EXT4_DIR_REC_LEN(2)) {
		EXT4_ERROR_INODE(inode, "invalid size");
		return 1;
	}
	bh = ext4_read_dirblock(inode, 0, EITHER);
	if (IS_ERR(bh))
		return 1;

	de = (struct ext4_dir_entry_2 *) bh->b_data;
	de1 = ext4_next_entry(de, sb->s_blocksize);
	if (le32_to_cpu(de->inode) != inode->i_ino ||
			!le32_to_cpu(de1->inode) ||
			strcmp(".", de->name) ||
			strcmp("..", de1->name)) {
		ext4_warning(inode->i_sb,
			     "bad directory (dir #%lu) - no `.' or `..'",
			     inode->i_ino);
		brelse(bh);
		return 1;
	}
	offset = ext4_rec_len_from_disk(de->rec_len, sb->s_blocksize) +
		 ext4_rec_len_from_disk(de1->rec_len, sb->s_blocksize);
	de = ext4_next_entry(de1, sb->s_blocksize);
	while (offset < inode->i_size) {
		if (!bh ||
		    (void *) de >= (void *) (bh->b_data+sb->s_blocksize)) {
			unsigned int lblock;
			err = 0;
			brelse(bh);
			lblock = offset >> EXT4_BLOCK_SIZE_BITS(sb);
			bh = ext4_read_dirblock(inode, lblock, EITHER);
			if (IS_ERR(bh))
				return 1;
			de = (struct ext4_dir_entry_2 *) bh->b_data;
		}
		if (ext4_check_dir_entry(inode, NULL, de, bh,
					 bh->b_data, bh->b_size, offset)) {
			de = (struct ext4_dir_entry_2 *)(bh->b_data +
							 sb->s_blocksize);
			offset = (offset | (sb->s_blocksize - 1)) + 1;
			continue;
		}
		if (le32_to_cpu(de->inode)) {
			brelse(bh);
			return 0;
		}
		offset += ext4_rec_len_from_disk(de->rec_len, sb->s_blocksize);
		de = ext4_next_entry(de, sb->s_blocksize);
	}
	brelse(bh);
	return 1;
}

/* ext4_orphan_add() links an unlinked or truncated inode into a list of
 * such inodes, starting at the superblock, in case we crash before the
 * file is closed/deleted, or in case the inode truncate spans multiple
 * transactions and the last transaction is not recovered after a crash.
 *
 * At filesystem recovery time, we walk this list deleting unlinked
 * inodes and truncating linked inodes in ext4_orphan_cleanup().
 */
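/*
 * On disk the list is singly linked: s_last_orphan in the superblock
 * points at the most recently orphaned inode, and each orphan's
 * NEXT_ORPHAN() field points at the next one.  ext4_orphan_del() below
 * unlinks an inode by re-pointing either the superblock or the previous
 * list entry, mirroring the in-memory s_orphan list.
 */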
int ext4_orphan_add(handle_t *handle, struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	struct ext4_iloc iloc;
	int err = 0, rc;

	if (!EXT4_SB(sb)->s_journal)
		return 0;

	mutex_lock(&EXT4_SB(sb)->s_orphan_lock);
	if (!list_empty(&EXT4_I(inode)->i_orphan))
		goto out_unlock;

	/*
	 * Orphan handling is only valid for files with data blocks
	 * being truncated, or files being unlinked. Note that we either
	 * hold i_mutex, or the inode can not be referenced from outside,
	 * so i_nlink should not be bumped due to race
	 */
	J_ASSERT((S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
		  S_ISLNK(inode->i_mode)) || inode->i_nlink == 0);

	BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get_write_access");
	err = ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh);
	if (err)
		goto out_unlock;

	err = ext4_reserve_inode_write(handle, inode, &iloc);
	if (err)
		goto out_unlock;
	/*
	 * Due to previous errors inode may be already a part of on-disk
	 * orphan list. If so skip on-disk list modification.
	 */
	if (NEXT_ORPHAN(inode) && NEXT_ORPHAN(inode) <=
		(le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count)))
			goto mem_insert;

	/* Insert this inode at the head of the on-disk orphan list... */
	NEXT_ORPHAN(inode) = le32_to_cpu(EXT4_SB(sb)->s_es->s_last_orphan);
	EXT4_SB(sb)->s_es->s_last_orphan = cpu_to_le32(inode->i_ino);
	err = ext4_handle_dirty_super(handle, sb);
	rc = ext4_mark_iloc_dirty(handle, inode, &iloc);
	if (!err)
		err = rc;

	/* Only add to the head of the in-memory list if all the
	 * previous operations succeeded.  If the orphan_add is going to
	 * fail (possibly taking the journal offline), we can't risk
	 * leaving the inode on the orphan list: stray orphan-list
	 * entries can cause panics at unmount time.
	 *
	 * This is safe: on error we're going to ignore the orphan list
	 * anyway on the next recovery. */
mem_insert:
	if (!err)
		list_add(&EXT4_I(inode)->i_orphan, &EXT4_SB(sb)->s_orphan);

	jbd_debug(4, "superblock will point to %lu\n", inode->i_ino);
	jbd_debug(4, "orphan inode %lu will point to %d\n",
			inode->i_ino, NEXT_ORPHAN(inode));
out_unlock:
	mutex_unlock(&EXT4_SB(sb)->s_orphan_lock);
	ext4_std_error(inode->i_sb, err);
	return err;
}

/*
 * ext4_orphan_del() removes an unlinked or truncated inode from the list
 * of such inodes stored on disk, because it is finally being cleaned up.
 */
int ext4_orphan_del(handle_t *handle, struct inode *inode)
{
	struct list_head *prev;
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_sb_info *sbi;
	__u32 ino_next;
	struct ext4_iloc iloc;
	int err = 0;

	if ((!EXT4_SB(inode->i_sb)->s_journal) &&
	    !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS))
		return 0;

	mutex_lock(&EXT4_SB(inode->i_sb)->s_orphan_lock);
	if (list_empty(&ei->i_orphan))
		goto out;

	ino_next = NEXT_ORPHAN(inode);
	prev = ei->i_orphan.prev;
	sbi = EXT4_SB(inode->i_sb);

	jbd_debug(4, "remove inode %lu from orphan list\n", inode->i_ino);

	list_del_init(&ei->i_orphan);

	/* If we're on an error path, we may not have a valid
	 * transaction handle with which to update the orphan list on
	 * disk, but we still need to remove the inode from the linked
	 * list in memory. */
	if (!handle)
		goto out;

	err = ext4_reserve_inode_write(handle, inode, &iloc);
	if (err)
		goto out_err;

	if (prev == &sbi->s_orphan) {
		jbd_debug(4, "superblock will point to %u\n", ino_next);
		BUFFER_TRACE(sbi->s_sbh, "get_write_access");
		err = ext4_journal_get_write_access(handle, sbi->s_sbh);
		if (err)
			goto out_brelse;
		sbi->s_es->s_last_orphan = cpu_to_le32(ino_next);
		err = ext4_handle_dirty_super(handle, inode->i_sb);
	} else {
		struct ext4_iloc iloc2;
		struct inode *i_prev =
			&list_entry(prev, struct ext4_inode_info, i_orphan)->vfs_inode;

		jbd_debug(4, "orphan inode %lu will point to %u\n",
			  i_prev->i_ino, ino_next);
		err = ext4_reserve_inode_write(handle, i_prev, &iloc2);
		if (err)
			goto out_brelse;
		NEXT_ORPHAN(i_prev) = ino_next;
		err = ext4_mark_iloc_dirty(handle, i_prev, &iloc2);
	}
	if (err)
		goto out_brelse;
	NEXT_ORPHAN(inode) = 0;
	err = ext4_mark_iloc_dirty(handle, inode, &iloc);

out_err:
	ext4_std_error(inode->i_sb, err);
out:
	mutex_unlock(&EXT4_SB(inode->i_sb)->s_orphan_lock);
	return err;

out_brelse:
	brelse(iloc.bh);
	goto out_err;
}

static int ext4_rmdir(struct inode *dir, struct dentry *dentry)
{
	int retval;
	struct inode *inode;
	struct buffer_head *bh;
	struct ext4_dir_entry_2 *de;
	handle_t *handle = NULL;

	/* Initialize quotas before so that eventual writes go in
	 * separate transaction */
	dquot_initialize(dir);
	dquot_initialize(dentry->d_inode);

	retval = -ENOENT;
	bh = ext4_find_entry(dir, &dentry->d_name, &de, NULL);
	if (!bh)
		goto end_rmdir;

	inode = dentry->d_inode;

	retval = -EIO;
	if (le32_to_cpu(de->inode) != inode->i_ino)
		goto end_rmdir;

	retval = -ENOTEMPTY;
	if (!empty_dir(inode))
		goto end_rmdir;

	handle = ext4_journal_start(dir, EXT4_HT_DIR,
				    EXT4_DATA_TRANS_BLOCKS(dir->i_sb));
	if (IS_ERR(handle)) {
		retval = PTR_ERR(handle);
		handle = NULL;
		goto end_rmdir;
	}

	if (IS_DIRSYNC(dir))
		ext4_handle_sync(handle);

	retval = ext4_delete_entry(handle, dir, de, bh);
	if (retval)
		goto end_rmdir;
	if (!EXT4_DIR_LINK_EMPTY(inode))
		ext4_warning(inode->i_sb,
			     "empty directory has too many links (%d)",
			     inode->i_nlink);
	inode->i_version++;
	clear_nlink(inode);
	/* There's no need to set i_disksize: the fact that i_nlink is
	 * zero will ensure that the right thing happens during any
	 * recovery. */
	inode->i_size = 0;
	ext4_orphan_add(handle, inode);
	inode->i_ctime = dir->i_ctime = dir->i_mtime = ext4_current_time(inode);
	ext4_mark_inode_dirty(handle, inode);
	ext4_dec_count(handle, dir);
	ext4_update_dx_flag(dir);
	ext4_mark_inode_dirty(handle, dir);

end_rmdir:
	brelse(bh);
	if (handle)
		ext4_journal_stop(handle);
	return retval;
}

static int ext4_unlink(struct inode *dir, struct dentry *dentry)
{
	int retval;
	struct inode *inode;
	struct buffer_head *bh;
	struct ext4_dir_entry_2 *de;
	handle_t *handle = NULL;

	trace_ext4_unlink_enter(dir, dentry);
	/* Initialize quotas before so that eventual writes go
	 * in separate transaction */
	dquot_initialize(dir);
	dquot_initialize(dentry->d_inode);

	retval = -ENOENT;
	bh = ext4_find_entry(dir, &dentry->d_name, &de, NULL);
	if (!bh)
		goto end_unlink;

	inode = dentry->d_inode;

	retval = -EIO;
	if (le32_to_cpu(de->inode) != inode->i_ino)
		goto end_unlink;

	handle = ext4_journal_start(dir, EXT4_HT_DIR,
				    EXT4_DATA_TRANS_BLOCKS(dir->i_sb));
	if (IS_ERR(handle)) {
		retval = PTR_ERR(handle);
		handle = NULL;
		goto end_unlink;
	}

	if (IS_DIRSYNC(dir))
		ext4_handle_sync(handle);

	if (!inode->i_nlink) {
		ext4_warning(inode->i_sb,
			     "Deleting nonexistent file (%lu), %d",
			     inode->i_ino, inode->i_nlink);
		set_nlink(inode, 1);
	}
	retval = ext4_delete_entry(handle, dir, de, bh);
	if (retval)
		goto end_unlink;
	dir->i_ctime = dir->i_mtime = ext4_current_time(dir);
	ext4_update_dx_flag(dir);
	ext4_mark_inode_dirty(handle, dir);
	drop_nlink(inode);
	if (!inode->i_nlink)
		ext4_orphan_add(handle, inode);
	inode->i_ctime = ext4_current_time(inode);
	ext4_mark_inode_dirty(handle, inode);
	retval = 0;

end_unlink:
	brelse(bh);
	if (handle)
		ext4_journal_stop(handle);
	trace_ext4_unlink_exit(dentry, retval);
	return retval;
}
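
/*
 * A symlink target that fits in the inode's i_data area (EXT4_N_BLOCKS * 4
 * bytes, normally 60, counting the trailing NUL) is stored as a "fast"
 * symlink directly in the inode.  Longer targets go through the page
 * cache, which requires stopping and restarting the transaction and
 * parking the inode on the orphan list so a crash in between cannot
 * leak it.
 */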

static int ext4_symlink(struct inode *dir,
			struct dentry *dentry, const char *symname)
{
	handle_t *handle;
	struct inode *inode;
	int l, err, retries = 0;
	int credits;

	l = strlen(symname)+1;
	if (l > dir->i_sb->s_blocksize)
		return -ENAMETOOLONG;

	dquot_initialize(dir);

	if (l > EXT4_N_BLOCKS * 4) {
		/*
		 * For non-fast symlinks, we just allocate inode and put it on
		 * orphan list in the first transaction => we need bitmap,
		 * group descriptor, sb, inode block, quota blocks, and
		 * possibly selinux xattr blocks.
		 */
		credits = 4 + EXT4_MAXQUOTAS_INIT_BLOCKS(dir->i_sb) +
			  EXT4_XATTR_TRANS_BLOCKS;
	} else {
		/*
		 * Fast symlink. We have to add entry to directory
		 * (EXT4_DATA_TRANS_BLOCKS + EXT4_INDEX_EXTRA_TRANS_BLOCKS),
		 * allocate new inode (bitmap, group descriptor, inode block,
		 * quota blocks, sb is already counted in previous macros).
		 */
		credits = EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
			  EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3;
	}
retry:
	inode = ext4_new_inode_start_handle(dir, S_IFLNK|S_IRWXUGO,
					    &dentry->d_name, 0, NULL,
					    EXT4_HT_DIR, credits);
	handle = ext4_journal_current_handle();
	err = PTR_ERR(inode);
	if (IS_ERR(inode))
		goto out_stop;

	if (l > EXT4_N_BLOCKS * 4) {
		inode->i_op = &ext4_symlink_inode_operations;
		ext4_set_aops(inode);
		/*
		 * We cannot call page_symlink() with transaction started
		 * because it calls into ext4_write_begin() which can wait
		 * for transaction commit if we are running out of space
		 * and thus we deadlock. So we have to stop transaction now
		 * and restart it when symlink contents is written.
		 *
		 * To keep fs consistent in case of crash, we have to put inode
		 * to orphan list in the mean time.
		 */
		drop_nlink(inode);
		err = ext4_orphan_add(handle, inode);
		ext4_journal_stop(handle);
		if (err)
			goto err_drop_inode;
		err = __page_symlink(inode, symname, l, 1);
		if (err)
			goto err_drop_inode;
		/*
		 * Now inode is being linked into dir (EXT4_DATA_TRANS_BLOCKS
		 * + EXT4_INDEX_EXTRA_TRANS_BLOCKS), inode is also modified
		 */
		handle = ext4_journal_start(dir, EXT4_HT_DIR,
				EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
				EXT4_INDEX_EXTRA_TRANS_BLOCKS + 1);
		if (IS_ERR(handle)) {
			err = PTR_ERR(handle);
			goto err_drop_inode;
		}
		set_nlink(inode, 1);
		err = ext4_orphan_del(handle, inode);
		if (err) {
			ext4_journal_stop(handle);
			clear_nlink(inode);
			goto err_drop_inode;
		}
	} else {
		/* clear the extent format for fast symlink */
		ext4_clear_inode_flag(inode, EXT4_INODE_EXTENTS);
		inode->i_op = &ext4_fast_symlink_inode_operations;
		memcpy((char *)&EXT4_I(inode)->i_data, symname, l);
		inode->i_size = l-1;
	}
	EXT4_I(inode)->i_disksize = inode->i_size;
	err = ext4_add_nondir(handle, dentry, inode);
	if (!err && IS_DIRSYNC(dir))
		ext4_handle_sync(handle);

out_stop:
	if (handle)
		ext4_journal_stop(handle);
	if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries))
		goto retry;
	return err;
err_drop_inode:
	unlock_new_inode(inode);
	iput(inode);
	return err;
}

static int ext4_link(struct dentry *old_dentry,
		     struct inode *dir, struct dentry *dentry)
{
	handle_t *handle;
	struct inode *inode = old_dentry->d_inode;
	int err, retries = 0;

	if (inode->i_nlink >= EXT4_LINK_MAX)
		return -EMLINK;

	dquot_initialize(dir);

retry:
	handle = ext4_journal_start(dir, EXT4_HT_DIR,
		(EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
		 EXT4_INDEX_EXTRA_TRANS_BLOCKS) + 1);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	if (IS_DIRSYNC(dir))
		ext4_handle_sync(handle);

	inode->i_ctime = ext4_current_time(inode);
	ext4_inc_count(handle, inode);
	ihold(inode);

	err = ext4_add_entry(handle, dentry, inode);
	if (!err) {
		ext4_mark_inode_dirty(handle, inode);
		/* this can happen only for tmpfile being
		 * linked the first time
		 */
		if (inode->i_nlink == 1)
			ext4_orphan_del(handle, inode);
		d_instantiate(dentry, inode);
	} else {
		drop_nlink(inode);
		iput(inode);
	}
	ext4_journal_stop(handle);
	if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries))
		goto retry;
	return err;
}

/*
 * Try to find the buffer head which contains the parent block.
 * It should be the inode block if it is inlined or the 1st block
 * if it is a normal dir.
 */
static struct buffer_head *ext4_get_first_dir_block(handle_t *handle,
					struct inode *inode,
					int *retval,
					struct ext4_dir_entry_2 **parent_de,
					int *inlined)
{
	struct buffer_head *bh;

	if (!ext4_has_inline_data(inode)) {
		bh = ext4_read_dirblock(inode, 0, EITHER);
		if (IS_ERR(bh)) {
			*retval = PTR_ERR(bh);
			return NULL;
		}
		*parent_de = ext4_next_entry(
					(struct ext4_dir_entry_2 *)bh->b_data,
					inode->i_sb->s_blocksize);
		return bh;
	}

	*inlined = 1;
	return ext4_get_first_inline_block(inode, parent_de, retval);
}

/*
 * Anybody can rename anything with this: the permission checks are left to the
 * higher-level routines.
 */
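/*
 * The sequence below: look up the old entry, add or overwrite the entry
 * in the new directory, delete the old entry (re-looking it up if an
 * htree split moved it), and, when a directory is moved between parents,
 * repoint its ".." entry and fix the parents' link counts.
 */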
static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
		       struct inode *new_dir, struct dentry *new_dentry)
{
	handle_t *handle;
	struct inode *old_inode, *new_inode;
	struct buffer_head *old_bh, *new_bh, *dir_bh;
	struct ext4_dir_entry_2 *old_de, *new_de;
	int retval, force_da_alloc = 0;
	int inlined = 0, new_inlined = 0;
	struct ext4_dir_entry_2 *parent_de;

	dquot_initialize(old_dir);
	dquot_initialize(new_dir);

	old_bh = new_bh = dir_bh = NULL;

	/* Initialize quotas before so that eventual writes go
	 * in separate transaction */
	if (new_dentry->d_inode)
		dquot_initialize(new_dentry->d_inode);
	handle = ext4_journal_start(old_dir, EXT4_HT_DIR,
		(2 * EXT4_DATA_TRANS_BLOCKS(old_dir->i_sb) +
		 EXT4_INDEX_EXTRA_TRANS_BLOCKS + 2));
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	if (IS_DIRSYNC(old_dir) || IS_DIRSYNC(new_dir))
		ext4_handle_sync(handle);

	old_bh = ext4_find_entry(old_dir, &old_dentry->d_name, &old_de, NULL);
	/*
	 *  Check for inode number is _not_ due to possible IO errors.
	 *  We might rmdir the source, keep it as pwd of some process
	 *  and merrily kill the link to whatever was created under the
	 *  same name. Goodbye sticky bit ;-<
	 */
	old_inode = old_dentry->d_inode;
	retval = -ENOENT;
	if (!old_bh || le32_to_cpu(old_de->inode) != old_inode->i_ino)
		goto end_rename;

	new_inode = new_dentry->d_inode;
	new_bh = ext4_find_entry(new_dir, &new_dentry->d_name,
				 &new_de, &new_inlined);
	if (new_bh) {
		if (!new_inode) {
			brelse(new_bh);
			new_bh = NULL;
		}
	}
	if (S_ISDIR(old_inode->i_mode)) {
		if (new_inode) {
			retval = -ENOTEMPTY;
			if (!empty_dir(new_inode))
				goto end_rename;
		}
		retval = -EIO;
		dir_bh = ext4_get_first_dir_block(handle, old_inode,
						  &retval, &parent_de,
						  &inlined);
		if (!dir_bh)
			goto end_rename;
		if (le32_to_cpu(parent_de->inode) != old_dir->i_ino)
			goto end_rename;
		retval = -EMLINK;
		if (!new_inode && new_dir != old_dir &&
		    EXT4_DIR_LINK_MAX(new_dir))
			goto end_rename;
		BUFFER_TRACE(dir_bh, "get_write_access");
		retval = ext4_journal_get_write_access(handle, dir_bh);
		if (retval)
			goto end_rename;
	}
	if (!new_bh) {
		retval = ext4_add_entry(handle, new_dentry, old_inode);
		if (retval)
			goto end_rename;
	} else {
		BUFFER_TRACE(new_bh, "get write access");
		retval = ext4_journal_get_write_access(handle, new_bh);
		if (retval)
			goto end_rename;
		new_de->inode = cpu_to_le32(old_inode->i_ino);
		if (EXT4_HAS_INCOMPAT_FEATURE(new_dir->i_sb,
					      EXT4_FEATURE_INCOMPAT_FILETYPE))
			new_de->file_type = old_de->file_type;
		new_dir->i_version++;
		new_dir->i_ctime = new_dir->i_mtime =
					ext4_current_time(new_dir);
		ext4_mark_inode_dirty(handle, new_dir);
		BUFFER_TRACE(new_bh, "call ext4_handle_dirty_metadata");
		if (!new_inlined) {
			retval = ext4_handle_dirty_dirent_node(handle,
							       new_dir, new_bh);
			if (unlikely(retval)) {
				ext4_std_error(new_dir->i_sb, retval);
				goto end_rename;
			}
		}
		brelse(new_bh);
		new_bh = NULL;
	}

	/*
	 * Like most other Unix systems, set the ctime for inodes on a
	 * rename.
	 */
	old_inode->i_ctime = ext4_current_time(old_inode);
	ext4_mark_inode_dirty(handle, old_inode);

	/*
	 * ok, that's it
	 */
	if (le32_to_cpu(old_de->inode) != old_inode->i_ino ||
	    old_de->name_len != old_dentry->d_name.len ||
	    strncmp(old_de->name, old_dentry->d_name.name, old_de->name_len) ||
	    (retval = ext4_delete_entry(handle, old_dir,
					old_de, old_bh)) == -ENOENT) {
		/* old_de could have moved from under us during htree split, so
		 * make sure that we are deleting the right entry.  We might
		 * also be pointing to a stale entry in the unused part of
		 * old_bh so just checking inum and the name isn't enough. */
		struct buffer_head *old_bh2;
		struct ext4_dir_entry_2 *old_de2;

		old_bh2 = ext4_find_entry(old_dir, &old_dentry->d_name,
					  &old_de2, NULL);
		if (old_bh2) {
			retval = ext4_delete_entry(handle, old_dir,
						   old_de2, old_bh2);
			brelse(old_bh2);
		}
	}
	if (retval) {
		ext4_warning(old_dir->i_sb,
				"Deleting old file (%lu), %d, error=%d",
				old_dir->i_ino, old_dir->i_nlink, retval);
	}

	if (new_inode) {
		ext4_dec_count(handle, new_inode);
		new_inode->i_ctime = ext4_current_time(new_inode);
	}
	old_dir->i_ctime = old_dir->i_mtime = ext4_current_time(old_dir);
	ext4_update_dx_flag(old_dir);
	if (dir_bh) {
		parent_de->inode = cpu_to_le32(new_dir->i_ino);
		BUFFER_TRACE(dir_bh, "call ext4_handle_dirty_metadata");
		if (!inlined) {
			if (is_dx(old_inode)) {
				retval = ext4_handle_dirty_dx_node(handle,
								   old_inode,
								   dir_bh);
			} else {
				retval = ext4_handle_dirty_dirent_node(handle,
							old_inode, dir_bh);
			}
		} else {
			retval = ext4_mark_inode_dirty(handle, old_inode);
		}
		if (retval) {
			ext4_std_error(old_dir->i_sb, retval);
			goto end_rename;
		}
		ext4_dec_count(handle, old_dir);
		if (new_inode) {
			/* checked empty_dir above, can't have another parent,
			 * ext4_dec_count() won't work for many-linked dirs */
			clear_nlink(new_inode);
		} else {
			ext4_inc_count(handle, new_dir);
			ext4_update_dx_flag(new_dir);
			ext4_mark_inode_dirty(handle, new_dir);
		}
	}
	ext4_mark_inode_dirty(handle, old_dir);
	if (new_inode) {
		ext4_mark_inode_dirty(handle, new_inode);
		if (!new_inode->i_nlink)
			ext4_orphan_add(handle, new_inode);
		if (!test_opt(new_dir->i_sb, NO_AUTO_DA_ALLOC))
			force_da_alloc = 1;
	}
	retval = 0;

end_rename:
	brelse(dir_bh);
	brelse(old_bh);
	brelse(new_bh);
	ext4_journal_stop(handle);
	if (retval == 0 && force_da_alloc)
		ext4_alloc_da_blocks(old_inode);
	return retval;
}

/*
 * directories can handle most operations...
 */
const struct inode_operations ext4_dir_inode_operations = {
	.create		= ext4_create,
	.lookup		= ext4_lookup,
	.link		= ext4_link,
	.unlink		= ext4_unlink,
	.symlink	= ext4_symlink,
	.mkdir		= ext4_mkdir,
	.rmdir		= ext4_rmdir,
	.mknod		= ext4_mknod,
	.tmpfile	= ext4_tmpfile,
	.rename		= ext4_rename,
	.setattr	= ext4_setattr,
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= ext4_listxattr,
	.removexattr	= generic_removexattr,
	.get_acl	= ext4_get_acl,
	.fiemap         = ext4_fiemap,
};

const struct inode_operations ext4_special_inode_operations = {
	.setattr	= ext4_setattr,
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= ext4_listxattr,
	.removexattr	= generic_removexattr,
	.get_acl	= ext4_get_acl,
};