/*
 *  linux/fs/ext4/namei.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/namei.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 *  Directory entry file type support and forward compatibility hooks
 *	for B-tree directories by Theodore Ts'o (tytso@mit.edu), 1998
 *  Hash Tree Directory indexing (c)
 *	Daniel Phillips, 2001
 *  Hash Tree Directory indexing porting
 *	Christopher Li, 2002
 *  Hash Tree Directory indexing cleanup
 *	Theodore Ts'o, 2002
 */

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/jbd2.h>
#include <linux/time.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include <linux/bio.h>
#include "ext4.h"
#include "ext4_jbd2.h"

#include "xattr.h"
#include "acl.h"

/*
 * define how far ahead to read directories while searching them.
 */
#define NAMEI_RA_CHUNKS  2
#define NAMEI_RA_BLOCKS  4
#define NAMEI_RA_SIZE	     (NAMEI_RA_CHUNKS * NAMEI_RA_BLOCKS)
#define NAMEI_RA_INDEX(c,b)  (((c) * NAMEI_RA_BLOCKS) + (b))
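
/*
 * With the values above, the lookup readahead in ext4_find_entry() pulls
 * in NAMEI_RA_SIZE = 2 * 4 = 8 directory blocks at a time (the size of
 * its bh_use[] array).
 */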

static struct buffer_head *ext4_append(handle_t *handle,
					struct inode *inode,
					ext4_lblk_t *block, int *err)
{
	struct buffer_head *bh;

	*block = inode->i_size >> inode->i_sb->s_blocksize_bits;

	bh = ext4_bread(handle, inode, *block, 1, err);
	if (bh) {
		inode->i_size += inode->i_sb->s_blocksize;
		EXT4_I(inode)->i_disksize = inode->i_size;
		*err = ext4_journal_get_write_access(handle, bh);
		if (*err) {
			brelse(bh);
			bh = NULL;
		}
	}
	return bh;
}

#ifndef assert
#define assert(test) J_ASSERT(test)
#endif

#ifdef DX_DEBUG
#define dxtrace(command) command
#else
#define dxtrace(command)
#endif

struct fake_dirent
{
	__le32 inode;
	__le16 rec_len;
	u8 name_len;
	u8 file_type;
};
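
/*
 * The htree structures below are laid out so that an index block still
 * looks like an ordinary directory block to code that does not understand
 * them: a fake_dirent with inode == 0 and a rec_len spanning the block
 * makes an index node appear as one large empty entry (see the dx_root
 * and dx_node definitions below, and make_indexed_dir()).
 */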

struct dx_countlimit
{
	__le16 limit;
	__le16 count;
};

struct dx_entry
{
	__le32 hash;
	__le32 block;
};
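
/*
 * The count/limit pair of an index block is not stored separately:
 * dx_get_count() and dx_get_limit() below overlay a dx_countlimit on the
 * first dx_entry, i.e. the hash field of entry 0 holds limit and count,
 * and its block field maps hash values below the first real key.
 */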

/*
 * dx_root_info is laid out so that if it should somehow get overlaid by a
 * dirent the two low bits of the hash version will be zero.  Therefore, the
 * hash version mod 4 should never be 0.  Sincerely, the paranoia department.
 */

struct dx_root
{
	struct fake_dirent dot;
	char dot_name[4];
	struct fake_dirent dotdot;
	char dotdot_name[4];
	struct dx_root_info
	{
		__le32 reserved_zero;
		u8 hash_version;
		u8 info_length; /* 8 */
		u8 indirect_levels;
		u8 unused_flags;
	}
	info;
	struct dx_entry	entries[0];
};

struct dx_node
{
	struct fake_dirent fake;
	struct dx_entry	entries[0];
};


struct dx_frame
{
	struct buffer_head *bh;
	struct dx_entry *entries;
	struct dx_entry *at;
};

struct dx_map_entry
{
	u32 hash;
	u16 offs;
	u16 size;
};

static inline ext4_lblk_t dx_get_block(struct dx_entry *entry);
static void dx_set_block(struct dx_entry *entry, ext4_lblk_t value);
static inline unsigned dx_get_hash(struct dx_entry *entry);
static void dx_set_hash(struct dx_entry *entry, unsigned value);
static unsigned dx_get_count(struct dx_entry *entries);
static unsigned dx_get_limit(struct dx_entry *entries);
static void dx_set_count(struct dx_entry *entries, unsigned value);
static void dx_set_limit(struct dx_entry *entries, unsigned value);
static unsigned dx_root_limit(struct inode *dir, unsigned infosize);
static unsigned dx_node_limit(struct inode *dir);
static struct dx_frame *dx_probe(const struct qstr *d_name,
				 struct inode *dir,
				 struct dx_hash_info *hinfo,
				 struct dx_frame *frame,
				 int *err);
static void dx_release(struct dx_frame *frames);
static int dx_make_map(struct ext4_dir_entry_2 *de, unsigned blocksize,
		       struct dx_hash_info *hinfo, struct dx_map_entry map[]);
static void dx_sort_map(struct dx_map_entry *map, unsigned count);
static struct ext4_dir_entry_2 *dx_move_dirents(char *from, char *to,
		struct dx_map_entry *offsets, int count, unsigned blocksize);
static struct ext4_dir_entry_2* dx_pack_dirents(char *base, unsigned blocksize);
static void dx_insert_block(struct dx_frame *frame,
					u32 hash, ext4_lblk_t block);
static int ext4_htree_next_block(struct inode *dir, __u32 hash,
				 struct dx_frame *frame,
				 struct dx_frame *frames,
				 __u32 *start_hash);
static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
		const struct qstr *d_name,
		struct ext4_dir_entry_2 **res_dir,
		int *err);
static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry,
			     struct inode *inode);

unsigned int ext4_rec_len_from_disk(__le16 dlen, unsigned blocksize)
{
	unsigned len = le16_to_cpu(dlen);

	if (len == EXT4_MAX_REC_LEN || len == 0)
		return blocksize;
	return (len & 65532) | ((len & 3) << 16);
}
  
__le16 ext4_rec_len_to_disk(unsigned len, unsigned blocksize)
{
	if ((len > blocksize) || (blocksize > (1 << 18)) || (len & 3))
		BUG();
	if (len < 65536)
		return cpu_to_le16(len);
	if (len == blocksize) {
		if (blocksize == 65536)
			return cpu_to_le16(EXT4_MAX_REC_LEN);
		else 
			return cpu_to_le16(0);
	}
	return cpu_to_le16((len & 65532) | ((len >> 16) & 3));
}
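
/*
 * Worked example of the encoding above, for a 64KB-block filesystem: a
 * dirent covering a whole block has len == blocksize == 65536, which does
 * not fit in 16 bits, so it is stored as EXT4_MAX_REC_LEN (and mapped
 * back to the block size by ext4_rec_len_from_disk()).  The final branch,
 * (len & 65532) | ((len >> 16) & 3), folds bits 16-17 of the length into
 * the two low bits (always zero, since rec_len is a multiple of 4) and
 * would cover block sizes up to the 1 << 18 limit checked above; e.g.
 * 66000 would be stored as 464 | 1 = 465 and decoded back as
 * 464 | (1 << 16) = 66000.
 */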

/*
 * p is at least 6 bytes before the end of page
 */
static inline struct ext4_dir_entry_2 *
ext4_next_entry(struct ext4_dir_entry_2 *p, unsigned long blocksize)
{
	return (struct ext4_dir_entry_2 *)((char *)p +
		ext4_rec_len_from_disk(p->rec_len, blocksize));
}

/*
 * Future: use high four bits of block for coalesce-on-delete flags
 * Mask them off for now.
 */

static inline ext4_lblk_t dx_get_block(struct dx_entry *entry)
{
	return le32_to_cpu(entry->block) & 0x00ffffff;
}

static inline void dx_set_block(struct dx_entry *entry, ext4_lblk_t value)
{
	entry->block = cpu_to_le32(value);
}

static inline unsigned dx_get_hash(struct dx_entry *entry)
{
	return le32_to_cpu(entry->hash);
}

static inline void dx_set_hash(struct dx_entry *entry, unsigned value)
{
	entry->hash = cpu_to_le32(value);
}

static inline unsigned dx_get_count(struct dx_entry *entries)
{
	return le16_to_cpu(((struct dx_countlimit *) entries)->count);
}

static inline unsigned dx_get_limit(struct dx_entry *entries)
{
	return le16_to_cpu(((struct dx_countlimit *) entries)->limit);
}

static inline void dx_set_count(struct dx_entry *entries, unsigned value)
{
	((struct dx_countlimit *) entries)->count = cpu_to_le16(value);
}

static inline void dx_set_limit(struct dx_entry *entries, unsigned value)
{
	((struct dx_countlimit *) entries)->limit = cpu_to_le16(value);
}

static inline unsigned dx_root_limit(struct inode *dir, unsigned infosize)
{
	unsigned entry_space = dir->i_sb->s_blocksize - EXT4_DIR_REC_LEN(1) -
		EXT4_DIR_REC_LEN(2) - infosize;
	return entry_space / sizeof(struct dx_entry);
}

static inline unsigned dx_node_limit(struct inode *dir)
{
	unsigned entry_space = dir->i_sb->s_blocksize - EXT4_DIR_REC_LEN(0);
	return entry_space / sizeof(struct dx_entry);
}
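
/*
 * For illustration, with a 4KB block and the 8-byte dx_root_info used
 * here (and assuming the usual 4-byte rounding of EXT4_DIR_REC_LEN, i.e.
 * 12 bytes each for the "." and ".." entries and 8 bytes for a name-less
 * entry), this works out to (4096 - 12 - 12 - 8) / 8 = 508 dx_entries in
 * the root block and (4096 - 8) / 8 = 511 in each interior node.
 */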

/*
 * Debug
 */
#ifdef DX_DEBUG
static void dx_show_index(char * label, struct dx_entry *entries)
{
	int i, n = dx_get_count (entries);
	printk(KERN_DEBUG "%s index ", label);
	for (i = 0; i < n; i++) {
		printk("%x->%lu ", i ? dx_get_hash(entries + i) :
				0, (unsigned long)dx_get_block(entries + i));
	}
	printk("\n");
}

struct stats
{
	unsigned names;
	unsigned space;
	unsigned bcount;
};

static struct stats dx_show_leaf(struct dx_hash_info *hinfo, struct ext4_dir_entry_2 *de,
				 int size, int show_names)
{
	unsigned names = 0, space = 0;
	char *base = (char *) de;
	struct dx_hash_info h = *hinfo;

	printk("names: ");
	while ((char *) de < base + size)
	{
		if (de->inode)
		{
			if (show_names)
			{
				int len = de->name_len;
				char *name = de->name;
				while (len--) printk("%c", *name++);
				ext4fs_dirhash(de->name, de->name_len, &h);
				printk(":%x.%u ", h.hash,
				       ((char *) de - base));
			}
			space += EXT4_DIR_REC_LEN(de->name_len);
			names++;
		}
		de = ext4_next_entry(de, size);
	}
	printk("(%i)\n", names);
	return (struct stats) { names, space, 1 };
}

struct stats dx_show_entries(struct dx_hash_info *hinfo, struct inode *dir,
			     struct dx_entry *entries, int levels)
{
	unsigned blocksize = dir->i_sb->s_blocksize;
	unsigned count = dx_get_count(entries), names = 0, space = 0, i;
	unsigned bcount = 0;
	struct buffer_head *bh;
	int err;
	printk("%i indexed blocks...\n", count);
	for (i = 0; i < count; i++, entries++)
	{
		ext4_lblk_t block = dx_get_block(entries);
		ext4_lblk_t hash  = i ? dx_get_hash(entries): 0;
		u32 range = i < count - 1? (dx_get_hash(entries + 1) - hash): ~hash;
		struct stats stats;
		printk("%s%3u:%03u hash %8x/%8x ",levels?"":"   ", i, block, hash, range);
		if (!(bh = ext4_bread (NULL,dir, block, 0,&err))) continue;
		stats = levels?
		   dx_show_entries(hinfo, dir, ((struct dx_node *) bh->b_data)->entries, levels - 1):
		   dx_show_leaf(hinfo, (struct ext4_dir_entry_2 *) bh->b_data, blocksize, 0);
		names += stats.names;
		space += stats.space;
		bcount += stats.bcount;
		brelse(bh);
	}
	if (bcount)
		printk(KERN_DEBUG "%snames %u, fullness %u (%u%%)\n", 
		       levels ? "" : "   ", names, space/bcount,
		       (space/bcount)*100/blocksize);
	return (struct stats) { names, space, bcount};
}
#endif /* DX_DEBUG */

/*
 * Probe for a directory leaf block to search.
 *
 * dx_probe can return ERR_BAD_DX_DIR, which means there was a format
 * error in the directory index, and the caller should fall back to
 * searching the directory normally.  The callers of dx_probe **MUST**
 * check for this error code, and make sure it never gets reflected
 * back to userspace.
 */
static struct dx_frame *
dx_probe(const struct qstr *d_name, struct inode *dir,
	 struct dx_hash_info *hinfo, struct dx_frame *frame_in, int *err)
{
	unsigned count, indirect;
	struct dx_entry *at, *entries, *p, *q, *m;
	struct dx_root *root;
	struct buffer_head *bh;
	struct dx_frame *frame = frame_in;
	u32 hash;

	frame->bh = NULL;
	if (!(bh = ext4_bread (NULL,dir, 0, 0, err)))
		goto fail;
	root = (struct dx_root *) bh->b_data;
	if (root->info.hash_version != DX_HASH_TEA &&
	    root->info.hash_version != DX_HASH_HALF_MD4 &&
	    root->info.hash_version != DX_HASH_LEGACY) {
		ext4_warning(dir->i_sb, __func__,
			     "Unrecognised inode hash code %d",
			     root->info.hash_version);
		brelse(bh);
		*err = ERR_BAD_DX_DIR;
		goto fail;
	}
	hinfo->hash_version = root->info.hash_version;
	if (hinfo->hash_version <= DX_HASH_TEA)
		hinfo->hash_version += EXT4_SB(dir->i_sb)->s_hash_unsigned;
	hinfo->seed = EXT4_SB(dir->i_sb)->s_hash_seed;
	if (d_name)
		ext4fs_dirhash(d_name->name, d_name->len, hinfo);
	hash = hinfo->hash;

	if (root->info.unused_flags & 1) {
		ext4_warning(dir->i_sb, __func__,
			     "Unimplemented inode hash flags: %#06x",
			     root->info.unused_flags);
		brelse(bh);
		*err = ERR_BAD_DX_DIR;
		goto fail;
	}

	if ((indirect = root->info.indirect_levels) > 1) {
		ext4_warning(dir->i_sb, __func__,
			     "Unimplemented inode hash depth: %#06x",
			     root->info.indirect_levels);
		brelse(bh);
		*err = ERR_BAD_DX_DIR;
		goto fail;
	}

	entries = (struct dx_entry *) (((char *)&root->info) +
				       root->info.info_length);

	if (dx_get_limit(entries) != dx_root_limit(dir,
						   root->info.info_length)) {
		ext4_warning(dir->i_sb, __func__,
			     "dx entry: limit != root limit");
		brelse(bh);
		*err = ERR_BAD_DX_DIR;
		goto fail;
	}

	dxtrace(printk("Look up %x", hash));
	while (1)
	{
		count = dx_get_count(entries);
		if (!count || count > dx_get_limit(entries)) {
			ext4_warning(dir->i_sb, __func__,
				     "dx entry: no count or count > limit");
			brelse(bh);
			*err = ERR_BAD_DX_DIR;
			goto fail2;
		}

		p = entries + 1;
		q = entries + count - 1;
		while (p <= q)
		{
			m = p + (q - p)/2;
			dxtrace(printk("."));
			if (dx_get_hash(m) > hash)
				q = m - 1;
			else
				p = m + 1;
		}

		if (0) // linear search cross check
		{
			unsigned n = count - 1;
			at = entries;
			while (n--)
			{
				dxtrace(printk(","));
				if (dx_get_hash(++at) > hash)
				{
					at--;
					break;
				}
			}
			assert (at == p - 1);
		}

		at = p - 1;
		dxtrace(printk(" %x->%u\n", at == entries? 0: dx_get_hash(at), dx_get_block(at)));
		frame->bh = bh;
		frame->entries = entries;
		frame->at = at;
		if (!indirect--) return frame;
		if (!(bh = ext4_bread (NULL,dir, dx_get_block(at), 0, err)))
			goto fail2;
		at = entries = ((struct dx_node *) bh->b_data)->entries;
		if (dx_get_limit(entries) != dx_node_limit (dir)) {
			ext4_warning(dir->i_sb, __func__,
				     "dx entry: limit != node limit");
			brelse(bh);
			*err = ERR_BAD_DX_DIR;
			goto fail2;
		}
		frame++;
		frame->bh = NULL;
	}
fail2:
	while (frame >= frame_in) {
		brelse(frame->bh);
		frame--;
	}
fail:
	if (*err == ERR_BAD_DX_DIR)
		ext4_warning(dir->i_sb, __func__,
			     "Corrupt dir inode %ld, running e2fsck is "
			     "recommended.", dir->i_ino);
	return NULL;
}

static void dx_release (struct dx_frame *frames)
{
	if (frames[0].bh == NULL)
		return;

	if (((struct dx_root *) frames[0].bh->b_data)->info.indirect_levels)
		brelse(frames[1].bh);
	brelse(frames[0].bh);
}

/*
 * This function increments the frame pointer to search the next leaf
 * block, and reads in the necessary intervening nodes if the search
 * should be necessary.  Whether or not the search is necessary is
 * controlled by the hash parameter.  If the hash value is even, then
 * the search is only continued if the next block starts with that
 * hash value.  This is used if we are searching for a specific file.
 *
 * If the hash value is HASH_NB_ALWAYS, then always go to the next block.
 *
 * This function returns 1 if the caller should continue to search,
 * or 0 if it should not.  If there is an error reading one of the
 * index blocks, it will return a negative error code.
 *
 * If start_hash is non-null, it will be filled in with the starting
 * hash of the next page.
 */
static int ext4_htree_next_block(struct inode *dir, __u32 hash,
				 struct dx_frame *frame,
				 struct dx_frame *frames,
				 __u32 *start_hash)
{
	struct dx_frame *p;
	struct buffer_head *bh;
	int err, num_frames = 0;
	__u32 bhash;

	p = frame;
	/*
	 * Find the next leaf page by incrementing the frame pointer.
	 * If we run out of entries in the interior node, loop around and
	 * increment pointer in the parent node.  When we break out of
	 * this loop, num_frames indicates the number of interior
	 * nodes need to be read.
	 */
	while (1) {
		if (++(p->at) < p->entries + dx_get_count(p->entries))
			break;
		if (p == frames)
			return 0;
		num_frames++;
		p--;
	}

	/*
	 * If the hash is 1, then continue only if the next page has a
	 * continuation hash of any value.  This is used for readdir
	 * handling.  Otherwise, check to see if the hash matches the
 * desired continuation hash.  If it doesn't, return since
	 * there's no point to read in the successive index pages.
	 */
	bhash = dx_get_hash(p->at);
	if (start_hash)
		*start_hash = bhash;
	if ((hash & 1) == 0) {
		if ((bhash & ~1) != hash)
			return 0;
	}
	/*
	 * If the hash is HASH_NB_ALWAYS, we always go to the next
	 * block so no check is necessary
	 */
	while (num_frames--) {
		if (!(bh = ext4_bread(NULL, dir, dx_get_block(p->at),
				      0, &err)))
			return err; /* Failure */
		p++;
		brelse(p->bh);
		p->bh = bh;
		p->at = p->entries = ((struct dx_node *) bh->b_data)->entries;
	}
	return 1;
}


/*
 * This function fills a red-black tree with information from a
 * directory block.  It returns the number of directory entries loaded
 * into the tree.  If there is an error it is returned in err.
 */
static int htree_dirblock_to_tree(struct file *dir_file,
				  struct inode *dir, ext4_lblk_t block,
				  struct dx_hash_info *hinfo,
				  __u32 start_hash, __u32 start_minor_hash)
{
	struct buffer_head *bh;
	struct ext4_dir_entry_2 *de, *top;
	int err, count = 0;

	dxtrace(printk(KERN_INFO "In htree dirblock_to_tree: block %lu\n",
							(unsigned long)block));
	if (!(bh = ext4_bread (NULL, dir, block, 0, &err)))
		return err;

	de = (struct ext4_dir_entry_2 *) bh->b_data;
	top = (struct ext4_dir_entry_2 *) ((char *) de +
					   dir->i_sb->s_blocksize -
					   EXT4_DIR_REC_LEN(0));
	for (; de < top; de = ext4_next_entry(de, dir->i_sb->s_blocksize)) {
		if (!ext4_check_dir_entry("htree_dirblock_to_tree", dir, de, bh,
					(block<<EXT4_BLOCK_SIZE_BITS(dir->i_sb))
						+((char *)de - bh->b_data))) {
			/* On error, skip the f_pos to the next block. */
			dir_file->f_pos = (dir_file->f_pos |
					(dir->i_sb->s_blocksize - 1)) + 1;
			brelse(bh);
			return count;
		}
		ext4fs_dirhash(de->name, de->name_len, hinfo);
		if ((hinfo->hash < start_hash) ||
		    ((hinfo->hash == start_hash) &&
		     (hinfo->minor_hash < start_minor_hash)))
			continue;
		if (de->inode == 0)
			continue;
		if ((err = ext4_htree_store_dirent(dir_file,
				   hinfo->hash, hinfo->minor_hash, de)) != 0) {
			brelse(bh);
			return err;
		}
		count++;
	}
	brelse(bh);
	return count;
}


/*
 * This function fills a red-black tree with information from a
 * directory.  We start scanning the directory in hash order, starting
 * at start_hash and start_minor_hash.
 *
 * This function returns the number of entries inserted into the tree,
 * or a negative error code.
 */
int ext4_htree_fill_tree(struct file *dir_file, __u32 start_hash,
			 __u32 start_minor_hash, __u32 *next_hash)
{
	struct dx_hash_info hinfo;
	struct ext4_dir_entry_2 *de;
	struct dx_frame frames[2], *frame;
	struct inode *dir;
	ext4_lblk_t block;
	int count = 0;
	int ret, err;
	__u32 hashval;

	dxtrace(printk(KERN_DEBUG "In htree_fill_tree, start hash: %x:%x\n", 
		       start_hash, start_minor_hash));
	dir = dir_file->f_path.dentry->d_inode;
	if (!(EXT4_I(dir)->i_flags & EXT4_INDEX_FL)) {
		hinfo.hash_version = EXT4_SB(dir->i_sb)->s_def_hash_version;
		if (hinfo.hash_version <= DX_HASH_TEA)
			hinfo.hash_version +=
				EXT4_SB(dir->i_sb)->s_hash_unsigned;
		hinfo.seed = EXT4_SB(dir->i_sb)->s_hash_seed;
		count = htree_dirblock_to_tree(dir_file, dir, 0, &hinfo,
					       start_hash, start_minor_hash);
		*next_hash = ~0;
		return count;
	}
	hinfo.hash = start_hash;
	hinfo.minor_hash = 0;
	frame = dx_probe(NULL, dir, &hinfo, frames, &err);
	if (!frame)
		return err;

	/* Add '.' and '..' from the htree header */
	if (!start_hash && !start_minor_hash) {
		de = (struct ext4_dir_entry_2 *) frames[0].bh->b_data;
		if ((err = ext4_htree_store_dirent(dir_file, 0, 0, de)) != 0)
			goto errout;
		count++;
	}
	if (start_hash < 2 || (start_hash ==2 && start_minor_hash==0)) {
		de = (struct ext4_dir_entry_2 *) frames[0].bh->b_data;
		de = ext4_next_entry(de, dir->i_sb->s_blocksize);
		if ((err = ext4_htree_store_dirent(dir_file, 2, 0, de)) != 0)
			goto errout;
		count++;
	}

	while (1) {
		block = dx_get_block(frame->at);
		ret = htree_dirblock_to_tree(dir_file, dir, block, &hinfo,
					     start_hash, start_minor_hash);
		if (ret < 0) {
			err = ret;
			goto errout;
		}
		count += ret;
		hashval = ~0;
		ret = ext4_htree_next_block(dir, HASH_NB_ALWAYS,
					    frame, frames, &hashval);
		*next_hash = hashval;
		if (ret < 0) {
			err = ret;
			goto errout;
		}
		/*
		 * Stop if:  (a) there are no more entries, or
		 * (b) we have inserted at least one entry and the
		 * next hash value is not a continuation
		 */
		if ((ret == 0) ||
		    (count && ((hashval & 1) == 0)))
			break;
	}
	dx_release(frames);
	dxtrace(printk(KERN_DEBUG "Fill tree: returned %d entries, "
		       "next hash: %x\n", count, *next_hash));
	return count;
errout:
	dx_release(frames);
	return (err);
}


/*
 * Directory block splitting, compacting
 */

/*
 * Create map of hash values, offsets, and sizes, stored at end of block.
 * Returns number of entries mapped.
 */
static int dx_make_map(struct ext4_dir_entry_2 *de, unsigned blocksize,
		       struct dx_hash_info *hinfo,
		       struct dx_map_entry *map_tail)
{
	int count = 0;
	char *base = (char *) de;
	struct dx_hash_info h = *hinfo;

	while ((char *) de < base + blocksize) {
		if (de->name_len && de->inode) {
			ext4fs_dirhash(de->name, de->name_len, &h);
			map_tail--;
			map_tail->hash = h.hash;
			map_tail->offs = ((char *) de - base)>>2;
			map_tail->size = le16_to_cpu(de->rec_len);
			count++;
			cond_resched();
		}
		/* XXX: do we need to check rec_len == 0 case? -Chris */
		de = ext4_next_entry(de, blocksize);
	}
	return count;
}
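
/*
 * Note that dx_make_map() stores each entry's offset shifted right by two:
 * dirents are 4-byte aligned, so this lets the 16-bit offs field address a
 * block of up to 64KB; dx_move_dirents() undoes the shift with offs << 2.
 */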

/* Sort map by hash value */
static void dx_sort_map (struct dx_map_entry *map, unsigned count)
{
	struct dx_map_entry *p, *q, *top = map + count - 1;
	int more;
	/* Combsort until bubble sort doesn't suck */
	while (count > 2) {
		count = count*10/13;
		if (count - 9 < 2) /* 9, 10 -> 11 */
			count = 11;
		for (p = top, q = p - count; q >= map; p--, q--)
			if (p->hash < q->hash)
				swap(*p, *q);
	}
	/* Garden variety bubble sort */
	do {
		more = 0;
		q = top;
		while (q-- > map) {
			if (q[1].hash >= q[0].hash)
				continue;
			swap(*(q+1), *q);
			more = 1;
		}
	} while(more);
}

static void dx_insert_block(struct dx_frame *frame, u32 hash, ext4_lblk_t block)
{
	struct dx_entry *entries = frame->entries;
	struct dx_entry *old = frame->at, *new = old + 1;
	int count = dx_get_count(entries);

	assert(count < dx_get_limit(entries));
	assert(old < entries + count);
	memmove(new + 1, new, (char *)(entries + count) - (char *)(new));
	dx_set_hash(new, hash);
	dx_set_block(new, block);
	dx_set_count(entries, count + 1);
}

static void ext4_update_dx_flag(struct inode *inode)
{
	if (!EXT4_HAS_COMPAT_FEATURE(inode->i_sb,
				     EXT4_FEATURE_COMPAT_DIR_INDEX))
		EXT4_I(inode)->i_flags &= ~EXT4_INDEX_FL;
}

/*
 * NOTE! unlike strncmp, ext4_match returns 1 for success, 0 for failure.
 *
 * `len <= EXT4_NAME_LEN' is guaranteed by caller.
 * `de != NULL' is guaranteed by caller.
 */
static inline int ext4_match (int len, const char * const name,
			      struct ext4_dir_entry_2 * de)
{
	if (len != de->name_len)
		return 0;
	if (!de->inode)
		return 0;
	return !memcmp(name, de->name, len);
}

/*
 * Returns 0 if not found, -1 on failure, and 1 on success
 */
static inline int search_dirblock(struct buffer_head *bh,
				  struct inode *dir,
				  const struct qstr *d_name,
				  unsigned int offset,
				  struct ext4_dir_entry_2 ** res_dir)
{
	struct ext4_dir_entry_2 * de;
	char * dlimit;
	int de_len;
	const char *name = d_name->name;
	int namelen = d_name->len;

	de = (struct ext4_dir_entry_2 *) bh->b_data;
	dlimit = bh->b_data + dir->i_sb->s_blocksize;
	while ((char *) de < dlimit) {
		/* this code is executed quadratically often */
		/* do minimal checking `by hand' */

		if ((char *) de + namelen <= dlimit &&
		    ext4_match (namelen, name, de)) {
			/* found a match - just to be sure, do a full check */
			if (!ext4_check_dir_entry("ext4_find_entry",
						  dir, de, bh, offset))
				return -1;
			*res_dir = de;
			return 1;
		}
		/* prevent looping on a bad block */
		de_len = ext4_rec_len_from_disk(de->rec_len,
						dir->i_sb->s_blocksize);
		if (de_len <= 0)
			return -1;
		offset += de_len;
		de = (struct ext4_dir_entry_2 *) ((char *) de + de_len);
	}
	return 0;
}


/*
 *	ext4_find_entry()
 *
 * finds an entry in the specified directory with the wanted name. It
 * returns the cache buffer in which the entry was found, and the entry
 * itself (as a parameter - res_dir). It does NOT read the inode of the
 * entry - you'll have to do that yourself if you want to.
 *
 * The returned buffer_head has ->b_count elevated.  The caller is expected
 * to brelse() it when appropriate.
 */
static struct buffer_head * ext4_find_entry (struct inode *dir,
					const struct qstr *d_name,
					struct ext4_dir_entry_2 ** res_dir)
{
	struct super_block *sb;
	struct buffer_head *bh_use[NAMEI_RA_SIZE];
	struct buffer_head *bh, *ret = NULL;
	ext4_lblk_t start, block, b;
	int ra_max = 0;		/* Number of bh's in the readahead
				   buffer, bh_use[] */
	int ra_ptr = 0;		/* Current index into readahead
				   buffer */
	int num = 0;
	ext4_lblk_t  nblocks;
	int i, err;
	int namelen;

	*res_dir = NULL;
	sb = dir->i_sb;
	namelen = d_name->len;
	if (namelen > EXT4_NAME_LEN)
		return NULL;
	if (is_dx(dir)) {
		bh = ext4_dx_find_entry(dir, d_name, res_dir, &err);
		/*
		 * On success, or if the error was file not found,
		 * return.  Otherwise, fall back to doing a search the
		 * old fashioned way.
		 */
		if (bh || (err != ERR_BAD_DX_DIR))
			return bh;
		dxtrace(printk(KERN_DEBUG "ext4_find_entry: dx failed, "
			       "falling back\n"));
	}
	nblocks = dir->i_size >> EXT4_BLOCK_SIZE_BITS(sb);
	start = EXT4_I(dir)->i_dir_start_lookup;
	if (start >= nblocks)
		start = 0;
	block = start;
restart:
	do {
		/*
		 * We deal with the read-ahead logic here.
		 */
		if (ra_ptr >= ra_max) {
			/* Refill the readahead buffer */
			ra_ptr = 0;
			b = block;
			for (ra_max = 0; ra_max < NAMEI_RA_SIZE; ra_max++) {
				/*
				 * Terminate if we reach the end of the
				 * directory and must wrap, or if our
				 * search has finished at this block.
				 */
				if (b >= nblocks || (num && block == start)) {
					bh_use[ra_max] = NULL;
					break;
				}
				num++;
				bh = ext4_getblk(NULL, dir, b++, 0, &err);
				bh_use[ra_max] = bh;
				if (bh)
					ll_rw_block(READ_META, 1, &bh);
			}
		}
		if ((bh = bh_use[ra_ptr++]) == NULL)
			goto next;
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh)) {
			/* read error, skip block & hope for the best */
			ext4_error(sb, __func__, "reading directory #%lu "
				   "offset %lu", dir->i_ino,
				   (unsigned long)block);
			brelse(bh);
			goto next;
		}
		i = search_dirblock(bh, dir, d_name,
			    block << EXT4_BLOCK_SIZE_BITS(sb), res_dir);
		if (i == 1) {
			EXT4_I(dir)->i_dir_start_lookup = block;
			ret = bh;
			goto cleanup_and_exit;
		} else {
			brelse(bh);
			if (i < 0)
				goto cleanup_and_exit;
		}
	next:
		if (++block >= nblocks)
			block = 0;
	} while (block != start);

	/*
	 * If the directory has grown while we were searching, then
	 * search the last part of the directory before giving up.
	 */
	block = nblocks;
	nblocks = dir->i_size >> EXT4_BLOCK_SIZE_BITS(sb);
	if (block < nblocks) {
		start = 0;
		goto restart;
	}

cleanup_and_exit:
	/* Clean up the read-ahead blocks */
	for (; ra_ptr < ra_max; ra_ptr++)
		brelse(bh_use[ra_ptr]);
	return ret;
}

static struct buffer_head * ext4_dx_find_entry(struct inode *dir, const struct qstr *d_name,
		       struct ext4_dir_entry_2 **res_dir, int *err)
{
	struct super_block * sb;
	struct dx_hash_info	hinfo;
	u32 hash;
	struct dx_frame frames[2], *frame;
	struct ext4_dir_entry_2 *de, *top;
	struct buffer_head *bh;
	ext4_lblk_t block;
	int retval;
	int namelen = d_name->len;
	const u8 *name = d_name->name;

	sb = dir->i_sb;
	/* NFS may look up ".." - look at dx_root directory block */
	if (namelen > 2 || name[0] != '.'||(name[1] != '.' && name[1] != '\0')){
		if (!(frame = dx_probe(d_name, dir, &hinfo, frames, err)))
			return NULL;
	} else {
		frame = frames;
		frame->bh = NULL;			/* for dx_release() */
		frame->at = (struct dx_entry *)frames;	/* hack for zero entry*/
		dx_set_block(frame->at, 0);		/* dx_root block is 0 */
	}
	hash = hinfo.hash;
	do {
		block = dx_get_block(frame->at);
		if (!(bh = ext4_bread (NULL,dir, block, 0, err)))
			goto errout;
		de = (struct ext4_dir_entry_2 *) bh->b_data;
		top = (struct ext4_dir_entry_2 *) ((char *) de + sb->s_blocksize -
				       EXT4_DIR_REC_LEN(0));
		for (; de < top; de = ext4_next_entry(de, sb->s_blocksize)) {
			int off = (block << EXT4_BLOCK_SIZE_BITS(sb))
				  + ((char *) de - bh->b_data);

			if (!ext4_check_dir_entry(__func__, dir, de, bh, off)) {
				brelse(bh);
				*err = ERR_BAD_DX_DIR;
				goto errout;
			}

			if (ext4_match(namelen, name, de)) {
				*res_dir = de;
				dx_release(frames);
				return bh;
			}
		}
		brelse(bh);
		/* Check to see if we should continue to search */
		retval = ext4_htree_next_block(dir, hash, frame,
					       frames, NULL);
		if (retval < 0) {
			ext4_warning(sb, __func__,
			     "error reading index page in directory #%lu",
			     dir->i_ino);
			*err = retval;
			goto errout;
		}
	} while (retval == 1);

	*err = -ENOENT;
errout:
	dxtrace(printk(KERN_DEBUG "%s not found\n", name));
	dx_release (frames);
	return NULL;
}

static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
{
	struct inode *inode;
	struct ext4_dir_entry_2 *de;
	struct buffer_head *bh;

	if (dentry->d_name.len > EXT4_NAME_LEN)
		return ERR_PTR(-ENAMETOOLONG);

	bh = ext4_find_entry(dir, &dentry->d_name, &de);
	inode = NULL;
	if (bh) {
		__u32 ino = le32_to_cpu(de->inode);
		brelse(bh);
		if (!ext4_valid_inum(dir->i_sb, ino)) {
			ext4_error(dir->i_sb, "ext4_lookup",
				   "bad inode number: %u", ino);
			return ERR_PTR(-EIO);
		}
		inode = ext4_iget(dir->i_sb, ino);
		if (unlikely(IS_ERR(inode))) {
			if (PTR_ERR(inode) == -ESTALE) {
				ext4_error(dir->i_sb, __func__,
						"deleted inode referenced: %u",
						ino);
				return ERR_PTR(-EIO);
			} else {
				return ERR_CAST(inode);
			}
		}
	}
	return d_splice_alias(inode, dentry);
}


struct dentry *ext4_get_parent(struct dentry *child)
{
	__u32 ino;
	struct inode *inode;
	static const struct qstr dotdot = {
		.name = "..",
		.len = 2,
	};
	struct ext4_dir_entry_2 * de;
	struct buffer_head *bh;

	bh = ext4_find_entry(child->d_inode, &dotdot, &de);
	inode = NULL;
	if (!bh)
		return ERR_PTR(-ENOENT);
	ino = le32_to_cpu(de->inode);
	brelse(bh);

	if (!ext4_valid_inum(child->d_inode->i_sb, ino)) {
		ext4_error(child->d_inode->i_sb, "ext4_get_parent",
			   "bad inode number: %u", ino);
		return ERR_PTR(-EIO);
	}

	return d_obtain_alias(ext4_iget(child->d_inode->i_sb, ino));
}

#define S_SHIFT 12
static unsigned char ext4_type_by_mode[S_IFMT >> S_SHIFT] = {
	[S_IFREG >> S_SHIFT]	= EXT4_FT_REG_FILE,
	[S_IFDIR >> S_SHIFT]	= EXT4_FT_DIR,
	[S_IFCHR >> S_SHIFT]	= EXT4_FT_CHRDEV,
	[S_IFBLK >> S_SHIFT]	= EXT4_FT_BLKDEV,
	[S_IFIFO >> S_SHIFT]	= EXT4_FT_FIFO,
	[S_IFSOCK >> S_SHIFT]	= EXT4_FT_SOCK,
	[S_IFLNK >> S_SHIFT]	= EXT4_FT_SYMLINK,
};

static inline void ext4_set_de_type(struct super_block *sb,
				struct ext4_dir_entry_2 *de,
				umode_t mode) {
	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FILETYPE))
		de->file_type = ext4_type_by_mode[(mode & S_IFMT)>>S_SHIFT];
}

/*
 * Move count entries from end of map between two memory locations.
 * Returns pointer to last entry moved.
 */
static struct ext4_dir_entry_2 *
dx_move_dirents(char *from, char *to, struct dx_map_entry *map, int count,
		unsigned blocksize)
{
	unsigned rec_len = 0;

	while (count--) {
		struct ext4_dir_entry_2 *de = (struct ext4_dir_entry_2 *) 
						(from + (map->offs<<2));
		rec_len = EXT4_DIR_REC_LEN(de->name_len);
		memcpy (to, de, rec_len);
		((struct ext4_dir_entry_2 *) to)->rec_len =
				ext4_rec_len_to_disk(rec_len, blocksize);
		de->inode = 0;
		map++;
		to += rec_len;
	}
	return (struct ext4_dir_entry_2 *) (to - rec_len);
}

/*
 * Compact each dir entry in the range to the minimal rec_len.
 * Returns pointer to last entry in range.
 */
static struct ext4_dir_entry_2* dx_pack_dirents(char *base, unsigned blocksize)
{
	struct ext4_dir_entry_2 *next, *to, *prev, *de = (struct ext4_dir_entry_2 *) base;
	unsigned rec_len = 0;

	prev = to = de;
	while ((char*)de < base + blocksize) {
		next = ext4_next_entry(de, blocksize);
		if (de->inode && de->name_len) {
			rec_len = EXT4_DIR_REC_LEN(de->name_len);
			if (de > to)
				memmove(to, de, rec_len);
			to->rec_len = ext4_rec_len_to_disk(rec_len, blocksize);
			prev = to;
			to = (struct ext4_dir_entry_2 *) (((char *) to) + rec_len);
		}
		de = next;
	}
	return prev;
}

/*
 * Split a full leaf block to make room for a new dir entry.
 * Allocate a new block, and move entries so that they are approx. equally full.
 * Returns pointer to de in block into which the new entry will be inserted.
 */
static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
			struct buffer_head **bh,struct dx_frame *frame,
			struct dx_hash_info *hinfo, int *error)
{
	unsigned blocksize = dir->i_sb->s_blocksize;
	unsigned count, continued;
	struct buffer_head *bh2;
	ext4_lblk_t newblock;
	u32 hash2;
	struct dx_map_entry *map;
	char *data1 = (*bh)->b_data, *data2;
	unsigned split, move, size;
	struct ext4_dir_entry_2 *de = NULL, *de2;
	int	err = 0, i;

	bh2 = ext4_append (handle, dir, &newblock, &err);
	if (!(bh2)) {
		brelse(*bh);
		*bh = NULL;
		goto errout;
	}

	BUFFER_TRACE(*bh, "get_write_access");
	err = ext4_journal_get_write_access(handle, *bh);
	if (err)
		goto journal_error;

	BUFFER_TRACE(frame->bh, "get_write_access");
	err = ext4_journal_get_write_access(handle, frame->bh);
	if (err)
		goto journal_error;

	data2 = bh2->b_data;

	/* create map in the end of data2 block */
	map = (struct dx_map_entry *) (data2 + blocksize);
	count = dx_make_map((struct ext4_dir_entry_2 *) data1,
			     blocksize, hinfo, map);
	map -= count;
	dx_sort_map(map, count);
	/* Split the existing block in the middle, size-wise */
	size = 0;
	move = 0;
	for (i = count-1; i >= 0; i--) {
		/* is more than half of this entry in 2nd half of the block? */
		if (size + map[i].size/2 > blocksize/2)
			break;
		size += map[i].size;
		move++;
	}
	/* map index at which we will split */
	split = count - move;
	hash2 = map[split].hash;
	continued = hash2 == map[split - 1].hash;
	dxtrace(printk(KERN_INFO "Split block %lu at %x, %i/%i\n",
			(unsigned long)dx_get_block(frame->at),
					hash2, split, count-split));

	/* Fancy dance to stay within two buffers */
	de2 = dx_move_dirents(data1, data2, map + split, count - split, blocksize);
	de = dx_pack_dirents(data1, blocksize);
	de->rec_len = ext4_rec_len_to_disk(data1 + blocksize - (char *) de,
					   blocksize);
	de2->rec_len = ext4_rec_len_to_disk(data2 + blocksize - (char *) de2,
					    blocksize);
	dxtrace(dx_show_leaf (hinfo, (struct ext4_dir_entry_2 *) data1, blocksize, 1));
	dxtrace(dx_show_leaf (hinfo, (struct ext4_dir_entry_2 *) data2, blocksize, 1));

	/* Which block gets the new entry? */
	if (hinfo->hash >= hash2)
	{
		swap(*bh, bh2);
		de = de2;
	}
	dx_insert_block(frame, hash2 + continued, newblock);
	err = ext4_handle_dirty_metadata(handle, dir, bh2);
	if (err)
		goto journal_error;
	err = ext4_handle_dirty_metadata(handle, dir, frame->bh);
	if (err)
		goto journal_error;
	brelse(bh2);
	dxtrace(dx_show_index("frame", frame->entries));
	return de;

journal_error:
	brelse(*bh);
	brelse(bh2);
	*bh = NULL;
	ext4_std_error(dir->i_sb, err);
errout:
	*error = err;
	return NULL;
}

/*
 * Add a new entry into a directory (leaf) block.  If de is non-NULL,
 * it points to a directory entry which is guaranteed to be large
 * enough for the new directory entry.  If de is NULL, then
 * add_dirent_to_buf will attempt to search the directory block for
 * space.  It will return -ENOSPC if no space is available, and -EIO
 * and -EEXIST if directory entry already exists.
 *
 * NOTE!  bh is NOT released in the case where ENOSPC is returned.  In
 * all other cases bh is released.
 */
static int add_dirent_to_buf(handle_t *handle, struct dentry *dentry,
			     struct inode *inode, struct ext4_dir_entry_2 *de,
			     struct buffer_head *bh)
{
	struct inode	*dir = dentry->d_parent->d_inode;
	const char	*name = dentry->d_name.name;
	int		namelen = dentry->d_name.len;
	unsigned int	offset = 0;
	unsigned int	blocksize = dir->i_sb->s_blocksize;
	unsigned short	reclen;
	int		nlen, rlen, err;
	char		*top;

	reclen = EXT4_DIR_REC_LEN(namelen);
	if (!de) {
		de = (struct ext4_dir_entry_2 *)bh->b_data;
		top = bh->b_data + blocksize - reclen;
		while ((char *) de <= top) {
			if (!ext4_check_dir_entry("ext4_add_entry", dir, de,
						  bh, offset)) {
				brelse(bh);
				return -EIO;
			}
			if (ext4_match(namelen, name, de)) {
				brelse(bh);
				return -EEXIST;
			}
			nlen = EXT4_DIR_REC_LEN(de->name_len);
			rlen = ext4_rec_len_from_disk(de->rec_len, blocksize);
			if ((de->inode? rlen - nlen: rlen) >= reclen)
				break;
			de = (struct ext4_dir_entry_2 *)((char *)de + rlen);
			offset += rlen;
		}
		if ((char *) de > top)
			return -ENOSPC;
	}
	BUFFER_TRACE(bh, "get_write_access");
	err = ext4_journal_get_write_access(handle, bh);
	if (err) {
		ext4_std_error(dir->i_sb, err);
		brelse(bh);
		return err;
	}

	/* By now the buffer is marked for journaling */
	nlen = EXT4_DIR_REC_LEN(de->name_len);
	rlen = ext4_rec_len_from_disk(de->rec_len, blocksize);
	if (de->inode) {
		struct ext4_dir_entry_2 *de1 = (struct ext4_dir_entry_2 *)((char *)de + nlen);
		de1->rec_len = ext4_rec_len_to_disk(rlen - nlen, blocksize);
		de->rec_len = ext4_rec_len_to_disk(nlen, blocksize);
		de = de1;
	}
	de->file_type = EXT4_FT_UNKNOWN;
	if (inode) {
		de->inode = cpu_to_le32(inode->i_ino);
		ext4_set_de_type(dir->i_sb, de, inode->i_mode);
	} else
		de->inode = 0;
	de->name_len = namelen;
	memcpy(de->name, name, namelen);
	/*
	 * XXX shouldn't update any times until successful
	 * completion of syscall, but too many callers depend
	 * on this.
	 *
	 * XXX similarly, too many callers depend on
1367
	 * ext4_new_inode() setting the times, but error
1368 1369 1370 1371
	 * recovery deletes the inode, so the worst that can
	 * happen is that the times are slightly out of date
	 * and/or different from the directory change time.
	 */
	dir->i_mtime = dir->i_ctime = ext4_current_time(dir);
	ext4_update_dx_flag(dir);
	dir->i_version++;
	ext4_mark_inode_dirty(handle, dir);
	BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
	err = ext4_handle_dirty_metadata(handle, dir, bh);
	if (err)
		ext4_std_error(dir->i_sb, err);
	brelse(bh);
	return 0;
}

/*
 * This converts a one block unindexed directory to a 3 block indexed
 * directory, and adds the dentry to the indexed directory.
 */
static int make_indexed_dir(handle_t *handle, struct dentry *dentry,
			    struct inode *inode, struct buffer_head *bh)
{
	struct inode	*dir = dentry->d_parent->d_inode;
	const char	*name = dentry->d_name.name;
	int		namelen = dentry->d_name.len;
	struct buffer_head *bh2;
	struct dx_root	*root;
	struct dx_frame	frames[2], *frame;
	struct dx_entry *entries;
1398
	struct ext4_dir_entry_2	*de, *de2;
1399 1400 1401 1402 1403
	char		*data1, *top;
	unsigned	len;
	int		retval;
	unsigned	blocksize;
	struct dx_hash_info hinfo;
	ext4_lblk_t  block;
	struct fake_dirent *fde;

	blocksize =  dir->i_sb->s_blocksize;
	dxtrace(printk(KERN_DEBUG "Creating index: inode %lu\n", dir->i_ino));
	retval = ext4_journal_get_write_access(handle, bh);
	if (retval) {
		ext4_std_error(dir->i_sb, retval);
		brelse(bh);
		return retval;
	}
	root = (struct dx_root *) bh->b_data;

	/* The 0th block becomes the root, move the dirents out */
	fde = &root->dotdot;
	de = (struct ext4_dir_entry_2 *)((char *)fde +
		ext4_rec_len_from_disk(fde->rec_len, blocksize));
	if ((char *) de >= (((char *) root) + blocksize)) {
		ext4_error(dir->i_sb, __func__,
			   "invalid rec_len for '..' in inode %lu",
			   dir->i_ino);
		brelse(bh);
		return -EIO;
	}
	len = ((char *) root) + blocksize - (char *) de;

	/* Allocate new block for the 0th block's dirents */
	bh2 = ext4_append(handle, dir, &block, &retval);
	if (!(bh2)) {
		brelse(bh);
		return retval;
	}
	EXT4_I(dir)->i_flags |= EXT4_INDEX_FL;
	data1 = bh2->b_data;

	memcpy (data1, de, len);
	de = (struct ext4_dir_entry_2 *) data1;
	top = data1 + len;
	while ((char *)(de2 = ext4_next_entry(de, blocksize)) < top)
		de = de2;
	de->rec_len = ext4_rec_len_to_disk(data1 + blocksize - (char *) de,
					   blocksize);
	/* Initialize the root; the dot dirents already exist */
	de = (struct ext4_dir_entry_2 *) (&root->dotdot);
	de->rec_len = ext4_rec_len_to_disk(blocksize - EXT4_DIR_REC_LEN(2),
					   blocksize);
	memset (&root->info, 0, sizeof(root->info));
	root->info.info_length = sizeof(root->info);
	root->info.hash_version = EXT4_SB(dir->i_sb)->s_def_hash_version;
	entries = root->entries;
	dx_set_block(entries, 1);
	dx_set_count(entries, 1);
	dx_set_limit(entries, dx_root_limit(dir, sizeof(root->info)));

	/* Initialize as for dx_probe */
	hinfo.hash_version = root->info.hash_version;
	if (hinfo.hash_version <= DX_HASH_TEA)
		hinfo.hash_version += EXT4_SB(dir->i_sb)->s_hash_unsigned;
	hinfo.seed = EXT4_SB(dir->i_sb)->s_hash_seed;
	ext4fs_dirhash(name, namelen, &hinfo);
	frame = frames;
	frame->entries = entries;
	frame->at = entries;
	frame->bh = bh;
	bh = bh2;
	de = do_split(handle,dir, &bh, frame, &hinfo, &retval);
	dx_release (frames);
	if (!(de))
		return retval;

	return add_dirent_to_buf(handle, dentry, inode, de, bh);
}

/*
 *	ext4_add_entry()
 *
 * adds a file entry to the specified directory, using the same
 * semantics as ext4_find_entry(). It returns 0 on success, or a
 * negative error code on failure.
 *
 * NOTE!! The inode part of 'de' is left at 0 - which means you
 * may not sleep between calling this and putting something into
 * the entry, as someone else might have used it while you slept.
 */
static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
			  struct inode *inode)
{
	struct inode *dir = dentry->d_parent->d_inode;
	struct buffer_head *bh;
	struct ext4_dir_entry_2 *de;
	struct super_block *sb;
	int	retval;
	int	dx_fallback=0;
	unsigned blocksize;
	ext4_lblk_t block, blocks;

	sb = dir->i_sb;
	blocksize = sb->s_blocksize;
	if (!dentry->d_name.len)
		return -EINVAL;
	if (is_dx(dir)) {
		retval = ext4_dx_add_entry(handle, dentry, inode);
		if (!retval || (retval != ERR_BAD_DX_DIR))
			return retval;
		EXT4_I(dir)->i_flags &= ~EXT4_INDEX_FL;
		dx_fallback++;
		ext4_mark_inode_dirty(handle, dir);
	}
	blocks = dir->i_size >> sb->s_blocksize_bits;
	for (block = 0; block < blocks; block++) {
		bh = ext4_bread(handle, dir, block, 0, &retval);
		if(!bh)
			return retval;
		retval = add_dirent_to_buf(handle, dentry, inode, NULL, bh);
		if (retval != -ENOSPC)
			return retval;

		if (blocks == 1 && !dx_fallback &&
		    EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_DIR_INDEX))
			return make_indexed_dir(handle, dentry, inode, bh);
		brelse(bh);
	}
	bh = ext4_append(handle, dir, &block, &retval);
	if (!bh)
		return retval;
	de = (struct ext4_dir_entry_2 *) bh->b_data;
	de->inode = 0;
	de->rec_len = ext4_rec_len_to_disk(blocksize, blocksize);
	return add_dirent_to_buf(handle, dentry, inode, de, bh);
}

/*
 * Returns 0 for success, or a negative error value
 */
static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry,
			     struct inode *inode)
{
	struct dx_frame frames[2], *frame;
	struct dx_entry *entries, *at;
	struct dx_hash_info hinfo;
	struct buffer_head *bh;
	struct inode *dir = dentry->d_parent->d_inode;
	struct super_block *sb = dir->i_sb;
	struct ext4_dir_entry_2 *de;
	int err;

	frame = dx_probe(&dentry->d_name, dir, &hinfo, frames, &err);
	if (!frame)
		return err;
	entries = frame->entries;
	at = frame->at;

	if (!(bh = ext4_bread(handle,dir, dx_get_block(frame->at), 0, &err)))
		goto cleanup;

	BUFFER_TRACE(bh, "get_write_access");
	err = ext4_journal_get_write_access(handle, bh);
	if (err)
		goto journal_error;

	err = add_dirent_to_buf(handle, dentry, inode, NULL, bh);
	if (err != -ENOSPC) {
		bh = NULL;
		goto cleanup;
	}

	/* Block full, should compress but for now just split */
	dxtrace(printk(KERN_DEBUG "using %u of %u node entries\n",
		       dx_get_count(entries), dx_get_limit(entries)));
	/* Need to split index? */
	if (dx_get_count(entries) == dx_get_limit(entries)) {
		ext4_lblk_t newblock;
		unsigned icount = dx_get_count(entries);
		int levels = frame - frames;
		struct dx_entry *entries2;
		struct dx_node *node2;
		struct buffer_head *bh2;

		if (levels && (dx_get_count(frames->entries) ==
			       dx_get_limit(frames->entries))) {
			ext4_warning(sb, __func__,
				     "Directory index full!");
			err = -ENOSPC;
			goto cleanup;
		}
		bh2 = ext4_append (handle, dir, &newblock, &err);
		if (!(bh2))
			goto cleanup;
		node2 = (struct dx_node *)(bh2->b_data);
		entries2 = node2->entries;
		memset(&node2->fake, 0, sizeof(struct fake_dirent));
		node2->fake.rec_len = ext4_rec_len_to_disk(sb->s_blocksize,
							   sb->s_blocksize);
		BUFFER_TRACE(frame->bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, frame->bh);
		if (err)
			goto journal_error;
		if (levels) {
			unsigned icount1 = icount/2, icount2 = icount - icount1;
			unsigned hash2 = dx_get_hash(entries + icount1);
			dxtrace(printk(KERN_DEBUG "Split index %i/%i\n",
				       icount1, icount2));

			BUFFER_TRACE(frame->bh, "get_write_access"); /* index root */
			err = ext4_journal_get_write_access(handle,
							     frames[0].bh);
			if (err)
				goto journal_error;

			memcpy((char *) entries2, (char *) (entries + icount1),
			       icount2 * sizeof(struct dx_entry));
			dx_set_count(entries, icount1);
			dx_set_count(entries2, icount2);
			dx_set_limit(entries2, dx_node_limit(dir));

			/* Which index block gets the new entry? */
			if (at - entries >= icount1) {
				frame->at = at = at - entries - icount1 + entries2;
				frame->entries = entries = entries2;
				swap(frame->bh, bh2);
			}
			dx_insert_block(frames + 0, hash2, newblock);
			dxtrace(dx_show_index("node", frames[1].entries));
			dxtrace(dx_show_index("node",
			       ((struct dx_node *) bh2->b_data)->entries));
			err = ext4_handle_dirty_metadata(handle, inode, bh2);
			if (err)
				goto journal_error;
			brelse (bh2);
		} else {
			dxtrace(printk(KERN_DEBUG
				       "Creating second level index...\n"));
			memcpy((char *) entries2, (char *) entries,
			       icount * sizeof(struct dx_entry));
			dx_set_limit(entries2, dx_node_limit(dir));

			/* Set up root */
			dx_set_count(entries, 1);
			dx_set_block(entries + 0, newblock);
			((struct dx_root *) frames[0].bh->b_data)->info.indirect_levels = 1;

			/* Add new access path frame */
			frame = frames + 1;
			frame->at = at = at - entries + entries2;
			frame->entries = entries = entries2;
			frame->bh = bh2;
			err = ext4_journal_get_write_access(handle,
							     frame->bh);
			if (err)
				goto journal_error;
		}
		ext4_handle_dirty_metadata(handle, inode, frames[0].bh);
	}
	de = do_split(handle, dir, &bh, frame, &hinfo, &err);
	if (!de)
		goto cleanup;
	err = add_dirent_to_buf(handle, dentry, inode, de, bh);
	bh = NULL;
	goto cleanup;

journal_error:
	ext4_std_error(dir->i_sb, err);
cleanup:
	if (bh)
		brelse(bh);
	dx_release(frames);
	return err;
}

/*
 * ext4_delete_entry deletes a directory entry by merging it with the
 * previous entry
 */
static int ext4_delete_entry(handle_t *handle,
			     struct inode *dir,
			     struct ext4_dir_entry_2 *de_del,
			     struct buffer_head *bh)
{
	struct ext4_dir_entry_2 *de, *pde;
	unsigned int blocksize = dir->i_sb->s_blocksize;
	int i;

	i = 0;
	pde = NULL;
	de = (struct ext4_dir_entry_2 *) bh->b_data;
	while (i < bh->b_size) {
		if (!ext4_check_dir_entry("ext4_delete_entry", dir, de, bh, i))
			return -EIO;
		if (de == de_del)  {
			BUFFER_TRACE(bh, "get_write_access");
			ext4_journal_get_write_access(handle, bh);
			if (pde)
				pde->rec_len = ext4_rec_len_to_disk(
					ext4_rec_len_from_disk(pde->rec_len,
							       blocksize) +
					ext4_rec_len_from_disk(de->rec_len,
							       blocksize),
					blocksize);
			else
				de->inode = 0;
			dir->i_version++;
			BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
			ext4_handle_dirty_metadata(handle, dir, bh);
			return 0;
		}
		i += ext4_rec_len_from_disk(de->rec_len, blocksize);
		pde = de;
		de = ext4_next_entry(de, blocksize);
	}
	return -ENOENT;
}

/*
 * DIR_NLINK feature is set if 1) nlinks > EXT4_LINK_MAX or 2) nlinks == 2,
 * since this indicates that nlinks count was previously 1.
 */
static void ext4_inc_count(handle_t *handle, struct inode *inode)
{
	inc_nlink(inode);
	if (is_dx(inode) && inode->i_nlink > 1) {
		/* limit is 16-bit i_links_count */
		if (inode->i_nlink >= EXT4_LINK_MAX || inode->i_nlink == 2) {
			inode->i_nlink = 1;
			EXT4_SET_RO_COMPAT_FEATURE(inode->i_sb,
					      EXT4_FEATURE_RO_COMPAT_DIR_NLINK);
		}
	}
}

/*
 * If a directory had nlink == 1, then we should let it be 1. This indicates
 * directory has >EXT4_LINK_MAX subdirs.
 */
static void ext4_dec_count(handle_t *handle, struct inode *inode)
{
	drop_nlink(inode);
	if (S_ISDIR(inode->i_mode) && inode->i_nlink == 0)
		inc_nlink(inode);
}


static int ext4_add_nondir(handle_t *handle,
		struct dentry *dentry, struct inode *inode)
{
	int err = ext4_add_entry(handle, dentry, inode);
	if (!err) {
		ext4_mark_inode_dirty(handle, inode);
		d_instantiate(dentry, inode);
		unlock_new_inode(inode);
		return 0;
	}
	drop_nlink(inode);
	unlock_new_inode(inode);
	iput(inode);
	return err;
}

/*
 * By the time this is called, we already have created
 * the directory cache entry for the new file, but it
 * is so far negative - it has no inode.
 *
 * If the create succeeds, we fill in the inode information
 * with d_instantiate().
 */
static int ext4_create(struct inode *dir, struct dentry *dentry, int mode,
		       struct nameidata *nd)
{
	handle_t *handle;
	struct inode *inode;
	int err, retries = 0;

retry:
	handle = ext4_journal_start(dir, EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
					EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3 +
					2*EXT4_QUOTA_INIT_BLOCKS(dir->i_sb));
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	if (IS_DIRSYNC(dir))
		ext4_handle_sync(handle);

	inode = ext4_new_inode(handle, dir, mode, &dentry->d_name, 0);
	err = PTR_ERR(inode);
	if (!IS_ERR(inode)) {
		inode->i_op = &ext4_file_inode_operations;
		inode->i_fop = &ext4_file_operations;
		ext4_set_aops(inode);
		err = ext4_add_nondir(handle, dentry, inode);
	}
	ext4_journal_stop(handle);
	if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries))
		goto retry;
	return err;
}

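/*
 * Create a special inode: a device node, FIFO or socket, depending on
 * mode; rdev is only meaningful for block and character devices.
 */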
static int ext4_mknod(struct inode *dir, struct dentry *dentry,
		      int mode, dev_t rdev)
{
	handle_t *handle;
	struct inode *inode;
	int err, retries = 0;

	if (!new_valid_dev(rdev))
		return -EINVAL;

retry:
	handle = ext4_journal_start(dir, EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
					EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3 +
					2*EXT4_QUOTA_INIT_BLOCKS(dir->i_sb));
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	if (IS_DIRSYNC(dir))
		ext4_handle_sync(handle);

	inode = ext4_new_inode(handle, dir, mode, &dentry->d_name, 0);
	err = PTR_ERR(inode);
	if (!IS_ERR(inode)) {
		init_special_inode(inode, inode->i_mode, rdev);
#ifdef CONFIG_EXT4_FS_XATTR
		inode->i_op = &ext4_special_inode_operations;
#endif
		err = ext4_add_nondir(handle, dentry, inode);
	}
	ext4_journal_stop(handle);
	if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries))
		goto retry;
	return err;
}

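/*
 * mkdir: allocate the directory inode, write "." and ".." into its first
 * block, then add an entry for it in the parent.  If the parent entry
 * cannot be added, the half-built directory is torn down again.
 */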
static int ext4_mkdir(struct inode *dir, struct dentry *dentry, int mode)
{
	handle_t *handle;
	struct inode *inode;
	struct buffer_head *dir_block;
	struct ext4_dir_entry_2 *de;
	unsigned int blocksize = dir->i_sb->s_blocksize;
	int err, retries = 0;

	if (EXT4_DIR_LINK_MAX(dir))
		return -EMLINK;

retry:
	handle = ext4_journal_start(dir, EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
					EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3 +
					2*EXT4_QUOTA_INIT_BLOCKS(dir->i_sb));
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	if (IS_DIRSYNC(dir))
		ext4_handle_sync(handle);

	inode = ext4_new_inode(handle, dir, S_IFDIR | mode,
			       &dentry->d_name, 0);
	err = PTR_ERR(inode);
	if (IS_ERR(inode))
		goto out_stop;

	inode->i_op = &ext4_dir_inode_operations;
	inode->i_fop = &ext4_dir_operations;
	inode->i_size = EXT4_I(inode)->i_disksize = inode->i_sb->s_blocksize;
	dir_block = ext4_bread(handle, inode, 0, 1, &err);
	if (!dir_block)
		goto out_clear_inode;
	BUFFER_TRACE(dir_block, "get_write_access");
	ext4_journal_get_write_access(handle, dir_block);
	de = (struct ext4_dir_entry_2 *) dir_block->b_data;
	de->inode = cpu_to_le32(inode->i_ino);
	de->name_len = 1;
	de->rec_len = ext4_rec_len_to_disk(EXT4_DIR_REC_LEN(de->name_len),
					   blocksize);
	strcpy(de->name, ".");
	ext4_set_de_type(dir->i_sb, de, S_IFDIR);
	de = ext4_next_entry(de, blocksize);
	de->inode = cpu_to_le32(dir->i_ino);
	de->rec_len = ext4_rec_len_to_disk(blocksize - EXT4_DIR_REC_LEN(1),
					   blocksize);
	de->name_len = 2;
	strcpy(de->name, "..");
	ext4_set_de_type(dir->i_sb, de, S_IFDIR);
	inode->i_nlink = 2;
	BUFFER_TRACE(dir_block, "call ext4_handle_dirty_metadata");
	ext4_handle_dirty_metadata(handle, dir, dir_block);
	brelse(dir_block);
	ext4_mark_inode_dirty(handle, inode);
	err = ext4_add_entry(handle, dentry, inode);
	if (err) {
out_clear_inode:
		clear_nlink(inode);
		unlock_new_inode(inode);
		ext4_mark_inode_dirty(handle, inode);
		iput(inode);
		goto out_stop;
	}
	ext4_inc_count(handle, dir);
	ext4_update_dx_flag(dir);
	ext4_mark_inode_dirty(handle, dir);
	d_instantiate(dentry, inode);
	unlock_new_inode(inode);
out_stop:
	ext4_journal_stop(handle);
	if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries))
		goto retry;
	return err;
}

/*
 * routine to check that the specified directory is empty (for rmdir);
 * returns 1 if the directory is empty or cannot be read, 0 otherwise
 */
static int empty_dir(struct inode *inode)
{
	unsigned int offset;
	struct buffer_head *bh;
	struct ext4_dir_entry_2 *de, *de1;
	struct super_block *sb;
	int err = 0;

	sb = inode->i_sb;
	if (inode->i_size < EXT4_DIR_REC_LEN(1) + EXT4_DIR_REC_LEN(2) ||
	    !(bh = ext4_bread(NULL, inode, 0, 0, &err))) {
		if (err)
			ext4_error(inode->i_sb, __func__,
				   "error %d reading directory #%lu offset 0",
				   err, inode->i_ino);
		else
			ext4_warning(inode->i_sb, __func__,
				     "bad directory (dir #%lu) - no data block",
				     inode->i_ino);
		return 1;
	}
	de = (struct ext4_dir_entry_2 *) bh->b_data;
	de1 = ext4_next_entry(de, sb->s_blocksize);
	if (le32_to_cpu(de->inode) != inode->i_ino ||
			!le32_to_cpu(de1->inode) ||
			strcmp(".", de->name) ||
			strcmp("..", de1->name)) {
		ext4_warning(inode->i_sb, "empty_dir",
			     "bad directory (dir #%lu) - no `.' or `..'",
			     inode->i_ino);
		brelse(bh);
		return 1;
	}
	offset = ext4_rec_len_from_disk(de->rec_len, sb->s_blocksize) +
		 ext4_rec_len_from_disk(de1->rec_len, sb->s_blocksize);
	de = ext4_next_entry(de1, sb->s_blocksize);
	while (offset < inode->i_size) {
		if (!bh ||
			(void *) de >= (void *) (bh->b_data+sb->s_blocksize)) {
			err = 0;
			brelse(bh);
			bh = ext4_bread(NULL, inode,
				offset >> EXT4_BLOCK_SIZE_BITS(sb), 0, &err);
			if (!bh) {
				if (err)
					ext4_error(sb, __func__,
						   "error %d reading directory"
						   " #%lu offset %u",
						   err, inode->i_ino, offset);
				offset += sb->s_blocksize;
				continue;
			}
			de = (struct ext4_dir_entry_2 *) bh->b_data;
		}
		if (!ext4_check_dir_entry("empty_dir", inode, de, bh, offset)) {
			de = (struct ext4_dir_entry_2 *)(bh->b_data +
							 sb->s_blocksize);
			offset = (offset | (sb->s_blocksize - 1)) + 1;
			continue;
		}
		if (le32_to_cpu(de->inode)) {
			brelse(bh);
			return 0;
		}
		offset += ext4_rec_len_from_disk(de->rec_len, sb->s_blocksize);
		de = ext4_next_entry(de, sb->s_blocksize);
	}
	brelse(bh);
	return 1;
}

/* ext4_orphan_add() links an unlinked or truncated inode into a list of
 * such inodes, starting at the superblock, in case we crash before the
 * file is closed/deleted, or in case the inode truncate spans multiple
 * transactions and the last transaction is not recovered after a crash.
 *
 * At filesystem recovery time, we walk this list deleting unlinked
 * inodes and truncating linked inodes in ext4_orphan_cleanup().
 */
int ext4_orphan_add(handle_t *handle, struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	struct ext4_iloc iloc;
	int err = 0, rc;

	if (!ext4_handle_valid(handle))
		return 0;

	mutex_lock(&EXT4_SB(sb)->s_orphan_lock);
	if (!list_empty(&EXT4_I(inode)->i_orphan))
		goto out_unlock;

	/* Orphan handling is only valid for files with data blocks
	 * being truncated, or files being unlinked. */

	/* @@@ FIXME: Observation from aviro:
	 * I think I can trigger J_ASSERT in ext4_orphan_add().  We block
	 * here (on s_orphan_lock), so race with ext4_link() which might bump
	 * ->i_nlink. For, say it, character device. Not a regular file,
	 * not a directory, not a symlink and ->i_nlink > 0.
	 *
	 * tytso, 4/25/2009: I'm not sure how that could happen;
	 * shouldn't the fs core protect us from these sort of
	 * unlink()/link() races?
	 */
	J_ASSERT((S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
		  S_ISLNK(inode->i_mode)) || inode->i_nlink == 0);

	BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get_write_access");
	err = ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh);
	if (err)
		goto out_unlock;

	err = ext4_reserve_inode_write(handle, inode, &iloc);
	if (err)
		goto out_unlock;

	/* Insert this inode at the head of the on-disk orphan list... */
	NEXT_ORPHAN(inode) = le32_to_cpu(EXT4_SB(sb)->s_es->s_last_orphan);
	EXT4_SB(sb)->s_es->s_last_orphan = cpu_to_le32(inode->i_ino);
	err = ext4_handle_dirty_metadata(handle, inode, EXT4_SB(sb)->s_sbh);
	rc = ext4_mark_iloc_dirty(handle, inode, &iloc);
	if (!err)
		err = rc;

	/* Only add to the head of the in-memory list if all the
	 * previous operations succeeded.  If the orphan_add is going to
	 * fail (possibly taking the journal offline), we can't risk
	 * leaving the inode on the orphan list: stray orphan-list
	 * entries can cause panics at unmount time.
	 *
	 * This is safe: on error we're going to ignore the orphan list
	 * anyway on the next recovery. */
	if (!err)
		list_add(&EXT4_I(inode)->i_orphan, &EXT4_SB(sb)->s_orphan);

	jbd_debug(4, "superblock will point to %lu\n", inode->i_ino);
	jbd_debug(4, "orphan inode %lu will point to %d\n",
			inode->i_ino, NEXT_ORPHAN(inode));
out_unlock:
	mutex_unlock(&EXT4_SB(sb)->s_orphan_lock);
	ext4_std_error(inode->i_sb, err);
	return err;
}

/*
 * ext4_orphan_del() removes an unlinked or truncated inode from the list
 * of such inodes stored on disk, because it is finally being cleaned up.
 */
int ext4_orphan_del(handle_t *handle, struct inode *inode)
{
	struct list_head *prev;
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_sb_info *sbi;
	__u32 ino_next;
	struct ext4_iloc iloc;
	int err = 0;

	/* ext4_handle_valid() assumes a valid handle_t pointer */
	if (handle && !ext4_handle_valid(handle))
		return 0;

	mutex_lock(&EXT4_SB(inode->i_sb)->s_orphan_lock);
	if (list_empty(&ei->i_orphan))
		goto out;

	ino_next = NEXT_ORPHAN(inode);
	prev = ei->i_orphan.prev;
	sbi = EXT4_SB(inode->i_sb);

	jbd_debug(4, "remove inode %lu from orphan list\n", inode->i_ino);

	list_del_init(&ei->i_orphan);

	/* If we're on an error path, we may not have a valid
	 * transaction handle with which to update the orphan list on
	 * disk, but we still need to remove the inode from the linked
	 * list in memory. */
	if (sbi->s_journal && !handle)
		goto out;

	err = ext4_reserve_inode_write(handle, inode, &iloc);
	if (err)
		goto out_err;

	if (prev == &sbi->s_orphan) {
		jbd_debug(4, "superblock will point to %u\n", ino_next);
		BUFFER_TRACE(sbi->s_sbh, "get_write_access");
		err = ext4_journal_get_write_access(handle, sbi->s_sbh);
		if (err)
			goto out_brelse;
		sbi->s_es->s_last_orphan = cpu_to_le32(ino_next);
		err = ext4_handle_dirty_metadata(handle, inode, sbi->s_sbh);
	} else {
		struct ext4_iloc iloc2;
		struct inode *i_prev =
			&list_entry(prev, struct ext4_inode_info, i_orphan)->vfs_inode;

		jbd_debug(4, "orphan inode %lu will point to %u\n",
			  i_prev->i_ino, ino_next);
		err = ext4_reserve_inode_write(handle, i_prev, &iloc2);
		if (err)
			goto out_brelse;
		NEXT_ORPHAN(i_prev) = ino_next;
		err = ext4_mark_iloc_dirty(handle, i_prev, &iloc2);
	}
	if (err)
		goto out_brelse;
	NEXT_ORPHAN(inode) = 0;
	err = ext4_mark_iloc_dirty(handle, inode, &iloc);

out_err:
	ext4_std_error(inode->i_sb, err);
out:
	mutex_unlock(&EXT4_SB(inode->i_sb)->s_orphan_lock);
	return err;

out_brelse:
	brelse(iloc.bh);
	goto out_err;
}

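/*
 * rmdir: the victim must be empty.  Its entry is removed from the parent
 * and the inode is put on the orphan list before the handle is closed,
 * so a crash before the final iput cannot leak its blocks.
 */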
static int ext4_rmdir(struct inode *dir, struct dentry *dentry)
{
	int retval;
	struct inode *inode;
	struct buffer_head *bh;
	struct ext4_dir_entry_2 *de;
	handle_t *handle;

	/* Initialize quotas before so that eventual writes go in
	 * separate transaction */
	vfs_dq_init(dentry->d_inode);
	handle = ext4_journal_start(dir, EXT4_DELETE_TRANS_BLOCKS(dir->i_sb));
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	retval = -ENOENT;
	bh = ext4_find_entry(dir, &dentry->d_name, &de);
	if (!bh)
		goto end_rmdir;

	if (IS_DIRSYNC(dir))
		ext4_handle_sync(handle);

	inode = dentry->d_inode;

	retval = -EIO;
	if (le32_to_cpu(de->inode) != inode->i_ino)
		goto end_rmdir;

	retval = -ENOTEMPTY;
	if (!empty_dir(inode))
		goto end_rmdir;

	retval = ext4_delete_entry(handle, dir, de, bh);
	if (retval)
		goto end_rmdir;
	if (!EXT4_DIR_LINK_EMPTY(inode))
		ext4_warning(inode->i_sb, "ext4_rmdir",
			     "empty directory has too many links (%d)",
			     inode->i_nlink);
	inode->i_version++;
	clear_nlink(inode);
	/* There's no need to set i_disksize: the fact that i_nlink is
	 * zero will ensure that the right thing happens during any
	 * recovery. */
	inode->i_size = 0;
	ext4_orphan_add(handle, inode);
	inode->i_ctime = dir->i_ctime = dir->i_mtime = ext4_current_time(inode);
	ext4_mark_inode_dirty(handle, inode);
	ext4_dec_count(handle, dir);
	ext4_update_dx_flag(dir);
	ext4_mark_inode_dirty(handle, dir);

end_rmdir:
	ext4_journal_stop(handle);
	brelse(bh);
	return retval;
}

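/*
 * unlink: remove the name from the directory and drop one link on the
 * inode; if that was the last link, put the inode on the orphan list so
 * that it is reclaimed correctly after a crash.
 */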
static int ext4_unlink(struct inode *dir, struct dentry *dentry)
{
	int retval;
	struct inode *inode;
	struct buffer_head *bh;
	struct ext4_dir_entry_2 *de;
	handle_t *handle;

	/* Initialize quotas before so that eventual writes go
	 * in separate transaction */
	vfs_dq_init(dentry->d_inode);
	handle = ext4_journal_start(dir, EXT4_DELETE_TRANS_BLOCKS(dir->i_sb));
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	if (IS_DIRSYNC(dir))
		ext4_handle_sync(handle);

	retval = -ENOENT;
	bh = ext4_find_entry(dir, &dentry->d_name, &de);
	if (!bh)
		goto end_unlink;

	inode = dentry->d_inode;

	retval = -EIO;
	if (le32_to_cpu(de->inode) != inode->i_ino)
		goto end_unlink;

	if (!inode->i_nlink) {
		ext4_warning(inode->i_sb, "ext4_unlink",
			     "Deleting nonexistent file (%lu), %d",
			     inode->i_ino, inode->i_nlink);
		inode->i_nlink = 1;
	}
	retval = ext4_delete_entry(handle, dir, de, bh);
	if (retval)
		goto end_unlink;
	dir->i_ctime = dir->i_mtime = ext4_current_time(dir);
	ext4_update_dx_flag(dir);
	ext4_mark_inode_dirty(handle, dir);
	drop_nlink(inode);
	if (!inode->i_nlink)
		ext4_orphan_add(handle, inode);
	inode->i_ctime = ext4_current_time(inode);
	ext4_mark_inode_dirty(handle, inode);
	retval = 0;

end_unlink:
	ext4_journal_stop(handle);
	brelse(bh);
	return retval;
}

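/*
 * symlink: targets short enough to fit in the inode's i_data[] are stored
 * inline as "fast" symlinks; longer targets go into a data block via
 * __page_symlink().
 */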
static int ext4_symlink(struct inode *dir,
			struct dentry *dentry, const char *symname)
{
	handle_t *handle;
	struct inode *inode;
	int l, err, retries = 0;

	l = strlen(symname)+1;
	if (l > dir->i_sb->s_blocksize)
		return -ENAMETOOLONG;

retry:
	handle = ext4_journal_start(dir, EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
					EXT4_INDEX_EXTRA_TRANS_BLOCKS + 5 +
					2*EXT4_QUOTA_INIT_BLOCKS(dir->i_sb));
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	if (IS_DIRSYNC(dir))
		ext4_handle_sync(handle);

	inode = ext4_new_inode(handle, dir, S_IFLNK|S_IRWXUGO,
			       &dentry->d_name, 0);
	err = PTR_ERR(inode);
	if (IS_ERR(inode))
		goto out_stop;

	if (l > sizeof(EXT4_I(inode)->i_data)) {
		inode->i_op = &ext4_symlink_inode_operations;
		ext4_set_aops(inode);
		/*
		 * page_symlink() calls into ext4_prepare/commit_write.
		 * We have a transaction open.  All is sweetness.  It also sets
		 * i_size in generic_commit_write().
		 */
		err = __page_symlink(inode, symname, l, 1);
		if (err) {
			clear_nlink(inode);
			unlock_new_inode(inode);
			ext4_mark_inode_dirty(handle, inode);
			iput(inode);
			goto out_stop;
		}
	} else {
		/* clear the extent format for fast symlink */
		EXT4_I(inode)->i_flags &= ~EXT4_EXTENTS_FL;
		inode->i_op = &ext4_fast_symlink_inode_operations;
		memcpy((char *)&EXT4_I(inode)->i_data, symname, l);
		inode->i_size = l-1;
	}
	EXT4_I(inode)->i_disksize = inode->i_size;
	err = ext4_add_nondir(handle, dentry, inode);
out_stop:
	ext4_journal_stop(handle);
	if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries))
		goto retry;
	return err;
}

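/*
 * link: add another name for an existing inode.  The link count and the
 * in-core reference are taken before the directory entry is added and
 * are rolled back if ext4_add_entry() fails.
 */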
static int ext4_link(struct dentry *old_dentry,
		     struct inode *dir, struct dentry *dentry)
{
	handle_t *handle;
	struct inode *inode = old_dentry->d_inode;
	int err, retries = 0;

	if (inode->i_nlink >= EXT4_LINK_MAX)
		return -EMLINK;

	/*
	 * Return -ENOENT if we've raced with unlink and i_nlink is 0.  Doing
	 * otherwise has the potential to corrupt the orphan inode list.
	 */
	if (inode->i_nlink == 0)
		return -ENOENT;

retry:
	handle = ext4_journal_start(dir, EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
					EXT4_INDEX_EXTRA_TRANS_BLOCKS);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	if (IS_DIRSYNC(dir))
		ext4_handle_sync(handle);

	inode->i_ctime = ext4_current_time(inode);
	ext4_inc_count(handle, inode);
	atomic_inc(&inode->i_count);

	err = ext4_add_entry(handle, dentry, inode);
	if (!err) {
		ext4_mark_inode_dirty(handle, inode);
		d_instantiate(dentry, inode);
	} else {
		drop_nlink(inode);
		iput(inode);
	}
	ext4_journal_stop(handle);
	if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries))
		goto retry;
	return err;
}

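/*
 * Fetch the inode field of the ".." entry, i.e. the second entry in the
 * directory block passed in via @buffer.
 */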
#define PARENT_INO(buffer, size) \
	(ext4_next_entry((struct ext4_dir_entry_2 *)(buffer), size)->inode)

/*
 * Anybody can rename anything with this: the permission checks are left to the
 * higher-level routines.
 */
static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
		       struct inode *new_dir, struct dentry *new_dentry)
{
	handle_t *handle;
	struct inode *old_inode, *new_inode;
	struct buffer_head *old_bh, *new_bh, *dir_bh;
	struct ext4_dir_entry_2 *old_de, *new_de;
	int retval, force_da_alloc = 0;

	old_bh = new_bh = dir_bh = NULL;

	/* Initialize quotas before so that eventual writes go
	 * in separate transaction */
	if (new_dentry->d_inode)
		vfs_dq_init(new_dentry->d_inode);
	handle = ext4_journal_start(old_dir, 2 *
					EXT4_DATA_TRANS_BLOCKS(old_dir->i_sb) +
					EXT4_INDEX_EXTRA_TRANS_BLOCKS + 2);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	if (IS_DIRSYNC(old_dir) || IS_DIRSYNC(new_dir))
		ext4_handle_sync(handle);

	old_bh = ext4_find_entry(old_dir, &old_dentry->d_name, &old_de);
	/*
	 *  Check for inode number is _not_ due to possible IO errors.
	 *  We might rmdir the source, keep it as pwd of some process
	 *  and merrily kill the link to whatever was created under the
	 *  same name. Goodbye sticky bit ;-<
	 */
	old_inode = old_dentry->d_inode;
	retval = -ENOENT;
	if (!old_bh || le32_to_cpu(old_de->inode) != old_inode->i_ino)
		goto end_rename;

	new_inode = new_dentry->d_inode;
	new_bh = ext4_find_entry(new_dir, &new_dentry->d_name, &new_de);
	if (new_bh) {
		if (!new_inode) {
			brelse(new_bh);
			new_bh = NULL;
		}
	}
	if (S_ISDIR(old_inode->i_mode)) {
		if (new_inode) {
			retval = -ENOTEMPTY;
			if (!empty_dir(new_inode))
				goto end_rename;
		}
		retval = -EIO;
		dir_bh = ext4_bread(handle, old_inode, 0, 0, &retval);
		if (!dir_bh)
			goto end_rename;
		if (le32_to_cpu(PARENT_INO(dir_bh->b_data,
				old_dir->i_sb->s_blocksize)) != old_dir->i_ino)
			goto end_rename;
		retval = -EMLINK;
		if (!new_inode && new_dir != old_dir &&
		    EXT4_DIR_LINK_MAX(new_dir))
			goto end_rename;
	}
	if (!new_bh) {
		retval = ext4_add_entry(handle, new_dentry, old_inode);
		if (retval)
			goto end_rename;
	} else {
		BUFFER_TRACE(new_bh, "get write access");
		ext4_journal_get_write_access(handle, new_bh);
		new_de->inode = cpu_to_le32(old_inode->i_ino);
		if (EXT4_HAS_INCOMPAT_FEATURE(new_dir->i_sb,
					      EXT4_FEATURE_INCOMPAT_FILETYPE))
			new_de->file_type = old_de->file_type;
		new_dir->i_version++;
		new_dir->i_ctime = new_dir->i_mtime =
					ext4_current_time(new_dir);
		ext4_mark_inode_dirty(handle, new_dir);
		BUFFER_TRACE(new_bh, "call ext4_handle_dirty_metadata");
		ext4_handle_dirty_metadata(handle, new_dir, new_bh);
		brelse(new_bh);
		new_bh = NULL;
	}

	/*
	 * Like most other Unix systems, set the ctime for inodes on a
	 * rename.
	 */
	old_inode->i_ctime = ext4_current_time(old_inode);
	ext4_mark_inode_dirty(handle, old_inode);

	/*
	 * ok, that's it
	 */
	if (le32_to_cpu(old_de->inode) != old_inode->i_ino ||
	    old_de->name_len != old_dentry->d_name.len ||
	    strncmp(old_de->name, old_dentry->d_name.name, old_de->name_len) ||
	    (retval = ext4_delete_entry(handle, old_dir,
					old_de, old_bh)) == -ENOENT) {
		/* old_de could have moved from under us during htree split, so
		 * make sure that we are deleting the right entry.  We might
		 * also be pointing to a stale entry in the unused part of
		 * old_bh so just checking inum and the name isn't enough. */
		struct buffer_head *old_bh2;
		struct ext4_dir_entry_2 *old_de2;

		old_bh2 = ext4_find_entry(old_dir, &old_dentry->d_name, &old_de2);
		if (old_bh2) {
			retval = ext4_delete_entry(handle, old_dir,
						   old_de2, old_bh2);
			brelse(old_bh2);
		}
	}
	if (retval) {
		ext4_warning(old_dir->i_sb, "ext4_rename",
				"Deleting old file (%lu), %d, error=%d",
				old_dir->i_ino, old_dir->i_nlink, retval);
	}

	if (new_inode) {
		ext4_dec_count(handle, new_inode);
		new_inode->i_ctime = ext4_current_time(new_inode);
	}
	old_dir->i_ctime = old_dir->i_mtime = ext4_current_time(old_dir);
	ext4_update_dx_flag(old_dir);
	if (dir_bh) {
		BUFFER_TRACE(dir_bh, "get_write_access");
		ext4_journal_get_write_access(handle, dir_bh);
		PARENT_INO(dir_bh->b_data, new_dir->i_sb->s_blocksize) =
						cpu_to_le32(new_dir->i_ino);
		BUFFER_TRACE(dir_bh, "call ext4_handle_dirty_metadata");
		ext4_handle_dirty_metadata(handle, old_dir, dir_bh);
		ext4_dec_count(handle, old_dir);
		if (new_inode) {
			/* checked empty_dir above, can't have another parent,
			 * ext4_dec_count() won't work for many-linked dirs */
			new_inode->i_nlink = 0;
		} else {
			ext4_inc_count(handle, new_dir);
			ext4_update_dx_flag(new_dir);
			ext4_mark_inode_dirty(handle, new_dir);
		}
	}
	ext4_mark_inode_dirty(handle, old_dir);
	if (new_inode) {
		ext4_mark_inode_dirty(handle, new_inode);
		if (!new_inode->i_nlink)
			ext4_orphan_add(handle, new_inode);
		if (!test_opt(new_dir->i_sb, NO_AUTO_DA_ALLOC))
			force_da_alloc = 1;
	}
	retval = 0;

end_rename:
	brelse(dir_bh);
	brelse(old_bh);
	brelse(new_bh);
	ext4_journal_stop(handle);
	if (retval == 0 && force_da_alloc)
		ext4_alloc_da_blocks(old_inode);
	return retval;
}

/*
 * directories can handle most operations...
 */
const struct inode_operations ext4_dir_inode_operations = {
	.create		= ext4_create,
	.lookup		= ext4_lookup,
	.link		= ext4_link,
	.unlink		= ext4_unlink,
	.symlink	= ext4_symlink,
	.mkdir		= ext4_mkdir,
	.rmdir		= ext4_rmdir,
	.mknod		= ext4_mknod,
	.rename		= ext4_rename,
	.setattr	= ext4_setattr,
#ifdef CONFIG_EXT4_FS_XATTR
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= ext4_listxattr,
	.removexattr	= generic_removexattr,
#endif
	.check_acl	= ext4_check_acl,
	.fiemap         = ext4_fiemap,
};

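/*
 * Special inodes (device nodes, FIFOs, sockets) only need attribute,
 * xattr and ACL operations here; their file operations are set up by
 * init_special_inode().
 */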
const struct inode_operations ext4_special_inode_operations = {
	.setattr	= ext4_setattr,
#ifdef CONFIG_EXT4_FS_XATTR
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= ext4_listxattr,
	.removexattr	= generic_removexattr,
#endif
	.check_acl	= ext4_check_acl,
};