/*
 * linux/fs/ext4/xattr.c
 *
 * Copyright (C) 2001-2003 Andreas Gruenbacher, <agruen@suse.de>
 *
 * Fix by Harrison Xing <harrison@mountainviewdata.com>.
 * Ext4 code with a lot of help from Eric Jarman <ejarman@acm.org>.
 * Extended attributes for symlinks and special files added per
 *  suggestion of Luka Renko <luka.renko@hermes.si>.
 * xattr consolidation Copyright (c) 2004 James Morris <jmorris@redhat.com>,
 *  Red Hat Inc.
 * ea-in-inode support by Alex Tomas <alex@clusterfs.com> aka bzzz
 *  and Andreas Gruenbacher <agruen@suse.de>.
 */

/*
 * Extended attributes are stored directly in inodes (on file systems with
 * inodes bigger than 128 bytes) and on additional disk blocks. The i_file_acl
 * field contains the block number if an inode uses an additional block. All
 * attributes must fit in the inode and one additional block. Blocks that
 * contain the identical set of attributes may be shared among several inodes.
 * Identical blocks are detected by keeping a cache of blocks that have
 * recently been accessed.
 *
 * Attributes stored in inodes and on blocks use different headers, but the
 * entry descriptors share the same format:
 *
 *   +------------------+
 *   | header           |
 *   | entry 1          | |
 *   | entry 2          | | growing downwards
 *   | entry 3          | v
 *   | four null bytes  |
 *   | . . .            |
 *   | value 1          | ^
 *   | value 3          | | growing upwards
 *   | value 2          | |
 *   +------------------+
 *
 * The header is followed by multiple entry descriptors. In disk blocks, the
 * entry descriptors are kept sorted. In inodes, they are unsorted. The
 * attribute values are aligned to the end of the block in no specific order.
 *
 * Locking strategy
 * ----------------
 * EXT4_I(inode)->i_file_acl is protected by EXT4_I(inode)->xattr_sem.
 * EA blocks are only changed if they are exclusive to an inode, so
 * holding xattr_sem also means that nothing but the EA block's reference
 * count can change. Multiple writers to the same block are synchronized
 * by the buffer lock.
 */
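
/*
 * For reference, the on-disk structures behind the layout above are declared
 * in xattr.h; a rough sketch only (see xattr.h for the authoritative
 * definitions):
 *
 *	struct ext4_xattr_header {
 *		__le32	h_magic;	magic number for identification
 *		__le32	h_refcount;	reference count
 *		__le32	h_blocks;	number of disk blocks used
 *		__le32	h_hash;		hash value of all attributes
 *		__u32	h_reserved[4];
 *	};
 *
 *	struct ext4_xattr_entry {
 *		__u8	e_name_len;	length of the attribute name
 *		__u8	e_name_index;	attribute name index
 *		__le16	e_value_offs;	offset of the value within the block
 *		__le32	e_value_block;	unused
 *		__le32	e_value_size;	size of the attribute value
 *		__le32	e_hash;		hash of name and value
 *		char	e_name[0];	attribute name, not null-terminated
 *	};
 */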

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/ext4_jbd2.h>
#include <linux/ext4_fs.h>
#include <linux/mbcache.h>
#include <linux/quotaops.h>
#include <linux/rwsem.h>
#include "xattr.h"
#include "acl.h"

#define BHDR(bh) ((struct ext4_xattr_header *)((bh)->b_data))
#define ENTRY(ptr) ((struct ext4_xattr_entry *)(ptr))
#define BFIRST(bh) ENTRY(BHDR(bh)+1)
#define IS_LAST_ENTRY(entry) (*(__u32 *)(entry) == 0)

#define IHDR(inode, raw_inode) \
	((struct ext4_xattr_ibody_header *) \
		((void *)raw_inode + \
		 EXT4_GOOD_OLD_INODE_SIZE + \
		 EXT4_I(inode)->i_extra_isize))
#define IFIRST(hdr) ((struct ext4_xattr_entry *)((hdr)+1))

#ifdef EXT4_XATTR_DEBUG
# define ea_idebug(inode, f...) do { \
		printk(KERN_DEBUG "inode %s:%lu: ", \
			inode->i_sb->s_id, inode->i_ino); \
		printk(f); \
		printk("\n"); \
	} while (0)
# define ea_bdebug(bh, f...) do { \
		char b[BDEVNAME_SIZE]; \
		printk(KERN_DEBUG "block %s:%lu: ", \
			bdevname(bh->b_bdev, b), \
			(unsigned long) bh->b_blocknr); \
		printk(f); \
		printk("\n"); \
	} while (0)
#else
# define ea_idebug(f...)
# define ea_bdebug(f...)
#endif

static void ext4_xattr_cache_insert(struct buffer_head *);
static struct buffer_head *ext4_xattr_cache_find(struct inode *,
						 struct ext4_xattr_header *,
						 struct mb_cache_entry **);
static void ext4_xattr_rehash(struct ext4_xattr_header *,
			      struct ext4_xattr_entry *);

static struct mb_cache *ext4_xattr_cache;

static struct xattr_handler *ext4_xattr_handler_map[] = {
	[EXT4_XATTR_INDEX_USER]		     = &ext4_xattr_user_handler,
#ifdef CONFIG_EXT4DEV_FS_POSIX_ACL
	[EXT4_XATTR_INDEX_POSIX_ACL_ACCESS]  = &ext4_xattr_acl_access_handler,
	[EXT4_XATTR_INDEX_POSIX_ACL_DEFAULT] = &ext4_xattr_acl_default_handler,
#endif
	[EXT4_XATTR_INDEX_TRUSTED]	     = &ext4_xattr_trusted_handler,
#ifdef CONFIG_EXT4DEV_FS_SECURITY
	[EXT4_XATTR_INDEX_SECURITY]	     = &ext4_xattr_security_handler,
#endif
};

struct xattr_handler *ext4_xattr_handlers[] = {
	&ext4_xattr_user_handler,
	&ext4_xattr_trusted_handler,
#ifdef CONFIG_EXT4DEV_FS_POSIX_ACL
	&ext4_xattr_acl_access_handler,
	&ext4_xattr_acl_default_handler,
#endif
#ifdef CONFIG_EXT4DEV_FS_SECURITY
	&ext4_xattr_security_handler,
#endif
	NULL
};

static inline struct xattr_handler *
ext4_xattr_handler(int name_index)
{
	struct xattr_handler *handler = NULL;

	if (name_index > 0 && name_index < ARRAY_SIZE(ext4_xattr_handler_map))
		handler = ext4_xattr_handler_map[name_index];
	return handler;
}

/*
 * Inode operation listxattr()
 *
 * dentry->d_inode->i_mutex: don't care
 */
ssize_t
ext4_listxattr(struct dentry *dentry, char *buffer, size_t size)
{
	return ext4_xattr_list(dentry->d_inode, buffer, size);
}

static int
ext4_xattr_check_names(struct ext4_xattr_entry *entry, void *end)
{
	while (!IS_LAST_ENTRY(entry)) {
		struct ext4_xattr_entry *next = EXT4_XATTR_NEXT(entry);
		if ((void *)next >= end)
			return -EIO;
		entry = next;
	}
	return 0;
}

static inline int
ext4_xattr_check_block(struct buffer_head *bh)
{
	int error;

	if (BHDR(bh)->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC) ||
	    BHDR(bh)->h_blocks != cpu_to_le32(1))
		return -EIO;
	error = ext4_xattr_check_names(BFIRST(bh), bh->b_data + bh->b_size);
	return error;
}

static inline int
ext4_xattr_check_entry(struct ext4_xattr_entry *entry, size_t size)
{
	size_t value_size = le32_to_cpu(entry->e_value_size);

	if (entry->e_value_block != 0 || value_size > size ||
	    le16_to_cpu(entry->e_value_offs) + value_size > size)
		return -EIO;
	return 0;
}

static int
ext4_xattr_find_entry(struct ext4_xattr_entry **pentry, int name_index,
		      const char *name, size_t size, int sorted)
{
	struct ext4_xattr_entry *entry;
	size_t name_len;
	int cmp = 1;

	if (name == NULL)
		return -EINVAL;
	name_len = strlen(name);
	entry = *pentry;
	for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) {
		cmp = name_index - entry->e_name_index;
		if (!cmp)
			cmp = name_len - entry->e_name_len;
		if (!cmp)
			cmp = memcmp(name, entry->e_name, name_len);
		if (cmp <= 0 && (sorted || cmp == 0))
			break;
	}
	*pentry = entry;
	if (!cmp && ext4_xattr_check_entry(entry, size))
		return -EIO;
	return cmp ? -ENODATA : 0;
}

static int
ext4_xattr_block_get(struct inode *inode, int name_index, const char *name,
		     void *buffer, size_t buffer_size)
{
	struct buffer_head *bh = NULL;
	struct ext4_xattr_entry *entry;
	size_t size;
	int error;

	ea_idebug(inode, "name=%d.%s, buffer=%p, buffer_size=%ld",
		  name_index, name, buffer, (long)buffer_size);

	error = -ENODATA;
	if (!EXT4_I(inode)->i_file_acl)
		goto cleanup;
	ea_idebug(inode, "reading block %u", EXT4_I(inode)->i_file_acl);
	bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
	if (!bh)
		goto cleanup;
	ea_bdebug(bh, "b_count=%d, refcount=%d",
		atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
	if (ext4_xattr_check_block(bh)) {
bad_block:	ext4_error(inode->i_sb, __FUNCTION__,
			   "inode %lu: bad block %llu", inode->i_ino,
			   EXT4_I(inode)->i_file_acl);
		error = -EIO;
		goto cleanup;
	}
	ext4_xattr_cache_insert(bh);
	entry = BFIRST(bh);
	error = ext4_xattr_find_entry(&entry, name_index, name, bh->b_size, 1);
	if (error == -EIO)
		goto bad_block;
	if (error)
		goto cleanup;
	size = le32_to_cpu(entry->e_value_size);
	if (buffer) {
		error = -ERANGE;
		if (size > buffer_size)
			goto cleanup;
		memcpy(buffer, bh->b_data + le16_to_cpu(entry->e_value_offs),
		       size);
	}
	error = size;

cleanup:
	brelse(bh);
	return error;
}

static int
ext4_xattr_ibody_get(struct inode *inode, int name_index, const char *name,
		     void *buffer, size_t buffer_size)
{
	struct ext4_xattr_ibody_header *header;
	struct ext4_xattr_entry *entry;
	struct ext4_inode *raw_inode;
	struct ext4_iloc iloc;
	size_t size;
	void *end;
	int error;

	if (!(EXT4_I(inode)->i_state & EXT4_STATE_XATTR))
		return -ENODATA;
	error = ext4_get_inode_loc(inode, &iloc);
	if (error)
		return error;
	raw_inode = ext4_raw_inode(&iloc);
	header = IHDR(inode, raw_inode);
	entry = IFIRST(header);
	end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
	error = ext4_xattr_check_names(entry, end);
	if (error)
		goto cleanup;
	error = ext4_xattr_find_entry(&entry, name_index, name,
				      end - (void *)entry, 0);
	if (error)
		goto cleanup;
	size = le32_to_cpu(entry->e_value_size);
	if (buffer) {
		error = -ERANGE;
		if (size > buffer_size)
			goto cleanup;
		memcpy(buffer, (void *)IFIRST(header) +
		       le16_to_cpu(entry->e_value_offs), size);
	}
	error = size;

cleanup:
	brelse(iloc.bh);
	return error;
}

/*
 * ext4_xattr_get()
 *
 * Copy an extended attribute into the buffer
 * provided, or compute the buffer size required.
 * Buffer is NULL to compute the size of the buffer required.
 *
 * Returns a negative error number on failure, or the number of bytes
 * used / required on success.
 */
int
ext4_xattr_get(struct inode *inode, int name_index, const char *name,
	       void *buffer, size_t buffer_size)
{
	int error;

	down_read(&EXT4_I(inode)->xattr_sem);
	error = ext4_xattr_ibody_get(inode, name_index, name, buffer,
				     buffer_size);
	if (error == -ENODATA)
		error = ext4_xattr_block_get(inode, name_index, name, buffer,
					     buffer_size);
	up_read(&EXT4_I(inode)->xattr_sem);
	return error;
}
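
/*
 * Typical use of the size-query convention described above (an illustrative
 * sketch only; the "foo" attribute name is made up):
 *
 *	int size = ext4_xattr_get(inode, EXT4_XATTR_INDEX_USER, "foo",
 *				  NULL, 0);
 *	if (size > 0) {
 *		char *buf = kmalloc(size, GFP_KERNEL);
 *		if (buf)
 *			size = ext4_xattr_get(inode, EXT4_XATTR_INDEX_USER,
 *					      "foo", buf, size);
 *	}
 */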

static int
ext4_xattr_list_entries(struct inode *inode, struct ext4_xattr_entry *entry,
			char *buffer, size_t buffer_size)
{
	size_t rest = buffer_size;

	for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) {
		struct xattr_handler *handler =
			ext4_xattr_handler(entry->e_name_index);

		if (handler) {
			size_t size = handler->list(inode, buffer, rest,
						    entry->e_name,
						    entry->e_name_len);
			if (buffer) {
				if (size > rest)
					return -ERANGE;
				buffer += size;
			}
			rest -= size;
		}
	}
	return buffer_size - rest;
}

static int
ext4_xattr_block_list(struct inode *inode, char *buffer, size_t buffer_size)
{
	struct buffer_head *bh = NULL;
	int error;

	ea_idebug(inode, "buffer=%p, buffer_size=%ld",
		  buffer, (long)buffer_size);

	error = 0;
	if (!EXT4_I(inode)->i_file_acl)
		goto cleanup;
	ea_idebug(inode, "reading block %u", EXT4_I(inode)->i_file_acl);
	bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
	error = -EIO;
	if (!bh)
		goto cleanup;
	ea_bdebug(bh, "b_count=%d, refcount=%d",
		atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
	if (ext4_xattr_check_block(bh)) {
		ext4_error(inode->i_sb, __FUNCTION__,
			   "inode %lu: bad block %llu", inode->i_ino,
			   EXT4_I(inode)->i_file_acl);
		error = -EIO;
		goto cleanup;
	}
	ext4_xattr_cache_insert(bh);
	error = ext4_xattr_list_entries(inode, BFIRST(bh), buffer, buffer_size);

cleanup:
	brelse(bh);

	return error;
}

static int
ext4_xattr_ibody_list(struct inode *inode, char *buffer, size_t buffer_size)
{
	struct ext4_xattr_ibody_header *header;
	struct ext4_inode *raw_inode;
	struct ext4_iloc iloc;
	void *end;
	int error;

	if (!(EXT4_I(inode)->i_state & EXT4_STATE_XATTR))
		return 0;
	error = ext4_get_inode_loc(inode, &iloc);
	if (error)
		return error;
	raw_inode = ext4_raw_inode(&iloc);
	header = IHDR(inode, raw_inode);
	end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
	error = ext4_xattr_check_names(IFIRST(header), end);
	if (error)
		goto cleanup;
	error = ext4_xattr_list_entries(inode, IFIRST(header),
					buffer, buffer_size);

cleanup:
	brelse(iloc.bh);
	return error;
}

/*
 * ext4_xattr_list()
 *
 * Copy a list of attribute names into the buffer
 * provided, or compute the buffer size required.
 * Buffer is NULL to compute the size of the buffer required.
 *
 * Returns a negative error number on failure, or the number of bytes
 * used / required on success.
 */
int
ext4_xattr_list(struct inode *inode, char *buffer, size_t buffer_size)
{
	int i_error, b_error;

	down_read(&EXT4_I(inode)->xattr_sem);
	i_error = ext4_xattr_ibody_list(inode, buffer, buffer_size);
	if (i_error < 0) {
		b_error = 0;
	} else {
		if (buffer) {
			buffer += i_error;
			buffer_size -= i_error;
		}
		b_error = ext4_xattr_block_list(inode, buffer, buffer_size);
		if (b_error < 0)
			i_error = 0;
	}
	up_read(&EXT4_I(inode)->xattr_sem);
	return i_error + b_error;
}
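
/*
 * The resulting list uses the usual listxattr(2) format: a sequence of
 * null-terminated attribute names including their prefixes, for example
 *
 *	"user.foo\0system.posix_acl_access\0security.selinux\0"
 *
 * (the names are illustrative). The prefixes are added by the individual
 * handlers' list callbacks.
 */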

/*
 * If the EXT4_FEATURE_COMPAT_EXT_ATTR feature of this file system is
 * not set, set it.
 */
static void ext4_xattr_update_super_block(handle_t *handle,
					  struct super_block *sb)
{
	if (EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_EXT_ATTR))
		return;

	if (ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh) == 0) {
		EXT4_SET_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_EXT_ATTR);
		sb->s_dirt = 1;
		ext4_journal_dirty_metadata(handle, EXT4_SB(sb)->s_sbh);
	}
}

/*
 * Release the xattr block BH: If the reference count is > 1, decrement
 * it; otherwise free the block.
 */
static void
ext4_xattr_release_block(handle_t *handle, struct inode *inode,
			 struct buffer_head *bh)
{
	struct mb_cache_entry *ce = NULL;

	ce = mb_cache_entry_get(ext4_xattr_cache, bh->b_bdev, bh->b_blocknr);
	if (BHDR(bh)->h_refcount == cpu_to_le32(1)) {
		ea_bdebug(bh, "refcount now=0; freeing");
		if (ce)
			mb_cache_entry_free(ce);
		ext4_free_blocks(handle, inode, bh->b_blocknr, 1);
		get_bh(bh);
		ext4_forget(handle, 1, inode, bh, bh->b_blocknr);
	} else {
		if (ext4_journal_get_write_access(handle, bh) == 0) {
			lock_buffer(bh);
			BHDR(bh)->h_refcount = cpu_to_le32(
				le32_to_cpu(BHDR(bh)->h_refcount) - 1);
			ext4_journal_dirty_metadata(handle, bh);
			if (IS_SYNC(inode))
				handle->h_sync = 1;
			DQUOT_FREE_BLOCK(inode, 1);
			unlock_buffer(bh);
			ea_bdebug(bh, "refcount now=%d; releasing",
				  le32_to_cpu(BHDR(bh)->h_refcount));
		}
		if (ce)
			mb_cache_entry_release(ce);
	}
}

struct ext4_xattr_info {
	int name_index;
	const char *name;
	const void *value;
	size_t value_len;
};

struct ext4_xattr_search {
	struct ext4_xattr_entry *first;	/* first entry in the region */
	void *base;			/* start of the region; value offsets
					   are relative to this */
	void *end;			/* first byte past the region */
	struct ext4_xattr_entry *here;	/* matching entry, or insertion point */
	int not_found;			/* result of the last entry lookup */
};

static int
ext4_xattr_set_entry(struct ext4_xattr_info *i, struct ext4_xattr_search *s)
{
	struct ext4_xattr_entry *last;
	size_t free, min_offs = s->end - s->base, name_len = strlen(i->name);

	/* Compute min_offs and last. */
	last = s->first;
	for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
		if (!last->e_value_block && last->e_value_size) {
			size_t offs = le16_to_cpu(last->e_value_offs);
			if (offs < min_offs)
				min_offs = offs;
		}
	}
	free = min_offs - ((void *)last - s->base) - sizeof(__u32);
	if (!s->not_found) {
		if (!s->here->e_value_block && s->here->e_value_size) {
			size_t size = le32_to_cpu(s->here->e_value_size);
			free += EXT4_XATTR_SIZE(size);
		}
		free += EXT4_XATTR_LEN(name_len);
	}
	if (i->value) {
		if (free < EXT4_XATTR_SIZE(i->value_len) ||
		    free < EXT4_XATTR_LEN(name_len) +
			   EXT4_XATTR_SIZE(i->value_len))
			return -ENOSPC;
	}

	if (i->value && s->not_found) {
		/* Insert the new name. */
		size_t size = EXT4_XATTR_LEN(name_len);
		size_t rest = (void *)last - (void *)s->here + sizeof(__u32);
		memmove((void *)s->here + size, s->here, rest);
		memset(s->here, 0, size);
		s->here->e_name_index = i->name_index;
		s->here->e_name_len = name_len;
		memcpy(s->here->e_name, i->name, name_len);
	} else {
		if (!s->here->e_value_block && s->here->e_value_size) {
			void *first_val = s->base + min_offs;
			size_t offs = le16_to_cpu(s->here->e_value_offs);
			void *val = s->base + offs;
			size_t size = EXT4_XATTR_SIZE(
				le32_to_cpu(s->here->e_value_size));

			if (i->value && size == EXT4_XATTR_SIZE(i->value_len)) {
				/* The old and the new value have the same
				   size. Just replace. */
				s->here->e_value_size =
					cpu_to_le32(i->value_len);
				memset(val + size - EXT4_XATTR_PAD, 0,
				       EXT4_XATTR_PAD); /* Clear pad bytes. */
				memcpy(val, i->value, i->value_len);
				return 0;
			}

			/* Remove the old value. */
			memmove(first_val + size, first_val, val - first_val);
			memset(first_val, 0, size);
			s->here->e_value_size = 0;
			s->here->e_value_offs = 0;
			min_offs += size;

			/* Adjust all value offsets. */
			last = s->first;
			while (!IS_LAST_ENTRY(last)) {
				size_t o = le16_to_cpu(last->e_value_offs);
				if (!last->e_value_block &&
				    last->e_value_size && o < offs)
					last->e_value_offs =
						cpu_to_le16(o + size);
				last = EXT4_XATTR_NEXT(last);
			}
		}
		if (!i->value) {
			/* Remove the old name. */
			size_t size = EXT4_XATTR_LEN(name_len);
			last = ENTRY((void *)last - size);
			memmove(s->here, (void *)s->here + size,
				(void *)last - (void *)s->here + sizeof(__u32));
			memset(last, 0, size);
		}
	}

	if (i->value) {
		/* Insert the new value. */
		s->here->e_value_size = cpu_to_le32(i->value_len);
		if (i->value_len) {
			size_t size = EXT4_XATTR_SIZE(i->value_len);
			void *val = s->base + min_offs - size;
			s->here->e_value_offs = cpu_to_le16(min_offs - size);
			memset(val + size - EXT4_XATTR_PAD, 0,
			       EXT4_XATTR_PAD); /* Clear the pad bytes. */
			memcpy(val, i->value, i->value_len);
		}
	}
	return 0;
}

struct ext4_xattr_block_find {
	struct ext4_xattr_search s;
	struct buffer_head *bh;
};

static int
ext4_xattr_block_find(struct inode *inode, struct ext4_xattr_info *i,
		      struct ext4_xattr_block_find *bs)
{
	struct super_block *sb = inode->i_sb;
	int error;

	ea_idebug(inode, "name=%d.%s, value=%p, value_len=%ld",
		  i->name_index, i->name, i->value, (long)i->value_len);

	if (EXT4_I(inode)->i_file_acl) {
		/* The inode already has an extended attribute block. */
		bs->bh = sb_bread(sb, EXT4_I(inode)->i_file_acl);
		error = -EIO;
		if (!bs->bh)
			goto cleanup;
		ea_bdebug(bs->bh, "b_count=%d, refcount=%d",
			atomic_read(&(bs->bh->b_count)),
			le32_to_cpu(BHDR(bs->bh)->h_refcount));
		if (ext4_xattr_check_block(bs->bh)) {
			ext4_error(sb, __FUNCTION__,
				"inode %lu: bad block %llu", inode->i_ino,
				EXT4_I(inode)->i_file_acl);
			error = -EIO;
			goto cleanup;
		}
		/* Find the named attribute. */
		bs->s.base = BHDR(bs->bh);
		bs->s.first = BFIRST(bs->bh);
		bs->s.end = bs->bh->b_data + bs->bh->b_size;
		bs->s.here = bs->s.first;
		error = ext4_xattr_find_entry(&bs->s.here, i->name_index,
					      i->name, bs->bh->b_size, 1);
		if (error && error != -ENODATA)
			goto cleanup;
		bs->s.not_found = error;
	}
	error = 0;

cleanup:
	return error;
}
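
/*
 * ext4_xattr_block_set()
 *
 * Apply the change described by the ext4_xattr_info to the external xattr
 * block located by ext4_xattr_block_find(). Depending on the block's
 * reference count the block is modified in place, cloned, or newly
 * allocated; an identical block found in the mbcache is shared instead of
 * allocating a duplicate.
 */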

static int
ext4_xattr_block_set(handle_t *handle, struct inode *inode,
		     struct ext4_xattr_info *i,
		     struct ext4_xattr_block_find *bs)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *new_bh = NULL;
	struct ext4_xattr_search *s = &bs->s;
	struct mb_cache_entry *ce = NULL;
	int error;

#define header(x) ((struct ext4_xattr_header *)(x))

	if (i->value && i->value_len > sb->s_blocksize)
		return -ENOSPC;
	if (s->base) {
		ce = mb_cache_entry_get(ext4_xattr_cache, bs->bh->b_bdev,
					bs->bh->b_blocknr);
		if (header(s->base)->h_refcount == cpu_to_le32(1)) {
			if (ce) {
				mb_cache_entry_free(ce);
				ce = NULL;
			}
			ea_bdebug(bs->bh, "modifying in-place");
			error = ext4_journal_get_write_access(handle, bs->bh);
			if (error)
				goto cleanup;
			lock_buffer(bs->bh);
			error = ext4_xattr_set_entry(i, s);
			if (!error) {
				if (!IS_LAST_ENTRY(s->first))
					ext4_xattr_rehash(header(s->base),
							  s->here);
				ext4_xattr_cache_insert(bs->bh);
			}
			unlock_buffer(bs->bh);
			if (error == -EIO)
				goto bad_block;
			if (!error)
				error = ext4_journal_dirty_metadata(handle,
								    bs->bh);
			if (error)
				goto cleanup;
			goto inserted;
		} else {
			int offset = (char *)s->here - bs->bh->b_data;

			if (ce) {
				mb_cache_entry_release(ce);
				ce = NULL;
			}
			ea_bdebug(bs->bh, "cloning");
			s->base = kmalloc(bs->bh->b_size, GFP_KERNEL);
			error = -ENOMEM;
			if (s->base == NULL)
				goto cleanup;
			memcpy(s->base, BHDR(bs->bh), bs->bh->b_size);
			s->first = ENTRY(header(s->base)+1);
			header(s->base)->h_refcount = cpu_to_le32(1);
			s->here = ENTRY(s->base + offset);
			s->end = s->base + bs->bh->b_size;
		}
	} else {
		/* Allocate a buffer where we construct the new block. */
		s->base = kmalloc(sb->s_blocksize, GFP_KERNEL);
		/* assert(header == s->base) */
		error = -ENOMEM;
		if (s->base == NULL)
			goto cleanup;
		memset(s->base, 0, sb->s_blocksize);
		header(s->base)->h_magic = cpu_to_le32(EXT4_XATTR_MAGIC);
		header(s->base)->h_blocks = cpu_to_le32(1);
		header(s->base)->h_refcount = cpu_to_le32(1);
		s->first = ENTRY(header(s->base)+1);
		s->here = ENTRY(header(s->base)+1);
		s->end = s->base + sb->s_blocksize;
	}

	error = ext4_xattr_set_entry(i, s);
	if (error == -EIO)
		goto bad_block;
	if (error)
		goto cleanup;
	if (!IS_LAST_ENTRY(s->first))
		ext4_xattr_rehash(header(s->base), s->here);

inserted:
	if (!IS_LAST_ENTRY(s->first)) {
		new_bh = ext4_xattr_cache_find(inode, header(s->base), &ce);
		if (new_bh) {
			/* We found an identical block in the cache. */
			if (new_bh == bs->bh)
				ea_bdebug(new_bh, "keeping");
			else {
				/* The old block is released after updating
				   the inode. */
				error = -EDQUOT;
				if (DQUOT_ALLOC_BLOCK(inode, 1))
					goto cleanup;
				error = ext4_journal_get_write_access(handle,
								      new_bh);
				if (error)
					goto cleanup_dquot;
				lock_buffer(new_bh);
				BHDR(new_bh)->h_refcount = cpu_to_le32(1 +
					le32_to_cpu(BHDR(new_bh)->h_refcount));
				ea_bdebug(new_bh, "reusing; refcount now=%d",
					le32_to_cpu(BHDR(new_bh)->h_refcount));
				unlock_buffer(new_bh);
				error = ext4_journal_dirty_metadata(handle,
								    new_bh);
				if (error)
					goto cleanup_dquot;
			}
			mb_cache_entry_release(ce);
			ce = NULL;
		} else if (bs->bh && s->base == bs->bh->b_data) {
			/* We were modifying this block in-place. */
			ea_bdebug(bs->bh, "keeping this block");
			new_bh = bs->bh;
			get_bh(new_bh);
		} else {
			/* We need to allocate a new block */
			ext4_fsblk_t goal = le32_to_cpu(
					EXT4_SB(sb)->s_es->s_first_data_block) +
				(ext4_fsblk_t)EXT4_I(inode)->i_block_group *
				EXT4_BLOCKS_PER_GROUP(sb);
			ext4_fsblk_t block = ext4_new_block(handle, inode,
							goal, &error);
			if (error)
				goto cleanup;
			ea_idebug(inode, "creating block %d", block);

			new_bh = sb_getblk(sb, block);
			if (!new_bh) {
getblk_failed:
				ext4_free_blocks(handle, inode, block, 1);
				error = -EIO;
				goto cleanup;
			}
			lock_buffer(new_bh);
			error = ext4_journal_get_create_access(handle, new_bh);
			if (error) {
				unlock_buffer(new_bh);
				goto getblk_failed;
			}
			memcpy(new_bh->b_data, s->base, new_bh->b_size);
			set_buffer_uptodate(new_bh);
			unlock_buffer(new_bh);
			ext4_xattr_cache_insert(new_bh);
			error = ext4_journal_dirty_metadata(handle, new_bh);
			if (error)
				goto cleanup;
		}
	}

	/* Update the inode. */
	EXT4_I(inode)->i_file_acl = new_bh ? new_bh->b_blocknr : 0;

	/* Drop the previous xattr block. */
	if (bs->bh && bs->bh != new_bh)
		ext4_xattr_release_block(handle, inode, bs->bh);
	error = 0;

cleanup:
	if (ce)
		mb_cache_entry_release(ce);
	brelse(new_bh);
	if (!(bs->bh && s->base == bs->bh->b_data))
		kfree(s->base);

	return error;

cleanup_dquot:
	DQUOT_FREE_BLOCK(inode, 1);
	goto cleanup;

bad_block:
	ext4_error(inode->i_sb, __FUNCTION__,
		   "inode %lu: bad block %llu", inode->i_ino,
		   EXT4_I(inode)->i_file_acl);
	goto cleanup;

#undef header
}

struct ext4_xattr_ibody_find {
	struct ext4_xattr_search s;
	struct ext4_iloc iloc;
};

static int
ext4_xattr_ibody_find(struct inode *inode, struct ext4_xattr_info *i,
		      struct ext4_xattr_ibody_find *is)
{
	struct ext4_xattr_ibody_header *header;
	struct ext4_inode *raw_inode;
	int error;

	if (EXT4_I(inode)->i_extra_isize == 0)
		return 0;
	raw_inode = ext4_raw_inode(&is->iloc);
	header = IHDR(inode, raw_inode);
	is->s.base = is->s.first = IFIRST(header);
	is->s.here = is->s.first;
	is->s.end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
	if (EXT4_I(inode)->i_state & EXT4_STATE_XATTR) {
		error = ext4_xattr_check_names(IFIRST(header), is->s.end);
		if (error)
			return error;
		/* Find the named attribute. */
		error = ext4_xattr_find_entry(&is->s.here, i->name_index,
					      i->name, is->s.end -
					      (void *)is->s.base, 0);
		if (error && error != -ENODATA)
			return error;
		is->s.not_found = error;
	}
	return 0;
}

static int
ext4_xattr_ibody_set(handle_t *handle, struct inode *inode,
		     struct ext4_xattr_info *i,
		     struct ext4_xattr_ibody_find *is)
{
	struct ext4_xattr_ibody_header *header;
	struct ext4_xattr_search *s = &is->s;
	int error;

	if (EXT4_I(inode)->i_extra_isize == 0)
		return -ENOSPC;
	error = ext4_xattr_set_entry(i, s);
	if (error)
		return error;
	header = IHDR(inode, ext4_raw_inode(&is->iloc));
	if (!IS_LAST_ENTRY(s->first)) {
		header->h_magic = cpu_to_le32(EXT4_XATTR_MAGIC);
		EXT4_I(inode)->i_state |= EXT4_STATE_XATTR;
	} else {
		header->h_magic = cpu_to_le32(0);
		EXT4_I(inode)->i_state &= ~EXT4_STATE_XATTR;
	}
	return 0;
}

/*
 * ext4_xattr_set_handle()
 *
 * Create, replace or remove an extended attribute for this inode. Buffer
 * is NULL to remove an existing extended attribute, and non-NULL to
 * either replace an existing extended attribute, or create a new extended
 * attribute. The flags XATTR_REPLACE and XATTR_CREATE
 * specify that an extended attribute must exist and must not exist
 * previous to the call, respectively.
 *
 * Returns 0, or a negative error number on failure.
 */
int
ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index,
		      const char *name, const void *value, size_t value_len,
		      int flags)
{
	struct ext4_xattr_info i = {
		.name_index = name_index,
		.name = name,
		.value = value,
		.value_len = value_len,

	};
	struct ext4_xattr_ibody_find is = {
		.s = { .not_found = -ENODATA, },
	};
	struct ext4_xattr_block_find bs = {
		.s = { .not_found = -ENODATA, },
	};
	int error;

	if (!name)
		return -EINVAL;
	if (strlen(name) > 255)
		return -ERANGE;
	down_write(&EXT4_I(inode)->xattr_sem);
	error = ext4_get_inode_loc(inode, &is.iloc);
	if (error)
		goto cleanup;

	if (EXT4_I(inode)->i_state & EXT4_STATE_NEW) {
		struct ext4_inode *raw_inode = ext4_raw_inode(&is.iloc);
		memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size);
		EXT4_I(inode)->i_state &= ~EXT4_STATE_NEW;
	}

	error = ext4_xattr_ibody_find(inode, &i, &is);
	if (error)
		goto cleanup;
	if (is.s.not_found)
		error = ext4_xattr_block_find(inode, &i, &bs);
	if (error)
		goto cleanup;
	if (is.s.not_found && bs.s.not_found) {
		error = -ENODATA;
		if (flags & XATTR_REPLACE)
			goto cleanup;
		error = 0;
		if (!value)
			goto cleanup;
	} else {
		error = -EEXIST;
		if (flags & XATTR_CREATE)
			goto cleanup;
	}
	error = ext4_journal_get_write_access(handle, is.iloc.bh);
	if (error)
		goto cleanup;
	if (!value) {
		if (!is.s.not_found)
			error = ext4_xattr_ibody_set(handle, inode, &i, &is);
		else if (!bs.s.not_found)
			error = ext4_xattr_block_set(handle, inode, &i, &bs);
	} else {
		error = ext4_xattr_ibody_set(handle, inode, &i, &is);
		if (!error && !bs.s.not_found) {
			i.value = NULL;
			error = ext4_xattr_block_set(handle, inode, &i, &bs);
		} else if (error == -ENOSPC) {
			error = ext4_xattr_block_set(handle, inode, &i, &bs);
			if (error)
				goto cleanup;
			if (!is.s.not_found) {
				i.value = NULL;
				error = ext4_xattr_ibody_set(handle, inode, &i,
							     &is);
			}
		}
	}
	if (!error) {
		ext4_xattr_update_super_block(handle, inode->i_sb);
		inode->i_ctime = CURRENT_TIME_SEC;
		error = ext4_mark_iloc_dirty(handle, inode, &is.iloc);
		/*
		 * The bh is consumed by ext4_mark_iloc_dirty, even with
		 * error != 0.
		 */
		is.iloc.bh = NULL;
		if (IS_SYNC(inode))
			handle->h_sync = 1;
	}

cleanup:
	brelse(is.iloc.bh);
	brelse(bs.bh);
	up_write(&EXT4_I(inode)->xattr_sem);
	return error;
}

/*
 * ext4_xattr_set()
 *
 * Like ext4_xattr_set_handle, but start from an inode. This extended
 * attribute modification is a filesystem transaction by itself.
 *
 * Returns 0, or a negative error number on failure.
 */
int
ext4_xattr_set(struct inode *inode, int name_index, const char *name,
	       const void *value, size_t value_len, int flags)
{
	handle_t *handle;
	int error, retries = 0;

retry:
	handle = ext4_journal_start(inode, EXT4_DATA_TRANS_BLOCKS(inode->i_sb));
	if (IS_ERR(handle)) {
		error = PTR_ERR(handle);
	} else {
		int error2;

		error = ext4_xattr_set_handle(handle, inode, name_index, name,
					      value, value_len, flags);
		error2 = ext4_journal_stop(handle);
		if (error == -ENOSPC &&
		    ext4_should_retry_alloc(inode->i_sb, &retries))
			goto retry;
		if (error == 0)
			error = error2;
	}

	return error;
}
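
/*
 * Example (a sketch; the attribute name and value are made up): create a
 * "user.origin" attribute, failing with -EEXIST if it already exists:
 *
 *	err = ext4_xattr_set(inode, EXT4_XATTR_INDEX_USER, "origin",
 *			     "imported", 8, XATTR_CREATE);
 *
 * Passing a NULL value removes the attribute; XATTR_REPLACE instead requires
 * that the attribute already exist.
 */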

/*
 * ext4_xattr_delete_inode()
 *
 * Free extended attribute resources associated with this inode. This
 * is called immediately before an inode is freed. We have exclusive
 * access to the inode.
 */
void
ext4_xattr_delete_inode(handle_t *handle, struct inode *inode)
{
	struct buffer_head *bh = NULL;

	if (!EXT4_I(inode)->i_file_acl)
		goto cleanup;
	bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
	if (!bh) {
		ext4_error(inode->i_sb, __FUNCTION__,
			"inode %lu: block %llu read error", inode->i_ino,
			EXT4_I(inode)->i_file_acl);
		goto cleanup;
	}
	if (BHDR(bh)->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC) ||
	    BHDR(bh)->h_blocks != cpu_to_le32(1)) {
		ext4_error(inode->i_sb, __FUNCTION__,
			"inode %lu: bad block %llu", inode->i_ino,
			EXT4_I(inode)->i_file_acl);
		goto cleanup;
	}
	ext4_xattr_release_block(handle, inode, bh);
	EXT4_I(inode)->i_file_acl = 0;

cleanup:
	brelse(bh);
}

/*
 * ext4_xattr_put_super()
 *
 * This is called when a file system is unmounted.
 */
void
ext4_xattr_put_super(struct super_block *sb)
{
	mb_cache_shrink(sb->s_bdev);
}

/*
 * ext4_xattr_cache_insert()
 *
 * Create a new entry in the extended attribute cache, and insert
 * it unless such an entry is already in the cache.
 *
 * Errors are ignored; on failure the block is simply not added to the cache.
 */
static void
ext4_xattr_cache_insert(struct buffer_head *bh)
{
	__u32 hash = le32_to_cpu(BHDR(bh)->h_hash);
	struct mb_cache_entry *ce;
	int error;

	ce = mb_cache_entry_alloc(ext4_xattr_cache);
	if (!ce) {
		ea_bdebug(bh, "out of memory");
		return;
	}
	error = mb_cache_entry_insert(ce, bh->b_bdev, bh->b_blocknr, &hash);
	if (error) {
		mb_cache_entry_free(ce);
		if (error == -EBUSY) {
			ea_bdebug(bh, "already in cache");
			error = 0;
		}
	} else {
		ea_bdebug(bh, "inserting [%x]", (int)hash);
		mb_cache_entry_release(ce);
	}
}

/*
 * ext4_xattr_cmp()
 *
 * Compare two extended attribute blocks for equality.
 *
 * Returns 0 if the blocks are equal, 1 if they differ, and
 * a negative error number on errors.
 */
static int
ext4_xattr_cmp(struct ext4_xattr_header *header1,
	       struct ext4_xattr_header *header2)
{
	struct ext4_xattr_entry *entry1, *entry2;

	entry1 = ENTRY(header1+1);
	entry2 = ENTRY(header2+1);
	while (!IS_LAST_ENTRY(entry1)) {
		if (IS_LAST_ENTRY(entry2))
			return 1;
		if (entry1->e_hash != entry2->e_hash ||
		    entry1->e_name_index != entry2->e_name_index ||
		    entry1->e_name_len != entry2->e_name_len ||
		    entry1->e_value_size != entry2->e_value_size ||
		    memcmp(entry1->e_name, entry2->e_name, entry1->e_name_len))
			return 1;
		if (entry1->e_value_block != 0 || entry2->e_value_block != 0)
			return -EIO;
		if (memcmp((char *)header1 + le16_to_cpu(entry1->e_value_offs),
			   (char *)header2 + le16_to_cpu(entry2->e_value_offs),
			   le32_to_cpu(entry1->e_value_size)))
			return 1;

		entry1 = EXT4_XATTR_NEXT(entry1);
		entry2 = EXT4_XATTR_NEXT(entry2);
	}
	if (!IS_LAST_ENTRY(entry2))
		return 1;
	return 0;
}

/*
 * ext4_xattr_cache_find()
 *
 * Find an identical extended attribute block.
 *
 * Returns a pointer to the block found, or NULL if such a block was
 * not found or an error occurred.
 */
static struct buffer_head *
ext4_xattr_cache_find(struct inode *inode, struct ext4_xattr_header *header,
		      struct mb_cache_entry **pce)
{
	__u32 hash = le32_to_cpu(header->h_hash);
	struct mb_cache_entry *ce;

	if (!header->h_hash)
		return NULL;  /* never share */
	ea_idebug(inode, "looking for cached blocks [%x]", (int)hash);
again:
	ce = mb_cache_entry_find_first(ext4_xattr_cache, 0,
				       inode->i_sb->s_bdev, hash);
	while (ce) {
		struct buffer_head *bh;

		if (IS_ERR(ce)) {
			if (PTR_ERR(ce) == -EAGAIN)
				goto again;
			break;
		}
		bh = sb_bread(inode->i_sb, ce->e_block);
		if (!bh) {
			ext4_error(inode->i_sb, __FUNCTION__,
				"inode %lu: block %lu read error",
				inode->i_ino, (unsigned long) ce->e_block);
		} else if (le32_to_cpu(BHDR(bh)->h_refcount) >=
				EXT4_XATTR_REFCOUNT_MAX) {
			ea_idebug(inode, "block %lu refcount %d>=%d",
				  (unsigned long) ce->e_block,
				  le32_to_cpu(BHDR(bh)->h_refcount),
					  EXT4_XATTR_REFCOUNT_MAX);
		} else if (ext4_xattr_cmp(header, BHDR(bh)) == 0) {
			*pce = ce;
			return bh;
		}
		brelse(bh);
		ce = mb_cache_entry_find_next(ce, 0, inode->i_sb->s_bdev, hash);
	}
	return NULL;
}

#define NAME_HASH_SHIFT 5
#define VALUE_HASH_SHIFT 16

/*
 * ext4_xattr_hash_entry()
 *
 * Compute the hash of an extended attribute.
 */
static inline void ext4_xattr_hash_entry(struct ext4_xattr_header *header,
					 struct ext4_xattr_entry *entry)
{
	__u32 hash = 0;
	char *name = entry->e_name;
	int n;

	for (n=0; n < entry->e_name_len; n++) {
		hash = (hash << NAME_HASH_SHIFT) ^
		       (hash >> (8*sizeof(hash) - NAME_HASH_SHIFT)) ^
		       *name++;
	}

	if (entry->e_value_block == 0 && entry->e_value_size != 0) {
		__le32 *value = (__le32 *)((char *)header +
			le16_to_cpu(entry->e_value_offs));
		for (n = (le32_to_cpu(entry->e_value_size) +
		     EXT4_XATTR_ROUND) >> EXT4_XATTR_PAD_BITS; n; n--) {
			hash = (hash << VALUE_HASH_SHIFT) ^
			       (hash >> (8*sizeof(hash) - VALUE_HASH_SHIFT)) ^
			       le32_to_cpu(*value++);
		}
	}
	entry->e_hash = cpu_to_le32(hash);
}

#undef NAME_HASH_SHIFT
#undef VALUE_HASH_SHIFT

#define BLOCK_HASH_SHIFT 16

/*
 * ext4_xattr_rehash()
 *
 * Re-compute the extended attribute hash value after an entry has changed.
 */
static void ext4_xattr_rehash(struct ext4_xattr_header *header,
			      struct ext4_xattr_entry *entry)
{
	struct ext4_xattr_entry *here;
	__u32 hash = 0;

	ext4_xattr_hash_entry(header, entry);
	here = ENTRY(header+1);
	while (!IS_LAST_ENTRY(here)) {
		if (!here->e_hash) {
			/* Block is not shared if an entry's hash value == 0 */
			hash = 0;
			break;
		}
		hash = (hash << BLOCK_HASH_SHIFT) ^
		       (hash >> (8*sizeof(hash) - BLOCK_HASH_SHIFT)) ^
		       le32_to_cpu(here->e_hash);
		here = EXT4_XATTR_NEXT(here);
	}
	header->h_hash = cpu_to_le32(hash);
}

#undef BLOCK_HASH_SHIFT

int __init
init_ext4_xattr(void)
{
	ext4_xattr_cache = mb_cache_create("ext4_xattr", NULL,
		sizeof(struct mb_cache_entry) +
		sizeof(((struct mb_cache_entry *) 0)->e_indexes[0]), 1, 6);
	if (!ext4_xattr_cache)
		return -ENOMEM;
	return 0;
}

void
exit_ext4_xattr(void)
{
	if (ext4_xattr_cache)
		mb_cache_destroy(ext4_xattr_cache);
	ext4_xattr_cache = NULL;
}