/*
 * linux/fs/ext4/xattr.c
 *
 * Copyright (C) 2001-2003 Andreas Gruenbacher, <agruen@suse.de>
 *
 * Fix by Harrison Xing <harrison@mountainviewdata.com>.
 * Ext4 code with a lot of help from Eric Jarman <ejarman@acm.org>.
 * Extended attributes for symlinks and special files added per
 *  suggestion of Luka Renko <luka.renko@hermes.si>.
 * xattr consolidation Copyright (c) 2004 James Morris <jmorris@redhat.com>,
 *  Red Hat Inc.
 * ea-in-inode support by Alex Tomas <alex@clusterfs.com> aka bzzz
 *  and Andreas Gruenbacher <agruen@suse.de>.
 */

/*
 * Extended attributes are stored directly in inodes (on file systems with
 * inodes bigger than 128 bytes) and on additional disk blocks. The i_file_acl
 * field contains the block number if an inode uses an additional block. All
 * attributes must fit in the inode and one additional block. Blocks that
 * contain the identical set of attributes may be shared among several inodes.
 * Identical blocks are detected by keeping a cache of blocks that have
 * recently been accessed.
 *
 * The attributes in inodes and on blocks have a different header; the entries
 * are stored in the same format:
 *
 *   +------------------+
 *   | header           |
 *   | entry 1          | |
 *   | entry 2          | | growing downwards
 *   | entry 3          | v
 *   | four null bytes  |
 *   | . . .            |
 *   | value 1          | ^
 *   | value 3          | | growing upwards
 *   | value 2          | |
 *   +------------------+
 *
 * The header is followed by multiple entry descriptors. In disk blocks, the
 * entry descriptors are kept sorted. In inodes, they are unsorted. The
 * attribute values are aligned to the end of the block in no specific order.
 *
 * Locking strategy
 * ----------------
 * EXT4_I(inode)->i_file_acl is protected by EXT4_I(inode)->xattr_sem.
 * EA blocks are only changed if they are exclusive to an inode, so
 * holding xattr_sem also means that nothing but the EA block's reference
 * count can change. Multiple writers to the same block are synchronized
 * by the buffer lock.
 */
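/*
 * Entry descriptors and values are padded to a multiple of four bytes
 * (EXT4_XATTR_PAD): EXT4_XATTR_LEN(name_len) is the padded size of an
 * entry descriptor and EXT4_XATTR_SIZE(size) the padded size of a
 * value.  The on-disk header and entry structures themselves are
 * declared in xattr.h.
 */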

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/mbcache.h>
#include <linux/quotaops.h>
#include <linux/rwsem.h>
#include "ext4_jbd2.h"
#include "ext4.h"
#include "xattr.h"
#include "acl.h"

#ifdef EXT4_XATTR_DEBUG
# define ea_idebug(inode, f...) do { \
		printk(KERN_DEBUG "inode %s:%lu: ", \
			inode->i_sb->s_id, inode->i_ino); \
		printk(f); \
		printk("\n"); \
	} while (0)
# define ea_bdebug(bh, f...) do { \
		char b[BDEVNAME_SIZE]; \
		printk(KERN_DEBUG "block %s:%lu: ", \
			bdevname(bh->b_bdev, b), \
			(unsigned long) bh->b_blocknr); \
		printk(f); \
		printk("\n"); \
	} while (0)
#else
# define ea_idebug(inode, fmt, ...)	no_printk(fmt, ##__VA_ARGS__)
# define ea_bdebug(bh, fmt, ...)	no_printk(fmt, ##__VA_ARGS__)
#endif

static void ext4_xattr_cache_insert(struct mb_cache *, struct buffer_head *);
static struct buffer_head *ext4_xattr_cache_find(struct inode *,
						 struct ext4_xattr_header *,
						 struct mb_cache_entry **);
static void ext4_xattr_rehash(struct ext4_xattr_header *,
			      struct ext4_xattr_entry *);
static int ext4_xattr_list(struct dentry *dentry, char *buffer,
			   size_t buffer_size);

static const struct xattr_handler *ext4_xattr_handler_map[] = {
	[EXT4_XATTR_INDEX_USER]		     = &ext4_xattr_user_handler,
#ifdef CONFIG_EXT4_FS_POSIX_ACL
	[EXT4_XATTR_INDEX_POSIX_ACL_ACCESS]  = &posix_acl_access_xattr_handler,
	[EXT4_XATTR_INDEX_POSIX_ACL_DEFAULT] = &posix_acl_default_xattr_handler,
#endif
	[EXT4_XATTR_INDEX_TRUSTED]	     = &ext4_xattr_trusted_handler,
#ifdef CONFIG_EXT4_FS_SECURITY
	[EXT4_XATTR_INDEX_SECURITY]	     = &ext4_xattr_security_handler,
#endif
};

const struct xattr_handler *ext4_xattr_handlers[] = {
	&ext4_xattr_user_handler,
	&ext4_xattr_trusted_handler,
#ifdef CONFIG_EXT4_FS_POSIX_ACL
	&posix_acl_access_xattr_handler,
	&posix_acl_default_xattr_handler,
#endif
#ifdef CONFIG_EXT4_FS_SECURITY
	&ext4_xattr_security_handler,
#endif
	NULL
};

#define EXT4_GET_MB_CACHE(inode)	(((struct ext4_sb_info *) \
				inode->i_sb->s_fs_info)->s_mb_cache)

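/*
 * Compute the checksum of an xattr block: crc32c, seeded with the
 * per-filesystem checksum seed, over the little-endian block number
 * followed by the whole block with h_checksum zeroed out.
 */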
static __le32 ext4_xattr_block_csum(struct inode *inode,
				    sector_t block_nr,
				    struct ext4_xattr_header *hdr)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	__u32 csum;
	__le32 save_csum;
	__le64 dsk_block_nr = cpu_to_le64(block_nr);

	save_csum = hdr->h_checksum;
	hdr->h_checksum = 0;
	csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&dsk_block_nr,
			   sizeof(dsk_block_nr));
	csum = ext4_chksum(sbi, csum, (__u8 *)hdr,
			   EXT4_BLOCK_SIZE(inode->i_sb));

	hdr->h_checksum = save_csum;
	return cpu_to_le32(csum);
}

static int ext4_xattr_block_csum_verify(struct inode *inode,
					sector_t block_nr,
					struct ext4_xattr_header *hdr)
{
	if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
		EXT4_FEATURE_RO_COMPAT_METADATA_CSUM) &&
	    (hdr->h_checksum != ext4_xattr_block_csum(inode, block_nr, hdr)))
		return 0;
	return 1;
}

static void ext4_xattr_block_csum_set(struct inode *inode,
				      sector_t block_nr,
				      struct ext4_xattr_header *hdr)
{
	if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
		EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
		return;

	hdr->h_checksum = ext4_xattr_block_csum(inode, block_nr, hdr);
}

static inline int ext4_handle_dirty_xattr_block(handle_t *handle,
						struct inode *inode,
						struct buffer_head *bh)
{
	ext4_xattr_block_csum_set(inode, bh->b_blocknr, BHDR(bh));
	return ext4_handle_dirty_metadata(handle, inode, bh);
}

static inline const struct xattr_handler *
ext4_xattr_handler(int name_index)
{
	const struct xattr_handler *handler = NULL;

	if (name_index > 0 && name_index < ARRAY_SIZE(ext4_xattr_handler_map))
		handler = ext4_xattr_handler_map[name_index];
	return handler;
}

/*
 * Inode operation listxattr()
 *
 * dentry->d_inode->i_mutex: don't care
 */
ssize_t
ext4_listxattr(struct dentry *dentry, char *buffer, size_t size)
{
	return ext4_xattr_list(dentry, buffer, size);
}

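/*
 * Validate an entry list: the first pass walks the descriptor chain to
 * make sure each entry lies inside the buffer; the second checks that
 * every value falls between the end of that chain (including the
 * four-byte terminator) and the end of the buffer.
 */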
static int
ext4_xattr_check_names(struct ext4_xattr_entry *entry, void *end,
		       void *value_start)
{
	struct ext4_xattr_entry *e = entry;

	while (!IS_LAST_ENTRY(e)) {
		struct ext4_xattr_entry *next = EXT4_XATTR_NEXT(e);
		if ((void *)next >= end)
			return -EIO;
		e = next;
	}

	while (!IS_LAST_ENTRY(entry)) {
		if (entry->e_value_size != 0 &&
		    (value_start + le16_to_cpu(entry->e_value_offs) <
		     (void *)e + sizeof(__u32) ||
		     value_start + le16_to_cpu(entry->e_value_offs) +
		    le32_to_cpu(entry->e_value_size) > end))
			return -EIO;
		entry = EXT4_XATTR_NEXT(entry);
	}

	return 0;
}

static inline int
ext4_xattr_check_block(struct inode *inode, struct buffer_head *bh)
{
	int error;

	if (buffer_verified(bh))
		return 0;

	if (BHDR(bh)->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC) ||
	    BHDR(bh)->h_blocks != cpu_to_le32(1))
		return -EIO;
	if (!ext4_xattr_block_csum_verify(inode, bh->b_blocknr, BHDR(bh)))
		return -EIO;
	error = ext4_xattr_check_names(BFIRST(bh), bh->b_data + bh->b_size,
				       bh->b_data);
	if (!error)
		set_buffer_verified(bh);
	return error;
}

static inline int
ext4_xattr_check_entry(struct ext4_xattr_entry *entry, size_t size)
{
	size_t value_size = le32_to_cpu(entry->e_value_size);

	if (entry->e_value_block != 0 || value_size > size ||
	    le16_to_cpu(entry->e_value_offs) + value_size > size)
		return -EIO;
	return 0;
}

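/*
 * Look up the named attribute in an entry list.  Entries compare by
 * name index, then name length, then the name itself; with @sorted set
 * (block entries) the scan stops at the first entry that sorts after
 * the search key.  Returns 0 with *pentry at the match, -ENODATA if
 * the attribute is absent, or -EIO if the matching entry is invalid.
 */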
static int
ext4_xattr_find_entry(struct ext4_xattr_entry **pentry, int name_index,
		      const char *name, size_t size, int sorted)
{
	struct ext4_xattr_entry *entry;
	size_t name_len;
	int cmp = 1;

	if (name == NULL)
		return -EINVAL;
	name_len = strlen(name);
	entry = *pentry;
	for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) {
		cmp = name_index - entry->e_name_index;
		if (!cmp)
			cmp = name_len - entry->e_name_len;
		if (!cmp)
			cmp = memcmp(name, entry->e_name, name_len);
		if (cmp <= 0 && (sorted || cmp == 0))
			break;
	}
	*pentry = entry;
	if (!cmp && ext4_xattr_check_entry(entry, size))
		return -EIO;
	return cmp ? -ENODATA : 0;
}

static int
ext4_xattr_block_get(struct inode *inode, int name_index, const char *name,
		     void *buffer, size_t buffer_size)
{
	struct buffer_head *bh = NULL;
	struct ext4_xattr_entry *entry;
	size_t size;
	int error;
	struct mb_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode);

	ea_idebug(inode, "name=%d.%s, buffer=%p, buffer_size=%ld",
		  name_index, name, buffer, (long)buffer_size);

	error = -ENODATA;
	if (!EXT4_I(inode)->i_file_acl)
		goto cleanup;
	ea_idebug(inode, "reading block %llu",
		  (unsigned long long)EXT4_I(inode)->i_file_acl);
	bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
	if (!bh)
		goto cleanup;
	ea_bdebug(bh, "b_count=%d, refcount=%d",
		atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
	if (ext4_xattr_check_block(inode, bh)) {
bad_block:
		EXT4_ERROR_INODE(inode, "bad block %llu",
				 EXT4_I(inode)->i_file_acl);
		error = -EIO;
		goto cleanup;
	}
	ext4_xattr_cache_insert(ext4_mb_cache, bh);
	entry = BFIRST(bh);
	error = ext4_xattr_find_entry(&entry, name_index, name, bh->b_size, 1);
	if (error == -EIO)
		goto bad_block;
	if (error)
		goto cleanup;
	size = le32_to_cpu(entry->e_value_size);
	if (buffer) {
		error = -ERANGE;
		if (size > buffer_size)
			goto cleanup;
		memcpy(buffer, bh->b_data + le16_to_cpu(entry->e_value_offs),
		       size);
	}
	error = size;

cleanup:
	brelse(bh);
	return error;
}

int
ext4_xattr_ibody_get(struct inode *inode, int name_index, const char *name,
		     void *buffer, size_t buffer_size)
{
	struct ext4_xattr_ibody_header *header;
	struct ext4_xattr_entry *entry;
	struct ext4_inode *raw_inode;
	struct ext4_iloc iloc;
	size_t size;
	void *end;
	int error;

	if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR))
		return -ENODATA;
	error = ext4_get_inode_loc(inode, &iloc);
	if (error)
		return error;
	raw_inode = ext4_raw_inode(&iloc);
	header = IHDR(inode, raw_inode);
	entry = IFIRST(header);
	end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
	error = ext4_xattr_check_names(entry, end, entry);
	if (error)
		goto cleanup;
	error = ext4_xattr_find_entry(&entry, name_index, name,
				      end - (void *)entry, 0);
	if (error)
		goto cleanup;
	size = le32_to_cpu(entry->e_value_size);
	if (buffer) {
		error = -ERANGE;
		if (size > buffer_size)
			goto cleanup;
		memcpy(buffer, (void *)IFIRST(header) +
		       le16_to_cpu(entry->e_value_offs), size);
	}
	error = size;

cleanup:
	brelse(iloc.bh);
	return error;
}

/*
 * ext4_xattr_get()
 *
 * Copy an extended attribute into the buffer
 * provided, or compute the buffer size required.
 * Buffer is NULL to compute the size of the buffer required.
 *
 * Returns a negative error number on failure, or the number of bytes
 * used / required on success.
 */
int
ext4_xattr_get(struct inode *inode, int name_index, const char *name,
	       void *buffer, size_t buffer_size)
{
	int error;

	if (strlen(name) > 255)
		return -ERANGE;

	down_read(&EXT4_I(inode)->xattr_sem);
	error = ext4_xattr_ibody_get(inode, name_index, name, buffer,
				     buffer_size);
	if (error == -ENODATA)
		error = ext4_xattr_block_get(inode, name_index, name, buffer,
					     buffer_size);
	up_read(&EXT4_I(inode)->xattr_sem);
	return error;
}

static int
ext4_xattr_list_entries(struct dentry *dentry, struct ext4_xattr_entry *entry,
			char *buffer, size_t buffer_size)
{
	size_t rest = buffer_size;

	for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) {
		const struct xattr_handler *handler =
			ext4_xattr_handler(entry->e_name_index);

		if (handler) {
			size_t size = handler->list(dentry, buffer, rest,
						    entry->e_name,
						    entry->e_name_len,
						    handler->flags);
			if (buffer) {
				if (size > rest)
					return -ERANGE;
				buffer += size;
			}
			rest -= size;
		}
	}
	return buffer_size - rest;
}

static int
ext4_xattr_block_list(struct dentry *dentry, char *buffer, size_t buffer_size)
{
	struct inode *inode = dentry->d_inode;
	struct buffer_head *bh = NULL;
	int error;
	struct mb_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode);

	ea_idebug(inode, "buffer=%p, buffer_size=%ld",
		  buffer, (long)buffer_size);

	error = 0;
	if (!EXT4_I(inode)->i_file_acl)
		goto cleanup;
	ea_idebug(inode, "reading block %llu",
		  (unsigned long long)EXT4_I(inode)->i_file_acl);
	bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
	error = -EIO;
	if (!bh)
		goto cleanup;
	ea_bdebug(bh, "b_count=%d, refcount=%d",
		atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
	if (ext4_xattr_check_block(inode, bh)) {
		EXT4_ERROR_INODE(inode, "bad block %llu",
				 EXT4_I(inode)->i_file_acl);
		error = -EIO;
		goto cleanup;
	}
	ext4_xattr_cache_insert(ext4_mb_cache, bh);
	error = ext4_xattr_list_entries(dentry, BFIRST(bh), buffer, buffer_size);

cleanup:
	brelse(bh);

	return error;
}

static int
ext4_xattr_ibody_list(struct dentry *dentry, char *buffer, size_t buffer_size)
{
	struct inode *inode = dentry->d_inode;
	struct ext4_xattr_ibody_header *header;
	struct ext4_inode *raw_inode;
	struct ext4_iloc iloc;
	void *end;
	int error;

	if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR))
		return 0;
	error = ext4_get_inode_loc(inode, &iloc);
	if (error)
		return error;
	raw_inode = ext4_raw_inode(&iloc);
	header = IHDR(inode, raw_inode);
	end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
	error = ext4_xattr_check_names(IFIRST(header), end, IFIRST(header));
	if (error)
		goto cleanup;
	error = ext4_xattr_list_entries(dentry, IFIRST(header),
					buffer, buffer_size);

cleanup:
	brelse(iloc.bh);
	return error;
}

/*
 * ext4_xattr_list()
 *
 * Copy a list of attribute names into the buffer
 * provided, or compute the buffer size required.
 * Buffer is NULL to compute the size of the buffer required.
 *
 * Returns a negative error number on failure, or the number of bytes
 * used / required on success.
 */
static int
ext4_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size)
{
	int ret, ret2;

	down_read(&EXT4_I(dentry->d_inode)->xattr_sem);
	ret = ret2 = ext4_xattr_ibody_list(dentry, buffer, buffer_size);
	if (ret < 0)
		goto errout;
	if (buffer) {
		buffer += ret;
		buffer_size -= ret;
	}
	ret = ext4_xattr_block_list(dentry, buffer, buffer_size);
	if (ret < 0)
		goto errout;
	ret += ret2;
errout:
	up_read(&EXT4_I(dentry->d_inode)->xattr_sem);
	return ret;
}

/*
 * If the EXT4_FEATURE_COMPAT_EXT_ATTR feature of this file system is
 * not set, set it.
 */
static void ext4_xattr_update_super_block(handle_t *handle,
					  struct super_block *sb)
{
	if (EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_EXT_ATTR))
		return;

	BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get_write_access");
	if (ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh) == 0) {
		EXT4_SET_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_EXT_ATTR);
		ext4_handle_dirty_super(handle, sb);
	}
}

/*
 * Release the xattr block BH: If the reference count is > 1, decrement it;
 * otherwise free the block.
 */
static void
ext4_xattr_release_block(handle_t *handle, struct inode *inode,
			 struct buffer_head *bh)
{
	struct mb_cache_entry *ce = NULL;
	int error = 0;
	struct mb_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode);

	ce = mb_cache_entry_get(ext4_mb_cache, bh->b_bdev, bh->b_blocknr);
	BUFFER_TRACE(bh, "get_write_access");
	error = ext4_journal_get_write_access(handle, bh);
	if (error)
		goto out;

	lock_buffer(bh);
	if (BHDR(bh)->h_refcount == cpu_to_le32(1)) {
		ea_bdebug(bh, "refcount now=0; freeing");
		if (ce)
			mb_cache_entry_free(ce);
		get_bh(bh);
		unlock_buffer(bh);
		ext4_free_blocks(handle, inode, bh, 0, 1,
				 EXT4_FREE_BLOCKS_METADATA |
				 EXT4_FREE_BLOCKS_FORGET);
	} else {
		le32_add_cpu(&BHDR(bh)->h_refcount, -1);
		if (ce)
			mb_cache_entry_release(ce);
		/*
		 * Beware of this ugliness: Releasing of xattr block references
		 * from different inodes can race and so we have to protect
		 * from a race where someone else frees the block (and releases
		 * its journal_head) before we are done dirtying the buffer. In
		 * nojournal mode this race is harmless and we actually cannot
		 * call ext4_handle_dirty_xattr_block() with locked buffer as
		 * that function can call sync_dirty_buffer() so for that case
		 * we handle the dirtying after unlocking the buffer.
		 */
		if (ext4_handle_valid(handle))
			error = ext4_handle_dirty_xattr_block(handle, inode,
							      bh);
		unlock_buffer(bh);
		if (!ext4_handle_valid(handle))
			error = ext4_handle_dirty_xattr_block(handle, inode,
							      bh);
		if (IS_SYNC(inode))
			ext4_handle_sync(handle);
		dquot_free_block(inode, EXT4_C2B(EXT4_SB(inode->i_sb), 1));
		ea_bdebug(bh, "refcount now=%d; releasing",
			  le32_to_cpu(BHDR(bh)->h_refcount));
	}
out:
	ext4_std_error(inode->i_sb, error);
	return;
}

/*
 * Find the available free space for EAs. This also returns the total number of
 * bytes used by EA entries.
 */
static size_t ext4_xattr_free_space(struct ext4_xattr_entry *last,
				    size_t *min_offs, void *base, int *total)
{
	for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
		if (!last->e_value_block && last->e_value_size) {
			size_t offs = le16_to_cpu(last->e_value_offs);
			if (offs < *min_offs)
				*min_offs = offs;
		}
		if (total)
			*total += EXT4_XATTR_LEN(last->e_name_len);
	}
	return (*min_offs - ((void *)last - base) - sizeof(__u32));
}

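/*
 * Add, replace or remove an attribute in the entry list described by
 * @s: compute the free space, drop any old value and close the gap in
 * the value area (adjusting the value offsets of all other entries),
 * then place the new value just below the lowest in-use value offset.
 */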
static int
ext4_xattr_set_entry(struct ext4_xattr_info *i, struct ext4_xattr_search *s)
{
	struct ext4_xattr_entry *last;
	size_t free, min_offs = s->end - s->base, name_len = strlen(i->name);

	/* Compute min_offs and last. */
	last = s->first;
	for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
		if (!last->e_value_block && last->e_value_size) {
			size_t offs = le16_to_cpu(last->e_value_offs);
			if (offs < min_offs)
				min_offs = offs;
		}
	}
	free = min_offs - ((void *)last - s->base) - sizeof(__u32);
	if (!s->not_found) {
		if (!s->here->e_value_block && s->here->e_value_size) {
			size_t size = le32_to_cpu(s->here->e_value_size);
			free += EXT4_XATTR_SIZE(size);
		}
		free += EXT4_XATTR_LEN(name_len);
	}
	if (i->value) {
		if (free < EXT4_XATTR_SIZE(i->value_len) ||
		    free < EXT4_XATTR_LEN(name_len) +
			   EXT4_XATTR_SIZE(i->value_len))
			return -ENOSPC;
	}

	if (i->value && s->not_found) {
		/* Insert the new name. */
		size_t size = EXT4_XATTR_LEN(name_len);
		size_t rest = (void *)last - (void *)s->here + sizeof(__u32);
		memmove((void *)s->here + size, s->here, rest);
		memset(s->here, 0, size);
		s->here->e_name_index = i->name_index;
		s->here->e_name_len = name_len;
		memcpy(s->here->e_name, i->name, name_len);
	} else {
		if (!s->here->e_value_block && s->here->e_value_size) {
			void *first_val = s->base + min_offs;
			size_t offs = le16_to_cpu(s->here->e_value_offs);
			void *val = s->base + offs;
			size_t size = EXT4_XATTR_SIZE(
				le32_to_cpu(s->here->e_value_size));

			if (i->value && size == EXT4_XATTR_SIZE(i->value_len)) {
				/* The old and the new value have the same
				   size. Just replace. */
				s->here->e_value_size =
					cpu_to_le32(i->value_len);
				if (i->value == EXT4_ZERO_XATTR_VALUE) {
					memset(val, 0, size);
				} else {
					/* Clear pad bytes first. */
					memset(val + size - EXT4_XATTR_PAD, 0,
					       EXT4_XATTR_PAD);
					memcpy(val, i->value, i->value_len);
				}
				return 0;
			}

			/* Remove the old value. */
			memmove(first_val + size, first_val, val - first_val);
			memset(first_val, 0, size);
			s->here->e_value_size = 0;
			s->here->e_value_offs = 0;
			min_offs += size;

			/* Adjust all value offsets. */
			last = s->first;
			while (!IS_LAST_ENTRY(last)) {
				size_t o = le16_to_cpu(last->e_value_offs);
				if (!last->e_value_block &&
				    last->e_value_size && o < offs)
					last->e_value_offs =
						cpu_to_le16(o + size);
				last = EXT4_XATTR_NEXT(last);
			}
		}
		if (!i->value) {
			/* Remove the old name. */
			size_t size = EXT4_XATTR_LEN(name_len);
			last = ENTRY((void *)last - size);
			memmove(s->here, (void *)s->here + size,
				(void *)last - (void *)s->here + sizeof(__u32));
			memset(last, 0, size);
		}
	}

	if (i->value) {
		/* Insert the new value. */
		s->here->e_value_size = cpu_to_le32(i->value_len);
		if (i->value_len) {
			size_t size = EXT4_XATTR_SIZE(i->value_len);
			void *val = s->base + min_offs - size;
			s->here->e_value_offs = cpu_to_le16(min_offs - size);
			if (i->value == EXT4_ZERO_XATTR_VALUE) {
				memset(val, 0, size);
			} else {
				/* Clear the pad bytes first. */
				memset(val + size - EXT4_XATTR_PAD, 0,
				       EXT4_XATTR_PAD);
				memcpy(val, i->value, i->value_len);
			}
		}
	}
	return 0;
}

struct ext4_xattr_block_find {
	struct ext4_xattr_search s;
	struct buffer_head *bh;
};

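/*
 * Read and validate the inode's external xattr block, if any, and
 * locate the named attribute in it; bs->s.not_found is set to -ENODATA
 * when the block exists but the attribute does not.
 */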
static int
ext4_xattr_block_find(struct inode *inode, struct ext4_xattr_info *i,
		      struct ext4_xattr_block_find *bs)
{
	struct super_block *sb = inode->i_sb;
	int error;

	ea_idebug(inode, "name=%d.%s, value=%p, value_len=%ld",
		  i->name_index, i->name, i->value, (long)i->value_len);

	if (EXT4_I(inode)->i_file_acl) {
		/* The inode already has an extended attribute block. */
		bs->bh = sb_bread(sb, EXT4_I(inode)->i_file_acl);
		error = -EIO;
		if (!bs->bh)
			goto cleanup;
		ea_bdebug(bs->bh, "b_count=%d, refcount=%d",
			atomic_read(&(bs->bh->b_count)),
			le32_to_cpu(BHDR(bs->bh)->h_refcount));
		if (ext4_xattr_check_block(inode, bs->bh)) {
			EXT4_ERROR_INODE(inode, "bad block %llu",
					 EXT4_I(inode)->i_file_acl);
			error = -EIO;
			goto cleanup;
		}
		/* Find the named attribute. */
		bs->s.base = BHDR(bs->bh);
		bs->s.first = BFIRST(bs->bh);
		bs->s.end = bs->bh->b_data + bs->bh->b_size;
		bs->s.here = bs->s.first;
		error = ext4_xattr_find_entry(&bs->s.here, i->name_index,
					      i->name, bs->bh->b_size, 1);
		if (error && error != -ENODATA)
			goto cleanup;
		bs->s.not_found = error;
	}
	error = 0;

cleanup:
	return error;
}

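/*
 * Set an attribute in the inode's external xattr block.  A block that
 * is shared with other inodes (h_refcount > 1) is never modified in
 * place: it is copied and edited, and the result is either shared with
 * an identical block found in the mbcache or written to a newly
 * allocated block, after which the reference to the old block is
 * dropped.
 */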
static int
ext4_xattr_block_set(handle_t *handle, struct inode *inode,
		     struct ext4_xattr_info *i,
		     struct ext4_xattr_block_find *bs)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *new_bh = NULL;
	struct ext4_xattr_search *s = &bs->s;
	struct mb_cache_entry *ce = NULL;
	int error = 0;
	struct mb_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode);

#define header(x) ((struct ext4_xattr_header *)(x))

	if (i->value && i->value_len > sb->s_blocksize)
		return -ENOSPC;
	if (s->base) {
		ce = mb_cache_entry_get(ext4_mb_cache, bs->bh->b_bdev,
					bs->bh->b_blocknr);
		BUFFER_TRACE(bs->bh, "get_write_access");
		error = ext4_journal_get_write_access(handle, bs->bh);
		if (error)
			goto cleanup;
		lock_buffer(bs->bh);

		if (header(s->base)->h_refcount == cpu_to_le32(1)) {
			if (ce) {
				mb_cache_entry_free(ce);
				ce = NULL;
			}
			ea_bdebug(bs->bh, "modifying in-place");
			error = ext4_xattr_set_entry(i, s);
			if (!error) {
				if (!IS_LAST_ENTRY(s->first))
					ext4_xattr_rehash(header(s->base),
							  s->here);
				ext4_xattr_cache_insert(ext4_mb_cache,
					bs->bh);
			}
			unlock_buffer(bs->bh);
			if (error == -EIO)
				goto bad_block;
			if (!error)
				error = ext4_handle_dirty_xattr_block(handle,
								      inode,
								      bs->bh);
			if (error)
				goto cleanup;
			goto inserted;
		} else {
			int offset = (char *)s->here - bs->bh->b_data;

			unlock_buffer(bs->bh);
			if (ce) {
				mb_cache_entry_release(ce);
				ce = NULL;
			}
			ea_bdebug(bs->bh, "cloning");
			s->base = kmalloc(bs->bh->b_size, GFP_NOFS);
			error = -ENOMEM;
			if (s->base == NULL)
				goto cleanup;
			memcpy(s->base, BHDR(bs->bh), bs->bh->b_size);
			s->first = ENTRY(header(s->base)+1);
			header(s->base)->h_refcount = cpu_to_le32(1);
			s->here = ENTRY(s->base + offset);
			s->end = s->base + bs->bh->b_size;
		}
	} else {
		/* Allocate a buffer where we construct the new block. */
		s->base = kzalloc(sb->s_blocksize, GFP_NOFS);
		/* assert(header == s->base) */
		error = -ENOMEM;
		if (s->base == NULL)
			goto cleanup;
		header(s->base)->h_magic = cpu_to_le32(EXT4_XATTR_MAGIC);
		header(s->base)->h_blocks = cpu_to_le32(1);
		header(s->base)->h_refcount = cpu_to_le32(1);
		s->first = ENTRY(header(s->base)+1);
		s->here = ENTRY(header(s->base)+1);
		s->end = s->base + sb->s_blocksize;
	}

	error = ext4_xattr_set_entry(i, s);
	if (error == -EIO)
		goto bad_block;
	if (error)
		goto cleanup;
	if (!IS_LAST_ENTRY(s->first))
		ext4_xattr_rehash(header(s->base), s->here);

inserted:
	if (!IS_LAST_ENTRY(s->first)) {
		new_bh = ext4_xattr_cache_find(inode, header(s->base), &ce);
		if (new_bh) {
			/* We found an identical block in the cache. */
			if (new_bh == bs->bh)
				ea_bdebug(new_bh, "keeping");
			else {
				/* The old block is released after updating
				   the inode. */
				error = dquot_alloc_block(inode,
						EXT4_C2B(EXT4_SB(sb), 1));
				if (error)
					goto cleanup;
				BUFFER_TRACE(new_bh, "get_write_access");
				error = ext4_journal_get_write_access(handle,
								      new_bh);
				if (error)
					goto cleanup_dquot;
				lock_buffer(new_bh);
				le32_add_cpu(&BHDR(new_bh)->h_refcount, 1);
				ea_bdebug(new_bh, "reusing; refcount now=%d",
					le32_to_cpu(BHDR(new_bh)->h_refcount));
				unlock_buffer(new_bh);
				error = ext4_handle_dirty_xattr_block(handle,
								      inode,
								      new_bh);
				if (error)
					goto cleanup_dquot;
			}
			mb_cache_entry_release(ce);
			ce = NULL;
		} else if (bs->bh && s->base == bs->bh->b_data) {
			/* We were modifying this block in-place. */
			ea_bdebug(bs->bh, "keeping this block");
			new_bh = bs->bh;
			get_bh(new_bh);
		} else {
			/* We need to allocate a new block */
			ext4_fsblk_t goal, block;

			goal = ext4_group_first_block_no(sb,
						EXT4_I(inode)->i_block_group);

			/* non-extent files can't have physical blocks past 2^32 */
			if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
				goal = goal & EXT4_MAX_BLOCK_FILE_PHYS;

			block = ext4_new_meta_blocks(handle, inode, goal, 0,
						     NULL, &error);
			if (error)
				goto cleanup;

			if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
				BUG_ON(block > EXT4_MAX_BLOCK_FILE_PHYS);

			ea_idebug(inode, "creating block %llu",
				  (unsigned long long)block);

			new_bh = sb_getblk(sb, block);
			if (unlikely(!new_bh)) {
				error = -ENOMEM;
getblk_failed:
				ext4_free_blocks(handle, inode, NULL, block, 1,
						 EXT4_FREE_BLOCKS_METADATA);
				goto cleanup;
			}
			lock_buffer(new_bh);
			error = ext4_journal_get_create_access(handle, new_bh);
			if (error) {
				unlock_buffer(new_bh);
				error = -EIO;
				goto getblk_failed;
			}
			memcpy(new_bh->b_data, s->base, new_bh->b_size);
			set_buffer_uptodate(new_bh);
			unlock_buffer(new_bh);
			ext4_xattr_cache_insert(ext4_mb_cache, new_bh);
			error = ext4_handle_dirty_xattr_block(handle,
							      inode, new_bh);
			if (error)
				goto cleanup;
		}
	}

	/* Update the inode. */
	EXT4_I(inode)->i_file_acl = new_bh ? new_bh->b_blocknr : 0;

	/* Drop the previous xattr block. */
	if (bs->bh && bs->bh != new_bh)
		ext4_xattr_release_block(handle, inode, bs->bh);
	error = 0;

cleanup:
	if (ce)
		mb_cache_entry_release(ce);
	brelse(new_bh);
	if (!(bs->bh && s->base == bs->bh->b_data))
		kfree(s->base);

	return error;

cleanup_dquot:
	dquot_free_block(inode, EXT4_C2B(EXT4_SB(sb), 1));
	goto cleanup;

bad_block:
	EXT4_ERROR_INODE(inode, "bad block %llu",
			 EXT4_I(inode)->i_file_acl);
	goto cleanup;

#undef header
}

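/*
 * Set up @is for an in-inode attribute search: map the xattr area of
 * the inode body and, if the inode carries EXT4_STATE_XATTR, validate
 * the entries and locate the named attribute.
 */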
int ext4_xattr_ibody_find(struct inode *inode, struct ext4_xattr_info *i,
			  struct ext4_xattr_ibody_find *is)
{
	struct ext4_xattr_ibody_header *header;
	struct ext4_inode *raw_inode;
	int error;

	if (EXT4_I(inode)->i_extra_isize == 0)
		return 0;
	raw_inode = ext4_raw_inode(&is->iloc);
	header = IHDR(inode, raw_inode);
	is->s.base = is->s.first = IFIRST(header);
	is->s.here = is->s.first;
	is->s.end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
	if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
		error = ext4_xattr_check_names(IFIRST(header), is->s.end,
					       IFIRST(header));
		if (error)
			return error;
		/* Find the named attribute. */
		error = ext4_xattr_find_entry(&is->s.here, i->name_index,
					      i->name, is->s.end -
					      (void *)is->s.base, 0);
		if (error && error != -ENODATA)
			return error;
		is->s.not_found = error;
	}
	return 0;
}

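/*
 * Set an attribute in the inode body on behalf of the inline-data
 * code: if the body is full, try to evict inline data to make room
 * for the new entry, then retry the in-body placement once.
 */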
int ext4_xattr_ibody_inline_set(handle_t *handle, struct inode *inode,
				struct ext4_xattr_info *i,
				struct ext4_xattr_ibody_find *is)
{
	struct ext4_xattr_ibody_header *header;
	struct ext4_xattr_search *s = &is->s;
	int error;

	if (EXT4_I(inode)->i_extra_isize == 0)
		return -ENOSPC;
	error = ext4_xattr_set_entry(i, s);
	if (error) {
		if (error == -ENOSPC &&
		    ext4_has_inline_data(inode)) {
			error = ext4_try_to_evict_inline_data(handle, inode,
					EXT4_XATTR_LEN(strlen(i->name)) +
					EXT4_XATTR_SIZE(i->value_len));
			if (error)
				return error;
			error = ext4_xattr_ibody_find(inode, i, is);
			if (error)
				return error;
			error = ext4_xattr_set_entry(i, s);
		}
		if (error)
			return error;
	}
	header = IHDR(inode, ext4_raw_inode(&is->iloc));
	if (!IS_LAST_ENTRY(s->first)) {
		header->h_magic = cpu_to_le32(EXT4_XATTR_MAGIC);
		ext4_set_inode_state(inode, EXT4_STATE_XATTR);
	} else {
		header->h_magic = cpu_to_le32(0);
		ext4_clear_inode_state(inode, EXT4_STATE_XATTR);
	}
	return 0;
}

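/*
 * Set an attribute in the inode body; the caller is expected to have
 * located it with ext4_xattr_ibody_find() first.
 */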
static int ext4_xattr_ibody_set(handle_t *handle, struct inode *inode,
				struct ext4_xattr_info *i,
				struct ext4_xattr_ibody_find *is)
{
	struct ext4_xattr_ibody_header *header;
	struct ext4_xattr_search *s = &is->s;
	int error;

	if (EXT4_I(inode)->i_extra_isize == 0)
		return -ENOSPC;
	error = ext4_xattr_set_entry(i, s);
	if (error)
		return error;
	header = IHDR(inode, ext4_raw_inode(&is->iloc));
	if (!IS_LAST_ENTRY(s->first)) {
		header->h_magic = cpu_to_le32(EXT4_XATTR_MAGIC);
		ext4_set_inode_state(inode, EXT4_STATE_XATTR);
	} else {
		header->h_magic = cpu_to_le32(0);
		ext4_clear_inode_state(inode, EXT4_STATE_XATTR);
	}
	return 0;
}

/*
 * ext4_xattr_set_handle()
 *
 * Create, replace or remove an extended attribute for this inode.  Value
 * is NULL to remove an existing extended attribute, and non-NULL to
 * either replace an existing extended attribute, or create a new extended
 * attribute. The flags XATTR_REPLACE and XATTR_CREATE
 * specify that an extended attribute must exist and must not exist
 * previous to the call, respectively.
 *
 * Returns 0, or a negative error number on failure.
 */
int
ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index,
		      const char *name, const void *value, size_t value_len,
		      int flags)
{
	struct ext4_xattr_info i = {
		.name_index = name_index,
		.name = name,
		.value = value,
		.value_len = value_len,
	};
	struct ext4_xattr_ibody_find is = {
		.s = { .not_found = -ENODATA, },
	};
	struct ext4_xattr_block_find bs = {
		.s = { .not_found = -ENODATA, },
	};
	unsigned long no_expand;
	int error;

	if (!name)
		return -EINVAL;
	if (strlen(name) > 255)
		return -ERANGE;
	down_write(&EXT4_I(inode)->xattr_sem);
	no_expand = ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND);
	ext4_set_inode_state(inode, EXT4_STATE_NO_EXPAND);

	error = ext4_reserve_inode_write(handle, inode, &is.iloc);
	if (error)
		goto cleanup;

	if (ext4_test_inode_state(inode, EXT4_STATE_NEW)) {
		struct ext4_inode *raw_inode = ext4_raw_inode(&is.iloc);
		memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size);
		ext4_clear_inode_state(inode, EXT4_STATE_NEW);
	}

	error = ext4_xattr_ibody_find(inode, &i, &is);
	if (error)
		goto cleanup;
	if (is.s.not_found)
		error = ext4_xattr_block_find(inode, &i, &bs);
	if (error)
		goto cleanup;
	if (is.s.not_found && bs.s.not_found) {
		error = -ENODATA;
		if (flags & XATTR_REPLACE)
			goto cleanup;
		error = 0;
		if (!value)
			goto cleanup;
	} else {
		error = -EEXIST;
		if (flags & XATTR_CREATE)
			goto cleanup;
	}
	if (!value) {
		if (!is.s.not_found)
			error = ext4_xattr_ibody_set(handle, inode, &i, &is);
		else if (!bs.s.not_found)
			error = ext4_xattr_block_set(handle, inode, &i, &bs);
	} else {
		error = ext4_xattr_ibody_set(handle, inode, &i, &is);
		if (!error && !bs.s.not_found) {
			i.value = NULL;
			error = ext4_xattr_block_set(handle, inode, &i, &bs);
		} else if (error == -ENOSPC) {
			if (EXT4_I(inode)->i_file_acl && !bs.s.base) {
				error = ext4_xattr_block_find(inode, &i, &bs);
				if (error)
					goto cleanup;
			}
			error = ext4_xattr_block_set(handle, inode, &i, &bs);
			if (error)
				goto cleanup;
			if (!is.s.not_found) {
				i.value = NULL;
				error = ext4_xattr_ibody_set(handle, inode, &i,
							     &is);
			}
		}
	}
	if (!error) {
		ext4_xattr_update_super_block(handle, inode->i_sb);
		inode->i_ctime = ext4_current_time(inode);
		if (!value)
			ext4_clear_inode_state(inode, EXT4_STATE_NO_EXPAND);
		error = ext4_mark_iloc_dirty(handle, inode, &is.iloc);
		/*
		 * The bh is consumed by ext4_mark_iloc_dirty, even with
		 * error != 0.
		 */
		is.iloc.bh = NULL;
		if (IS_SYNC(inode))
			ext4_handle_sync(handle);
	}

cleanup:
	brelse(is.iloc.bh);
	brelse(bs.bh);
	if (no_expand == 0)
		ext4_clear_inode_state(inode, EXT4_STATE_NO_EXPAND);
	up_write(&EXT4_I(inode)->xattr_sem);
	return error;
}

/*
 * ext4_xattr_set()
 *
 * Like ext4_xattr_set_handle, but start from an inode. This extended
 * attribute modification is a filesystem transaction by itself.
 *
 * Returns 0, or a negative error number on failure.
 */
int
ext4_xattr_set(struct inode *inode, int name_index, const char *name,
	       const void *value, size_t value_len, int flags)
{
	handle_t *handle;
	int error, retries = 0;
	int credits = ext4_jbd2_credits_xattr(inode);

retry:
	handle = ext4_journal_start(inode, EXT4_HT_XATTR, credits);
	if (IS_ERR(handle)) {
		error = PTR_ERR(handle);
	} else {
		int error2;

		error = ext4_xattr_set_handle(handle, inode, name_index, name,
					      value, value_len, flags);
		error2 = ext4_journal_stop(handle);
		if (error == -ENOSPC &&
		    ext4_should_retry_alloc(inode->i_sb, &retries))
			goto retry;
		if (error == 0)
			error = error2;
	}

	return error;
}

/*
 * Shift the EA entries in the inode to create space for the increased
 * i_extra_isize.
 */
static void ext4_xattr_shift_entries(struct ext4_xattr_entry *entry,
				     int value_offs_shift, void *to,
				     void *from, size_t n, int blocksize)
{
	struct ext4_xattr_entry *last = entry;
	int new_offs;

	/* Adjust the value offsets of the entries */
	for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
		if (!last->e_value_block && last->e_value_size) {
			new_offs = le16_to_cpu(last->e_value_offs) +
							value_offs_shift;
			BUG_ON(new_offs + le32_to_cpu(last->e_value_size)
				 > blocksize);
			last->e_value_offs = cpu_to_le16(new_offs);
		}
	}
	/* Shift the entries by n bytes */
	memmove(to, from, n);
}

/*
 * Expand an inode by new_extra_isize bytes when EAs are present.
 * Returns 0 on success or negative error number on failure.
 */
int ext4_expand_extra_isize_ea(struct inode *inode, int new_extra_isize,
			       struct ext4_inode *raw_inode, handle_t *handle)
{
	struct ext4_xattr_ibody_header *header;
	struct ext4_xattr_entry *entry, *last, *first;
	struct buffer_head *bh = NULL;
	struct ext4_xattr_ibody_find *is = NULL;
	struct ext4_xattr_block_find *bs = NULL;
	char *buffer = NULL, *b_entry_name = NULL;
	size_t min_offs, free;
	int total_ino;
	void *base, *start, *end;
	int extra_isize = 0, error = 0, tried_min_extra_isize = 0;
	int s_min_extra_isize = le16_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_min_extra_isize);

	down_write(&EXT4_I(inode)->xattr_sem);
retry:
	if (EXT4_I(inode)->i_extra_isize >= new_extra_isize) {
		up_write(&EXT4_I(inode)->xattr_sem);
		return 0;
	}

	header = IHDR(inode, raw_inode);
	entry = IFIRST(header);

	/*
	 * Check if enough free space is available in the inode to shift the
	 * entries ahead by new_extra_isize.
	 */

	base = start = entry;
	end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
	min_offs = end - base;
	last = entry;
	total_ino = sizeof(struct ext4_xattr_ibody_header);

	free = ext4_xattr_free_space(last, &min_offs, base, &total_ino);
	if (free >= new_extra_isize) {
		entry = IFIRST(header);
		ext4_xattr_shift_entries(entry,	EXT4_I(inode)->i_extra_isize
				- new_extra_isize, (void *)raw_inode +
				EXT4_GOOD_OLD_INODE_SIZE + new_extra_isize,
				(void *)header, total_ino,
				inode->i_sb->s_blocksize);
		EXT4_I(inode)->i_extra_isize = new_extra_isize;
		error = 0;
		goto cleanup;
	}

	/*
	 * Enough free space isn't available in the inode, check if
	 * EA block can hold new_extra_isize bytes.
	 */
	if (EXT4_I(inode)->i_file_acl) {
		bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
		error = -EIO;
		if (!bh)
			goto cleanup;
		if (ext4_xattr_check_block(inode, bh)) {
			EXT4_ERROR_INODE(inode, "bad block %llu",
					 EXT4_I(inode)->i_file_acl);
			error = -EIO;
			goto cleanup;
		}
		base = BHDR(bh);
		first = BFIRST(bh);
		end = bh->b_data + bh->b_size;
		min_offs = end - base;
		free = ext4_xattr_free_space(first, &min_offs, base, NULL);
		if (free < new_extra_isize) {
			if (!tried_min_extra_isize && s_min_extra_isize) {
				tried_min_extra_isize++;
				new_extra_isize = s_min_extra_isize;
				brelse(bh);
				goto retry;
			}
			error = -1;
			goto cleanup;
		}
	} else {
		free = inode->i_sb->s_blocksize;
	}

	while (new_extra_isize > 0) {
		size_t offs, size, entry_size;
		struct ext4_xattr_entry *small_entry = NULL;
		struct ext4_xattr_info i = {
			.value = NULL,
			.value_len = 0,
		};
		unsigned int total_size;  /* EA entry size + value size */
		unsigned int shift_bytes; /* No. of bytes to shift EAs by? */
		unsigned int min_total_size = ~0U;

		is = kzalloc(sizeof(struct ext4_xattr_ibody_find), GFP_NOFS);
		bs = kzalloc(sizeof(struct ext4_xattr_block_find), GFP_NOFS);
		if (!is || !bs) {
			error = -ENOMEM;
			goto cleanup;
		}

		is->s.not_found = -ENODATA;
		bs->s.not_found = -ENODATA;
		is->iloc.bh = NULL;
		bs->bh = NULL;

		last = IFIRST(header);
		/* Find the entry best suited to be pushed into EA block */
		entry = NULL;
		for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
			total_size =
			EXT4_XATTR_SIZE(le32_to_cpu(last->e_value_size)) +
					EXT4_XATTR_LEN(last->e_name_len);
			if (total_size <= free && total_size < min_total_size) {
				if (total_size < new_extra_isize) {
					small_entry = last;
				} else {
					entry = last;
					min_total_size = total_size;
				}
			}
		}

		if (entry == NULL) {
			if (small_entry) {
				entry = small_entry;
			} else {
				if (!tried_min_extra_isize &&
				    s_min_extra_isize) {
					tried_min_extra_isize++;
					new_extra_isize = s_min_extra_isize;
					kfree(is); is = NULL;
					kfree(bs); bs = NULL;
					brelse(bh);
					goto retry;
				}
				error = -1;
				goto cleanup;
			}
		}
		offs = le16_to_cpu(entry->e_value_offs);
		size = le32_to_cpu(entry->e_value_size);
		entry_size = EXT4_XATTR_LEN(entry->e_name_len);
		i.name_index = entry->e_name_index,
		buffer = kmalloc(EXT4_XATTR_SIZE(size), GFP_NOFS);
		b_entry_name = kmalloc(entry->e_name_len + 1, GFP_NOFS);
		if (!buffer || !b_entry_name) {
			error = -ENOMEM;
			goto cleanup;
		}
		/* Save the entry name and the entry value */
		memcpy(buffer, (void *)IFIRST(header) + offs,
		       EXT4_XATTR_SIZE(size));
		memcpy(b_entry_name, entry->e_name, entry->e_name_len);
		b_entry_name[entry->e_name_len] = '\0';
		i.name = b_entry_name;

		error = ext4_get_inode_loc(inode, &is->iloc);
		if (error)
			goto cleanup;

		error = ext4_xattr_ibody_find(inode, &i, is);
		if (error)
			goto cleanup;

		/* Remove the chosen entry from the inode */
		error = ext4_xattr_ibody_set(handle, inode, &i, is);
		if (error)
			goto cleanup;

		entry = IFIRST(header);
		if (entry_size + EXT4_XATTR_SIZE(size) >= new_extra_isize)
			shift_bytes = new_extra_isize;
		else
			shift_bytes = entry_size + size;
		/* Adjust the offsets and shift the remaining entries ahead */
		ext4_xattr_shift_entries(entry, EXT4_I(inode)->i_extra_isize -
			shift_bytes, (void *)raw_inode +
			EXT4_GOOD_OLD_INODE_SIZE + extra_isize + shift_bytes,
			(void *)header, total_ino - entry_size,
			inode->i_sb->s_blocksize);

		extra_isize += shift_bytes;
		new_extra_isize -= shift_bytes;
		EXT4_I(inode)->i_extra_isize = extra_isize;

		i.name = b_entry_name;
		i.value = buffer;
		i.value_len = size;
		error = ext4_xattr_block_find(inode, &i, bs);
		if (error)
			goto cleanup;

		/* Add entry which was removed from the inode into the block */
		error = ext4_xattr_block_set(handle, inode, &i, bs);
		if (error)
			goto cleanup;
		kfree(b_entry_name);
		kfree(buffer);
		b_entry_name = NULL;
		buffer = NULL;
		brelse(is->iloc.bh);
		kfree(is);
		kfree(bs);
	}
	brelse(bh);
	up_write(&EXT4_I(inode)->xattr_sem);
	return 0;

cleanup:
	kfree(b_entry_name);
	kfree(buffer);
	if (is)
		brelse(is->iloc.bh);
	kfree(is);
	kfree(bs);
	brelse(bh);
	up_write(&EXT4_I(inode)->xattr_sem);
	return error;
}

/*
 * ext4_xattr_delete_inode()
 *
 * Free extended attribute resources associated with this inode. This
 * is called immediately before an inode is freed. We have exclusive
 * access to the inode.
 */
void
ext4_xattr_delete_inode(handle_t *handle, struct inode *inode)
{
	struct buffer_head *bh = NULL;

	if (!EXT4_I(inode)->i_file_acl)
		goto cleanup;
	bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
	if (!bh) {
		EXT4_ERROR_INODE(inode, "block %llu read error",
				 EXT4_I(inode)->i_file_acl);
		goto cleanup;
	}
	if (BHDR(bh)->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC) ||
	    BHDR(bh)->h_blocks != cpu_to_le32(1)) {
		EXT4_ERROR_INODE(inode, "bad block %llu",
				 EXT4_I(inode)->i_file_acl);
		goto cleanup;
	}
	ext4_xattr_release_block(handle, inode, bh);
	EXT4_I(inode)->i_file_acl = 0;

cleanup:
	brelse(bh);
}

/*
 * ext4_xattr_put_super()
 *
 * This is called when a file system is unmounted.
 */
void
ext4_xattr_put_super(struct super_block *sb)
{
	mb_cache_shrink(sb->s_bdev);
}

/*
 * ext4_xattr_cache_insert()
 *
 * Create a new entry in the extended attribute cache, and insert
 * it unless such an entry is already in the cache.
 *
 * Returns 0, or a negative error number on failure.
 */
static void
ext4_xattr_cache_insert(struct mb_cache *ext4_mb_cache, struct buffer_head *bh)
{
	__u32 hash = le32_to_cpu(BHDR(bh)->h_hash);
	struct mb_cache_entry *ce;
	int error;

	ce = mb_cache_entry_alloc(ext4_mb_cache, GFP_NOFS);
	if (!ce) {
		ea_bdebug(bh, "out of memory");
		return;
	}
	error = mb_cache_entry_insert(ce, bh->b_bdev, bh->b_blocknr, hash);
	if (error) {
		mb_cache_entry_free(ce);
		if (error == -EBUSY) {
			ea_bdebug(bh, "already in cache");
			error = 0;
		}
	} else {
		ea_bdebug(bh, "inserting [%x]", (int)hash);
		mb_cache_entry_release(ce);
	}
}

/*
 * ext4_xattr_cmp()
 *
 * Compare two extended attribute blocks for equality.
 *
 * Returns 0 if the blocks are equal, 1 if they differ, and
 * a negative error number on errors.
 */
static int
ext4_xattr_cmp(struct ext4_xattr_header *header1,
	       struct ext4_xattr_header *header2)
{
	struct ext4_xattr_entry *entry1, *entry2;

	entry1 = ENTRY(header1+1);
	entry2 = ENTRY(header2+1);
	while (!IS_LAST_ENTRY(entry1)) {
		if (IS_LAST_ENTRY(entry2))
			return 1;
		if (entry1->e_hash != entry2->e_hash ||
		    entry1->e_name_index != entry2->e_name_index ||
		    entry1->e_name_len != entry2->e_name_len ||
		    entry1->e_value_size != entry2->e_value_size ||
		    memcmp(entry1->e_name, entry2->e_name, entry1->e_name_len))
			return 1;
		if (entry1->e_value_block != 0 || entry2->e_value_block != 0)
			return -EIO;
		if (memcmp((char *)header1 + le16_to_cpu(entry1->e_value_offs),
			   (char *)header2 + le16_to_cpu(entry2->e_value_offs),
			   le32_to_cpu(entry1->e_value_size)))
			return 1;

		entry1 = EXT4_XATTR_NEXT(entry1);
		entry2 = EXT4_XATTR_NEXT(entry2);
	}
	if (!IS_LAST_ENTRY(entry2))
		return 1;
	return 0;
}

/*
 * ext4_xattr_cache_find()
 *
 * Find an identical extended attribute block.
 *
 * Returns a pointer to the block found, or NULL if such a block was
 * not found or an error occurred.
 */
static struct buffer_head *
ext4_xattr_cache_find(struct inode *inode, struct ext4_xattr_header *header,
		      struct mb_cache_entry **pce)
{
	__u32 hash = le32_to_cpu(header->h_hash);
	struct mb_cache_entry *ce;
	struct mb_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode);

	if (!header->h_hash)
		return NULL;  /* never share */
	ea_idebug(inode, "looking for cached blocks [%x]", (int)hash);
again:
	ce = mb_cache_entry_find_first(ext4_mb_cache, inode->i_sb->s_bdev,
				       hash);
	while (ce) {
		struct buffer_head *bh;

		if (IS_ERR(ce)) {
			if (PTR_ERR(ce) == -EAGAIN)
				goto again;
			break;
		}
		bh = sb_bread(inode->i_sb, ce->e_block);
		if (!bh) {
			EXT4_ERROR_INODE(inode, "block %lu read error",
					 (unsigned long) ce->e_block);
		} else if (le32_to_cpu(BHDR(bh)->h_refcount) >=
				EXT4_XATTR_REFCOUNT_MAX) {
			ea_idebug(inode, "block %lu refcount %d>=%d",
				  (unsigned long) ce->e_block,
				  le32_to_cpu(BHDR(bh)->h_refcount),
					  EXT4_XATTR_REFCOUNT_MAX);
		} else if (ext4_xattr_cmp(header, BHDR(bh)) == 0) {
			*pce = ce;
			return bh;
		}
		brelse(bh);
		ce = mb_cache_entry_find_next(ce, inode->i_sb->s_bdev, hash);
	}
	return NULL;
}

#define NAME_HASH_SHIFT 5
#define VALUE_HASH_SHIFT 16

/*
 * ext4_xattr_hash_entry()
 *
 * Compute the hash of an extended attribute.
 */
static inline void ext4_xattr_hash_entry(struct ext4_xattr_header *header,
					 struct ext4_xattr_entry *entry)
{
	__u32 hash = 0;
	char *name = entry->e_name;
	int n;

	for (n = 0; n < entry->e_name_len; n++) {
		hash = (hash << NAME_HASH_SHIFT) ^
		       (hash >> (8*sizeof(hash) - NAME_HASH_SHIFT)) ^
		       *name++;
	}

	if (entry->e_value_block == 0 && entry->e_value_size != 0) {
		__le32 *value = (__le32 *)((char *)header +
			le16_to_cpu(entry->e_value_offs));
		for (n = (le32_to_cpu(entry->e_value_size) +
1675
		     EXT4_XATTR_ROUND) >> EXT4_XATTR_PAD_BITS; n; n--) {
1676 1677 1678 1679 1680 1681 1682 1683 1684 1685 1686 1687 1688 1689
			hash = (hash << VALUE_HASH_SHIFT) ^
			       (hash >> (8*sizeof(hash) - VALUE_HASH_SHIFT)) ^
			       le32_to_cpu(*value++);
		}
	}
	entry->e_hash = cpu_to_le32(hash);
}

#undef NAME_HASH_SHIFT
#undef VALUE_HASH_SHIFT

#define BLOCK_HASH_SHIFT 16

/*
 * ext4_xattr_rehash()
 *
 * Re-compute the extended attribute hash value after an entry has changed.
 */
static void ext4_xattr_rehash(struct ext4_xattr_header *header,
			      struct ext4_xattr_entry *entry)
{
	struct ext4_xattr_entry *here;
	__u32 hash = 0;

	ext4_xattr_hash_entry(header, entry);
	here = ENTRY(header+1);
	while (!IS_LAST_ENTRY(here)) {
		if (!here->e_hash) {
			/* Block is not shared if an entry's hash value == 0 */
			hash = 0;
			break;
		}
		hash = (hash << BLOCK_HASH_SHIFT) ^
		       (hash >> (8*sizeof(hash) - BLOCK_HASH_SHIFT)) ^
		       le32_to_cpu(here->e_hash);
		here = EXT4_XATTR_NEXT(here);
	}
	header->h_hash = cpu_to_le32(hash);
}

#undef BLOCK_HASH_SHIFT

#define	HASH_BUCKET_BITS	10

struct mb_cache *
ext4_xattr_create_cache(char *name)
{
	return mb_cache_create(name, HASH_BUCKET_BITS);
}

void ext4_xattr_destroy_cache(struct mb_cache *cache)
{
	if (cache)
		mb_cache_destroy(cache);
}