/*
 * balloc.c
 *
 * PURPOSE
 *	Block allocation handling routines for the OSTA-UDF(tm) filesystem.
 *
 * COPYRIGHT
 *	This file is distributed under the terms of the GNU General Public
 *	License (GPL). Copies of the GPL can be obtained from:
 *		ftp://prep.ai.mit.edu/pub/gnu/GPL
 *	Each contributing author retains all rights to their own work.
 *
 *  (C) 1999-2001 Ben Fennema
 *  (C) 1999 Stelias Computing Inc
 *
 * HISTORY
 *
 *  02/24/99 blf  Created.
 *
 */

#include "udfdecl.h"

#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include <linux/bitops.h>

#include "udf_i.h"
#include "udf_sb.h"

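/*
 * In the UDF space bitmap a set bit means the block is free.  The macros
 * below map the udf_* bit helpers onto the little-endian ext2 bitmap
 * primitives, and leBPL_to_cpup()/uintBPL_t expand to the 32- or 64-bit
 * little-endian accessors matching BITS_PER_LONG, so the scanners below
 * can walk the bitmap a word at a time.
 */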
#define udf_clear_bit(nr,addr) ext2_clear_bit(nr,addr)
#define udf_set_bit(nr,addr) ext2_set_bit(nr,addr)
#define udf_test_bit(nr, addr) ext2_test_bit(nr, addr)
#define udf_find_first_one_bit(addr, size) find_first_one_bit(addr, size)
#define udf_find_next_one_bit(addr, size, offset) find_next_one_bit(addr, size, offset)

#define leBPL_to_cpup(x) leNUM_to_cpup(BITS_PER_LONG, x)
#define leNUM_to_cpup(x,y) xleNUM_to_cpup(x,y)
#define xleNUM_to_cpup(x,y) (le ## x ## _to_cpup(y))
#define uintBPL_t uint(BITS_PER_LONG)
#define uint(x) xuint(x)
#define xuint(x) __le ## x

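/*
 * Return the index of the next set (i.e. free) bit at or after @offset in a
 * little-endian bitmap of @size bits; a result >= @size means no set bit
 * was found.
 */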
static inline int find_next_one_bit (void * addr, int size, int offset)
{
	uintBPL_t * p = ((uintBPL_t *) addr) + (offset / BITS_PER_LONG);
	int result = offset & ~(BITS_PER_LONG-1);
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= (BITS_PER_LONG-1);
	if (offset)
	{
		tmp = leBPL_to_cpup(p++);
		tmp &= ~0UL << offset;
		if (size < BITS_PER_LONG)
			goto found_first;
		if (tmp)
			goto found_middle;
		size -= BITS_PER_LONG;
		result += BITS_PER_LONG;
	}
	while (size & ~(BITS_PER_LONG-1))
	{
		if ((tmp = leBPL_to_cpup(p++)))
			goto found_middle;
		result += BITS_PER_LONG;
		size -= BITS_PER_LONG;
	}
	if (!size)
		return result;
	tmp = leBPL_to_cpup(p);
found_first:
	tmp &= ~0UL >> (BITS_PER_LONG-size);
found_middle:
	return result + ffz(~tmp);
}

#define find_first_one_bit(addr, size)\
	find_next_one_bit((addr), (size), 0)

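/*
 * Read block @block of the space bitmap described by @bitmap from disk and
 * cache the buffer_head in s_block_bitmap[bitmap_nr].  Returns 0 on success
 * or -EIO if the block could not be read.
 */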
static int read_block_bitmap(struct super_block * sb,
	struct udf_bitmap *bitmap, unsigned int block, unsigned long bitmap_nr)
{
	struct buffer_head *bh = NULL;
	int retval = 0;
	kernel_lb_addr loc;

	loc.logicalBlockNum = bitmap->s_extPosition;
	loc.partitionReferenceNum = UDF_SB_PARTITION(sb);

	bh = udf_tread(sb, udf_get_lb_pblock(sb, loc, block));
	if (!bh)
	{
		retval = -EIO;
	}
	bitmap->s_block_bitmap[bitmap_nr] = bh;
	return retval;
}

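/*
 * Make sure the bitmap block for @block_group is in memory, reading it in
 * if necessary.  Returns the slot index (the group number itself) or a
 * negative error code.
 */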
static int __load_block_bitmap(struct super_block * sb,
	struct udf_bitmap *bitmap, unsigned int block_group)
{
	int retval = 0;
	int nr_groups = bitmap->s_nr_groups;

	if (block_group >= nr_groups)
	{
		udf_debug("block_group (%d) >= nr_groups (%d)\n", block_group, nr_groups);
	}

	if (bitmap->s_block_bitmap[block_group])
		return block_group;
	else
	{
		retval = read_block_bitmap(sb, bitmap, block_group, block_group);
		if (retval < 0)
			return retval;
		return block_group;
	}
}

static inline int load_block_bitmap(struct super_block * sb,
	struct udf_bitmap *bitmap, unsigned int block_group)
{
	int slot;

	slot = __load_block_bitmap(sb, bitmap, block_group);

	if (slot < 0)
		return slot;

	if (!bitmap->s_block_bitmap[slot])
		return -EIO;

	return slot;
}

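/*
 * Free @count blocks starting at @bloc + @offset by setting their bits in
 * the space bitmap.  The (sizeof(struct spaceBitmapDesc) << 3) term skips
 * the bitmap descriptor header that precedes the bits on disk, and the
 * do_more loop handles runs that cross a bitmap block boundary.
 */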
static void udf_bitmap_free_blocks(struct super_block * sb,
	struct inode * inode,
	struct udf_bitmap *bitmap,
	kernel_lb_addr bloc, uint32_t offset, uint32_t count)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	struct buffer_head * bh = NULL;
	unsigned long block;
	unsigned long block_group;
	unsigned long bit;
	unsigned long i;
	int bitmap_nr;
	unsigned long overflow;

	mutex_lock(&sbi->s_alloc_mutex);
	if (bloc.logicalBlockNum < 0 ||
		(bloc.logicalBlockNum + count) > UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum))
	{
		udf_debug("%d < %d || %d + %d > %d\n",
			bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
			UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum));
		goto error_return;
	}

	block = bloc.logicalBlockNum + offset + (sizeof(struct spaceBitmapDesc) << 3);

do_more:
	overflow = 0;
	block_group = block >> (sb->s_blocksize_bits + 3);
	bit = block % (sb->s_blocksize << 3);

	/*
	 * Check to see if we are freeing blocks across a group boundary.
	 */
	if (bit + count > (sb->s_blocksize << 3))
	{
		overflow = bit + count - (sb->s_blocksize << 3);
		count -= overflow;
	}
	bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
	if (bitmap_nr < 0)
		goto error_return;

	bh = bitmap->s_block_bitmap[bitmap_nr];
	for (i=0; i < count; i++)
	{
		if (udf_set_bit(bit + i, bh->b_data))
		{
			udf_debug("bit %ld already set\n", bit + i);
			udf_debug("byte=%2x\n", ((char *)bh->b_data)[(bit + i) >> 3]);
		}
		else
		{
			if (inode)
				DQUOT_FREE_BLOCK(inode, 1);
			if (UDF_SB_LVIDBH(sb))
			{
				UDF_SB_LVID(sb)->freeSpaceTable[UDF_SB_PARTITION(sb)] =
					cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[UDF_SB_PARTITION(sb)])+1);
			}
		}
	}
	mark_buffer_dirty(bh);
	if (overflow)
	{
		block += count;
		count = overflow;
		goto do_more;
	}
error_return:
	sb->s_dirt = 1;
	if (UDF_SB_LVIDBH(sb))
		mark_buffer_dirty(UDF_SB_LVIDBH(sb));
	mutex_unlock(&sbi->s_alloc_mutex);
	return;
}

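/*
 * Try to reserve up to @block_count blocks starting at @first_block by
 * clearing consecutive free bits.  Scanning stops at the first block that
 * is already allocated; the number of blocks actually grabbed is returned.
 */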
static int udf_bitmap_prealloc_blocks(struct super_block * sb,
	struct inode * inode,
	struct udf_bitmap *bitmap, uint16_t partition, uint32_t first_block,
	uint32_t block_count)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	int alloc_count = 0;
	int bit, block, block_group, group_start;
	int nr_groups, bitmap_nr;
	struct buffer_head *bh;

	mutex_lock(&sbi->s_alloc_mutex);
	if (first_block < 0 || first_block >= UDF_SB_PARTLEN(sb, partition))
		goto out;

	if (first_block + block_count > UDF_SB_PARTLEN(sb, partition))
		block_count = UDF_SB_PARTLEN(sb, partition) - first_block;

repeat:
	nr_groups = (UDF_SB_PARTLEN(sb, partition) +
		(sizeof(struct spaceBitmapDesc) << 3) + (sb->s_blocksize * 8) - 1) / (sb->s_blocksize * 8);
	block = first_block + (sizeof(struct spaceBitmapDesc) << 3);
	block_group = block >> (sb->s_blocksize_bits + 3);
	group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

	bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
	if (bitmap_nr < 0)
		goto out;
	bh = bitmap->s_block_bitmap[bitmap_nr];

	bit = block % (sb->s_blocksize << 3);

	while (bit < (sb->s_blocksize << 3) && block_count > 0)
	{
		if (!udf_test_bit(bit, bh->b_data))
			goto out;
		else if (DQUOT_PREALLOC_BLOCK(inode, 1))
			goto out;
		else if (!udf_clear_bit(bit, bh->b_data))
		{
			udf_debug("bit already cleared for block %d\n", bit);
			DQUOT_FREE_BLOCK(inode, 1);
			goto out;
		}
		block_count --;
		alloc_count ++;
		bit ++;
		block ++;
	}
	mark_buffer_dirty(bh);
	if (block_count > 0)
		goto repeat;
out:
	if (UDF_SB_LVIDBH(sb))
	{
		UDF_SB_LVID(sb)->freeSpaceTable[partition] =
			cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[partition])-alloc_count);
		mark_buffer_dirty(UDF_SB_LVIDBH(sb));
	}
	sb->s_dirt = 1;
	mutex_unlock(&sbi->s_alloc_mutex);
	return alloc_count;
}

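/*
 * Allocate one block, preferring @goal.  The search tries the goal bit
 * itself, then any free bit up to the next 64-bit boundary, then a fully
 * free byte (memscan for 0xFF) or any free bit in the goal's bitmap block,
 * and finally falls back to scanning the remaining block groups.
 */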
static int udf_bitmap_new_block(struct super_block * sb,
	struct inode * inode,
	struct udf_bitmap *bitmap, uint16_t partition, uint32_t goal, int *err)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	int newbit, bit=0, block, block_group, group_start;
	int end_goal, nr_groups, bitmap_nr, i;
	struct buffer_head *bh = NULL;
	char *ptr;
	int newblock = 0;

	*err = -ENOSPC;
	mutex_lock(&sbi->s_alloc_mutex);

repeat:
	if (goal < 0 || goal >= UDF_SB_PARTLEN(sb, partition))
		goal = 0;

	nr_groups = bitmap->s_nr_groups;
	block = goal + (sizeof(struct spaceBitmapDesc) << 3);
	block_group = block >> (sb->s_blocksize_bits + 3);
	group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

	bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
	if (bitmap_nr < 0)
		goto error_return;
	bh = bitmap->s_block_bitmap[bitmap_nr];
	ptr = memscan((char *)bh->b_data + group_start, 0xFF, sb->s_blocksize - group_start);

	if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize)
	{
		bit = block % (sb->s_blocksize << 3);

		if (udf_test_bit(bit, bh->b_data))
		{
			goto got_block;
		}
		end_goal = (bit + 63) & ~63;
		bit = udf_find_next_one_bit(bh->b_data, end_goal, bit);
		if (bit < end_goal)
			goto got_block;
		ptr = memscan((char *)bh->b_data + (bit >> 3), 0xFF, sb->s_blocksize - ((bit + 7) >> 3));
		newbit = (ptr - ((char *)bh->b_data)) << 3;
		if (newbit < sb->s_blocksize << 3)
		{
			bit = newbit;
			goto search_back;
		}
		newbit = udf_find_next_one_bit(bh->b_data, sb->s_blocksize << 3, bit);
		if (newbit < sb->s_blocksize << 3)
		{
			bit = newbit;
			goto got_block;
		}
	}

	for (i=0; i<(nr_groups*2); i++)
	{
		block_group ++;
		if (block_group >= nr_groups)
			block_group = 0;
		group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

		bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
		if (bitmap_nr < 0)
			goto error_return;
		bh = bitmap->s_block_bitmap[bitmap_nr];
		if (i < nr_groups)
		{
			ptr = memscan((char *)bh->b_data + group_start, 0xFF, sb->s_blocksize - group_start);
			if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize)
			{
				bit = (ptr - ((char *)bh->b_data)) << 3;
				break;
			}
		}
		else
		{
			bit = udf_find_next_one_bit((char *)bh->b_data, sb->s_blocksize << 3, group_start << 3);
			if (bit < sb->s_blocksize << 3)
				break;
		}
	}
	if (i >= (nr_groups*2))
	{
		mutex_unlock(&sbi->s_alloc_mutex);
		return newblock;
	}
	if (bit < sb->s_blocksize << 3)
		goto search_back;
	else
		bit = udf_find_next_one_bit(bh->b_data, sb->s_blocksize << 3, group_start << 3);
	if (bit >= sb->s_blocksize << 3)
	{
		mutex_unlock(&sbi->s_alloc_mutex);
		return 0;
	}

search_back:
	for (i=0; i<7 && bit > (group_start << 3) && udf_test_bit(bit - 1, bh->b_data); i++, bit--);

got_block:

	/*
	 * Check quota for allocation of this block.
	 */
	if (inode && DQUOT_ALLOC_BLOCK(inode, 1))
	{
		mutex_unlock(&sbi->s_alloc_mutex);
		*err = -EDQUOT;
		return 0;
	}

	newblock = bit + (block_group << (sb->s_blocksize_bits + 3)) -
		(sizeof(struct spaceBitmapDesc) << 3);

	if (!udf_clear_bit(bit, bh->b_data))
	{
		udf_debug("bit already cleared for block %d\n", bit);
		goto repeat;
	}

	mark_buffer_dirty(bh);

	if (UDF_SB_LVIDBH(sb))
	{
		UDF_SB_LVID(sb)->freeSpaceTable[partition] =
			cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[partition])-1);
		mark_buffer_dirty(UDF_SB_LVIDBH(sb));
	}
	sb->s_dirt = 1;
	mutex_unlock(&sbi->s_alloc_mutex);
	*err = 0;
	return newblock;

error_return:
	*err = -EIO;
I
Ingo Molnar 已提交
419
	mutex_unlock(&sbi->s_alloc_mutex);
L
Linus Torvalds 已提交
420 421 422 423 424 425 426 427 428 429
	return 0;
}

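/*
 * Free blocks into an unallocated-space table (the extent list rooted in
 * @table).  Freed blocks are merged into an existing extent when adjacent
 * to it; otherwise a new extent is appended, and if that needs a new
 * allocation extent descriptor block, one block is stolen from the range
 * being freed to hold it.
 */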
static void udf_table_free_blocks(struct super_block * sb,
	struct inode * inode,
	struct inode * table,
	kernel_lb_addr bloc, uint32_t offset, uint32_t count)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	uint32_t start, end;
	uint32_t elen;
	kernel_lb_addr eloc;
	struct extent_position oepos, epos;
	int8_t etype;
	int i;

	mutex_lock(&sbi->s_alloc_mutex);
	if (bloc.logicalBlockNum < 0 ||
		(bloc.logicalBlockNum + count) > UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum))
	{
		udf_debug("%d < %d || %d + %d > %d\n",
			bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
			UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum));
		goto error_return;
	}

	/* We do this up front - there are some error conditions that could occur,
	   but.. oh well */
	if (inode)
		DQUOT_FREE_BLOCK(inode, count);
	if (UDF_SB_LVIDBH(sb))
	{
		UDF_SB_LVID(sb)->freeSpaceTable[UDF_SB_PARTITION(sb)] =
			cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[UDF_SB_PARTITION(sb)])+count);
		mark_buffer_dirty(UDF_SB_LVIDBH(sb));
	}

	start = bloc.logicalBlockNum + offset;
	end = bloc.logicalBlockNum + offset + count - 1;

	epos.offset = oepos.offset = sizeof(struct unallocSpaceEntry);
	elen = 0;
	epos.block = oepos.block = UDF_I_LOCATION(table);
	epos.bh = oepos.bh = NULL;

	while (count && (etype =
		udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1)
	{
		if (((eloc.logicalBlockNum + (elen >> sb->s_blocksize_bits)) ==
			start))
		{
			if ((0x3FFFFFFF - elen) < (count << sb->s_blocksize_bits))
			{
				count -= ((0x3FFFFFFF - elen) >> sb->s_blocksize_bits);
				start += ((0x3FFFFFFF - elen) >> sb->s_blocksize_bits);
				elen = (etype << 30) | (0x40000000 - sb->s_blocksize);
			}
			else
			{
				elen = (etype << 30) |
					(elen + (count << sb->s_blocksize_bits));
				start += count;
				count = 0;
			}
			udf_write_aext(table, &oepos, eloc, elen, 1);
		}
		else if (eloc.logicalBlockNum == (end + 1))
		{
			if ((0x3FFFFFFF - elen) < (count << sb->s_blocksize_bits))
			{
				count -= ((0x3FFFFFFF - elen) >> sb->s_blocksize_bits);
				end -= ((0x3FFFFFFF - elen) >> sb->s_blocksize_bits);
				eloc.logicalBlockNum -=
					((0x3FFFFFFF - elen) >> sb->s_blocksize_bits);
				elen = (etype << 30) | (0x40000000 - sb->s_blocksize);
			}
			else
			{
				eloc.logicalBlockNum = start;
				elen = (etype << 30) |
					(elen + (count << sb->s_blocksize_bits));
				end -= count;
				count = 0;
			}
			udf_write_aext(table, &oepos, eloc, elen, 1);
		}

		if (epos.bh != oepos.bh)
		{
			i = -1;
			oepos.block = epos.block;
			brelse(oepos.bh);
			get_bh(epos.bh);
			oepos.bh = epos.bh;
			oepos.offset = 0;
		}
		else
			oepos.offset = epos.offset;
	}

	if (count)
	{
		/* NOTE: we CANNOT use udf_add_aext here, as it can try to allocate
				 a new block, and since we hold the super block lock already
				 very bad things would happen :)

				 We copy the behavior of udf_add_aext, but instead of
				 trying to allocate a new block close to the existing one,
				 we just steal a block from the extent we are trying to add.

				 It would be nice if the blocks were close together, but it
				 isn't required.
		*/

		int adsize;
		short_ad *sad = NULL;
		long_ad *lad = NULL;
		struct allocExtDesc *aed;

		eloc.logicalBlockNum = start;
		elen = EXT_RECORDED_ALLOCATED |
			(count << sb->s_blocksize_bits);

		if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_SHORT)
			adsize = sizeof(short_ad);
		else if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_LONG)
			adsize = sizeof(long_ad);
		else
		{
			brelse(oepos.bh);
			brelse(epos.bh);
			goto error_return;
		}

		if (epos.offset + (2 * adsize) > sb->s_blocksize)
		{
			char *sptr, *dptr;
			int loffset;

			brelse(oepos.bh);
			oepos = epos;

			/* Steal a block from the extent being free'd */
			epos.block.logicalBlockNum = eloc.logicalBlockNum;
			eloc.logicalBlockNum ++;
			elen -= sb->s_blocksize;

			if (!(epos.bh = udf_tread(sb,
				udf_get_lb_pblock(sb, epos.block, 0))))
			{
				brelse(oepos.bh);
				goto error_return;
			}
			aed = (struct allocExtDesc *)(epos.bh->b_data);
			aed->previousAllocExtLocation = cpu_to_le32(oepos.block.logicalBlockNum);
			if (epos.offset + adsize > sb->s_blocksize)
			{
				loffset = epos.offset;
				aed->lengthAllocDescs = cpu_to_le32(adsize);
				sptr = UDF_I_DATA(inode) + epos.offset -
					udf_file_entry_alloc_offset(inode) +
					UDF_I_LENEATTR(inode) - adsize;
				dptr = epos.bh->b_data + sizeof(struct allocExtDesc);
				memcpy(dptr, sptr, adsize);
				epos.offset = sizeof(struct allocExtDesc) + adsize;
			}
			else
			{
				loffset = epos.offset + adsize;
				aed->lengthAllocDescs = cpu_to_le32(0);
				sptr = oepos.bh->b_data + epos.offset;
				epos.offset = sizeof(struct allocExtDesc);

				if (oepos.bh)
				{
					aed = (struct allocExtDesc *)oepos.bh->b_data;
					aed->lengthAllocDescs =
						cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + adsize);
				}
				else
				{
					UDF_I_LENALLOC(table) += adsize;
					mark_inode_dirty(table);
				}
			}
			if (UDF_SB_UDFREV(sb) >= 0x0200)
				udf_new_tag(epos.bh->b_data, TAG_IDENT_AED, 3, 1,
					epos.block.logicalBlockNum, sizeof(tag));
			else
				udf_new_tag(epos.bh->b_data, TAG_IDENT_AED, 2, 1,
					epos.block.logicalBlockNum, sizeof(tag));
			switch (UDF_I_ALLOCTYPE(table))
			{
				case ICBTAG_FLAG_AD_SHORT:
				{
					sad = (short_ad *)sptr;
					sad->extLength = cpu_to_le32(
						EXT_NEXT_EXTENT_ALLOCDECS |
						sb->s_blocksize);
					sad->extPosition = cpu_to_le32(epos.block.logicalBlockNum);
					break;
				}
				case ICBTAG_FLAG_AD_LONG:
				{
					lad = (long_ad *)sptr;
					lad->extLength = cpu_to_le32(
						EXT_NEXT_EXTENT_ALLOCDECS |
						sb->s_blocksize);
					lad->extLocation = cpu_to_lelb(epos.block);
					break;
				}
			}
			if (oepos.bh)
			{
				udf_update_tag(oepos.bh->b_data, loffset);
				mark_buffer_dirty(oepos.bh);
			}
			else
				mark_inode_dirty(table);
		}

		if (elen) /* It's possible that stealing the block emptied the extent */
		{
			udf_write_aext(table, &epos, eloc, elen, 1);

			if (!epos.bh)
			{
				UDF_I_LENALLOC(table) += adsize;
				mark_inode_dirty(table);
			}
			else
			{
				aed = (struct allocExtDesc *)epos.bh->b_data;
				aed->lengthAllocDescs =
					cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + adsize);
				udf_update_tag(epos.bh->b_data, epos.offset);
				mark_buffer_dirty(epos.bh);
			}
		}
	}

	brelse(epos.bh);
	brelse(oepos.bh);

error_return:
	sb->s_dirt = 1;
	mutex_unlock(&sbi->s_alloc_mutex);
	return;
}

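/*
 * Preallocate up to @block_count blocks from the free-space table, but only
 * if an extent starting exactly at @first_block exists.  That extent is
 * shrunk (or deleted when fully consumed) and the number of blocks taken is
 * returned.
 */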
static int udf_table_prealloc_blocks(struct super_block * sb,
	struct inode * inode,
	struct inode *table, uint16_t partition, uint32_t first_block,
	uint32_t block_count)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	int alloc_count = 0;
	uint32_t elen, adsize;
	kernel_lb_addr eloc;
	struct extent_position epos;
	int8_t etype = -1;

	if (first_block < 0 || first_block >= UDF_SB_PARTLEN(sb, partition))
		return 0;

	if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_SHORT)
		adsize = sizeof(short_ad);
	else if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_LONG)
		adsize = sizeof(long_ad);
	else
		return 0;

	mutex_lock(&sbi->s_alloc_mutex);
	epos.offset = sizeof(struct unallocSpaceEntry);
	epos.block = UDF_I_LOCATION(table);
	epos.bh = NULL;
	eloc.logicalBlockNum = 0xFFFFFFFF;

	while (first_block != eloc.logicalBlockNum && (etype =
		udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1)
	{
		udf_debug("eloc=%d, elen=%d, first_block=%d\n",
			eloc.logicalBlockNum, elen, first_block);
		; /* empty loop body */
	}

	if (first_block == eloc.logicalBlockNum)
	{
		epos.offset -= adsize;

		alloc_count = (elen >> sb->s_blocksize_bits);
		if (inode && DQUOT_PREALLOC_BLOCK(inode, alloc_count > block_count ? block_count : alloc_count))
			alloc_count = 0;
		else if (alloc_count > block_count)
		{
			alloc_count = block_count;
			eloc.logicalBlockNum += alloc_count;
			elen -= (alloc_count << sb->s_blocksize_bits);
			udf_write_aext(table, &epos, eloc, (etype << 30) | elen, 1);
		}
		else
			udf_delete_aext(table, epos, eloc, (etype << 30) | elen);
	}
	else
		alloc_count = 0;

	brelse(epos.bh);

	if (alloc_count && UDF_SB_LVIDBH(sb))
	{
		UDF_SB_LVID(sb)->freeSpaceTable[partition] =
			cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[partition])-alloc_count);
		mark_buffer_dirty(UDF_SB_LVIDBH(sb));
		sb->s_dirt = 1;
	}
	mutex_unlock(&sbi->s_alloc_mutex);
	return alloc_count;
}

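/*
 * Allocate one block from the free-space table, choosing the extent whose
 * start is closest to @goal.  The block is always taken from the beginning
 * of that extent, which is then shrunk, or deleted once it becomes empty.
 */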
static int udf_table_new_block(struct super_block * sb,
	struct inode * inode,
	struct inode *table, uint16_t partition, uint32_t goal, int *err)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	uint32_t spread = 0xFFFFFFFF, nspread = 0xFFFFFFFF;
	uint32_t newblock = 0, adsize;
	uint32_t elen, goal_elen = 0;
	kernel_lb_addr eloc, goal_eloc;
	struct extent_position epos, goal_epos;
	int8_t etype;

	*err = -ENOSPC;

	if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_SHORT)
		adsize = sizeof(short_ad);
	else if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_LONG)
		adsize = sizeof(long_ad);
	else
		return newblock;

	mutex_lock(&sbi->s_alloc_mutex);
	if (goal < 0 || goal >= UDF_SB_PARTLEN(sb, partition))
		goal = 0;

	/* We search for the closest matching block to goal. If we find an exact hit,
	   we stop. Otherwise we keep going till we run out of extents.
	   We store the buffer_head, bloc, and extoffset of the current closest
	   match and use that when we are done.
	*/
	epos.offset = sizeof(struct unallocSpaceEntry);
	epos.block = UDF_I_LOCATION(table);
	epos.bh = goal_epos.bh = NULL;

	while (spread && (etype =
		udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1)
	{
		if (goal >= eloc.logicalBlockNum)
		{
			if (goal < eloc.logicalBlockNum + (elen >> sb->s_blocksize_bits))
				nspread = 0;
			else
				nspread = goal - eloc.logicalBlockNum -
					(elen >> sb->s_blocksize_bits);
		}
		else
			nspread = eloc.logicalBlockNum - goal;

		if (nspread < spread)
		{
			spread = nspread;
			if (goal_epos.bh != epos.bh)
			{
				brelse(goal_epos.bh);
				goal_epos.bh = epos.bh;
				get_bh(goal_epos.bh);
			}
			goal_epos.block = epos.block;
			goal_epos.offset = epos.offset - adsize;
			goal_eloc = eloc;
			goal_elen = (etype << 30) | elen;
		}
	}

	brelse(epos.bh);

	if (spread == 0xFFFFFFFF)
	{
		brelse(goal_epos.bh);
		mutex_unlock(&sbi->s_alloc_mutex);
		return 0;
	}

	/* Only allocate blocks from the beginning of the extent.
	   That way, we only delete (empty) extents, never have to insert an
	   extent because of splitting */
	/* This works, but very poorly.... */

	newblock = goal_eloc.logicalBlockNum;
	goal_eloc.logicalBlockNum ++;
	goal_elen -= sb->s_blocksize;

	if (inode && DQUOT_ALLOC_BLOCK(inode, 1))
	{
		brelse(goal_epos.bh);
		mutex_unlock(&sbi->s_alloc_mutex);
		*err = -EDQUOT;
		return 0;
	}

	if (goal_elen)
		udf_write_aext(table, &goal_epos, goal_eloc, goal_elen, 1);
	else
		udf_delete_aext(table, goal_epos, goal_eloc, goal_elen);
	brelse(goal_epos.bh);

	if (UDF_SB_LVIDBH(sb))
	{
		UDF_SB_LVID(sb)->freeSpaceTable[partition] =
			cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[partition])-1);
		mark_buffer_dirty(UDF_SB_LVIDBH(sb));
	}

	sb->s_dirt = 1;
	mutex_unlock(&sbi->s_alloc_mutex);
	*err = 0;
	return newblock;
}

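/*
 * Front end for freeing blocks: dispatch to the bitmap or table
 * implementation depending on which free-space structure the partition
 * advertises in its partition-map flags.
 */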
inline void udf_free_blocks(struct super_block * sb,
	struct inode * inode,
	kernel_lb_addr bloc, uint32_t offset, uint32_t count)
{
	uint16_t partition = bloc.partitionReferenceNum;

	if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_BITMAP)
	{
		return udf_bitmap_free_blocks(sb, inode,
			UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_bitmap,
			bloc, offset, count);
	}
	else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_TABLE)
	{
		return udf_table_free_blocks(sb, inode,
			UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_table,
			bloc, offset, count);
	}
	else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_BITMAP)
	{
		return udf_bitmap_free_blocks(sb, inode,
			UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_bitmap,
			bloc, offset, count);
	}
	else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_TABLE)
	{
		return udf_table_free_blocks(sb, inode,
			UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_table,
			bloc, offset, count);
	}
	else
		return;
}

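/*
 * Front end for preallocation, dispatching on the partition flags exactly
 * like udf_free_blocks() above.
 */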
inline int udf_prealloc_blocks(struct super_block * sb,
	struct inode * inode,
	uint16_t partition, uint32_t first_block, uint32_t block_count)
{
	if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_BITMAP)
	{
		return udf_bitmap_prealloc_blocks(sb, inode,
			UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_bitmap,
			partition, first_block, block_count);
	}
	else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_TABLE)
	{
		return udf_table_prealloc_blocks(sb, inode,
			UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_table,
			partition, first_block, block_count);
	}
	else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_BITMAP)
	{
		return udf_bitmap_prealloc_blocks(sb, inode,
			UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_bitmap,
			partition, first_block, block_count);
	}
	else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_TABLE)
	{
		return udf_table_prealloc_blocks(sb, inode,
			UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_table,
			partition, first_block, block_count);
	}
	else
		return 0;
}

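/*
 * Front end for single-block allocation, again selecting the bitmap or
 * table allocator from the partition flags; *err is set on failure.
 */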
inline int udf_new_block(struct super_block * sb,
	struct inode * inode,
	uint16_t partition, uint32_t goal, int *err)
{
	int ret;

	if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_BITMAP)
	{
		ret = udf_bitmap_new_block(sb, inode,
			UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_bitmap,
			partition, goal, err);
		return ret;
	}
	else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_TABLE)
	{
		return udf_table_new_block(sb, inode,
			UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_table,
			partition, goal, err);
	}
	else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_BITMAP)
	{
		return udf_bitmap_new_block(sb, inode,
			UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_bitmap,
			partition, goal, err);
	}
	else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_TABLE)
	{
		return udf_table_new_block(sb, inode,
			UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_table,
			partition, goal, err);
	}
	else
	{
		*err = -EIO;
		return 0;
	}
}