/*
 * bitmap.c two-level bitmap (C) Peter T. Breuer (ptb@ot.uc3m.es) 2003
 *
 * bitmap_create  - sets up the bitmap structure
 * bitmap_destroy - destroys the bitmap structure
 *
 * additions, Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.:
 * - added disk storage for bitmap
 * - changes to allow various bitmap chunk sizes
 */

/*
 * Still to do:
 *
 * flush after percent set rather than just time based. (maybe both).
 */

#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/buffer_head.h>
#include <linux/seq_file.h>
#include "md.h"
#include "bitmap.h"

static inline char *bmname(struct bitmap *bitmap)
{
	return bitmap->mddev ? mdname(bitmap->mddev) : "mdX";
}

/*
 * check a page and, if necessary, allocate it (or hijack it if the alloc fails)
 *
 * 1) check to see if this page is allocated, if it's not then try to alloc
 * 2) if the alloc fails, set the page's hijacked flag so we'll use the
 *    page pointer directly as a counter
 *
 * if we find our page, we increment the page's refcount so that it stays
 * allocated while we're using it
 */
static int bitmap_checkpage(struct bitmap_counts *bitmap,
			    unsigned long page, int create)
__releases(bitmap->lock)
__acquires(bitmap->lock)
{
	unsigned char *mappage;

	if (page >= bitmap->pages) {
		/* This can happen if bitmap_start_sync goes beyond
		 * End-of-device while looking for a whole page.
		 * It is harmless.
		 */
		return -EINVAL;
	}

	if (bitmap->bp[page].hijacked) /* it's hijacked, don't try to alloc */
		return 0;

	if (bitmap->bp[page].map) /* page is already allocated, just return */
		return 0;

	if (!create)
		return -ENOENT;

	/* this page has not been allocated yet */

	spin_unlock_irq(&bitmap->lock);
	/* It is possible that this is being called inside a
	 * prepare_to_wait/finish_wait loop from raid5.c:make_request().
	 * In general it is not permitted to sleep in that context as it
	 * can cause the loop to spin freely.
	 * That doesn't apply here as we can only reach this point
	 * once with any loop.
	 * When this function completes, either bp[page].map or
	 * bp[page].hijacked will be set.  In either case, this function
	 * will abort before getting to this point again.  So there is
	 * no risk of a free-spin, and so it is safe to assert
	 * that sleeping here is allowed.
	 */
	sched_annotate_sleep();
	mappage = kzalloc(PAGE_SIZE, GFP_NOIO);
	spin_lock_irq(&bitmap->lock);

	if (mappage == NULL) {
		pr_debug("md/bitmap: map page allocation failed, hijacking\n");
		/* failed - set the hijacked flag so that we can use the
		 * pointer as a counter */
		if (!bitmap->bp[page].map)
			bitmap->bp[page].hijacked = 1;
	} else if (bitmap->bp[page].map ||
		   bitmap->bp[page].hijacked) {
		/* somebody beat us to getting the page */
		kfree(mappage);
		return 0;
	} else {
		/* no page was in place and we have one, so install it */
		bitmap->bp[page].map = mappage;
		bitmap->missing_pages--;
	}
	return 0;
}
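
/*
 * Illustrative sketch (mirrors the real code in bitmap_get_counter()
 * below): once a page is hijacked, the bp[page].map pointer field is
 * itself reinterpreted as a pair of counters, one per half of the
 * page's range:
 *
 *	int hi = (pageoff > PAGE_COUNTER_MASK);
 *	bitmap_counter_t *bmc = &((bitmap_counter_t *)
 *				  &bitmap->bp[page].map)[hi];
 */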

/* if page is completely empty, put it back on the free list, or dealloc it */
/* if page was hijacked, unmark the flag so it might get alloced next time */
/* Note: lock should be held when calling this */
static void bitmap_checkfree(struct bitmap_counts *bitmap, unsigned long page)
{
	char *ptr;

	if (bitmap->bp[page].count) /* page is still busy */
		return;

	/* page is no longer in use, it can be released */

	if (bitmap->bp[page].hijacked) { /* page was hijacked, undo this now */
		bitmap->bp[page].hijacked = 0;
		bitmap->bp[page].map = NULL;
	} else {
		/* normal case, free the page */
		ptr = bitmap->bp[page].map;
		bitmap->bp[page].map = NULL;
		bitmap->missing_pages++;
		kfree(ptr);
	}
}

/*
 * bitmap file handling - read and write the bitmap file and its superblock
 */

/*
 * basic page I/O operations
 */

/* IO operations when bitmap is stored near all superblocks */
static int read_sb_page(struct mddev *mddev, loff_t offset,
			struct page *page,
			unsigned long index, int size)
{
	/* choose a good rdev and read the page from there */

	struct md_rdev *rdev;
	sector_t target;

	rdev_for_each(rdev, mddev) {
		if (! test_bit(In_sync, &rdev->flags)
		    || test_bit(Faulty, &rdev->flags))
			continue;

		target = offset + index * (PAGE_SIZE/512);

		if (sync_page_io(rdev, target,
				 roundup(size, bdev_logical_block_size(rdev->bdev)),
				 page, READ, true)) {
			page->index = index;
			return 0;
		}
	}
	return -EIO;
}

static struct md_rdev *next_active_rdev(struct md_rdev *rdev, struct mddev *mddev)
{
	/* Iterate the disks of an mddev, using rcu to protect access to the
	 * linked list, and raising the refcount of devices we return to ensure
	 * they don't disappear while in use.
	 * As devices are only added or removed when raid_disk is < 0 and
	 * nr_pending is 0 and In_sync is clear, the entries we return will
	 * still be in the same position on the list when we re-enter
	 * list_for_each_entry_continue_rcu.
	 *
	 * Note that if entered with 'rdev == NULL' to start at the
	 * beginning, we temporarily assign 'rdev' to an address which
	 * isn't really an rdev, but which can be used by
	 * list_for_each_entry_continue_rcu() to find the first entry.
	 */
	rcu_read_lock();
	if (rdev == NULL)
		/* start at the beginning */
		rdev = list_entry(&mddev->disks, struct md_rdev, same_set);
	else {
		/* release the previous rdev and start from there. */
		rdev_dec_pending(rdev, mddev);
	}
	list_for_each_entry_continue_rcu(rdev, &mddev->disks, same_set) {
		if (rdev->raid_disk >= 0 &&
		    !test_bit(Faulty, &rdev->flags)) {
			/* this is a usable device */
			atomic_inc(&rdev->nr_pending);
			rcu_read_unlock();
			return rdev;
		}
	}
	rcu_read_unlock();
	return NULL;
}

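/*
 * Write one page of the bitmap to the bitmap area of every active
 * member device, after checking that the write cannot collide with
 * array data or metadata.
 */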
static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait)
{
	struct md_rdev *rdev = NULL;
	struct block_device *bdev;
	struct mddev *mddev = bitmap->mddev;
	struct bitmap_storage *store = &bitmap->storage;
	int node_offset = 0;

	if (mddev_is_clustered(bitmap->mddev))
		node_offset = bitmap->cluster_slot * store->file_pages;

	while ((rdev = next_active_rdev(rdev, mddev)) != NULL) {
		int size = PAGE_SIZE;
		loff_t offset = mddev->bitmap_info.offset;

		bdev = (rdev->meta_bdev) ? rdev->meta_bdev : rdev->bdev;

		if (page->index == store->file_pages-1) {
			int last_page_size = store->bytes & (PAGE_SIZE-1);
			if (last_page_size == 0)
				last_page_size = PAGE_SIZE;
			size = roundup(last_page_size,
				       bdev_logical_block_size(bdev));
		}
		/* Just make sure we aren't corrupting data or
		 * metadata
		 */
		if (mddev->external) {
			/* Bitmap could be anywhere. */
			if (rdev->sb_start + offset + (page->index
						       * (PAGE_SIZE/512))
			    > rdev->data_offset
			    &&
			    rdev->sb_start + offset
			    < (rdev->data_offset + mddev->dev_sectors
			     + (PAGE_SIZE/512)))
				goto bad_alignment;
		} else if (offset < 0) {
			/* DATA  BITMAP METADATA  */
			if (offset
			    + (long)(page->index * (PAGE_SIZE/512))
			    + size/512 > 0)
				/* bitmap runs in to metadata */
				goto bad_alignment;
			if (rdev->data_offset + mddev->dev_sectors
			    > rdev->sb_start + offset)
				/* data runs in to bitmap */
				goto bad_alignment;
		} else if (rdev->sb_start < rdev->data_offset) {
			/* METADATA BITMAP DATA */
			if (rdev->sb_start
			    + offset
			    + page->index*(PAGE_SIZE/512) + size/512
			    > rdev->data_offset)
				/* bitmap runs in to data */
				goto bad_alignment;
		} else {
			/* DATA METADATA BITMAP - no problems */
		}
		md_super_write(mddev, rdev,
			       rdev->sb_start + offset
			       + page->index * (PAGE_SIZE/512),
			       size,
			       page);
	}

	if (wait)
		md_super_wait(mddev);
	return 0;

 bad_alignment:
	return -EINVAL;
}

static void bitmap_file_kick(struct bitmap *bitmap);
/*
 * write out a page to a file
 */
static void write_page(struct bitmap *bitmap, struct page *page, int wait)
{
	struct buffer_head *bh;

	if (bitmap->storage.file == NULL) {
		switch (write_sb_page(bitmap, page, wait)) {
		case -EINVAL:
			set_bit(BITMAP_WRITE_ERROR, &bitmap->flags);
		}
	} else {

		bh = page_buffers(page);

		while (bh && bh->b_blocknr) {
			atomic_inc(&bitmap->pending_writes);
			set_buffer_locked(bh);
			set_buffer_mapped(bh);
			submit_bh(WRITE | REQ_SYNC, bh);
			bh = bh->b_this_page;
		}

		if (wait)
			wait_event(bitmap->write_wait,
				   atomic_read(&bitmap->pending_writes)==0);
	}
	if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags))
		bitmap_file_kick(bitmap);
}

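/* I/O completion callback for bitmap pages that are backed by a file */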
static void end_bitmap_write(struct buffer_head *bh, int uptodate)
{
	struct bitmap *bitmap = bh->b_private;

	if (!uptodate)
		set_bit(BITMAP_WRITE_ERROR, &bitmap->flags);
	if (atomic_dec_and_test(&bitmap->pending_writes))
		wake_up(&bitmap->write_wait);
}

/* copied from buffer.c */
static void
__clear_page_buffers(struct page *page)
{
	ClearPagePrivate(page);
	set_page_private(page, 0);
	page_cache_release(page);
}
static void free_buffers(struct page *page)
{
	struct buffer_head *bh;

	if (!PagePrivate(page))
		return;

	bh = page_buffers(page);
	while (bh) {
		struct buffer_head *next = bh->b_this_page;
		free_buffer_head(bh);
		bh = next;
	}
	__clear_page_buffers(page);
	put_page(page);
}

/* read a page from a file.
 * We both read the page, and attach buffers to the page to record the
 * address of each block (using bmap).  These addresses will be used
 * to write the block later, completely bypassing the filesystem.
 * This usage is similar to how swap files are handled, and allows us
 * to write to a file with no concerns of memory allocation failing.
 */
static int read_page(struct file *file, unsigned long index,
		     struct bitmap *bitmap,
		     unsigned long count,
		     struct page *page)
{
	int ret = 0;
	struct inode *inode = file_inode(file);
	struct buffer_head *bh;
	sector_t block;

	pr_debug("read bitmap file (%dB @ %llu)\n", (int)PAGE_SIZE,
		 (unsigned long long)index << PAGE_SHIFT);

	bh = alloc_page_buffers(page, 1<<inode->i_blkbits, 0);
	if (!bh) {
		ret = -ENOMEM;
		goto out;
	}
	attach_page_buffers(page, bh);
	block = index << (PAGE_SHIFT - inode->i_blkbits);
	while (bh) {
		if (count == 0)
			bh->b_blocknr = 0;
		else {
			bh->b_blocknr = bmap(inode, block);
			if (bh->b_blocknr == 0) {
				/* Cannot use this file! */
				ret = -EINVAL;
				goto out;
			}
			bh->b_bdev = inode->i_sb->s_bdev;
			if (count < (1<<inode->i_blkbits))
				count = 0;
			else
				count -= (1<<inode->i_blkbits);

			bh->b_end_io = end_bitmap_write;
			bh->b_private = bitmap;
			atomic_inc(&bitmap->pending_writes);
			set_buffer_locked(bh);
			set_buffer_mapped(bh);
			submit_bh(READ, bh);
		}
		block++;
		bh = bh->b_this_page;
	}
	page->index = index;

	wait_event(bitmap->write_wait,
		   atomic_read(&bitmap->pending_writes)==0);
	if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags))
		ret = -EIO;
out:
	if (ret)
		printk(KERN_ALERT "md: bitmap read error: (%dB @ %llu): %d\n",
			(int)PAGE_SIZE,
			(unsigned long long)index << PAGE_SHIFT,
			ret);
	return ret;
}

/*
 * bitmap file superblock operations
 */

/* update the event counter and sync the superblock to disk */
void bitmap_update_sb(struct bitmap *bitmap)
{
	bitmap_super_t *sb;

	if (!bitmap || !bitmap->mddev) /* no bitmap for this array */
		return;
	if (bitmap->mddev->bitmap_info.external)
		return;
	if (!bitmap->storage.sb_page) /* no superblock */
		return;
	sb = kmap_atomic(bitmap->storage.sb_page);
	sb->events = cpu_to_le64(bitmap->mddev->events);
	if (bitmap->mddev->events < bitmap->events_cleared)
		/* rocking back to read-only */
		bitmap->events_cleared = bitmap->mddev->events;
	sb->events_cleared = cpu_to_le64(bitmap->events_cleared);
	sb->state = cpu_to_le32(bitmap->flags);
	/* Just in case these have been changed via sysfs: */
	sb->daemon_sleep = cpu_to_le32(bitmap->mddev->bitmap_info.daemon_sleep/HZ);
	sb->write_behind = cpu_to_le32(bitmap->mddev->bitmap_info.max_write_behind);
	/* This might have been changed by a reshape */
	sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors);
	sb->chunksize = cpu_to_le32(bitmap->mddev->bitmap_info.chunksize);
	sb->nodes = cpu_to_le32(bitmap->mddev->bitmap_info.nodes);
	sb->sectors_reserved = cpu_to_le32(bitmap->mddev->
					   bitmap_info.space);
	kunmap_atomic(sb);
	write_page(bitmap, bitmap->storage.sb_page, 1);
}

/* print out the bitmap file superblock */
void bitmap_print_sb(struct bitmap *bitmap)
{
	bitmap_super_t *sb;

	if (!bitmap || !bitmap->storage.sb_page)
		return;
	sb = kmap_atomic(bitmap->storage.sb_page);
	printk(KERN_DEBUG "%s: bitmap file superblock:\n", bmname(bitmap));
	printk(KERN_DEBUG "         magic: %08x\n", le32_to_cpu(sb->magic));
	printk(KERN_DEBUG "       version: %d\n", le32_to_cpu(sb->version));
	printk(KERN_DEBUG "          uuid: %08x.%08x.%08x.%08x\n",
					*(__u32 *)(sb->uuid+0),
					*(__u32 *)(sb->uuid+4),
					*(__u32 *)(sb->uuid+8),
					*(__u32 *)(sb->uuid+12));
	printk(KERN_DEBUG "        events: %llu\n",
			(unsigned long long) le64_to_cpu(sb->events));
	printk(KERN_DEBUG "events cleared: %llu\n",
			(unsigned long long) le64_to_cpu(sb->events_cleared));
	printk(KERN_DEBUG "         state: %08x\n", le32_to_cpu(sb->state));
	printk(KERN_DEBUG "     chunksize: %d B\n", le32_to_cpu(sb->chunksize));
	printk(KERN_DEBUG "  daemon sleep: %ds\n", le32_to_cpu(sb->daemon_sleep));
	printk(KERN_DEBUG "     sync size: %llu KB\n",
			(unsigned long long)le64_to_cpu(sb->sync_size)/2);
	printk(KERN_DEBUG "max write behind: %d\n", le32_to_cpu(sb->write_behind));
	kunmap_atomic(sb);
}

/*
 * bitmap_new_disk_sb
 * @bitmap
 *
 * This function is somewhat the reverse of bitmap_read_sb.  bitmap_read_sb
 * reads and verifies the on-disk bitmap superblock and populates bitmap_info.
 * This function verifies 'bitmap_info' and populates the on-disk bitmap
 * structure, which is to be written to disk.
 *
 * Returns: 0 on success, -Exxx on error
 */
static int bitmap_new_disk_sb(struct bitmap *bitmap)
{
	bitmap_super_t *sb;
	unsigned long chunksize, daemon_sleep, write_behind;

	bitmap->storage.sb_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (bitmap->storage.sb_page == NULL)
		return -ENOMEM;
	bitmap->storage.sb_page->index = 0;

	sb = kmap_atomic(bitmap->storage.sb_page);

	sb->magic = cpu_to_le32(BITMAP_MAGIC);
	sb->version = cpu_to_le32(BITMAP_MAJOR_HI);

	chunksize = bitmap->mddev->bitmap_info.chunksize;
	BUG_ON(!chunksize);
	if (!is_power_of_2(chunksize)) {
		kunmap_atomic(sb);
		printk(KERN_ERR "bitmap chunksize not a power of 2\n");
		return -EINVAL;
	}
	sb->chunksize = cpu_to_le32(chunksize);

	daemon_sleep = bitmap->mddev->bitmap_info.daemon_sleep;
	if (!daemon_sleep ||
	    (daemon_sleep < 1) || (daemon_sleep > MAX_SCHEDULE_TIMEOUT)) {
		printk(KERN_INFO "Choosing daemon_sleep default (5 sec)\n");
		daemon_sleep = 5 * HZ;
	}
	sb->daemon_sleep = cpu_to_le32(daemon_sleep);
	bitmap->mddev->bitmap_info.daemon_sleep = daemon_sleep;

	/*
	 * FIXME: write_behind for RAID1.  If not specified, what
	 * is a good choice?  We choose COUNTER_MAX / 2 arbitrarily.
	 */
	write_behind = bitmap->mddev->bitmap_info.max_write_behind;
	if (write_behind > COUNTER_MAX)
		write_behind = COUNTER_MAX / 2;
	sb->write_behind = cpu_to_le32(write_behind);
	bitmap->mddev->bitmap_info.max_write_behind = write_behind;

	/* keep the array size field of the bitmap superblock up to date */
	sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors);

	memcpy(sb->uuid, bitmap->mddev->uuid, 16);

	set_bit(BITMAP_STALE, &bitmap->flags);
	sb->state = cpu_to_le32(bitmap->flags);
	bitmap->events_cleared = bitmap->mddev->events;
	sb->events_cleared = cpu_to_le64(bitmap->mddev->events);
	bitmap->mddev->bitmap_info.nodes = 0;

	kunmap_atomic(sb);

	return 0;
}

/* read the superblock from the bitmap file and initialize some bitmap fields */
static int bitmap_read_sb(struct bitmap *bitmap)
{
	char *reason = NULL;
	bitmap_super_t *sb;
	unsigned long chunksize, daemon_sleep, write_behind;
	unsigned long long events;
	int nodes = 0;
	unsigned long sectors_reserved = 0;
	int err = -EINVAL;
	struct page *sb_page;

	if (!bitmap->storage.file && !bitmap->mddev->bitmap_info.offset) {
		chunksize = 128 * 1024 * 1024;
		daemon_sleep = 5 * HZ;
		write_behind = 0;
		set_bit(BITMAP_STALE, &bitmap->flags);
		err = 0;
		goto out_no_sb;
	}
	/* page 0 is the superblock, read it... */
	sb_page = alloc_page(GFP_KERNEL);
	if (!sb_page)
		return -ENOMEM;
	bitmap->storage.sb_page = sb_page;

re_read:
	/* If cluster_slot is set, the cluster is setup */
	if (bitmap->cluster_slot >= 0) {
		sector_t bm_blocks = bitmap->mddev->resync_max_sectors;

		sector_div(bm_blocks,
			   bitmap->mddev->bitmap_info.chunksize >> 9);
		/* bits to bytes */
		bm_blocks = ((bm_blocks+7) >> 3) + sizeof(bitmap_super_t);
		/* to 4k blocks */
		bm_blocks = DIV_ROUND_UP_SECTOR_T(bm_blocks, 4096);
		bitmap->mddev->bitmap_info.offset += bitmap->cluster_slot * (bm_blocks << 3);
		pr_info("%s:%d bm slot: %d offset: %llu\n", __func__, __LINE__,
			bitmap->cluster_slot, (unsigned long long)bitmap->mddev->bitmap_info.offset);
	}
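	/*
	 * Worked example (illustrative numbers): with resync_max_sectors
	 * of 2097152 (1 GiB) and a 64 MiB chunk (131072 sectors),
	 * bm_blocks is 2097152 / 131072 = 16 bits, which becomes
	 * ((16+7)>>3) + 256 = 258 bytes and hence one 4k block, so each
	 * cluster slot's bitmap starts 8 sectors after the previous one.
	 */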

	if (bitmap->storage.file) {
		loff_t isize = i_size_read(bitmap->storage.file->f_mapping->host);
		int bytes = isize > PAGE_SIZE ? PAGE_SIZE : isize;

		err = read_page(bitmap->storage.file, 0,
				bitmap, bytes, sb_page);
	} else {
		err = read_sb_page(bitmap->mddev,
				   bitmap->mddev->bitmap_info.offset,
				   sb_page,
				   0, sizeof(bitmap_super_t));
	}
	if (err)
		return err;

	err = -EINVAL;
	sb = kmap_atomic(sb_page);

	chunksize = le32_to_cpu(sb->chunksize);
	daemon_sleep = le32_to_cpu(sb->daemon_sleep) * HZ;
	write_behind = le32_to_cpu(sb->write_behind);
	sectors_reserved = le32_to_cpu(sb->sectors_reserved);
	/* XXX: This is a hack to ensure that we don't use clustering
	 *  in case:
	 *	- dm-raid is in use and
	 *	- the nodes written in bitmap_sb is erroneous.
	 */
	if (!bitmap->mddev->sync_super) {
		nodes = le32_to_cpu(sb->nodes);
		strlcpy(bitmap->mddev->bitmap_info.cluster_name,
				sb->cluster_name, 64);
	}

	/* verify that the bitmap-specific fields are valid */
	if (sb->magic != cpu_to_le32(BITMAP_MAGIC))
		reason = "bad magic";
	else if (le32_to_cpu(sb->version) < BITMAP_MAJOR_LO ||
		 le32_to_cpu(sb->version) > BITMAP_MAJOR_HI)
		reason = "unrecognized superblock version";
	else if (chunksize < 512)
		reason = "bitmap chunksize too small";
	else if (!is_power_of_2(chunksize))
		reason = "bitmap chunksize not a power of 2";
	else if (daemon_sleep < 1 || daemon_sleep > MAX_SCHEDULE_TIMEOUT)
		reason = "daemon sleep period out of range";
	else if (write_behind > COUNTER_MAX)
		reason = "write-behind limit out of range (0 - 16383)";
	if (reason) {
		printk(KERN_INFO "%s: invalid bitmap file superblock: %s\n",
			bmname(bitmap), reason);
		goto out;
	}

	/* keep the array size field of the bitmap superblock up to date */
	sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors);

	if (bitmap->mddev->persistent) {
		/*
		 * We have a persistent array superblock, so compare the
		 * bitmap's UUID and event counter to the mddev's
		 */
		if (memcmp(sb->uuid, bitmap->mddev->uuid, 16)) {
			printk(KERN_INFO
			       "%s: bitmap superblock UUID mismatch\n",
			       bmname(bitmap));
			goto out;
		}
		events = le64_to_cpu(sb->events);
		if (!nodes && (events < bitmap->mddev->events)) {
			printk(KERN_INFO
			       "%s: bitmap file is out of date (%llu < %llu) "
			       "-- forcing full recovery\n",
			       bmname(bitmap), events,
			       (unsigned long long) bitmap->mddev->events);
			set_bit(BITMAP_STALE, &bitmap->flags);
		}
	}

	/* assign fields using values from superblock */
	bitmap->flags |= le32_to_cpu(sb->state);
	if (le32_to_cpu(sb->version) == BITMAP_MAJOR_HOSTENDIAN)
		set_bit(BITMAP_HOSTENDIAN, &bitmap->flags);
	bitmap->events_cleared = le64_to_cpu(sb->events_cleared);
	strlcpy(bitmap->mddev->bitmap_info.cluster_name, sb->cluster_name, 64);
	err = 0;

out:
	kunmap_atomic(sb);
	/* Assigning chunksize is required for "re_read" */
	bitmap->mddev->bitmap_info.chunksize = chunksize;
	if (err == 0 && nodes && (bitmap->cluster_slot < 0)) {
		err = md_setup_cluster(bitmap->mddev, nodes);
		if (err) {
			pr_err("%s: Could not setup cluster service (%d)\n",
					bmname(bitmap), err);
			goto out_no_sb;
		}
		bitmap->cluster_slot = md_cluster_ops->slot_number(bitmap->mddev);
		goto re_read;
	}


out_no_sb:
	if (test_bit(BITMAP_STALE, &bitmap->flags))
		bitmap->events_cleared = bitmap->mddev->events;
	bitmap->mddev->bitmap_info.chunksize = chunksize;
	bitmap->mddev->bitmap_info.daemon_sleep = daemon_sleep;
	bitmap->mddev->bitmap_info.max_write_behind = write_behind;
	bitmap->mddev->bitmap_info.nodes = nodes;
	if (bitmap->mddev->bitmap_info.space == 0 ||
	    bitmap->mddev->bitmap_info.space > sectors_reserved)
		bitmap->mddev->bitmap_info.space = sectors_reserved;
	if (err) {
		bitmap_print_sb(bitmap);
		if (bitmap->cluster_slot < 0)
			md_cluster_stop(bitmap->mddev);
	}
	return err;
}

/*
 * general bitmap file operations
 */

/*
 * on-disk bitmap:
 *
 * Use one bit per "chunk" (block set). We do the disk I/O on the bitmap
 * file a page at a time. There's a superblock at the start of the file.
 */
/* calculate the index of the page that contains this bit */
static inline unsigned long file_page_index(struct bitmap_storage *store,
					    unsigned long chunk)
{
	if (store->sb_page)
		chunk += sizeof(bitmap_super_t) << 3;
	return chunk >> PAGE_BIT_SHIFT;
}

/* calculate the (bit) offset of this bit within a page */
static inline unsigned long file_page_offset(struct bitmap_storage *store,
					     unsigned long chunk)
{
	if (store->sb_page)
		chunk += sizeof(bitmap_super_t) << 3;
	return chunk & (PAGE_BITS - 1);
}
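
/*
 * Worked example (4k pages, internal superblock): sizeof(bitmap_super_t)
 * is 256 bytes, i.e. 2048 bits, so chunk 0 lives at bit 2048 of page 0
 * and the first chunk to land on page 1 is chunk 32768 - 2048 = 30720.
 */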

/*
 * return a pointer to the page in the filemap that contains the given bit
 *
 */
static inline struct page *filemap_get_page(struct bitmap_storage *store,
					    unsigned long chunk)
{
	if (file_page_index(store, chunk) >= store->file_pages)
		return NULL;
	return store->filemap[file_page_index(store, chunk)];
}

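/*
 * Allocate the array of pages (and the packed per-page attribute bits)
 * that cache the on-disk bitmap file in memory.
 */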
static int bitmap_storage_alloc(struct bitmap_storage *store,
				unsigned long chunks, int with_super,
				int slot_number)
{
	int pnum, offset = 0;
	unsigned long num_pages;
	unsigned long bytes;

	bytes = DIV_ROUND_UP(chunks, 8);
	if (with_super)
		bytes += sizeof(bitmap_super_t);

	num_pages = DIV_ROUND_UP(bytes, PAGE_SIZE);
	offset = slot_number * (num_pages - 1);

	store->filemap = kmalloc(sizeof(struct page *)
				 * num_pages, GFP_KERNEL);
	if (!store->filemap)
		return -ENOMEM;

	if (with_super && !store->sb_page) {
		store->sb_page = alloc_page(GFP_KERNEL|__GFP_ZERO);
		if (store->sb_page == NULL)
			return -ENOMEM;
	}

	pnum = 0;
	if (store->sb_page) {
		store->filemap[0] = store->sb_page;
		pnum = 1;
		store->sb_page->index = offset;
	}

	for ( ; pnum < num_pages; pnum++) {
		store->filemap[pnum] = alloc_page(GFP_KERNEL|__GFP_ZERO);
		if (!store->filemap[pnum]) {
			store->file_pages = pnum;
			return -ENOMEM;
		}
		store->filemap[pnum]->index = pnum + offset;
	}
	store->file_pages = pnum;

	/* We need 4 bits per page, rounded up to a multiple
	 * of sizeof(unsigned long) */
	store->filemap_attr = kzalloc(
		roundup(DIV_ROUND_UP(num_pages*4, 8), sizeof(unsigned long)),
		GFP_KERNEL);
	if (!store->filemap_attr)
		return -ENOMEM;

	store->bytes = bytes;

	return 0;
}

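/* release the in-memory pages caching the bitmap file, and the file itself */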
static void bitmap_file_unmap(struct bitmap_storage *store)
{
	struct page **map, *sb_page;
	int pages;
	struct file *file;

	file = store->file;
	map = store->filemap;
	pages = store->file_pages;
	sb_page = store->sb_page;

	while (pages--)
		if (map[pages] != sb_page) /* 0 is sb_page, release it below */
			free_buffers(map[pages]);
	kfree(map);
	kfree(store->filemap_attr);

	if (sb_page)
		free_buffers(sb_page);

	if (file) {
		struct inode *inode = file_inode(file);
		invalidate_mapping_pages(inode->i_mapping, 0, -1);
		fput(file);
	}
}

/*
 * bitmap_file_kick - if an error occurs while manipulating the bitmap file
 * then it is no longer reliable, so we stop using it and we mark the file
 * as failed in the superblock
 */
static void bitmap_file_kick(struct bitmap *bitmap)
{
	char *path, *ptr = NULL;

	if (!test_and_set_bit(BITMAP_STALE, &bitmap->flags)) {
		bitmap_update_sb(bitmap);

		if (bitmap->storage.file) {
			path = kmalloc(PAGE_SIZE, GFP_KERNEL);
			if (path)
				ptr = d_path(&bitmap->storage.file->f_path,
					     path, PAGE_SIZE);

			printk(KERN_ALERT
			      "%s: kicking failed bitmap file %s from array!\n",
			      bmname(bitmap), IS_ERR(ptr) ? "" : ptr);

			kfree(path);
		} else
			printk(KERN_ALERT
			       "%s: disabling internal bitmap due to errors\n",
			       bmname(bitmap));
	}
}

enum bitmap_page_attr {
	BITMAP_PAGE_DIRTY = 0,     /* there are set bits that need to be synced */
	BITMAP_PAGE_PENDING = 1,   /* there are bits that are being cleaned.
				    * i.e. counter is 1 or 2. */
	BITMAP_PAGE_NEEDWRITE = 2, /* there are cleared bits that need to be synced */
};
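
/*
 * Each filemap page owns four consecutive bits in filemap_attr, so
 * attribute 'attr' of page 'pnum' lives at bit (pnum << 2) + attr;
 * with only three attributes defined, the fourth bit is spare.
 */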

static inline void set_page_attr(struct bitmap *bitmap, int pnum,
				 enum bitmap_page_attr attr)
{
	set_bit((pnum<<2) + attr, bitmap->storage.filemap_attr);
}

static inline void clear_page_attr(struct bitmap *bitmap, int pnum,
				   enum bitmap_page_attr attr)
{
	clear_bit((pnum<<2) + attr, bitmap->storage.filemap_attr);
}

static inline int test_page_attr(struct bitmap *bitmap, int pnum,
				 enum bitmap_page_attr attr)
{
	return test_bit((pnum<<2) + attr, bitmap->storage.filemap_attr);
}

static inline int test_and_clear_page_attr(struct bitmap *bitmap, int pnum,
					   enum bitmap_page_attr attr)
{
	return test_and_clear_bit((pnum<<2) + attr,
				  bitmap->storage.filemap_attr);
}
/*
 * bitmap_file_set_bit -- called before performing a write to the md device
 * to set (and eventually sync) a particular bit in the bitmap file
 *
 * we set the bit immediately, then we record the page number so that
 * when an unplug occurs, we can flush the dirty pages out to disk
 */
static void bitmap_file_set_bit(struct bitmap *bitmap, sector_t block)
{
	unsigned long bit;
	struct page *page;
	void *kaddr;
	unsigned long chunk = block >> bitmap->counts.chunkshift;

	page = filemap_get_page(&bitmap->storage, chunk);
	if (!page)
		return;
	bit = file_page_offset(&bitmap->storage, chunk);

	/* set the bit */
	kaddr = kmap_atomic(page);
	if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags))
		set_bit(bit, kaddr);
	else
		set_bit_le(bit, kaddr);
	kunmap_atomic(kaddr);
	pr_debug("set file bit %lu page %lu\n", bit, page->index);
	/* record page number so it gets flushed to disk when unplug occurs */
	set_page_attr(bitmap, page->index, BITMAP_PAGE_DIRTY);
}

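/* clear the on-disk bit for a chunk and mark its page for lazy writeback */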
static void bitmap_file_clear_bit(struct bitmap *bitmap, sector_t block)
{
	unsigned long bit;
	struct page *page;
	void *paddr;
	unsigned long chunk = block >> bitmap->counts.chunkshift;

	page = filemap_get_page(&bitmap->storage, chunk);
	if (!page)
		return;
	bit = file_page_offset(&bitmap->storage, chunk);
	paddr = kmap_atomic(page);
	if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags))
		clear_bit(bit, paddr);
	else
		clear_bit_le(bit, paddr);
	kunmap_atomic(paddr);
	if (!test_page_attr(bitmap, page->index, BITMAP_PAGE_NEEDWRITE)) {
		set_page_attr(bitmap, page->index, BITMAP_PAGE_PENDING);
		bitmap->allclean = 0;
	}
}

static int bitmap_file_test_bit(struct bitmap *bitmap, sector_t block)
{
	unsigned long bit;
	struct page *page;
	void *paddr;
	unsigned long chunk = block >> bitmap->counts.chunkshift;
	int set = 0;

	page = filemap_get_page(&bitmap->storage, chunk);
	if (!page)
		return -EINVAL;
	bit = file_page_offset(&bitmap->storage, chunk);
	paddr = kmap_atomic(page);
	if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags))
		set = test_bit(bit, paddr);
	else
		set = test_bit_le(bit, paddr);
	kunmap_atomic(paddr);
	return set;
}


/* this gets called when the md device is ready to unplug its underlying
 * (slave) device queues -- before we let any writes go down, we need to
 * sync the dirty pages of the bitmap file to disk */
void bitmap_unplug(struct bitmap *bitmap)
{
	unsigned long i;
	int dirty, need_write;

	if (!bitmap || !bitmap->storage.filemap ||
	    test_bit(BITMAP_STALE, &bitmap->flags))
		return;

	/* look at each page to see if there are any set bits that need to be
	 * flushed out to disk */
	for (i = 0; i < bitmap->storage.file_pages; i++) {
		if (!bitmap->storage.filemap)
			return;
		dirty = test_and_clear_page_attr(bitmap, i, BITMAP_PAGE_DIRTY);
		need_write = test_and_clear_page_attr(bitmap, i,
						      BITMAP_PAGE_NEEDWRITE);
		if (dirty || need_write) {
			clear_page_attr(bitmap, i, BITMAP_PAGE_PENDING);
			write_page(bitmap, bitmap->storage.filemap[i], 0);
		}
	}
	if (bitmap->storage.file)
		wait_event(bitmap->write_wait,
			   atomic_read(&bitmap->pending_writes)==0);
	else
		md_super_wait(bitmap->mddev);

	if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags))
		bitmap_file_kick(bitmap);
}
EXPORT_SYMBOL(bitmap_unplug);

static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed);
/*
 * bitmap_init_from_disk -- called at bitmap_create time to initialize
 * the in-memory bitmap from the on-disk bitmap -- also, sets up the
 * memory mapping of the bitmap file
 * Special cases:
 *   if there's no bitmap file, or if the bitmap file had been
 *   previously kicked from the array, we mark all the bits as
 *   1's in order to cause a full resync.
 *
 * We ignore all bits for sectors that end earlier than 'start'.
 * This is used when reading an out-of-date bitmap...
 */
static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
{
	unsigned long i, chunks, index, oldindex, bit, node_offset = 0;
	struct page *page = NULL;
	unsigned long bit_cnt = 0;
	struct file *file;
	unsigned long offset;
	int outofdate;
	int ret = -ENOSPC;
	void *paddr;
	struct bitmap_storage *store = &bitmap->storage;

	chunks = bitmap->counts.chunks;
	file = store->file;

	if (!file && !bitmap->mddev->bitmap_info.offset) {
		/* No permanent bitmap - fill with '1s'. */
		store->filemap = NULL;
		store->file_pages = 0;
		for (i = 0; i < chunks ; i++) {
			/* if the disk bit is set, set the memory bit */
			int needed = ((sector_t)(i+1) << (bitmap->counts.chunkshift)
				      >= start);
			bitmap_set_memory_bits(bitmap,
					       (sector_t)i << bitmap->counts.chunkshift,
					       needed);
		}
		return 0;
	}

	outofdate = test_bit(BITMAP_STALE, &bitmap->flags);
	if (outofdate)
		printk(KERN_INFO "%s: bitmap file is out of date, doing full "
			"recovery\n", bmname(bitmap));

	if (file && i_size_read(file->f_mapping->host) < store->bytes) {
		printk(KERN_INFO "%s: bitmap file too short %lu < %lu\n",
		       bmname(bitmap),
		       (unsigned long) i_size_read(file->f_mapping->host),
		       store->bytes);
		goto err;
	}

	oldindex = ~0L;
	offset = 0;
	if (!bitmap->mddev->bitmap_info.external)
		offset = sizeof(bitmap_super_t);

	if (mddev_is_clustered(bitmap->mddev))
		node_offset = bitmap->cluster_slot * (DIV_ROUND_UP(store->bytes, PAGE_SIZE));

	for (i = 0; i < chunks; i++) {
		int b;
		index = file_page_index(&bitmap->storage, i);
		bit = file_page_offset(&bitmap->storage, i);
		if (index != oldindex) { /* this is a new page, read it in */
			int count;
			/* unmap the old page, we're done with it */
			if (index == store->file_pages-1)
				count = store->bytes - index * PAGE_SIZE;
			else
				count = PAGE_SIZE;
			page = store->filemap[index];
			if (file)
				ret = read_page(file, index, bitmap,
						count, page);
			else
				ret = read_sb_page(
					bitmap->mddev,
					bitmap->mddev->bitmap_info.offset,
					page,
					index + node_offset, count);

			if (ret)
				goto err;

			oldindex = index;

			if (outofdate) {
				/*
				 * if bitmap is out of date, dirty the
				 * whole page and write it out
				 */
				paddr = kmap_atomic(page);
				memset(paddr + offset, 0xff,
				       PAGE_SIZE - offset);
				kunmap_atomic(paddr);
				write_page(bitmap, page, 1);

				ret = -EIO;
				if (test_bit(BITMAP_WRITE_ERROR,
					     &bitmap->flags))
					goto err;
			}
		}
		paddr = kmap_atomic(page);
		if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags))
			b = test_bit(bit, paddr);
		else
			b = test_bit_le(bit, paddr);
		kunmap_atomic(paddr);
		if (b) {
			/* if the disk bit is set, set the memory bit */
			int needed = ((sector_t)(i+1) << bitmap->counts.chunkshift
				      >= start);
			bitmap_set_memory_bits(bitmap,
					       (sector_t)i << bitmap->counts.chunkshift,
					       needed);
			bit_cnt++;
		}
		offset = 0;
	}

	printk(KERN_INFO "%s: bitmap initialized from disk: "
	       "read %lu pages, set %lu of %lu bits\n",
	       bmname(bitmap), store->file_pages,
	       bit_cnt, chunks);

	return 0;

 err:
	printk(KERN_INFO "%s: bitmap initialisation failed: %d\n",
	       bmname(bitmap), ret);
	return ret;
}

void bitmap_write_all(struct bitmap *bitmap)
{
	/* We don't actually write all bitmap blocks here,
	 * just flag them as needing to be written
	 */
	int i;

	if (!bitmap || !bitmap->storage.filemap)
		return;
	if (bitmap->storage.file)
		/* Only one copy, so nothing needed */
		return;

	for (i = 0; i < bitmap->storage.file_pages; i++)
		set_page_attr(bitmap, i,
			      BITMAP_PAGE_NEEDWRITE);
	bitmap->allclean = 0;
}

static void bitmap_count_page(struct bitmap_counts *bitmap,
			      sector_t offset, int inc)
{
	sector_t chunk = offset >> bitmap->chunkshift;
	unsigned long page = chunk >> PAGE_COUNTER_SHIFT;
	bitmap->bp[page].count += inc;
	bitmap_checkfree(bitmap, page);
}

static void bitmap_set_pending(struct bitmap_counts *bitmap, sector_t offset)
{
	sector_t chunk = offset >> bitmap->chunkshift;
	unsigned long page = chunk >> PAGE_COUNTER_SHIFT;
	struct bitmap_page *bp = &bitmap->bp[page];

	if (!bp->pending)
		bp->pending = 1;
}

static bitmap_counter_t *bitmap_get_counter(struct bitmap_counts *bitmap,
					    sector_t offset, sector_t *blocks,
					    int create);

/*
 * bitmap daemon -- periodically wakes up to clean bits and flush pages
 *			out to disk
 */

void bitmap_daemon_work(struct mddev *mddev)
{
	struct bitmap *bitmap;
	unsigned long j;
	unsigned long nextpage;
	sector_t blocks;
	struct bitmap_counts *counts;

	/* Use a mutex to guard daemon_work against
	 * bitmap_destroy.
	 */
	mutex_lock(&mddev->bitmap_info.mutex);
	bitmap = mddev->bitmap;
	if (bitmap == NULL) {
		mutex_unlock(&mddev->bitmap_info.mutex);
		return;
	}
	if (time_before(jiffies, bitmap->daemon_lastrun
			+ mddev->bitmap_info.daemon_sleep))
		goto done;

	bitmap->daemon_lastrun = jiffies;
	if (bitmap->allclean) {
		mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;
		goto done;
	}
	bitmap->allclean = 1;

	/* Any file-page which is PENDING now needs to be written.
	 * So set NEEDWRITE now, then after we make any last-minute changes
	 * we will write it.
	 */
	for (j = 0; j < bitmap->storage.file_pages; j++)
		if (test_and_clear_page_attr(bitmap, j,
					     BITMAP_PAGE_PENDING))
			set_page_attr(bitmap, j,
				      BITMAP_PAGE_NEEDWRITE);

	if (bitmap->need_sync &&
	    mddev->bitmap_info.external == 0) {
		/* Arrange for superblock update as well as
		 * other changes */
		bitmap_super_t *sb;
		bitmap->need_sync = 0;
		if (bitmap->storage.filemap) {
			sb = kmap_atomic(bitmap->storage.sb_page);
			sb->events_cleared =
				cpu_to_le64(bitmap->events_cleared);
			kunmap_atomic(sb);
			set_page_attr(bitmap, 0,
				      BITMAP_PAGE_NEEDWRITE);
		}
	}
	/* Now look at the bitmap counters and if any are '2' or '1',
	 * decrement and handle accordingly.
	 */
	counts = &bitmap->counts;
	spin_lock_irq(&counts->lock);
	nextpage = 0;
	for (j = 0; j < counts->chunks; j++) {
		bitmap_counter_t *bmc;
		sector_t  block = (sector_t)j << counts->chunkshift;

		if (j == nextpage) {
			nextpage += PAGE_COUNTER_RATIO;
			if (!counts->bp[j >> PAGE_COUNTER_SHIFT].pending) {
				j |= PAGE_COUNTER_MASK;
				continue;
			}
			counts->bp[j >> PAGE_COUNTER_SHIFT].pending = 0;
		}
		bmc = bitmap_get_counter(counts,
					 block,
					 &blocks, 0);

		if (!bmc) {
			j |= PAGE_COUNTER_MASK;
			continue;
		}
		if (*bmc == 1 && !bitmap->need_sync) {
			/* We can clear the bit */
			*bmc = 0;
			bitmap_count_page(counts, block, -1);
			bitmap_file_clear_bit(bitmap, block);
		} else if (*bmc && *bmc <= 2) {
			*bmc = 1;
			bitmap_set_pending(counts, block);
			bitmap->allclean = 0;
		}
	}
	spin_unlock_irq(&counts->lock);

	/* Now start writeout on any page in NEEDWRITE that isn't DIRTY.
	 * DIRTY pages need to be written by bitmap_unplug so it can wait
	 * for them.
	 * If we find any DIRTY page we stop there and let bitmap_unplug
	 * handle all the rest.  This is important in the case where
	 * the first blocking holds the superblock and it has been updated.
	 * We mustn't write any other blocks before the superblock.
	 */
	for (j = 0;
	     j < bitmap->storage.file_pages
		     && !test_bit(BITMAP_STALE, &bitmap->flags);
	     j++) {
		if (test_page_attr(bitmap, j,
				   BITMAP_PAGE_DIRTY))
			/* bitmap_unplug will handle the rest */
			break;
		if (test_and_clear_page_attr(bitmap, j,
					     BITMAP_PAGE_NEEDWRITE)) {
			write_page(bitmap, bitmap->storage.filemap[j], 0);
		}
	}

 done:
	if (bitmap->allclean == 0)
		mddev->thread->timeout =
			mddev->bitmap_info.daemon_sleep;
	mutex_unlock(&mddev->bitmap_info.mutex);
}

static bitmap_counter_t *bitmap_get_counter(struct bitmap_counts *bitmap,
					    sector_t offset, sector_t *blocks,
					    int create)
__releases(bitmap->lock)
__acquires(bitmap->lock)
{
	/* If 'create', we might release the lock and reclaim it.
	 * The lock must have been taken with interrupts enabled.
	 * If !create, we don't release the lock.
	 */
	sector_t chunk = offset >> bitmap->chunkshift;
	unsigned long page = chunk >> PAGE_COUNTER_SHIFT;
	unsigned long pageoff = (chunk & PAGE_COUNTER_MASK) << COUNTER_BYTE_SHIFT;
	sector_t csize;
	int err;

	err = bitmap_checkpage(bitmap, page, create);

	if (bitmap->bp[page].hijacked ||
	    bitmap->bp[page].map == NULL)
		csize = ((sector_t)1) << (bitmap->chunkshift +
					  PAGE_COUNTER_SHIFT - 1);
	else
		csize = ((sector_t)1) << bitmap->chunkshift;
	*blocks = csize - (offset & (csize - 1));

	if (err < 0)
		return NULL;

	/* now locked ... */

	if (bitmap->bp[page].hijacked) { /* hijacked pointer */
		/* should we use the first or second counter field
		 * of the hijacked pointer? */
		int hi = (pageoff > PAGE_COUNTER_MASK);
		return  &((bitmap_counter_t *)
			  &bitmap->bp[page].map)[hi];
	} else /* page is allocated */
		return (bitmap_counter_t *)
			&(bitmap->bp[page].map[pageoff]);
}

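/*
 * Called before a write to the array: raise the counter for each chunk
 * the write touches, setting the on-disk bit the first time round.
 * Every call must be paired with a bitmap_endwrite() over the same
 * range.  An illustrative sketch of the pairing (md personalities such
 * as raid1 are the real callers; the variable names here are assumed):
 *
 *	bitmap_startwrite(mddev->bitmap, sector, nr_sectors, behind);
 *	... submit the write to the member devices ...
 *	bitmap_endwrite(mddev->bitmap, sector, nr_sectors, !error, behind);
 */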
int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sectors, int behind)
{
	if (!bitmap)
		return 0;

	if (behind) {
		int bw;
		atomic_inc(&bitmap->behind_writes);
		bw = atomic_read(&bitmap->behind_writes);
		if (bw > bitmap->behind_writes_used)
			bitmap->behind_writes_used = bw;

		pr_debug("inc write-behind count %d/%lu\n",
			 bw, bitmap->mddev->bitmap_info.max_write_behind);
	}

	while (sectors) {
		sector_t blocks;
		bitmap_counter_t *bmc;

		spin_lock_irq(&bitmap->counts.lock);
		bmc = bitmap_get_counter(&bitmap->counts, offset, &blocks, 1);
		if (!bmc) {
			spin_unlock_irq(&bitmap->counts.lock);
			return 0;
		}

		if (unlikely(COUNTER(*bmc) == COUNTER_MAX)) {
			DEFINE_WAIT(__wait);
			/* note that it is safe to do the prepare_to_wait
			 * after the test as long as we do it before dropping
			 * the spinlock.
			 */
			prepare_to_wait(&bitmap->overflow_wait, &__wait,
					TASK_UNINTERRUPTIBLE);
			spin_unlock_irq(&bitmap->counts.lock);
			schedule();
			finish_wait(&bitmap->overflow_wait, &__wait);
			continue;
		}

		switch (*bmc) {
		case 0:
			bitmap_file_set_bit(bitmap, offset);
			bitmap_count_page(&bitmap->counts, offset, 1);
			/* fall through */
		case 1:
			*bmc = 2;
		}

		(*bmc)++;

		spin_unlock_irq(&bitmap->counts.lock);

		offset += blocks;
		if (sectors > blocks)
			sectors -= blocks;
		else
			sectors = 0;
	}
	return 0;
}
EXPORT_SYMBOL(bitmap_startwrite);

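/*
 * Called when a write to the array completes: drop the counters raised
 * by bitmap_startwrite(); on failure the NEEDED flag is set so the
 * chunk will be resynced later.
 */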
void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long sectors,
		     int success, int behind)
{
	if (!bitmap)
		return;
	if (behind) {
		if (atomic_dec_and_test(&bitmap->behind_writes))
			wake_up(&bitmap->behind_wait);
		pr_debug("dec write-behind count %d/%lu\n",
			 atomic_read(&bitmap->behind_writes),
			 bitmap->mddev->bitmap_info.max_write_behind);
	}

	while (sectors) {
		sector_t blocks;
		unsigned long flags;
		bitmap_counter_t *bmc;

		spin_lock_irqsave(&bitmap->counts.lock, flags);
		bmc = bitmap_get_counter(&bitmap->counts, offset, &blocks, 0);
		if (!bmc) {
			spin_unlock_irqrestore(&bitmap->counts.lock, flags);
			return;
		}

		if (success && !bitmap->mddev->degraded &&
		    bitmap->events_cleared < bitmap->mddev->events) {
			bitmap->events_cleared = bitmap->mddev->events;
			bitmap->need_sync = 1;
			sysfs_notify_dirent_safe(bitmap->sysfs_can_clear);
		}

		if (!success && !NEEDED(*bmc))
			*bmc |= NEEDED_MASK;

		if (COUNTER(*bmc) == COUNTER_MAX)
			wake_up(&bitmap->overflow_wait);

		(*bmc)--;
		if (*bmc <= 2) {
			bitmap_set_pending(&bitmap->counts, offset);
			bitmap->allclean = 0;
		}
		spin_unlock_irqrestore(&bitmap->counts.lock, flags);
		offset += blocks;
		if (sectors > blocks)
			sectors -= blocks;
		else
			sectors = 0;
	}
}
EXPORT_SYMBOL(bitmap_endwrite);

static int __bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks,
			       int degraded)
{
	bitmap_counter_t *bmc;
	int rv;
	if (bitmap == NULL) {/* FIXME or bitmap set as 'failed' */
		*blocks = 1024;
		return 1; /* always resync if no bitmap */
	}
	spin_lock_irq(&bitmap->counts.lock);
	bmc = bitmap_get_counter(&bitmap->counts, offset, blocks, 0);
	rv = 0;
	if (bmc) {
		/* locked */
		if (RESYNC(*bmc))
			rv = 1;
		else if (NEEDED(*bmc)) {
			rv = 1;
			if (!degraded) { /* don't set/clear bits if degraded */
				*bmc |= RESYNC_MASK;
				*bmc &= ~NEEDED_MASK;
			}
		}
	}
	spin_unlock_irq(&bitmap->counts.lock);
	return rv;
}

int bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks,
		      int degraded)
{
	/* bitmap_start_sync must always report on multiples of whole
	 * pages, otherwise resync (which is very PAGE_SIZE based) will
	 * get confused.
	 * So call __bitmap_start_sync repeatedly (if needed) until
	 * At least PAGE_SIZE>>9 blocks are covered.
	 * Return the 'or' of the result.
	 */
	int rv = 0;
	sector_t blocks1;

	*blocks = 0;
	while (*blocks < (PAGE_SIZE>>9)) {
		rv |= __bitmap_start_sync(bitmap, offset,
					  &blocks1, degraded);
		offset += blocks1;
		*blocks += blocks1;
	}
	return rv;
}
EXPORT_SYMBOL(bitmap_start_sync);

void bitmap_end_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, int aborted)
{
	bitmap_counter_t *bmc;
	unsigned long flags;

	if (bitmap == NULL) {
		*blocks = 1024;
		return;
	}
	spin_lock_irqsave(&bitmap->counts.lock, flags);
	bmc = bitmap_get_counter(&bitmap->counts, offset, blocks, 0);
	if (bmc == NULL)
		goto unlock;
	/* locked */
	if (RESYNC(*bmc)) {
		*bmc &= ~RESYNC_MASK;

		if (!NEEDED(*bmc) && aborted)
			*bmc |= NEEDED_MASK;
		else {
			if (*bmc <= 2) {
				bitmap_set_pending(&bitmap->counts, offset);
				bitmap->allclean = 0;
			}
		}
	}
 unlock:
	spin_unlock_irqrestore(&bitmap->counts.lock, flags);
}
EXPORT_SYMBOL(bitmap_end_sync);

void bitmap_close_sync(struct bitmap *bitmap)
{
	/* Sync has finished, and any bitmap chunks that weren't synced
	 * properly have been aborted.  It remains to us to clear the
	 * RESYNC bit wherever it is still on
	 */
	sector_t sector = 0;
	sector_t blocks;
	if (!bitmap)
		return;
	while (sector < bitmap->mddev->resync_max_sectors) {
		bitmap_end_sync(bitmap, sector, &blocks, 0);
		sector += blocks;
	}
}
EXPORT_SYMBOL(bitmap_close_sync);

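/*
 * Periodically record how far resync has progressed, clearing the
 * RESYNC bits for the region already covered so that an interrupted
 * sync need not start again from the beginning.
 */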
void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector)
{
	sector_t s = 0;
	sector_t blocks;

	if (!bitmap)
		return;
	if (sector == 0) {
		bitmap->last_end_sync = jiffies;
		return;
	}
	if (time_before(jiffies, (bitmap->last_end_sync
				  + bitmap->mddev->bitmap_info.daemon_sleep)))
		return;
	wait_event(bitmap->mddev->recovery_wait,
		   atomic_read(&bitmap->mddev->recovery_active) == 0);

	bitmap->mddev->curr_resync_completed = sector;
	set_bit(MD_CHANGE_CLEAN, &bitmap->mddev->flags);
	sector &= ~((1ULL << bitmap->counts.chunkshift) - 1);
	s = 0;
	while (s < sector && s < bitmap->mddev->resync_max_sectors) {
		bitmap_end_sync(bitmap, s, &blocks, 0);
		s += blocks;
	}
	bitmap->last_end_sync = jiffies;
	sysfs_notify(&bitmap->mddev->kobj, NULL, "sync_completed");
}
EXPORT_SYMBOL(bitmap_cond_end_sync);

static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed)
{
	/* For each chunk covered by any of these sectors, set the
	 * counter to 2 and possibly set resync_needed.  They should all
	 * be 0 at this point
	 */

	sector_t secs;
	bitmap_counter_t *bmc;
	spin_lock_irq(&bitmap->counts.lock);
	bmc = bitmap_get_counter(&bitmap->counts, offset, &secs, 1);
	if (!bmc) {
		spin_unlock_irq(&bitmap->counts.lock);
		return;
	}
	if (!*bmc) {
		*bmc = 2;
		bitmap_count_page(&bitmap->counts, offset, 1);
		bitmap_set_pending(&bitmap->counts, offset);
		bitmap->allclean = 0;
	}
	if (needed)
		*bmc |= NEEDED_MASK;
	spin_unlock_irq(&bitmap->counts.lock);
}

/* dirty the memory and file bits for bitmap chunks "s" to "e" */
void bitmap_dirty_bits(struct bitmap *bitmap, unsigned long s, unsigned long e)
{
	unsigned long chunk;

	for (chunk = s; chunk <= e; chunk++) {
		sector_t sec = (sector_t)chunk << bitmap->counts.chunkshift;
		bitmap_set_memory_bits(bitmap, sec, 1);
		bitmap_file_set_bit(bitmap, sec);
		if (sec < bitmap->mddev->recovery_cp)
			/* We are asserting that the array is dirty,
			 * so move the recovery_cp address back so
			 * that it is obvious that it is dirty
			 */
			bitmap->mddev->recovery_cp = sec;
	}
}

/*
 * flush out any pending updates
 */
void bitmap_flush(struct mddev *mddev)
{
	struct bitmap *bitmap = mddev->bitmap;
	long sleep;

	if (!bitmap) /* there was no bitmap */
		return;

	/* run the daemon_work three time to ensure everything is flushed
	 * that can be
	 */
	sleep = mddev->bitmap_info.daemon_sleep * 2;
	bitmap->daemon_lastrun -= sleep;
	bitmap_daemon_work(mddev);
	bitmap->daemon_lastrun -= sleep;
	bitmap_daemon_work(mddev);
	bitmap->daemon_lastrun -= sleep;
	bitmap_daemon_work(mddev);
	bitmap_update_sb(bitmap);
}

/*
 * free memory that was allocated
 */
static void bitmap_free(struct bitmap *bitmap)
{
	unsigned long k, pages;
	struct bitmap_page *bp;

	if (!bitmap) /* there was no bitmap */
		return;

	if (mddev_is_clustered(bitmap->mddev) && bitmap->mddev->cluster_info &&
		bitmap->cluster_slot == md_cluster_ops->slot_number(bitmap->mddev))
		md_cluster_stop(bitmap->mddev);

	/* Shouldn't be needed - but just in case.... */
	wait_event(bitmap->write_wait,
		   atomic_read(&bitmap->pending_writes) == 0);

	/* release the bitmap file  */
	bitmap_file_unmap(&bitmap->storage);

	bp = bitmap->counts.bp;
	pages = bitmap->counts.pages;

	/* free all allocated memory */

	if (bp) /* deallocate the page memory */
		for (k = 0; k < pages; k++)
			if (bp[k].map && !bp[k].hijacked)
				kfree(bp[k].map);
	kfree(bp);
	kfree(bitmap);
}

void bitmap_destroy(struct mddev *mddev)
{
	struct bitmap *bitmap = mddev->bitmap;

	if (!bitmap) /* there was no bitmap */
		return;

	mutex_lock(&mddev->bitmap_info.mutex);
	spin_lock(&mddev->lock);
	mddev->bitmap = NULL; /* disconnect from the md device */
	spin_unlock(&mddev->lock);
	mutex_unlock(&mddev->bitmap_info.mutex);
	if (mddev->thread)
		mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;

	if (bitmap->sysfs_can_clear)
		sysfs_put(bitmap->sysfs_can_clear);

	bitmap_free(bitmap);
}

/*
 * initialize the bitmap structure
 * if this returns an error, bitmap_destroy must be called to do clean up
 */
struct bitmap *bitmap_create(struct mddev *mddev, int slot)
{
	struct bitmap *bitmap;
	sector_t blocks = mddev->resync_max_sectors;
	struct file *file = mddev->bitmap_info.file;
	int err;
	struct kernfs_node *bm = NULL;

	BUILD_BUG_ON(sizeof(bitmap_super_t) != 256);

	BUG_ON(file && mddev->bitmap_info.offset);

	bitmap = kzalloc(sizeof(*bitmap), GFP_KERNEL);
	if (!bitmap)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&bitmap->counts.lock);
	atomic_set(&bitmap->pending_writes, 0);
	init_waitqueue_head(&bitmap->write_wait);
	init_waitqueue_head(&bitmap->overflow_wait);
	init_waitqueue_head(&bitmap->behind_wait);

	bitmap->mddev = mddev;
	bitmap->cluster_slot = slot;

	if (mddev->kobj.sd)
		bm = sysfs_get_dirent(mddev->kobj.sd, "bitmap");
	if (bm) {
		bitmap->sysfs_can_clear = sysfs_get_dirent(bm, "can_clear");
		sysfs_put(bm);
	} else
		bitmap->sysfs_can_clear = NULL;

	bitmap->storage.file = file;
	if (file) {
		get_file(file);
		/* As future accesses to this file will use bmap,
		 * and bypass the page cache, we must sync the file
		 * first.
		 */
		vfs_fsync(file, 1);
	}
	/* read superblock from bitmap file (this sets mddev->bitmap_info.chunksize) */
	if (!mddev->bitmap_info.external) {
		/*
		 * If 'MD_ARRAY_FIRST_USE' is set, then device-mapper is
		 * instructing us to create a new on-disk bitmap instance.
		 */
		if (test_and_clear_bit(MD_ARRAY_FIRST_USE, &mddev->flags))
			err = bitmap_new_disk_sb(bitmap);
		else
			err = bitmap_read_sb(bitmap);
	} else {
		err = 0;
		if (mddev->bitmap_info.chunksize == 0 ||
		    mddev->bitmap_info.daemon_sleep == 0)
			/* chunksize and time_base need to be
			 * set first. */
			err = -EINVAL;
	}
	if (err)
		goto error;

	bitmap->daemon_lastrun = jiffies;
	err = bitmap_resize(bitmap, blocks, mddev->bitmap_info.chunksize, 1);
	if (err)
		goto error;

	printk(KERN_INFO "created bitmap (%lu pages) for device %s\n",
	       bitmap->counts.pages, bmname(bitmap));

	err = test_bit(BITMAP_WRITE_ERROR, &bitmap->flags) ? -EIO : 0;
	if (err)
		goto error;
	return bitmap;
 error:
	bitmap_free(bitmap);
	return ERR_PTR(err);
}
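
/*
 * Sketch of the usual calling sequence (it mirrors location_store()
 * later in this file); error handling elided:
 *
 *	struct bitmap *bitmap = bitmap_create(mddev, -1);
 *	if (IS_ERR(bitmap))
 *		return PTR_ERR(bitmap);
 *	mddev->bitmap = bitmap;
 *	err = bitmap_load(mddev);
 *	if (err)
 *		bitmap_destroy(mddev);
 */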

int bitmap_load(struct mddev *mddev)
{
	int err = 0;
	sector_t start = 0;
	sector_t sector = 0;
	struct bitmap *bitmap = mddev->bitmap;

	if (!bitmap)
		goto out;

	/* Clear out old bitmap info first:  Either there is none, or we
	 * are resuming after someone else has possibly changed things,
	 * so we should forget old cached info.
	 * All chunks should be clean, but some might need_sync.
	 */
	while (sector < mddev->resync_max_sectors) {
		sector_t blocks;
		bitmap_start_sync(bitmap, sector, &blocks, 0);
		sector += blocks;
	}
	bitmap_close_sync(bitmap);

	if (mddev->degraded == 0
	    || bitmap->events_cleared == mddev->events)
		/* no need to keep dirty bits to optimise a
		 * re-add of a missing device */
		start = mddev->recovery_cp;

	mutex_lock(&mddev->bitmap_info.mutex);
	err = bitmap_init_from_disk(bitmap, start);
	mutex_unlock(&mddev->bitmap_info.mutex);

	if (err)
		goto out;
	clear_bit(BITMAP_STALE, &bitmap->flags);

	/* Kick recovery in case any bits were set */
	set_bit(MD_RECOVERY_NEEDED, &bitmap->mddev->recovery);

	mddev->thread->timeout = mddev->bitmap_info.daemon_sleep;
	md_wakeup_thread(mddev->thread);

	bitmap_update_sb(bitmap);

	if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags))
		err = -EIO;
out:
	return err;
}
EXPORT_SYMBOL_GPL(bitmap_load);
/* Loads the bitmap associated with slot and copies the resync information
 * to our bitmap
 */
int bitmap_copy_from_slot(struct mddev *mddev, int slot,
		sector_t *low, sector_t *high, bool clear_bits)
{
	int rv = 0, i, j;
	sector_t block, lo = 0, hi = 0;
	struct bitmap_counts *counts;
	struct bitmap *bitmap = bitmap_create(mddev, slot);

	if (IS_ERR(bitmap))
		return PTR_ERR(bitmap);

	rv = bitmap_read_sb(bitmap);
	if (rv)
		goto err;

	rv = bitmap_init_from_disk(bitmap, 0);
	if (rv)
		goto err;

	counts = &bitmap->counts;
	for (j = 0; j < counts->chunks; j++) {
		block = (sector_t)j << counts->chunkshift;
		if (bitmap_file_test_bit(bitmap, block)) {
			if (!lo)
				lo = block;
			hi = block;
			bitmap_file_clear_bit(bitmap, block);
			bitmap_set_memory_bits(mddev->bitmap, block, 1);
			bitmap_file_set_bit(mddev->bitmap, block);
		}
	}

	if (clear_bits) {
		bitmap_update_sb(bitmap);
		/* Setting this for the ev_page should be enough.
		 * And we do not require both write_all and PAGE_DIRTY either
		 */
		for (i = 0; i < bitmap->storage.file_pages; i++)
			set_page_attr(bitmap, i, BITMAP_PAGE_DIRTY);
		bitmap_write_all(bitmap);
		bitmap_unplug(bitmap);
	}
	*low = lo;
	*high = hi;
err:
	bitmap_free(bitmap);
	return rv;
}
EXPORT_SYMBOL_GPL(bitmap_copy_from_slot);
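
/*
 * Rough usage sketch: md-cluster recovery uses this to take over the
 * write-intent bits of a failed node's slot (the exact caller logic
 * lives in md-cluster.c and may differ in detail):
 *
 *	sector_t lo, hi;
 *	if (!bitmap_copy_from_slot(mddev, dead_slot, &lo, &hi, true) &&
 *	    hi > 0) {
 *		if (lo < mddev->recovery_cp)
 *			mddev->recovery_cp = lo;
 *		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
 *	}
 */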


void bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
{
	unsigned long chunk_kb;
	struct bitmap_counts *counts;

	if (!bitmap)
		return;

	counts = &bitmap->counts;

	chunk_kb = bitmap->mddev->bitmap_info.chunksize >> 10;
	seq_printf(seq, "bitmap: %lu/%lu pages [%luKB], "
		   "%lu%s chunk",
		   counts->pages - counts->missing_pages,
		   counts->pages,
		   (counts->pages - counts->missing_pages)
		   << (PAGE_SHIFT - 10),
		   chunk_kb ? chunk_kb : bitmap->mddev->bitmap_info.chunksize,
		   chunk_kb ? "KB" : "B");
	if (bitmap->storage.file) {
		seq_printf(seq, ", file: ");
		seq_path(seq, &bitmap->storage.file->f_path, " \t\n");
	}

	seq_printf(seq, "\n");
}
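
/*
 * Example of the /proc/mdstat line the above produces (values are
 * hypothetical, assuming 4KiB pages and a 64KiB chunk):
 *
 *	bitmap: 3/15 pages [12KB], 64KB chunk, file: /var/md0-bitmap
 */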

int bitmap_resize(struct bitmap *bitmap, sector_t blocks,
		  int chunksize, int init)
{
	/* If chunksize is 0, choose an appropriate chunk size.
	 * Then possibly allocate new storage space.
	 * Then quiesce, copy bits, replace bitmap, and re-start
	 *
	 * This function is called both to set up the initial bitmap
	 * and to resize the bitmap while the array is active.
	 * If this happens as a result of the array being resized,
	 * chunksize will be zero, and we need to choose a suitable
	 * chunksize, otherwise we use what we are given.
	 */
	struct bitmap_storage store;
	struct bitmap_counts old_counts;
	unsigned long chunks;
	sector_t block;
	sector_t old_blocks, new_blocks;
	int chunkshift;
	int ret = 0;
	long pages;
	struct bitmap_page *new_bp;

	if (chunksize == 0) {
		/* If there is enough space, leave the chunk size unchanged,
		 * else increase by factor of two until there is enough space.
		 */
		long bytes;
		long space = bitmap->mddev->bitmap_info.space;

		if (space == 0) {
			/* We don't know how much space there is, so limit
			 * to current size - in sectors.
			 */
			bytes = DIV_ROUND_UP(bitmap->counts.chunks, 8);
			if (!bitmap->mddev->bitmap_info.external)
				bytes += sizeof(bitmap_super_t);
			space = DIV_ROUND_UP(bytes, 512);
			bitmap->mddev->bitmap_info.space = space;
		}
		chunkshift = bitmap->counts.chunkshift;
		chunkshift--;
		do {
			/* 'chunkshift' is shift from block size to chunk size */
			chunkshift++;
			chunks = DIV_ROUND_UP_SECTOR_T(blocks, 1 << chunkshift);
			bytes = DIV_ROUND_UP(chunks, 8);
			if (!bitmap->mddev->bitmap_info.external)
				bytes += sizeof(bitmap_super_t);
		} while (bytes > (space << 9));
	} else
		chunkshift = ffz(~chunksize) - BITMAP_BLOCK_SHIFT;

	chunks = DIV_ROUND_UP_SECTOR_T(blocks, 1 << chunkshift);
	memset(&store, 0, sizeof(store));
	if (bitmap->mddev->bitmap_info.offset || bitmap->mddev->bitmap_info.file)
		ret = bitmap_storage_alloc(&store, chunks,
					   !bitmap->mddev->bitmap_info.external,
					   bitmap->cluster_slot);
	if (ret)
		goto err;

	pages = DIV_ROUND_UP(chunks, PAGE_COUNTER_RATIO);

	new_bp = kzalloc(pages * sizeof(*new_bp), GFP_KERNEL);
	ret = -ENOMEM;
	if (!new_bp) {
		bitmap_file_unmap(&store);
		goto err;
	}

	if (!init)
		bitmap->mddev->pers->quiesce(bitmap->mddev, 1);

	store.file = bitmap->storage.file;
	bitmap->storage.file = NULL;

	if (store.sb_page && bitmap->storage.sb_page)
		memcpy(page_address(store.sb_page),
		       page_address(bitmap->storage.sb_page),
		       sizeof(bitmap_super_t));
	bitmap_file_unmap(&bitmap->storage);
	bitmap->storage = store;

	old_counts = bitmap->counts;
	bitmap->counts.bp = new_bp;
	bitmap->counts.pages = pages;
	bitmap->counts.missing_pages = pages;
	bitmap->counts.chunkshift = chunkshift;
	bitmap->counts.chunks = chunks;
	bitmap->mddev->bitmap_info.chunksize = 1 << (chunkshift +
						     BITMAP_BLOCK_SHIFT);

	blocks = min(old_counts.chunks << old_counts.chunkshift,
		     chunks << chunkshift);

	spin_lock_irq(&bitmap->counts.lock);
	for (block = 0; block < blocks; ) {
		bitmap_counter_t *bmc_old, *bmc_new;
		int set;

		bmc_old = bitmap_get_counter(&old_counts, block,
					     &old_blocks, 0);
		set = bmc_old && NEEDED(*bmc_old);

		if (set) {
			bmc_new = bitmap_get_counter(&bitmap->counts, block,
						     &new_blocks, 1);
			if (*bmc_new == 0) {
				/* need to set on-disk bits too. */
				sector_t end = block + new_blocks;
				sector_t start = block >> chunkshift;
				start <<= chunkshift;
				while (start < end) {
					bitmap_file_set_bit(bitmap, block);
					start += 1 << chunkshift;
				}
				*bmc_new = 2;
				bitmap_count_page(&bitmap->counts,
						  block, 1);
				bitmap_set_pending(&bitmap->counts,
						   block);
			}
			*bmc_new |= NEEDED_MASK;
			if (new_blocks < old_blocks)
				old_blocks = new_blocks;
		}
		block += old_blocks;
	}

	if (!init) {
		int i;
		while (block < (chunks << chunkshift)) {
			bitmap_counter_t *bmc;
			bmc = bitmap_get_counter(&bitmap->counts, block,
						 &new_blocks, 1);
			if (bmc) {
				/* new space.  It needs to be resynced, so
				 * we set NEEDED_MASK.
				 */
				if (*bmc == 0) {
					*bmc = NEEDED_MASK | 2;
					bitmap_count_page(&bitmap->counts,
							  block, 1);
					bitmap_set_pending(&bitmap->counts,
							   block);
				}
			}
			block += new_blocks;
		}
		for (i = 0; i < bitmap->storage.file_pages; i++)
			set_page_attr(bitmap, i, BITMAP_PAGE_DIRTY);
	}
	spin_unlock_irq(&bitmap->counts.lock);

	if (!init) {
		bitmap_unplug(bitmap);
		bitmap->mddev->pers->quiesce(bitmap->mddev, 0);
	}
	ret = 0;
err:
	return ret;
}
EXPORT_SYMBOL_GPL(bitmap_resize);
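
/*
 * Worked example of the chunk-size selection above (numbers assumed):
 * with blocks = 2^32 sectors (2TiB) and bitmap_info.space = 128 sectors
 * (64KiB), the loop raises chunkshift until the bitmap fits.  At
 * chunkshift = 14, chunks = 2^32 >> 14 = 262144, needing
 * DIV_ROUND_UP(262144, 8) + sizeof(bitmap_super_t) = 32768 + 256 bytes,
 * which fits in 128 << 9 = 65536 bytes; the resulting chunksize is
 * 1 << (14 + BITMAP_BLOCK_SHIFT) = 8MiB.
 */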

static ssize_t
location_show(struct mddev *mddev, char *page)
{
	ssize_t len;
	if (mddev->bitmap_info.file)
		len = sprintf(page, "file");
	else if (mddev->bitmap_info.offset)
		len = sprintf(page, "%+lld", (long long)mddev->bitmap_info.offset);
	else
		len = sprintf(page, "none");
	len += sprintf(page+len, "\n");
	return len;
}

static ssize_t
location_store(struct mddev *mddev, const char *buf, size_t len)
{

	if (mddev->pers) {
		if (!mddev->pers->quiesce)
			return -EBUSY;
		if (mddev->recovery || mddev->sync_thread)
			return -EBUSY;
	}

	if (mddev->bitmap || mddev->bitmap_info.file ||
	    mddev->bitmap_info.offset) {
		/* bitmap already configured.  Only option is to clear it */
		if (strncmp(buf, "none", 4) != 0)
			return -EBUSY;
		if (mddev->pers) {
			mddev->pers->quiesce(mddev, 1);
			bitmap_destroy(mddev);
			mddev->pers->quiesce(mddev, 0);
		}
		mddev->bitmap_info.offset = 0;
		if (mddev->bitmap_info.file) {
			struct file *f = mddev->bitmap_info.file;
			mddev->bitmap_info.file = NULL;
			fput(f);
		}
	} else {
		/* No bitmap, OK to set a location */
		long long offset;
		if (strncmp(buf, "none", 4) == 0)
			/* nothing to be done */;
		else if (strncmp(buf, "file:", 5) == 0) {
			/* Not supported yet */
			return -EINVAL;
		} else {
			int rv;
			if (buf[0] == '+')
				rv = kstrtoll(buf+1, 10, &offset);
			else
				rv = kstrtoll(buf, 10, &offset);
			if (rv)
				return rv;
			if (offset == 0)
				return -EINVAL;
			if (mddev->bitmap_info.external == 0 &&
			    mddev->major_version == 0 &&
			    offset != mddev->bitmap_info.default_offset)
				return -EINVAL;
			mddev->bitmap_info.offset = offset;
			if (mddev->pers) {
				struct bitmap *bitmap;
				mddev->pers->quiesce(mddev, 1);
				bitmap = bitmap_create(mddev, -1);
				if (IS_ERR(bitmap))
					rv = PTR_ERR(bitmap);
				else {
					mddev->bitmap = bitmap;
					rv = bitmap_load(mddev);
					if (rv) {
						bitmap_destroy(mddev);
						mddev->bitmap_info.offset = 0;
					}
				}
				mddev->pers->quiesce(mddev, 0);
				if (rv)
					return rv;
			}
		}
	}
	if (!mddev->external) {
		/* Ensure new bitmap info is stored in
		 * metadata promptly.
		 */
		set_bit(MD_CHANGE_DEVS, &mddev->flags);
		md_wakeup_thread(mddev->thread);
	}
	return len;
}

static struct md_sysfs_entry bitmap_location =
__ATTR(location, S_IRUGO|S_IWUSR, location_show, location_store);
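
/*
 * Illustrative shell usage for this attribute (array name and offset
 * value assumed):
 *
 *	echo +8 > /sys/block/md0/md/bitmap/location	# internal bitmap at
 *							# +8 sectors from the sb
 *	echo none > /sys/block/md0/md/bitmap/location	# tear the bitmap down
 */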

/* 'bitmap/space' is the space available at 'location' for the
 * bitmap.  This allows the kernel to know when it is safe to
 * resize the bitmap to match a resized array.
 */
static ssize_t
space_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%lu\n", mddev->bitmap_info.space);
}

static ssize_t
space_store(struct mddev *mddev, const char *buf, size_t len)
{
	unsigned long sectors;
	int rv;

	rv = kstrtoul(buf, 10, &sectors);
	if (rv)
		return rv;

	if (sectors == 0)
		return -EINVAL;

	if (mddev->bitmap &&
	    sectors < (mddev->bitmap->storage.bytes + 511) >> 9)
		return -EFBIG; /* Bitmap is too big for this small space */

	/* could make sure it isn't too big, but that isn't really
	 * needed - user-space should be careful.
	 */
	mddev->bitmap_info.space = sectors;
	return len;
}

static struct md_sysfs_entry bitmap_space =
__ATTR(space, S_IRUGO|S_IWUSR, space_show, space_store);

static ssize_t
timeout_show(struct mddev *mddev, char *page)
{
	ssize_t len;
	unsigned long secs = mddev->bitmap_info.daemon_sleep / HZ;
	unsigned long jifs = mddev->bitmap_info.daemon_sleep % HZ;
	len = sprintf(page, "%lu", secs);
	if (jifs)
		len += sprintf(page+len, ".%03u", jiffies_to_msecs(jifs));
	len += sprintf(page+len, "\n");
	return len;
}

static ssize_t
timeout_store(struct mddev *mddev, const char *buf, size_t len)
{
	/* timeout can be set at any time */
	unsigned long timeout;
	int rv = strict_strtoul_scaled(buf, &timeout, 4);
	if (rv)
		return rv;

	/* just to make sure we don't overflow... */
	if (timeout >= LONG_MAX / HZ)
		return -EINVAL;

	timeout = timeout * HZ / 10000;

	if (timeout >= MAX_SCHEDULE_TIMEOUT)
		timeout = MAX_SCHEDULE_TIMEOUT-1;
	if (timeout < 1)
		timeout = 1;
	mddev->bitmap_info.daemon_sleep = timeout;
	if (mddev->thread) {
		/* if thread->timeout is MAX_SCHEDULE_TIMEOUT, then
		 * the bitmap is all clean and we don't need to
		 * adjust the timeout right now
		 */
		if (mddev->thread->timeout < MAX_SCHEDULE_TIMEOUT) {
			mddev->thread->timeout = timeout;
			md_wakeup_thread(mddev->thread);
		}
	}
	return len;
}

static struct md_sysfs_entry bitmap_timeout =
__ATTR(time_base, S_IRUGO|S_IWUSR, timeout_show, timeout_store);
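
/*
 * Conversion example for timeout_store() (input value assumed): writing
 * "5.5" parses via strict_strtoul_scaled(buf, &timeout, 4) to 55000,
 * and 55000 * HZ / 10000 = 5.5*HZ jiffies, i.e. a 5.5 second daemon
 * sleep.
 */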

static ssize_t
backlog_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%lu\n", mddev->bitmap_info.max_write_behind);
}

static ssize_t
backlog_store(struct mddev *mddev, const char *buf, size_t len)
{
	unsigned long backlog;
	int rv = kstrtoul(buf, 10, &backlog);
	if (rv)
		return rv;
	if (backlog > COUNTER_MAX)
		return -EINVAL;
	mddev->bitmap_info.max_write_behind = backlog;
	return len;
}

static struct md_sysfs_entry bitmap_backlog =
__ATTR(backlog, S_IRUGO|S_IWUSR, backlog_show, backlog_store);

static ssize_t
chunksize_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%lu\n", mddev->bitmap_info.chunksize);
}

static ssize_t
chunksize_store(struct mddev *mddev, const char *buf, size_t len)
{
	/* Can only be changed when no bitmap is active */
	int rv;
	unsigned long csize;
	if (mddev->bitmap)
		return -EBUSY;
	rv = kstrtoul(buf, 10, &csize);
	if (rv)
		return rv;
	if (csize < 512 ||
	    !is_power_of_2(csize))
		return -EINVAL;
	mddev->bitmap_info.chunksize = csize;
	return len;
}

static struct md_sysfs_entry bitmap_chunksize =
__ATTR(chunksize, S_IRUGO|S_IWUSR, chunksize_show, chunksize_store);
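
/*
 * Example (value assumed): before any bitmap exists,
 *
 *	echo 65536 > /sys/block/md0/md/bitmap/chunksize
 *
 * selects 64KiB chunks; the value is in bytes and must be a power of
 * two no smaller than 512.
 */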

static ssize_t metadata_show(struct mddev *mddev, char *page)
{
	if (mddev_is_clustered(mddev))
		return sprintf(page, "clustered\n");
	return sprintf(page, "%s\n", (mddev->bitmap_info.external
				      ? "external" : "internal"));
}

static ssize_t metadata_store(struct mddev *mddev, const char *buf, size_t len)
{
	if (mddev->bitmap ||
	    mddev->bitmap_info.file ||
	    mddev->bitmap_info.offset)
		return -EBUSY;
	if (strncmp(buf, "external", 8) == 0)
		mddev->bitmap_info.external = 1;
	else if ((strncmp(buf, "internal", 8) == 0) ||
			(strncmp(buf, "clustered", 9) == 0))
		mddev->bitmap_info.external = 0;
	else
		return -EINVAL;
	return len;
}

static struct md_sysfs_entry bitmap_metadata =
__ATTR(metadata, S_IRUGO|S_IWUSR, metadata_show, metadata_store);

static ssize_t can_clear_show(struct mddev *mddev, char *page)
{
	int len;
	spin_lock(&mddev->lock);
	if (mddev->bitmap)
		len = sprintf(page, "%s\n", (mddev->bitmap->need_sync ?
					     "false" : "true"));
	else
		len = sprintf(page, "\n");
	spin_unlock(&mddev->lock);
	return len;
}

static ssize_t can_clear_store(struct mddev *mddev, const char *buf, size_t len)
{
	if (mddev->bitmap == NULL)
		return -ENOENT;
	if (strncmp(buf, "false", 5) == 0)
		mddev->bitmap->need_sync = 1;
	else if (strncmp(buf, "true", 4) == 0) {
		if (mddev->degraded)
			return -EBUSY;
		mddev->bitmap->need_sync = 0;
	} else
		return -EINVAL;
	return len;
}

static struct md_sysfs_entry bitmap_can_clear =
__ATTR(can_clear, S_IRUGO|S_IWUSR, can_clear_show, can_clear_store);

static ssize_t
behind_writes_used_show(struct mddev *mddev, char *page)
{
	ssize_t ret;
	spin_lock(&mddev->lock);
	if (mddev->bitmap == NULL)
		ret = sprintf(page, "0\n");
	else
		ret = sprintf(page, "%lu\n",
			      mddev->bitmap->behind_writes_used);
	spin_unlock(&mddev->lock);
	return ret;
}

static ssize_t
behind_writes_used_reset(struct mddev *mddev, const char *buf, size_t len)
{
	if (mddev->bitmap)
		mddev->bitmap->behind_writes_used = 0;
	return len;
}

static struct md_sysfs_entry max_backlog_used =
__ATTR(max_backlog_used, S_IRUGO | S_IWUSR,
       behind_writes_used_show, behind_writes_used_reset);

static struct attribute *md_bitmap_attrs[] = {
	&bitmap_location.attr,
	&bitmap_space.attr,
	&bitmap_timeout.attr,
	&bitmap_backlog.attr,
	&bitmap_chunksize.attr,
	&bitmap_metadata.attr,
	&bitmap_can_clear.attr,
	&max_backlog_used.attr,
	NULL
};
struct attribute_group md_bitmap_group = {
	.name = "bitmap",
	.attrs = md_bitmap_attrs,
};