bitmap.c 64.4 KB
Newer Older
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17
/*
 * bitmap.c two-level bitmap (C) Peter T. Breuer (ptb@ot.uc3m.es) 2003
 *
 * bitmap_create  - sets up the bitmap structure
 * bitmap_destroy - destroys the bitmap structure
 *
 * additions, Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.:
 * - added disk storage for bitmap
 * - changes to allow various bitmap chunk sizes
 */

/*
 * Still to do:
 *
 * flush after percent set rather than just time based. (maybe both).
 */

18
#include <linux/blkdev.h>
19 20 21 22 23 24 25 26 27 28
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/buffer_head.h>
29
#include <linux/seq_file.h>
30
#include "md.h"
31
#include "bitmap.h"
32

33
static inline char *bmname(struct bitmap *bitmap)
34 35 36 37 38 39 40 41 42 43 44 45 46 47
{
	return bitmap->mddev ? mdname(bitmap->mddev) : "mdX";
}

/*
 * check a page and, if necessary, allocate it (or hijack it if the alloc fails)
 *
 * 1) check to see if this page is allocated, if it's not then try to alloc
 * 2) if the alloc fails, set the page's hijacked flag so we'll use the
 *    page pointer directly as a counter
 *
 * if we find our page, we increment the page's refcount so that it stays
 * allocated while we're using it
 */
48
static int bitmap_checkpage(struct bitmap_counts *bitmap,
49
			    unsigned long page, int create)
50 51
__releases(bitmap->lock)
__acquires(bitmap->lock)
52 53 54 55
{
	unsigned char *mappage;

	if (page >= bitmap->pages) {
56 57 58 59
		/* This can happen if bitmap_start_sync goes beyond
		 * End-of-device while looking for a whole page.
		 * It is harmless.
		 */
60 61 62 63 64 65 66 67 68 69 70 71 72 73
		return -EINVAL;
	}

	if (bitmap->bp[page].hijacked) /* it's hijacked, don't try to alloc */
		return 0;

	if (bitmap->bp[page].map) /* page is already allocated, just return */
		return 0;

	if (!create)
		return -ENOENT;

	/* this page has not been allocated yet */

74
	spin_unlock_irq(&bitmap->lock);
75 76 77 78 79 80 81 82 83 84 85 86 87
	/* It is possible that this is being called inside a
	 * prepare_to_wait/finish_wait loop from raid5c:make_request().
	 * In general it is not permitted to sleep in that context as it
	 * can cause the loop to spin freely.
	 * That doesn't apply here as we can only reach this point
	 * once with any loop.
	 * When this function completes, either bp[page].map or
	 * bp[page].hijacked.  In either case, this function will
	 * abort before getting to this point again.  So there is
	 * no risk of a free-spin, and so it is safe to assert
	 * that sleeping here is allowed.
	 */
	sched_annotate_sleep();
88
	mappage = kzalloc(PAGE_SIZE, GFP_NOIO);
89 90 91
	spin_lock_irq(&bitmap->lock);

	if (mappage == NULL) {
92
		pr_debug("md/bitmap: map page allocation failed, hijacking\n");
93 94 95 96
		/* failed - set the hijacked flag so that we can use the
		 * pointer as a counter */
		if (!bitmap->bp[page].map)
			bitmap->bp[page].hijacked = 1;
97 98
	} else if (bitmap->bp[page].map ||
		   bitmap->bp[page].hijacked) {
99
		/* somebody beat us to getting the page */
100
		kfree(mappage);
101
		return 0;
102
	} else {
103

104
		/* no page was in place and we have one, so install it */
105

106 107 108
		bitmap->bp[page].map = mappage;
		bitmap->missing_pages--;
	}
109 110 111 112 113 114
	return 0;
}

/* if page is completely empty, put it back on the free list, or dealloc it */
/* if page was hijacked, unmark the flag so it might get alloced next time */
/* Note: lock should be held when calling this */
115
static void bitmap_checkfree(struct bitmap_counts *bitmap, unsigned long page)
116 117 118 119 120 121 122 123 124 125 126
{
	char *ptr;

	if (bitmap->bp[page].count) /* page is still busy */
		return;

	/* page is no longer in use, it can be released */

	if (bitmap->bp[page].hijacked) { /* page was hijacked, undo this now */
		bitmap->bp[page].hijacked = 0;
		bitmap->bp[page].map = NULL;
127 128 129 130 131
	} else {
		/* normal case, free the page */
		ptr = bitmap->bp[page].map;
		bitmap->bp[page].map = NULL;
		bitmap->missing_pages++;
132
		kfree(ptr);
133 134 135 136 137 138 139 140 141 142 143
	}
}

/*
 * bitmap file handling - read and write the bitmap file and its superblock
 */

/*
 * basic page I/O operations
 */

144
/* IO operations when bitmap is stored near all superblocks */
145 146 147
static int read_sb_page(struct mddev *mddev, loff_t offset,
			struct page *page,
			unsigned long index, int size)
148 149 150
{
	/* choose a good rdev and read the page from there */

151
	struct md_rdev *rdev;
152 153
	sector_t target;

N
NeilBrown 已提交
154
	rdev_for_each(rdev, mddev) {
155 156
		if (! test_bit(In_sync, &rdev->flags)
		    || test_bit(Faulty, &rdev->flags))
157 158
			continue;

J
Jonathan Brassow 已提交
159
		target = offset + index * (PAGE_SIZE/512);
160

161
		if (sync_page_io(rdev, target,
162
				 roundup(size, bdev_logical_block_size(rdev->bdev)),
J
Jonathan Brassow 已提交
163
				 page, READ, true)) {
164
			page->index = index;
165
			return 0;
166 167
		}
	}
168
	return -EIO;
169 170
}

171
static struct md_rdev *next_active_rdev(struct md_rdev *rdev, struct mddev *mddev)
172 173 174 175 176 177 178
{
	/* Iterate the disks of an mddev, using rcu to protect access to the
	 * linked list, and raising the refcount of devices we return to ensure
	 * they don't disappear while in use.
	 * As devices are only added or removed when raid_disk is < 0 and
	 * nr_pending is 0 and In_sync is clear, the entries we return will
	 * still be in the same position on the list when we re-enter
179
	 * list_for_each_entry_continue_rcu.
180 181 182 183 184
	 *
	 * Note that if entered with 'rdev == NULL' to start at the
	 * beginning, we temporarily assign 'rdev' to an address which
	 * isn't really an rdev, but which can be used by
	 * list_for_each_entry_continue_rcu() to find the first entry.
185 186 187 188
	 */
	rcu_read_lock();
	if (rdev == NULL)
		/* start at the beginning */
189
		rdev = list_entry(&mddev->disks, struct md_rdev, same_set);
190 191 192 193
	else {
		/* release the previous rdev and start from there. */
		rdev_dec_pending(rdev, mddev);
	}
194
	list_for_each_entry_continue_rcu(rdev, &mddev->disks, same_set) {
195 196 197 198 199 200 201 202 203 204 205 206
		if (rdev->raid_disk >= 0 &&
		    !test_bit(Faulty, &rdev->flags)) {
			/* this is a usable devices */
			atomic_inc(&rdev->nr_pending);
			rcu_read_unlock();
			return rdev;
		}
	}
	rcu_read_unlock();
	return NULL;
}

207
static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait)
208
{
209
	struct md_rdev *rdev = NULL;
210
	struct block_device *bdev;
211
	struct mddev *mddev = bitmap->mddev;
212
	struct bitmap_storage *store = &bitmap->storage;
213 214 215 216
	int node_offset = 0;

	if (mddev_is_clustered(bitmap->mddev))
		node_offset = bitmap->cluster_slot * store->file_pages;
217

218
	while ((rdev = next_active_rdev(rdev, mddev)) != NULL) {
219 220
		int size = PAGE_SIZE;
		loff_t offset = mddev->bitmap_info.offset;
221 222 223

		bdev = (rdev->meta_bdev) ? rdev->meta_bdev : rdev->bdev;

224 225 226 227 228
		if (page->index == store->file_pages-1) {
			int last_page_size = store->bytes & (PAGE_SIZE-1);
			if (last_page_size == 0)
				last_page_size = PAGE_SIZE;
			size = roundup(last_page_size,
229
				       bdev_logical_block_size(bdev));
230
		}
231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270
		/* Just make sure we aren't corrupting data or
		 * metadata
		 */
		if (mddev->external) {
			/* Bitmap could be anywhere. */
			if (rdev->sb_start + offset + (page->index
						       * (PAGE_SIZE/512))
			    > rdev->data_offset
			    &&
			    rdev->sb_start + offset
			    < (rdev->data_offset + mddev->dev_sectors
			     + (PAGE_SIZE/512)))
				goto bad_alignment;
		} else if (offset < 0) {
			/* DATA  BITMAP METADATA  */
			if (offset
			    + (long)(page->index * (PAGE_SIZE/512))
			    + size/512 > 0)
				/* bitmap runs in to metadata */
				goto bad_alignment;
			if (rdev->data_offset + mddev->dev_sectors
			    > rdev->sb_start + offset)
				/* data runs in to bitmap */
				goto bad_alignment;
		} else if (rdev->sb_start < rdev->data_offset) {
			/* METADATA BITMAP DATA */
			if (rdev->sb_start
			    + offset
			    + page->index*(PAGE_SIZE/512) + size/512
			    > rdev->data_offset)
				/* bitmap runs in to data */
				goto bad_alignment;
		} else {
			/* DATA METADATA BITMAP - no problems */
		}
		md_super_write(mddev, rdev,
			       rdev->sb_start + offset
			       + page->index * (PAGE_SIZE/512),
			       size,
			       page);
271
	}
272 273

	if (wait)
274
		md_super_wait(mddev);
275
	return 0;
276 277 278

 bad_alignment:
	return -EINVAL;
279 280
}

281
static void bitmap_file_kick(struct bitmap *bitmap);
282
/*
283
 * write out a page to a file
284
 */
285
static void write_page(struct bitmap *bitmap, struct page *page, int wait)
286
{
287
	struct buffer_head *bh;
288

289
	if (bitmap->storage.file == NULL) {
290 291
		switch (write_sb_page(bitmap, page, wait)) {
		case -EINVAL:
292
			set_bit(BITMAP_WRITE_ERROR, &bitmap->flags);
293
		}
294
	} else {
295

296
		bh = page_buffers(page);
297

298 299 300 301
		while (bh && bh->b_blocknr) {
			atomic_inc(&bitmap->pending_writes);
			set_buffer_locked(bh);
			set_buffer_mapped(bh);
J
Jens Axboe 已提交
302
			submit_bh(WRITE | REQ_SYNC, bh);
303 304
			bh = bh->b_this_page;
		}
305

306
		if (wait)
307 308
			wait_event(bitmap->write_wait,
				   atomic_read(&bitmap->pending_writes)==0);
309
	}
310
	if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags))
311
		bitmap_file_kick(bitmap);
312 313 314 315 316
}

static void end_bitmap_write(struct buffer_head *bh, int uptodate)
{
	struct bitmap *bitmap = bh->b_private;
317

318 319
	if (!uptodate)
		set_bit(BITMAP_WRITE_ERROR, &bitmap->flags);
320 321 322
	if (atomic_dec_and_test(&bitmap->pending_writes))
		wake_up(&bitmap->write_wait);
}
323

324 325 326 327 328 329 330 331 332 333
/* copied from buffer.c */
static void
__clear_page_buffers(struct page *page)
{
	ClearPagePrivate(page);
	set_page_private(page, 0);
	page_cache_release(page);
}
static void free_buffers(struct page *page)
{
334
	struct buffer_head *bh;
335

336 337 338 339
	if (!PagePrivate(page))
		return;

	bh = page_buffers(page);
340 341 342 343
	while (bh) {
		struct buffer_head *next = bh->b_this_page;
		free_buffer_head(bh);
		bh = next;
344
	}
345 346
	__clear_page_buffers(page);
	put_page(page);
347 348
}

349 350 351 352 353 354 355
/* read a page from a file.
 * We both read the page, and attach buffers to the page to record the
 * address of each block (using bmap).  These addresses will be used
 * to write the block later, completely bypassing the filesystem.
 * This usage is similar to how swap files are handled, and allows us
 * to write to a file with no concerns of memory allocation failing.
 */
356 357 358 359
static int read_page(struct file *file, unsigned long index,
		     struct bitmap *bitmap,
		     unsigned long count,
		     struct page *page)
360
{
361
	int ret = 0;
A
Al Viro 已提交
362
	struct inode *inode = file_inode(file);
363 364
	struct buffer_head *bh;
	sector_t block;
365

366 367
	pr_debug("read bitmap file (%dB @ %llu)\n", (int)PAGE_SIZE,
		 (unsigned long long)index << PAGE_SHIFT);
368

369 370
	bh = alloc_page_buffers(page, 1<<inode->i_blkbits, 0);
	if (!bh) {
371
		ret = -ENOMEM;
372 373
		goto out;
	}
374 375 376 377 378 379 380 381 382
	attach_page_buffers(page, bh);
	block = index << (PAGE_SHIFT - inode->i_blkbits);
	while (bh) {
		if (count == 0)
			bh->b_blocknr = 0;
		else {
			bh->b_blocknr = bmap(inode, block);
			if (bh->b_blocknr == 0) {
				/* Cannot use this file! */
383
				ret = -EINVAL;
384 385 386 387 388 389 390 391 392 393
				goto out;
			}
			bh->b_bdev = inode->i_sb->s_bdev;
			if (count < (1<<inode->i_blkbits))
				count = 0;
			else
				count -= (1<<inode->i_blkbits);

			bh->b_end_io = end_bitmap_write;
			bh->b_private = bitmap;
394 395 396 397
			atomic_inc(&bitmap->pending_writes);
			set_buffer_locked(bh);
			set_buffer_mapped(bh);
			submit_bh(READ, bh);
398 399 400 401 402
		}
		block++;
		bh = bh->b_this_page;
	}
	page->index = index;
403 404 405

	wait_event(bitmap->write_wait,
		   atomic_read(&bitmap->pending_writes)==0);
406
	if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags))
407
		ret = -EIO;
408
out:
409 410
	if (ret)
		printk(KERN_ALERT "md: bitmap read error: (%dB @ %llu): %d\n",
411 412
			(int)PAGE_SIZE,
			(unsigned long long)index << PAGE_SHIFT,
413 414
			ret);
	return ret;
415 416 417 418 419 420 421
}

/*
 * bitmap file superblock operations
 */

/* update the event counter and sync the superblock to disk */
422
void bitmap_update_sb(struct bitmap *bitmap)
423 424 425 426
{
	bitmap_super_t *sb;

	if (!bitmap || !bitmap->mddev) /* no bitmap for this array */
427
		return;
428 429
	if (bitmap->mddev->bitmap_info.external)
		return;
430
	if (!bitmap->storage.sb_page) /* no superblock */
431
		return;
432
	sb = kmap_atomic(bitmap->storage.sb_page);
433
	sb->events = cpu_to_le64(bitmap->mddev->events);
434
	if (bitmap->mddev->events < bitmap->events_cleared)
435 436
		/* rocking back to read-only */
		bitmap->events_cleared = bitmap->mddev->events;
437 438
	sb->events_cleared = cpu_to_le64(bitmap->events_cleared);
	sb->state = cpu_to_le32(bitmap->flags);
439 440 441
	/* Just in case these have been changed via sysfs: */
	sb->daemon_sleep = cpu_to_le32(bitmap->mddev->bitmap_info.daemon_sleep/HZ);
	sb->write_behind = cpu_to_le32(bitmap->mddev->bitmap_info.max_write_behind);
442 443 444
	/* This might have been changed by a reshape */
	sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors);
	sb->chunksize = cpu_to_le32(bitmap->mddev->bitmap_info.chunksize);
G
Goldwyn Rodrigues 已提交
445
	sb->nodes = cpu_to_le32(bitmap->mddev->bitmap_info.nodes);
446 447
	sb->sectors_reserved = cpu_to_le32(bitmap->mddev->
					   bitmap_info.space);
448
	kunmap_atomic(sb);
449
	write_page(bitmap, bitmap->storage.sb_page, 1);
450 451 452 453 454 455 456
}

/* print out the bitmap file superblock */
void bitmap_print_sb(struct bitmap *bitmap)
{
	bitmap_super_t *sb;

457
	if (!bitmap || !bitmap->storage.sb_page)
458
		return;
459
	sb = kmap_atomic(bitmap->storage.sb_page);
460
	printk(KERN_DEBUG "%s: bitmap file superblock:\n", bmname(bitmap));
461 462 463
	printk(KERN_DEBUG "         magic: %08x\n", le32_to_cpu(sb->magic));
	printk(KERN_DEBUG "       version: %d\n", le32_to_cpu(sb->version));
	printk(KERN_DEBUG "          uuid: %08x.%08x.%08x.%08x\n",
464 465 466 467
					*(__u32 *)(sb->uuid+0),
					*(__u32 *)(sb->uuid+4),
					*(__u32 *)(sb->uuid+8),
					*(__u32 *)(sb->uuid+12));
468
	printk(KERN_DEBUG "        events: %llu\n",
469
			(unsigned long long) le64_to_cpu(sb->events));
470
	printk(KERN_DEBUG "events cleared: %llu\n",
471
			(unsigned long long) le64_to_cpu(sb->events_cleared));
472 473 474 475 476
	printk(KERN_DEBUG "         state: %08x\n", le32_to_cpu(sb->state));
	printk(KERN_DEBUG "     chunksize: %d B\n", le32_to_cpu(sb->chunksize));
	printk(KERN_DEBUG "  daemon sleep: %ds\n", le32_to_cpu(sb->daemon_sleep));
	printk(KERN_DEBUG "     sync size: %llu KB\n",
			(unsigned long long)le64_to_cpu(sb->sync_size)/2);
477
	printk(KERN_DEBUG "max write behind: %d\n", le32_to_cpu(sb->write_behind));
478
	kunmap_atomic(sb);
479 480
}

481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496
/*
 * bitmap_new_disk_sb
 * @bitmap
 *
 * This function is somewhat the reverse of bitmap_read_sb.  bitmap_read_sb
 * reads and verifies the on-disk bitmap superblock and populates bitmap_info.
 * This function verifies 'bitmap_info' and populates the on-disk bitmap
 * structure, which is to be written to disk.
 *
 * Returns: 0 on success, -Exxx on error
 */
static int bitmap_new_disk_sb(struct bitmap *bitmap)
{
	bitmap_super_t *sb;
	unsigned long chunksize, daemon_sleep, write_behind;

497
	bitmap->storage.sb_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
498 499
	if (bitmap->storage.sb_page == NULL)
		return -ENOMEM;
500
	bitmap->storage.sb_page->index = 0;
501

502
	sb = kmap_atomic(bitmap->storage.sb_page);
503 504 505 506 507 508 509

	sb->magic = cpu_to_le32(BITMAP_MAGIC);
	sb->version = cpu_to_le32(BITMAP_MAJOR_HI);

	chunksize = bitmap->mddev->bitmap_info.chunksize;
	BUG_ON(!chunksize);
	if (!is_power_of_2(chunksize)) {
510
		kunmap_atomic(sb);
511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539
		printk(KERN_ERR "bitmap chunksize not a power of 2\n");
		return -EINVAL;
	}
	sb->chunksize = cpu_to_le32(chunksize);

	daemon_sleep = bitmap->mddev->bitmap_info.daemon_sleep;
	if (!daemon_sleep ||
	    (daemon_sleep < 1) || (daemon_sleep > MAX_SCHEDULE_TIMEOUT)) {
		printk(KERN_INFO "Choosing daemon_sleep default (5 sec)\n");
		daemon_sleep = 5 * HZ;
	}
	sb->daemon_sleep = cpu_to_le32(daemon_sleep);
	bitmap->mddev->bitmap_info.daemon_sleep = daemon_sleep;

	/*
	 * FIXME: write_behind for RAID1.  If not specified, what
	 * is a good choice?  We choose COUNTER_MAX / 2 arbitrarily.
	 */
	write_behind = bitmap->mddev->bitmap_info.max_write_behind;
	if (write_behind > COUNTER_MAX)
		write_behind = COUNTER_MAX / 2;
	sb->write_behind = cpu_to_le32(write_behind);
	bitmap->mddev->bitmap_info.max_write_behind = write_behind;

	/* keep the array size field of the bitmap superblock up to date */
	sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors);

	memcpy(sb->uuid, bitmap->mddev->uuid, 16);

540
	set_bit(BITMAP_STALE, &bitmap->flags);
541
	sb->state = cpu_to_le32(bitmap->flags);
542 543
	bitmap->events_cleared = bitmap->mddev->events;
	sb->events_cleared = cpu_to_le64(bitmap->mddev->events);
544
	bitmap->mddev->bitmap_info.nodes = 0;
545

546
	kunmap_atomic(sb);
547 548 549 550

	return 0;
}

551 552 553 554 555
/* read the superblock from the bitmap file and initialize some bitmap fields */
static int bitmap_read_sb(struct bitmap *bitmap)
{
	char *reason = NULL;
	bitmap_super_t *sb;
556
	unsigned long chunksize, daemon_sleep, write_behind;
557
	unsigned long long events;
G
Goldwyn Rodrigues 已提交
558
	int nodes = 0;
559
	unsigned long sectors_reserved = 0;
560
	int err = -EINVAL;
561
	struct page *sb_page;
562
	loff_t offset = bitmap->mddev->bitmap_info.offset;
563

564
	if (!bitmap->storage.file && !bitmap->mddev->bitmap_info.offset) {
565 566 567
		chunksize = 128 * 1024 * 1024;
		daemon_sleep = 5 * HZ;
		write_behind = 0;
568
		set_bit(BITMAP_STALE, &bitmap->flags);
569 570 571
		err = 0;
		goto out_no_sb;
	}
572
	/* page 0 is the superblock, read it... */
573 574 575
	sb_page = alloc_page(GFP_KERNEL);
	if (!sb_page)
		return -ENOMEM;
576
	bitmap->storage.sb_page = sb_page;
577

578
re_read:
579 580
	/* If cluster_slot is set, the cluster is setup */
	if (bitmap->cluster_slot >= 0) {
581
		sector_t bm_blocks = bitmap->mddev->resync_max_sectors;
582

583 584
		sector_div(bm_blocks,
			   bitmap->mddev->bitmap_info.chunksize >> 9);
585 586 587
		/* bits to bytes */
		bm_blocks = ((bm_blocks+7) >> 3) + sizeof(bitmap_super_t);
		/* to 4k blocks */
588
		bm_blocks = DIV_ROUND_UP_SECTOR_T(bm_blocks, 4096);
589
		offset = bitmap->mddev->bitmap_info.offset + (bitmap->cluster_slot * (bm_blocks << 3));
590
		pr_info("%s:%d bm slot: %d offset: %llu\n", __func__, __LINE__,
591
			bitmap->cluster_slot, offset);
592 593
	}

594 595
	if (bitmap->storage.file) {
		loff_t isize = i_size_read(bitmap->storage.file->f_mapping->host);
596 597
		int bytes = isize > PAGE_SIZE ? PAGE_SIZE : isize;

598
		err = read_page(bitmap->storage.file, 0,
599
				bitmap, bytes, sb_page);
600
	} else {
601
		err = read_sb_page(bitmap->mddev,
602
				   offset,
603 604
				   sb_page,
				   0, sizeof(bitmap_super_t));
605
	}
606
	if (err)
607 608
		return err;

609
	err = -EINVAL;
610
	sb = kmap_atomic(sb_page);
611 612

	chunksize = le32_to_cpu(sb->chunksize);
613
	daemon_sleep = le32_to_cpu(sb->daemon_sleep) * HZ;
614
	write_behind = le32_to_cpu(sb->write_behind);
615
	sectors_reserved = le32_to_cpu(sb->sectors_reserved);
616 617 618 619 620 621 622 623 624 625
	/* XXX: This is a hack to ensure that we don't use clustering
	 *  in case:
	 *	- dm-raid is in use and
	 *	- the nodes written in bitmap_sb is erroneous.
	 */
	if (!bitmap->mddev->sync_super) {
		nodes = le32_to_cpu(sb->nodes);
		strlcpy(bitmap->mddev->bitmap_info.cluster_name,
				sb->cluster_name, 64);
	}
626 627 628 629

	/* verify that the bitmap-specific fields are valid */
	if (sb->magic != cpu_to_le32(BITMAP_MAGIC))
		reason = "bad magic";
630 631
	else if (le32_to_cpu(sb->version) < BITMAP_MAJOR_LO ||
		 le32_to_cpu(sb->version) > BITMAP_MAJOR_HI)
632
		reason = "unrecognized superblock version";
633
	else if (chunksize < 512)
634
		reason = "bitmap chunksize too small";
J
Jonathan Brassow 已提交
635
	else if (!is_power_of_2(chunksize))
636
		reason = "bitmap chunksize not a power of 2";
637
	else if (daemon_sleep < 1 || daemon_sleep > MAX_SCHEDULE_TIMEOUT)
638
		reason = "daemon sleep period out of range";
639 640
	else if (write_behind > COUNTER_MAX)
		reason = "write-behind limit out of range (0 - 16383)";
641 642 643 644 645 646 647 648 649
	if (reason) {
		printk(KERN_INFO "%s: invalid bitmap file superblock: %s\n",
			bmname(bitmap), reason);
		goto out;
	}

	/* keep the array size field of the bitmap superblock up to date */
	sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors);

650 651 652 653 654 655 656 657 658 659 660 661
	if (bitmap->mddev->persistent) {
		/*
		 * We have a persistent array superblock, so compare the
		 * bitmap's UUID and event counter to the mddev's
		 */
		if (memcmp(sb->uuid, bitmap->mddev->uuid, 16)) {
			printk(KERN_INFO
			       "%s: bitmap superblock UUID mismatch\n",
			       bmname(bitmap));
			goto out;
		}
		events = le64_to_cpu(sb->events);
662
		if (!nodes && (events < bitmap->mddev->events)) {
663 664 665 666 667
			printk(KERN_INFO
			       "%s: bitmap file is out of date (%llu < %llu) "
			       "-- forcing full recovery\n",
			       bmname(bitmap), events,
			       (unsigned long long) bitmap->mddev->events);
668
			set_bit(BITMAP_STALE, &bitmap->flags);
669
		}
670
	}
671

672
	/* assign fields using values from superblock */
673
	bitmap->flags |= le32_to_cpu(sb->state);
674
	if (le32_to_cpu(sb->version) == BITMAP_MAJOR_HOSTENDIAN)
675
		set_bit(BITMAP_HOSTENDIAN, &bitmap->flags);
676
	bitmap->events_cleared = le64_to_cpu(sb->events_cleared);
G
Goldwyn Rodrigues 已提交
677
	strlcpy(bitmap->mddev->bitmap_info.cluster_name, sb->cluster_name, 64);
678
	err = 0;
679

680
out:
681
	kunmap_atomic(sb);
682 683
	/* Assiging chunksize is required for "re_read" */
	bitmap->mddev->bitmap_info.chunksize = chunksize;
684
	if (err == 0 && nodes && (bitmap->cluster_slot < 0)) {
685 686 687 688 689 690 691 692 693 694 695
		err = md_setup_cluster(bitmap->mddev, nodes);
		if (err) {
			pr_err("%s: Could not setup cluster service (%d)\n",
					bmname(bitmap), err);
			goto out_no_sb;
		}
		bitmap->cluster_slot = md_cluster_ops->slot_number(bitmap->mddev);
		goto re_read;
	}


696
out_no_sb:
697
	if (test_bit(BITMAP_STALE, &bitmap->flags))
698 699 700 701
		bitmap->events_cleared = bitmap->mddev->events;
	bitmap->mddev->bitmap_info.chunksize = chunksize;
	bitmap->mddev->bitmap_info.daemon_sleep = daemon_sleep;
	bitmap->mddev->bitmap_info.max_write_behind = write_behind;
G
Goldwyn Rodrigues 已提交
702
	bitmap->mddev->bitmap_info.nodes = nodes;
703 704 705
	if (bitmap->mddev->bitmap_info.space == 0 ||
	    bitmap->mddev->bitmap_info.space > sectors_reserved)
		bitmap->mddev->bitmap_info.space = sectors_reserved;
706
	if (err) {
707
		bitmap_print_sb(bitmap);
708
		if (bitmap->cluster_slot < 0)
709 710
			md_cluster_stop(bitmap->mddev);
	}
711 712 713 714 715 716 717
	return err;
}

/*
 * general bitmap file operations
 */

718 719 720 721 722 723
/*
 * on-disk bitmap:
 *
 * Use one bit per "chunk" (block set). We do the disk I/O on the bitmap
 * file a page at a time. There's a superblock at the start of the file.
 */
724
/* calculate the index of the page that contains this bit */
725 726
static inline unsigned long file_page_index(struct bitmap_storage *store,
					    unsigned long chunk)
727
{
728
	if (store->sb_page)
729 730
		chunk += sizeof(bitmap_super_t) << 3;
	return chunk >> PAGE_BIT_SHIFT;
731 732 733
}

/* calculate the (bit) offset of this bit within a page */
734 735
static inline unsigned long file_page_offset(struct bitmap_storage *store,
					     unsigned long chunk)
736
{
737
	if (store->sb_page)
738 739
		chunk += sizeof(bitmap_super_t) << 3;
	return chunk & (PAGE_BITS - 1);
740 741 742 743 744 745
}

/*
 * return a pointer to the page in the filemap that contains the given bit
 *
 */
746
static inline struct page *filemap_get_page(struct bitmap_storage *store,
747
					    unsigned long chunk)
748
{
749
	if (file_page_index(store, chunk) >= store->file_pages)
750
		return NULL;
751
	return store->filemap[file_page_index(store, chunk)];
752 753
}

754
static int bitmap_storage_alloc(struct bitmap_storage *store,
755 756
				unsigned long chunks, int with_super,
				int slot_number)
757
{
758
	int pnum, offset = 0;
759 760 761 762 763 764 765 766
	unsigned long num_pages;
	unsigned long bytes;

	bytes = DIV_ROUND_UP(chunks, 8);
	if (with_super)
		bytes += sizeof(bitmap_super_t);

	num_pages = DIV_ROUND_UP(bytes, PAGE_SIZE);
767
	offset = slot_number * (num_pages - 1);
768 769 770 771 772 773 774

	store->filemap = kmalloc(sizeof(struct page *)
				 * num_pages, GFP_KERNEL);
	if (!store->filemap)
		return -ENOMEM;

	if (with_super && !store->sb_page) {
775
		store->sb_page = alloc_page(GFP_KERNEL|__GFP_ZERO);
776 777 778
		if (store->sb_page == NULL)
			return -ENOMEM;
	}
779

780 781 782 783
	pnum = 0;
	if (store->sb_page) {
		store->filemap[0] = store->sb_page;
		pnum = 1;
784
		store->sb_page->index = offset;
785
	}
786

787
	for ( ; pnum < num_pages; pnum++) {
788
		store->filemap[pnum] = alloc_page(GFP_KERNEL|__GFP_ZERO);
789 790 791 792
		if (!store->filemap[pnum]) {
			store->file_pages = pnum;
			return -ENOMEM;
		}
793
		store->filemap[pnum]->index = pnum + offset;
794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809
	}
	store->file_pages = pnum;

	/* We need 4 bits per page, rounded up to a multiple
	 * of sizeof(unsigned long) */
	store->filemap_attr = kzalloc(
		roundup(DIV_ROUND_UP(num_pages*4, 8), sizeof(unsigned long)),
		GFP_KERNEL);
	if (!store->filemap_attr)
		return -ENOMEM;

	store->bytes = bytes;

	return 0;
}

810
static void bitmap_file_unmap(struct bitmap_storage *store)
811 812 813
{
	struct page **map, *sb_page;
	int pages;
814
	struct file *file;
815

816
	file = store->file;
817 818 819
	map = store->filemap;
	pages = store->file_pages;
	sb_page = store->sb_page;
820 821

	while (pages--)
822
		if (map[pages] != sb_page) /* 0 is sb_page, release it below */
823
			free_buffers(map[pages]);
824
	kfree(map);
825
	kfree(store->filemap_attr);
826

827 828
	if (sb_page)
		free_buffers(sb_page);
829

830
	if (file) {
A
Al Viro 已提交
831
		struct inode *inode = file_inode(file);
832
		invalidate_mapping_pages(inode->i_mapping, 0, -1);
833
		fput(file);
834
	}
835 836 837 838 839 840 841 842 843 844 845
}

/*
 * bitmap_file_kick - if an error occurs while manipulating the bitmap file
 * then it is no longer reliable, so we stop using it and we mark the file
 * as failed in the superblock
 */
static void bitmap_file_kick(struct bitmap *bitmap)
{
	char *path, *ptr = NULL;

846
	if (!test_and_set_bit(BITMAP_STALE, &bitmap->flags)) {
847
		bitmap_update_sb(bitmap);
848

849
		if (bitmap->storage.file) {
850 851
			path = kmalloc(PAGE_SIZE, GFP_KERNEL);
			if (path)
M
Miklos Szeredi 已提交
852
				ptr = file_path(bitmap->storage.file,
853
					     path, PAGE_SIZE);
C
Christoph Hellwig 已提交
854

855 856
			printk(KERN_ALERT
			      "%s: kicking failed bitmap file %s from array!\n",
C
Christoph Hellwig 已提交
857
			      bmname(bitmap), IS_ERR(ptr) ? "" : ptr);
858

859 860 861 862 863
			kfree(path);
		} else
			printk(KERN_ALERT
			       "%s: disabling internal bitmap due to errors\n",
			       bmname(bitmap));
864
	}
865 866 867
}

enum bitmap_page_attr {
868
	BITMAP_PAGE_DIRTY = 0,     /* there are set bits that need to be synced */
869 870
	BITMAP_PAGE_PENDING = 1,   /* there are bits that are being cleaned.
				    * i.e. counter is 1 or 2. */
871
	BITMAP_PAGE_NEEDWRITE = 2, /* there are cleared bits that need to be synced */
872 873
};

874 875
static inline void set_page_attr(struct bitmap *bitmap, int pnum,
				 enum bitmap_page_attr attr)
876
{
877
	set_bit((pnum<<2) + attr, bitmap->storage.filemap_attr);
878 879
}

880 881
static inline void clear_page_attr(struct bitmap *bitmap, int pnum,
				   enum bitmap_page_attr attr)
882
{
883
	clear_bit((pnum<<2) + attr, bitmap->storage.filemap_attr);
884 885
}

886 887
static inline int test_page_attr(struct bitmap *bitmap, int pnum,
				 enum bitmap_page_attr attr)
888
{
889
	return test_bit((pnum<<2) + attr, bitmap->storage.filemap_attr);
890 891
}

892 893 894 895 896 897
static inline int test_and_clear_page_attr(struct bitmap *bitmap, int pnum,
					   enum bitmap_page_attr attr)
{
	return test_and_clear_bit((pnum<<2) + attr,
				  bitmap->storage.filemap_attr);
}
898 899 900 901 902 903 904 905 906 907
/*
 * bitmap_file_set_bit -- called before performing a write to the md device
 * to set (and eventually sync) a particular bit in the bitmap file
 *
 * we set the bit immediately, then we record the page number so that
 * when an unplug occurs, we can flush the dirty pages out to disk
 */
static void bitmap_file_set_bit(struct bitmap *bitmap, sector_t block)
{
	unsigned long bit;
908
	struct page *page;
909
	void *kaddr;
910
	unsigned long chunk = block >> bitmap->counts.chunkshift;
911

912
	page = filemap_get_page(&bitmap->storage, chunk);
913 914
	if (!page)
		return;
915
	bit = file_page_offset(&bitmap->storage, chunk);
916

917
	/* set the bit */
918
	kaddr = kmap_atomic(page);
919
	if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags))
920 921
		set_bit(bit, kaddr);
	else
922
		set_bit_le(bit, kaddr);
923
	kunmap_atomic(kaddr);
924
	pr_debug("set file bit %lu page %lu\n", bit, page->index);
925
	/* record page number so it gets flushed to disk when unplug occurs */
926
	set_page_attr(bitmap, page->index, BITMAP_PAGE_DIRTY);
927 928
}

929 930 931 932 933
static void bitmap_file_clear_bit(struct bitmap *bitmap, sector_t block)
{
	unsigned long bit;
	struct page *page;
	void *paddr;
934
	unsigned long chunk = block >> bitmap->counts.chunkshift;
935

936
	page = filemap_get_page(&bitmap->storage, chunk);
937 938
	if (!page)
		return;
939
	bit = file_page_offset(&bitmap->storage, chunk);
940
	paddr = kmap_atomic(page);
941
	if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags))
942 943
		clear_bit(bit, paddr);
	else
944
		clear_bit_le(bit, paddr);
945
	kunmap_atomic(paddr);
946 947
	if (!test_page_attr(bitmap, page->index, BITMAP_PAGE_NEEDWRITE)) {
		set_page_attr(bitmap, page->index, BITMAP_PAGE_PENDING);
948 949 950 951
		bitmap->allclean = 0;
	}
}

952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973
static int bitmap_file_test_bit(struct bitmap *bitmap, sector_t block)
{
	unsigned long bit;
	struct page *page;
	void *paddr;
	unsigned long chunk = block >> bitmap->counts.chunkshift;
	int set = 0;

	page = filemap_get_page(&bitmap->storage, chunk);
	if (!page)
		return -EINVAL;
	bit = file_page_offset(&bitmap->storage, chunk);
	paddr = kmap_atomic(page);
	if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags))
		set = test_bit(bit, paddr);
	else
		set = test_bit_le(bit, paddr);
	kunmap_atomic(paddr);
	return set;
}


974 975 976
/* this gets called when the md device is ready to unplug its underlying
 * (slave) device queues -- before we let any writes go down, we need to
 * sync the dirty pages of the bitmap file to disk */
977
void bitmap_unplug(struct bitmap *bitmap)
978
{
979
	unsigned long i;
980
	int dirty, need_write;
981

982 983
	if (!bitmap || !bitmap->storage.filemap ||
	    test_bit(BITMAP_STALE, &bitmap->flags))
984
		return;
985 986 987

	/* look at each page to see if there are any set bits that need to be
	 * flushed out to disk */
988
	for (i = 0; i < bitmap->storage.file_pages; i++) {
989
		if (!bitmap->storage.filemap)
990
			return;
991 992 993 994
		dirty = test_and_clear_page_attr(bitmap, i, BITMAP_PAGE_DIRTY);
		need_write = test_and_clear_page_attr(bitmap, i,
						      BITMAP_PAGE_NEEDWRITE);
		if (dirty || need_write) {
995
			clear_page_attr(bitmap, i, BITMAP_PAGE_PENDING);
996 997
			write_page(bitmap, bitmap->storage.filemap[i], 0);
		}
998
	}
999 1000 1001 1002 1003 1004
	if (bitmap->storage.file)
		wait_event(bitmap->write_wait,
			   atomic_read(&bitmap->pending_writes)==0);
	else
		md_super_wait(bitmap->mddev);

1005
	if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags))
1006
		bitmap_file_kick(bitmap);
1007
}
1008
EXPORT_SYMBOL(bitmap_unplug);
1009

1010
static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed);
1011 1012 1013 1014 1015 1016 1017
/* * bitmap_init_from_disk -- called at bitmap_create time to initialize
 * the in-memory bitmap from the on-disk bitmap -- also, sets up the
 * memory mapping of the bitmap file
 * Special cases:
 *   if there's no bitmap file, or if the bitmap file had been
 *   previously kicked from the array, we mark all the bits as
 *   1's in order to cause a full resync.
1018 1019 1020
 *
 * We ignore all bits for sectors that end earlier than 'start'.
 * This is used when reading an out-of-date bitmap...
1021
 */
1022
static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
1023
{
1024
	unsigned long i, chunks, index, oldindex, bit, node_offset = 0;
1025
	struct page *page = NULL;
1026
	unsigned long bit_cnt = 0;
1027
	struct file *file;
1028
	unsigned long offset;
1029 1030
	int outofdate;
	int ret = -ENOSPC;
1031
	void *paddr;
1032
	struct bitmap_storage *store = &bitmap->storage;
1033

1034
	chunks = bitmap->counts.chunks;
1035
	file = store->file;
1036

1037 1038
	if (!file && !bitmap->mddev->bitmap_info.offset) {
		/* No permanent bitmap - fill with '1s'. */
1039 1040
		store->filemap = NULL;
		store->file_pages = 0;
1041 1042
		for (i = 0; i < chunks ; i++) {
			/* if the disk bit is set, set the memory bit */
1043
			int needed = ((sector_t)(i+1) << (bitmap->counts.chunkshift)
1044 1045
				      >= start);
			bitmap_set_memory_bits(bitmap,
1046
					       (sector_t)i << bitmap->counts.chunkshift,
1047 1048 1049 1050
					       needed);
		}
		return 0;
	}
1051

1052
	outofdate = test_bit(BITMAP_STALE, &bitmap->flags);
1053 1054 1055 1056
	if (outofdate)
		printk(KERN_INFO "%s: bitmap file is out of date, doing full "
			"recovery\n", bmname(bitmap));

1057
	if (file && i_size_read(file->f_mapping->host) < store->bytes) {
1058
		printk(KERN_INFO "%s: bitmap file too short %lu < %lu\n",
1059 1060 1061
		       bmname(bitmap),
		       (unsigned long) i_size_read(file->f_mapping->host),
		       store->bytes);
1062
		goto err;
1063
	}
1064

1065
	oldindex = ~0L;
1066
	offset = 0;
1067
	if (!bitmap->mddev->bitmap_info.external)
1068
		offset = sizeof(bitmap_super_t);
1069

1070 1071 1072
	if (mddev_is_clustered(bitmap->mddev))
		node_offset = bitmap->cluster_slot * (DIV_ROUND_UP(store->bytes, PAGE_SIZE));

1073
	for (i = 0; i < chunks; i++) {
1074
		int b;
1075 1076
		index = file_page_index(&bitmap->storage, i);
		bit = file_page_offset(&bitmap->storage, i);
1077
		if (index != oldindex) { /* this is a new page, read it in */
1078
			int count;
1079
			/* unmap the old page, we're done with it */
1080 1081
			if (index == store->file_pages-1)
				count = store->bytes - index * PAGE_SIZE;
1082 1083
			else
				count = PAGE_SIZE;
1084
			page = store->filemap[index];
1085 1086 1087 1088 1089 1090 1091 1092
			if (file)
				ret = read_page(file, index, bitmap,
						count, page);
			else
				ret = read_sb_page(
					bitmap->mddev,
					bitmap->mddev->bitmap_info.offset,
					page,
1093
					index + node_offset, count);
1094 1095

			if (ret)
1096
				goto err;
1097

1098 1099 1100 1101 1102
			oldindex = index;

			if (outofdate) {
				/*
				 * if bitmap is out of date, dirty the
1103
				 * whole page and write it out
1104
				 */
1105
				paddr = kmap_atomic(page);
1106
				memset(paddr + offset, 0xff,
1107
				       PAGE_SIZE - offset);
1108
				kunmap_atomic(paddr);
1109 1110 1111
				write_page(bitmap, page, 1);

				ret = -EIO;
1112 1113
				if (test_bit(BITMAP_WRITE_ERROR,
					     &bitmap->flags))
1114
					goto err;
1115 1116
			}
		}
1117
		paddr = kmap_atomic(page);
1118
		if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags))
1119
			b = test_bit(bit, paddr);
1120
		else
A
Akinobu Mita 已提交
1121
			b = test_bit_le(bit, paddr);
1122
		kunmap_atomic(paddr);
1123
		if (b) {
1124
			/* if the disk bit is set, set the memory bit */
1125
			int needed = ((sector_t)(i+1) << bitmap->counts.chunkshift
1126 1127
				      >= start);
			bitmap_set_memory_bits(bitmap,
1128
					       (sector_t)i << bitmap->counts.chunkshift,
1129
					       needed);
1130 1131
			bit_cnt++;
		}
1132
		offset = 0;
1133 1134 1135
	}

	printk(KERN_INFO "%s: bitmap initialized from disk: "
1136
	       "read %lu pages, set %lu of %lu bits\n",
1137
	       bmname(bitmap), store->file_pages,
1138
	       bit_cnt, chunks);
1139 1140

	return 0;
1141

1142 1143 1144
 err:
	printk(KERN_INFO "%s: bitmap initialisation failed: %d\n",
	       bmname(bitmap), ret);
1145 1146 1147
	return ret;
}

1148 1149 1150 1151 1152
void bitmap_write_all(struct bitmap *bitmap)
{
	/* We don't actually write all bitmap blocks here,
	 * just flag them as needing to be written
	 */
1153
	int i;
1154

1155
	if (!bitmap || !bitmap->storage.filemap)
1156
		return;
1157
	if (bitmap->storage.file)
1158 1159 1160
		/* Only one copy, so nothing needed */
		return;

1161
	for (i = 0; i < bitmap->storage.file_pages; i++)
1162
		set_page_attr(bitmap, i,
1163
			      BITMAP_PAGE_NEEDWRITE);
1164
	bitmap->allclean = 0;
1165 1166
}

1167 1168
static void bitmap_count_page(struct bitmap_counts *bitmap,
			      sector_t offset, int inc)
1169
{
1170
	sector_t chunk = offset >> bitmap->chunkshift;
1171 1172 1173 1174
	unsigned long page = chunk >> PAGE_COUNTER_SHIFT;
	bitmap->bp[page].count += inc;
	bitmap_checkfree(bitmap, page);
}
1175

1176
static void bitmap_set_pending(struct bitmap_counts *bitmap, sector_t offset)
1177 1178 1179 1180 1181 1182 1183 1184 1185
{
	sector_t chunk = offset >> bitmap->chunkshift;
	unsigned long page = chunk >> PAGE_COUNTER_SHIFT;
	struct bitmap_page *bp = &bitmap->bp[page];

	if (!bp->pending)
		bp->pending = 1;
}

1186
static bitmap_counter_t *bitmap_get_counter(struct bitmap_counts *bitmap,
N
NeilBrown 已提交
1187
					    sector_t offset, sector_t *blocks,
1188 1189 1190 1191 1192 1193 1194
					    int create);

/*
 * bitmap daemon -- periodically wakes up to clean bits and flush pages
 *			out to disk
 */

1195
void bitmap_daemon_work(struct mddev *mddev)
1196
{
1197
	struct bitmap *bitmap;
1198
	unsigned long j;
1199
	unsigned long nextpage;
N
NeilBrown 已提交
1200
	sector_t blocks;
1201
	struct bitmap_counts *counts;
1202

1203 1204 1205
	/* Use a mutex to guard daemon_work against
	 * bitmap_destroy.
	 */
1206
	mutex_lock(&mddev->bitmap_info.mutex);
1207 1208
	bitmap = mddev->bitmap;
	if (bitmap == NULL) {
1209
		mutex_unlock(&mddev->bitmap_info.mutex);
1210
		return;
1211
	}
1212
	if (time_before(jiffies, bitmap->daemon_lastrun
N
NeilBrown 已提交
1213
			+ mddev->bitmap_info.daemon_sleep))
1214 1215
		goto done;

1216
	bitmap->daemon_lastrun = jiffies;
1217
	if (bitmap->allclean) {
N
NeilBrown 已提交
1218
		mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;
1219
		goto done;
1220 1221
	}
	bitmap->allclean = 1;
1222

1223 1224 1225 1226
	/* Any file-page which is PENDING now needs to be written.
	 * So set NEEDWRITE now, then after we make any last-minute changes
	 * we will write it.
	 */
1227
	for (j = 0; j < bitmap->storage.file_pages; j++)
1228 1229
		if (test_and_clear_page_attr(bitmap, j,
					     BITMAP_PAGE_PENDING))
1230
			set_page_attr(bitmap, j,
1231 1232 1233 1234 1235 1236 1237 1238
				      BITMAP_PAGE_NEEDWRITE);

	if (bitmap->need_sync &&
	    mddev->bitmap_info.external == 0) {
		/* Arrange for superblock update as well as
		 * other changes */
		bitmap_super_t *sb;
		bitmap->need_sync = 0;
1239 1240
		if (bitmap->storage.filemap) {
			sb = kmap_atomic(bitmap->storage.sb_page);
1241 1242 1243
			sb->events_cleared =
				cpu_to_le64(bitmap->events_cleared);
			kunmap_atomic(sb);
1244
			set_page_attr(bitmap, 0,
1245 1246
				      BITMAP_PAGE_NEEDWRITE);
		}
1247 1248 1249 1250
	}
	/* Now look at the bitmap counters and if any are '2' or '1',
	 * decrement and handle accordingly.
	 */
1251 1252
	counts = &bitmap->counts;
	spin_lock_irq(&counts->lock);
1253
	nextpage = 0;
1254
	for (j = 0; j < counts->chunks; j++) {
1255
		bitmap_counter_t *bmc;
1256
		sector_t  block = (sector_t)j << counts->chunkshift;
1257

1258 1259
		if (j == nextpage) {
			nextpage += PAGE_COUNTER_RATIO;
1260
			if (!counts->bp[j >> PAGE_COUNTER_SHIFT].pending) {
1261
				j |= PAGE_COUNTER_MASK;
1262 1263
				continue;
			}
1264
			counts->bp[j >> PAGE_COUNTER_SHIFT].pending = 0;
1265
		}
1266
		bmc = bitmap_get_counter(counts,
1267
					 block,
1268
					 &blocks, 0);
1269 1270

		if (!bmc) {
1271
			j |= PAGE_COUNTER_MASK;
1272 1273 1274 1275 1276
			continue;
		}
		if (*bmc == 1 && !bitmap->need_sync) {
			/* We can clear the bit */
			*bmc = 0;
1277
			bitmap_count_page(counts, block, -1);
1278
			bitmap_file_clear_bit(bitmap, block);
1279 1280
		} else if (*bmc && *bmc <= 2) {
			*bmc = 1;
1281
			bitmap_set_pending(counts, block);
1282
			bitmap->allclean = 0;
1283
		}
1284
	}
1285
	spin_unlock_irq(&counts->lock);
1286

1287 1288 1289 1290 1291 1292 1293 1294
	/* Now start writeout on any page in NEEDWRITE that isn't DIRTY.
	 * DIRTY pages need to be written by bitmap_unplug so it can wait
	 * for them.
	 * If we find any DIRTY page we stop there and let bitmap_unplug
	 * handle all the rest.  This is important in the case where
	 * the first blocking holds the superblock and it has been updated.
	 * We mustn't write any other blocks before the superblock.
	 */
1295 1296 1297 1298
	for (j = 0;
	     j < bitmap->storage.file_pages
		     && !test_bit(BITMAP_STALE, &bitmap->flags);
	     j++) {
1299
		if (test_page_attr(bitmap, j,
1300 1301 1302
				   BITMAP_PAGE_DIRTY))
			/* bitmap_unplug will handle the rest */
			break;
1303 1304
		if (test_and_clear_page_attr(bitmap, j,
					     BITMAP_PAGE_NEEDWRITE)) {
1305
			write_page(bitmap, bitmap->storage.filemap[j], 0);
1306 1307 1308
		}
	}

1309
 done:
1310
	if (bitmap->allclean == 0)
N
NeilBrown 已提交
1311 1312
		mddev->thread->timeout =
			mddev->bitmap_info.daemon_sleep;
1313
	mutex_unlock(&mddev->bitmap_info.mutex);
1314 1315
}

1316
static bitmap_counter_t *bitmap_get_counter(struct bitmap_counts *bitmap,
N
NeilBrown 已提交
1317
					    sector_t offset, sector_t *blocks,
1318
					    int create)
1319 1320
__releases(bitmap->lock)
__acquires(bitmap->lock)
1321 1322 1323 1324 1325
{
	/* If 'create', we might release the lock and reclaim it.
	 * The lock must have been taken with interrupts enabled.
	 * If !create, we don't release the lock.
	 */
1326
	sector_t chunk = offset >> bitmap->chunkshift;
1327 1328 1329
	unsigned long page = chunk >> PAGE_COUNTER_SHIFT;
	unsigned long pageoff = (chunk & PAGE_COUNTER_MASK) << COUNTER_BYTE_SHIFT;
	sector_t csize;
1330
	int err;
1331

1332 1333 1334 1335
	err = bitmap_checkpage(bitmap, page, create);

	if (bitmap->bp[page].hijacked ||
	    bitmap->bp[page].map == NULL)
1336
		csize = ((sector_t)1) << (bitmap->chunkshift +
1337 1338
					  PAGE_COUNTER_SHIFT - 1);
	else
1339
		csize = ((sector_t)1) << bitmap->chunkshift;
1340 1341 1342
	*blocks = csize - (offset & (csize - 1));

	if (err < 0)
1343
		return NULL;
1344

1345 1346 1347 1348 1349 1350 1351 1352
	/* now locked ... */

	if (bitmap->bp[page].hijacked) { /* hijacked pointer */
		/* should we use the first or second counter field
		 * of the hijacked pointer? */
		int hi = (pageoff > PAGE_COUNTER_MASK);
		return  &((bitmap_counter_t *)
			  &bitmap->bp[page].map)[hi];
1353
	} else /* page is allocated */
1354 1355 1356 1357
		return (bitmap_counter_t *)
			&(bitmap->bp[page].map[pageoff]);
}

1358
int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sectors, int behind)
1359
{
1360 1361
	if (!bitmap)
		return 0;
1362 1363

	if (behind) {
1364
		int bw;
1365
		atomic_inc(&bitmap->behind_writes);
1366 1367 1368 1369
		bw = atomic_read(&bitmap->behind_writes);
		if (bw > bitmap->behind_writes_used)
			bitmap->behind_writes_used = bw;

1370 1371
		pr_debug("inc write-behind count %d/%lu\n",
			 bw, bitmap->mddev->bitmap_info.max_write_behind);
1372 1373
	}

1374
	while (sectors) {
N
NeilBrown 已提交
1375
		sector_t blocks;
1376 1377
		bitmap_counter_t *bmc;

1378 1379
		spin_lock_irq(&bitmap->counts.lock);
		bmc = bitmap_get_counter(&bitmap->counts, offset, &blocks, 1);
1380
		if (!bmc) {
1381
			spin_unlock_irq(&bitmap->counts.lock);
1382 1383 1384
			return 0;
		}

1385
		if (unlikely(COUNTER(*bmc) == COUNTER_MAX)) {
1386 1387 1388 1389 1390 1391 1392
			DEFINE_WAIT(__wait);
			/* note that it is safe to do the prepare_to_wait
			 * after the test as long as we do it before dropping
			 * the spinlock.
			 */
			prepare_to_wait(&bitmap->overflow_wait, &__wait,
					TASK_UNINTERRUPTIBLE);
1393
			spin_unlock_irq(&bitmap->counts.lock);
1394
			schedule();
1395 1396 1397 1398
			finish_wait(&bitmap->overflow_wait, &__wait);
			continue;
		}

1399
		switch (*bmc) {
1400 1401
		case 0:
			bitmap_file_set_bit(bitmap, offset);
1402
			bitmap_count_page(&bitmap->counts, offset, 1);
1403 1404 1405 1406
			/* fall through */
		case 1:
			*bmc = 2;
		}
1407

1408 1409
		(*bmc)++;

1410
		spin_unlock_irq(&bitmap->counts.lock);
1411 1412 1413 1414

		offset += blocks;
		if (sectors > blocks)
			sectors -= blocks;
1415 1416
		else
			sectors = 0;
1417 1418 1419
	}
	return 0;
}
1420
EXPORT_SYMBOL(bitmap_startwrite);
1421 1422

void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long sectors,
1423
		     int success, int behind)
1424
{
1425 1426
	if (!bitmap)
		return;
1427
	if (behind) {
1428 1429
		if (atomic_dec_and_test(&bitmap->behind_writes))
			wake_up(&bitmap->behind_wait);
1430 1431 1432
		pr_debug("dec write-behind count %d/%lu\n",
			 atomic_read(&bitmap->behind_writes),
			 bitmap->mddev->bitmap_info.max_write_behind);
1433 1434
	}

1435
	while (sectors) {
N
NeilBrown 已提交
1436
		sector_t blocks;
1437 1438 1439
		unsigned long flags;
		bitmap_counter_t *bmc;

1440 1441
		spin_lock_irqsave(&bitmap->counts.lock, flags);
		bmc = bitmap_get_counter(&bitmap->counts, offset, &blocks, 0);
1442
		if (!bmc) {
1443
			spin_unlock_irqrestore(&bitmap->counts.lock, flags);
1444 1445 1446
			return;
		}

1447
		if (success && !bitmap->mddev->degraded &&
1448 1449 1450
		    bitmap->events_cleared < bitmap->mddev->events) {
			bitmap->events_cleared = bitmap->mddev->events;
			bitmap->need_sync = 1;
1451
			sysfs_notify_dirent_safe(bitmap->sysfs_can_clear);
1452 1453
		}

1454
		if (!success && !NEEDED(*bmc))
1455 1456
			*bmc |= NEEDED_MASK;

1457
		if (COUNTER(*bmc) == COUNTER_MAX)
1458 1459
			wake_up(&bitmap->overflow_wait);

1460
		(*bmc)--;
1461
		if (*bmc <= 2) {
1462
			bitmap_set_pending(&bitmap->counts, offset);
1463 1464
			bitmap->allclean = 0;
		}
1465
		spin_unlock_irqrestore(&bitmap->counts.lock, flags);
1466 1467 1468
		offset += blocks;
		if (sectors > blocks)
			sectors -= blocks;
1469 1470
		else
			sectors = 0;
1471 1472
	}
}
1473
EXPORT_SYMBOL(bitmap_endwrite);
1474

N
NeilBrown 已提交
1475
static int __bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks,
1476
			       int degraded)
1477 1478 1479 1480 1481 1482 1483
{
	bitmap_counter_t *bmc;
	int rv;
	if (bitmap == NULL) {/* FIXME or bitmap set as 'failed' */
		*blocks = 1024;
		return 1; /* always resync if no bitmap */
	}
1484 1485
	spin_lock_irq(&bitmap->counts.lock);
	bmc = bitmap_get_counter(&bitmap->counts, offset, blocks, 0);
1486 1487 1488 1489 1490 1491 1492
	rv = 0;
	if (bmc) {
		/* locked */
		if (RESYNC(*bmc))
			rv = 1;
		else if (NEEDED(*bmc)) {
			rv = 1;
1493 1494 1495 1496
			if (!degraded) { /* don't set/clear bits if degraded */
				*bmc |= RESYNC_MASK;
				*bmc &= ~NEEDED_MASK;
			}
1497 1498
		}
	}
1499
	spin_unlock_irq(&bitmap->counts.lock);
1500 1501 1502
	return rv;
}

N
NeilBrown 已提交
1503
int bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks,
1504 1505 1506 1507 1508 1509 1510 1511 1512 1513
		      int degraded)
{
	/* bitmap_start_sync must always report on multiples of whole
	 * pages, otherwise resync (which is very PAGE_SIZE based) will
	 * get confused.
	 * So call __bitmap_start_sync repeatedly (if needed) until
	 * At least PAGE_SIZE>>9 blocks are covered.
	 * Return the 'or' of the result.
	 */
	int rv = 0;
N
NeilBrown 已提交
1514
	sector_t blocks1;
1515 1516 1517 1518 1519 1520 1521 1522 1523 1524

	*blocks = 0;
	while (*blocks < (PAGE_SIZE>>9)) {
		rv |= __bitmap_start_sync(bitmap, offset,
					  &blocks1, degraded);
		offset += blocks1;
		*blocks += blocks1;
	}
	return rv;
}
1525
EXPORT_SYMBOL(bitmap_start_sync);
1526

N
NeilBrown 已提交
1527
void bitmap_end_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, int aborted)
1528 1529 1530
{
	bitmap_counter_t *bmc;
	unsigned long flags;
1531 1532

	if (bitmap == NULL) {
1533 1534 1535
		*blocks = 1024;
		return;
	}
1536 1537
	spin_lock_irqsave(&bitmap->counts.lock, flags);
	bmc = bitmap_get_counter(&bitmap->counts, offset, blocks, 0);
1538 1539 1540 1541 1542 1543 1544 1545 1546
	if (bmc == NULL)
		goto unlock;
	/* locked */
	if (RESYNC(*bmc)) {
		*bmc &= ~RESYNC_MASK;

		if (!NEEDED(*bmc) && aborted)
			*bmc |= NEEDED_MASK;
		else {
1547
			if (*bmc <= 2) {
1548
				bitmap_set_pending(&bitmap->counts, offset);
1549 1550
				bitmap->allclean = 0;
			}
1551 1552 1553
		}
	}
 unlock:
1554
	spin_unlock_irqrestore(&bitmap->counts.lock, flags);
1555
}
1556
EXPORT_SYMBOL(bitmap_end_sync);
1557 1558 1559 1560 1561 1562 1563 1564

void bitmap_close_sync(struct bitmap *bitmap)
{
	/* Sync has finished, and any bitmap chunks that weren't synced
	 * properly have been aborted.  It remains to us to clear the
	 * RESYNC bit wherever it is still on
	 */
	sector_t sector = 0;
N
NeilBrown 已提交
1565
	sector_t blocks;
N
NeilBrown 已提交
1566 1567
	if (!bitmap)
		return;
1568 1569
	while (sector < bitmap->mddev->resync_max_sectors) {
		bitmap_end_sync(bitmap, sector, &blocks, 0);
N
NeilBrown 已提交
1570 1571 1572
		sector += blocks;
	}
}
1573
EXPORT_SYMBOL(bitmap_close_sync);
N
NeilBrown 已提交
1574 1575 1576 1577

void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector)
{
	sector_t s = 0;
N
NeilBrown 已提交
1578
	sector_t blocks;
N
NeilBrown 已提交
1579 1580 1581 1582 1583 1584 1585 1586

	if (!bitmap)
		return;
	if (sector == 0) {
		bitmap->last_end_sync = jiffies;
		return;
	}
	if (time_before(jiffies, (bitmap->last_end_sync
1587
				  + bitmap->mddev->bitmap_info.daemon_sleep)))
N
NeilBrown 已提交
1588 1589 1590 1591
		return;
	wait_event(bitmap->mddev->recovery_wait,
		   atomic_read(&bitmap->mddev->recovery_active) == 0);

1592
	bitmap->mddev->curr_resync_completed = sector;
1593
	set_bit(MD_CHANGE_CLEAN, &bitmap->mddev->flags);
1594
	sector &= ~((1ULL << bitmap->counts.chunkshift) - 1);
N
NeilBrown 已提交
1595 1596 1597 1598
	s = 0;
	while (s < sector && s < bitmap->mddev->resync_max_sectors) {
		bitmap_end_sync(bitmap, s, &blocks, 0);
		s += blocks;
1599
	}
N
NeilBrown 已提交
1600
	bitmap->last_end_sync = jiffies;
1601
	sysfs_notify(&bitmap->mddev->kobj, NULL, "sync_completed");
1602
}
1603
EXPORT_SYMBOL(bitmap_cond_end_sync);
1604

1605
static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed)
1606 1607
{
	/* For each chunk covered by any of these sectors, set the
1608
	 * counter to 2 and possibly set resync_needed.  They should all
1609 1610
	 * be 0 at this point
	 */
1611

N
NeilBrown 已提交
1612
	sector_t secs;
1613
	bitmap_counter_t *bmc;
1614 1615
	spin_lock_irq(&bitmap->counts.lock);
	bmc = bitmap_get_counter(&bitmap->counts, offset, &secs, 1);
1616
	if (!bmc) {
1617
		spin_unlock_irq(&bitmap->counts.lock);
1618
		return;
1619
	}
1620
	if (!*bmc) {
1621
		*bmc = 2;
1622 1623
		bitmap_count_page(&bitmap->counts, offset, 1);
		bitmap_set_pending(&bitmap->counts, offset);
1624
		bitmap->allclean = 0;
1625
	}
1626 1627
	if (needed)
		*bmc |= NEEDED_MASK;
1628
	spin_unlock_irq(&bitmap->counts.lock);
1629 1630
}

1631 1632 1633 1634 1635 1636
/* dirty the memory and file bits for bitmap chunks "s" to "e" */
void bitmap_dirty_bits(struct bitmap *bitmap, unsigned long s, unsigned long e)
{
	unsigned long chunk;

	for (chunk = s; chunk <= e; chunk++) {
1637
		sector_t sec = (sector_t)chunk << bitmap->counts.chunkshift;
1638 1639
		bitmap_set_memory_bits(bitmap, sec, 1);
		bitmap_file_set_bit(bitmap, sec);
1640 1641 1642 1643 1644 1645
		if (sec < bitmap->mddev->recovery_cp)
			/* We are asserting that the array is dirty,
			 * so move the recovery_cp address back so
			 * that it is obvious that it is dirty
			 */
			bitmap->mddev->recovery_cp = sec;
1646 1647 1648
	}
}

1649 1650 1651
/*
 * flush out any pending updates
 */
1652
void bitmap_flush(struct mddev *mddev)
1653 1654
{
	struct bitmap *bitmap = mddev->bitmap;
1655
	long sleep;
1656 1657 1658 1659 1660 1661 1662

	if (!bitmap) /* there was no bitmap */
		return;

	/* run the daemon_work three time to ensure everything is flushed
	 * that can be
	 */
1663
	sleep = mddev->bitmap_info.daemon_sleep * 2;
1664
	bitmap->daemon_lastrun -= sleep;
1665
	bitmap_daemon_work(mddev);
1666
	bitmap->daemon_lastrun -= sleep;
1667
	bitmap_daemon_work(mddev);
1668
	bitmap->daemon_lastrun -= sleep;
1669
	bitmap_daemon_work(mddev);
1670 1671 1672
	bitmap_update_sb(bitmap);
}

1673 1674 1675
/*
 * free memory that was allocated
 */
1676
static void bitmap_free(struct bitmap *bitmap)
1677 1678 1679 1680 1681 1682 1683
{
	unsigned long k, pages;
	struct bitmap_page *bp;

	if (!bitmap) /* there was no bitmap */
		return;

1684 1685
	if (mddev_is_clustered(bitmap->mddev) && bitmap->mddev->cluster_info &&
		bitmap->cluster_slot == md_cluster_ops->slot_number(bitmap->mddev))
1686 1687
		md_cluster_stop(bitmap->mddev);

1688 1689 1690 1691 1692 1693
	/* Shouldn't be needed - but just in case.... */
	wait_event(bitmap->write_wait,
		   atomic_read(&bitmap->pending_writes) == 0);

	/* release the bitmap file  */
	bitmap_file_unmap(&bitmap->storage);
1694

1695 1696
	bp = bitmap->counts.bp;
	pages = bitmap->counts.pages;
1697 1698 1699 1700 1701 1702 1703 1704 1705 1706

	/* free all allocated memory */

	if (bp) /* deallocate the page memory */
		for (k = 0; k < pages; k++)
			if (bp[k].map && !bp[k].hijacked)
				kfree(bp[k].map);
	kfree(bp);
	kfree(bitmap);
}
1707

1708
void bitmap_destroy(struct mddev *mddev)
1709 1710 1711 1712 1713 1714
{
	struct bitmap *bitmap = mddev->bitmap;

	if (!bitmap) /* there was no bitmap */
		return;

1715
	mutex_lock(&mddev->bitmap_info.mutex);
1716
	spin_lock(&mddev->lock);
1717
	mddev->bitmap = NULL; /* disconnect from the md device */
1718
	spin_unlock(&mddev->lock);
1719
	mutex_unlock(&mddev->bitmap_info.mutex);
1720 1721
	if (mddev->thread)
		mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;
1722

1723 1724 1725
	if (bitmap->sysfs_can_clear)
		sysfs_put(bitmap->sysfs_can_clear);

1726 1727
	bitmap_free(bitmap);
}
1728 1729 1730 1731 1732

/*
 * initialize the bitmap structure
 * if this returns an error, bitmap_destroy must be called to do clean up
 */
1733
struct bitmap *bitmap_create(struct mddev *mddev, int slot)
1734 1735
{
	struct bitmap *bitmap;
1736
	sector_t blocks = mddev->resync_max_sectors;
1737
	struct file *file = mddev->bitmap_info.file;
1738
	int err;
1739
	struct kernfs_node *bm = NULL;
1740

A
Alexey Dobriyan 已提交
1741
	BUILD_BUG_ON(sizeof(bitmap_super_t) != 256);
1742

1743
	BUG_ON(file && mddev->bitmap_info.offset);
1744

1745
	bitmap = kzalloc(sizeof(*bitmap), GFP_KERNEL);
1746
	if (!bitmap)
1747
		return ERR_PTR(-ENOMEM);
1748

1749
	spin_lock_init(&bitmap->counts.lock);
1750 1751
	atomic_set(&bitmap->pending_writes, 0);
	init_waitqueue_head(&bitmap->write_wait);
1752
	init_waitqueue_head(&bitmap->overflow_wait);
1753
	init_waitqueue_head(&bitmap->behind_wait);
1754

1755
	bitmap->mddev = mddev;
1756
	bitmap->cluster_slot = slot;

	if (mddev->kobj.sd)
		bm = sysfs_get_dirent(mddev->kobj.sd, "bitmap");
	if (bm) {
		bitmap->sysfs_can_clear = sysfs_get_dirent(bm, "can_clear");
		sysfs_put(bm);
	} else
		bitmap->sysfs_can_clear = NULL;

	bitmap->storage.file = file;
	if (file) {
		get_file(file);
		/* As future accesses to this file will use bmap,
		 * and bypass the page cache, we must sync the file
		 * first.
		 */
		vfs_fsync(file, 1);
	}
	/* read superblock from bitmap file (this sets mddev->bitmap_info.chunksize) */
	if (!mddev->bitmap_info.external) {
		/*
		 * If 'MD_ARRAY_FIRST_USE' is set, then device-mapper is
		 * instructing us to create a new on-disk bitmap instance.
		 */
		if (test_and_clear_bit(MD_ARRAY_FIRST_USE, &mddev->flags))
			err = bitmap_new_disk_sb(bitmap);
		else
			err = bitmap_read_sb(bitmap);
	} else {
		err = 0;
		if (mddev->bitmap_info.chunksize == 0 ||
		    mddev->bitmap_info.daemon_sleep == 0)
			/* chunksize and time_base need to be
			 * set first. */
			err = -EINVAL;
	}
	if (err)
		goto error;

	bitmap->daemon_lastrun = jiffies;
	err = bitmap_resize(bitmap, blocks, mddev->bitmap_info.chunksize, 1);
	if (err)
		goto error;

	printk(KERN_INFO "created bitmap (%lu pages) for device %s\n",
	       bitmap->counts.pages, bmname(bitmap));

	err = test_bit(BITMAP_WRITE_ERROR, &bitmap->flags) ? -EIO : 0;
	if (err)
		goto error;

	return bitmap;
 error:
	bitmap_free(bitmap);
	return ERR_PTR(err);
}

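/*
 * load the on-disk bitmap into the in-memory counters, kick off any
 * recovery the dirty bits require, and let the md thread resume
 * periodic daemon work
 */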
int bitmap_load(struct mddev *mddev)
{
	int err = 0;
	sector_t start = 0;
	sector_t sector = 0;
	struct bitmap *bitmap = mddev->bitmap;

	if (!bitmap)
		goto out;

	/* Clear out old bitmap info first:  Either there is none, or we
	 * are resuming after someone else has possibly changed things,
	 * so we should forget old cached info.
	 * All chunks should be clean, but some might need_sync.
	 */
	while (sector < mddev->resync_max_sectors) {
		sector_t blocks;
		bitmap_start_sync(bitmap, sector, &blocks, 0);
		sector += blocks;
	}
	bitmap_close_sync(bitmap);

	if (mddev->degraded == 0
	    || bitmap->events_cleared == mddev->events)
		/* no need to keep dirty bits to optimise a
		 * re-add of a missing device */
		start = mddev->recovery_cp;

	mutex_lock(&mddev->bitmap_info.mutex);
	err = bitmap_init_from_disk(bitmap, start);
	mutex_unlock(&mddev->bitmap_info.mutex);

	if (err)
		goto out;
	clear_bit(BITMAP_STALE, &bitmap->flags);

	/* Kick recovery in case any bits were set */
	set_bit(MD_RECOVERY_NEEDED, &bitmap->mddev->recovery);

	mddev->thread->timeout = mddev->bitmap_info.daemon_sleep;
	md_wakeup_thread(mddev->thread);

	bitmap_update_sb(bitmap);

	if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags))
		err = -EIO;
out:
	return err;
}
EXPORT_SYMBOL_GPL(bitmap_load);

/* Loads the bitmap associated with slot and copies the resync information
 * to our bitmap
 */
int bitmap_copy_from_slot(struct mddev *mddev, int slot,
		sector_t *low, sector_t *high, bool clear_bits)
{
	int rv = 0, i, j;
	sector_t block, lo = 0, hi = 0;
	struct bitmap_counts *counts;
	struct bitmap *bitmap = bitmap_create(mddev, slot);

	if (IS_ERR(bitmap))
		return PTR_ERR(bitmap);

	rv = bitmap_init_from_disk(bitmap, 0);
	if (rv)
		goto err;

	counts = &bitmap->counts;
	for (j = 0; j < counts->chunks; j++) {
		block = (sector_t)j << counts->chunkshift;
		if (bitmap_file_test_bit(bitmap, block)) {
			if (!lo)
				lo = block;
			hi = block;
			bitmap_file_clear_bit(bitmap, block);
			bitmap_set_memory_bits(mddev->bitmap, block, 1);
			bitmap_file_set_bit(mddev->bitmap, block);
		}
	}

	if (clear_bits) {
		bitmap_update_sb(bitmap);
		/* Setting this for the ev_page should be enough.
		 * We do not require both write_all and BITMAP_PAGE_DIRTY either
		 */
		for (i = 0; i < bitmap->storage.file_pages; i++)
			set_page_attr(bitmap, i, BITMAP_PAGE_DIRTY);
		bitmap_write_all(bitmap);
		bitmap_unplug(bitmap);
	}
	*low = lo;
	*high = hi;
err:
	bitmap_free(bitmap);
	return rv;
}
EXPORT_SYMBOL_GPL(bitmap_copy_from_slot);


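/*
 * report bitmap usage (in-memory pages, chunk size, backing file)
 * through the given seq_file
 */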
void bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
{
	unsigned long chunk_kb;
	struct bitmap_counts *counts;

	if (!bitmap)
		return;

	counts = &bitmap->counts;

	chunk_kb = bitmap->mddev->bitmap_info.chunksize >> 10;
	seq_printf(seq, "bitmap: %lu/%lu pages [%luKB], "
		   "%lu%s chunk",
		   counts->pages - counts->missing_pages,
		   counts->pages,
		   (counts->pages - counts->missing_pages)
		   << (PAGE_SHIFT - 10),
		   chunk_kb ? chunk_kb : bitmap->mddev->bitmap_info.chunksize,
		   chunk_kb ? "KB" : "B");
	if (bitmap->storage.file) {
		seq_printf(seq, ", file: ");
		seq_file_path(seq, bitmap->storage.file, " \t\n");
	}

	seq_printf(seq, "\n");
}

int bitmap_resize(struct bitmap *bitmap, sector_t blocks,
		  int chunksize, int init)
{
	/* If chunksize is 0, choose an appropriate chunk size.
	 * Then possibly allocate new storage space.
	 * Then quiesce, copy bits, replace bitmap, and re-start
	 *
	 * This function is called both to set up the initial bitmap
	 * and to resize the bitmap while the array is active.
	 * If this happens as a result of the array being resized,
	 * chunksize will be zero, and we need to choose a suitable
	 * chunksize, otherwise we use what we are given.
	 */
	struct bitmap_storage store;
	struct bitmap_counts old_counts;
	unsigned long chunks;
	sector_t block;
	sector_t old_blocks, new_blocks;
	int chunkshift;
	int ret = 0;
	long pages;
	struct bitmap_page *new_bp;

	if (chunksize == 0) {
		/* If there is enough space, leave the chunk size unchanged,
		 * else increase by factor of two until there is enough space.
		 */
		long bytes;
		long space = bitmap->mddev->bitmap_info.space;

		if (space == 0) {
			/* We don't know how much space there is, so limit
			 * to current size - in sectors.
			 */
			bytes = DIV_ROUND_UP(bitmap->counts.chunks, 8);
			if (!bitmap->mddev->bitmap_info.external)
				bytes += sizeof(bitmap_super_t);
			space = DIV_ROUND_UP(bytes, 512);
			bitmap->mddev->bitmap_info.space = space;
		}
		chunkshift = bitmap->counts.chunkshift;
		chunkshift--;
		do {
			/* 'chunkshift' is shift from block size to chunk size */
			chunkshift++;
			chunks = DIV_ROUND_UP_SECTOR_T(blocks, 1 << chunkshift);
			bytes = DIV_ROUND_UP(chunks, 8);
			if (!bitmap->mddev->bitmap_info.external)
				bytes += sizeof(bitmap_super_t);
		} while (bytes > (space << 9));
	} else
		chunkshift = ffz(~chunksize) - BITMAP_BLOCK_SHIFT;

	chunks = DIV_ROUND_UP_SECTOR_T(blocks, 1 << chunkshift);
	memset(&store, 0, sizeof(store));
	if (bitmap->mddev->bitmap_info.offset || bitmap->mddev->bitmap_info.file)
		ret = bitmap_storage_alloc(&store, chunks,
					   !bitmap->mddev->bitmap_info.external,
					   bitmap->cluster_slot);
	if (ret)
		goto err;

	pages = DIV_ROUND_UP(chunks, PAGE_COUNTER_RATIO);

	new_bp = kzalloc(pages * sizeof(*new_bp), GFP_KERNEL);
	ret = -ENOMEM;
	if (!new_bp) {
		bitmap_file_unmap(&store);
		goto err;
	}

	if (!init)
		bitmap->mddev->pers->quiesce(bitmap->mddev, 1);

	store.file = bitmap->storage.file;
	bitmap->storage.file = NULL;

	if (store.sb_page && bitmap->storage.sb_page)
		memcpy(page_address(store.sb_page),
		       page_address(bitmap->storage.sb_page),
		       sizeof(bitmap_super_t));
	bitmap_file_unmap(&bitmap->storage);
	bitmap->storage = store;

	old_counts = bitmap->counts;
	bitmap->counts.bp = new_bp;
	bitmap->counts.pages = pages;
	bitmap->counts.missing_pages = pages;
	bitmap->counts.chunkshift = chunkshift;
	bitmap->counts.chunks = chunks;
	bitmap->mddev->bitmap_info.chunksize = 1 << (chunkshift +
						     BITMAP_BLOCK_SHIFT);

	blocks = min(old_counts.chunks << old_counts.chunkshift,
		     chunks << chunkshift);

	spin_lock_irq(&bitmap->counts.lock);
	for (block = 0; block < blocks; ) {
		bitmap_counter_t *bmc_old, *bmc_new;
		int set;

		bmc_old = bitmap_get_counter(&old_counts, block,
					     &old_blocks, 0);
		set = bmc_old && NEEDED(*bmc_old);

		if (set) {
			bmc_new = bitmap_get_counter(&bitmap->counts, block,
						     &new_blocks, 1);
			if (*bmc_new == 0) {
				/* need to set on-disk bits too. */
				sector_t end = block + new_blocks;
				sector_t start = block >> chunkshift;
				start <<= chunkshift;
				while (start < end) {
					bitmap_file_set_bit(bitmap, block);
					start += 1 << chunkshift;
				}
				*bmc_new = 2;
				bitmap_count_page(&bitmap->counts,
						  block, 1);
				bitmap_set_pending(&bitmap->counts,
						   block);
			}
			*bmc_new |= NEEDED_MASK;
			if (new_blocks < old_blocks)
				old_blocks = new_blocks;
		}
		block += old_blocks;
	}

	if (!init) {
		int i;
		while (block < (chunks << chunkshift)) {
			bitmap_counter_t *bmc;
			bmc = bitmap_get_counter(&bitmap->counts, block,
						 &new_blocks, 1);
			if (bmc) {
				/* new space.  It needs to be resynced, so
				 * we set NEEDED_MASK.
				 */
				if (*bmc == 0) {
					*bmc = NEEDED_MASK | 2;
					bitmap_count_page(&bitmap->counts,
							  block, 1);
					bitmap_set_pending(&bitmap->counts,
							   block);
				}
			}
			block += new_blocks;
		}
		for (i = 0; i < bitmap->storage.file_pages; i++)
			set_page_attr(bitmap, i, BITMAP_PAGE_DIRTY);
	}
	spin_unlock_irq(&bitmap->counts.lock);

	if (!init) {
		bitmap_unplug(bitmap);
		bitmap->mddev->pers->quiesce(bitmap->mddev, 0);
	}
	ret = 0;
err:
	return ret;
}
EXPORT_SYMBOL_GPL(bitmap_resize);

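/* 'bitmap/location' reports (and sets) where the bitmap lives:
 * 'none', 'file', or a signed offset relative to the array superblock.
 */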
static ssize_t
location_show(struct mddev *mddev, char *page)
{
	ssize_t len;
	if (mddev->bitmap_info.file)
		len = sprintf(page, "file");
	else if (mddev->bitmap_info.offset)
		len = sprintf(page, "%+lld", (long long)mddev->bitmap_info.offset);
	else
		len = sprintf(page, "none");
	len += sprintf(page+len, "\n");
	return len;
}

static ssize_t
location_store(struct mddev *mddev, const char *buf, size_t len)
{

	if (mddev->pers) {
		if (!mddev->pers->quiesce)
			return -EBUSY;
		if (mddev->recovery || mddev->sync_thread)
			return -EBUSY;
	}

	if (mddev->bitmap || mddev->bitmap_info.file ||
	    mddev->bitmap_info.offset) {
		/* bitmap already configured.  Only option is to clear it */
		if (strncmp(buf, "none", 4) != 0)
			return -EBUSY;
		if (mddev->pers) {
			mddev->pers->quiesce(mddev, 1);
			bitmap_destroy(mddev);
			mddev->pers->quiesce(mddev, 0);
		}
		mddev->bitmap_info.offset = 0;
		if (mddev->bitmap_info.file) {
			struct file *f = mddev->bitmap_info.file;
			mddev->bitmap_info.file = NULL;
			fput(f);
		}
	} else {
		/* No bitmap, OK to set a location */
		long long offset;
		if (strncmp(buf, "none", 4) == 0)
			/* nothing to be done */;
		else if (strncmp(buf, "file:", 5) == 0) {
			/* Not supported yet */
			return -EINVAL;
		} else {
			int rv;
			if (buf[0] == '+')
				rv = kstrtoll(buf+1, 10, &offset);
			else
				rv = kstrtoll(buf, 10, &offset);
			if (rv)
				return rv;
			if (offset == 0)
				return -EINVAL;
			if (mddev->bitmap_info.external == 0 &&
			    mddev->major_version == 0 &&
			    offset != mddev->bitmap_info.default_offset)
				return -EINVAL;
			mddev->bitmap_info.offset = offset;
			if (mddev->pers) {
				struct bitmap *bitmap;
				mddev->pers->quiesce(mddev, 1);
				bitmap = bitmap_create(mddev, -1);
				if (IS_ERR(bitmap))
					rv = PTR_ERR(bitmap);
				else {
					mddev->bitmap = bitmap;
					rv = bitmap_load(mddev);
					if (rv) {
						bitmap_destroy(mddev);
						mddev->bitmap_info.offset = 0;
					}
				}
				mddev->pers->quiesce(mddev, 0);
				if (rv)
					return rv;
			}
		}
	}
	if (!mddev->external) {
		/* Ensure new bitmap info is stored in
		 * metadata promptly.
		 */
		set_bit(MD_CHANGE_DEVS, &mddev->flags);
		md_wakeup_thread(mddev->thread);
	}
	return len;
}

static struct md_sysfs_entry bitmap_location =
__ATTR(location, S_IRUGO|S_IWUSR, location_show, location_store);

/* 'bitmap/space' is the space available at 'location' for the
 * bitmap.  This allows the kernel to know when it is safe to
 * resize the bitmap to match a resized array.
 */
static ssize_t
space_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%lu\n", mddev->bitmap_info.space);
}

static ssize_t
space_store(struct mddev *mddev, const char *buf, size_t len)
{
	unsigned long sectors;
	int rv;

	rv = kstrtoul(buf, 10, &sectors);
	if (rv)
		return rv;

	if (sectors == 0)
		return -EINVAL;

	if (mddev->bitmap &&
	    sectors < (mddev->bitmap->storage.bytes + 511) >> 9)
		return -EFBIG; /* Bitmap is too big for this small space */

	/* could make sure it isn't too big, but that isn't really
	 * needed - user-space should be careful.
	 */
	mddev->bitmap_info.space = sectors;
	return len;
}

static struct md_sysfs_entry bitmap_space =
__ATTR(space, S_IRUGO|S_IWUSR, space_show, space_store);

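/* 'bitmap/time_base' is the delay, in seconds, between runs of the
 * bitmap daemon; fractional values are accepted on write.
 */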
static ssize_t
timeout_show(struct mddev *mddev, char *page)
{
	ssize_t len;
	unsigned long secs = mddev->bitmap_info.daemon_sleep / HZ;
	unsigned long jifs = mddev->bitmap_info.daemon_sleep % HZ;

	len = sprintf(page, "%lu", secs);
	if (jifs)
		len += sprintf(page+len, ".%03u", jiffies_to_msecs(jifs));
	len += sprintf(page+len, "\n");
	return len;
}

static ssize_t
timeout_store(struct mddev *mddev, const char *buf, size_t len)
{
	/* timeout can be set at any time */
	unsigned long timeout;
	int rv = strict_strtoul_scaled(buf, &timeout, 4);
	if (rv)
		return rv;

	/* just to make sure we don't overflow... */
	if (timeout >= LONG_MAX / HZ)
		return -EINVAL;

	timeout = timeout * HZ / 10000;

	if (timeout >= MAX_SCHEDULE_TIMEOUT)
		timeout = MAX_SCHEDULE_TIMEOUT-1;
	if (timeout < 1)
		timeout = 1;
	mddev->bitmap_info.daemon_sleep = timeout;
	if (mddev->thread) {
		/* if thread->timeout is MAX_SCHEDULE_TIMEOUT, then
		 * the bitmap is all clean and we don't need to
		 * adjust the timeout right now
		 */
		if (mddev->thread->timeout < MAX_SCHEDULE_TIMEOUT) {
			mddev->thread->timeout = timeout;
			md_wakeup_thread(mddev->thread);
		}
	}
	return len;
}

static struct md_sysfs_entry bitmap_timeout =
__ATTR(time_base, S_IRUGO|S_IWUSR, timeout_show, timeout_store);

static ssize_t
backlog_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%lu\n", mddev->bitmap_info.max_write_behind);
}

static ssize_t
backlog_store(struct mddev *mddev, const char *buf, size_t len)
{
	unsigned long backlog;
	int rv = kstrtoul(buf, 10, &backlog);
	if (rv)
		return rv;
	if (backlog > COUNTER_MAX)
		return -EINVAL;
	mddev->bitmap_info.max_write_behind = backlog;
	return len;
}

static struct md_sysfs_entry bitmap_backlog =
__ATTR(backlog, S_IRUGO|S_IWUSR, backlog_show, backlog_store);

static ssize_t
chunksize_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%lu\n", mddev->bitmap_info.chunksize);
}

static ssize_t
chunksize_store(struct mddev *mddev, const char *buf, size_t len)
{
	/* Can only be changed when no bitmap is active */
	int rv;
	unsigned long csize;
	if (mddev->bitmap)
		return -EBUSY;
	rv = kstrtoul(buf, 10, &csize);
	if (rv)
		return rv;
	if (csize < 512 ||
	    !is_power_of_2(csize))
		return -EINVAL;
	mddev->bitmap_info.chunksize = csize;
	return len;
}

static struct md_sysfs_entry bitmap_chunksize =
__ATTR(chunksize, S_IRUGO|S_IWUSR, chunksize_show, chunksize_store);

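/* 'bitmap/metadata' reports whether the bitmap superblock is managed
 * internally by md, externally by user-space, or by the cluster module.
 */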
static ssize_t metadata_show(struct mddev *mddev, char *page)
{
	if (mddev_is_clustered(mddev))
		return sprintf(page, "clustered\n");
	return sprintf(page, "%s\n", (mddev->bitmap_info.external
				      ? "external" : "internal"));
}

static ssize_t metadata_store(struct mddev *mddev, const char *buf, size_t len)
{
	if (mddev->bitmap ||
	    mddev->bitmap_info.file ||
	    mddev->bitmap_info.offset)
		return -EBUSY;
	if (strncmp(buf, "external", 8) == 0)
		mddev->bitmap_info.external = 1;
	else if ((strncmp(buf, "internal", 8) == 0) ||
			(strncmp(buf, "clustered", 9) == 0))
		mddev->bitmap_info.external = 0;
	else
		return -EINVAL;
	return len;
}

static struct md_sysfs_entry bitmap_metadata =
__ATTR(metadata, S_IRUGO|S_IWUSR, metadata_show, metadata_store);

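/* 'bitmap/can_clear' mirrors bitmap->need_sync: writing 'false' sets it,
 * writing 'true' clears it (refused while the array is degraded).
 */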
static ssize_t can_clear_show(struct mddev *mddev, char *page)
{
	int len;
	spin_lock(&mddev->lock);
	if (mddev->bitmap)
		len = sprintf(page, "%s\n", (mddev->bitmap->need_sync ?
					     "false" : "true"));
	else
		len = sprintf(page, "\n");
	spin_unlock(&mddev->lock);
	return len;
}

static ssize_t can_clear_store(struct mddev *mddev, const char *buf, size_t len)
{
	if (mddev->bitmap == NULL)
		return -ENOENT;
	if (strncmp(buf, "false", 5) == 0)
		mddev->bitmap->need_sync = 1;
	else if (strncmp(buf, "true", 4) == 0) {
		if (mddev->degraded)
			return -EBUSY;
		mddev->bitmap->need_sync = 0;
	} else
		return -EINVAL;
	return len;
}

static struct md_sysfs_entry bitmap_can_clear =
__ATTR(can_clear, S_IRUGO|S_IWUSR, can_clear_show, can_clear_store);

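/* 'bitmap/max_backlog_used' reports the largest write-behind backlog
 * seen so far; writing anything resets the counter to zero.
 */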
static ssize_t
behind_writes_used_show(struct mddev *mddev, char *page)
{
	ssize_t ret;
	spin_lock(&mddev->lock);
	if (mddev->bitmap == NULL)
		ret = sprintf(page, "0\n");
	else
		ret = sprintf(page, "%lu\n",
			      mddev->bitmap->behind_writes_used);
	spin_unlock(&mddev->lock);
	return ret;
}

static ssize_t
behind_writes_used_reset(struct mddev *mddev, const char *buf, size_t len)
{
	if (mddev->bitmap)
		mddev->bitmap->behind_writes_used = 0;
	return len;
}

static struct md_sysfs_entry max_backlog_used =
__ATTR(max_backlog_used, S_IRUGO | S_IWUSR,
       behind_writes_used_show, behind_writes_used_reset);

static struct attribute *md_bitmap_attrs[] = {
	&bitmap_location.attr,
	&bitmap_space.attr,
	&bitmap_timeout.attr,
	&bitmap_backlog.attr,
	&bitmap_chunksize.attr,
	&bitmap_metadata.attr,
	&bitmap_can_clear.attr,
	&max_backlog_used.attr,
	NULL
};
struct attribute_group md_bitmap_group = {
	.name = "bitmap",
	.attrs = md_bitmap_attrs,
};