bitmap.c 64.3 KB
Newer Older
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17
/*
 * bitmap.c two-level bitmap (C) Peter T. Breuer (ptb@ot.uc3m.es) 2003
 *
 * bitmap_create  - sets up the bitmap structure
 * bitmap_destroy - destroys the bitmap structure
 *
 * additions, Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.:
 * - added disk storage for bitmap
 * - changes to allow various bitmap chunk sizes
 */

/*
 * Still to do:
 *
 * flush after percent set rather than just time based. (maybe both).
 */

18
#include <linux/blkdev.h>
19 20 21 22 23 24 25 26 27 28
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/buffer_head.h>
29
#include <linux/seq_file.h>
30
#include "md.h"
31
#include "bitmap.h"
32

33
static inline char *bmname(struct bitmap *bitmap)
34 35 36 37 38 39 40 41 42 43 44 45 46 47
{
	return bitmap->mddev ? mdname(bitmap->mddev) : "mdX";
}

/*
 * check a page and, if necessary, allocate it (or hijack it if the alloc fails)
 *
 * 1) check to see if this page is allocated, if it's not then try to alloc
 * 2) if the alloc fails, set the page's hijacked flag so we'll use the
 *    page pointer directly as a counter
 *
 * if we find our page, we increment the page's refcount so that it stays
 * allocated while we're using it
 */
48
static int bitmap_checkpage(struct bitmap_counts *bitmap,
49
			    unsigned long page, int create)
50 51
__releases(bitmap->lock)
__acquires(bitmap->lock)
52 53 54 55
{
	unsigned char *mappage;

	if (page >= bitmap->pages) {
56 57 58 59
		/* This can happen if bitmap_start_sync goes beyond
		 * End-of-device while looking for a whole page.
		 * It is harmless.
		 */
60 61 62 63 64 65 66 67 68 69 70 71 72 73
		return -EINVAL;
	}

	if (bitmap->bp[page].hijacked) /* it's hijacked, don't try to alloc */
		return 0;

	if (bitmap->bp[page].map) /* page is already allocated, just return */
		return 0;

	if (!create)
		return -ENOENT;

	/* this page has not been allocated yet */

74
	spin_unlock_irq(&bitmap->lock);
75 76 77 78 79 80 81 82 83 84 85 86 87
	/* It is possible that this is being called inside a
	 * prepare_to_wait/finish_wait loop from raid5c:make_request().
	 * In general it is not permitted to sleep in that context as it
	 * can cause the loop to spin freely.
	 * That doesn't apply here as we can only reach this point
	 * once with any loop.
	 * When this function completes, either bp[page].map or
	 * bp[page].hijacked.  In either case, this function will
	 * abort before getting to this point again.  So there is
	 * no risk of a free-spin, and so it is safe to assert
	 * that sleeping here is allowed.
	 */
	sched_annotate_sleep();
88
	mappage = kzalloc(PAGE_SIZE, GFP_NOIO);
89 90 91
	spin_lock_irq(&bitmap->lock);

	if (mappage == NULL) {
92
		pr_debug("md/bitmap: map page allocation failed, hijacking\n");
93 94 95 96
		/* failed - set the hijacked flag so that we can use the
		 * pointer as a counter */
		if (!bitmap->bp[page].map)
			bitmap->bp[page].hijacked = 1;
97 98
	} else if (bitmap->bp[page].map ||
		   bitmap->bp[page].hijacked) {
99
		/* somebody beat us to getting the page */
100
		kfree(mappage);
101
	} else {
102

103
		/* no page was in place and we have one, so install it */
104

105 106 107
		bitmap->bp[page].map = mappage;
		bitmap->missing_pages--;
	}
108 109 110 111 112 113
	return 0;
}

/* if page is completely empty, put it back on the free list, or dealloc it */
/* if page was hijacked, unmark the flag so it might get alloced next time */
/* Note: lock should be held when calling this */
114
static void bitmap_checkfree(struct bitmap_counts *bitmap, unsigned long page)
115 116 117 118 119 120 121 122 123 124 125
{
	char *ptr;

	if (bitmap->bp[page].count) /* page is still busy */
		return;

	/* page is no longer in use, it can be released */

	if (bitmap->bp[page].hijacked) { /* page was hijacked, undo this now */
		bitmap->bp[page].hijacked = 0;
		bitmap->bp[page].map = NULL;
126 127 128 129 130
	} else {
		/* normal case, free the page */
		ptr = bitmap->bp[page].map;
		bitmap->bp[page].map = NULL;
		bitmap->missing_pages++;
131
		kfree(ptr);
132 133 134 135 136 137 138 139 140 141 142
	}
}

/*
 * bitmap file handling - read and write the bitmap file and its superblock
 */

/*
 * basic page I/O operations
 */

143
/* IO operations when bitmap is stored near all superblocks */
144 145 146
static int read_sb_page(struct mddev *mddev, loff_t offset,
			struct page *page,
			unsigned long index, int size)
147 148 149
{
	/* choose a good rdev and read the page from there */

150
	struct md_rdev *rdev;
151 152
	sector_t target;

N
NeilBrown 已提交
153
	rdev_for_each(rdev, mddev) {
154 155
		if (! test_bit(In_sync, &rdev->flags)
		    || test_bit(Faulty, &rdev->flags))
156 157
			continue;

J
Jonathan Brassow 已提交
158
		target = offset + index * (PAGE_SIZE/512);
159

160
		if (sync_page_io(rdev, target,
161
				 roundup(size, bdev_logical_block_size(rdev->bdev)),
J
Jonathan Brassow 已提交
162
				 page, READ, true)) {
163
			page->index = index;
164
			return 0;
165 166
		}
	}
167
	return -EIO;
168 169
}

170
static struct md_rdev *next_active_rdev(struct md_rdev *rdev, struct mddev *mddev)
171 172 173 174 175 176 177
{
	/* Iterate the disks of an mddev, using rcu to protect access to the
	 * linked list, and raising the refcount of devices we return to ensure
	 * they don't disappear while in use.
	 * As devices are only added or removed when raid_disk is < 0 and
	 * nr_pending is 0 and In_sync is clear, the entries we return will
	 * still be in the same position on the list when we re-enter
178
	 * list_for_each_entry_continue_rcu.
179 180 181 182 183
	 *
	 * Note that if entered with 'rdev == NULL' to start at the
	 * beginning, we temporarily assign 'rdev' to an address which
	 * isn't really an rdev, but which can be used by
	 * list_for_each_entry_continue_rcu() to find the first entry.
184 185 186 187
	 */
	rcu_read_lock();
	if (rdev == NULL)
		/* start at the beginning */
188
		rdev = list_entry(&mddev->disks, struct md_rdev, same_set);
189 190 191 192
	else {
		/* release the previous rdev and start from there. */
		rdev_dec_pending(rdev, mddev);
	}
193
	list_for_each_entry_continue_rcu(rdev, &mddev->disks, same_set) {
194 195 196 197 198 199 200 201 202 203 204 205
		if (rdev->raid_disk >= 0 &&
		    !test_bit(Faulty, &rdev->flags)) {
			/* this is a usable devices */
			atomic_inc(&rdev->nr_pending);
			rcu_read_unlock();
			return rdev;
		}
	}
	rcu_read_unlock();
	return NULL;
}

206
static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait)
207
{
208
	struct md_rdev *rdev = NULL;
209
	struct block_device *bdev;
210
	struct mddev *mddev = bitmap->mddev;
211
	struct bitmap_storage *store = &bitmap->storage;
212

213
	while ((rdev = next_active_rdev(rdev, mddev)) != NULL) {
214 215
		int size = PAGE_SIZE;
		loff_t offset = mddev->bitmap_info.offset;
216 217 218

		bdev = (rdev->meta_bdev) ? rdev->meta_bdev : rdev->bdev;

219 220 221 222 223
		if (page->index == store->file_pages-1) {
			int last_page_size = store->bytes & (PAGE_SIZE-1);
			if (last_page_size == 0)
				last_page_size = PAGE_SIZE;
			size = roundup(last_page_size,
224
				       bdev_logical_block_size(bdev));
225
		}
226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265
		/* Just make sure we aren't corrupting data or
		 * metadata
		 */
		if (mddev->external) {
			/* Bitmap could be anywhere. */
			if (rdev->sb_start + offset + (page->index
						       * (PAGE_SIZE/512))
			    > rdev->data_offset
			    &&
			    rdev->sb_start + offset
			    < (rdev->data_offset + mddev->dev_sectors
			     + (PAGE_SIZE/512)))
				goto bad_alignment;
		} else if (offset < 0) {
			/* DATA  BITMAP METADATA  */
			if (offset
			    + (long)(page->index * (PAGE_SIZE/512))
			    + size/512 > 0)
				/* bitmap runs in to metadata */
				goto bad_alignment;
			if (rdev->data_offset + mddev->dev_sectors
			    > rdev->sb_start + offset)
				/* data runs in to bitmap */
				goto bad_alignment;
		} else if (rdev->sb_start < rdev->data_offset) {
			/* METADATA BITMAP DATA */
			if (rdev->sb_start
			    + offset
			    + page->index*(PAGE_SIZE/512) + size/512
			    > rdev->data_offset)
				/* bitmap runs in to data */
				goto bad_alignment;
		} else {
			/* DATA METADATA BITMAP - no problems */
		}
		md_super_write(mddev, rdev,
			       rdev->sb_start + offset
			       + page->index * (PAGE_SIZE/512),
			       size,
			       page);
266
	}
267 268

	if (wait)
269
		md_super_wait(mddev);
270
	return 0;
271 272 273

 bad_alignment:
	return -EINVAL;
274 275
}

276
static void bitmap_file_kick(struct bitmap *bitmap);
277
/*
278
 * write out a page to a file
279
 */
280
static void write_page(struct bitmap *bitmap, struct page *page, int wait)
281
{
282
	struct buffer_head *bh;
283

284
	if (bitmap->storage.file == NULL) {
285 286
		switch (write_sb_page(bitmap, page, wait)) {
		case -EINVAL:
287
			set_bit(BITMAP_WRITE_ERROR, &bitmap->flags);
288
		}
289
	} else {
290

291
		bh = page_buffers(page);
292

293 294 295 296
		while (bh && bh->b_blocknr) {
			atomic_inc(&bitmap->pending_writes);
			set_buffer_locked(bh);
			set_buffer_mapped(bh);
J
Jens Axboe 已提交
297
			submit_bh(WRITE | REQ_SYNC, bh);
298 299
			bh = bh->b_this_page;
		}
300

301
		if (wait)
302 303
			wait_event(bitmap->write_wait,
				   atomic_read(&bitmap->pending_writes)==0);
304
	}
305
	if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags))
306
		bitmap_file_kick(bitmap);
307 308 309 310 311
}

static void end_bitmap_write(struct buffer_head *bh, int uptodate)
{
	struct bitmap *bitmap = bh->b_private;
312

313 314
	if (!uptodate)
		set_bit(BITMAP_WRITE_ERROR, &bitmap->flags);
315 316 317
	if (atomic_dec_and_test(&bitmap->pending_writes))
		wake_up(&bitmap->write_wait);
}
318

319 320 321 322 323 324 325 326 327 328
/* copied from buffer.c */
static void
__clear_page_buffers(struct page *page)
{
	ClearPagePrivate(page);
	set_page_private(page, 0);
	page_cache_release(page);
}
static void free_buffers(struct page *page)
{
329
	struct buffer_head *bh;
330

331 332 333 334
	if (!PagePrivate(page))
		return;

	bh = page_buffers(page);
335 336 337 338
	while (bh) {
		struct buffer_head *next = bh->b_this_page;
		free_buffer_head(bh);
		bh = next;
339
	}
340 341
	__clear_page_buffers(page);
	put_page(page);
342 343
}

344 345 346 347 348 349 350
/* read a page from a file.
 * We both read the page, and attach buffers to the page to record the
 * address of each block (using bmap).  These addresses will be used
 * to write the block later, completely bypassing the filesystem.
 * This usage is similar to how swap files are handled, and allows us
 * to write to a file with no concerns of memory allocation failing.
 */
351 352 353 354
static int read_page(struct file *file, unsigned long index,
		     struct bitmap *bitmap,
		     unsigned long count,
		     struct page *page)
355
{
356
	int ret = 0;
A
Al Viro 已提交
357
	struct inode *inode = file_inode(file);
358 359
	struct buffer_head *bh;
	sector_t block;
360

361 362
	pr_debug("read bitmap file (%dB @ %llu)\n", (int)PAGE_SIZE,
		 (unsigned long long)index << PAGE_SHIFT);
363

364 365
	bh = alloc_page_buffers(page, 1<<inode->i_blkbits, 0);
	if (!bh) {
366
		ret = -ENOMEM;
367 368
		goto out;
	}
369 370 371 372 373 374 375 376 377
	attach_page_buffers(page, bh);
	block = index << (PAGE_SHIFT - inode->i_blkbits);
	while (bh) {
		if (count == 0)
			bh->b_blocknr = 0;
		else {
			bh->b_blocknr = bmap(inode, block);
			if (bh->b_blocknr == 0) {
				/* Cannot use this file! */
378
				ret = -EINVAL;
379 380 381 382 383 384 385 386 387 388
				goto out;
			}
			bh->b_bdev = inode->i_sb->s_bdev;
			if (count < (1<<inode->i_blkbits))
				count = 0;
			else
				count -= (1<<inode->i_blkbits);

			bh->b_end_io = end_bitmap_write;
			bh->b_private = bitmap;
389 390 391 392
			atomic_inc(&bitmap->pending_writes);
			set_buffer_locked(bh);
			set_buffer_mapped(bh);
			submit_bh(READ, bh);
393 394 395 396 397
		}
		block++;
		bh = bh->b_this_page;
	}
	page->index = index;
398 399 400

	wait_event(bitmap->write_wait,
		   atomic_read(&bitmap->pending_writes)==0);
401
	if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags))
402
		ret = -EIO;
403
out:
404 405
	if (ret)
		printk(KERN_ALERT "md: bitmap read error: (%dB @ %llu): %d\n",
406 407
			(int)PAGE_SIZE,
			(unsigned long long)index << PAGE_SHIFT,
408 409
			ret);
	return ret;
410 411 412 413 414 415 416
}

/*
 * bitmap file superblock operations
 */

/* update the event counter and sync the superblock to disk */
417
void bitmap_update_sb(struct bitmap *bitmap)
418 419 420 421
{
	bitmap_super_t *sb;

	if (!bitmap || !bitmap->mddev) /* no bitmap for this array */
422
		return;
423 424
	if (bitmap->mddev->bitmap_info.external)
		return;
425
	if (!bitmap->storage.sb_page) /* no superblock */
426
		return;
427
	sb = kmap_atomic(bitmap->storage.sb_page);
428
	sb->events = cpu_to_le64(bitmap->mddev->events);
429
	if (bitmap->mddev->events < bitmap->events_cleared)
430 431
		/* rocking back to read-only */
		bitmap->events_cleared = bitmap->mddev->events;
432 433
	sb->events_cleared = cpu_to_le64(bitmap->events_cleared);
	sb->state = cpu_to_le32(bitmap->flags);
434 435 436
	/* Just in case these have been changed via sysfs: */
	sb->daemon_sleep = cpu_to_le32(bitmap->mddev->bitmap_info.daemon_sleep/HZ);
	sb->write_behind = cpu_to_le32(bitmap->mddev->bitmap_info.max_write_behind);
437 438 439
	/* This might have been changed by a reshape */
	sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors);
	sb->chunksize = cpu_to_le32(bitmap->mddev->bitmap_info.chunksize);
G
Goldwyn Rodrigues 已提交
440
	sb->nodes = cpu_to_le32(bitmap->mddev->bitmap_info.nodes);
441 442
	sb->sectors_reserved = cpu_to_le32(bitmap->mddev->
					   bitmap_info.space);
443
	kunmap_atomic(sb);
444
	write_page(bitmap, bitmap->storage.sb_page, 1);
445 446 447 448 449 450 451
}

/* print out the bitmap file superblock */
void bitmap_print_sb(struct bitmap *bitmap)
{
	bitmap_super_t *sb;

452
	if (!bitmap || !bitmap->storage.sb_page)
453
		return;
454
	sb = kmap_atomic(bitmap->storage.sb_page);
455
	printk(KERN_DEBUG "%s: bitmap file superblock:\n", bmname(bitmap));
456 457 458
	printk(KERN_DEBUG "         magic: %08x\n", le32_to_cpu(sb->magic));
	printk(KERN_DEBUG "       version: %d\n", le32_to_cpu(sb->version));
	printk(KERN_DEBUG "          uuid: %08x.%08x.%08x.%08x\n",
459 460 461 462
					*(__u32 *)(sb->uuid+0),
					*(__u32 *)(sb->uuid+4),
					*(__u32 *)(sb->uuid+8),
					*(__u32 *)(sb->uuid+12));
463
	printk(KERN_DEBUG "        events: %llu\n",
464
			(unsigned long long) le64_to_cpu(sb->events));
465
	printk(KERN_DEBUG "events cleared: %llu\n",
466
			(unsigned long long) le64_to_cpu(sb->events_cleared));
467 468 469 470 471
	printk(KERN_DEBUG "         state: %08x\n", le32_to_cpu(sb->state));
	printk(KERN_DEBUG "     chunksize: %d B\n", le32_to_cpu(sb->chunksize));
	printk(KERN_DEBUG "  daemon sleep: %ds\n", le32_to_cpu(sb->daemon_sleep));
	printk(KERN_DEBUG "     sync size: %llu KB\n",
			(unsigned long long)le64_to_cpu(sb->sync_size)/2);
472
	printk(KERN_DEBUG "max write behind: %d\n", le32_to_cpu(sb->write_behind));
473
	kunmap_atomic(sb);
474 475
}

476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491
/*
 * bitmap_new_disk_sb
 * @bitmap
 *
 * This function is somewhat the reverse of bitmap_read_sb.  bitmap_read_sb
 * reads and verifies the on-disk bitmap superblock and populates bitmap_info.
 * This function verifies 'bitmap_info' and populates the on-disk bitmap
 * structure, which is to be written to disk.
 *
 * Returns: 0 on success, -Exxx on error
 */
static int bitmap_new_disk_sb(struct bitmap *bitmap)
{
	bitmap_super_t *sb;
	unsigned long chunksize, daemon_sleep, write_behind;

492
	bitmap->storage.sb_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
493 494
	if (bitmap->storage.sb_page == NULL)
		return -ENOMEM;
495
	bitmap->storage.sb_page->index = 0;
496

497
	sb = kmap_atomic(bitmap->storage.sb_page);
498 499 500 501 502 503 504

	sb->magic = cpu_to_le32(BITMAP_MAGIC);
	sb->version = cpu_to_le32(BITMAP_MAJOR_HI);

	chunksize = bitmap->mddev->bitmap_info.chunksize;
	BUG_ON(!chunksize);
	if (!is_power_of_2(chunksize)) {
505
		kunmap_atomic(sb);
506 507 508 509 510 511
		printk(KERN_ERR "bitmap chunksize not a power of 2\n");
		return -EINVAL;
	}
	sb->chunksize = cpu_to_le32(chunksize);

	daemon_sleep = bitmap->mddev->bitmap_info.daemon_sleep;
512
	if (!daemon_sleep || (daemon_sleep > MAX_SCHEDULE_TIMEOUT)) {
513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533
		printk(KERN_INFO "Choosing daemon_sleep default (5 sec)\n");
		daemon_sleep = 5 * HZ;
	}
	sb->daemon_sleep = cpu_to_le32(daemon_sleep);
	bitmap->mddev->bitmap_info.daemon_sleep = daemon_sleep;

	/*
	 * FIXME: write_behind for RAID1.  If not specified, what
	 * is a good choice?  We choose COUNTER_MAX / 2 arbitrarily.
	 */
	write_behind = bitmap->mddev->bitmap_info.max_write_behind;
	if (write_behind > COUNTER_MAX)
		write_behind = COUNTER_MAX / 2;
	sb->write_behind = cpu_to_le32(write_behind);
	bitmap->mddev->bitmap_info.max_write_behind = write_behind;

	/* keep the array size field of the bitmap superblock up to date */
	sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors);

	memcpy(sb->uuid, bitmap->mddev->uuid, 16);

534
	set_bit(BITMAP_STALE, &bitmap->flags);
535
	sb->state = cpu_to_le32(bitmap->flags);
536 537
	bitmap->events_cleared = bitmap->mddev->events;
	sb->events_cleared = cpu_to_le64(bitmap->mddev->events);
538
	bitmap->mddev->bitmap_info.nodes = 0;
539

540
	kunmap_atomic(sb);
541 542 543 544

	return 0;
}

545 546 547 548 549
/* read the superblock from the bitmap file and initialize some bitmap fields */
static int bitmap_read_sb(struct bitmap *bitmap)
{
	char *reason = NULL;
	bitmap_super_t *sb;
550
	unsigned long chunksize, daemon_sleep, write_behind;
551
	unsigned long long events;
G
Goldwyn Rodrigues 已提交
552
	int nodes = 0;
553
	unsigned long sectors_reserved = 0;
554
	int err = -EINVAL;
555
	struct page *sb_page;
556
	loff_t offset = bitmap->mddev->bitmap_info.offset;
557

558
	if (!bitmap->storage.file && !bitmap->mddev->bitmap_info.offset) {
559 560 561
		chunksize = 128 * 1024 * 1024;
		daemon_sleep = 5 * HZ;
		write_behind = 0;
562
		set_bit(BITMAP_STALE, &bitmap->flags);
563 564 565
		err = 0;
		goto out_no_sb;
	}
566
	/* page 0 is the superblock, read it... */
567 568 569
	sb_page = alloc_page(GFP_KERNEL);
	if (!sb_page)
		return -ENOMEM;
570
	bitmap->storage.sb_page = sb_page;
571

572
re_read:
573 574
	/* If cluster_slot is set, the cluster is setup */
	if (bitmap->cluster_slot >= 0) {
575
		sector_t bm_blocks = bitmap->mddev->resync_max_sectors;
576

577 578
		sector_div(bm_blocks,
			   bitmap->mddev->bitmap_info.chunksize >> 9);
579 580 581
		/* bits to bytes */
		bm_blocks = ((bm_blocks+7) >> 3) + sizeof(bitmap_super_t);
		/* to 4k blocks */
582
		bm_blocks = DIV_ROUND_UP_SECTOR_T(bm_blocks, 4096);
583
		offset = bitmap->mddev->bitmap_info.offset + (bitmap->cluster_slot * (bm_blocks << 3));
584
		pr_info("%s:%d bm slot: %d offset: %llu\n", __func__, __LINE__,
585
			bitmap->cluster_slot, offset);
586 587
	}

588 589
	if (bitmap->storage.file) {
		loff_t isize = i_size_read(bitmap->storage.file->f_mapping->host);
590 591
		int bytes = isize > PAGE_SIZE ? PAGE_SIZE : isize;

592
		err = read_page(bitmap->storage.file, 0,
593
				bitmap, bytes, sb_page);
594
	} else {
595
		err = read_sb_page(bitmap->mddev,
596
				   offset,
597 598
				   sb_page,
				   0, sizeof(bitmap_super_t));
599
	}
600
	if (err)
601 602
		return err;

603
	err = -EINVAL;
604
	sb = kmap_atomic(sb_page);
605 606

	chunksize = le32_to_cpu(sb->chunksize);
607
	daemon_sleep = le32_to_cpu(sb->daemon_sleep) * HZ;
608
	write_behind = le32_to_cpu(sb->write_behind);
609
	sectors_reserved = le32_to_cpu(sb->sectors_reserved);
610 611
	/* Setup nodes/clustername only if bitmap version is
	 * cluster-compatible
612
	 */
613
	if (sb->version == cpu_to_le32(BITMAP_MAJOR_CLUSTERED)) {
614 615 616 617
		nodes = le32_to_cpu(sb->nodes);
		strlcpy(bitmap->mddev->bitmap_info.cluster_name,
				sb->cluster_name, 64);
	}
618 619 620 621

	/* verify that the bitmap-specific fields are valid */
	if (sb->magic != cpu_to_le32(BITMAP_MAGIC))
		reason = "bad magic";
622
	else if (le32_to_cpu(sb->version) < BITMAP_MAJOR_LO ||
623
		 le32_to_cpu(sb->version) > BITMAP_MAJOR_CLUSTERED)
624
		reason = "unrecognized superblock version";
625
	else if (chunksize < 512)
626
		reason = "bitmap chunksize too small";
J
Jonathan Brassow 已提交
627
	else if (!is_power_of_2(chunksize))
628
		reason = "bitmap chunksize not a power of 2";
629
	else if (daemon_sleep < 1 || daemon_sleep > MAX_SCHEDULE_TIMEOUT)
630
		reason = "daemon sleep period out of range";
631 632
	else if (write_behind > COUNTER_MAX)
		reason = "write-behind limit out of range (0 - 16383)";
633 634 635 636 637 638 639 640 641
	if (reason) {
		printk(KERN_INFO "%s: invalid bitmap file superblock: %s\n",
			bmname(bitmap), reason);
		goto out;
	}

	/* keep the array size field of the bitmap superblock up to date */
	sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors);

642 643 644 645 646 647 648 649 650 651 652 653
	if (bitmap->mddev->persistent) {
		/*
		 * We have a persistent array superblock, so compare the
		 * bitmap's UUID and event counter to the mddev's
		 */
		if (memcmp(sb->uuid, bitmap->mddev->uuid, 16)) {
			printk(KERN_INFO
			       "%s: bitmap superblock UUID mismatch\n",
			       bmname(bitmap));
			goto out;
		}
		events = le64_to_cpu(sb->events);
654
		if (!nodes && (events < bitmap->mddev->events)) {
655 656 657 658 659
			printk(KERN_INFO
			       "%s: bitmap file is out of date (%llu < %llu) "
			       "-- forcing full recovery\n",
			       bmname(bitmap), events,
			       (unsigned long long) bitmap->mddev->events);
660
			set_bit(BITMAP_STALE, &bitmap->flags);
661
		}
662
	}
663

664
	/* assign fields using values from superblock */
665
	bitmap->flags |= le32_to_cpu(sb->state);
666
	if (le32_to_cpu(sb->version) == BITMAP_MAJOR_HOSTENDIAN)
667
		set_bit(BITMAP_HOSTENDIAN, &bitmap->flags);
668
	bitmap->events_cleared = le64_to_cpu(sb->events_cleared);
G
Goldwyn Rodrigues 已提交
669
	strlcpy(bitmap->mddev->bitmap_info.cluster_name, sb->cluster_name, 64);
670
	err = 0;
671

672
out:
673
	kunmap_atomic(sb);
674 675
	/* Assiging chunksize is required for "re_read" */
	bitmap->mddev->bitmap_info.chunksize = chunksize;
676
	if (err == 0 && nodes && (bitmap->cluster_slot < 0)) {
677 678 679 680 681 682 683 684 685 686 687
		err = md_setup_cluster(bitmap->mddev, nodes);
		if (err) {
			pr_err("%s: Could not setup cluster service (%d)\n",
					bmname(bitmap), err);
			goto out_no_sb;
		}
		bitmap->cluster_slot = md_cluster_ops->slot_number(bitmap->mddev);
		goto re_read;
	}


688
out_no_sb:
689
	if (test_bit(BITMAP_STALE, &bitmap->flags))
690 691 692 693
		bitmap->events_cleared = bitmap->mddev->events;
	bitmap->mddev->bitmap_info.chunksize = chunksize;
	bitmap->mddev->bitmap_info.daemon_sleep = daemon_sleep;
	bitmap->mddev->bitmap_info.max_write_behind = write_behind;
G
Goldwyn Rodrigues 已提交
694
	bitmap->mddev->bitmap_info.nodes = nodes;
695 696 697
	if (bitmap->mddev->bitmap_info.space == 0 ||
	    bitmap->mddev->bitmap_info.space > sectors_reserved)
		bitmap->mddev->bitmap_info.space = sectors_reserved;
698
	if (err) {
699
		bitmap_print_sb(bitmap);
700
		if (bitmap->cluster_slot < 0)
701 702
			md_cluster_stop(bitmap->mddev);
	}
703 704 705 706 707 708 709
	return err;
}

/*
 * general bitmap file operations
 */

710 711 712 713 714 715
/*
 * on-disk bitmap:
 *
 * Use one bit per "chunk" (block set). We do the disk I/O on the bitmap
 * file a page at a time. There's a superblock at the start of the file.
 */
716
/* calculate the index of the page that contains this bit */
717 718
static inline unsigned long file_page_index(struct bitmap_storage *store,
					    unsigned long chunk)
719
{
720
	if (store->sb_page)
721 722
		chunk += sizeof(bitmap_super_t) << 3;
	return chunk >> PAGE_BIT_SHIFT;
723 724 725
}

/* calculate the (bit) offset of this bit within a page */
726 727
static inline unsigned long file_page_offset(struct bitmap_storage *store,
					     unsigned long chunk)
728
{
729
	if (store->sb_page)
730 731
		chunk += sizeof(bitmap_super_t) << 3;
	return chunk & (PAGE_BITS - 1);
732 733 734 735 736 737
}

/*
 * return a pointer to the page in the filemap that contains the given bit
 *
 */
738
static inline struct page *filemap_get_page(struct bitmap_storage *store,
739
					    unsigned long chunk)
740
{
741
	if (file_page_index(store, chunk) >= store->file_pages)
742
		return NULL;
743
	return store->filemap[file_page_index(store, chunk)];
744 745
}

746
static int bitmap_storage_alloc(struct bitmap_storage *store,
747 748
				unsigned long chunks, int with_super,
				int slot_number)
749
{
750
	int pnum, offset = 0;
751 752 753 754 755 756 757 758
	unsigned long num_pages;
	unsigned long bytes;

	bytes = DIV_ROUND_UP(chunks, 8);
	if (with_super)
		bytes += sizeof(bitmap_super_t);

	num_pages = DIV_ROUND_UP(bytes, PAGE_SIZE);
759
	offset = slot_number * (num_pages - 1);
760 761 762 763 764 765 766

	store->filemap = kmalloc(sizeof(struct page *)
				 * num_pages, GFP_KERNEL);
	if (!store->filemap)
		return -ENOMEM;

	if (with_super && !store->sb_page) {
767
		store->sb_page = alloc_page(GFP_KERNEL|__GFP_ZERO);
768 769 770
		if (store->sb_page == NULL)
			return -ENOMEM;
	}
771

772 773 774 775
	pnum = 0;
	if (store->sb_page) {
		store->filemap[0] = store->sb_page;
		pnum = 1;
776
		store->sb_page->index = offset;
777
	}
778

779
	for ( ; pnum < num_pages; pnum++) {
780
		store->filemap[pnum] = alloc_page(GFP_KERNEL|__GFP_ZERO);
781 782 783 784
		if (!store->filemap[pnum]) {
			store->file_pages = pnum;
			return -ENOMEM;
		}
785
		store->filemap[pnum]->index = pnum + offset;
786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801
	}
	store->file_pages = pnum;

	/* We need 4 bits per page, rounded up to a multiple
	 * of sizeof(unsigned long) */
	store->filemap_attr = kzalloc(
		roundup(DIV_ROUND_UP(num_pages*4, 8), sizeof(unsigned long)),
		GFP_KERNEL);
	if (!store->filemap_attr)
		return -ENOMEM;

	store->bytes = bytes;

	return 0;
}

802
static void bitmap_file_unmap(struct bitmap_storage *store)
803 804 805
{
	struct page **map, *sb_page;
	int pages;
806
	struct file *file;
807

808
	file = store->file;
809 810 811
	map = store->filemap;
	pages = store->file_pages;
	sb_page = store->sb_page;
812 813

	while (pages--)
814
		if (map[pages] != sb_page) /* 0 is sb_page, release it below */
815
			free_buffers(map[pages]);
816
	kfree(map);
817
	kfree(store->filemap_attr);
818

819 820
	if (sb_page)
		free_buffers(sb_page);
821

822
	if (file) {
A
Al Viro 已提交
823
		struct inode *inode = file_inode(file);
824
		invalidate_mapping_pages(inode->i_mapping, 0, -1);
825
		fput(file);
826
	}
827 828 829 830 831 832 833 834 835 836 837
}

/*
 * bitmap_file_kick - if an error occurs while manipulating the bitmap file
 * then it is no longer reliable, so we stop using it and we mark the file
 * as failed in the superblock
 */
static void bitmap_file_kick(struct bitmap *bitmap)
{
	char *path, *ptr = NULL;

838
	if (!test_and_set_bit(BITMAP_STALE, &bitmap->flags)) {
839
		bitmap_update_sb(bitmap);
840

841
		if (bitmap->storage.file) {
842 843
			path = kmalloc(PAGE_SIZE, GFP_KERNEL);
			if (path)
M
Miklos Szeredi 已提交
844
				ptr = file_path(bitmap->storage.file,
845
					     path, PAGE_SIZE);
C
Christoph Hellwig 已提交
846

847 848
			printk(KERN_ALERT
			      "%s: kicking failed bitmap file %s from array!\n",
C
Christoph Hellwig 已提交
849
			      bmname(bitmap), IS_ERR(ptr) ? "" : ptr);
850

851 852 853 854 855
			kfree(path);
		} else
			printk(KERN_ALERT
			       "%s: disabling internal bitmap due to errors\n",
			       bmname(bitmap));
856
	}
857 858 859
}

enum bitmap_page_attr {
860
	BITMAP_PAGE_DIRTY = 0,     /* there are set bits that need to be synced */
861 862
	BITMAP_PAGE_PENDING = 1,   /* there are bits that are being cleaned.
				    * i.e. counter is 1 or 2. */
863
	BITMAP_PAGE_NEEDWRITE = 2, /* there are cleared bits that need to be synced */
864 865
};

866 867
static inline void set_page_attr(struct bitmap *bitmap, int pnum,
				 enum bitmap_page_attr attr)
868
{
869
	set_bit((pnum<<2) + attr, bitmap->storage.filemap_attr);
870 871
}

872 873
static inline void clear_page_attr(struct bitmap *bitmap, int pnum,
				   enum bitmap_page_attr attr)
874
{
875
	clear_bit((pnum<<2) + attr, bitmap->storage.filemap_attr);
876 877
}

878 879
static inline int test_page_attr(struct bitmap *bitmap, int pnum,
				 enum bitmap_page_attr attr)
880
{
881
	return test_bit((pnum<<2) + attr, bitmap->storage.filemap_attr);
882 883
}

884 885 886 887 888 889
static inline int test_and_clear_page_attr(struct bitmap *bitmap, int pnum,
					   enum bitmap_page_attr attr)
{
	return test_and_clear_bit((pnum<<2) + attr,
				  bitmap->storage.filemap_attr);
}
890 891 892 893 894 895 896 897 898 899
/*
 * bitmap_file_set_bit -- called before performing a write to the md device
 * to set (and eventually sync) a particular bit in the bitmap file
 *
 * we set the bit immediately, then we record the page number so that
 * when an unplug occurs, we can flush the dirty pages out to disk
 */
static void bitmap_file_set_bit(struct bitmap *bitmap, sector_t block)
{
	unsigned long bit;
900
	struct page *page;
901
	void *kaddr;
902
	unsigned long chunk = block >> bitmap->counts.chunkshift;
903

904
	page = filemap_get_page(&bitmap->storage, chunk);
905 906
	if (!page)
		return;
907
	bit = file_page_offset(&bitmap->storage, chunk);
908

909
	/* set the bit */
910
	kaddr = kmap_atomic(page);
911
	if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags))
912 913
		set_bit(bit, kaddr);
	else
914
		set_bit_le(bit, kaddr);
915
	kunmap_atomic(kaddr);
916
	pr_debug("set file bit %lu page %lu\n", bit, page->index);
917
	/* record page number so it gets flushed to disk when unplug occurs */
918
	set_page_attr(bitmap, page->index, BITMAP_PAGE_DIRTY);
919 920
}

921 922 923 924 925
static void bitmap_file_clear_bit(struct bitmap *bitmap, sector_t block)
{
	unsigned long bit;
	struct page *page;
	void *paddr;
926
	unsigned long chunk = block >> bitmap->counts.chunkshift;
927

928
	page = filemap_get_page(&bitmap->storage, chunk);
929 930
	if (!page)
		return;
931
	bit = file_page_offset(&bitmap->storage, chunk);
932
	paddr = kmap_atomic(page);
933
	if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags))
934 935
		clear_bit(bit, paddr);
	else
936
		clear_bit_le(bit, paddr);
937
	kunmap_atomic(paddr);
938 939
	if (!test_page_attr(bitmap, page->index, BITMAP_PAGE_NEEDWRITE)) {
		set_page_attr(bitmap, page->index, BITMAP_PAGE_PENDING);
940 941 942 943
		bitmap->allclean = 0;
	}
}

944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965
static int bitmap_file_test_bit(struct bitmap *bitmap, sector_t block)
{
	unsigned long bit;
	struct page *page;
	void *paddr;
	unsigned long chunk = block >> bitmap->counts.chunkshift;
	int set = 0;

	page = filemap_get_page(&bitmap->storage, chunk);
	if (!page)
		return -EINVAL;
	bit = file_page_offset(&bitmap->storage, chunk);
	paddr = kmap_atomic(page);
	if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags))
		set = test_bit(bit, paddr);
	else
		set = test_bit_le(bit, paddr);
	kunmap_atomic(paddr);
	return set;
}


966 967 968
/* this gets called when the md device is ready to unplug its underlying
 * (slave) device queues -- before we let any writes go down, we need to
 * sync the dirty pages of the bitmap file to disk */
969
void bitmap_unplug(struct bitmap *bitmap)
970
{
971
	unsigned long i;
972
	int dirty, need_write;
973

974 975
	if (!bitmap || !bitmap->storage.filemap ||
	    test_bit(BITMAP_STALE, &bitmap->flags))
976
		return;
977 978 979

	/* look at each page to see if there are any set bits that need to be
	 * flushed out to disk */
980
	for (i = 0; i < bitmap->storage.file_pages; i++) {
981
		if (!bitmap->storage.filemap)
982
			return;
983 984 985 986
		dirty = test_and_clear_page_attr(bitmap, i, BITMAP_PAGE_DIRTY);
		need_write = test_and_clear_page_attr(bitmap, i,
						      BITMAP_PAGE_NEEDWRITE);
		if (dirty || need_write) {
987
			clear_page_attr(bitmap, i, BITMAP_PAGE_PENDING);
988 989
			write_page(bitmap, bitmap->storage.filemap[i], 0);
		}
990
	}
991 992 993 994 995 996
	if (bitmap->storage.file)
		wait_event(bitmap->write_wait,
			   atomic_read(&bitmap->pending_writes)==0);
	else
		md_super_wait(bitmap->mddev);

997
	if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags))
998
		bitmap_file_kick(bitmap);
999
}
1000
EXPORT_SYMBOL(bitmap_unplug);
1001

1002
static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed);
1003 1004 1005 1006 1007 1008 1009
/* * bitmap_init_from_disk -- called at bitmap_create time to initialize
 * the in-memory bitmap from the on-disk bitmap -- also, sets up the
 * memory mapping of the bitmap file
 * Special cases:
 *   if there's no bitmap file, or if the bitmap file had been
 *   previously kicked from the array, we mark all the bits as
 *   1's in order to cause a full resync.
1010 1011 1012
 *
 * We ignore all bits for sectors that end earlier than 'start'.
 * This is used when reading an out-of-date bitmap...
1013
 */
1014
static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
1015
{
1016
	unsigned long i, chunks, index, oldindex, bit, node_offset = 0;
1017
	struct page *page = NULL;
1018
	unsigned long bit_cnt = 0;
1019
	struct file *file;
1020
	unsigned long offset;
1021 1022
	int outofdate;
	int ret = -ENOSPC;
1023
	void *paddr;
1024
	struct bitmap_storage *store = &bitmap->storage;
1025

1026
	chunks = bitmap->counts.chunks;
1027
	file = store->file;
1028

1029 1030
	if (!file && !bitmap->mddev->bitmap_info.offset) {
		/* No permanent bitmap - fill with '1s'. */
1031 1032
		store->filemap = NULL;
		store->file_pages = 0;
1033 1034
		for (i = 0; i < chunks ; i++) {
			/* if the disk bit is set, set the memory bit */
1035
			int needed = ((sector_t)(i+1) << (bitmap->counts.chunkshift)
1036 1037
				      >= start);
			bitmap_set_memory_bits(bitmap,
1038
					       (sector_t)i << bitmap->counts.chunkshift,
1039 1040 1041 1042
					       needed);
		}
		return 0;
	}
1043

1044
	outofdate = test_bit(BITMAP_STALE, &bitmap->flags);
1045 1046 1047 1048
	if (outofdate)
		printk(KERN_INFO "%s: bitmap file is out of date, doing full "
			"recovery\n", bmname(bitmap));

1049
	if (file && i_size_read(file->f_mapping->host) < store->bytes) {
1050
		printk(KERN_INFO "%s: bitmap file too short %lu < %lu\n",
1051 1052 1053
		       bmname(bitmap),
		       (unsigned long) i_size_read(file->f_mapping->host),
		       store->bytes);
1054
		goto err;
1055
	}
1056

1057
	oldindex = ~0L;
1058
	offset = 0;
1059
	if (!bitmap->mddev->bitmap_info.external)
1060
		offset = sizeof(bitmap_super_t);
1061

1062 1063 1064
	if (mddev_is_clustered(bitmap->mddev))
		node_offset = bitmap->cluster_slot * (DIV_ROUND_UP(store->bytes, PAGE_SIZE));

1065
	for (i = 0; i < chunks; i++) {
1066
		int b;
1067 1068
		index = file_page_index(&bitmap->storage, i);
		bit = file_page_offset(&bitmap->storage, i);
1069
		if (index != oldindex) { /* this is a new page, read it in */
1070
			int count;
1071
			/* unmap the old page, we're done with it */
1072 1073
			if (index == store->file_pages-1)
				count = store->bytes - index * PAGE_SIZE;
1074 1075
			else
				count = PAGE_SIZE;
1076
			page = store->filemap[index];
1077 1078 1079 1080 1081 1082 1083 1084
			if (file)
				ret = read_page(file, index, bitmap,
						count, page);
			else
				ret = read_sb_page(
					bitmap->mddev,
					bitmap->mddev->bitmap_info.offset,
					page,
1085
					index + node_offset, count);
1086 1087

			if (ret)
1088
				goto err;
1089

1090 1091 1092 1093 1094
			oldindex = index;

			if (outofdate) {
				/*
				 * if bitmap is out of date, dirty the
1095
				 * whole page and write it out
1096
				 */
1097
				paddr = kmap_atomic(page);
1098
				memset(paddr + offset, 0xff,
1099
				       PAGE_SIZE - offset);
1100
				kunmap_atomic(paddr);
1101 1102 1103
				write_page(bitmap, page, 1);

				ret = -EIO;
1104 1105
				if (test_bit(BITMAP_WRITE_ERROR,
					     &bitmap->flags))
1106
					goto err;
1107 1108
			}
		}
1109
		paddr = kmap_atomic(page);
1110
		if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags))
1111
			b = test_bit(bit, paddr);
1112
		else
A
Akinobu Mita 已提交
1113
			b = test_bit_le(bit, paddr);
1114
		kunmap_atomic(paddr);
1115
		if (b) {
1116
			/* if the disk bit is set, set the memory bit */
1117
			int needed = ((sector_t)(i+1) << bitmap->counts.chunkshift
1118 1119
				      >= start);
			bitmap_set_memory_bits(bitmap,
1120
					       (sector_t)i << bitmap->counts.chunkshift,
1121
					       needed);
1122 1123
			bit_cnt++;
		}
1124
		offset = 0;
1125 1126 1127
	}

	printk(KERN_INFO "%s: bitmap initialized from disk: "
1128
	       "read %lu pages, set %lu of %lu bits\n",
1129
	       bmname(bitmap), store->file_pages,
1130
	       bit_cnt, chunks);
1131 1132

	return 0;
1133

1134 1135 1136
 err:
	printk(KERN_INFO "%s: bitmap initialisation failed: %d\n",
	       bmname(bitmap), ret);
1137 1138 1139
	return ret;
}

1140 1141 1142 1143 1144
void bitmap_write_all(struct bitmap *bitmap)
{
	/* We don't actually write all bitmap blocks here,
	 * just flag them as needing to be written
	 */
1145
	int i;
1146

1147
	if (!bitmap || !bitmap->storage.filemap)
1148
		return;
1149
	if (bitmap->storage.file)
1150 1151 1152
		/* Only one copy, so nothing needed */
		return;

1153
	for (i = 0; i < bitmap->storage.file_pages; i++)
1154
		set_page_attr(bitmap, i,
1155
			      BITMAP_PAGE_NEEDWRITE);
1156
	bitmap->allclean = 0;
1157 1158
}

1159 1160
static void bitmap_count_page(struct bitmap_counts *bitmap,
			      sector_t offset, int inc)
1161
{
1162
	sector_t chunk = offset >> bitmap->chunkshift;
1163 1164 1165 1166
	unsigned long page = chunk >> PAGE_COUNTER_SHIFT;
	bitmap->bp[page].count += inc;
	bitmap_checkfree(bitmap, page);
}
1167

1168
static void bitmap_set_pending(struct bitmap_counts *bitmap, sector_t offset)
1169 1170 1171 1172 1173 1174 1175 1176 1177
{
	sector_t chunk = offset >> bitmap->chunkshift;
	unsigned long page = chunk >> PAGE_COUNTER_SHIFT;
	struct bitmap_page *bp = &bitmap->bp[page];

	if (!bp->pending)
		bp->pending = 1;
}

1178
static bitmap_counter_t *bitmap_get_counter(struct bitmap_counts *bitmap,
N
NeilBrown 已提交
1179
					    sector_t offset, sector_t *blocks,
1180 1181 1182 1183 1184 1185 1186
					    int create);

/*
 * bitmap daemon -- periodically wakes up to clean bits and flush pages
 *			out to disk
 */

1187
void bitmap_daemon_work(struct mddev *mddev)
1188
{
1189
	struct bitmap *bitmap;
1190
	unsigned long j;
1191
	unsigned long nextpage;
N
NeilBrown 已提交
1192
	sector_t blocks;
1193
	struct bitmap_counts *counts;
1194

1195 1196 1197
	/* Use a mutex to guard daemon_work against
	 * bitmap_destroy.
	 */
1198
	mutex_lock(&mddev->bitmap_info.mutex);
1199 1200
	bitmap = mddev->bitmap;
	if (bitmap == NULL) {
1201
		mutex_unlock(&mddev->bitmap_info.mutex);
1202
		return;
1203
	}
1204
	if (time_before(jiffies, bitmap->daemon_lastrun
N
NeilBrown 已提交
1205
			+ mddev->bitmap_info.daemon_sleep))
1206 1207
		goto done;

1208
	bitmap->daemon_lastrun = jiffies;
1209
	if (bitmap->allclean) {
N
NeilBrown 已提交
1210
		mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;
1211
		goto done;
1212 1213
	}
	bitmap->allclean = 1;
1214

1215 1216 1217 1218
	/* Any file-page which is PENDING now needs to be written.
	 * So set NEEDWRITE now, then after we make any last-minute changes
	 * we will write it.
	 */
1219
	for (j = 0; j < bitmap->storage.file_pages; j++)
1220 1221
		if (test_and_clear_page_attr(bitmap, j,
					     BITMAP_PAGE_PENDING))
1222
			set_page_attr(bitmap, j,
1223 1224 1225 1226 1227 1228 1229 1230
				      BITMAP_PAGE_NEEDWRITE);

	if (bitmap->need_sync &&
	    mddev->bitmap_info.external == 0) {
		/* Arrange for superblock update as well as
		 * other changes */
		bitmap_super_t *sb;
		bitmap->need_sync = 0;
1231 1232
		if (bitmap->storage.filemap) {
			sb = kmap_atomic(bitmap->storage.sb_page);
1233 1234 1235
			sb->events_cleared =
				cpu_to_le64(bitmap->events_cleared);
			kunmap_atomic(sb);
1236
			set_page_attr(bitmap, 0,
1237 1238
				      BITMAP_PAGE_NEEDWRITE);
		}
1239 1240 1241 1242
	}
	/* Now look at the bitmap counters and if any are '2' or '1',
	 * decrement and handle accordingly.
	 */
1243 1244
	counts = &bitmap->counts;
	spin_lock_irq(&counts->lock);
1245
	nextpage = 0;
1246
	for (j = 0; j < counts->chunks; j++) {
1247
		bitmap_counter_t *bmc;
1248
		sector_t  block = (sector_t)j << counts->chunkshift;
1249

1250 1251
		if (j == nextpage) {
			nextpage += PAGE_COUNTER_RATIO;
1252
			if (!counts->bp[j >> PAGE_COUNTER_SHIFT].pending) {
1253
				j |= PAGE_COUNTER_MASK;
1254 1255
				continue;
			}
1256
			counts->bp[j >> PAGE_COUNTER_SHIFT].pending = 0;
1257
		}
1258
		bmc = bitmap_get_counter(counts,
1259
					 block,
1260
					 &blocks, 0);
1261 1262

		if (!bmc) {
1263
			j |= PAGE_COUNTER_MASK;
1264 1265 1266 1267 1268
			continue;
		}
		if (*bmc == 1 && !bitmap->need_sync) {
			/* We can clear the bit */
			*bmc = 0;
1269
			bitmap_count_page(counts, block, -1);
1270
			bitmap_file_clear_bit(bitmap, block);
1271 1272
		} else if (*bmc && *bmc <= 2) {
			*bmc = 1;
1273
			bitmap_set_pending(counts, block);
1274
			bitmap->allclean = 0;
1275
		}
1276
	}
1277
	spin_unlock_irq(&counts->lock);
1278

1279 1280 1281 1282 1283 1284 1285 1286
	/* Now start writeout on any page in NEEDWRITE that isn't DIRTY.
	 * DIRTY pages need to be written by bitmap_unplug so it can wait
	 * for them.
	 * If we find any DIRTY page we stop there and let bitmap_unplug
	 * handle all the rest.  This is important in the case where
	 * the first blocking holds the superblock and it has been updated.
	 * We mustn't write any other blocks before the superblock.
	 */
1287 1288 1289 1290
	for (j = 0;
	     j < bitmap->storage.file_pages
		     && !test_bit(BITMAP_STALE, &bitmap->flags);
	     j++) {
1291
		if (test_page_attr(bitmap, j,
1292 1293 1294
				   BITMAP_PAGE_DIRTY))
			/* bitmap_unplug will handle the rest */
			break;
1295 1296
		if (test_and_clear_page_attr(bitmap, j,
					     BITMAP_PAGE_NEEDWRITE)) {
1297
			write_page(bitmap, bitmap->storage.filemap[j], 0);
1298 1299 1300
		}
	}

1301
 done:
1302
	if (bitmap->allclean == 0)
N
NeilBrown 已提交
1303 1304
		mddev->thread->timeout =
			mddev->bitmap_info.daemon_sleep;
1305
	mutex_unlock(&mddev->bitmap_info.mutex);
1306 1307
}

1308
static bitmap_counter_t *bitmap_get_counter(struct bitmap_counts *bitmap,
N
NeilBrown 已提交
1309
					    sector_t offset, sector_t *blocks,
1310
					    int create)
1311 1312
__releases(bitmap->lock)
__acquires(bitmap->lock)
1313 1314 1315 1316 1317
{
	/* If 'create', we might release the lock and reclaim it.
	 * The lock must have been taken with interrupts enabled.
	 * If !create, we don't release the lock.
	 */
1318
	sector_t chunk = offset >> bitmap->chunkshift;
1319 1320 1321
	unsigned long page = chunk >> PAGE_COUNTER_SHIFT;
	unsigned long pageoff = (chunk & PAGE_COUNTER_MASK) << COUNTER_BYTE_SHIFT;
	sector_t csize;
1322
	int err;
1323

1324 1325 1326 1327
	err = bitmap_checkpage(bitmap, page, create);

	if (bitmap->bp[page].hijacked ||
	    bitmap->bp[page].map == NULL)
1328
		csize = ((sector_t)1) << (bitmap->chunkshift +
1329 1330
					  PAGE_COUNTER_SHIFT - 1);
	else
1331
		csize = ((sector_t)1) << bitmap->chunkshift;
1332 1333 1334
	*blocks = csize - (offset & (csize - 1));

	if (err < 0)
1335
		return NULL;
1336

1337 1338 1339 1340 1341 1342 1343 1344
	/* now locked ... */

	if (bitmap->bp[page].hijacked) { /* hijacked pointer */
		/* should we use the first or second counter field
		 * of the hijacked pointer? */
		int hi = (pageoff > PAGE_COUNTER_MASK);
		return  &((bitmap_counter_t *)
			  &bitmap->bp[page].map)[hi];
1345
	} else /* page is allocated */
1346 1347 1348 1349
		return (bitmap_counter_t *)
			&(bitmap->bp[page].map[pageoff]);
}

1350
int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sectors, int behind)
1351
{
1352 1353
	if (!bitmap)
		return 0;
1354 1355

	if (behind) {
1356
		int bw;
1357
		atomic_inc(&bitmap->behind_writes);
1358 1359 1360 1361
		bw = atomic_read(&bitmap->behind_writes);
		if (bw > bitmap->behind_writes_used)
			bitmap->behind_writes_used = bw;

1362 1363
		pr_debug("inc write-behind count %d/%lu\n",
			 bw, bitmap->mddev->bitmap_info.max_write_behind);
1364 1365
	}

1366
	while (sectors) {
N
NeilBrown 已提交
1367
		sector_t blocks;
1368 1369
		bitmap_counter_t *bmc;

1370 1371
		spin_lock_irq(&bitmap->counts.lock);
		bmc = bitmap_get_counter(&bitmap->counts, offset, &blocks, 1);
1372
		if (!bmc) {
1373
			spin_unlock_irq(&bitmap->counts.lock);
1374 1375 1376
			return 0;
		}

1377
		if (unlikely(COUNTER(*bmc) == COUNTER_MAX)) {
1378 1379 1380 1381 1382 1383 1384
			DEFINE_WAIT(__wait);
			/* note that it is safe to do the prepare_to_wait
			 * after the test as long as we do it before dropping
			 * the spinlock.
			 */
			prepare_to_wait(&bitmap->overflow_wait, &__wait,
					TASK_UNINTERRUPTIBLE);
1385
			spin_unlock_irq(&bitmap->counts.lock);
1386
			schedule();
1387 1388 1389 1390
			finish_wait(&bitmap->overflow_wait, &__wait);
			continue;
		}

1391
		switch (*bmc) {
1392 1393
		case 0:
			bitmap_file_set_bit(bitmap, offset);
1394
			bitmap_count_page(&bitmap->counts, offset, 1);
1395 1396 1397 1398
			/* fall through */
		case 1:
			*bmc = 2;
		}
1399

1400 1401
		(*bmc)++;

1402
		spin_unlock_irq(&bitmap->counts.lock);
1403 1404 1405 1406

		offset += blocks;
		if (sectors > blocks)
			sectors -= blocks;
1407 1408
		else
			sectors = 0;
1409 1410 1411
	}
	return 0;
}
1412
EXPORT_SYMBOL(bitmap_startwrite);
1413 1414

void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long sectors,
1415
		     int success, int behind)
1416
{
1417 1418
	if (!bitmap)
		return;
1419
	if (behind) {
1420 1421
		if (atomic_dec_and_test(&bitmap->behind_writes))
			wake_up(&bitmap->behind_wait);
1422 1423 1424
		pr_debug("dec write-behind count %d/%lu\n",
			 atomic_read(&bitmap->behind_writes),
			 bitmap->mddev->bitmap_info.max_write_behind);
1425 1426
	}

1427
	while (sectors) {
N
NeilBrown 已提交
1428
		sector_t blocks;
1429 1430 1431
		unsigned long flags;
		bitmap_counter_t *bmc;

1432 1433
		spin_lock_irqsave(&bitmap->counts.lock, flags);
		bmc = bitmap_get_counter(&bitmap->counts, offset, &blocks, 0);
1434
		if (!bmc) {
1435
			spin_unlock_irqrestore(&bitmap->counts.lock, flags);
1436 1437 1438
			return;
		}

1439
		if (success && !bitmap->mddev->degraded &&
1440 1441 1442
		    bitmap->events_cleared < bitmap->mddev->events) {
			bitmap->events_cleared = bitmap->mddev->events;
			bitmap->need_sync = 1;
1443
			sysfs_notify_dirent_safe(bitmap->sysfs_can_clear);
1444 1445
		}

1446
		if (!success && !NEEDED(*bmc))
1447 1448
			*bmc |= NEEDED_MASK;

1449
		if (COUNTER(*bmc) == COUNTER_MAX)
1450 1451
			wake_up(&bitmap->overflow_wait);

1452
		(*bmc)--;
1453
		if (*bmc <= 2) {
1454
			bitmap_set_pending(&bitmap->counts, offset);
1455 1456
			bitmap->allclean = 0;
		}
1457
		spin_unlock_irqrestore(&bitmap->counts.lock, flags);
1458 1459 1460
		offset += blocks;
		if (sectors > blocks)
			sectors -= blocks;
1461 1462
		else
			sectors = 0;
1463 1464
	}
}
1465
EXPORT_SYMBOL(bitmap_endwrite);
1466

N
NeilBrown 已提交
1467
static int __bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks,
1468
			       int degraded)
1469 1470 1471 1472 1473 1474 1475
{
	bitmap_counter_t *bmc;
	int rv;
	if (bitmap == NULL) {/* FIXME or bitmap set as 'failed' */
		*blocks = 1024;
		return 1; /* always resync if no bitmap */
	}
1476 1477
	spin_lock_irq(&bitmap->counts.lock);
	bmc = bitmap_get_counter(&bitmap->counts, offset, blocks, 0);
1478 1479 1480 1481 1482 1483 1484
	rv = 0;
	if (bmc) {
		/* locked */
		if (RESYNC(*bmc))
			rv = 1;
		else if (NEEDED(*bmc)) {
			rv = 1;
1485 1486 1487 1488
			if (!degraded) { /* don't set/clear bits if degraded */
				*bmc |= RESYNC_MASK;
				*bmc &= ~NEEDED_MASK;
			}
1489 1490
		}
	}
1491
	spin_unlock_irq(&bitmap->counts.lock);
1492 1493 1494
	return rv;
}

N
NeilBrown 已提交
1495
int bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks,
1496 1497 1498 1499 1500 1501 1502 1503 1504 1505
		      int degraded)
{
	/* bitmap_start_sync must always report on multiples of whole
	 * pages, otherwise resync (which is very PAGE_SIZE based) will
	 * get confused.
	 * So call __bitmap_start_sync repeatedly (if needed) until
	 * At least PAGE_SIZE>>9 blocks are covered.
	 * Return the 'or' of the result.
	 */
	int rv = 0;
N
NeilBrown 已提交
1506
	sector_t blocks1;
1507 1508 1509 1510 1511 1512 1513 1514 1515 1516

	*blocks = 0;
	while (*blocks < (PAGE_SIZE>>9)) {
		rv |= __bitmap_start_sync(bitmap, offset,
					  &blocks1, degraded);
		offset += blocks1;
		*blocks += blocks1;
	}
	return rv;
}
1517
EXPORT_SYMBOL(bitmap_start_sync);
1518

N
NeilBrown 已提交
1519
void bitmap_end_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, int aborted)
1520 1521 1522
{
	bitmap_counter_t *bmc;
	unsigned long flags;
1523 1524

	if (bitmap == NULL) {
1525 1526 1527
		*blocks = 1024;
		return;
	}
1528 1529
	spin_lock_irqsave(&bitmap->counts.lock, flags);
	bmc = bitmap_get_counter(&bitmap->counts, offset, blocks, 0);
1530 1531 1532 1533 1534 1535 1536 1537 1538
	if (bmc == NULL)
		goto unlock;
	/* locked */
	if (RESYNC(*bmc)) {
		*bmc &= ~RESYNC_MASK;

		if (!NEEDED(*bmc) && aborted)
			*bmc |= NEEDED_MASK;
		else {
1539
			if (*bmc <= 2) {
1540
				bitmap_set_pending(&bitmap->counts, offset);
1541 1542
				bitmap->allclean = 0;
			}
1543 1544 1545
		}
	}
 unlock:
1546
	spin_unlock_irqrestore(&bitmap->counts.lock, flags);
1547
}
1548
EXPORT_SYMBOL(bitmap_end_sync);
1549 1550 1551 1552 1553 1554 1555 1556

void bitmap_close_sync(struct bitmap *bitmap)
{
	/* Sync has finished, and any bitmap chunks that weren't synced
	 * properly have been aborted.  It remains to us to clear the
	 * RESYNC bit wherever it is still on
	 */
	sector_t sector = 0;
N
NeilBrown 已提交
1557
	sector_t blocks;
N
NeilBrown 已提交
1558 1559
	if (!bitmap)
		return;
1560 1561
	while (sector < bitmap->mddev->resync_max_sectors) {
		bitmap_end_sync(bitmap, sector, &blocks, 0);
N
NeilBrown 已提交
1562 1563 1564
		sector += blocks;
	}
}
1565
EXPORT_SYMBOL(bitmap_close_sync);
N
NeilBrown 已提交
1566

1567
void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector, bool force)
N
NeilBrown 已提交
1568 1569
{
	sector_t s = 0;
N
NeilBrown 已提交
1570
	sector_t blocks;
N
NeilBrown 已提交
1571 1572 1573 1574 1575 1576 1577

	if (!bitmap)
		return;
	if (sector == 0) {
		bitmap->last_end_sync = jiffies;
		return;
	}
1578
	if (!force && time_before(jiffies, (bitmap->last_end_sync
1579
				  + bitmap->mddev->bitmap_info.daemon_sleep)))
N
NeilBrown 已提交
1580 1581 1582 1583
		return;
	wait_event(bitmap->mddev->recovery_wait,
		   atomic_read(&bitmap->mddev->recovery_active) == 0);

1584
	bitmap->mddev->curr_resync_completed = sector;
1585
	set_bit(MD_CHANGE_CLEAN, &bitmap->mddev->flags);
1586
	sector &= ~((1ULL << bitmap->counts.chunkshift) - 1);
N
NeilBrown 已提交
1587 1588 1589 1590
	s = 0;
	while (s < sector && s < bitmap->mddev->resync_max_sectors) {
		bitmap_end_sync(bitmap, s, &blocks, 0);
		s += blocks;
1591
	}
N
NeilBrown 已提交
1592
	bitmap->last_end_sync = jiffies;
1593
	sysfs_notify(&bitmap->mddev->kobj, NULL, "sync_completed");
1594
}
1595
EXPORT_SYMBOL(bitmap_cond_end_sync);
1596

1597
static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed)
1598 1599
{
	/* For each chunk covered by any of these sectors, set the
1600
	 * counter to 2 and possibly set resync_needed.  They should all
1601 1602
	 * be 0 at this point
	 */
1603

N
NeilBrown 已提交
1604
	sector_t secs;
1605
	bitmap_counter_t *bmc;
1606 1607
	spin_lock_irq(&bitmap->counts.lock);
	bmc = bitmap_get_counter(&bitmap->counts, offset, &secs, 1);
1608
	if (!bmc) {
1609
		spin_unlock_irq(&bitmap->counts.lock);
1610
		return;
1611
	}
1612
	if (!*bmc) {
1613
		*bmc = 2;
1614 1615
		bitmap_count_page(&bitmap->counts, offset, 1);
		bitmap_set_pending(&bitmap->counts, offset);
1616
		bitmap->allclean = 0;
1617
	}
1618 1619
	if (needed)
		*bmc |= NEEDED_MASK;
1620
	spin_unlock_irq(&bitmap->counts.lock);
1621 1622
}

1623 1624 1625 1626 1627 1628
/* dirty the memory and file bits for bitmap chunks "s" to "e" */
void bitmap_dirty_bits(struct bitmap *bitmap, unsigned long s, unsigned long e)
{
	unsigned long chunk;

	for (chunk = s; chunk <= e; chunk++) {
1629
		sector_t sec = (sector_t)chunk << bitmap->counts.chunkshift;
1630 1631
		bitmap_set_memory_bits(bitmap, sec, 1);
		bitmap_file_set_bit(bitmap, sec);
1632 1633 1634 1635 1636 1637
		if (sec < bitmap->mddev->recovery_cp)
			/* We are asserting that the array is dirty,
			 * so move the recovery_cp address back so
			 * that it is obvious that it is dirty
			 */
			bitmap->mddev->recovery_cp = sec;
1638 1639 1640
	}
}

1641 1642 1643
/*
 * flush out any pending updates
 */
1644
void bitmap_flush(struct mddev *mddev)
1645 1646
{
	struct bitmap *bitmap = mddev->bitmap;
1647
	long sleep;
1648 1649 1650 1651 1652 1653 1654

	if (!bitmap) /* there was no bitmap */
		return;

	/* run the daemon_work three time to ensure everything is flushed
	 * that can be
	 */
1655
	sleep = mddev->bitmap_info.daemon_sleep * 2;
1656
	bitmap->daemon_lastrun -= sleep;
1657
	bitmap_daemon_work(mddev);
1658
	bitmap->daemon_lastrun -= sleep;
1659
	bitmap_daemon_work(mddev);
1660
	bitmap->daemon_lastrun -= sleep;
1661
	bitmap_daemon_work(mddev);
1662 1663 1664
	bitmap_update_sb(bitmap);
}

1665 1666 1667
/*
 * free memory that was allocated
 */
1668
static void bitmap_free(struct bitmap *bitmap)
1669 1670 1671 1672 1673 1674 1675
{
	unsigned long k, pages;
	struct bitmap_page *bp;

	if (!bitmap) /* there was no bitmap */
		return;

1676 1677
	if (mddev_is_clustered(bitmap->mddev) && bitmap->mddev->cluster_info &&
		bitmap->cluster_slot == md_cluster_ops->slot_number(bitmap->mddev))
1678 1679
		md_cluster_stop(bitmap->mddev);

1680 1681 1682 1683 1684 1685
	/* Shouldn't be needed - but just in case.... */
	wait_event(bitmap->write_wait,
		   atomic_read(&bitmap->pending_writes) == 0);

	/* release the bitmap file  */
	bitmap_file_unmap(&bitmap->storage);
1686

1687 1688
	bp = bitmap->counts.bp;
	pages = bitmap->counts.pages;
1689 1690 1691 1692 1693 1694 1695 1696 1697 1698

	/* free all allocated memory */

	if (bp) /* deallocate the page memory */
		for (k = 0; k < pages; k++)
			if (bp[k].map && !bp[k].hijacked)
				kfree(bp[k].map);
	kfree(bp);
	kfree(bitmap);
}
1699

1700
void bitmap_destroy(struct mddev *mddev)
1701 1702 1703 1704 1705 1706
{
	struct bitmap *bitmap = mddev->bitmap;

	if (!bitmap) /* there was no bitmap */
		return;

1707
	mutex_lock(&mddev->bitmap_info.mutex);
1708
	spin_lock(&mddev->lock);
1709
	mddev->bitmap = NULL; /* disconnect from the md device */
1710
	spin_unlock(&mddev->lock);
1711
	mutex_unlock(&mddev->bitmap_info.mutex);
1712 1713
	if (mddev->thread)
		mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;
1714

1715 1716 1717
	if (bitmap->sysfs_can_clear)
		sysfs_put(bitmap->sysfs_can_clear);

1718 1719
	bitmap_free(bitmap);
}
1720 1721 1722 1723 1724

/*
 * initialize the bitmap structure
 * if this returns an error, bitmap_destroy must be called to do clean up
 */
struct bitmap *bitmap_create(struct mddev *mddev, int slot)
{
	struct bitmap *bitmap;
	sector_t blocks = mddev->resync_max_sectors;
	struct file *file = mddev->bitmap_info.file;
	int err;
	struct kernfs_node *bm = NULL;

	BUILD_BUG_ON(sizeof(bitmap_super_t) != 256);

	BUG_ON(file && mddev->bitmap_info.offset);

	bitmap = kzalloc(sizeof(*bitmap), GFP_KERNEL);
	if (!bitmap)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&bitmap->counts.lock);
	atomic_set(&bitmap->pending_writes, 0);
	init_waitqueue_head(&bitmap->write_wait);
	init_waitqueue_head(&bitmap->overflow_wait);
	init_waitqueue_head(&bitmap->behind_wait);

	bitmap->mddev = mddev;
	bitmap->cluster_slot = slot;

	if (mddev->kobj.sd)
		bm = sysfs_get_dirent(mddev->kobj.sd, "bitmap");
	if (bm) {
		bitmap->sysfs_can_clear = sysfs_get_dirent(bm, "can_clear");
		sysfs_put(bm);
	} else
		bitmap->sysfs_can_clear = NULL;

	bitmap->storage.file = file;
	if (file) {
		get_file(file);
		/* As future accesses to this file will use bmap,
		 * and bypass the page cache, we must sync the file
		 * first.
		 */
		vfs_fsync(file, 1);
	}
	/* read superblock from bitmap file (this sets mddev->bitmap_info.chunksize) */
	if (!mddev->bitmap_info.external) {
		/*
		 * If 'MD_ARRAY_FIRST_USE' is set, then device-mapper is
		 * instructing us to create a new on-disk bitmap instance.
		 */
		if (test_and_clear_bit(MD_ARRAY_FIRST_USE, &mddev->flags))
			err = bitmap_new_disk_sb(bitmap);
		else
			err = bitmap_read_sb(bitmap);
	} else {
		err = 0;
		if (mddev->bitmap_info.chunksize == 0 ||
		    mddev->bitmap_info.daemon_sleep == 0)
			/* chunksize and time_base need to be
			 * set first. */
			err = -EINVAL;
	}
	if (err)
		goto error;

	bitmap->daemon_lastrun = jiffies;
	err = bitmap_resize(bitmap, blocks, mddev->bitmap_info.chunksize, 1);
	if (err)
		goto error;

	printk(KERN_INFO "created bitmap (%lu pages) for device %s\n",
	       bitmap->counts.pages, bmname(bitmap));

	err = test_bit(BITMAP_WRITE_ERROR, &bitmap->flags) ? -EIO : 0;
	if (err)
		goto error;

	return bitmap;
 error:
	bitmap_free(bitmap);
	return ERR_PTR(err);
}

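/*
 * Load the on-disk bitmap into memory once the array is assembled: old
 * in-memory state is cleared out, bits are re-read from storage via
 * bitmap_init_from_disk(), recovery is kicked in case any bits were set,
 * and the daemon timeout is (re)armed so lazy clearing can resume.
 */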
int bitmap_load(struct mddev *mddev)
{
	int err = 0;
	sector_t start = 0;
	sector_t sector = 0;
	struct bitmap *bitmap = mddev->bitmap;

	if (!bitmap)
		goto out;

	/* Clear out old bitmap info first:  Either there is none, or we
	 * are resuming after someone else has possibly changed things,
	 * so we should forget old cached info.
	 * All chunks should be clean, but some might need_sync.
	 */
	while (sector < mddev->resync_max_sectors) {
		sector_t blocks;
		bitmap_start_sync(bitmap, sector, &blocks, 0);
		sector += blocks;
	}
	bitmap_close_sync(bitmap);

	if (mddev->degraded == 0
	    || bitmap->events_cleared == mddev->events)
		/* no need to keep dirty bits to optimise a
		 * re-add of a missing device */
		start = mddev->recovery_cp;

	mutex_lock(&mddev->bitmap_info.mutex);
	err = bitmap_init_from_disk(bitmap, start);
	mutex_unlock(&mddev->bitmap_info.mutex);

	if (err)
		goto out;
	clear_bit(BITMAP_STALE, &bitmap->flags);

	/* Kick recovery in case any bits were set */
	set_bit(MD_RECOVERY_NEEDED, &bitmap->mddev->recovery);

	mddev->thread->timeout = mddev->bitmap_info.daemon_sleep;
	md_wakeup_thread(mddev->thread);

	bitmap_update_sb(bitmap);

	if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags))
		err = -EIO;
out:
	return err;
}
EXPORT_SYMBOL_GPL(bitmap_load);

/* Loads the bitmap associated with slot and copies the resync information
 * to our bitmap
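 * (md-cluster uses this when one node adopts the dirty ranges recorded in
 * another node's bitmap slot)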
 */
int bitmap_copy_from_slot(struct mddev *mddev, int slot,
		sector_t *low, sector_t *high, bool clear_bits)
{
	int rv = 0, i, j;
	sector_t block, lo = 0, hi = 0;
	struct bitmap_counts *counts;
	struct bitmap *bitmap = bitmap_create(mddev, slot);

	if (IS_ERR(bitmap))
		return PTR_ERR(bitmap);

	rv = bitmap_init_from_disk(bitmap, 0);
	if (rv)
		goto err;

	counts = &bitmap->counts;
	for (j = 0; j < counts->chunks; j++) {
		block = (sector_t)j << counts->chunkshift;
		if (bitmap_file_test_bit(bitmap, block)) {
			if (!lo)
				lo = block;
			hi = block;
			bitmap_file_clear_bit(bitmap, block);
			bitmap_set_memory_bits(mddev->bitmap, block, 1);
			bitmap_file_set_bit(mddev->bitmap, block);
		}
	}

	if (clear_bits) {
		bitmap_update_sb(bitmap);
		/* Setting this for the ev_page should be enough.
		 * And we do not require both write_all and BITMAP_PAGE_DIRTY either
		 */
		for (i = 0; i < bitmap->storage.file_pages; i++)
			set_page_attr(bitmap, i, BITMAP_PAGE_DIRTY);
		bitmap_write_all(bitmap);
		bitmap_unplug(bitmap);
	}
	*low = lo;
	*high = hi;
err:
	bitmap_free(bitmap);
	return rv;
}
EXPORT_SYMBOL_GPL(bitmap_copy_from_slot);


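/*
 * Report bitmap usage through a seq_file; md uses this for the "bitmap:"
 * line of /proc/mdstat, e.g. "bitmap: 3/15 pages [12KB], 65536KB chunk".
 */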
void bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
{
	unsigned long chunk_kb;
	struct bitmap_counts *counts;

	if (!bitmap)
		return;

	counts = &bitmap->counts;

	chunk_kb = bitmap->mddev->bitmap_info.chunksize >> 10;
	seq_printf(seq, "bitmap: %lu/%lu pages [%luKB], "
		   "%lu%s chunk",
		   counts->pages - counts->missing_pages,
		   counts->pages,
		   (counts->pages - counts->missing_pages)
		   << (PAGE_SHIFT - 10),
		   chunk_kb ? chunk_kb : bitmap->mddev->bitmap_info.chunksize,
		   chunk_kb ? "KB" : "B");
	if (bitmap->storage.file) {
		seq_printf(seq, ", file: ");
		seq_file_path(seq, bitmap->storage.file, " \t\n");
	}

	seq_printf(seq, "\n");
}

int bitmap_resize(struct bitmap *bitmap, sector_t blocks,
		  int chunksize, int init)
{
	/* If chunk_size is 0, choose an appropriate chunk size.
	 * Then possibly allocate new storage space.
	 * Then quiesce, copy bits, replace bitmap, and re-start
	 *
	 * This function is called both to set up the initial bitmap
	 * and to resize the bitmap while the array is active.
	 * If this happens as a result of the array being resized,
	 * chunksize will be zero, and we need to choose a suitable
	 * chunksize, otherwise we use what we are given.
	 */
	struct bitmap_storage store;
	struct bitmap_counts old_counts;
	unsigned long chunks;
	sector_t block;
	sector_t old_blocks, new_blocks;
	int chunkshift;
	int ret = 0;
	long pages;
	struct bitmap_page *new_bp;

	if (chunksize == 0) {
		/* If there is enough space, leave the chunk size unchanged,
		 * else increase by factor of two until there is enough space.
		 */
		long bytes;
		long space = bitmap->mddev->bitmap_info.space;

		if (space == 0) {
			/* We don't know how much space there is, so limit
			 * to current size - in sectors.
			 */
			bytes = DIV_ROUND_UP(bitmap->counts.chunks, 8);
			if (!bitmap->mddev->bitmap_info.external)
				bytes += sizeof(bitmap_super_t);
			space = DIV_ROUND_UP(bytes, 512);
			bitmap->mddev->bitmap_info.space = space;
		}
		chunkshift = bitmap->counts.chunkshift;
		chunkshift--;
		do {
			/* 'chunkshift' is shift from block size to chunk size */
			chunkshift++;
			chunks = DIV_ROUND_UP_SECTOR_T(blocks, 1 << chunkshift);
			bytes = DIV_ROUND_UP(chunks, 8);
			if (!bitmap->mddev->bitmap_info.external)
				bytes += sizeof(bitmap_super_t);
		} while (bytes > (space << 9));
	} else
		chunkshift = ffz(~chunksize) - BITMAP_BLOCK_SHIFT;
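	/*
	 * e.g. a chunksize of 65536 (64KiB) with BITMAP_BLOCK_SHIFT == 9
	 * yields chunkshift == 7, i.e. each chunk covers 1 << 7 512-byte
	 * blocks.
	 */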

	chunks = DIV_ROUND_UP_SECTOR_T(blocks, 1 << chunkshift);
	memset(&store, 0, sizeof(store));
	if (bitmap->mddev->bitmap_info.offset || bitmap->mddev->bitmap_info.file)
		ret = bitmap_storage_alloc(&store, chunks,
					   !bitmap->mddev->bitmap_info.external,
					   mddev_is_clustered(bitmap->mddev)
					   ? bitmap->cluster_slot : 0);
	if (ret)
		goto err;

	pages = DIV_ROUND_UP(chunks, PAGE_COUNTER_RATIO);

	new_bp = kzalloc(pages * sizeof(*new_bp), GFP_KERNEL);
	ret = -ENOMEM;
	if (!new_bp) {
		bitmap_file_unmap(&store);
		goto err;
	}

	if (!init)
		bitmap->mddev->pers->quiesce(bitmap->mddev, 1);

	store.file = bitmap->storage.file;
	bitmap->storage.file = NULL;

	if (store.sb_page && bitmap->storage.sb_page)
		memcpy(page_address(store.sb_page),
		       page_address(bitmap->storage.sb_page),
		       sizeof(bitmap_super_t));
	bitmap_file_unmap(&bitmap->storage);
	bitmap->storage = store;

	old_counts = bitmap->counts;
	bitmap->counts.bp = new_bp;
	bitmap->counts.pages = pages;
	bitmap->counts.missing_pages = pages;
	bitmap->counts.chunkshift = chunkshift;
	bitmap->counts.chunks = chunks;
	bitmap->mddev->bitmap_info.chunksize = 1 << (chunkshift +
						     BITMAP_BLOCK_SHIFT);

	blocks = min(old_counts.chunks << old_counts.chunkshift,
		     chunks << chunkshift);

	spin_lock_irq(&bitmap->counts.lock);
	for (block = 0; block < blocks; ) {
		bitmap_counter_t *bmc_old, *bmc_new;
		int set;

		bmc_old = bitmap_get_counter(&old_counts, block,
					     &old_blocks, 0);
		set = bmc_old && NEEDED(*bmc_old);

		if (set) {
			bmc_new = bitmap_get_counter(&bitmap->counts, block,
						     &new_blocks, 1);
			if (*bmc_new == 0) {
				/* need to set on-disk bits too. */
				sector_t end = block + new_blocks;
				sector_t start = block >> chunkshift;
				start <<= chunkshift;
				while (start < end) {
					bitmap_file_set_bit(bitmap, block);
					start += 1 << chunkshift;
				}
				*bmc_new = 2;
				bitmap_count_page(&bitmap->counts,
						  block, 1);
				bitmap_set_pending(&bitmap->counts,
						   block);
			}
			*bmc_new |= NEEDED_MASK;
			if (new_blocks < old_blocks)
				old_blocks = new_blocks;
		}
		block += old_blocks;
	}

	if (!init) {
		int i;
		while (block < (chunks << chunkshift)) {
			bitmap_counter_t *bmc;
			bmc = bitmap_get_counter(&bitmap->counts, block,
						 &new_blocks, 1);
			if (bmc) {
				/* new space.  It needs to be resynced, so
				 * we set NEEDED_MASK.
				 */
				if (*bmc == 0) {
					*bmc = NEEDED_MASK | 2;
					bitmap_count_page(&bitmap->counts,
							  block, 1);
					bitmap_set_pending(&bitmap->counts,
							   block);
				}
			}
			block += new_blocks;
		}
		for (i = 0; i < bitmap->storage.file_pages; i++)
			set_page_attr(bitmap, i, BITMAP_PAGE_DIRTY);
	}
	spin_unlock_irq(&bitmap->counts.lock);

	if (!init) {
		bitmap_unplug(bitmap);
		bitmap->mddev->pers->quiesce(bitmap->mddev, 0);
	}
	ret = 0;
err:
	return ret;
}
EXPORT_SYMBOL_GPL(bitmap_resize);

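/*
 * sysfs interface: the attributes below are grouped under "bitmap"
 * (see md_bitmap_group at the end of this file), i.e. they appear as
 * /sys/block/mdX/md/bitmap/*.  For example, writing "+8" to "location"
 * asks for an internal bitmap 8 sectors from the superblock.
 */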
static ssize_t
location_show(struct mddev *mddev, char *page)
{
	ssize_t len;
	if (mddev->bitmap_info.file)
		len = sprintf(page, "file");
	else if (mddev->bitmap_info.offset)
		len = sprintf(page, "%+lld", (long long)mddev->bitmap_info.offset);
	else
		len = sprintf(page, "none");
	len += sprintf(page+len, "\n");
	return len;
}

static ssize_t
location_store(struct mddev *mddev, const char *buf, size_t len)
{

	if (mddev->pers) {
		if (!mddev->pers->quiesce)
			return -EBUSY;
		if (mddev->recovery || mddev->sync_thread)
			return -EBUSY;
	}

	if (mddev->bitmap || mddev->bitmap_info.file ||
	    mddev->bitmap_info.offset) {
		/* bitmap already configured.  Only option is to clear it */
		if (strncmp(buf, "none", 4) != 0)
			return -EBUSY;
		if (mddev->pers) {
			mddev->pers->quiesce(mddev, 1);
			bitmap_destroy(mddev);
			mddev->pers->quiesce(mddev, 0);
		}
		mddev->bitmap_info.offset = 0;
		if (mddev->bitmap_info.file) {
			struct file *f = mddev->bitmap_info.file;
			mddev->bitmap_info.file = NULL;
			fput(f);
		}
	} else {
		/* No bitmap, OK to set a location */
		long long offset;
		if (strncmp(buf, "none", 4) == 0)
			/* nothing to be done */;
		else if (strncmp(buf, "file:", 5) == 0) {
			/* Not supported yet */
			return -EINVAL;
		} else {
			int rv;
			if (buf[0] == '+')
				rv = kstrtoll(buf+1, 10, &offset);
			else
				rv = kstrtoll(buf, 10, &offset);
			if (rv)
				return rv;
			if (offset == 0)
				return -EINVAL;
			if (mddev->bitmap_info.external == 0 &&
			    mddev->major_version == 0 &&
			    offset != mddev->bitmap_info.default_offset)
				return -EINVAL;
			mddev->bitmap_info.offset = offset;
			if (mddev->pers) {
				struct bitmap *bitmap;
				mddev->pers->quiesce(mddev, 1);
				bitmap = bitmap_create(mddev, -1);
				if (IS_ERR(bitmap))
					rv = PTR_ERR(bitmap);
				else {
					mddev->bitmap = bitmap;
					rv = bitmap_load(mddev);
					if (rv) {
						bitmap_destroy(mddev);
						mddev->bitmap_info.offset = 0;
					}
				}
				mddev->pers->quiesce(mddev, 0);
				if (rv)
					return rv;
			}
		}
	}
	if (!mddev->external) {
		/* Ensure new bitmap info is stored in
		 * metadata promptly.
		 */
		set_bit(MD_CHANGE_DEVS, &mddev->flags);
		md_wakeup_thread(mddev->thread);
	}
	return len;
}

static struct md_sysfs_entry bitmap_location =
__ATTR(location, S_IRUGO|S_IWUSR, location_show, location_store);

/* 'bitmap/space' is the space available at 'location' for the
 * bitmap.  This allows the kernel to know when it is safe to
 * resize the bitmap to match a resized array.
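 * The value is in 512-byte sectors (space_store() below compares it
 * against storage.bytes >> 9).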
 */
static ssize_t
space_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%lu\n", mddev->bitmap_info.space);
}

static ssize_t
space_store(struct mddev *mddev, const char *buf, size_t len)
{
	unsigned long sectors;
	int rv;

	rv = kstrtoul(buf, 10, &sectors);
	if (rv)
		return rv;

	if (sectors == 0)
		return -EINVAL;

	if (mddev->bitmap &&
	    sectors < (mddev->bitmap->storage.bytes + 511) >> 9)
		return -EFBIG; /* Bitmap is too big for this small space */

	/* could make sure it isn't too big, but that isn't really
	 * needed - user-space should be careful.
	 */
	mddev->bitmap_info.space = sectors;
	return len;
}

static struct md_sysfs_entry bitmap_space =
__ATTR(space, S_IRUGO|S_IWUSR, space_show, space_store);

static ssize_t
timeout_show(struct mddev *mddev, char *page)
{
	ssize_t len;
	unsigned long secs = mddev->bitmap_info.daemon_sleep / HZ;
	unsigned long jifs = mddev->bitmap_info.daemon_sleep % HZ;

	len = sprintf(page, "%lu", secs);
	if (jifs)
		len += sprintf(page+len, ".%03u", jiffies_to_msecs(jifs));
	len += sprintf(page+len, "\n");
	return len;
}

static ssize_t
timeout_store(struct mddev *mddev, const char *buf, size_t len)
{
	/* timeout can be set at any time */
	unsigned long timeout;
	int rv = strict_strtoul_scaled(buf, &timeout, 4);
	if (rv)
		return rv;

	/* just to make sure we don't overflow... */
	if (timeout >= LONG_MAX / HZ)
		return -EINVAL;

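	/*
	 * strict_strtoul_scaled(buf, &timeout, 4) parses the value in units
	 * of 1/10000 of a second, so e.g. writing "5.5" gives timeout ==
	 * 55000, which the line below converts to 5.5 * HZ jiffies.
	 */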
	timeout = timeout * HZ / 10000;

	if (timeout >= MAX_SCHEDULE_TIMEOUT)
		timeout = MAX_SCHEDULE_TIMEOUT-1;
	if (timeout < 1)
		timeout = 1;
	mddev->bitmap_info.daemon_sleep = timeout;
	if (mddev->thread) {
		/* if thread->timeout is MAX_SCHEDULE_TIMEOUT, then
		 * the bitmap is all clean and we don't need to
		 * adjust the timeout right now
		 */
		if (mddev->thread->timeout < MAX_SCHEDULE_TIMEOUT) {
			mddev->thread->timeout = timeout;
			md_wakeup_thread(mddev->thread);
		}
	}
	return len;
}

static struct md_sysfs_entry bitmap_timeout =
__ATTR(time_base, S_IRUGO|S_IWUSR, timeout_show, timeout_store);

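/*
 * 'backlog' limits how many write-behind writes may be outstanding at once
 * (mddev->bitmap_info.max_write_behind); it only matters for arrays with
 * write-mostly members.
 */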
static ssize_t
backlog_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%lu\n", mddev->bitmap_info.max_write_behind);
}

static ssize_t
backlog_store(struct mddev *mddev, const char *buf, size_t len)
{
	unsigned long backlog;
	int rv = kstrtoul(buf, 10, &backlog);
	if (rv)
		return rv;
	if (backlog > COUNTER_MAX)
		return -EINVAL;
	mddev->bitmap_info.max_write_behind = backlog;
	return len;
}

static struct md_sysfs_entry bitmap_backlog =
__ATTR(backlog, S_IRUGO|S_IWUSR, backlog_show, backlog_store);

static ssize_t
chunksize_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%lu\n", mddev->bitmap_info.chunksize);
}

static ssize_t
chunksize_store(struct mddev *mddev, const char *buf, size_t len)
{
	/* Can only be changed when no bitmap is active */
	int rv;
	unsigned long csize;
	if (mddev->bitmap)
		return -EBUSY;
	rv = kstrtoul(buf, 10, &csize);
	if (rv)
		return rv;
	if (csize < 512 ||
	    !is_power_of_2(csize))
		return -EINVAL;
	mddev->bitmap_info.chunksize = csize;
	return len;
}

static struct md_sysfs_entry bitmap_chunksize =
__ATTR(chunksize, S_IRUGO|S_IWUSR, chunksize_show, chunksize_store);

static ssize_t metadata_show(struct mddev *mddev, char *page)
{
	if (mddev_is_clustered(mddev))
		return sprintf(page, "clustered\n");
	return sprintf(page, "%s\n", (mddev->bitmap_info.external
				      ? "external" : "internal"));
}

static ssize_t metadata_store(struct mddev *mddev, const char *buf, size_t len)
{
	if (mddev->bitmap ||
	    mddev->bitmap_info.file ||
	    mddev->bitmap_info.offset)
		return -EBUSY;
	if (strncmp(buf, "external", 8) == 0)
		mddev->bitmap_info.external = 1;
	else if ((strncmp(buf, "internal", 8) == 0) ||
			(strncmp(buf, "clustered", 9) == 0))
		mddev->bitmap_info.external = 0;
	else
		return -EINVAL;
	return len;
}

static struct md_sysfs_entry bitmap_metadata =
__ATTR(metadata, S_IRUGO|S_IWUSR, metadata_show, metadata_store);

static ssize_t can_clear_show(struct mddev *mddev, char *page)
{
	int len;
	spin_lock(&mddev->lock);
	if (mddev->bitmap)
		len = sprintf(page, "%s\n", (mddev->bitmap->need_sync ?
					     "false" : "true"));
	else
		len = sprintf(page, "\n");
	spin_unlock(&mddev->lock);
	return len;
}

static ssize_t can_clear_store(struct mddev *mddev, const char *buf, size_t len)
{
	if (mddev->bitmap == NULL)
		return -ENOENT;
	if (strncmp(buf, "false", 5) == 0)
		mddev->bitmap->need_sync = 1;
	else if (strncmp(buf, "true", 4) == 0) {
		if (mddev->degraded)
			return -EBUSY;
		mddev->bitmap->need_sync = 0;
	} else
		return -EINVAL;
	return len;
}

static struct md_sysfs_entry bitmap_can_clear =
__ATTR(can_clear, S_IRUGO|S_IWUSR, can_clear_show, can_clear_store);

static ssize_t
behind_writes_used_show(struct mddev *mddev, char *page)
{
	ssize_t ret;
	spin_lock(&mddev->lock);
	if (mddev->bitmap == NULL)
		ret = sprintf(page, "0\n");
	else
		ret = sprintf(page, "%lu\n",
			      mddev->bitmap->behind_writes_used);
	spin_unlock(&mddev->lock);
	return ret;
}

static ssize_t
behind_writes_used_reset(struct mddev *mddev, const char *buf, size_t len)
{
	if (mddev->bitmap)
		mddev->bitmap->behind_writes_used = 0;
	return len;
}

static struct md_sysfs_entry max_backlog_used =
__ATTR(max_backlog_used, S_IRUGO | S_IWUSR,
       behind_writes_used_show, behind_writes_used_reset);

static struct attribute *md_bitmap_attrs[] = {
	&bitmap_location.attr,
	&bitmap_space.attr,
	&bitmap_timeout.attr,
	&bitmap_backlog.attr,
	&bitmap_chunksize.attr,
	&bitmap_metadata.attr,
	&bitmap_can_clear.attr,
	&max_backlog_used.attr,
	NULL
};
struct attribute_group md_bitmap_group = {
	.name = "bitmap",
	.attrs = md_bitmap_attrs,
};