/*
 * bitmap.c two-level bitmap (C) Peter T. Breuer (ptb@ot.uc3m.es) 2003
 *
 * bitmap_create  - sets up the bitmap structure
 * bitmap_destroy - destroys the bitmap structure
 *
 * additions, Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.:
 * - added disk storage for bitmap
 * - changes to allow various bitmap chunk sizes
 */

/*
 * Still to do:
 *
 * flush after percent set rather than just time based. (maybe both).
 */

#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/buffer_head.h>
#include <linux/seq_file.h>
#include "md.h"
#include "bitmap.h"

static inline char *bmname(struct bitmap *bitmap)
{
	return bitmap->mddev ? mdname(bitmap->mddev) : "mdX";
}

/*
 * check a page and, if necessary, allocate it (or hijack it if the alloc fails)
 *
 * 1) check to see if this page is allocated, if it's not then try to alloc
 * 2) if the alloc fails, set the page's hijacked flag so we'll use the
 *    page pointer directly as a counter
 *
 * if we find our page, we increment the page's refcount so that it stays
 * allocated while we're using it
 */
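/*
 * (Note: a "hijacked" entry reuses the storage of the bp[page].map
 * pointer itself as two bitmap_counter_t values -- see
 * bitmap_get_counter() -- so each of those two counters has to stand
 * in for half of the chunk range the page would normally cover.)
 */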
static int bitmap_checkpage(struct bitmap *bitmap,
			    unsigned long page, int create)
__releases(bitmap->lock)
__acquires(bitmap->lock)
{
	unsigned char *mappage;

	if (page >= bitmap->pages) {
		/* This can happen if bitmap_start_sync goes beyond
		 * End-of-device while looking for a whole page.
		 * It is harmless.
		 */
		return -EINVAL;
	}

	if (bitmap->bp[page].hijacked) /* it's hijacked, don't try to alloc */
		return 0;

	if (bitmap->bp[page].map) /* page is already allocated, just return */
		return 0;

	if (!create)
		return -ENOENT;

	/* this page has not been allocated yet */

	spin_unlock_irq(&bitmap->lock);
	mappage = kzalloc(PAGE_SIZE, GFP_NOIO);
	spin_lock_irq(&bitmap->lock);

	if (mappage == NULL) {
		pr_debug("%s: bitmap map page allocation failed, hijacking\n",
			 bmname(bitmap));
		/* failed - set the hijacked flag so that we can use the
		 * pointer as a counter */
		if (!bitmap->bp[page].map)
			bitmap->bp[page].hijacked = 1;
	} else if (bitmap->bp[page].map ||
		   bitmap->bp[page].hijacked) {
		/* somebody beat us to getting the page */
		kfree(mappage);
		return 0;
	} else {

		/* no page was in place and we have one, so install it */

		bitmap->bp[page].map = mappage;
		bitmap->missing_pages--;
	}
	return 0;
}

/* if page is completely empty, put it back on the free list, or dealloc it */
/* if page was hijacked, unmark the flag so it might get alloced next time */
/* Note: lock should be held when calling this */
static void bitmap_checkfree(struct bitmap *bitmap, unsigned long page)
{
	char *ptr;

	if (bitmap->bp[page].count) /* page is still busy */
		return;

	/* page is no longer in use, it can be released */

	if (bitmap->bp[page].hijacked) { /* page was hijacked, undo this now */
		bitmap->bp[page].hijacked = 0;
		bitmap->bp[page].map = NULL;
	} else {
		/* normal case, free the page */
		ptr = bitmap->bp[page].map;
		bitmap->bp[page].map = NULL;
		bitmap->missing_pages++;
		kfree(ptr);
	}
}

/*
 * bitmap file handling - read and write the bitmap file and its superblock
 */

/*
 * basic page I/O operations
 */

/* IO operations when bitmap is stored near all superblocks */
static struct page *read_sb_page(struct mddev *mddev, loff_t offset,
				 struct page *page,
				 unsigned long index, int size)
{
	/* choose a good rdev and read the page from there */

	struct md_rdev *rdev;
	sector_t target;
	int did_alloc = 0;

	if (!page) {
		page = alloc_page(GFP_KERNEL);
		if (!page)
			return ERR_PTR(-ENOMEM);
		did_alloc = 1;
	}

	rdev_for_each(rdev, mddev) {
		if (! test_bit(In_sync, &rdev->flags)
		    || test_bit(Faulty, &rdev->flags))
			continue;

		target = offset + index * (PAGE_SIZE/512);

		if (sync_page_io(rdev, target,
				 roundup(size, bdev_logical_block_size(rdev->bdev)),
				 page, READ, true)) {
			page->index = index;
			attach_page_buffers(page, NULL); /* so that free_buffer will
							  * quietly no-op */
			return page;
		}
	}
	if (did_alloc)
		put_page(page);
	return ERR_PTR(-EIO);

}

static struct md_rdev *next_active_rdev(struct md_rdev *rdev, struct mddev *mddev)
{
	/* Iterate the disks of an mddev, using rcu to protect access to the
	 * linked list, and raising the refcount of devices we return to ensure
	 * they don't disappear while in use.
	 * As devices are only added or removed when raid_disk is < 0 and
	 * nr_pending is 0 and In_sync is clear, the entries we return will
	 * still be in the same position on the list when we re-enter
	 * list_for_each_continue_rcu.
	 */
	struct list_head *pos;
	rcu_read_lock();
	if (rdev == NULL)
		/* start at the beginning */
		pos = &mddev->disks;
	else {
		/* release the previous rdev and start from there. */
		rdev_dec_pending(rdev, mddev);
		pos = &rdev->same_set;
	}
	list_for_each_continue_rcu(pos, &mddev->disks) {
		rdev = list_entry(pos, struct md_rdev, same_set);
		if (rdev->raid_disk >= 0 &&
		    !test_bit(Faulty, &rdev->flags)) {
			/* this is a usable device */
			atomic_inc(&rdev->nr_pending);
			rcu_read_unlock();
			return rdev;
		}
	}
	rcu_read_unlock();
	return NULL;
}

static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait)
{
	struct md_rdev *rdev = NULL;
	struct block_device *bdev;
	struct mddev *mddev = bitmap->mddev;

	while ((rdev = next_active_rdev(rdev, mddev)) != NULL) {
		int size = PAGE_SIZE;
		loff_t offset = mddev->bitmap_info.offset;

		bdev = (rdev->meta_bdev) ? rdev->meta_bdev : rdev->bdev;

		if (page->index == bitmap->file_pages-1)
			size = roundup(bitmap->last_page_size,
				       bdev_logical_block_size(bdev));
		/* Just make sure we aren't corrupting data or
		 * metadata
		 */
		if (mddev->external) {
			/* Bitmap could be anywhere. */
			if (rdev->sb_start + offset + (page->index
						       * (PAGE_SIZE/512))
			    > rdev->data_offset
			    &&
			    rdev->sb_start + offset
			    < (rdev->data_offset + mddev->dev_sectors
			     + (PAGE_SIZE/512)))
				goto bad_alignment;
		} else if (offset < 0) {
			/* DATA  BITMAP METADATA  */
			if (offset
			    + (long)(page->index * (PAGE_SIZE/512))
			    + size/512 > 0)
				/* bitmap runs in to metadata */
				goto bad_alignment;
			if (rdev->data_offset + mddev->dev_sectors
			    > rdev->sb_start + offset)
				/* data runs in to bitmap */
				goto bad_alignment;
		} else if (rdev->sb_start < rdev->data_offset) {
			/* METADATA BITMAP DATA */
			if (rdev->sb_start
			    + offset
			    + page->index*(PAGE_SIZE/512) + size/512
			    > rdev->data_offset)
				/* bitmap runs in to data */
				goto bad_alignment;
		} else {
			/* DATA METADATA BITMAP - no problems */
		}
		md_super_write(mddev, rdev,
			       rdev->sb_start + offset
			       + page->index * (PAGE_SIZE/512),
			       size,
			       page);
	}

	if (wait)
		md_super_wait(mddev);
	return 0;

 bad_alignment:
	return -EINVAL;
}

static void bitmap_file_kick(struct bitmap *bitmap);
/*
 * write out a page to a file
 */
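/*
 * (For a file-backed bitmap this reuses the buffer_heads that
 * read_page() attached -- one per on-disk block, with b_blocknr
 * already mapped via bmap() -- so the write goes straight to the
 * block device, bypassing the filesystem.)
 */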
static void write_page(struct bitmap *bitmap, struct page *page, int wait)
{
	struct buffer_head *bh;

	if (bitmap->file == NULL) {
		switch (write_sb_page(bitmap, page, wait)) {
		case -EINVAL:
			bitmap->flags |= BITMAP_WRITE_ERROR;
		}
	} else {

		bh = page_buffers(page);

		while (bh && bh->b_blocknr) {
			atomic_inc(&bitmap->pending_writes);
			set_buffer_locked(bh);
			set_buffer_mapped(bh);
			submit_bh(WRITE | REQ_SYNC, bh);
			bh = bh->b_this_page;
		}

		if (wait)
			wait_event(bitmap->write_wait,
				   atomic_read(&bitmap->pending_writes)==0);
	}
	if (bitmap->flags & BITMAP_WRITE_ERROR)
		bitmap_file_kick(bitmap);
}

static void end_bitmap_write(struct buffer_head *bh, int uptodate)
{
	struct bitmap *bitmap = bh->b_private;
	unsigned long flags;

	if (!uptodate) {
		spin_lock_irqsave(&bitmap->lock, flags);
		bitmap->flags |= BITMAP_WRITE_ERROR;
		spin_unlock_irqrestore(&bitmap->lock, flags);
	}
	if (atomic_dec_and_test(&bitmap->pending_writes))
		wake_up(&bitmap->write_wait);
}

/* copied from buffer.c */
static void
__clear_page_buffers(struct page *page)
{
	ClearPagePrivate(page);
	set_page_private(page, 0);
	page_cache_release(page);
}
static void free_buffers(struct page *page)
{
	struct buffer_head *bh = page_buffers(page);

	while (bh) {
		struct buffer_head *next = bh->b_this_page;
		free_buffer_head(bh);
		bh = next;
	}
	__clear_page_buffers(page);
	put_page(page);
}

/* read a page from a file.
 * We both read the page, and attach buffers to the page to record the
 * address of each block (using bmap).  These addresses will be used
 * to write the block later, completely bypassing the filesystem.
 * This usage is similar to how swap files are handled, and allows us
 * to write to a file with no concerns of memory allocation failing.
 */
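/* (A corollary: the file must be fully allocated -- if bmap() returns
 * 0 for any block we would have nowhere to write it back, so such a
 * file is rejected with -EINVAL below.)
 */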
static struct page *read_page(struct file *file, unsigned long index,
			      struct bitmap *bitmap,
			      unsigned long count)
{
	struct page *page = NULL;
	struct inode *inode = file->f_path.dentry->d_inode;
	struct buffer_head *bh;
	sector_t block;

	pr_debug("read bitmap file (%dB @ %llu)\n", (int)PAGE_SIZE,
		 (unsigned long long)index << PAGE_SHIFT);

	page = alloc_page(GFP_KERNEL);
	if (!page)
		page = ERR_PTR(-ENOMEM);
	if (IS_ERR(page))
		goto out;

	bh = alloc_page_buffers(page, 1<<inode->i_blkbits, 0);
	if (!bh) {
		put_page(page);
		page = ERR_PTR(-ENOMEM);
		goto out;
	}
	attach_page_buffers(page, bh);
	block = index << (PAGE_SHIFT - inode->i_blkbits);
	while (bh) {
		if (count == 0)
			bh->b_blocknr = 0;
		else {
			bh->b_blocknr = bmap(inode, block);
			if (bh->b_blocknr == 0) {
				/* Cannot use this file! */
				free_buffers(page);
				page = ERR_PTR(-EINVAL);
				goto out;
			}
			bh->b_bdev = inode->i_sb->s_bdev;
			if (count < (1<<inode->i_blkbits))
				count = 0;
			else
				count -= (1<<inode->i_blkbits);

			bh->b_end_io = end_bitmap_write;
			bh->b_private = bitmap;
			atomic_inc(&bitmap->pending_writes);
			set_buffer_locked(bh);
			set_buffer_mapped(bh);
			submit_bh(READ, bh);
		}
		block++;
		bh = bh->b_this_page;
	}
	page->index = index;

	wait_event(bitmap->write_wait,
		   atomic_read(&bitmap->pending_writes)==0);
	if (bitmap->flags & BITMAP_WRITE_ERROR) {
		free_buffers(page);
		page = ERR_PTR(-EIO);
	}
out:
	if (IS_ERR(page))
		printk(KERN_ALERT "md: bitmap read error: (%dB @ %llu): %ld\n",
			(int)PAGE_SIZE,
			(unsigned long long)index << PAGE_SHIFT,
			PTR_ERR(page));
	return page;
}

/*
 * bitmap file superblock operations
 */

/* update the event counter and sync the superblock to disk */
void bitmap_update_sb(struct bitmap *bitmap)
{
	bitmap_super_t *sb;

	if (!bitmap || !bitmap->mddev) /* no bitmap for this array */
		return;
	if (bitmap->mddev->bitmap_info.external)
		return;
	if (!bitmap->sb_page) /* no superblock */
		return;
	sb = kmap_atomic(bitmap->sb_page);
	sb->events = cpu_to_le64(bitmap->mddev->events);
	if (bitmap->mddev->events < bitmap->events_cleared)
		/* rocking back to read-only */
		bitmap->events_cleared = bitmap->mddev->events;
	sb->events_cleared = cpu_to_le64(bitmap->events_cleared);
	sb->state = cpu_to_le32(bitmap->flags);
	/* Just in case these have been changed via sysfs: */
	sb->daemon_sleep = cpu_to_le32(bitmap->mddev->bitmap_info.daemon_sleep/HZ);
	sb->write_behind = cpu_to_le32(bitmap->mddev->bitmap_info.max_write_behind);
	kunmap_atomic(sb);
	write_page(bitmap, bitmap->sb_page, 1);
}

/* print out the bitmap file superblock */
void bitmap_print_sb(struct bitmap *bitmap)
{
	bitmap_super_t *sb;

	if (!bitmap || !bitmap->sb_page)
		return;
	sb = kmap_atomic(bitmap->sb_page);
	printk(KERN_DEBUG "%s: bitmap file superblock:\n", bmname(bitmap));
	printk(KERN_DEBUG "         magic: %08x\n", le32_to_cpu(sb->magic));
	printk(KERN_DEBUG "       version: %d\n", le32_to_cpu(sb->version));
	printk(KERN_DEBUG "          uuid: %08x.%08x.%08x.%08x\n",
					*(__u32 *)(sb->uuid+0),
					*(__u32 *)(sb->uuid+4),
					*(__u32 *)(sb->uuid+8),
					*(__u32 *)(sb->uuid+12));
	printk(KERN_DEBUG "        events: %llu\n",
			(unsigned long long) le64_to_cpu(sb->events));
	printk(KERN_DEBUG "events cleared: %llu\n",
			(unsigned long long) le64_to_cpu(sb->events_cleared));
	printk(KERN_DEBUG "         state: %08x\n", le32_to_cpu(sb->state));
	printk(KERN_DEBUG "     chunksize: %d B\n", le32_to_cpu(sb->chunksize));
	printk(KERN_DEBUG "  daemon sleep: %ds\n", le32_to_cpu(sb->daemon_sleep));
	printk(KERN_DEBUG "     sync size: %llu KB\n",
			(unsigned long long)le64_to_cpu(sb->sync_size)/2);
	printk(KERN_DEBUG "max write behind: %d\n", le32_to_cpu(sb->write_behind));
	kunmap_atomic(sb);
}

/*
 * bitmap_new_disk_sb
 * @bitmap
 *
 * This function is somewhat the reverse of bitmap_read_sb.  bitmap_read_sb
 * reads and verifies the on-disk bitmap superblock and populates bitmap_info.
 * This function verifies 'bitmap_info' and populates the on-disk bitmap
 * structure, which is to be written to disk.
 *
 * Returns: 0 on success, -Exxx on error
 */
static int bitmap_new_disk_sb(struct bitmap *bitmap)
{
	bitmap_super_t *sb;
	unsigned long chunksize, daemon_sleep, write_behind;
	int err = -EINVAL;

	bitmap->sb_page = alloc_page(GFP_KERNEL);
	if (IS_ERR(bitmap->sb_page)) {
		err = PTR_ERR(bitmap->sb_page);
		bitmap->sb_page = NULL;
		return err;
	}
	bitmap->sb_page->index = 0;

	sb = kmap_atomic(bitmap->sb_page);

	sb->magic = cpu_to_le32(BITMAP_MAGIC);
	sb->version = cpu_to_le32(BITMAP_MAJOR_HI);

	chunksize = bitmap->mddev->bitmap_info.chunksize;
	BUG_ON(!chunksize);
	if (!is_power_of_2(chunksize)) {
		kunmap_atomic(sb);
		printk(KERN_ERR "bitmap chunksize not a power of 2\n");
		return -EINVAL;
	}
	sb->chunksize = cpu_to_le32(chunksize);

	daemon_sleep = bitmap->mddev->bitmap_info.daemon_sleep;
	if (!daemon_sleep ||
	    (daemon_sleep < 1) || (daemon_sleep > MAX_SCHEDULE_TIMEOUT)) {
		printk(KERN_INFO "Choosing daemon_sleep default (5 sec)\n");
		daemon_sleep = 5 * HZ;
	}
	sb->daemon_sleep = cpu_to_le32(daemon_sleep);
	bitmap->mddev->bitmap_info.daemon_sleep = daemon_sleep;

	/*
	 * FIXME: write_behind for RAID1.  If not specified, what
	 * is a good choice?  We choose COUNTER_MAX / 2 arbitrarily.
	 */
	write_behind = bitmap->mddev->bitmap_info.max_write_behind;
	if (write_behind > COUNTER_MAX)
		write_behind = COUNTER_MAX / 2;
	sb->write_behind = cpu_to_le32(write_behind);
	bitmap->mddev->bitmap_info.max_write_behind = write_behind;

	/* keep the array size field of the bitmap superblock up to date */
	sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors);

	memcpy(sb->uuid, bitmap->mddev->uuid, 16);

	bitmap->flags |= BITMAP_STALE;
	sb->state |= cpu_to_le32(BITMAP_STALE);
	bitmap->events_cleared = bitmap->mddev->events;
	sb->events_cleared = cpu_to_le64(bitmap->mddev->events);

	kunmap_atomic(sb);

	return 0;
}

/* read the superblock from the bitmap file and initialize some bitmap fields */
static int bitmap_read_sb(struct bitmap *bitmap)
{
	char *reason = NULL;
	bitmap_super_t *sb;
	unsigned long chunksize, daemon_sleep, write_behind;
	unsigned long long events;
	int err = -EINVAL;

	/* page 0 is the superblock, read it... */
	if (bitmap->file) {
		loff_t isize = i_size_read(bitmap->file->f_mapping->host);
		int bytes = isize > PAGE_SIZE ? PAGE_SIZE : isize;

		bitmap->sb_page = read_page(bitmap->file, 0, bitmap, bytes);
	} else {
		bitmap->sb_page = read_sb_page(bitmap->mddev,
					       bitmap->mddev->bitmap_info.offset,
					       NULL,
					       0, sizeof(bitmap_super_t));
	}
	if (IS_ERR(bitmap->sb_page)) {
		err = PTR_ERR(bitmap->sb_page);
		bitmap->sb_page = NULL;
		return err;
	}

	sb = kmap_atomic(bitmap->sb_page);

	chunksize = le32_to_cpu(sb->chunksize);
	daemon_sleep = le32_to_cpu(sb->daemon_sleep) * HZ;
	write_behind = le32_to_cpu(sb->write_behind);

	/* verify that the bitmap-specific fields are valid */
	if (sb->magic != cpu_to_le32(BITMAP_MAGIC))
		reason = "bad magic";
	else if (le32_to_cpu(sb->version) < BITMAP_MAJOR_LO ||
		 le32_to_cpu(sb->version) > BITMAP_MAJOR_HI)
		reason = "unrecognized superblock version";
	else if (chunksize < 512)
		reason = "bitmap chunksize too small";
	else if (!is_power_of_2(chunksize))
		reason = "bitmap chunksize not a power of 2";
	else if (daemon_sleep < 1 || daemon_sleep > MAX_SCHEDULE_TIMEOUT)
		reason = "daemon sleep period out of range";
	else if (write_behind > COUNTER_MAX)
		reason = "write-behind limit out of range (0 - 16383)";
	if (reason) {
		printk(KERN_INFO "%s: invalid bitmap file superblock: %s\n",
			bmname(bitmap), reason);
		goto out;
	}

	/* keep the array size field of the bitmap superblock up to date */
	sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors);

	if (bitmap->mddev->persistent) {
		/*
		 * We have a persistent array superblock, so compare the
		 * bitmap's UUID and event counter to the mddev's
		 */
		if (memcmp(sb->uuid, bitmap->mddev->uuid, 16)) {
			printk(KERN_INFO
			       "%s: bitmap superblock UUID mismatch\n",
			       bmname(bitmap));
			goto out;
		}
		events = le64_to_cpu(sb->events);
		if (events < bitmap->mddev->events) {
			printk(KERN_INFO
			       "%s: bitmap file is out of date (%llu < %llu) "
			       "-- forcing full recovery\n",
			       bmname(bitmap), events,
			       (unsigned long long) bitmap->mddev->events);
			sb->state |= cpu_to_le32(BITMAP_STALE);
		}
	}

	/* assign fields using values from superblock */
	bitmap->mddev->bitmap_info.chunksize = chunksize;
	bitmap->mddev->bitmap_info.daemon_sleep = daemon_sleep;
	bitmap->mddev->bitmap_info.max_write_behind = write_behind;
	bitmap->flags |= le32_to_cpu(sb->state);
	if (le32_to_cpu(sb->version) == BITMAP_MAJOR_HOSTENDIAN)
		bitmap->flags |= BITMAP_HOSTENDIAN;
	bitmap->events_cleared = le64_to_cpu(sb->events_cleared);
	if (bitmap->flags & BITMAP_STALE)
		bitmap->events_cleared = bitmap->mddev->events;
	err = 0;
out:
	kunmap_atomic(sb);
	if (err)
		bitmap_print_sb(bitmap);
	return err;
}

enum bitmap_mask_op {
	MASK_SET,
	MASK_UNSET
};

/* record the state of the bitmap in the superblock.  Return the old value */
static int bitmap_mask_state(struct bitmap *bitmap, enum bitmap_state bits,
			     enum bitmap_mask_op op)
{
	bitmap_super_t *sb;
	int old;

	if (!bitmap->sb_page) /* can't set the state */
		return 0;
	sb = kmap_atomic(bitmap->sb_page);
	old = le32_to_cpu(sb->state) & bits;
	switch (op) {
	case MASK_SET:
		sb->state |= cpu_to_le32(bits);
		bitmap->flags |= bits;
		break;
	case MASK_UNSET:
		sb->state &= cpu_to_le32(~bits);
		bitmap->flags &= ~bits;
		break;
	default:
		BUG();
	}
	kunmap_atomic(sb);
	return old;
}

/*
 * general bitmap file operations
 */

/*
 * on-disk bitmap:
 *
 * Use one bit per "chunk" (block set). We do the disk I/O on the bitmap
 * file a page at a time. There's a superblock at the start of the file.
 */
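/*
 * Worked example (assuming 4096-byte pages): sizeof(bitmap_super_t) is
 * 256, so an internal superblock occupies the first 256 << 3 == 2048
 * bits of page 0.  Chunk 0 then maps to bit 2048 of page 0, and chunk
 * 30720 is the first chunk stored in page 1.
 */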
/* calculate the index of the page that contains this bit */
static inline unsigned long file_page_index(struct bitmap *bitmap, unsigned long chunk)
{
	if (!bitmap->mddev->bitmap_info.external)
		chunk += sizeof(bitmap_super_t) << 3;
	return chunk >> PAGE_BIT_SHIFT;
}

/* calculate the (bit) offset of this bit within a page */
static inline unsigned long file_page_offset(struct bitmap *bitmap, unsigned long chunk)
{
	if (!bitmap->mddev->bitmap_info.external)
		chunk += sizeof(bitmap_super_t) << 3;
	return chunk & (PAGE_BITS - 1);
}

/*
 * return a pointer to the page in the filemap that contains the given bit
 *
 * this lookup is complicated by the fact that the bitmap sb might be exactly
 * 1 page (e.g., x86) or less than 1 page -- so the bitmap might start on page
 * 0 or page 1
 */
static inline struct page *filemap_get_page(struct bitmap *bitmap,
					    unsigned long chunk)
{
	if (file_page_index(bitmap, chunk) >= bitmap->file_pages)
		return NULL;
	return bitmap->filemap[file_page_index(bitmap, chunk)
			       - file_page_index(bitmap, 0)];
}

static void bitmap_file_unmap(struct bitmap *bitmap)
{
	struct page **map, *sb_page;
	unsigned long *attr;
	int pages;
	unsigned long flags;

	spin_lock_irqsave(&bitmap->lock, flags);
	map = bitmap->filemap;
	bitmap->filemap = NULL;
	attr = bitmap->filemap_attr;
	bitmap->filemap_attr = NULL;
	pages = bitmap->file_pages;
	bitmap->file_pages = 0;
	sb_page = bitmap->sb_page;
	bitmap->sb_page = NULL;
	spin_unlock_irqrestore(&bitmap->lock, flags);

	while (pages--)
		if (map[pages] != sb_page) /* 0 is sb_page, release it below */
			free_buffers(map[pages]);
	kfree(map);
	kfree(attr);

	if (sb_page)
		free_buffers(sb_page);
}

static void bitmap_file_put(struct bitmap *bitmap)
{
	struct file *file;
	unsigned long flags;

	spin_lock_irqsave(&bitmap->lock, flags);
	file = bitmap->file;
	bitmap->file = NULL;
	spin_unlock_irqrestore(&bitmap->lock, flags);

	if (file)
		wait_event(bitmap->write_wait,
			   atomic_read(&bitmap->pending_writes)==0);
	bitmap_file_unmap(bitmap);

	if (file) {
		struct inode *inode = file->f_path.dentry->d_inode;
		invalidate_mapping_pages(inode->i_mapping, 0, -1);
		fput(file);
	}
}

/*
 * bitmap_file_kick - if an error occurs while manipulating the bitmap file
 * then it is no longer reliable, so we stop using it and we mark the file
 * as failed in the superblock
 */
static void bitmap_file_kick(struct bitmap *bitmap)
{
	char *path, *ptr = NULL;

	if (bitmap_mask_state(bitmap, BITMAP_STALE, MASK_SET) == 0) {
		bitmap_update_sb(bitmap);

		if (bitmap->file) {
			path = kmalloc(PAGE_SIZE, GFP_KERNEL);
			if (path)
				ptr = d_path(&bitmap->file->f_path, path,
					     PAGE_SIZE);

			printk(KERN_ALERT
			      "%s: kicking failed bitmap file %s from array!\n",
			      bmname(bitmap), IS_ERR(ptr) ? "" : ptr);

			kfree(path);
		} else
			printk(KERN_ALERT
			       "%s: disabling internal bitmap due to errors\n",
			       bmname(bitmap));
	}

	bitmap_file_put(bitmap);

	return;
}

enum bitmap_page_attr {
	BITMAP_PAGE_DIRTY = 0,     /* there are set bits that need to be synced */
	BITMAP_PAGE_PENDING = 1,   /* there are bits that are being cleaned.
				    * i.e. counter is 1 or 2. */
	BITMAP_PAGE_NEEDWRITE = 2, /* there are cleared bits that need to be synced */
};
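/*
 * Four attribute bits are reserved per filemap page (one currently
 * spare), which is why the accessors below index filemap_attr with
 * (page->index << 2) + attr.
 */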

static inline void set_page_attr(struct bitmap *bitmap, struct page *page,
				enum bitmap_page_attr attr)
{
	__set_bit((page->index<<2) + attr, bitmap->filemap_attr);
}

static inline void clear_page_attr(struct bitmap *bitmap, struct page *page,
				enum bitmap_page_attr attr)
{
	__clear_bit((page->index<<2) + attr, bitmap->filemap_attr);
}

static inline unsigned long test_page_attr(struct bitmap *bitmap, struct page *page,
					   enum bitmap_page_attr attr)
{
	return test_bit((page->index<<2) + attr, bitmap->filemap_attr);
}

/*
 * bitmap_file_set_bit -- called before performing a write to the md device
 * to set (and eventually sync) a particular bit in the bitmap file
 *
 * we set the bit immediately, then we record the page number so that
 * when an unplug occurs, we can flush the dirty pages out to disk
 */
static void bitmap_file_set_bit(struct bitmap *bitmap, sector_t block)
{
	unsigned long bit;
	struct page *page;
	void *kaddr;
	unsigned long chunk = block >> bitmap->chunkshift;

	if (!bitmap->filemap)
		return;

	page = filemap_get_page(bitmap, chunk);
	if (!page)
		return;
	bit = file_page_offset(bitmap, chunk);

	/* set the bit */
	kaddr = kmap_atomic(page);
	if (bitmap->flags & BITMAP_HOSTENDIAN)
		set_bit(bit, kaddr);
	else
		__set_bit_le(bit, kaddr);
	kunmap_atomic(kaddr);
	pr_debug("set file bit %lu page %lu\n", bit, page->index);
	/* record page number so it gets flushed to disk when unplug occurs */
	set_page_attr(bitmap, page, BITMAP_PAGE_DIRTY);
}

/* this gets called when the md device is ready to unplug its underlying
 * (slave) device queues -- before we let any writes go down, we need to
 * sync the dirty pages of the bitmap file to disk */
void bitmap_unplug(struct bitmap *bitmap)
{
	unsigned long i, flags;
	int dirty, need_write;
	struct page *page;
	int wait = 0;

	if (!bitmap)
		return;

	/* look at each page to see if there are any set bits that need to be
	 * flushed out to disk */
	for (i = 0; i < bitmap->file_pages; i++) {
		spin_lock_irqsave(&bitmap->lock, flags);
		if (!bitmap->filemap) {
			spin_unlock_irqrestore(&bitmap->lock, flags);
			return;
		}
		page = bitmap->filemap[i];
		dirty = test_page_attr(bitmap, page, BITMAP_PAGE_DIRTY);
		need_write = test_page_attr(bitmap, page, BITMAP_PAGE_NEEDWRITE);
		clear_page_attr(bitmap, page, BITMAP_PAGE_DIRTY);
		clear_page_attr(bitmap, page, BITMAP_PAGE_NEEDWRITE);
		if (dirty || need_write)
			clear_page_attr(bitmap, page, BITMAP_PAGE_PENDING);
		if (dirty)
			wait = 1;
		spin_unlock_irqrestore(&bitmap->lock, flags);

		if (dirty || need_write)
			write_page(bitmap, page, 0);
	}
	if (wait) { /* if any writes were performed, we need to wait on them */
		if (bitmap->file)
			wait_event(bitmap->write_wait,
				   atomic_read(&bitmap->pending_writes)==0);
		else
			md_super_wait(bitmap->mddev);
	}
	if (bitmap->flags & BITMAP_WRITE_ERROR)
		bitmap_file_kick(bitmap);
}
EXPORT_SYMBOL(bitmap_unplug);

static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed);
/*
 * bitmap_init_from_disk -- called at bitmap_create time to initialize
 * the in-memory bitmap from the on-disk bitmap -- also, sets up the
 * memory mapping of the bitmap file
 * Special cases:
 *   if there's no bitmap file, or if the bitmap file had been
 *   previously kicked from the array, we mark all the bits as
 *   1's in order to cause a full resync.
 *
 * We ignore all bits for sectors that end earlier than 'start'.
 * This is used when reading an out-of-date bitmap...
 */
static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
{
	unsigned long i, chunks, index, oldindex, bit;
	struct page *page = NULL, *oldpage = NULL;
	unsigned long num_pages, bit_cnt = 0;
	struct file *file;
	unsigned long bytes, offset;
	int outofdate;
	int ret = -ENOSPC;
	void *paddr;

	chunks = bitmap->chunks;
	file = bitmap->file;

	BUG_ON(!file && !bitmap->mddev->bitmap_info.offset);

	outofdate = bitmap->flags & BITMAP_STALE;
	if (outofdate)
		printk(KERN_INFO "%s: bitmap file is out of date, doing full "
			"recovery\n", bmname(bitmap));

	bytes = DIV_ROUND_UP(bitmap->chunks, 8);
	if (!bitmap->mddev->bitmap_info.external)
		bytes += sizeof(bitmap_super_t);

	num_pages = DIV_ROUND_UP(bytes, PAGE_SIZE);

	if (file && i_size_read(file->f_mapping->host) < bytes) {
		printk(KERN_INFO "%s: bitmap file too short %lu < %lu\n",
			bmname(bitmap),
			(unsigned long) i_size_read(file->f_mapping->host),
			bytes);
		goto err;
	}

	ret = -ENOMEM;

	bitmap->filemap = kmalloc(sizeof(struct page *) * num_pages, GFP_KERNEL);
	if (!bitmap->filemap)
		goto err;

	/* We need 4 bits per page, rounded up to a multiple of sizeof(unsigned long) */
	bitmap->filemap_attr = kzalloc(
		roundup(DIV_ROUND_UP(num_pages*4, 8), sizeof(unsigned long)),
		GFP_KERNEL);
	if (!bitmap->filemap_attr)
		goto err;

	oldindex = ~0L;

	for (i = 0; i < chunks; i++) {
		int b;
		index = file_page_index(bitmap, i);
		bit = file_page_offset(bitmap, i);
		if (index != oldindex) { /* this is a new page, read it in */
			int count;
			/* unmap the old page, we're done with it */
			if (index == num_pages-1)
				count = bytes - index * PAGE_SIZE;
			else
				count = PAGE_SIZE;
			if (index == 0 && bitmap->sb_page) {
				/*
				 * if we're here then the superblock page
				 * contains some bits (PAGE_SIZE != sizeof sb)
				 * we've already read it in, so just use it
				 */
				page = bitmap->sb_page;
				offset = sizeof(bitmap_super_t);
				if (!file)
					page = read_sb_page(
						bitmap->mddev,
						bitmap->mddev->bitmap_info.offset,
						page,
						index, count);
			} else if (file) {
				page = read_page(file, index, bitmap, count);
				offset = 0;
			} else {
				page = read_sb_page(bitmap->mddev,
						    bitmap->mddev->bitmap_info.offset,
						    NULL,
						    index, count);
				offset = 0;
			}
			if (IS_ERR(page)) { /* read error */
				ret = PTR_ERR(page);
				goto err;
			}

			oldindex = index;
			oldpage = page;

			bitmap->filemap[bitmap->file_pages++] = page;
			bitmap->last_page_size = count;

			if (outofdate) {
				/*
				 * if bitmap is out of date, dirty the
				 * whole page and write it out
				 */
				paddr = kmap_atomic(page);
				memset(paddr + offset, 0xff,
				       PAGE_SIZE - offset);
				kunmap_atomic(paddr);
				write_page(bitmap, page, 1);

				ret = -EIO;
				if (bitmap->flags & BITMAP_WRITE_ERROR)
					goto err;
			}
		}
		paddr = kmap_atomic(page);
		if (bitmap->flags & BITMAP_HOSTENDIAN)
			b = test_bit(bit, paddr);
		else
			b = test_bit_le(bit, paddr);
		kunmap_atomic(paddr);
		if (b) {
			/* if the disk bit is set, set the memory bit */
			int needed = ((sector_t)(i+1) << bitmap->chunkshift
				      >= start);
			bitmap_set_memory_bits(bitmap,
					       (sector_t)i << bitmap->chunkshift,
					       needed);
			bit_cnt++;
		}
	}

	/* everything went OK */
	ret = 0;
	bitmap_mask_state(bitmap, BITMAP_STALE, MASK_UNSET);

	if (bit_cnt) { /* Kick recovery if any bits were set */
		set_bit(MD_RECOVERY_NEEDED, &bitmap->mddev->recovery);
		md_wakeup_thread(bitmap->mddev->thread);
	}

	printk(KERN_INFO "%s: bitmap initialized from disk: "
	       "read %lu/%lu pages, set %lu of %lu bits\n",
	       bmname(bitmap), bitmap->file_pages, num_pages, bit_cnt, chunks);

	return 0;

 err:
	printk(KERN_INFO "%s: bitmap initialisation failed: %d\n",
	       bmname(bitmap), ret);
	return ret;
}

void bitmap_write_all(struct bitmap *bitmap)
{
	/* We don't actually write all bitmap blocks here,
	 * just flag them as needing to be written
	 */
	int i;

	spin_lock_irq(&bitmap->lock);
	for (i = 0; i < bitmap->file_pages; i++)
		set_page_attr(bitmap, bitmap->filemap[i],
			      BITMAP_PAGE_NEEDWRITE);
	bitmap->allclean = 0;
	spin_unlock_irq(&bitmap->lock);
}

static void bitmap_count_page(struct bitmap *bitmap, sector_t offset, int inc)
{
	sector_t chunk = offset >> bitmap->chunkshift;
	unsigned long page = chunk >> PAGE_COUNTER_SHIFT;
	bitmap->bp[page].count += inc;
	bitmap_checkfree(bitmap, page);
}

static void bitmap_set_pending(struct bitmap *bitmap, sector_t offset)
{
	sector_t chunk = offset >> bitmap->chunkshift;
	unsigned long page = chunk >> PAGE_COUNTER_SHIFT;
	struct bitmap_page *bp = &bitmap->bp[page];

	if (!bp->pending)
		bp->pending = 1;
}
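/*
 * (The per-page 'pending' flag set above lets bitmap_daemon_work skip a
 * whole page worth of counters at a time when none of them have bits
 * waiting to be aged and cleaned.)
 */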

static bitmap_counter_t *bitmap_get_counter(struct bitmap *bitmap,
					    sector_t offset, sector_t *blocks,
					    int create);

/*
 * bitmap daemon -- periodically wakes up to clean bits and flush pages
 *			out to disk
 */
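/*
 * Cleaning is staged across passes: an idle counter is decremented
 * from 2 to 1 on one pass, and from 1 to 0 on a later pass, at which
 * point the corresponding on-disk bit is cleared and the file page is
 * queued for writeout.
 */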

void bitmap_daemon_work(struct mddev *mddev)
{
	struct bitmap *bitmap;
	unsigned long j;
	unsigned long nextpage;
	unsigned long flags;
	sector_t blocks;
	void *paddr;

	/* Use a mutex to guard daemon_work against
	 * bitmap_destroy.
	 */
	mutex_lock(&mddev->bitmap_info.mutex);
	bitmap = mddev->bitmap;
	if (bitmap == NULL) {
		mutex_unlock(&mddev->bitmap_info.mutex);
		return;
	}
	if (time_before(jiffies, bitmap->daemon_lastrun
			+ mddev->bitmap_info.daemon_sleep))
		goto done;

	bitmap->daemon_lastrun = jiffies;
	if (bitmap->allclean) {
		mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;
		goto done;
	}
	bitmap->allclean = 1;

	/* Any file-page which is PENDING now needs to be written.
	 * So set NEEDWRITE now, then after we make any last-minute changes
	 * we will write it.
	 */
	spin_lock_irqsave(&bitmap->lock, flags);
	if (!bitmap->filemap)
		/* error or shutdown */
		goto out;

	for (j = 0; j < bitmap->file_pages; j++)
		if (test_page_attr(bitmap, bitmap->filemap[j],
				   BITMAP_PAGE_PENDING)) {
			set_page_attr(bitmap, bitmap->filemap[j],
				      BITMAP_PAGE_NEEDWRITE);
			clear_page_attr(bitmap, bitmap->filemap[j],
					BITMAP_PAGE_PENDING);
		}

	if (bitmap->need_sync &&
	    mddev->bitmap_info.external == 0) {
		/* Arrange for superblock update as well as
		 * other changes */
		bitmap_super_t *sb;
		bitmap->need_sync = 0;
		sb = kmap_atomic(bitmap->sb_page);
		sb->events_cleared =
			cpu_to_le64(bitmap->events_cleared);
		kunmap_atomic(sb);
		set_page_attr(bitmap, bitmap->sb_page, BITMAP_PAGE_NEEDWRITE);
	}
	/* Now look at the bitmap counters and if any are '2' or '1',
	 * decrement and handle accordingly.
	 */
	nextpage = 0;
	for (j = 0; j < bitmap->chunks; j++) {
		bitmap_counter_t *bmc;

		if (j == nextpage) {
			nextpage += PAGE_COUNTER_RATIO;
			if (!bitmap->bp[j >> PAGE_COUNTER_SHIFT].pending) {
				j |= PAGE_COUNTER_MASK;
				continue;
			}
			bitmap->bp[j >> PAGE_COUNTER_SHIFT].pending = 0;
		}
		bmc = bitmap_get_counter(bitmap,
					 (sector_t)j << bitmap->chunkshift,
					 &blocks, 0);

		if (!bmc) {
			j |= PAGE_COUNTER_MASK;
			continue;
		}
		if (*bmc == 1 && !bitmap->need_sync) {
			/* We can clear the bit */
			struct page *page;
			*bmc = 0;
			bitmap_count_page(
				bitmap,
				(sector_t)j << bitmap->chunkshift,
				-1);

			page = filemap_get_page(bitmap, j);
			paddr = kmap_atomic(page);
			if (bitmap->flags & BITMAP_HOSTENDIAN)
				clear_bit(file_page_offset(bitmap, j),
					  paddr);
			else
				__clear_bit_le(file_page_offset(bitmap, j),
					       paddr);
			kunmap_atomic(paddr);
			if (!test_page_attr(bitmap, page,
					    BITMAP_PAGE_NEEDWRITE)) {
				set_page_attr(bitmap, page,
					      BITMAP_PAGE_PENDING);
				bitmap->allclean = 0;
			}
		} else if (*bmc && *bmc <= 2) {
			*bmc = 1;
			bitmap_set_pending(
				bitmap,
				(sector_t)j << bitmap->chunkshift);
			bitmap->allclean = 0;
		}
	}

	/* Now start writeout on any page in NEEDWRITE that isn't DIRTY.
	 * DIRTY pages need to be written by bitmap_unplug so it can wait
	 * for them.
	 * If we find any DIRTY page we stop there and let bitmap_unplug
	 * handle all the rest.  This is important in the case where
	 * the first block holds the superblock and it has been updated.
	 * We mustn't write any other blocks before the superblock.
	 */
	for (j = 0; j < bitmap->file_pages; j++) {
		struct page *page = bitmap->filemap[j];

		if (test_page_attr(bitmap, page,
				   BITMAP_PAGE_DIRTY))
			/* bitmap_unplug will handle the rest */
			break;
		if (test_page_attr(bitmap, page,
				   BITMAP_PAGE_NEEDWRITE)) {
			clear_page_attr(bitmap, page,
					BITMAP_PAGE_NEEDWRITE);
			spin_unlock_irqrestore(&bitmap->lock, flags);
			write_page(bitmap, page, 0);
			spin_lock_irqsave(&bitmap->lock, flags);
			if (!bitmap->filemap)
				break;
		}
	}
out:
	spin_unlock_irqrestore(&bitmap->lock, flags);

 done:
	if (bitmap->allclean == 0)
		mddev->thread->timeout =
			mddev->bitmap_info.daemon_sleep;
	mutex_unlock(&mddev->bitmap_info.mutex);
}

static bitmap_counter_t *bitmap_get_counter(struct bitmap *bitmap,
					    sector_t offset, sector_t *blocks,
					    int create)
__releases(bitmap->lock)
__acquires(bitmap->lock)
{
	/* If 'create', we might release the lock and reclaim it.
	 * The lock must have been taken with interrupts enabled.
	 * If !create, we don't release the lock.
	 */
	sector_t chunk = offset >> bitmap->chunkshift;
	unsigned long page = chunk >> PAGE_COUNTER_SHIFT;
	unsigned long pageoff = (chunk & PAGE_COUNTER_MASK) << COUNTER_BYTE_SHIFT;
	sector_t csize;
	int err;

	err = bitmap_checkpage(bitmap, page, create);

	if (bitmap->bp[page].hijacked ||
	    bitmap->bp[page].map == NULL)
		csize = ((sector_t)1) << (bitmap->chunkshift +
					  PAGE_COUNTER_SHIFT - 1);
	else
		csize = ((sector_t)1) << bitmap->chunkshift;
	*blocks = csize - (offset & (csize - 1));

	if (err < 0)
		return NULL;

	/* now locked ... */

	if (bitmap->bp[page].hijacked) { /* hijacked pointer */
		/* should we use the first or second counter field
		 * of the hijacked pointer? */
		int hi = (pageoff > PAGE_COUNTER_MASK);
		return  &((bitmap_counter_t *)
			  &bitmap->bp[page].map)[hi];
	} else /* page is allocated */
		return (bitmap_counter_t *)
			&(bitmap->bp[page].map[pageoff]);
}
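
/*
 * (Counter layout, per the masks in bitmap.h: the top bit of a
 * bitmap_counter_t is NEEDED -- chunk must be resynced -- the next bit
 * is RESYNC -- resync in progress -- and the low 14 bits count writes
 * in flight to the chunk, hence COUNTER_MAX == 16383.)
 */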

int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sectors, int behind)
{
	if (!bitmap)
		return 0;

	if (behind) {
		int bw;
		atomic_inc(&bitmap->behind_writes);
		bw = atomic_read(&bitmap->behind_writes);
		if (bw > bitmap->behind_writes_used)
			bitmap->behind_writes_used = bw;

		pr_debug("inc write-behind count %d/%lu\n",
			 bw, bitmap->mddev->bitmap_info.max_write_behind);
	}

	while (sectors) {
		sector_t blocks;
		bitmap_counter_t *bmc;

		spin_lock_irq(&bitmap->lock);
		bmc = bitmap_get_counter(bitmap, offset, &blocks, 1);
		if (!bmc) {
			spin_unlock_irq(&bitmap->lock);
			return 0;
		}

		if (unlikely(COUNTER(*bmc) == COUNTER_MAX)) {
			DEFINE_WAIT(__wait);
			/* note that it is safe to do the prepare_to_wait
			 * after the test as long as we do it before dropping
			 * the spinlock.
			 */
			prepare_to_wait(&bitmap->overflow_wait, &__wait,
					TASK_UNINTERRUPTIBLE);
			spin_unlock_irq(&bitmap->lock);
			io_schedule();
			finish_wait(&bitmap->overflow_wait, &__wait);
			continue;
		}

		switch (*bmc) {
		case 0:
			bitmap_file_set_bit(bitmap, offset);
			bitmap_count_page(bitmap, offset, 1);
			/* fall through */
		case 1:
			*bmc = 2;
		}

		(*bmc)++;

		spin_unlock_irq(&bitmap->lock);

		offset += blocks;
		if (sectors > blocks)
			sectors -= blocks;
		else
			sectors = 0;
	}
	return 0;
}
EXPORT_SYMBOL(bitmap_startwrite);

void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long sectors,
		     int success, int behind)
{
	if (!bitmap)
		return;
	if (behind) {
		if (atomic_dec_and_test(&bitmap->behind_writes))
			wake_up(&bitmap->behind_wait);
		pr_debug("dec write-behind count %d/%lu\n",
			 atomic_read(&bitmap->behind_writes),
			 bitmap->mddev->bitmap_info.max_write_behind);
	}

	while (sectors) {
		sector_t blocks;
		unsigned long flags;
		bitmap_counter_t *bmc;

		spin_lock_irqsave(&bitmap->lock, flags);
		bmc = bitmap_get_counter(bitmap, offset, &blocks, 0);
		if (!bmc) {
			spin_unlock_irqrestore(&bitmap->lock, flags);
			return;
		}

		if (success && !bitmap->mddev->degraded &&
		    bitmap->events_cleared < bitmap->mddev->events) {
			bitmap->events_cleared = bitmap->mddev->events;
			bitmap->need_sync = 1;
			sysfs_notify_dirent_safe(bitmap->sysfs_can_clear);
		}

		if (!success && !NEEDED(*bmc))
			*bmc |= NEEDED_MASK;

		if (COUNTER(*bmc) == COUNTER_MAX)
			wake_up(&bitmap->overflow_wait);

		(*bmc)--;
		if (*bmc <= 2) {
			bitmap_set_pending(bitmap, offset);
			bitmap->allclean = 0;
		}
		spin_unlock_irqrestore(&bitmap->lock, flags);
		offset += blocks;
		if (sectors > blocks)
			sectors -= blocks;
		else
			sectors = 0;
	}
}
EXPORT_SYMBOL(bitmap_endwrite);

static int __bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks,
			       int degraded)
{
	bitmap_counter_t *bmc;
	int rv;
	if (bitmap == NULL) {/* FIXME or bitmap set as 'failed' */
		*blocks = 1024;
		return 1; /* always resync if no bitmap */
	}
	spin_lock_irq(&bitmap->lock);
	bmc = bitmap_get_counter(bitmap, offset, blocks, 0);
	rv = 0;
	if (bmc) {
		/* locked */
		if (RESYNC(*bmc))
			rv = 1;
		else if (NEEDED(*bmc)) {
			rv = 1;
			if (!degraded) { /* don't set/clear bits if degraded */
				*bmc |= RESYNC_MASK;
				*bmc &= ~NEEDED_MASK;
			}
		}
	}
	spin_unlock_irq(&bitmap->lock);
	return rv;
}

int bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks,
		      int degraded)
{
	/* bitmap_start_sync must always report on multiples of whole
	 * pages, otherwise resync (which is very PAGE_SIZE based) will
	 * get confused.
	 * So call __bitmap_start_sync repeatedly (if needed) until
	 * At least PAGE_SIZE>>9 blocks are covered.
	 * Return the 'or' of the result.
	 */
	int rv = 0;
	sector_t blocks1;

	*blocks = 0;
	while (*blocks < (PAGE_SIZE>>9)) {
		rv |= __bitmap_start_sync(bitmap, offset,
					  &blocks1, degraded);
		offset += blocks1;
		*blocks += blocks1;
	}
	return rv;
}
EXPORT_SYMBOL(bitmap_start_sync);

void bitmap_end_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, int aborted)
{
	bitmap_counter_t *bmc;
	unsigned long flags;

	if (bitmap == NULL) {
		*blocks = 1024;
		return;
	}
	spin_lock_irqsave(&bitmap->lock, flags);
	bmc = bitmap_get_counter(bitmap, offset, blocks, 0);
	if (bmc == NULL)
		goto unlock;
	/* locked */
	if (RESYNC(*bmc)) {
		*bmc &= ~RESYNC_MASK;

		if (!NEEDED(*bmc) && aborted)
			*bmc |= NEEDED_MASK;
		else {
			if (*bmc <= 2) {
				bitmap_set_pending(bitmap, offset);
				bitmap->allclean = 0;
			}
		}
	}
 unlock:
	spin_unlock_irqrestore(&bitmap->lock, flags);
}
EXPORT_SYMBOL(bitmap_end_sync);

void bitmap_close_sync(struct bitmap *bitmap)
{
	/* Sync has finished, and any bitmap chunks that weren't synced
	 * properly have been aborted.  It remains to us to clear the
	 * RESYNC bit wherever it is still on
	 */
	sector_t sector = 0;
	sector_t blocks;
	if (!bitmap)
		return;
	while (sector < bitmap->mddev->resync_max_sectors) {
		bitmap_end_sync(bitmap, sector, &blocks, 0);
		sector += blocks;
	}
}
EXPORT_SYMBOL(bitmap_close_sync);

void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector)
{
	sector_t s = 0;
	sector_t blocks;

	if (!bitmap)
		return;
	if (sector == 0) {
		bitmap->last_end_sync = jiffies;
		return;
	}
	if (time_before(jiffies, (bitmap->last_end_sync
				  + bitmap->mddev->bitmap_info.daemon_sleep)))
		return;
	wait_event(bitmap->mddev->recovery_wait,
		   atomic_read(&bitmap->mddev->recovery_active) == 0);

	bitmap->mddev->curr_resync_completed = sector;
	set_bit(MD_CHANGE_CLEAN, &bitmap->mddev->flags);
	sector &= ~((1ULL << bitmap->chunkshift) - 1);
	s = 0;
	while (s < sector && s < bitmap->mddev->resync_max_sectors) {
		bitmap_end_sync(bitmap, s, &blocks, 0);
		s += blocks;
	}
	bitmap->last_end_sync = jiffies;
	sysfs_notify(&bitmap->mddev->kobj, NULL, "sync_completed");
}
EXPORT_SYMBOL(bitmap_cond_end_sync);

static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed)
{
	/* For each chunk covered by any of these sectors, set the
	 * counter to 1 and set resync_needed.  They should all
	 * be 0 at this point
	 */

	sector_t secs;
	bitmap_counter_t *bmc;
	spin_lock_irq(&bitmap->lock);
	bmc = bitmap_get_counter(bitmap, offset, &secs, 1);
	if (!bmc) {
		spin_unlock_irq(&bitmap->lock);
		return;
	}
	if (!*bmc) {
		*bmc = 2 | (needed ? NEEDED_MASK : 0);
		bitmap_count_page(bitmap, offset, 1);
		bitmap_set_pending(bitmap, offset);
		bitmap->allclean = 0;
	}
	spin_unlock_irq(&bitmap->lock);
}

/* dirty the memory and file bits for bitmap chunks "s" to "e" */
void bitmap_dirty_bits(struct bitmap *bitmap, unsigned long s, unsigned long e)
{
	unsigned long chunk;

	for (chunk = s; chunk <= e; chunk++) {
		sector_t sec = (sector_t)chunk << bitmap->chunkshift;
		bitmap_set_memory_bits(bitmap, sec, 1);
		spin_lock_irq(&bitmap->lock);
		bitmap_file_set_bit(bitmap, sec);
		spin_unlock_irq(&bitmap->lock);
		if (sec < bitmap->mddev->recovery_cp)
			/* We are asserting that the array is dirty,
			 * so move the recovery_cp address back so
			 * that it is obvious that it is dirty
			 */
			bitmap->mddev->recovery_cp = sec;
	}
}

/*
 * flush out any pending updates
 */
void bitmap_flush(struct mddev *mddev)
{
	struct bitmap *bitmap = mddev->bitmap;
	long sleep;

	if (!bitmap) /* there was no bitmap */
		return;

	/* run the daemon_work three times to ensure everything is flushed
	 * that can be
	 */
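	/* (Aging an idle counter takes two passes -- 2 to 1, then 1 to 0 --
	 * and the third pass writes out the pages the second one dirtied.)
	 */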
	sleep = mddev->bitmap_info.daemon_sleep * 2;
	bitmap->daemon_lastrun -= sleep;
	bitmap_daemon_work(mddev);
	bitmap->daemon_lastrun -= sleep;
	bitmap_daemon_work(mddev);
	bitmap->daemon_lastrun -= sleep;
	bitmap_daemon_work(mddev);
	bitmap_update_sb(bitmap);
}

/*
 * free memory that was allocated
 */
static void bitmap_free(struct bitmap *bitmap)
{
	unsigned long k, pages;
	struct bitmap_page *bp;

	if (!bitmap) /* there was no bitmap */
		return;

	/* release the bitmap file and kill the daemon */
	bitmap_file_put(bitmap);

	bp = bitmap->bp;
	pages = bitmap->pages;

	/* free all allocated memory */

	if (bp) /* deallocate the page memory */
		for (k = 0; k < pages; k++)
			if (bp[k].map && !bp[k].hijacked)
				kfree(bp[k].map);
	kfree(bp);
	kfree(bitmap);
}

void bitmap_destroy(struct mddev *mddev)
{
	struct bitmap *bitmap = mddev->bitmap;

	if (!bitmap) /* there was no bitmap */
		return;

	mutex_lock(&mddev->bitmap_info.mutex);
	mddev->bitmap = NULL; /* disconnect from the md device */
	mutex_unlock(&mddev->bitmap_info.mutex);
	if (mddev->thread)
		mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;

	if (bitmap->sysfs_can_clear)
		sysfs_put(bitmap->sysfs_can_clear);

	bitmap_free(bitmap);
}

/*
 * initialize the bitmap structure
 * if this returns an error, bitmap_destroy must be called to do clean up
 */
int bitmap_create(struct mddev *mddev)
{
	struct bitmap *bitmap;
	sector_t blocks = mddev->resync_max_sectors;
	unsigned long chunks;
	unsigned long pages;
	struct file *file = mddev->bitmap_info.file;
	int err;
	struct sysfs_dirent *bm = NULL;

	BUILD_BUG_ON(sizeof(bitmap_super_t) != 256);

	if (!file
	    && !mddev->bitmap_info.offset) /* bitmap disabled, nothing to do */
		return 0;

	BUG_ON(file && mddev->bitmap_info.offset);

	bitmap = kzalloc(sizeof(*bitmap), GFP_KERNEL);
	if (!bitmap)
		return -ENOMEM;

	spin_lock_init(&bitmap->lock);
	atomic_set(&bitmap->pending_writes, 0);
	init_waitqueue_head(&bitmap->write_wait);
	init_waitqueue_head(&bitmap->overflow_wait);
	init_waitqueue_head(&bitmap->behind_wait);

	bitmap->mddev = mddev;

	if (mddev->kobj.sd)
		bm = sysfs_get_dirent(mddev->kobj.sd, NULL, "bitmap");
	if (bm) {
		bitmap->sysfs_can_clear = sysfs_get_dirent(bm, NULL, "can_clear");
		sysfs_put(bm);
	} else
		bitmap->sysfs_can_clear = NULL;

	bitmap->file = file;
	if (file) {
		get_file(file);
		/* As future accesses to this file will use bmap,
		 * and bypass the page cache, we must sync the file
		 * first.
		 */
		vfs_fsync(file, 1);
	}
	/* read superblock from bitmap file (this sets mddev->bitmap_info.chunksize) */
	if (!mddev->bitmap_info.external) {
		/*
		 * If 'MD_ARRAY_FIRST_USE' is set, then device-mapper is
		 * instructing us to create a new on-disk bitmap instance.
		 */
		if (test_and_clear_bit(MD_ARRAY_FIRST_USE, &mddev->flags))
			err = bitmap_new_disk_sb(bitmap);
		else
			err = bitmap_read_sb(bitmap);
	} else {
		err = 0;
		if (mddev->bitmap_info.chunksize == 0 ||
		    mddev->bitmap_info.daemon_sleep == 0)
			/* chunksize and time_base need to be
			 * set first. */
			err = -EINVAL;
	}
	if (err)
		goto error;

	bitmap->daemon_lastrun = jiffies;
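	/* chunksize is a power-of-2 byte count, so ffz(~chunksize) is
	 * log2(chunksize); subtracting BITMAP_BLOCK_SHIFT (9, per bitmap.h)
	 * turns it into a shift between 512-byte sectors and chunks.
	 */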
	bitmap->chunkshift = (ffz(~mddev->bitmap_info.chunksize)
			      - BITMAP_BLOCK_SHIFT);

	chunks = (blocks + (1 << bitmap->chunkshift) - 1) >>
			bitmap->chunkshift;
	pages = (chunks + PAGE_COUNTER_RATIO - 1) / PAGE_COUNTER_RATIO;

	BUG_ON(!pages);

	bitmap->chunks = chunks;
	bitmap->pages = pages;
	bitmap->missing_pages = pages;

	bitmap->bp = kzalloc(pages * sizeof(*bitmap->bp), GFP_KERNEL);

	err = -ENOMEM;
	if (!bitmap->bp)
		goto error;

	printk(KERN_INFO "created bitmap (%lu pages) for device %s\n",
		pages, bmname(bitmap));

	mddev->bitmap = bitmap;


	return (bitmap->flags & BITMAP_WRITE_ERROR) ? -EIO : 0;

 error:
	bitmap_free(bitmap);
	return err;
}

int bitmap_load(struct mddev *mddev)
{
	int err = 0;
	sector_t start = 0;
	sector_t sector = 0;
	struct bitmap *bitmap = mddev->bitmap;

	if (!bitmap)
		goto out;

	/* Clear out old bitmap info first:  Either there is none, or we
	 * are resuming after someone else has possibly changed things,
	 * so we should forget old cached info.
	 * All chunks should be clean, but some might need_sync.
	 */
	while (sector < mddev->resync_max_sectors) {
		sector_t blocks;
		bitmap_start_sync(bitmap, sector, &blocks, 0);
		sector += blocks;
	}
	bitmap_close_sync(bitmap);

	if (mddev->degraded == 0
	    || bitmap->events_cleared == mddev->events)
		/* no need to keep dirty bits to optimise a
		 * re-add of a missing device */
		start = mddev->recovery_cp;

	mutex_lock(&mddev->bitmap_info.mutex);
	err = bitmap_init_from_disk(bitmap, start);
	mutex_unlock(&mddev->bitmap_info.mutex);

	if (err)
		goto out;

	mddev->thread->timeout = mddev->bitmap_info.daemon_sleep;
	md_wakeup_thread(mddev->thread);

	bitmap_update_sb(bitmap);

	if (bitmap->flags & BITMAP_WRITE_ERROR)
		err = -EIO;
out:
	return err;
}
EXPORT_SYMBOL_GPL(bitmap_load);

void bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
{
	unsigned long chunk_kb;
	unsigned long flags;

	if (!bitmap)
		return;

	spin_lock_irqsave(&bitmap->lock, flags);
	chunk_kb = bitmap->mddev->bitmap_info.chunksize >> 10;
	seq_printf(seq, "bitmap: %lu/%lu pages [%luKB], "
		   "%lu%s chunk",
		   bitmap->pages - bitmap->missing_pages,
		   bitmap->pages,
		   (bitmap->pages - bitmap->missing_pages)
		   << (PAGE_SHIFT - 10),
		   chunk_kb ? chunk_kb : bitmap->mddev->bitmap_info.chunksize,
		   chunk_kb ? "KB" : "B");
	if (bitmap->file) {
		seq_printf(seq, ", file: ");
		seq_path(seq, &bitmap->file->f_path, " \t\n");
	}

	seq_printf(seq, "\n");
	spin_unlock_irqrestore(&bitmap->lock, flags);
}
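
/*
 * Example of the line bitmap_status() contributes to /proc/mdstat
 * (all values illustrative):
 *
 *	bitmap: 5/38 pages [20KB], 2048KB chunk, file: /var/md0-bitmap
 */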

static ssize_t
location_show(struct mddev *mddev, char *page)
{
	ssize_t len;
	if (mddev->bitmap_info.file)
		len = sprintf(page, "file");
	else if (mddev->bitmap_info.offset)
		len = sprintf(page, "%+lld", (long long)mddev->bitmap_info.offset);
	else
		len = sprintf(page, "none");
	len += sprintf(page+len, "\n");
	return len;
}

static ssize_t
location_store(struct mddev *mddev, const char *buf, size_t len)
{
	if (mddev->pers) {
		if (!mddev->pers->quiesce)
			return -EBUSY;
		if (mddev->recovery || mddev->sync_thread)
			return -EBUSY;
	}

	if (mddev->bitmap || mddev->bitmap_info.file ||
	    mddev->bitmap_info.offset) {
		/* bitmap already configured.  Only option is to clear it */
		if (strncmp(buf, "none", 4) != 0)
			return -EBUSY;
		if (mddev->pers) {
			mddev->pers->quiesce(mddev, 1);
			bitmap_destroy(mddev);
			mddev->pers->quiesce(mddev, 0);
		}
		mddev->bitmap_info.offset = 0;
		if (mddev->bitmap_info.file) {
			struct file *f = mddev->bitmap_info.file;
			mddev->bitmap_info.file = NULL;
			restore_bitmap_write_access(f);
			fput(f);
		}
	} else {
		/* No bitmap, OK to set a location */
		long long offset;
		if (strncmp(buf, "none", 4) == 0)
			/* nothing to be done */;
		else if (strncmp(buf, "file:", 5) == 0) {
			/* Not supported yet */
			return -EINVAL;
		} else {
			int rv;
			if (buf[0] == '+')
				rv = strict_strtoll(buf+1, 10, &offset);
			else
				rv = strict_strtoll(buf, 10, &offset);
			if (rv)
				return rv;
			if (offset == 0)
				return -EINVAL;
			if (mddev->bitmap_info.external == 0 &&
			    mddev->major_version == 0 &&
			    offset != mddev->bitmap_info.default_offset)
				return -EINVAL;
			mddev->bitmap_info.offset = offset;
			if (mddev->pers) {
				mddev->pers->quiesce(mddev, 1);
				rv = bitmap_create(mddev);
				if (!rv)
					rv = bitmap_load(mddev);
				if (rv) {
					bitmap_destroy(mddev);
					mddev->bitmap_info.offset = 0;
				}
				mddev->pers->quiesce(mddev, 0);
				if (rv)
					return rv;
			}
		}
	}
	if (!mddev->external) {
		/* Ensure new bitmap info is stored in
		 * metadata promptly.
		 */
		set_bit(MD_CHANGE_DEVS, &mddev->flags);
		md_wakeup_thread(mddev->thread);
	}
	return len;
}

static struct md_sysfs_entry bitmap_location =
__ATTR(location, S_IRUGO|S_IWUSR, location_show, location_store);
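
/*
 * Illustrative sysfs usage (device name assumed; the offset is in
 * sectors relative to the superblock and subject to the checks
 * above):
 *
 *	echo +8 > /sys/block/md0/md/bitmap/location	# internal bitmap
 *	echo none > /sys/block/md0/md/bitmap/location	# remove it
 *
 * "file:..." is recognised but rejected with -EINVAL; attaching a
 * file-backed bitmap through this interface is not supported yet.
 */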

/* 'bitmap/space' is the space available at 'location' for the
 * bitmap.  This allows the kernel to know when it is safe to
 * resize the bitmap to match a resized array.
 */
static ssize_t
space_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%lu\n", mddev->bitmap_info.space);
}

static ssize_t
space_store(struct mddev *mddev, const char *buf, size_t len)
{
	unsigned long sectors;
	int rv;

	rv = kstrtoul(buf, 10, &sectors);
	if (rv)
		return rv;

	if (sectors == 0)
		return -EINVAL;

	if (mddev->bitmap &&
	    sectors < ((mddev->bitmap->file_pages - 1) * PAGE_SIZE
			+ mddev->bitmap->last_page_size + 511) >> 9)
		return -EFBIG; /* Bitmap is too big for this small space */

	/* could make sure it isn't too big, but that isn't really
	 * needed - user-space should be careful.
	 */
	mddev->bitmap_info.space = sectors;
	return len;
}

static struct md_sysfs_entry bitmap_space =
__ATTR(space, S_IRUGO|S_IWUSR, space_show, space_store);
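
/*
 * Worked example for the -EFBIG check above (illustrative numbers):
 * a bitmap occupying two full 4KiB pages plus a 1KiB final page
 * needs (3 - 1) * 4096 + 1024 = 9216 bytes, i.e. 18 sectors after
 * rounding up, so any "space" value below 18 would be rejected.
 */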

static ssize_t
timeout_show(struct mddev *mddev, char *page)
{
	ssize_t len;
	unsigned long secs = mddev->bitmap_info.daemon_sleep / HZ;
	unsigned long jifs = mddev->bitmap_info.daemon_sleep % HZ;

	len = sprintf(page, "%lu", secs);
	if (jifs)
		len += sprintf(page+len, ".%03u", jiffies_to_msecs(jifs));
	len += sprintf(page+len, "\n");
	return len;
}

static ssize_t
timeout_store(struct mddev *mddev, const char *buf, size_t len)
{
	/* timeout can be set at any time */
	unsigned long timeout;
	int rv = strict_strtoul_scaled(buf, &timeout, 4);
	if (rv)
		return rv;

	/* just to make sure we don't overflow... */
	if (timeout >= LONG_MAX / HZ)
		return -EINVAL;

	timeout = timeout * HZ / 10000;

	if (timeout >= MAX_SCHEDULE_TIMEOUT)
		timeout = MAX_SCHEDULE_TIMEOUT-1;
	if (timeout < 1)
		timeout = 1;
	mddev->bitmap_info.daemon_sleep = timeout;
	if (mddev->thread) {
		/* if thread->timeout is MAX_SCHEDULE_TIMEOUT, then
		 * the bitmap is all clean and we don't need to
		 * adjust the timeout right now
		 */
		if (mddev->thread->timeout < MAX_SCHEDULE_TIMEOUT) {
			mddev->thread->timeout = timeout;
			md_wakeup_thread(mddev->thread);
		}
	}
	return len;
}

static struct md_sysfs_entry bitmap_timeout =
__ATTR(time_base, S_IRUGO|S_IWUSR, timeout_show, timeout_store);
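
/*
 * Worked example (assuming strict_strtoul_scaled() parses a decimal
 * with up to four fractional digits into an integer scaled by 10^4):
 * writing "5.7" gives timeout = 57000, and 57000 * HZ / 10000 is
 * 5.7 * HZ jiffies, so the bitmap daemon then runs every 5.7 seconds.
 */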

static ssize_t
backlog_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%lu\n", mddev->bitmap_info.max_write_behind);
}

static ssize_t
backlog_store(struct mddev *mddev, const char *buf, size_t len)
{
	unsigned long backlog;
	int rv = strict_strtoul(buf, 10, &backlog);
	if (rv)
		return rv;
	if (backlog > COUNTER_MAX)
		return -EINVAL;
	mddev->bitmap_info.max_write_behind = backlog;
	return len;
}

static struct md_sysfs_entry bitmap_backlog =
__ATTR(backlog, S_IRUGO|S_IWUSR, backlog_show, backlog_store);
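
/*
 * backlog bounds the number of in-flight write-behind requests;
 * illustrative usage (device name assumed):
 *
 *	echo 256 > /sys/block/md0/md/bitmap/backlog
 *
 * Values above COUNTER_MAX are rejected with -EINVAL.
 */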

static ssize_t
chunksize_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%lu\n", mddev->bitmap_info.chunksize);
}

static ssize_t
chunksize_store(struct mddev *mddev, const char *buf, size_t len)
{
	/* Can only be changed when no bitmap is active */
	int rv;
	unsigned long csize;
	if (mddev->bitmap)
		return -EBUSY;
	rv = strict_strtoul(buf, 10, &csize);
	if (rv)
		return rv;
	if (csize < 512 ||
	    !is_power_of_2(csize))
		return -EINVAL;
	mddev->bitmap_info.chunksize = csize;
	return len;
}

static struct md_sysfs_entry bitmap_chunksize =
__ATTR(chunksize, S_IRUGO|S_IWUSR, chunksize_show, chunksize_store);
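
/*
 * Illustrative usage (device name assumed): the value is in bytes,
 * must be a power of two of at least 512, and can only be set while
 * no bitmap is active:
 *
 *	echo 131072 > /sys/block/md0/md/bitmap/chunksize	# 128KiB
 */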

static ssize_t metadata_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%s\n", (mddev->bitmap_info.external
				      ? "external" : "internal"));
}

static ssize_t metadata_store(struct mddev *mddev, const char *buf, size_t len)
{
	if (mddev->bitmap ||
	    mddev->bitmap_info.file ||
	    mddev->bitmap_info.offset)
		return -EBUSY;
	if (strncmp(buf, "external", 8) == 0)
		mddev->bitmap_info.external = 1;
	else if (strncmp(buf, "internal", 8) == 0)
		mddev->bitmap_info.external = 0;
	else
		return -EINVAL;
	return len;
}

static struct md_sysfs_entry bitmap_metadata =
__ATTR(metadata, S_IRUGO|S_IWUSR, metadata_show, metadata_store);

static ssize_t can_clear_show(struct mddev *mddev, char *page)
{
	int len;
	if (mddev->bitmap)
		len = sprintf(page, "%s\n", (mddev->bitmap->need_sync ?
					     "false" : "true"));
	else
		len = sprintf(page, "\n");
	return len;
}

static ssize_t can_clear_store(struct mddev *mddev, const char *buf, size_t len)
{
	if (mddev->bitmap == NULL)
		return -ENOENT;
	if (strncmp(buf, "false", 5) == 0)
		mddev->bitmap->need_sync = 1;
	else if (strncmp(buf, "true", 4) == 0) {
		if (mddev->degraded)
			return -EBUSY;
		mddev->bitmap->need_sync = 0;
	} else
		return -EINVAL;
	return len;
}

static struct md_sysfs_entry bitmap_can_clear =
__ATTR(can_clear, S_IRUGO|S_IWUSR, can_clear_show, can_clear_store);
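
/*
 * Note the inversion above: "can_clear" reads "false" while
 * bitmap->need_sync is set.  An external metadata handler writes
 * "true" once events_cleared is safely recorded, re-enabling bit
 * clearing; that write is refused with -EBUSY while the array is
 * degraded.
 */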

static ssize_t
behind_writes_used_show(struct mddev *mddev, char *page)
{
	if (mddev->bitmap == NULL)
		return sprintf(page, "0\n");
	return sprintf(page, "%lu\n",
		       mddev->bitmap->behind_writes_used);
}

static ssize_t
2137
behind_writes_used_reset(struct mddev *mddev, const char *buf, size_t len)
2138 2139 2140 2141 2142 2143 2144 2145 2146 2147
{
	if (mddev->bitmap)
		mddev->bitmap->behind_writes_used = 0;
	return len;
}

static struct md_sysfs_entry max_backlog_used =
__ATTR(max_backlog_used, S_IRUGO | S_IWUSR,
       behind_writes_used_show, behind_writes_used_reset);
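
/*
 * max_backlog_used reports the high-water mark of concurrent
 * write-behind requests; writing any value resets it, e.g. (device
 * name assumed):
 *
 *	echo 0 > /sys/block/md0/md/bitmap/max_backlog_used
 */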

static struct attribute *md_bitmap_attrs[] = {
	&bitmap_location.attr,
	&bitmap_space.attr,
	&bitmap_timeout.attr,
	&bitmap_backlog.attr,
	&bitmap_chunksize.attr,
	&bitmap_metadata.attr,
	&bitmap_can_clear.attr,
	&max_backlog_used.attr,
	NULL
};
struct attribute_group md_bitmap_group = {
	.name = "bitmap",
	.attrs = md_bitmap_attrs,
};