bitmap.c 67.3 KB
Newer Older
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17
/*
 * bitmap.c two-level bitmap (C) Peter T. Breuer (ptb@ot.uc3m.es) 2003
 *
 * bitmap_create  - sets up the bitmap structure
 * bitmap_destroy - destroys the bitmap structure
 *
 * additions, Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.:
 * - added disk storage for bitmap
 * - changes to allow various bitmap chunk sizes
 */

/*
 * Still to do:
 *
 * flush after percent set rather than just time based. (maybe both).
 */

18
#include <linux/blkdev.h>
19 20 21 22 23 24 25 26 27 28
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/buffer_head.h>
29
#include <linux/seq_file.h>
30
#include <trace/events/block.h>
31
#include "md.h"
32
#include "bitmap.h"
33

34
static inline char *bmname(struct bitmap *bitmap)
35 36 37 38 39 40 41 42 43 44 45 46 47 48
{
	return bitmap->mddev ? mdname(bitmap->mddev) : "mdX";
}

/*
 * check a page and, if necessary, allocate it (or hijack it if the alloc fails)
 *
 * 1) check to see if this page is allocated, if it's not then try to alloc
 * 2) if the alloc fails, set the page's hijacked flag so we'll use the
 *    page pointer directly as a counter
 *
 * if we find our page, we increment the page's refcount so that it stays
 * allocated while we're using it
 */
49
static int bitmap_checkpage(struct bitmap_counts *bitmap,
50
			    unsigned long page, int create, int no_hijack)
51 52
__releases(bitmap->lock)
__acquires(bitmap->lock)
53 54 55 56
{
	unsigned char *mappage;

	if (page >= bitmap->pages) {
57 58 59 60
		/* This can happen if bitmap_start_sync goes beyond
		 * End-of-device while looking for a whole page.
		 * It is harmless.
		 */
61 62 63 64 65 66 67 68 69 70 71 72 73 74
		return -EINVAL;
	}

	if (bitmap->bp[page].hijacked) /* it's hijacked, don't try to alloc */
		return 0;

	if (bitmap->bp[page].map) /* page is already allocated, just return */
		return 0;

	if (!create)
		return -ENOENT;

	/* this page has not been allocated yet */

75
	spin_unlock_irq(&bitmap->lock);
76 77 78 79 80 81 82 83 84 85 86 87 88
	/* It is possible that this is being called inside a
	 * prepare_to_wait/finish_wait loop from raid5c:make_request().
	 * In general it is not permitted to sleep in that context as it
	 * can cause the loop to spin freely.
	 * That doesn't apply here as we can only reach this point
	 * once with any loop.
	 * When this function completes, either bp[page].map or
	 * bp[page].hijacked.  In either case, this function will
	 * abort before getting to this point again.  So there is
	 * no risk of a free-spin, and so it is safe to assert
	 * that sleeping here is allowed.
	 */
	sched_annotate_sleep();
89
	mappage = kzalloc(PAGE_SIZE, GFP_NOIO);
90 91 92
	spin_lock_irq(&bitmap->lock);

	if (mappage == NULL) {
93
		pr_debug("md/bitmap: map page allocation failed, hijacking\n");
94 95 96
		/* We don't support hijack for cluster raid */
		if (no_hijack)
			return -ENOMEM;
97 98 99 100
		/* failed - set the hijacked flag so that we can use the
		 * pointer as a counter */
		if (!bitmap->bp[page].map)
			bitmap->bp[page].hijacked = 1;
101 102
	} else if (bitmap->bp[page].map ||
		   bitmap->bp[page].hijacked) {
103
		/* somebody beat us to getting the page */
104
		kfree(mappage);
105
	} else {
106

107
		/* no page was in place and we have one, so install it */
108

109 110 111
		bitmap->bp[page].map = mappage;
		bitmap->missing_pages--;
	}
112 113 114 115 116 117
	return 0;
}

/* if page is completely empty, put it back on the free list, or dealloc it */
/* if page was hijacked, unmark the flag so it might get alloced next time */
/* Note: lock should be held when calling this */
118
static void bitmap_checkfree(struct bitmap_counts *bitmap, unsigned long page)
119 120 121 122 123 124 125 126 127 128 129
{
	char *ptr;

	if (bitmap->bp[page].count) /* page is still busy */
		return;

	/* page is no longer in use, it can be released */

	if (bitmap->bp[page].hijacked) { /* page was hijacked, undo this now */
		bitmap->bp[page].hijacked = 0;
		bitmap->bp[page].map = NULL;
130 131 132 133 134
	} else {
		/* normal case, free the page */
		ptr = bitmap->bp[page].map;
		bitmap->bp[page].map = NULL;
		bitmap->missing_pages++;
135
		kfree(ptr);
136 137 138 139 140 141 142 143 144 145 146
	}
}

/*
 * bitmap file handling - read and write the bitmap file and its superblock
 */

/*
 * basic page I/O operations
 */

147
/* IO operations when bitmap is stored near all superblocks */
148 149 150
static int read_sb_page(struct mddev *mddev, loff_t offset,
			struct page *page,
			unsigned long index, int size)
151 152 153
{
	/* choose a good rdev and read the page from there */

154
	struct md_rdev *rdev;
155 156
	sector_t target;

N
NeilBrown 已提交
157
	rdev_for_each(rdev, mddev) {
158 159
		if (! test_bit(In_sync, &rdev->flags)
		    || test_bit(Faulty, &rdev->flags))
160 161
			continue;

J
Jonathan Brassow 已提交
162
		target = offset + index * (PAGE_SIZE/512);
163

164
		if (sync_page_io(rdev, target,
165
				 roundup(size, bdev_logical_block_size(rdev->bdev)),
M
Mike Christie 已提交
166
				 page, REQ_OP_READ, 0, true)) {
167
			page->index = index;
168
			return 0;
169 170
		}
	}
171
	return -EIO;
172 173
}

174
static struct md_rdev *next_active_rdev(struct md_rdev *rdev, struct mddev *mddev)
175 176 177 178 179 180 181
{
	/* Iterate the disks of an mddev, using rcu to protect access to the
	 * linked list, and raising the refcount of devices we return to ensure
	 * they don't disappear while in use.
	 * As devices are only added or removed when raid_disk is < 0 and
	 * nr_pending is 0 and In_sync is clear, the entries we return will
	 * still be in the same position on the list when we re-enter
182
	 * list_for_each_entry_continue_rcu.
183 184 185 186 187
	 *
	 * Note that if entered with 'rdev == NULL' to start at the
	 * beginning, we temporarily assign 'rdev' to an address which
	 * isn't really an rdev, but which can be used by
	 * list_for_each_entry_continue_rcu() to find the first entry.
188 189 190 191
	 */
	rcu_read_lock();
	if (rdev == NULL)
		/* start at the beginning */
192
		rdev = list_entry(&mddev->disks, struct md_rdev, same_set);
193 194 195 196
	else {
		/* release the previous rdev and start from there. */
		rdev_dec_pending(rdev, mddev);
	}
197
	list_for_each_entry_continue_rcu(rdev, &mddev->disks, same_set) {
198 199 200 201 202 203 204 205 206 207 208 209
		if (rdev->raid_disk >= 0 &&
		    !test_bit(Faulty, &rdev->flags)) {
			/* this is a usable devices */
			atomic_inc(&rdev->nr_pending);
			rcu_read_unlock();
			return rdev;
		}
	}
	rcu_read_unlock();
	return NULL;
}

210
static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait)
211
{
212
	struct md_rdev *rdev;
213
	struct block_device *bdev;
214
	struct mddev *mddev = bitmap->mddev;
215
	struct bitmap_storage *store = &bitmap->storage;
216

217 218
restart:
	rdev = NULL;
219
	while ((rdev = next_active_rdev(rdev, mddev)) != NULL) {
220 221
		int size = PAGE_SIZE;
		loff_t offset = mddev->bitmap_info.offset;
222 223 224

		bdev = (rdev->meta_bdev) ? rdev->meta_bdev : rdev->bdev;

225 226 227 228 229
		if (page->index == store->file_pages-1) {
			int last_page_size = store->bytes & (PAGE_SIZE-1);
			if (last_page_size == 0)
				last_page_size = PAGE_SIZE;
			size = roundup(last_page_size,
230
				       bdev_logical_block_size(bdev));
231
		}
232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271
		/* Just make sure we aren't corrupting data or
		 * metadata
		 */
		if (mddev->external) {
			/* Bitmap could be anywhere. */
			if (rdev->sb_start + offset + (page->index
						       * (PAGE_SIZE/512))
			    > rdev->data_offset
			    &&
			    rdev->sb_start + offset
			    < (rdev->data_offset + mddev->dev_sectors
			     + (PAGE_SIZE/512)))
				goto bad_alignment;
		} else if (offset < 0) {
			/* DATA  BITMAP METADATA  */
			if (offset
			    + (long)(page->index * (PAGE_SIZE/512))
			    + size/512 > 0)
				/* bitmap runs in to metadata */
				goto bad_alignment;
			if (rdev->data_offset + mddev->dev_sectors
			    > rdev->sb_start + offset)
				/* data runs in to bitmap */
				goto bad_alignment;
		} else if (rdev->sb_start < rdev->data_offset) {
			/* METADATA BITMAP DATA */
			if (rdev->sb_start
			    + offset
			    + page->index*(PAGE_SIZE/512) + size/512
			    > rdev->data_offset)
				/* bitmap runs in to data */
				goto bad_alignment;
		} else {
			/* DATA METADATA BITMAP - no problems */
		}
		md_super_write(mddev, rdev,
			       rdev->sb_start + offset
			       + page->index * (PAGE_SIZE/512),
			       size,
			       page);
272
	}
273

274 275
	if (wait && md_super_wait(mddev) < 0)
		goto restart;
276
	return 0;
277 278 279

 bad_alignment:
	return -EINVAL;
280 281
}

282
static void bitmap_file_kick(struct bitmap *bitmap);
283
/*
284
 * write out a page to a file
285
 */
286
static void write_page(struct bitmap *bitmap, struct page *page, int wait)
287
{
288
	struct buffer_head *bh;
289

290
	if (bitmap->storage.file == NULL) {
291 292
		switch (write_sb_page(bitmap, page, wait)) {
		case -EINVAL:
293
			set_bit(BITMAP_WRITE_ERROR, &bitmap->flags);
294
		}
295
	} else {
296

297
		bh = page_buffers(page);
298

299 300 301 302
		while (bh && bh->b_blocknr) {
			atomic_inc(&bitmap->pending_writes);
			set_buffer_locked(bh);
			set_buffer_mapped(bh);
303
			submit_bh(REQ_OP_WRITE, REQ_SYNC, bh);
304 305
			bh = bh->b_this_page;
		}
306

307
		if (wait)
308 309
			wait_event(bitmap->write_wait,
				   atomic_read(&bitmap->pending_writes)==0);
310
	}
311
	if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags))
312
		bitmap_file_kick(bitmap);
313 314 315 316 317
}

static void end_bitmap_write(struct buffer_head *bh, int uptodate)
{
	struct bitmap *bitmap = bh->b_private;
318

319 320
	if (!uptodate)
		set_bit(BITMAP_WRITE_ERROR, &bitmap->flags);
321 322 323
	if (atomic_dec_and_test(&bitmap->pending_writes))
		wake_up(&bitmap->write_wait);
}
324

325 326 327 328 329 330
/* copied from buffer.c */
static void
__clear_page_buffers(struct page *page)
{
	ClearPagePrivate(page);
	set_page_private(page, 0);
331
	put_page(page);
332 333 334
}
static void free_buffers(struct page *page)
{
335
	struct buffer_head *bh;
336

337 338 339 340
	if (!PagePrivate(page))
		return;

	bh = page_buffers(page);
341 342 343 344
	while (bh) {
		struct buffer_head *next = bh->b_this_page;
		free_buffer_head(bh);
		bh = next;
345
	}
346 347
	__clear_page_buffers(page);
	put_page(page);
348 349
}

350 351 352 353 354 355 356
/* read a page from a file.
 * We both read the page, and attach buffers to the page to record the
 * address of each block (using bmap).  These addresses will be used
 * to write the block later, completely bypassing the filesystem.
 * This usage is similar to how swap files are handled, and allows us
 * to write to a file with no concerns of memory allocation failing.
 */
357 358 359 360
static int read_page(struct file *file, unsigned long index,
		     struct bitmap *bitmap,
		     unsigned long count,
		     struct page *page)
361
{
362
	int ret = 0;
A
Al Viro 已提交
363
	struct inode *inode = file_inode(file);
364 365
	struct buffer_head *bh;
	sector_t block;
366

367 368
	pr_debug("read bitmap file (%dB @ %llu)\n", (int)PAGE_SIZE,
		 (unsigned long long)index << PAGE_SHIFT);
369

370 371
	bh = alloc_page_buffers(page, 1<<inode->i_blkbits, 0);
	if (!bh) {
372
		ret = -ENOMEM;
373 374
		goto out;
	}
375 376 377 378 379 380 381 382 383
	attach_page_buffers(page, bh);
	block = index << (PAGE_SHIFT - inode->i_blkbits);
	while (bh) {
		if (count == 0)
			bh->b_blocknr = 0;
		else {
			bh->b_blocknr = bmap(inode, block);
			if (bh->b_blocknr == 0) {
				/* Cannot use this file! */
384
				ret = -EINVAL;
385 386 387 388 389 390 391 392 393 394
				goto out;
			}
			bh->b_bdev = inode->i_sb->s_bdev;
			if (count < (1<<inode->i_blkbits))
				count = 0;
			else
				count -= (1<<inode->i_blkbits);

			bh->b_end_io = end_bitmap_write;
			bh->b_private = bitmap;
395 396 397
			atomic_inc(&bitmap->pending_writes);
			set_buffer_locked(bh);
			set_buffer_mapped(bh);
398
			submit_bh(REQ_OP_READ, 0, bh);
399 400 401 402 403
		}
		block++;
		bh = bh->b_this_page;
	}
	page->index = index;
404 405 406

	wait_event(bitmap->write_wait,
		   atomic_read(&bitmap->pending_writes)==0);
407
	if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags))
408
		ret = -EIO;
409
out:
410
	if (ret)
411 412 413 414
		pr_err("md: bitmap read error: (%dB @ %llu): %d\n",
		       (int)PAGE_SIZE,
		       (unsigned long long)index << PAGE_SHIFT,
		       ret);
415
	return ret;
416 417 418 419 420 421
}

/*
 * bitmap file superblock operations
 */

422 423 424 425 426 427 428 429 430 431 432
/*
 * bitmap_wait_writes() should be called before writing any bitmap
 * blocks, to ensure previous writes, particularly from
 * bitmap_daemon_work(), have completed.
 */
static void bitmap_wait_writes(struct bitmap *bitmap)
{
	if (bitmap->storage.file)
		wait_event(bitmap->write_wait,
			   atomic_read(&bitmap->pending_writes)==0);
	else
433 434 435 436 437 438 439
		/* Note that we ignore the return value.  The writes
		 * might have failed, but that would just mean that
		 * some bits which should be cleared haven't been,
		 * which is safe.  The relevant bitmap blocks will
		 * probably get written again, but there is no great
		 * loss if they aren't.
		 */
440 441 442 443
		md_super_wait(bitmap->mddev);
}


444
/* update the event counter and sync the superblock to disk */
445
void bitmap_update_sb(struct bitmap *bitmap)
446 447 448 449
{
	bitmap_super_t *sb;

	if (!bitmap || !bitmap->mddev) /* no bitmap for this array */
450
		return;
451 452
	if (bitmap->mddev->bitmap_info.external)
		return;
453
	if (!bitmap->storage.sb_page) /* no superblock */
454
		return;
455
	sb = kmap_atomic(bitmap->storage.sb_page);
456
	sb->events = cpu_to_le64(bitmap->mddev->events);
457
	if (bitmap->mddev->events < bitmap->events_cleared)
458 459
		/* rocking back to read-only */
		bitmap->events_cleared = bitmap->mddev->events;
460 461
	sb->events_cleared = cpu_to_le64(bitmap->events_cleared);
	sb->state = cpu_to_le32(bitmap->flags);
462 463 464
	/* Just in case these have been changed via sysfs: */
	sb->daemon_sleep = cpu_to_le32(bitmap->mddev->bitmap_info.daemon_sleep/HZ);
	sb->write_behind = cpu_to_le32(bitmap->mddev->bitmap_info.max_write_behind);
465 466 467
	/* This might have been changed by a reshape */
	sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors);
	sb->chunksize = cpu_to_le32(bitmap->mddev->bitmap_info.chunksize);
G
Goldwyn Rodrigues 已提交
468
	sb->nodes = cpu_to_le32(bitmap->mddev->bitmap_info.nodes);
469 470
	sb->sectors_reserved = cpu_to_le32(bitmap->mddev->
					   bitmap_info.space);
471
	kunmap_atomic(sb);
472
	write_page(bitmap, bitmap->storage.sb_page, 1);
473 474 475 476 477 478 479
}

/* print out the bitmap file superblock */
void bitmap_print_sb(struct bitmap *bitmap)
{
	bitmap_super_t *sb;

480
	if (!bitmap || !bitmap->storage.sb_page)
481
		return;
482
	sb = kmap_atomic(bitmap->storage.sb_page);
483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500
	pr_debug("%s: bitmap file superblock:\n", bmname(bitmap));
	pr_debug("         magic: %08x\n", le32_to_cpu(sb->magic));
	pr_debug("       version: %d\n", le32_to_cpu(sb->version));
	pr_debug("          uuid: %08x.%08x.%08x.%08x\n",
		 *(__u32 *)(sb->uuid+0),
		 *(__u32 *)(sb->uuid+4),
		 *(__u32 *)(sb->uuid+8),
		 *(__u32 *)(sb->uuid+12));
	pr_debug("        events: %llu\n",
		 (unsigned long long) le64_to_cpu(sb->events));
	pr_debug("events cleared: %llu\n",
		 (unsigned long long) le64_to_cpu(sb->events_cleared));
	pr_debug("         state: %08x\n", le32_to_cpu(sb->state));
	pr_debug("     chunksize: %d B\n", le32_to_cpu(sb->chunksize));
	pr_debug("  daemon sleep: %ds\n", le32_to_cpu(sb->daemon_sleep));
	pr_debug("     sync size: %llu KB\n",
		 (unsigned long long)le64_to_cpu(sb->sync_size)/2);
	pr_debug("max write behind: %d\n", le32_to_cpu(sb->write_behind));
501
	kunmap_atomic(sb);
502 503
}

504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519
/*
 * bitmap_new_disk_sb
 * @bitmap
 *
 * This function is somewhat the reverse of bitmap_read_sb.  bitmap_read_sb
 * reads and verifies the on-disk bitmap superblock and populates bitmap_info.
 * This function verifies 'bitmap_info' and populates the on-disk bitmap
 * structure, which is to be written to disk.
 *
 * Returns: 0 on success, -Exxx on error
 */
static int bitmap_new_disk_sb(struct bitmap *bitmap)
{
	bitmap_super_t *sb;
	unsigned long chunksize, daemon_sleep, write_behind;

520
	bitmap->storage.sb_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
521 522
	if (bitmap->storage.sb_page == NULL)
		return -ENOMEM;
523
	bitmap->storage.sb_page->index = 0;
524

525
	sb = kmap_atomic(bitmap->storage.sb_page);
526 527 528 529 530 531 532

	sb->magic = cpu_to_le32(BITMAP_MAGIC);
	sb->version = cpu_to_le32(BITMAP_MAJOR_HI);

	chunksize = bitmap->mddev->bitmap_info.chunksize;
	BUG_ON(!chunksize);
	if (!is_power_of_2(chunksize)) {
533
		kunmap_atomic(sb);
534
		pr_warn("bitmap chunksize not a power of 2\n");
535 536 537 538 539
		return -EINVAL;
	}
	sb->chunksize = cpu_to_le32(chunksize);

	daemon_sleep = bitmap->mddev->bitmap_info.daemon_sleep;
540
	if (!daemon_sleep || (daemon_sleep > MAX_SCHEDULE_TIMEOUT)) {
541
		pr_debug("Choosing daemon_sleep default (5 sec)\n");
542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561
		daemon_sleep = 5 * HZ;
	}
	sb->daemon_sleep = cpu_to_le32(daemon_sleep);
	bitmap->mddev->bitmap_info.daemon_sleep = daemon_sleep;

	/*
	 * FIXME: write_behind for RAID1.  If not specified, what
	 * is a good choice?  We choose COUNTER_MAX / 2 arbitrarily.
	 */
	write_behind = bitmap->mddev->bitmap_info.max_write_behind;
	if (write_behind > COUNTER_MAX)
		write_behind = COUNTER_MAX / 2;
	sb->write_behind = cpu_to_le32(write_behind);
	bitmap->mddev->bitmap_info.max_write_behind = write_behind;

	/* keep the array size field of the bitmap superblock up to date */
	sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors);

	memcpy(sb->uuid, bitmap->mddev->uuid, 16);

562
	set_bit(BITMAP_STALE, &bitmap->flags);
563
	sb->state = cpu_to_le32(bitmap->flags);
564 565
	bitmap->events_cleared = bitmap->mddev->events;
	sb->events_cleared = cpu_to_le64(bitmap->mddev->events);
566
	bitmap->mddev->bitmap_info.nodes = 0;
567

568
	kunmap_atomic(sb);
569 570 571 572

	return 0;
}

573 574 575 576 577
/* read the superblock from the bitmap file and initialize some bitmap fields */
static int bitmap_read_sb(struct bitmap *bitmap)
{
	char *reason = NULL;
	bitmap_super_t *sb;
578
	unsigned long chunksize, daemon_sleep, write_behind;
579
	unsigned long long events;
G
Goldwyn Rodrigues 已提交
580
	int nodes = 0;
581
	unsigned long sectors_reserved = 0;
582
	int err = -EINVAL;
583
	struct page *sb_page;
584
	loff_t offset = bitmap->mddev->bitmap_info.offset;
585

586
	if (!bitmap->storage.file && !bitmap->mddev->bitmap_info.offset) {
587 588 589
		chunksize = 128 * 1024 * 1024;
		daemon_sleep = 5 * HZ;
		write_behind = 0;
590
		set_bit(BITMAP_STALE, &bitmap->flags);
591 592 593
		err = 0;
		goto out_no_sb;
	}
594
	/* page 0 is the superblock, read it... */
595 596 597
	sb_page = alloc_page(GFP_KERNEL);
	if (!sb_page)
		return -ENOMEM;
598
	bitmap->storage.sb_page = sb_page;
599

600
re_read:
601 602
	/* If cluster_slot is set, the cluster is setup */
	if (bitmap->cluster_slot >= 0) {
603
		sector_t bm_blocks = bitmap->mddev->resync_max_sectors;
604

605 606
		sector_div(bm_blocks,
			   bitmap->mddev->bitmap_info.chunksize >> 9);
607 608 609
		/* bits to bytes */
		bm_blocks = ((bm_blocks+7) >> 3) + sizeof(bitmap_super_t);
		/* to 4k blocks */
610
		bm_blocks = DIV_ROUND_UP_SECTOR_T(bm_blocks, 4096);
611
		offset = bitmap->mddev->bitmap_info.offset + (bitmap->cluster_slot * (bm_blocks << 3));
612
		pr_debug("%s:%d bm slot: %d offset: %llu\n", __func__, __LINE__,
613
			bitmap->cluster_slot, offset);
614 615
	}

616 617
	if (bitmap->storage.file) {
		loff_t isize = i_size_read(bitmap->storage.file->f_mapping->host);
618 619
		int bytes = isize > PAGE_SIZE ? PAGE_SIZE : isize;

620
		err = read_page(bitmap->storage.file, 0,
621
				bitmap, bytes, sb_page);
622
	} else {
623
		err = read_sb_page(bitmap->mddev,
624
				   offset,
625 626
				   sb_page,
				   0, sizeof(bitmap_super_t));
627
	}
628
	if (err)
629 630
		return err;

631
	err = -EINVAL;
632
	sb = kmap_atomic(sb_page);
633 634

	chunksize = le32_to_cpu(sb->chunksize);
635
	daemon_sleep = le32_to_cpu(sb->daemon_sleep) * HZ;
636
	write_behind = le32_to_cpu(sb->write_behind);
637
	sectors_reserved = le32_to_cpu(sb->sectors_reserved);
638 639
	/* Setup nodes/clustername only if bitmap version is
	 * cluster-compatible
640
	 */
641
	if (sb->version == cpu_to_le32(BITMAP_MAJOR_CLUSTERED)) {
642 643 644 645
		nodes = le32_to_cpu(sb->nodes);
		strlcpy(bitmap->mddev->bitmap_info.cluster_name,
				sb->cluster_name, 64);
	}
646 647 648 649

	/* verify that the bitmap-specific fields are valid */
	if (sb->magic != cpu_to_le32(BITMAP_MAGIC))
		reason = "bad magic";
650
	else if (le32_to_cpu(sb->version) < BITMAP_MAJOR_LO ||
651
		 le32_to_cpu(sb->version) > BITMAP_MAJOR_CLUSTERED)
652
		reason = "unrecognized superblock version";
653
	else if (chunksize < 512)
654
		reason = "bitmap chunksize too small";
J
Jonathan Brassow 已提交
655
	else if (!is_power_of_2(chunksize))
656
		reason = "bitmap chunksize not a power of 2";
657
	else if (daemon_sleep < 1 || daemon_sleep > MAX_SCHEDULE_TIMEOUT)
658
		reason = "daemon sleep period out of range";
659 660
	else if (write_behind > COUNTER_MAX)
		reason = "write-behind limit out of range (0 - 16383)";
661
	if (reason) {
662
		pr_warn("%s: invalid bitmap file superblock: %s\n",
663 664 665 666 667 668 669
			bmname(bitmap), reason);
		goto out;
	}

	/* keep the array size field of the bitmap superblock up to date */
	sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors);

670 671 672 673 674 675
	if (bitmap->mddev->persistent) {
		/*
		 * We have a persistent array superblock, so compare the
		 * bitmap's UUID and event counter to the mddev's
		 */
		if (memcmp(sb->uuid, bitmap->mddev->uuid, 16)) {
676 677
			pr_warn("%s: bitmap superblock UUID mismatch\n",
				bmname(bitmap));
678 679 680
			goto out;
		}
		events = le64_to_cpu(sb->events);
681
		if (!nodes && (events < bitmap->mddev->events)) {
682 683 684
			pr_warn("%s: bitmap file is out of date (%llu < %llu) -- forcing full recovery\n",
				bmname(bitmap), events,
				(unsigned long long) bitmap->mddev->events);
685
			set_bit(BITMAP_STALE, &bitmap->flags);
686
		}
687
	}
688

689
	/* assign fields using values from superblock */
690
	bitmap->flags |= le32_to_cpu(sb->state);
691
	if (le32_to_cpu(sb->version) == BITMAP_MAJOR_HOSTENDIAN)
692
		set_bit(BITMAP_HOSTENDIAN, &bitmap->flags);
693
	bitmap->events_cleared = le64_to_cpu(sb->events_cleared);
G
Goldwyn Rodrigues 已提交
694
	strlcpy(bitmap->mddev->bitmap_info.cluster_name, sb->cluster_name, 64);
695
	err = 0;
696

697
out:
698
	kunmap_atomic(sb);
699 700
	/* Assiging chunksize is required for "re_read" */
	bitmap->mddev->bitmap_info.chunksize = chunksize;
701
	if (err == 0 && nodes && (bitmap->cluster_slot < 0)) {
702 703
		err = md_setup_cluster(bitmap->mddev, nodes);
		if (err) {
704 705
			pr_warn("%s: Could not setup cluster service (%d)\n",
				bmname(bitmap), err);
706 707 708 709 710 711 712
			goto out_no_sb;
		}
		bitmap->cluster_slot = md_cluster_ops->slot_number(bitmap->mddev);
		goto re_read;
	}


713
out_no_sb:
714
	if (test_bit(BITMAP_STALE, &bitmap->flags))
715 716 717 718
		bitmap->events_cleared = bitmap->mddev->events;
	bitmap->mddev->bitmap_info.chunksize = chunksize;
	bitmap->mddev->bitmap_info.daemon_sleep = daemon_sleep;
	bitmap->mddev->bitmap_info.max_write_behind = write_behind;
G
Goldwyn Rodrigues 已提交
719
	bitmap->mddev->bitmap_info.nodes = nodes;
720 721 722
	if (bitmap->mddev->bitmap_info.space == 0 ||
	    bitmap->mddev->bitmap_info.space > sectors_reserved)
		bitmap->mddev->bitmap_info.space = sectors_reserved;
723
	if (err) {
724
		bitmap_print_sb(bitmap);
725
		if (bitmap->cluster_slot < 0)
726 727
			md_cluster_stop(bitmap->mddev);
	}
728 729 730 731 732 733 734
	return err;
}

/*
 * general bitmap file operations
 */

735 736 737 738 739 740
/*
 * on-disk bitmap:
 *
 * Use one bit per "chunk" (block set). We do the disk I/O on the bitmap
 * file a page at a time. There's a superblock at the start of the file.
 */
741
/* calculate the index of the page that contains this bit */
742 743
static inline unsigned long file_page_index(struct bitmap_storage *store,
					    unsigned long chunk)
744
{
745
	if (store->sb_page)
746 747
		chunk += sizeof(bitmap_super_t) << 3;
	return chunk >> PAGE_BIT_SHIFT;
748 749 750
}

/* calculate the (bit) offset of this bit within a page */
751 752
static inline unsigned long file_page_offset(struct bitmap_storage *store,
					     unsigned long chunk)
753
{
754
	if (store->sb_page)
755 756
		chunk += sizeof(bitmap_super_t) << 3;
	return chunk & (PAGE_BITS - 1);
757 758 759 760 761 762
}

/*
 * return a pointer to the page in the filemap that contains the given bit
 *
 */
763
static inline struct page *filemap_get_page(struct bitmap_storage *store,
764
					    unsigned long chunk)
765
{
766
	if (file_page_index(store, chunk) >= store->file_pages)
767
		return NULL;
768
	return store->filemap[file_page_index(store, chunk)];
769 770
}

771
static int bitmap_storage_alloc(struct bitmap_storage *store,
772 773
				unsigned long chunks, int with_super,
				int slot_number)
774
{
775
	int pnum, offset = 0;
776 777 778 779 780 781 782 783
	unsigned long num_pages;
	unsigned long bytes;

	bytes = DIV_ROUND_UP(chunks, 8);
	if (with_super)
		bytes += sizeof(bitmap_super_t);

	num_pages = DIV_ROUND_UP(bytes, PAGE_SIZE);
784
	offset = slot_number * num_pages;
785 786 787 788 789 790 791

	store->filemap = kmalloc(sizeof(struct page *)
				 * num_pages, GFP_KERNEL);
	if (!store->filemap)
		return -ENOMEM;

	if (with_super && !store->sb_page) {
792
		store->sb_page = alloc_page(GFP_KERNEL|__GFP_ZERO);
793 794 795
		if (store->sb_page == NULL)
			return -ENOMEM;
	}
796

797 798 799 800
	pnum = 0;
	if (store->sb_page) {
		store->filemap[0] = store->sb_page;
		pnum = 1;
801
		store->sb_page->index = offset;
802
	}
803

804
	for ( ; pnum < num_pages; pnum++) {
805
		store->filemap[pnum] = alloc_page(GFP_KERNEL|__GFP_ZERO);
806 807 808 809
		if (!store->filemap[pnum]) {
			store->file_pages = pnum;
			return -ENOMEM;
		}
810
		store->filemap[pnum]->index = pnum + offset;
811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826
	}
	store->file_pages = pnum;

	/* We need 4 bits per page, rounded up to a multiple
	 * of sizeof(unsigned long) */
	store->filemap_attr = kzalloc(
		roundup(DIV_ROUND_UP(num_pages*4, 8), sizeof(unsigned long)),
		GFP_KERNEL);
	if (!store->filemap_attr)
		return -ENOMEM;

	store->bytes = bytes;

	return 0;
}

827
static void bitmap_file_unmap(struct bitmap_storage *store)
828 829 830
{
	struct page **map, *sb_page;
	int pages;
831
	struct file *file;
832

833
	file = store->file;
834 835 836
	map = store->filemap;
	pages = store->file_pages;
	sb_page = store->sb_page;
837 838

	while (pages--)
839
		if (map[pages] != sb_page) /* 0 is sb_page, release it below */
840
			free_buffers(map[pages]);
841
	kfree(map);
842
	kfree(store->filemap_attr);
843

844 845
	if (sb_page)
		free_buffers(sb_page);
846

847
	if (file) {
A
Al Viro 已提交
848
		struct inode *inode = file_inode(file);
849
		invalidate_mapping_pages(inode->i_mapping, 0, -1);
850
		fput(file);
851
	}
852 853 854 855 856 857 858 859 860 861 862
}

/*
 * bitmap_file_kick - if an error occurs while manipulating the bitmap file
 * then it is no longer reliable, so we stop using it and we mark the file
 * as failed in the superblock
 */
static void bitmap_file_kick(struct bitmap *bitmap)
{
	char *path, *ptr = NULL;

863
	if (!test_and_set_bit(BITMAP_STALE, &bitmap->flags)) {
864
		bitmap_update_sb(bitmap);
865

866
		if (bitmap->storage.file) {
867 868
			path = kmalloc(PAGE_SIZE, GFP_KERNEL);
			if (path)
M
Miklos Szeredi 已提交
869
				ptr = file_path(bitmap->storage.file,
870
					     path, PAGE_SIZE);
C
Christoph Hellwig 已提交
871

872 873
			pr_warn("%s: kicking failed bitmap file %s from array!\n",
				bmname(bitmap), IS_ERR(ptr) ? "" : ptr);
874

875 876
			kfree(path);
		} else
877 878
			pr_warn("%s: disabling internal bitmap due to errors\n",
				bmname(bitmap));
879
	}
880 881 882
}

enum bitmap_page_attr {
883
	BITMAP_PAGE_DIRTY = 0,     /* there are set bits that need to be synced */
884 885
	BITMAP_PAGE_PENDING = 1,   /* there are bits that are being cleaned.
				    * i.e. counter is 1 or 2. */
886
	BITMAP_PAGE_NEEDWRITE = 2, /* there are cleared bits that need to be synced */
887 888
};

889 890
static inline void set_page_attr(struct bitmap *bitmap, int pnum,
				 enum bitmap_page_attr attr)
891
{
892
	set_bit((pnum<<2) + attr, bitmap->storage.filemap_attr);
893 894
}

895 896
static inline void clear_page_attr(struct bitmap *bitmap, int pnum,
				   enum bitmap_page_attr attr)
897
{
898
	clear_bit((pnum<<2) + attr, bitmap->storage.filemap_attr);
899 900
}

901 902
static inline int test_page_attr(struct bitmap *bitmap, int pnum,
				 enum bitmap_page_attr attr)
903
{
904
	return test_bit((pnum<<2) + attr, bitmap->storage.filemap_attr);
905 906
}

907 908 909 910 911 912
static inline int test_and_clear_page_attr(struct bitmap *bitmap, int pnum,
					   enum bitmap_page_attr attr)
{
	return test_and_clear_bit((pnum<<2) + attr,
				  bitmap->storage.filemap_attr);
}
913 914 915 916 917 918 919 920 921 922
/*
 * bitmap_file_set_bit -- called before performing a write to the md device
 * to set (and eventually sync) a particular bit in the bitmap file
 *
 * we set the bit immediately, then we record the page number so that
 * when an unplug occurs, we can flush the dirty pages out to disk
 */
static void bitmap_file_set_bit(struct bitmap *bitmap, sector_t block)
{
	unsigned long bit;
923
	struct page *page;
924
	void *kaddr;
925
	unsigned long chunk = block >> bitmap->counts.chunkshift;
926 927 928 929 930
	struct bitmap_storage *store = &bitmap->storage;
	unsigned long node_offset = 0;

	if (mddev_is_clustered(bitmap->mddev))
		node_offset = bitmap->cluster_slot * store->file_pages;
931

932
	page = filemap_get_page(&bitmap->storage, chunk);
933 934
	if (!page)
		return;
935
	bit = file_page_offset(&bitmap->storage, chunk);
936

937
	/* set the bit */
938
	kaddr = kmap_atomic(page);
939
	if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags))
940 941
		set_bit(bit, kaddr);
	else
942
		set_bit_le(bit, kaddr);
943
	kunmap_atomic(kaddr);
944
	pr_debug("set file bit %lu page %lu\n", bit, page->index);
945
	/* record page number so it gets flushed to disk when unplug occurs */
946
	set_page_attr(bitmap, page->index - node_offset, BITMAP_PAGE_DIRTY);
947 948
}

949 950 951 952 953
static void bitmap_file_clear_bit(struct bitmap *bitmap, sector_t block)
{
	unsigned long bit;
	struct page *page;
	void *paddr;
954
	unsigned long chunk = block >> bitmap->counts.chunkshift;
955 956 957 958 959
	struct bitmap_storage *store = &bitmap->storage;
	unsigned long node_offset = 0;

	if (mddev_is_clustered(bitmap->mddev))
		node_offset = bitmap->cluster_slot * store->file_pages;
960

961
	page = filemap_get_page(&bitmap->storage, chunk);
962 963
	if (!page)
		return;
964
	bit = file_page_offset(&bitmap->storage, chunk);
965
	paddr = kmap_atomic(page);
966
	if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags))
967 968
		clear_bit(bit, paddr);
	else
969
		clear_bit_le(bit, paddr);
970
	kunmap_atomic(paddr);
971 972
	if (!test_page_attr(bitmap, page->index - node_offset, BITMAP_PAGE_NEEDWRITE)) {
		set_page_attr(bitmap, page->index - node_offset, BITMAP_PAGE_PENDING);
973 974 975 976
		bitmap->allclean = 0;
	}
}

977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998
static int bitmap_file_test_bit(struct bitmap *bitmap, sector_t block)
{
	unsigned long bit;
	struct page *page;
	void *paddr;
	unsigned long chunk = block >> bitmap->counts.chunkshift;
	int set = 0;

	page = filemap_get_page(&bitmap->storage, chunk);
	if (!page)
		return -EINVAL;
	bit = file_page_offset(&bitmap->storage, chunk);
	paddr = kmap_atomic(page);
	if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags))
		set = test_bit(bit, paddr);
	else
		set = test_bit_le(bit, paddr);
	kunmap_atomic(paddr);
	return set;
}


999 1000 1001
/* this gets called when the md device is ready to unplug its underlying
 * (slave) device queues -- before we let any writes go down, we need to
 * sync the dirty pages of the bitmap file to disk */
1002
void bitmap_unplug(struct bitmap *bitmap)
1003
{
1004
	unsigned long i;
1005
	int dirty, need_write;
1006
	int writing = 0;
1007

1008 1009
	if (!bitmap || !bitmap->storage.filemap ||
	    test_bit(BITMAP_STALE, &bitmap->flags))
1010
		return;
1011 1012 1013

	/* look at each page to see if there are any set bits that need to be
	 * flushed out to disk */
1014
	for (i = 0; i < bitmap->storage.file_pages; i++) {
1015
		if (!bitmap->storage.filemap)
1016
			return;
1017 1018 1019 1020
		dirty = test_and_clear_page_attr(bitmap, i, BITMAP_PAGE_DIRTY);
		need_write = test_and_clear_page_attr(bitmap, i,
						      BITMAP_PAGE_NEEDWRITE);
		if (dirty || need_write) {
1021
			if (!writing) {
1022
				bitmap_wait_writes(bitmap);
1023 1024 1025 1026
				if (bitmap->mddev->queue)
					blk_add_trace_msg(bitmap->mddev->queue,
							  "md bitmap_unplug");
			}
1027
			clear_page_attr(bitmap, i, BITMAP_PAGE_PENDING);
1028
			write_page(bitmap, bitmap->storage.filemap[i], 0);
1029
			writing = 1;
1030
		}
1031
	}
1032 1033
	if (writing)
		bitmap_wait_writes(bitmap);
1034

1035
	if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags))
1036
		bitmap_file_kick(bitmap);
1037
}
1038
EXPORT_SYMBOL(bitmap_unplug);
1039

1040
static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed);
1041 1042 1043 1044 1045 1046 1047
/* * bitmap_init_from_disk -- called at bitmap_create time to initialize
 * the in-memory bitmap from the on-disk bitmap -- also, sets up the
 * memory mapping of the bitmap file
 * Special cases:
 *   if there's no bitmap file, or if the bitmap file had been
 *   previously kicked from the array, we mark all the bits as
 *   1's in order to cause a full resync.
1048 1049 1050
 *
 * We ignore all bits for sectors that end earlier than 'start'.
 * This is used when reading an out-of-date bitmap...
1051
 */
1052
static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
1053
{
1054
	unsigned long i, chunks, index, oldindex, bit, node_offset = 0;
1055
	struct page *page = NULL;
1056
	unsigned long bit_cnt = 0;
1057
	struct file *file;
1058
	unsigned long offset;
1059 1060
	int outofdate;
	int ret = -ENOSPC;
1061
	void *paddr;
1062
	struct bitmap_storage *store = &bitmap->storage;
1063

1064
	chunks = bitmap->counts.chunks;
1065
	file = store->file;
1066

1067 1068
	if (!file && !bitmap->mddev->bitmap_info.offset) {
		/* No permanent bitmap - fill with '1s'. */
1069 1070
		store->filemap = NULL;
		store->file_pages = 0;
1071 1072
		for (i = 0; i < chunks ; i++) {
			/* if the disk bit is set, set the memory bit */
1073
			int needed = ((sector_t)(i+1) << (bitmap->counts.chunkshift)
1074 1075
				      >= start);
			bitmap_set_memory_bits(bitmap,
1076
					       (sector_t)i << bitmap->counts.chunkshift,
1077 1078 1079 1080
					       needed);
		}
		return 0;
	}
1081

1082
	outofdate = test_bit(BITMAP_STALE, &bitmap->flags);
1083
	if (outofdate)
1084
		pr_warn("%s: bitmap file is out of date, doing full recovery\n", bmname(bitmap));
1085

1086
	if (file && i_size_read(file->f_mapping->host) < store->bytes) {
1087 1088 1089 1090
		pr_warn("%s: bitmap file too short %lu < %lu\n",
			bmname(bitmap),
			(unsigned long) i_size_read(file->f_mapping->host),
			store->bytes);
1091
		goto err;
1092
	}
1093

1094
	oldindex = ~0L;
1095
	offset = 0;
1096
	if (!bitmap->mddev->bitmap_info.external)
1097
		offset = sizeof(bitmap_super_t);
1098

1099 1100 1101
	if (mddev_is_clustered(bitmap->mddev))
		node_offset = bitmap->cluster_slot * (DIV_ROUND_UP(store->bytes, PAGE_SIZE));

1102
	for (i = 0; i < chunks; i++) {
1103
		int b;
1104 1105
		index = file_page_index(&bitmap->storage, i);
		bit = file_page_offset(&bitmap->storage, i);
1106
		if (index != oldindex) { /* this is a new page, read it in */
1107
			int count;
1108
			/* unmap the old page, we're done with it */
1109 1110
			if (index == store->file_pages-1)
				count = store->bytes - index * PAGE_SIZE;
1111 1112
			else
				count = PAGE_SIZE;
1113
			page = store->filemap[index];
1114 1115 1116 1117 1118 1119 1120 1121
			if (file)
				ret = read_page(file, index, bitmap,
						count, page);
			else
				ret = read_sb_page(
					bitmap->mddev,
					bitmap->mddev->bitmap_info.offset,
					page,
1122
					index + node_offset, count);
1123 1124

			if (ret)
1125
				goto err;
1126

1127 1128 1129 1130 1131
			oldindex = index;

			if (outofdate) {
				/*
				 * if bitmap is out of date, dirty the
1132
				 * whole page and write it out
1133
				 */
1134
				paddr = kmap_atomic(page);
1135
				memset(paddr + offset, 0xff,
1136
				       PAGE_SIZE - offset);
1137
				kunmap_atomic(paddr);
1138 1139 1140
				write_page(bitmap, page, 1);

				ret = -EIO;
1141 1142
				if (test_bit(BITMAP_WRITE_ERROR,
					     &bitmap->flags))
1143
					goto err;
1144 1145
			}
		}
1146
		paddr = kmap_atomic(page);
1147
		if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags))
1148
			b = test_bit(bit, paddr);
1149
		else
A
Akinobu Mita 已提交
1150
			b = test_bit_le(bit, paddr);
1151
		kunmap_atomic(paddr);
1152
		if (b) {
1153
			/* if the disk bit is set, set the memory bit */
1154
			int needed = ((sector_t)(i+1) << bitmap->counts.chunkshift
1155 1156
				      >= start);
			bitmap_set_memory_bits(bitmap,
1157
					       (sector_t)i << bitmap->counts.chunkshift,
1158
					       needed);
1159 1160
			bit_cnt++;
		}
1161
		offset = 0;
1162 1163
	}

1164 1165 1166
	pr_debug("%s: bitmap initialized from disk: read %lu pages, set %lu of %lu bits\n",
		 bmname(bitmap), store->file_pages,
		 bit_cnt, chunks);
1167 1168

	return 0;
1169

1170
 err:
1171 1172
	pr_warn("%s: bitmap initialisation failed: %d\n",
		bmname(bitmap), ret);
1173 1174 1175
	return ret;
}

1176 1177 1178 1179 1180
void bitmap_write_all(struct bitmap *bitmap)
{
	/* We don't actually write all bitmap blocks here,
	 * just flag them as needing to be written
	 */
1181
	int i;
1182

1183
	if (!bitmap || !bitmap->storage.filemap)
1184
		return;
1185
	if (bitmap->storage.file)
1186 1187 1188
		/* Only one copy, so nothing needed */
		return;

1189
	for (i = 0; i < bitmap->storage.file_pages; i++)
1190
		set_page_attr(bitmap, i,
1191
			      BITMAP_PAGE_NEEDWRITE);
1192
	bitmap->allclean = 0;
1193 1194
}

1195 1196
static void bitmap_count_page(struct bitmap_counts *bitmap,
			      sector_t offset, int inc)
1197
{
1198
	sector_t chunk = offset >> bitmap->chunkshift;
1199 1200 1201 1202
	unsigned long page = chunk >> PAGE_COUNTER_SHIFT;
	bitmap->bp[page].count += inc;
	bitmap_checkfree(bitmap, page);
}
1203

1204
static void bitmap_set_pending(struct bitmap_counts *bitmap, sector_t offset)
1205 1206 1207 1208 1209 1210 1211 1212 1213
{
	sector_t chunk = offset >> bitmap->chunkshift;
	unsigned long page = chunk >> PAGE_COUNTER_SHIFT;
	struct bitmap_page *bp = &bitmap->bp[page];

	if (!bp->pending)
		bp->pending = 1;
}

1214
static bitmap_counter_t *bitmap_get_counter(struct bitmap_counts *bitmap,
N
NeilBrown 已提交
1215
					    sector_t offset, sector_t *blocks,
1216 1217 1218 1219 1220 1221 1222
					    int create);

/*
 * bitmap daemon -- periodically wakes up to clean bits and flush pages
 *			out to disk
 */

1223
void bitmap_daemon_work(struct mddev *mddev)
1224
{
1225
	struct bitmap *bitmap;
1226
	unsigned long j;
1227
	unsigned long nextpage;
N
NeilBrown 已提交
1228
	sector_t blocks;
1229
	struct bitmap_counts *counts;
1230

1231 1232 1233
	/* Use a mutex to guard daemon_work against
	 * bitmap_destroy.
	 */
1234
	mutex_lock(&mddev->bitmap_info.mutex);
1235 1236
	bitmap = mddev->bitmap;
	if (bitmap == NULL) {
1237
		mutex_unlock(&mddev->bitmap_info.mutex);
1238
		return;
1239
	}
1240
	if (time_before(jiffies, bitmap->daemon_lastrun
N
NeilBrown 已提交
1241
			+ mddev->bitmap_info.daemon_sleep))
1242 1243
		goto done;

1244
	bitmap->daemon_lastrun = jiffies;
1245
	if (bitmap->allclean) {
N
NeilBrown 已提交
1246
		mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;
1247
		goto done;
1248 1249
	}
	bitmap->allclean = 1;
1250

1251 1252 1253 1254
	if (bitmap->mddev->queue)
		blk_add_trace_msg(bitmap->mddev->queue,
				  "md bitmap_daemon_work");

1255 1256 1257 1258
	/* Any file-page which is PENDING now needs to be written.
	 * So set NEEDWRITE now, then after we make any last-minute changes
	 * we will write it.
	 */
1259
	for (j = 0; j < bitmap->storage.file_pages; j++)
1260 1261
		if (test_and_clear_page_attr(bitmap, j,
					     BITMAP_PAGE_PENDING))
1262
			set_page_attr(bitmap, j,
1263 1264 1265 1266 1267 1268 1269 1270
				      BITMAP_PAGE_NEEDWRITE);

	if (bitmap->need_sync &&
	    mddev->bitmap_info.external == 0) {
		/* Arrange for superblock update as well as
		 * other changes */
		bitmap_super_t *sb;
		bitmap->need_sync = 0;
1271 1272
		if (bitmap->storage.filemap) {
			sb = kmap_atomic(bitmap->storage.sb_page);
1273 1274 1275
			sb->events_cleared =
				cpu_to_le64(bitmap->events_cleared);
			kunmap_atomic(sb);
1276
			set_page_attr(bitmap, 0,
1277 1278
				      BITMAP_PAGE_NEEDWRITE);
		}
1279 1280 1281 1282
	}
	/* Now look at the bitmap counters and if any are '2' or '1',
	 * decrement and handle accordingly.
	 */
1283 1284
	counts = &bitmap->counts;
	spin_lock_irq(&counts->lock);
1285
	nextpage = 0;
1286
	for (j = 0; j < counts->chunks; j++) {
1287
		bitmap_counter_t *bmc;
1288
		sector_t  block = (sector_t)j << counts->chunkshift;
1289

1290 1291
		if (j == nextpage) {
			nextpage += PAGE_COUNTER_RATIO;
1292
			if (!counts->bp[j >> PAGE_COUNTER_SHIFT].pending) {
1293
				j |= PAGE_COUNTER_MASK;
1294 1295
				continue;
			}
1296
			counts->bp[j >> PAGE_COUNTER_SHIFT].pending = 0;
1297
		}
1298
		bmc = bitmap_get_counter(counts,
1299
					 block,
1300
					 &blocks, 0);
1301 1302

		if (!bmc) {
1303
			j |= PAGE_COUNTER_MASK;
1304 1305 1306 1307 1308
			continue;
		}
		if (*bmc == 1 && !bitmap->need_sync) {
			/* We can clear the bit */
			*bmc = 0;
1309
			bitmap_count_page(counts, block, -1);
1310
			bitmap_file_clear_bit(bitmap, block);
1311 1312
		} else if (*bmc && *bmc <= 2) {
			*bmc = 1;
1313
			bitmap_set_pending(counts, block);
1314
			bitmap->allclean = 0;
1315
		}
1316
	}
1317
	spin_unlock_irq(&counts->lock);
1318

1319
	bitmap_wait_writes(bitmap);
1320 1321 1322 1323 1324 1325 1326 1327
	/* Now start writeout on any page in NEEDWRITE that isn't DIRTY.
	 * DIRTY pages need to be written by bitmap_unplug so it can wait
	 * for them.
	 * If we find any DIRTY page we stop there and let bitmap_unplug
	 * handle all the rest.  This is important in the case where
	 * the first blocking holds the superblock and it has been updated.
	 * We mustn't write any other blocks before the superblock.
	 */
1328 1329 1330 1331
	for (j = 0;
	     j < bitmap->storage.file_pages
		     && !test_bit(BITMAP_STALE, &bitmap->flags);
	     j++) {
1332
		if (test_page_attr(bitmap, j,
1333 1334 1335
				   BITMAP_PAGE_DIRTY))
			/* bitmap_unplug will handle the rest */
			break;
1336 1337
		if (test_and_clear_page_attr(bitmap, j,
					     BITMAP_PAGE_NEEDWRITE)) {
1338
			write_page(bitmap, bitmap->storage.filemap[j], 0);
1339 1340 1341
		}
	}

1342
 done:
1343
	if (bitmap->allclean == 0)
N
NeilBrown 已提交
1344 1345
		mddev->thread->timeout =
			mddev->bitmap_info.daemon_sleep;
1346
	mutex_unlock(&mddev->bitmap_info.mutex);
1347 1348
}

1349
static bitmap_counter_t *bitmap_get_counter(struct bitmap_counts *bitmap,
N
NeilBrown 已提交
1350
					    sector_t offset, sector_t *blocks,
1351
					    int create)
1352 1353
__releases(bitmap->lock)
__acquires(bitmap->lock)
1354 1355 1356 1357 1358
{
	/* If 'create', we might release the lock and reclaim it.
	 * The lock must have been taken with interrupts enabled.
	 * If !create, we don't release the lock.
	 */
1359
	sector_t chunk = offset >> bitmap->chunkshift;
1360 1361 1362
	unsigned long page = chunk >> PAGE_COUNTER_SHIFT;
	unsigned long pageoff = (chunk & PAGE_COUNTER_MASK) << COUNTER_BYTE_SHIFT;
	sector_t csize;
1363
	int err;
1364

1365
	err = bitmap_checkpage(bitmap, page, create, 0);
1366 1367 1368

	if (bitmap->bp[page].hijacked ||
	    bitmap->bp[page].map == NULL)
1369
		csize = ((sector_t)1) << (bitmap->chunkshift +
1370 1371
					  PAGE_COUNTER_SHIFT - 1);
	else
1372
		csize = ((sector_t)1) << bitmap->chunkshift;
1373 1374 1375
	*blocks = csize - (offset & (csize - 1));

	if (err < 0)
1376
		return NULL;
1377

1378 1379 1380 1381 1382 1383 1384 1385
	/* now locked ... */

	if (bitmap->bp[page].hijacked) { /* hijacked pointer */
		/* should we use the first or second counter field
		 * of the hijacked pointer? */
		int hi = (pageoff > PAGE_COUNTER_MASK);
		return  &((bitmap_counter_t *)
			  &bitmap->bp[page].map)[hi];
1386
	} else /* page is allocated */
1387 1388 1389 1390
		return (bitmap_counter_t *)
			&(bitmap->bp[page].map[pageoff]);
}

1391
int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sectors, int behind)
1392
{
1393 1394
	if (!bitmap)
		return 0;
1395 1396

	if (behind) {
1397
		int bw;
1398
		atomic_inc(&bitmap->behind_writes);
1399 1400 1401 1402
		bw = atomic_read(&bitmap->behind_writes);
		if (bw > bitmap->behind_writes_used)
			bitmap->behind_writes_used = bw;

1403 1404
		pr_debug("inc write-behind count %d/%lu\n",
			 bw, bitmap->mddev->bitmap_info.max_write_behind);
1405 1406
	}

1407
	while (sectors) {
N
NeilBrown 已提交
1408
		sector_t blocks;
1409 1410
		bitmap_counter_t *bmc;

1411 1412
		spin_lock_irq(&bitmap->counts.lock);
		bmc = bitmap_get_counter(&bitmap->counts, offset, &blocks, 1);
1413
		if (!bmc) {
1414
			spin_unlock_irq(&bitmap->counts.lock);
1415 1416 1417
			return 0;
		}

1418
		if (unlikely(COUNTER(*bmc) == COUNTER_MAX)) {
1419 1420 1421 1422 1423 1424 1425
			DEFINE_WAIT(__wait);
			/* note that it is safe to do the prepare_to_wait
			 * after the test as long as we do it before dropping
			 * the spinlock.
			 */
			prepare_to_wait(&bitmap->overflow_wait, &__wait,
					TASK_UNINTERRUPTIBLE);
1426
			spin_unlock_irq(&bitmap->counts.lock);
1427
			schedule();
1428 1429 1430 1431
			finish_wait(&bitmap->overflow_wait, &__wait);
			continue;
		}

1432
		switch (*bmc) {
1433 1434
		case 0:
			bitmap_file_set_bit(bitmap, offset);
1435
			bitmap_count_page(&bitmap->counts, offset, 1);
1436 1437 1438 1439
			/* fall through */
		case 1:
			*bmc = 2;
		}
1440

1441 1442
		(*bmc)++;

1443
		spin_unlock_irq(&bitmap->counts.lock);
1444 1445 1446 1447

		offset += blocks;
		if (sectors > blocks)
			sectors -= blocks;
1448 1449
		else
			sectors = 0;
1450 1451 1452
	}
	return 0;
}
1453
EXPORT_SYMBOL(bitmap_startwrite);
1454 1455

void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long sectors,
1456
		     int success, int behind)
1457
{
1458 1459
	if (!bitmap)
		return;
1460
	if (behind) {
1461 1462
		if (atomic_dec_and_test(&bitmap->behind_writes))
			wake_up(&bitmap->behind_wait);
1463 1464 1465
		pr_debug("dec write-behind count %d/%lu\n",
			 atomic_read(&bitmap->behind_writes),
			 bitmap->mddev->bitmap_info.max_write_behind);
1466 1467
	}

1468
	while (sectors) {
N
NeilBrown 已提交
1469
		sector_t blocks;
1470 1471 1472
		unsigned long flags;
		bitmap_counter_t *bmc;

1473 1474
		spin_lock_irqsave(&bitmap->counts.lock, flags);
		bmc = bitmap_get_counter(&bitmap->counts, offset, &blocks, 0);
1475
		if (!bmc) {
1476
			spin_unlock_irqrestore(&bitmap->counts.lock, flags);
1477 1478 1479
			return;
		}

1480
		if (success && !bitmap->mddev->degraded &&
1481 1482 1483
		    bitmap->events_cleared < bitmap->mddev->events) {
			bitmap->events_cleared = bitmap->mddev->events;
			bitmap->need_sync = 1;
1484
			sysfs_notify_dirent_safe(bitmap->sysfs_can_clear);
1485 1486
		}

1487
		if (!success && !NEEDED(*bmc))
1488 1489
			*bmc |= NEEDED_MASK;

1490
		if (COUNTER(*bmc) == COUNTER_MAX)
1491 1492
			wake_up(&bitmap->overflow_wait);

1493
		(*bmc)--;
1494
		if (*bmc <= 2) {
1495
			bitmap_set_pending(&bitmap->counts, offset);
1496 1497
			bitmap->allclean = 0;
		}
1498
		spin_unlock_irqrestore(&bitmap->counts.lock, flags);
1499 1500 1501
		offset += blocks;
		if (sectors > blocks)
			sectors -= blocks;
1502 1503
		else
			sectors = 0;
1504 1505
	}
}
1506
EXPORT_SYMBOL(bitmap_endwrite);
1507

N
NeilBrown 已提交
1508
static int __bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks,
1509
			       int degraded)
1510 1511 1512 1513 1514 1515 1516
{
	bitmap_counter_t *bmc;
	int rv;
	if (bitmap == NULL) {/* FIXME or bitmap set as 'failed' */
		*blocks = 1024;
		return 1; /* always resync if no bitmap */
	}
1517 1518
	spin_lock_irq(&bitmap->counts.lock);
	bmc = bitmap_get_counter(&bitmap->counts, offset, blocks, 0);
1519 1520 1521 1522 1523 1524 1525
	rv = 0;
	if (bmc) {
		/* locked */
		if (RESYNC(*bmc))
			rv = 1;
		else if (NEEDED(*bmc)) {
			rv = 1;
1526 1527 1528 1529
			if (!degraded) { /* don't set/clear bits if degraded */
				*bmc |= RESYNC_MASK;
				*bmc &= ~NEEDED_MASK;
			}
1530 1531
		}
	}
1532
	spin_unlock_irq(&bitmap->counts.lock);
1533 1534 1535
	return rv;
}

N
NeilBrown 已提交
1536
int bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks,
1537 1538 1539 1540 1541 1542 1543 1544 1545 1546
		      int degraded)
{
	/* bitmap_start_sync must always report on multiples of whole
	 * pages, otherwise resync (which is very PAGE_SIZE based) will
	 * get confused.
	 * So call __bitmap_start_sync repeatedly (if needed) until
	 * At least PAGE_SIZE>>9 blocks are covered.
	 * Return the 'or' of the result.
	 */
	int rv = 0;
N
NeilBrown 已提交
1547
	sector_t blocks1;
1548 1549 1550 1551 1552 1553 1554 1555 1556 1557

	*blocks = 0;
	while (*blocks < (PAGE_SIZE>>9)) {
		rv |= __bitmap_start_sync(bitmap, offset,
					  &blocks1, degraded);
		offset += blocks1;
		*blocks += blocks1;
	}
	return rv;
}
1558
EXPORT_SYMBOL(bitmap_start_sync);
1559

N
NeilBrown 已提交
1560
void bitmap_end_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, int aborted)
1561 1562 1563
{
	bitmap_counter_t *bmc;
	unsigned long flags;
1564 1565

	if (bitmap == NULL) {
1566 1567 1568
		*blocks = 1024;
		return;
	}
1569 1570
	spin_lock_irqsave(&bitmap->counts.lock, flags);
	bmc = bitmap_get_counter(&bitmap->counts, offset, blocks, 0);
1571 1572 1573 1574 1575 1576 1577 1578 1579
	if (bmc == NULL)
		goto unlock;
	/* locked */
	if (RESYNC(*bmc)) {
		*bmc &= ~RESYNC_MASK;

		if (!NEEDED(*bmc) && aborted)
			*bmc |= NEEDED_MASK;
		else {
1580
			if (*bmc <= 2) {
1581
				bitmap_set_pending(&bitmap->counts, offset);
1582 1583
				bitmap->allclean = 0;
			}
1584 1585 1586
		}
	}
 unlock:
1587
	spin_unlock_irqrestore(&bitmap->counts.lock, flags);
1588
}
1589
EXPORT_SYMBOL(bitmap_end_sync);
1590 1591 1592 1593 1594 1595 1596 1597

void bitmap_close_sync(struct bitmap *bitmap)
{
	/* Sync has finished, and any bitmap chunks that weren't synced
	 * properly have been aborted.  It remains to us to clear the
	 * RESYNC bit wherever it is still on
	 */
	sector_t sector = 0;
N
NeilBrown 已提交
1598
	sector_t blocks;
N
NeilBrown 已提交
1599 1600
	if (!bitmap)
		return;
1601 1602
	while (sector < bitmap->mddev->resync_max_sectors) {
		bitmap_end_sync(bitmap, sector, &blocks, 0);
N
NeilBrown 已提交
1603 1604 1605
		sector += blocks;
	}
}
1606
EXPORT_SYMBOL(bitmap_close_sync);
N
NeilBrown 已提交
1607

1608
void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector, bool force)
N
NeilBrown 已提交
1609 1610
{
	sector_t s = 0;
N
NeilBrown 已提交
1611
	sector_t blocks;
N
NeilBrown 已提交
1612 1613 1614 1615 1616 1617 1618

	if (!bitmap)
		return;
	if (sector == 0) {
		bitmap->last_end_sync = jiffies;
		return;
	}
1619
	if (!force && time_before(jiffies, (bitmap->last_end_sync
1620
				  + bitmap->mddev->bitmap_info.daemon_sleep)))
N
NeilBrown 已提交
1621 1622 1623 1624
		return;
	wait_event(bitmap->mddev->recovery_wait,
		   atomic_read(&bitmap->mddev->recovery_active) == 0);

1625
	bitmap->mddev->curr_resync_completed = sector;
1626
	set_bit(MD_SB_CHANGE_CLEAN, &bitmap->mddev->sb_flags);
1627
	sector &= ~((1ULL << bitmap->counts.chunkshift) - 1);
N
NeilBrown 已提交
1628 1629 1630 1631
	s = 0;
	while (s < sector && s < bitmap->mddev->resync_max_sectors) {
		bitmap_end_sync(bitmap, s, &blocks, 0);
		s += blocks;
1632
	}
N
NeilBrown 已提交
1633
	bitmap->last_end_sync = jiffies;
1634
	sysfs_notify(&bitmap->mddev->kobj, NULL, "sync_completed");
1635
}
1636
EXPORT_SYMBOL(bitmap_cond_end_sync);
1637

1638 1639 1640 1641 1642 1643 1644 1645 1646 1647 1648 1649 1650 1651 1652 1653 1654 1655 1656 1657 1658
void bitmap_sync_with_cluster(struct mddev *mddev,
			      sector_t old_lo, sector_t old_hi,
			      sector_t new_lo, sector_t new_hi)
{
	struct bitmap *bitmap = mddev->bitmap;
	sector_t sector, blocks = 0;

	for (sector = old_lo; sector < new_lo; ) {
		bitmap_end_sync(bitmap, sector, &blocks, 0);
		sector += blocks;
	}
	WARN((blocks > new_lo) && old_lo, "alignment is not correct for lo\n");

	for (sector = old_hi; sector < new_hi; ) {
		bitmap_start_sync(bitmap, sector, &blocks, 0);
		sector += blocks;
	}
	WARN((blocks > new_hi) && old_hi, "alignment is not correct for hi\n");
}
EXPORT_SYMBOL(bitmap_sync_with_cluster);

1659
static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed)
1660 1661
{
	/* For each chunk covered by any of these sectors, set the
1662
	 * counter to 2 and possibly set resync_needed.  They should all
1663 1664
	 * be 0 at this point
	 */
1665

N
NeilBrown 已提交
1666
	sector_t secs;
1667
	bitmap_counter_t *bmc;
1668 1669
	spin_lock_irq(&bitmap->counts.lock);
	bmc = bitmap_get_counter(&bitmap->counts, offset, &secs, 1);
1670
	if (!bmc) {
1671
		spin_unlock_irq(&bitmap->counts.lock);
1672
		return;
1673
	}
1674
	if (!*bmc) {
1675
		*bmc = 2;
1676 1677
		bitmap_count_page(&bitmap->counts, offset, 1);
		bitmap_set_pending(&bitmap->counts, offset);
1678
		bitmap->allclean = 0;
1679
	}
1680 1681
	if (needed)
		*bmc |= NEEDED_MASK;
1682
	spin_unlock_irq(&bitmap->counts.lock);
1683 1684
}

1685 1686 1687 1688 1689 1690
/* dirty the memory and file bits for bitmap chunks "s" to "e" */
void bitmap_dirty_bits(struct bitmap *bitmap, unsigned long s, unsigned long e)
{
	unsigned long chunk;

	for (chunk = s; chunk <= e; chunk++) {
1691
		sector_t sec = (sector_t)chunk << bitmap->counts.chunkshift;
1692 1693
		bitmap_set_memory_bits(bitmap, sec, 1);
		bitmap_file_set_bit(bitmap, sec);
1694 1695 1696 1697 1698 1699
		if (sec < bitmap->mddev->recovery_cp)
			/* We are asserting that the array is dirty,
			 * so move the recovery_cp address back so
			 * that it is obvious that it is dirty
			 */
			bitmap->mddev->recovery_cp = sec;
1700 1701 1702
	}
}

1703 1704 1705
/*
 * flush out any pending updates
 */
1706
void bitmap_flush(struct mddev *mddev)
1707 1708
{
	struct bitmap *bitmap = mddev->bitmap;
1709
	long sleep;
1710 1711 1712 1713 1714 1715 1716

	if (!bitmap) /* there was no bitmap */
		return;

	/* run the daemon_work three time to ensure everything is flushed
	 * that can be
	 */
1717
	sleep = mddev->bitmap_info.daemon_sleep * 2;
1718
	bitmap->daemon_lastrun -= sleep;
1719
	bitmap_daemon_work(mddev);
1720
	bitmap->daemon_lastrun -= sleep;
1721
	bitmap_daemon_work(mddev);
1722
	bitmap->daemon_lastrun -= sleep;
1723
	bitmap_daemon_work(mddev);
1724 1725 1726
	bitmap_update_sb(bitmap);
}

1727 1728 1729
/*
 * free memory that was allocated
 */
1730
static void bitmap_free(struct bitmap *bitmap)
1731 1732 1733 1734 1735 1736 1737
{
	unsigned long k, pages;
	struct bitmap_page *bp;

	if (!bitmap) /* there was no bitmap */
		return;

1738 1739 1740
	if (bitmap->sysfs_can_clear)
		sysfs_put(bitmap->sysfs_can_clear);

1741 1742
	if (mddev_is_clustered(bitmap->mddev) && bitmap->mddev->cluster_info &&
		bitmap->cluster_slot == md_cluster_ops->slot_number(bitmap->mddev))
1743 1744
		md_cluster_stop(bitmap->mddev);

1745 1746 1747 1748 1749 1750
	/* Shouldn't be needed - but just in case.... */
	wait_event(bitmap->write_wait,
		   atomic_read(&bitmap->pending_writes) == 0);

	/* release the bitmap file  */
	bitmap_file_unmap(&bitmap->storage);

	bp = bitmap->counts.bp;
	pages = bitmap->counts.pages;

	/* free all allocated memory */

	if (bp) /* deallocate the page memory */
		for (k = 0; k < pages; k++)
			if (bp[k].map && !bp[k].hijacked)
				kfree(bp[k].map);
	kfree(bp);
	kfree(bitmap);
}

void bitmap_destroy(struct mddev *mddev)
{
	struct bitmap *bitmap = mddev->bitmap;

	if (!bitmap) /* there was no bitmap */
		return;

	mutex_lock(&mddev->bitmap_info.mutex);
	spin_lock(&mddev->lock);
	mddev->bitmap = NULL; /* disconnect from the md device */
	spin_unlock(&mddev->lock);
	mutex_unlock(&mddev->bitmap_info.mutex);
	if (mddev->thread)
		mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;

	bitmap_free(bitmap);
}

/*
 * initialize the bitmap structure
 * if this returns an error, bitmap_destroy must be called to do clean up
 * once mddev->bitmap is set
 */
struct bitmap *bitmap_create(struct mddev *mddev, int slot)
{
	struct bitmap *bitmap;
	sector_t blocks = mddev->resync_max_sectors;
	struct file *file = mddev->bitmap_info.file;
	int err;
	struct kernfs_node *bm = NULL;

	BUILD_BUG_ON(sizeof(bitmap_super_t) != 256);

	BUG_ON(file && mddev->bitmap_info.offset);

	bitmap = kzalloc(sizeof(*bitmap), GFP_KERNEL);
	if (!bitmap)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&bitmap->counts.lock);
	atomic_set(&bitmap->pending_writes, 0);
	init_waitqueue_head(&bitmap->write_wait);
	init_waitqueue_head(&bitmap->overflow_wait);
	init_waitqueue_head(&bitmap->behind_wait);

	bitmap->mddev = mddev;
	bitmap->cluster_slot = slot;

	if (mddev->kobj.sd)
		bm = sysfs_get_dirent(mddev->kobj.sd, "bitmap");
	if (bm) {
		bitmap->sysfs_can_clear = sysfs_get_dirent(bm, "can_clear");
		sysfs_put(bm);
	} else
		bitmap->sysfs_can_clear = NULL;

	bitmap->storage.file = file;
	if (file) {
		get_file(file);
		/* As future accesses to this file will use bmap,
		 * and bypass the page cache, we must sync the file
		 * first.
		 */
		vfs_fsync(file, 1);
	}
	/* read superblock from bitmap file (this sets mddev->bitmap_info.chunksize) */
	if (!mddev->bitmap_info.external) {
		/*
		 * If 'MD_ARRAY_FIRST_USE' is set, then device-mapper is
		 * instructing us to create a new on-disk bitmap instance.
		 */
		if (test_and_clear_bit(MD_ARRAY_FIRST_USE, &mddev->flags))
			err = bitmap_new_disk_sb(bitmap);
		else
			err = bitmap_read_sb(bitmap);
	} else {
		err = 0;
		if (mddev->bitmap_info.chunksize == 0 ||
		    mddev->bitmap_info.daemon_sleep == 0)
			/* chunksize and time_base need to be
			 * set first. */
			err = -EINVAL;
	}
	if (err)
		goto error;

	bitmap->daemon_lastrun = jiffies;
	err = bitmap_resize(bitmap, blocks, mddev->bitmap_info.chunksize, 1);
	if (err)
		goto error;

	pr_debug("created bitmap (%lu pages) for device %s\n",
		 bitmap->counts.pages, bmname(bitmap));

	err = test_bit(BITMAP_WRITE_ERROR, &bitmap->flags) ? -EIO : 0;
	if (err)
		goto error;

	return bitmap;
 error:
	bitmap_free(bitmap);
	return ERR_PTR(err);
}

int bitmap_load(struct mddev *mddev)
{
	int err = 0;
	sector_t start = 0;
	sector_t sector = 0;
	struct bitmap *bitmap = mddev->bitmap;

	if (!bitmap)
		goto out;

	if (mddev_is_clustered(mddev))
		md_cluster_ops->load_bitmaps(mddev, mddev->bitmap_info.nodes);

	/* Clear out old bitmap info first:  Either there is none, or we
	 * are resuming after someone else has possibly changed things,
	 * so we should forget old cached info.
	 * All chunks should be clean, but some might need_sync.
	 */
	while (sector < mddev->resync_max_sectors) {
		sector_t blocks;
		bitmap_start_sync(bitmap, sector, &blocks, 0);
		sector += blocks;
	}
	bitmap_close_sync(bitmap);

	if (mddev->degraded == 0
	    || bitmap->events_cleared == mddev->events)
		/* no need to keep dirty bits to optimise a
		 * re-add of a missing device */
		start = mddev->recovery_cp;

	mutex_lock(&mddev->bitmap_info.mutex);
	err = bitmap_init_from_disk(bitmap, start);
	mutex_unlock(&mddev->bitmap_info.mutex);

	if (err)
		goto out;
	clear_bit(BITMAP_STALE, &bitmap->flags);

	/* Kick recovery in case any bits were set */
	set_bit(MD_RECOVERY_NEEDED, &bitmap->mddev->recovery);

	mddev->thread->timeout = mddev->bitmap_info.daemon_sleep;
	md_wakeup_thread(mddev->thread);

	bitmap_update_sb(bitmap);

	if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags))
		err = -EIO;
out:
	return err;
}
EXPORT_SYMBOL_GPL(bitmap_load);
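
/*
 * Illustrative sketch (not part of the original source): bitmap_create() and
 * bitmap_load() are used as a pair, much as location_store() further below
 * does when a new internal bitmap is requested on a running array:
 *
 *	struct bitmap *bitmap = bitmap_create(mddev, -1);
 *
 *	if (IS_ERR(bitmap))
 *		return PTR_ERR(bitmap);
 *	mddev->bitmap = bitmap;
 *	if (bitmap_load(mddev))
 *		bitmap_destroy(mddev);
 *
 * bitmap_create() only builds the in-memory structures; bitmap_load() reads
 * the on-disk bits and kicks recovery if any were found set.
 */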

/* Loads the bitmap associated with slot and copies the resync information
 * to our bitmap
 */
int bitmap_copy_from_slot(struct mddev *mddev, int slot,
		sector_t *low, sector_t *high, bool clear_bits)
{
	int rv = 0, i, j;
	sector_t block, lo = 0, hi = 0;
	struct bitmap_counts *counts;
	struct bitmap *bitmap = bitmap_create(mddev, slot);

	if (IS_ERR(bitmap))
		return PTR_ERR(bitmap);

	rv = bitmap_init_from_disk(bitmap, 0);
	if (rv)
		goto err;

	counts = &bitmap->counts;
	for (j = 0; j < counts->chunks; j++) {
		block = (sector_t)j << counts->chunkshift;
		if (bitmap_file_test_bit(bitmap, block)) {
			if (!lo)
				lo = block;
			hi = block;
			bitmap_file_clear_bit(bitmap, block);
			bitmap_set_memory_bits(mddev->bitmap, block, 1);
			bitmap_file_set_bit(mddev->bitmap, block);
		}
	}

	if (clear_bits) {
		bitmap_update_sb(bitmap);
		/* BITMAP_PAGE_PENDING is set, but bitmap_unplug needs
		 * BITMAP_PAGE_DIRTY or _NEEDWRITE to write ... */
		for (i = 0; i < bitmap->storage.file_pages; i++)
			if (test_page_attr(bitmap, i, BITMAP_PAGE_PENDING))
				set_page_attr(bitmap, i, BITMAP_PAGE_NEEDWRITE);
		bitmap_unplug(bitmap);
	}
	bitmap_unplug(mddev->bitmap);
	*low = lo;
	*high = hi;
err:
	bitmap_free(bitmap);
	return rv;
}
EXPORT_SYMBOL_GPL(bitmap_copy_from_slot);


void bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
{
	unsigned long chunk_kb;
	struct bitmap_counts *counts;

	if (!bitmap)
		return;

	counts = &bitmap->counts;

	chunk_kb = bitmap->mddev->bitmap_info.chunksize >> 10;
	seq_printf(seq, "bitmap: %lu/%lu pages [%luKB], "
		   "%lu%s chunk",
		   counts->pages - counts->missing_pages,
		   counts->pages,
		   (counts->pages - counts->missing_pages)
		   << (PAGE_SHIFT - 10),
		   chunk_kb ? chunk_kb : bitmap->mddev->bitmap_info.chunksize,
		   chunk_kb ? "KB" : "B");
	if (bitmap->storage.file) {
		seq_printf(seq, ", file: ");
		seq_file_path(seq, bitmap->storage.file, " \t\n");
	}

	seq_printf(seq, "\n");
}
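
/*
 * Example of the line emitted above (illustrative values only):
 *
 *	bitmap: 8/10 pages [32KB], 64KB chunk, file: /bitmaps/md0
 *
 * i.e. 8 of 10 counter pages are allocated, those allocated pages occupy
 * 32KB, each bitmap chunk covers 64KB, and the bitmap lives in an
 * external file.
 */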

int bitmap_resize(struct bitmap *bitmap, sector_t blocks,
		  int chunksize, int init)
{
	/* If chunksize is 0, choose an appropriate chunk size.
	 * Then possibly allocate new storage space.
	 * Then quiesce, copy bits, replace bitmap, and re-start
	 *
	 * This function is called both to set up the initial bitmap
	 * and to resize the bitmap while the array is active.
	 * If this happens as a result of the array being resized,
	 * chunksize will be zero, and we need to choose a suitable
	 * chunksize, otherwise we use what we are given.
	 */
	struct bitmap_storage store;
	struct bitmap_counts old_counts;
	unsigned long chunks;
	sector_t block;
	sector_t old_blocks, new_blocks;
	int chunkshift;
	int ret = 0;
	long pages;
	struct bitmap_page *new_bp;

	if (chunksize == 0) {
		/* If there is enough space, leave the chunk size unchanged,
		 * else increase it by a factor of two until there is enough space.
		 */
		long bytes;
		long space = bitmap->mddev->bitmap_info.space;

		if (space == 0) {
			/* We don't know how much space there is, so limit
			 * to current size - in sectors.
			 */
			bytes = DIV_ROUND_UP(bitmap->counts.chunks, 8);
			if (!bitmap->mddev->bitmap_info.external)
				bytes += sizeof(bitmap_super_t);
			space = DIV_ROUND_UP(bytes, 512);
			bitmap->mddev->bitmap_info.space = space;
		}
		chunkshift = bitmap->counts.chunkshift;
		chunkshift--;
		do {
			/* 'chunkshift' is shift from block size to chunk size */
			chunkshift++;
			chunks = DIV_ROUND_UP_SECTOR_T(blocks, 1 << chunkshift);
			bytes = DIV_ROUND_UP(chunks, 8);
			if (!bitmap->mddev->bitmap_info.external)
				bytes += sizeof(bitmap_super_t);
		} while (bytes > (space << 9));
	} else
		chunkshift = ffz(~chunksize) - BITMAP_BLOCK_SHIFT;
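	/*
	 * Worked example (illustrative, assuming BITMAP_BLOCK_SHIFT is the
	 * 512-byte block shift of 9): chunksize = 65536 bytes gives
	 * ffz(~65536) = 16, so chunkshift = 7 and each chunk spans
	 * 1 << 7 = 128 blocks of 512 bytes, i.e. the requested 64KB.
	 */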

	chunks = DIV_ROUND_UP_SECTOR_T(blocks, 1 << chunkshift);
	memset(&store, 0, sizeof(store));
	if (bitmap->mddev->bitmap_info.offset || bitmap->mddev->bitmap_info.file)
		ret = bitmap_storage_alloc(&store, chunks,
					   !bitmap->mddev->bitmap_info.external,
					   mddev_is_clustered(bitmap->mddev)
					   ? bitmap->cluster_slot : 0);
	if (ret) {
		bitmap_file_unmap(&store);
		goto err;
	}

	pages = DIV_ROUND_UP(chunks, PAGE_COUNTER_RATIO);

	new_bp = kzalloc(pages * sizeof(*new_bp), GFP_KERNEL);
	ret = -ENOMEM;
	if (!new_bp) {
		bitmap_file_unmap(&store);
		goto err;
	}

	if (!init)
		bitmap->mddev->pers->quiesce(bitmap->mddev, 1);

	store.file = bitmap->storage.file;
	bitmap->storage.file = NULL;

	if (store.sb_page && bitmap->storage.sb_page)
		memcpy(page_address(store.sb_page),
		       page_address(bitmap->storage.sb_page),
		       sizeof(bitmap_super_t));
	bitmap_file_unmap(&bitmap->storage);
	bitmap->storage = store;

	old_counts = bitmap->counts;
	bitmap->counts.bp = new_bp;
	bitmap->counts.pages = pages;
	bitmap->counts.missing_pages = pages;
	bitmap->counts.chunkshift = chunkshift;
	bitmap->counts.chunks = chunks;
	bitmap->mddev->bitmap_info.chunksize = 1 << (chunkshift +
						     BITMAP_BLOCK_SHIFT);

	blocks = min(old_counts.chunks << old_counts.chunkshift,
		     chunks << chunkshift);

	spin_lock_irq(&bitmap->counts.lock);
	/* For cluster raid, need to pre-allocate bitmap */
	if (mddev_is_clustered(bitmap->mddev)) {
		unsigned long page;
		for (page = 0; page < pages; page++) {
			ret = bitmap_checkpage(&bitmap->counts, page, 1, 1);
			if (ret) {
				unsigned long k;

				/* deallocate the page memory */
				for (k = 0; k < page; k++) {
					kfree(new_bp[k].map);
				}

				/* restore some fields from old_counts */
				bitmap->counts.bp = old_counts.bp;
				bitmap->counts.pages = old_counts.pages;
				bitmap->counts.missing_pages = old_counts.pages;
				bitmap->counts.chunkshift = old_counts.chunkshift;
				bitmap->counts.chunks = old_counts.chunks;
				bitmap->mddev->bitmap_info.chunksize = 1 << (old_counts.chunkshift +
									     BITMAP_BLOCK_SHIFT);
				blocks = old_counts.chunks << old_counts.chunkshift;
				pr_warn("Could not pre-allocate in-memory bitmap for cluster raid\n");
				break;
			} else
				bitmap->counts.bp[page].count += 1;
		}
	}

	for (block = 0; block < blocks; ) {
		bitmap_counter_t *bmc_old, *bmc_new;
		int set;

		bmc_old = bitmap_get_counter(&old_counts, block,
					     &old_blocks, 0);
		set = bmc_old && NEEDED(*bmc_old);

		if (set) {
			bmc_new = bitmap_get_counter(&bitmap->counts, block,
						     &new_blocks, 1);
			if (*bmc_new == 0) {
				/* need to set on-disk bits too. */
				sector_t end = block + new_blocks;
				sector_t start = block >> chunkshift;
				start <<= chunkshift;
				while (start < end) {
					bitmap_file_set_bit(bitmap, block);
					start += 1 << chunkshift;
				}
				*bmc_new = 2;
				bitmap_count_page(&bitmap->counts,
						  block, 1);
				bitmap_set_pending(&bitmap->counts,
						   block);
			}
			*bmc_new |= NEEDED_MASK;
			if (new_blocks < old_blocks)
				old_blocks = new_blocks;
		}
		block += old_blocks;
	}

	if (!init) {
		int i;
		while (block < (chunks << chunkshift)) {
			bitmap_counter_t *bmc;
			bmc = bitmap_get_counter(&bitmap->counts, block,
						 &new_blocks, 1);
			if (bmc) {
				/* new space.  It needs to be resynced, so
				 * we set NEEDED_MASK.
				 */
				if (*bmc == 0) {
					*bmc = NEEDED_MASK | 2;
					bitmap_count_page(&bitmap->counts,
							  block, 1);
					bitmap_set_pending(&bitmap->counts,
							   block);
				}
			}
			block += new_blocks;
		}
		for (i = 0; i < bitmap->storage.file_pages; i++)
			set_page_attr(bitmap, i, BITMAP_PAGE_DIRTY);
	}
	spin_unlock_irq(&bitmap->counts.lock);

	if (!init) {
		bitmap_unplug(bitmap);
		bitmap->mddev->pers->quiesce(bitmap->mddev, 0);
	}
	ret = 0;
err:
	return ret;
}
EXPORT_SYMBOL_GPL(bitmap_resize);
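
/*
 * Illustrative sketch (not part of the original source): when an array is
 * grown, the existing bitmap would be re-fitted over the new size by passing
 * a chunksize of 0 so a suitable chunk size is chosen to fit
 * bitmap_info.space, for example:
 *
 *	err = bitmap_resize(mddev->bitmap, mddev->resync_max_sectors, 0, 0);
 *
 * init == 0 means the array is live, so the personality is quiesced around
 * copying the old counters into the new layout.
 */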

static ssize_t
location_show(struct mddev *mddev, char *page)
{
	ssize_t len;
	if (mddev->bitmap_info.file)
		len = sprintf(page, "file");
	else if (mddev->bitmap_info.offset)
		len = sprintf(page, "%+lld", (long long)mddev->bitmap_info.offset);
	else
		len = sprintf(page, "none");
	len += sprintf(page+len, "\n");
	return len;
}

static ssize_t
location_store(struct mddev *mddev, const char *buf, size_t len)
{
	int rv;

	rv = mddev_lock(mddev);
	if (rv)
		return rv;
	if (mddev->pers) {
		if (!mddev->pers->quiesce) {
			rv = -EBUSY;
			goto out;
		}
		if (mddev->recovery || mddev->sync_thread) {
			rv = -EBUSY;
			goto out;
		}
	}

	if (mddev->bitmap || mddev->bitmap_info.file ||
	    mddev->bitmap_info.offset) {
		/* bitmap already configured.  Only option is to clear it */
		if (strncmp(buf, "none", 4) != 0) {
			rv = -EBUSY;
			goto out;
		}
		if (mddev->pers) {
			mddev->pers->quiesce(mddev, 1);
			bitmap_destroy(mddev);
			mddev->pers->quiesce(mddev, 0);
		}
		mddev->bitmap_info.offset = 0;
		if (mddev->bitmap_info.file) {
			struct file *f = mddev->bitmap_info.file;
			mddev->bitmap_info.file = NULL;
			fput(f);
		}
	} else {
		/* No bitmap, OK to set a location */
		long long offset;
		if (strncmp(buf, "none", 4) == 0)
			/* nothing to be done */;
		else if (strncmp(buf, "file:", 5) == 0) {
			/* Not supported yet */
			rv = -EINVAL;
			goto out;
		} else {
			if (buf[0] == '+')
				rv = kstrtoll(buf+1, 10, &offset);
			else
				rv = kstrtoll(buf, 10, &offset);
			if (rv)
				goto out;
			if (offset == 0) {
				rv = -EINVAL;
				goto out;
			}
			if (mddev->bitmap_info.external == 0 &&
			    mddev->major_version == 0 &&
			    offset != mddev->bitmap_info.default_offset) {
				rv = -EINVAL;
				goto out;
			}
			mddev->bitmap_info.offset = offset;
			if (mddev->pers) {
				struct bitmap *bitmap;
				mddev->pers->quiesce(mddev, 1);
				bitmap = bitmap_create(mddev, -1);
				if (IS_ERR(bitmap))
					rv = PTR_ERR(bitmap);
				else {
					mddev->bitmap = bitmap;
					rv = bitmap_load(mddev);
					if (rv)
						mddev->bitmap_info.offset = 0;
				}
				mddev->pers->quiesce(mddev, 0);
				if (rv) {
					bitmap_destroy(mddev);
					goto out;
				}
			}
		}
	}
	if (!mddev->external) {
		/* Ensure new bitmap info is stored in
		 * metadata promptly.
		 */
		set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
		md_wakeup_thread(mddev->thread);
	}
	rv = 0;
out:
	mddev_unlock(mddev);
	if (rv)
		return rv;
	return len;
}

static struct md_sysfs_entry bitmap_location =
__ATTR(location, S_IRUGO|S_IWUSR, location_show, location_store);
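
/*
 * Illustrative usage of the attribute above (hypothetical device name): with
 * no bitmap configured, writing a sector offset such as "+8" creates and
 * loads an internal bitmap on a running array, while writing "none" tears an
 * existing bitmap down; "file:..." is rejected here with -EINVAL:
 *
 *	echo +8 > /sys/block/md0/md/bitmap/location
 */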

/* 'bitmap/space' is the space available at 'location' for the
 * bitmap.  This allows the kernel to know when it is safe to
 * resize the bitmap to match a resized array.
 */
static ssize_t
space_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%lu\n", mddev->bitmap_info.space);
}

static ssize_t
space_store(struct mddev *mddev, const char *buf, size_t len)
{
	unsigned long sectors;
	int rv;

	rv = kstrtoul(buf, 10, &sectors);
	if (rv)
		return rv;

	if (sectors == 0)
		return -EINVAL;

	if (mddev->bitmap &&
	    sectors < (mddev->bitmap->storage.bytes + 511) >> 9)
		return -EFBIG; /* Bitmap is too big for this small space */

	/* could make sure it isn't too big, but that isn't really
	 * needed - user-space should be careful.
	 */
	mddev->bitmap_info.space = sectors;
	return len;
}

static struct md_sysfs_entry bitmap_space =
__ATTR(space, S_IRUGO|S_IWUSR, space_show, space_store);

static ssize_t
timeout_show(struct mddev *mddev, char *page)
{
	ssize_t len;
	unsigned long secs = mddev->bitmap_info.daemon_sleep / HZ;
	unsigned long jifs = mddev->bitmap_info.daemon_sleep % HZ;

	len = sprintf(page, "%lu", secs);
	if (jifs)
		len += sprintf(page+len, ".%03u", jiffies_to_msecs(jifs));
	len += sprintf(page+len, "\n");
	return len;
}

static ssize_t
timeout_store(struct mddev *mddev, const char *buf, size_t len)
{
	/* timeout can be set at any time */
	unsigned long timeout;
	int rv = strict_strtoul_scaled(buf, &timeout, 4);
	if (rv)
		return rv;

	/* just to make sure we don't overflow... */
	if (timeout >= LONG_MAX / HZ)
		return -EINVAL;

	timeout = timeout * HZ / 10000;

	if (timeout >= MAX_SCHEDULE_TIMEOUT)
		timeout = MAX_SCHEDULE_TIMEOUT-1;
	if (timeout < 1)
		timeout = 1;
	mddev->bitmap_info.daemon_sleep = timeout;
	if (mddev->thread) {
		/* if thread->timeout is MAX_SCHEDULE_TIMEOUT, then
		 * the bitmap is all clean and we don't need to
		 * adjust the timeout right now
		 */
		if (mddev->thread->timeout < MAX_SCHEDULE_TIMEOUT) {
			mddev->thread->timeout = timeout;
			md_wakeup_thread(mddev->thread);
		}
	}
	return len;
}

static struct md_sysfs_entry bitmap_timeout =
__ATTR(time_base, S_IRUGO|S_IWUSR, timeout_show, timeout_store);
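
/*
 * Worked example for the store above (assuming strict_strtoul_scaled() with a
 * scale of 4 returns the parsed value multiplied by 10^4): writing "5" yields
 * 50000, and 50000 * HZ / 10000 == 5 * HZ, i.e. a five second daemon sleep;
 * writing "0.5" yields 5000 and a half-second sleep.
 */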

static ssize_t
backlog_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%lu\n", mddev->bitmap_info.max_write_behind);
}

static ssize_t
backlog_store(struct mddev *mddev, const char *buf, size_t len)
{
	unsigned long backlog;
	int rv = kstrtoul(buf, 10, &backlog);
	if (rv)
		return rv;
	if (backlog > COUNTER_MAX)
		return -EINVAL;
	mddev->bitmap_info.max_write_behind = backlog;
	return len;
}

static struct md_sysfs_entry bitmap_backlog =
__ATTR(backlog, S_IRUGO|S_IWUSR, backlog_show, backlog_store);

static ssize_t
chunksize_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%lu\n", mddev->bitmap_info.chunksize);
}

static ssize_t
chunksize_store(struct mddev *mddev, const char *buf, size_t len)
{
	/* Can only be changed when no bitmap is active */
	int rv;
	unsigned long csize;
	if (mddev->bitmap)
		return -EBUSY;
	rv = kstrtoul(buf, 10, &csize);
	if (rv)
		return rv;
	if (csize < 512 ||
	    !is_power_of_2(csize))
		return -EINVAL;
	mddev->bitmap_info.chunksize = csize;
	return len;
}

static struct md_sysfs_entry bitmap_chunksize =
__ATTR(chunksize, S_IRUGO|S_IWUSR, chunksize_show, chunksize_store);

static ssize_t metadata_show(struct mddev *mddev, char *page)
{
	if (mddev_is_clustered(mddev))
		return sprintf(page, "clustered\n");
	return sprintf(page, "%s\n", (mddev->bitmap_info.external
				      ? "external" : "internal"));
}

static ssize_t metadata_store(struct mddev *mddev, const char *buf, size_t len)
{
	if (mddev->bitmap ||
	    mddev->bitmap_info.file ||
	    mddev->bitmap_info.offset)
		return -EBUSY;
	if (strncmp(buf, "external", 8) == 0)
		mddev->bitmap_info.external = 1;
	else if ((strncmp(buf, "internal", 8) == 0) ||
			(strncmp(buf, "clustered", 9) == 0))
		mddev->bitmap_info.external = 0;
	else
		return -EINVAL;
	return len;
}

static struct md_sysfs_entry bitmap_metadata =
__ATTR(metadata, S_IRUGO|S_IWUSR, metadata_show, metadata_store);

static ssize_t can_clear_show(struct mddev *mddev, char *page)
{
	int len;
	spin_lock(&mddev->lock);
	if (mddev->bitmap)
		len = sprintf(page, "%s\n", (mddev->bitmap->need_sync ?
					     "false" : "true"));
	else
		len = sprintf(page, "\n");
	spin_unlock(&mddev->lock);
	return len;
}

static ssize_t can_clear_store(struct mddev *mddev, const char *buf, size_t len)
{
	if (mddev->bitmap == NULL)
		return -ENOENT;
	if (strncmp(buf, "false", 5) == 0)
		mddev->bitmap->need_sync = 1;
	else if (strncmp(buf, "true", 4) == 0) {
		if (mddev->degraded)
			return -EBUSY;
		mddev->bitmap->need_sync = 0;
	} else
		return -EINVAL;
	return len;
}

static struct md_sysfs_entry bitmap_can_clear =
__ATTR(can_clear, S_IRUGO|S_IWUSR, can_clear_show, can_clear_store);

static ssize_t
behind_writes_used_show(struct mddev *mddev, char *page)
{
	ssize_t ret;
	spin_lock(&mddev->lock);
	if (mddev->bitmap == NULL)
		ret = sprintf(page, "0\n");
	else
		ret = sprintf(page, "%lu\n",
			      mddev->bitmap->behind_writes_used);
	spin_unlock(&mddev->lock);
	return ret;
}

static ssize_t
behind_writes_used_reset(struct mddev *mddev, const char *buf, size_t len)
{
	if (mddev->bitmap)
		mddev->bitmap->behind_writes_used = 0;
	return len;
}

static struct md_sysfs_entry max_backlog_used =
__ATTR(max_backlog_used, S_IRUGO | S_IWUSR,
       behind_writes_used_show, behind_writes_used_reset);

static struct attribute *md_bitmap_attrs[] = {
	&bitmap_location.attr,
	&bitmap_space.attr,
	&bitmap_timeout.attr,
	&bitmap_backlog.attr,
	&bitmap_chunksize.attr,
	&bitmap_metadata.attr,
	&bitmap_can_clear.attr,
	&max_backlog_used.attr,
	NULL
};
struct attribute_group md_bitmap_group = {
	.name = "bitmap",
	.attrs = md_bitmap_attrs,
};