/*
   drbd_req.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

 */

#include <linux/module.h>

#include <linux/slab.h>
#include <linux/drbd.h>
#include "drbd_int.h"
#include "drbd_req.h"


/* Update disk stats at start of I/O request */
static void _drbd_start_io_acct(struct drbd_conf *mdev, struct drbd_request *req, struct bio *bio)
{
	const int rw = bio_data_dir(bio);
	int cpu;
	cpu = part_stat_lock();
	part_stat_inc(cpu, &mdev->vdisk->part0, ios[rw]);
	part_stat_add(cpu, &mdev->vdisk->part0, sectors[rw], bio_sectors(bio));
	part_inc_in_flight(&mdev->vdisk->part0, rw);
	part_stat_unlock();
}

/* Update disk stats when completing request upwards */
static void _drbd_end_io_acct(struct drbd_conf *mdev, struct drbd_request *req)
{
	int rw = bio_data_dir(req->master_bio);
	unsigned long duration = jiffies - req->start_time;
	int cpu;
	cpu = part_stat_lock();
	part_stat_add(cpu, &mdev->vdisk->part0, ticks[rw], duration);
	part_round_stats(cpu, &mdev->vdisk->part0);
	part_dec_in_flight(&mdev->vdisk->part0, rw);
	part_stat_unlock();
}

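/* Final cleanup of a request, called with the req_lock held:
 * take it off the transfer log, set bitmap bits out-of-sync (resp.
 * in-sync) for writes as needed, give back the activity log reference
 * if one was held, and free the request object. */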
static void _req_is_done(struct drbd_conf *mdev, struct drbd_request *req, const int rw)
{
	const unsigned long s = req->rq_state;

	/* remove it from the transfer log.
	 * well, only if it had been there in the first
	 * place... if it had not (local only or conflicting
	 * and never sent), it should still be "empty" as
	 * initialized in drbd_req_new(), so we can list_del() it
	 * here unconditionally */
	list_del(&req->tl_requests);

	/* if it was a write, we may have to set the corresponding
	 * bit(s) out-of-sync first. If it had a local part, we need to
	 * release the reference to the activity log. */
	if (rw == WRITE) {
		/* Set out-of-sync unless both OK flags are set
		 * (local only or remote failed).
		 * Other places where we set out-of-sync:
		 * READ with local io-error */
		if (!(s & RQ_NET_OK) || !(s & RQ_LOCAL_OK))
			drbd_set_out_of_sync(mdev, req->i.sector, req->i.size);

		if ((s & RQ_NET_OK) && (s & RQ_LOCAL_OK) && (s & RQ_NET_SIS))
			drbd_set_in_sync(mdev, req->i.sector, req->i.size);

		/* one might be tempted to move the drbd_al_complete_io
		 * to the local io completion callback drbd_endio_pri.
		 * but, if this was a mirror write, we may only
		 * drbd_al_complete_io after this is RQ_NET_DONE,
		 * otherwise the extent could be dropped from the al
		 * before it has actually been written on the peer.
		 * if we crash before our peer knows about the request,
		 * but after the extent has been dropped from the al,
		 * we would forget to resync the corresponding extent.
		 */
		if (s & RQ_LOCAL_MASK) {
			if (get_ldev_if_state(mdev, D_FAILED)) {
				if (s & RQ_IN_ACT_LOG)
					drbd_al_complete_io(mdev, req->i.sector);
				put_ldev(mdev);
			} else if (__ratelimit(&drbd_ratelimit_state)) {
				dev_warn(DEV, "Should have called drbd_al_complete_io(, %llu), "
				     "but my Disk seems to have failed :(\n",
				     (unsigned long long) req->i.sector);
			}
		}
	}

	drbd_req_free(req);
}

static void queue_barrier(struct drbd_conf *mdev)
{
	struct drbd_tl_epoch *b;

	/* We are within the req_lock. Once we queued the barrier for sending,
	 * we set the CREATE_BARRIER bit. It is cleared as soon as a new
	 * barrier/epoch object is added. This is the only place this bit is
	 * set. It indicates that the barrier for this epoch is already queued,
	 * and no new epoch has been created yet. */
	if (test_bit(CREATE_BARRIER, &mdev->flags))
		return;

	b = mdev->newest_tle;
	b->w.cb = w_send_barrier;
	/* inc_ap_pending done here, so we won't
	 * get imbalanced on connection loss.
	 * dec_ap_pending will be done in got_BarrierAck
	 * or (on connection loss) in tl_clear.  */
	inc_ap_pending(mdev);
	drbd_queue_work(&mdev->data.work, &b->w);
	set_bit(CREATE_BARRIER, &mdev->flags);
}

static void _about_to_complete_local_write(struct drbd_conf *mdev,
	struct drbd_request *req)
{
	const unsigned long s = req->rq_state;
	struct drbd_epoch_entry *e;
	struct hlist_node *n;
	struct hlist_head *slot;

	/* Before we can signal completion to the upper layers,
	 * we may need to close the current epoch.
	 * We can skip this, if this request has not even been sent, because we
	 * did not have a fully established connection yet/anymore, during
	 * bitmap exchange, or while we are C_AHEAD due to congestion policy.
	 */
	if (mdev->state.conn >= C_CONNECTED &&
	    (s & RQ_NET_SENT) != 0 &&
	    req->epoch == mdev->newest_tle->br_number)
		queue_barrier(mdev);

	/* we need to do the conflict detection stuff,
	 * if we have the ee_hash (two_primaries) and
	 * this has been on the network */
	if ((s & RQ_NET_DONE) && mdev->ee_hash != NULL) {
		const sector_t sector = req->i.sector;
		const int size = req->i.size;
		struct drbd_interval *i;

		/* ASSERT:
		 * there must be no conflicting requests, since
		 * they must have been failed on the spot */

		i = drbd_find_overlap(&mdev->write_requests, sector, size);
		if (i) {
			struct drbd_request *req2 =
				container_of(i, struct drbd_request, i);

			dev_alert(DEV, "LOGIC BUG: completed: %p %llus +%u; "
			      "other: %p %llus +%u\n",
			      req, (unsigned long long)sector, size,
			      i, (unsigned long long)req2->i.sector, req2->i.size);
		}

		/* maybe "wake" those conflicting epoch entries
		 * that wait for this request to finish.
		 *
		 * currently, there can be only _one_ such ee
		 * (well, or some more, which would be pending
		 * P_DISCARD_ACK not yet sent by the asender...),
		 * since we block the receiver thread upon the
		 * first conflict detection, which will wait on
		 * misc_wait.  maybe we want to assert that?
		 *
		 * anyways, if we found one,
		 * we just have to do a wake_up.  */
#define OVERLAPS overlaps(sector, size, e->sector, e->size)
		slot = ee_hash_slot(mdev, req->i.sector);
		hlist_for_each_entry(e, n, slot, collision) {
			if (OVERLAPS) {
				wake_up(&mdev->misc_wait);
				break;
			}
		}
	}
#undef OVERLAPS
}

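/* Hand the master bio back to the upper layers, and drop the ap_bio
 * reference that was taken when the bio first entered DRBD. Called
 * after the req_lock has been dropped again. */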
void complete_master_bio(struct drbd_conf *mdev,
		struct bio_and_error *m)
{
	bio_endio(m->bio, m->error);
	dec_ap_bio(mdev);
}

/* Helper for __req_mod().
 * Set m->bio to the master bio, if it is fit to be completed,
 * or leave it alone (it is initialized to NULL in __req_mod),
 * if it has already been completed, or cannot be completed yet.
 * If m->bio is set, the error status to be returned is placed in m->error.
 */
void _req_may_be_done(struct drbd_request *req, struct bio_and_error *m)
{
	const unsigned long s = req->rq_state;
	struct drbd_conf *mdev = req->mdev;
	/* only WRITES may end up here without a master bio (on barrier ack) */
	int rw = req->master_bio ? bio_data_dir(req->master_bio) : WRITE;

	/* we must not complete the master bio, while it is
	 *	still being processed by _drbd_send_zc_bio (drbd_send_dblock)
	 *	not yet acknowledged by the peer
	 *	not yet completed by the local io subsystem
	 * these flags may get cleared in any order by
	 *	the worker,
	 *	the receiver,
	 *	the bio_endio completion callbacks.
	 */
	if (s & RQ_NET_QUEUED)
		return;
	if (s & RQ_NET_PENDING)
		return;
	if (s & RQ_LOCAL_PENDING)
		return;

	if (req->master_bio) {
		/* this is data_received (remote read)
		 * or protocol C P_WRITE_ACK
		 * or protocol B P_RECV_ACK
		 * or protocol A "handed_over_to_network" (SendAck)
		 * or canceled or failed,
		 * or killed from the transfer log due to connection loss.
		 */

		/*
		 * figure out whether to report success or failure.
		 *
		 * report success when at least one of the operations succeeded.
		 * or, to put the other way,
		 * only report failure, when both operations failed.
		 *
		 * what to do about the failures is handled elsewhere.
		 * what we need to do here is just: complete the master_bio.
		 *
		 * local completion error, if any, has been stored as ERR_PTR
		 * in private_bio within drbd_endio_pri.
		 */
		int ok = (s & RQ_LOCAL_OK) || (s & RQ_NET_OK);
		int error = PTR_ERR(req->private_bio);

		/* remove the request from the conflict detection
		 * respective block_id verification hash */
		if (!hlist_unhashed(&req->collision)) {
			hlist_del(&req->collision);
			if (!drbd_interval_empty(&req->i))
				drbd_remove_interval(&mdev->write_requests, &req->i);
		} else
			D_ASSERT((s & (RQ_NET_MASK & ~RQ_NET_DONE)) == 0);

		/* for writes we need to do some extra housekeeping */
		if (rw == WRITE)
			_about_to_complete_local_write(mdev, req);

		/* Update disk stats */
		_drbd_end_io_acct(mdev, req);

		m->error = ok ? 0 : (error ?: -EIO);
		m->bio = req->master_bio;
		req->master_bio = NULL;
	}

	if ((s & RQ_NET_MASK) == 0 || (s & RQ_NET_DONE)) {
		/* this is disconnected (local only) operation,
		 * or protocol C P_WRITE_ACK,
		 * or protocol A or B P_BARRIER_ACK,
		 * or killed from the transfer log due to connection loss. */
		_req_is_done(mdev, req, rw);
	}
	/* else: network part and not DONE yet. that is
	 * protocol A or B, barrier ack still pending... */
}

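/* variant of _req_may_be_done() that does nothing while IO is
 * suspended; such requests are completed (or requeued) later, once
 * the suspended state is left again. */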
static void _req_may_be_done_not_susp(struct drbd_request *req, struct bio_and_error *m)
{
	struct drbd_conf *mdev = req->mdev;

	if (!is_susp(mdev->state))
		_req_may_be_done(req, m);
}

/*
 * checks whether there was an overlapping request
 * or ee already registered.
 *
 * if so, return 1, in which case this request is completed on the spot,
 * without ever being submitted or sent.
 *
 * return 0 if it is ok to submit this request.
 *
 * NOTE:
 * paranoia: assume something above us is broken, and issues different write
 * requests for the same block simultaneously...
 *
 * To ensure these won't be reordered differently on both nodes, resulting in
 * diverging data sets, we discard the later one(s). Not that this is supposed
 * to happen, but this is the rationale why we also have to check for
 * conflicting requests with local origin, and why we have to do so regardless
 * of whether we allowed multiple primaries.
 *
 * BTW, in case we only have one primary, the ee_hash is empty anyways, and the
 * second hlist_for_each_entry becomes a noop. This is even simpler than to
 * grab a reference on the net_conf, and check for the two_primaries flag...
 */
static int _req_conflicts(struct drbd_request *req)
{
	struct drbd_conf *mdev = req->mdev;
	const sector_t sector = req->i.sector;
	const int size = req->i.size;
	struct drbd_interval *i;
	struct drbd_epoch_entry *e;
	struct hlist_node *n;
	struct hlist_head *slot;

	D_ASSERT(hlist_unhashed(&req->collision));

	if (!get_net_conf(mdev))
		return 0;

	/* BUG_ON */
	ERR_IF (mdev->tl_hash_s == 0)
		goto out_no_conflict;
	BUG_ON(mdev->tl_hash == NULL);

	i = drbd_find_overlap(&mdev->write_requests, sector, size);
	if (i) {
		struct drbd_request *req2 =
			container_of(i, struct drbd_request, i);

		dev_alert(DEV, "%s[%u] Concurrent local write detected! "
		      "[DISCARD L] new: %llus +%u; "
		      "pending: %llus +%u\n",
		      current->comm, current->pid,
		      (unsigned long long)sector, size,
		      (unsigned long long)req2->i.sector, req2->i.size);
		goto out_conflict;
	}

	if (mdev->ee_hash_s) {
		/* now, check for overlapping requests with remote origin */
		BUG_ON(mdev->ee_hash == NULL);
#define OVERLAPS overlaps(e->sector, e->size, sector, size)
		slot = ee_hash_slot(mdev, sector);
		hlist_for_each_entry(e, n, slot, collision) {
			if (OVERLAPS) {
				dev_alert(DEV, "%s[%u] Concurrent remote write detected!"
				      " [DISCARD L] new: %llus +%u; "
				      "pending: %llus +%u\n",
				      current->comm, current->pid,
				      (unsigned long long)sector, size,
				      (unsigned long long)e->sector, e->size);
				goto out_conflict;
			}
		}
	}
#undef OVERLAPS

out_no_conflict:
	/* this is like it should be, and what we expected.
	 * our users do behave after all... */
	put_net_conf(mdev);
	return 0;

out_conflict:
	put_net_conf(mdev);
	return 1;
}

/* obviously this could be coded as many single functions
 * instead of one huge switch,
 * or by putting the code directly in the respective locations
 * (as it has been before).
 *
 * but having it this way
 *  enforces that it is all in this one place, where it is easier to audit,
 *  it makes it obvious that whatever "event" "happens" to a request should
 *  happen "atomically" within the req_lock,
 *  and it enforces that we have to think in a very structured manner
 *  about the "events" that may happen to a request during its life time ...
 */
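/* A sketch of the usual calling convention, as used by the req_mod()
 * wrapper in drbd_req.h: apply the event while holding the req_lock,
 * and complete the master bio, if any, only after dropping the lock:
 *
 *	struct bio_and_error m;
 *	int rv;
 *
 *	spin_lock_irq(&mdev->req_lock);
 *	rv = __req_mod(req, what, &m);
 *	spin_unlock_irq(&mdev->req_lock);
 *	if (m.bio)
 *		complete_master_bio(mdev, &m);
 */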
int __req_mod(struct drbd_request *req, enum drbd_req_event what,
		struct bio_and_error *m)
{
	struct drbd_conf *mdev = req->mdev;
	int rv = 0;
	m->bio = NULL;

	switch (what) {
	default:
		dev_err(DEV, "LOGIC BUG in %s:%u\n", __FILE__ , __LINE__);
		break;

	/* does not happen...
	 * initialization done in drbd_req_new
	case created:
		break;
		*/

	case to_be_send: /* via network */
		/* reached via drbd_make_request_common
		 * and from w_read_retry_remote */
		D_ASSERT(!(req->rq_state & RQ_NET_MASK));
		req->rq_state |= RQ_NET_PENDING;
		inc_ap_pending(mdev);
		break;

	case to_be_submitted: /* locally */
		/* reached via drbd_make_request_common */
		D_ASSERT(!(req->rq_state & RQ_LOCAL_MASK));
		req->rq_state |= RQ_LOCAL_PENDING;
		break;

	case completed_ok:
		if (bio_data_dir(req->master_bio) == WRITE)
			mdev->writ_cnt += req->i.size >> 9;
		else
			mdev->read_cnt += req->i.size >> 9;

		req->rq_state |= (RQ_LOCAL_COMPLETED|RQ_LOCAL_OK);
		req->rq_state &= ~RQ_LOCAL_PENDING;

		_req_may_be_done_not_susp(req, m);
		put_ldev(mdev);
		break;

	case write_completed_with_error:
		req->rq_state |= RQ_LOCAL_COMPLETED;
		req->rq_state &= ~RQ_LOCAL_PENDING;

		__drbd_chk_io_error(mdev, false);
		_req_may_be_done_not_susp(req, m);
		put_ldev(mdev);
		break;

	case read_ahead_completed_with_error:
		/* it is legal to fail READA */
		req->rq_state |= RQ_LOCAL_COMPLETED;
		req->rq_state &= ~RQ_LOCAL_PENDING;
		_req_may_be_done_not_susp(req, m);
		put_ldev(mdev);
		break;

	case read_completed_with_error:
		drbd_set_out_of_sync(mdev, req->i.sector, req->i.size);

		req->rq_state |= RQ_LOCAL_COMPLETED;
		req->rq_state &= ~RQ_LOCAL_PENDING;

		D_ASSERT(!(req->rq_state & RQ_NET_MASK));

		__drbd_chk_io_error(mdev, false);
		put_ldev(mdev);

		/* no point in retrying if there is no good remote data,
		 * or we have no connection. */
		if (mdev->state.pdsk != D_UP_TO_DATE) {
			_req_may_be_done_not_susp(req, m);
			break;
		}

		/* _req_mod(req,to_be_send); oops, recursion... */
		req->rq_state |= RQ_NET_PENDING;
		inc_ap_pending(mdev);
		/* fall through: _req_mod(req,queue_for_net_read); */

	case queue_for_net_read:
		/* READ or READA, and
		 * no local disk,
		 * or target area marked as invalid,
		 * or just got an io-error. */
		/* from drbd_make_request_common
		 * or from bio_endio during read io-error recovery */

		/* so we can verify the handle in the answer packet
		 * corresponding hlist_del is in _req_may_be_done() */
		hlist_add_head(&req->collision, ar_hash_slot(mdev, req->i.sector));

		set_bit(UNPLUG_REMOTE, &mdev->flags);

		D_ASSERT(req->rq_state & RQ_NET_PENDING);
		req->rq_state |= RQ_NET_QUEUED;
		req->w.cb = (req->rq_state & RQ_LOCAL_MASK)
			? w_read_retry_remote
			: w_send_read_req;
		drbd_queue_work(&mdev->data.work, &req->w);
		break;

	case queue_for_net_write:
		/* assert something? */
		/* from drbd_make_request_common only */

		hlist_add_head(&req->collision, tl_hash_slot(mdev, req->i.sector));
		/* corresponding hlist_del is in _req_may_be_done() */
		drbd_insert_interval(&mdev->write_requests, &req->i);

		/* NOTE
		 * In case the req ended up on the transfer log before being
		 * queued on the worker, it could lead to this request being
		 * missed during cleanup after connection loss.
		 * So we have to do both operations here,
		 * within the same lock that protects the transfer log.
		 *
		 * _req_add_to_epoch(req); this has to be after the
		 * _maybe_start_new_epoch(req); which happened in
		 * drbd_make_request_common, because we now may set the bit
		 * again ourselves to close the current epoch.
		 *
		 * Add req to the (now) current epoch (barrier). */

		/* otherwise we may lose an unplug, which may cause some remote
		 * io-scheduler timeout to expire, increasing maximum latency,
		 * hurting performance. */
		set_bit(UNPLUG_REMOTE, &mdev->flags);

		/* see drbd_make_request_common,
		 * just after it grabs the req_lock */
		D_ASSERT(test_bit(CREATE_BARRIER, &mdev->flags) == 0);

		req->epoch = mdev->newest_tle->br_number;

		/* increment size of current epoch */
		mdev->newest_tle->n_writes++;

		/* queue work item to send data */
		D_ASSERT(req->rq_state & RQ_NET_PENDING);
		req->rq_state |= RQ_NET_QUEUED;
		req->w.cb =  w_send_dblock;
		drbd_queue_work(&mdev->data.work, &req->w);

		/* close the epoch, in case it outgrew the limit */
		if (mdev->newest_tle->n_writes >= mdev->net_conf->max_epoch_size)
			queue_barrier(mdev);

		break;

	case queue_for_send_oos:
		req->rq_state |= RQ_NET_QUEUED;
		req->w.cb =  w_send_oos;
		drbd_queue_work(&mdev->data.work, &req->w);
		break;

	case oos_handed_to_network:
		/* actually the same */
	case send_canceled:
		/* treat it the same */
	case send_failed:
		/* real cleanup will be done from tl_clear.  just update flags
		 * so it is no longer marked as on the worker queue */
		req->rq_state &= ~RQ_NET_QUEUED;
		/* if we did it right, tl_clear should be scheduled only after
		 * this, so this should not be necessary! */
		_req_may_be_done_not_susp(req, m);
		break;

	case handed_over_to_network:
		/* assert something? */
		if (bio_data_dir(req->master_bio) == WRITE)
			atomic_add(req->i.size >> 9, &mdev->ap_in_flight);

		if (bio_data_dir(req->master_bio) == WRITE &&
		    mdev->net_conf->wire_protocol == DRBD_PROT_A) {
			/* this is what is dangerous about protocol A:
			 * pretend it was successfully written on the peer. */
			if (req->rq_state & RQ_NET_PENDING) {
				dec_ap_pending(mdev);
				req->rq_state &= ~RQ_NET_PENDING;
				req->rq_state |= RQ_NET_OK;
			} /* else: neg-ack was faster... */
			/* it is still not yet RQ_NET_DONE until the
			 * corresponding epoch barrier got acked as well,
			 * so we know what to dirty on connection loss */
		}
		req->rq_state &= ~RQ_NET_QUEUED;
		req->rq_state |= RQ_NET_SENT;
		/* because _drbd_send_zc_bio could sleep, and may want to
		 * dereference the bio even after the "write_acked_by_peer" and
		 * "completed_ok" events came in, once we return from
		 * _drbd_send_zc_bio (drbd_send_dblock), we have to check
		 * whether it is done already, and end it.  */
		_req_may_be_done_not_susp(req, m);
		break;

	case read_retry_remote_canceled:
		req->rq_state &= ~RQ_NET_QUEUED;
		/* fall through, in case we raced with drbd_disconnect */
	case connection_lost_while_pending:
		/* transfer log cleanup after connection loss */
		/* assert something? */
		if (req->rq_state & RQ_NET_PENDING)
			dec_ap_pending(mdev);
		req->rq_state &= ~(RQ_NET_OK|RQ_NET_PENDING);
		req->rq_state |= RQ_NET_DONE;
		if (req->rq_state & RQ_NET_SENT && req->rq_state & RQ_WRITE)
			atomic_sub(req->i.size >> 9, &mdev->ap_in_flight);

		/* if it is still queued, we may not complete it here.
		 * it will be canceled soon. */
		if (!(req->rq_state & RQ_NET_QUEUED))
			_req_may_be_done(req, m); /* Allowed while state.susp */
		break;

	case write_acked_by_peer_and_sis:
		req->rq_state |= RQ_NET_SIS;
	case conflict_discarded_by_peer:
		/* for discarded conflicting writes of multiple primaries,
		 * there is no need to keep anything in the tl, potential
		 * node crashes are covered by the activity log. */
		if (what == conflict_discarded_by_peer)
			dev_alert(DEV, "Got DiscardAck packet %llus +%u!"
			      " DRBD is not a random data generator!\n",
			      (unsigned long long)req->i.sector, req->i.size);
		req->rq_state |= RQ_NET_DONE;
		/* fall through */
	case write_acked_by_peer:
		/* protocol C; successfully written on peer.
		 * Nothing to do here.
		 * We want to keep the tl in place for all protocols, to cater
		 * for volatile write-back caches on lower level devices.
		 *
		 * A barrier request is expected to have forced all prior
		 * requests onto stable storage, so completion of a barrier
		 * request could set NET_DONE right here, and not wait for the
		 * P_BARRIER_ACK, but that is an unnecessary optimization. */

		/* this makes it effectively the same as for: */
	case recv_acked_by_peer:
		/* protocol B; pretends to be successfully written on peer.
		 * see also notes above in handed_over_to_network about
		 * protocol != C */
		req->rq_state |= RQ_NET_OK;
		D_ASSERT(req->rq_state & RQ_NET_PENDING);
		dec_ap_pending(mdev);
		atomic_sub(req->i.size >> 9, &mdev->ap_in_flight);
		req->rq_state &= ~RQ_NET_PENDING;
		_req_may_be_done_not_susp(req, m);
		break;

	case neg_acked:
		/* assert something? */
		if (req->rq_state & RQ_NET_PENDING) {
			dec_ap_pending(mdev);
			atomic_sub(req->i.size >> 9, &mdev->ap_in_flight);
		}
		req->rq_state &= ~(RQ_NET_OK|RQ_NET_PENDING);

		req->rq_state |= RQ_NET_DONE;
		_req_may_be_done_not_susp(req, m);
		/* else: done by handed_over_to_network */
		break;

	case fail_frozen_disk_io:
		if (!(req->rq_state & RQ_LOCAL_COMPLETED))
			break;

		_req_may_be_done(req, m); /* Allowed while state.susp */
		break;

	case restart_frozen_disk_io:
		if (!(req->rq_state & RQ_LOCAL_COMPLETED))
			break;

		req->rq_state &= ~RQ_LOCAL_COMPLETED;

		rv = MR_READ;
		if (bio_data_dir(req->master_bio) == WRITE)
			rv = MR_WRITE;

		get_ldev(mdev);
		req->w.cb = w_restart_disk_io;
		drbd_queue_work(&mdev->data.work, &req->w);
		break;

	case resend:
		/* If RQ_NET_OK is already set, we got a P_WRITE_ACK or P_RECV_ACK
		   before the connection loss (B&C only); only P_BARRIER_ACK was missing.
		   Throwing them out of the TL here by pretending we got a BARRIER_ACK,
		   we ensure that the peer was not rebooted. */
		if (!(req->rq_state & RQ_NET_OK)) {
			if (req->w.cb) {
				drbd_queue_work(&mdev->data.work, &req->w);
				rv = req->rq_state & RQ_WRITE ? MR_WRITE : MR_READ;
			}
			break;
		}
		/* else, fall through to barrier_acked */

	case barrier_acked:
		if (!(req->rq_state & RQ_WRITE))
			break;

P
Philipp Reisner 已提交
710 711 712 713 714 715 716
		if (req->rq_state & RQ_NET_PENDING) {
			/* barrier came in before all requests have been acked.
			 * this is bad, because if the connection is lost now,
			 * we won't be able to clean them up... */
			dev_err(DEV, "FIXME (barrier_acked but pending)\n");
			list_move(&req->tl_requests, &mdev->out_of_sequence_requests);
		}
		if ((req->rq_state & RQ_NET_MASK) != 0) {
			req->rq_state |= RQ_NET_DONE;
			if (mdev->net_conf->wire_protocol == DRBD_PROT_A)
				atomic_sub(req->i.size >> 9, &mdev->ap_in_flight);
		}
		_req_may_be_done(req, m); /* Allowed while state.susp */
		break;

	case data_received:
		D_ASSERT(req->rq_state & RQ_NET_PENDING);
		dec_ap_pending(mdev);
		req->rq_state &= ~RQ_NET_PENDING;
		req->rq_state |= (RQ_NET_OK|RQ_NET_DONE);
		_req_may_be_done_not_susp(req, m);
		break;
	}

	return rv;
}

/* we may do a local read if:
 * - we are consistent (of course),
 * - or we are generally inconsistent,
 *   BUT we are still/already IN SYNC for this area.
 *   since size may be bigger than BM_BLOCK_SIZE,
 *   we may need to check several bits.
 */
static int drbd_may_do_local_read(struct drbd_conf *mdev, sector_t sector, int size)
{
	unsigned long sbnr, ebnr;
	sector_t esector, nr_sectors;

	if (mdev->state.disk == D_UP_TO_DATE)
		return 1;
	if (mdev->state.disk >= D_OUTDATED)
		return 0;
	if (mdev->state.disk <  D_INCONSISTENT)
		return 0;
	/* state.disk == D_INCONSISTENT   We will have a look at the BitMap */
	nr_sectors = drbd_get_capacity(mdev->this_bdev);
	esector = sector + (size >> 9) - 1;

	D_ASSERT(sector  < nr_sectors);
	D_ASSERT(esector < nr_sectors);

	sbnr = BM_SECT_TO_BIT(sector);
	ebnr = BM_SECT_TO_BIT(esector);

	return 0 == drbd_bm_count_bits(mdev, sbnr, ebnr);
}

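/* Common part of the make_request path, for a bio (or bio fragment)
 * that fits within a single hash slot: decide whether the request is
 * served locally, remotely, or both, and queue it under the req_lock.
 * Returns nonzero if processing must be retried because IO got
 * suspended in the meantime. */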
static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio, unsigned long start_time)
{
	const int rw = bio_rw(bio);
	const int size = bio->bi_size;
	const sector_t sector = bio->bi_sector;
	struct drbd_tl_epoch *b = NULL;
	struct drbd_request *req;
	int local, remote, send_oos = 0;
	int err = -EIO;
	int ret = 0;

	/* allocate outside of all locks; */
	req = drbd_req_new(mdev, bio);
	if (!req) {
		dec_ap_bio(mdev);
		/* only pass the error to the upper layers.
		 * if user cannot handle io errors, that's not our business. */
		dev_err(DEV, "could not kmalloc() req\n");
		bio_endio(bio, -ENOMEM);
		return 0;
	}
	req->start_time = start_time;

	local = get_ldev(mdev);
	if (!local) {
		bio_put(req->private_bio); /* or we get a bio leak */
		req->private_bio = NULL;
	}
	if (rw == WRITE) {
		remote = 1;
	} else {
		/* READ || READA */
		if (local) {
			if (!drbd_may_do_local_read(mdev, sector, size)) {
				/* we could kick the syncer to
				 * sync this extent asap, wait for
				 * it, then continue locally.
				 * Or just issue the request remotely.
				 */
				local = 0;
				bio_put(req->private_bio);
				req->private_bio = NULL;
				put_ldev(mdev);
			}
		}
		remote = !local && mdev->state.pdsk >= D_UP_TO_DATE;
	}

	/* If we have a disk, but a READA request is mapped to remote,
	 * we are R_PRIMARY, D_INCONSISTENT, SyncTarget.
	 * Just fail that READA request right here.
	 *
	 * THINK: maybe fail all READA when not local?
	 *        or make this configurable...
	 *        if network is slow, READA won't do any good.
	 */
	if (rw == READA && mdev->state.disk >= D_INCONSISTENT && !local) {
		err = -EWOULDBLOCK;
		goto fail_and_free_req;
	}

	/* For WRITES going to the local disk, grab a reference on the target
	 * extent.  This waits for any resync activity in the corresponding
	 * resync extent to finish, and, if necessary, pulls in the target
	 * extent into the activity log, which involves further disk io because
	 * of transactional on-disk meta data updates. */
	if (rw == WRITE && local && !test_bit(AL_SUSPENDED, &mdev->flags)) {
		req->rq_state |= RQ_IN_ACT_LOG;
		drbd_al_begin_io(mdev, sector);
	}

	remote = remote && drbd_should_do_remote(mdev->state);
	send_oos = rw == WRITE && drbd_should_send_oos(mdev->state);
	D_ASSERT(!(remote && send_oos));

	if (!(local || remote) && !is_susp(mdev->state)) {
		if (__ratelimit(&drbd_ratelimit_state))
			dev_err(DEV, "IO ERROR: neither local nor remote disk\n");
		goto fail_free_complete;
	}

	/* For WRITE request, we have to make sure that we have an
	 * unused_spare_tle, in case we need to start a new epoch.
 * I try to be smart and avoid always pre-allocating "just in case",
	 * but there is a race between testing the bit and pointer outside the
	 * spinlock, and grabbing the spinlock.
	 * if we lost that race, we retry.  */
	if (rw == WRITE && (remote || send_oos) &&
	    mdev->unused_spare_tle == NULL &&
	    test_bit(CREATE_BARRIER, &mdev->flags)) {
allocate_barrier:
		b = kmalloc(sizeof(struct drbd_tl_epoch), GFP_NOIO);
		if (!b) {
			dev_err(DEV, "Failed to alloc barrier.\n");
			err = -ENOMEM;
			goto fail_free_complete;
		}
	}

	/* GOOD, everything prepared, grab the spin_lock */
	spin_lock_irq(&mdev->req_lock);

	if (is_susp(mdev->state)) {
		/* If we got suspended, use the retry mechanism of
		   generic_make_request() to restart processing of this
		   bio. In the next call to drbd_make_request
		   we sleep in inc_ap_bio() */
		ret = 1;
		spin_unlock_irq(&mdev->req_lock);
		goto fail_free_complete;
	}

	if (remote || send_oos) {
		remote = drbd_should_do_remote(mdev->state);
		send_oos = rw == WRITE && drbd_should_send_oos(mdev->state);
		D_ASSERT(!(remote && send_oos));

		if (!(remote || send_oos))
			dev_warn(DEV, "lost connection while grabbing the req_lock!\n");
		if (!(local || remote)) {
			dev_err(DEV, "IO ERROR: neither local nor remote disk\n");
			spin_unlock_irq(&mdev->req_lock);
			goto fail_free_complete;
		}
	}

	if (b && mdev->unused_spare_tle == NULL) {
		mdev->unused_spare_tle = b;
		b = NULL;
	}
	if (rw == WRITE && (remote || send_oos) &&
	    mdev->unused_spare_tle == NULL &&
	    test_bit(CREATE_BARRIER, &mdev->flags)) {
		/* someone closed the current epoch
		 * while we were grabbing the spinlock */
		spin_unlock_irq(&mdev->req_lock);
		goto allocate_barrier;
	}


	/* Update disk stats */
	_drbd_start_io_acct(mdev, req, bio);

	/* _maybe_start_new_epoch(mdev);
	 * If we need to generate a write barrier packet, we have to add the
	 * new epoch (barrier) object, and queue the barrier packet for sending,
	 * and queue the req's data after it _within the same lock_, otherwise
	 * we have race conditions were the reorder domains could be mixed up.
	 *
	 * Even read requests may start a new epoch and queue the corresponding
	 * barrier packet.  To get the write ordering right, we only have to
	 * make sure that, if this is a write request and it triggered a
	 * barrier packet, this request is queued within the same spinlock. */
	if ((remote || send_oos) && mdev->unused_spare_tle &&
	    test_and_clear_bit(CREATE_BARRIER, &mdev->flags)) {
		_tl_add_barrier(mdev, mdev->unused_spare_tle);
		mdev->unused_spare_tle = NULL;
	} else {
		D_ASSERT(!(remote && rw == WRITE &&
			   test_bit(CREATE_BARRIER, &mdev->flags)));
	}

	/* NOTE
	 * Actually, 'local' may be wrong here already, since we may have failed
	 * to write to the meta data, and may become wrong anytime because of
	 * local io-error for some other request, which would lead to us
	 * "detaching" the local disk.
	 *
	 * 'remote' may become wrong any time because the network could fail.
	 *
	 * This is a harmless race condition, though, since it is handled
	 * correctly at the appropriate places; so it just defers the failure
	 * of the respective operation.
	 */

	/* mark them early for readability.
	 * this just sets some state flags. */
	if (remote)
		_req_mod(req, to_be_send);
	if (local)
		_req_mod(req, to_be_submitted);

	/* check this request on the collision detection hash tables.
	 * if we have a conflict, just complete it here.
	 * THINK do we want to check reads, too? (I don't think so...) */
	if (rw == WRITE && _req_conflicts(req))
		goto fail_conflicting;

	list_add_tail(&req->tl_requests, &mdev->newest_tle->requests);

	/* NOTE remote first: to get the concurrent write detection right,
	 * we must register the request before start of local IO.  */
	if (remote) {
		/* either WRITE and C_CONNECTED,
		 * or READ, and no local disk,
		 * or READ, but not in sync.
		 */
		_req_mod(req, (rw == WRITE)
				? queue_for_net_write
				: queue_for_net_read);
	}
	if (send_oos && drbd_set_out_of_sync(mdev, sector, size))
		_req_mod(req, queue_for_send_oos);

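	/* Congestion handling (protocol >= 96, unless on-congestion is
	 * set to "block"): once the configured fill level or activity
	 * log usage is exceeded, close the current epoch and either
	 * pull ahead (C_AHEAD) or disconnect, as configured. */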
	if (remote &&
	    mdev->net_conf->on_congestion != OC_BLOCK && mdev->agreed_pro_version >= 96) {
		int congested = 0;

		if (mdev->net_conf->cong_fill &&
		    atomic_read(&mdev->ap_in_flight) >= mdev->net_conf->cong_fill) {
			dev_info(DEV, "Congestion-fill threshold reached\n");
			congested = 1;
		}

		if (mdev->act_log->used >= mdev->net_conf->cong_extents) {
			dev_info(DEV, "Congestion-extents threshold reached\n");
			congested = 1;
		}

		if (congested) {
			queue_barrier(mdev); /* last barrier, after mirrored writes */

			if (mdev->net_conf->on_congestion == OC_PULL_AHEAD)
				_drbd_set_state(_NS(mdev, conn, C_AHEAD), 0, NULL);
			else  /*mdev->net_conf->on_congestion == OC_DISCONNECT */
				_drbd_set_state(_NS(mdev, conn, C_DISCONNECTING), 0, NULL);
		}
	}

	spin_unlock_irq(&mdev->req_lock);
	kfree(b); /* if someone else has beaten us to it... */

	if (local) {
		req->private_bio->bi_bdev = mdev->ldev->backing_bdev;

		/* State may have changed since we grabbed our reference on the
		 * mdev->ldev member. Double check, and short-circuit to endio.
		 * In case the last activity log transaction failed to get on
		 * stable storage, and this is a WRITE, we may not even submit
		 * this bio. */
		if (get_ldev(mdev)) {
			if (drbd_insert_fault(mdev,   rw == WRITE ? DRBD_FAULT_DT_WR
						    : rw == READ  ? DRBD_FAULT_DT_RD
						    :               DRBD_FAULT_DT_RA))
				bio_endio(req->private_bio, -EIO);
			else
				generic_make_request(req->private_bio);
			put_ldev(mdev);
		} else
			bio_endio(req->private_bio, -EIO);
	}

	return 0;

fail_conflicting:
	/* this is a conflicting request.
	 * even though it may have been only _partially_
	 * overlapping with one of the currently pending requests,
	 * without even submitting or sending it, we will
	 * pretend that it was successfully served right now.
	 */
	_drbd_end_io_acct(mdev, req);
	spin_unlock_irq(&mdev->req_lock);
	if (remote)
		dec_ap_pending(mdev);
	/* THINK: do we want to fail it (-EIO), or pretend success?
	 * this pretends success. */
	err = 0;

fail_free_complete:
	if (req->rq_state & RQ_IN_ACT_LOG)
		drbd_al_complete_io(mdev, sector);
fail_and_free_req:
	if (local) {
		bio_put(req->private_bio);
		req->private_bio = NULL;
		put_ldev(mdev);
	}
	if (!ret)
		bio_endio(bio, err);

	drbd_req_free(req);
	dec_ap_bio(mdev);
	kfree(b);

	return ret;
}

/* helper function for drbd_make_request
 * if we can determine just by the mdev (state) that this request will fail,
 * return 1
 * otherwise return 0
 */
static int drbd_fail_request_early(struct drbd_conf *mdev, int is_write)
{
	if (mdev->state.role != R_PRIMARY &&
		(!allow_oos || is_write)) {
		if (__ratelimit(&drbd_ratelimit_state)) {
			dev_err(DEV, "Process %s[%u] tried to %s; "
			    "since we are not in Primary state, "
			    "we cannot allow this\n",
			    current->comm, current->pid,
			    is_write ? "WRITE" : "READ");
		}
		return 1;
	}

	return 0;
}

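/* make_request callback of the DRBD block device.
 * Fails requests early while we are not Primary (unless allow_oos
 * permits reads), and splits bios that cross a hash slot boundary
 * before handing them to drbd_make_request_common(). */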
int drbd_make_request(struct request_queue *q, struct bio *bio)
{
	unsigned int s_enr, e_enr;
	struct drbd_conf *mdev = (struct drbd_conf *) q->queuedata;
	unsigned long start_time;

	if (drbd_fail_request_early(mdev, bio_data_dir(bio) & WRITE)) {
		bio_endio(bio, -EPERM);
		return 0;
	}

	start_time = jiffies;

	/*
	 * what we "blindly" assume:
	 */
	D_ASSERT(bio->bi_size > 0);
	D_ASSERT((bio->bi_size & 0x1ff) == 0);
	D_ASSERT(bio->bi_idx == 0);

	/* to make some things easier, force alignment of requests within the
	 * granularity of our hash tables */
	s_enr = bio->bi_sector >> HT_SHIFT;
	e_enr = (bio->bi_sector+(bio->bi_size>>9)-1) >> HT_SHIFT;

	if (likely(s_enr == e_enr)) {
		inc_ap_bio(mdev, 1);
		return drbd_make_request_common(mdev, bio, start_time);
	}

	/* can this bio be split generically?
	 * Maybe add our own split-arbitrary-bios function. */
	if (bio->bi_vcnt != 1 || bio->bi_idx != 0 || bio->bi_size > DRBD_MAX_BIO_SIZE) {
		/* rather error out here than BUG in bio_split */
		dev_err(DEV, "bio would need to, but cannot, be split: "
		    "(vcnt=%u,idx=%u,size=%u,sector=%llu)\n",
		    bio->bi_vcnt, bio->bi_idx, bio->bi_size,
		    (unsigned long long)bio->bi_sector);
		bio_endio(bio, -EINVAL);
	} else {
		/* This bio crosses some boundary, so we have to split it. */
		struct bio_pair *bp;
		/* works for the "do not cross hash slot boundaries" case
		 * e.g. sector 262269, size 4096
		 * s_enr = 262269 >> 6 = 4097
		 * e_enr = (262269+8-1) >> 6 = 4098
		 * HT_SHIFT = 6
		 * sps = 64, mask = 63
		 * first_sectors = 64 - (262269 & 63) = 3
		 */
		const sector_t sect = bio->bi_sector;
		const int sps = 1 << HT_SHIFT; /* sectors per slot */
		const int mask = sps - 1;
		const sector_t first_sectors = sps - (sect & mask);
		bp = bio_split(bio, first_sectors);

		/* we need to get a "reference count" (ap_bio_cnt)
		 * to avoid races with the disconnect/reconnect/suspend code.
		 * In case we need to split the bio here, we need to get three references
		 * atomically, otherwise we might deadlock when trying to submit the
		 * second one! */
		inc_ap_bio(mdev, 3);

		D_ASSERT(e_enr == s_enr + 1);

		while (drbd_make_request_common(mdev, &bp->bio1, start_time))
			inc_ap_bio(mdev, 1);

		while (drbd_make_request_common(mdev, &bp->bio2, start_time))
			inc_ap_bio(mdev, 1);

		dec_ap_bio(mdev);

		bio_pair_release(bp);
	}
	return 0;
}

/* This is called by bio_add_page().  With this function we reduce
 * the number of BIOs that span over multiple DRBD_MAX_BIO_SIZEs
 * units (was AL_EXTENTs).
 *
 * we do the calculation within the lower 32bit of the byte offsets,
 * since we don't care for actual offset, but only check whether it
 * would cross "activity log extent" boundaries.
 *
 * As long as the BIO is empty we have to allow at least one bvec,
 * regardless of size and offset.  so the resulting bio may still
 * cross extent boundaries.  those are dealt with (bio_split) in
 * drbd_make_request.
 */
int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct bio_vec *bvec)
{
	struct drbd_conf *mdev = (struct drbd_conf *) q->queuedata;
	unsigned int bio_offset =
		(unsigned int)bvm->bi_sector << 9; /* 32 bit */
	unsigned int bio_size = bvm->bi_size;
	int limit, backing_limit;

	limit = DRBD_MAX_BIO_SIZE
	      - ((bio_offset & (DRBD_MAX_BIO_SIZE-1)) + bio_size);
	if (limit < 0)
		limit = 0;
	if (bio_size == 0) {
		if (limit <= bvec->bv_len)
			limit = bvec->bv_len;
	} else if (limit && get_ldev(mdev)) {
		struct request_queue * const b =
			mdev->ldev->backing_bdev->bd_disk->queue;
		if (b->merge_bvec_fn) {
			backing_limit = b->merge_bvec_fn(b, bvm, bvec);
			limit = min(limit, backing_limit);
		}
		put_ldev(mdev);
	}
	return limit;
}

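/* Timer callback, re-armed while we are connected: inspect the oldest
 * request in the transfer log. If it has been pending for longer than
 * the effective network timeout (ko-count * timeout), either the peer
 * failed to answer in time (go to C_TIMEOUT), or our own backing
 * device seems frozen (warn only, and keep waiting). */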
void request_timer_fn(unsigned long data)
{
	struct drbd_conf *mdev = (struct drbd_conf *) data;
	struct drbd_request *req; /* oldest request */
	struct list_head *le;
	unsigned long et = 0; /* effective timeout = ko_count * timeout */

	if (get_net_conf(mdev)) {
		et = mdev->net_conf->timeout*HZ/10 * mdev->net_conf->ko_count;
		put_net_conf(mdev);
	}
	if (!et || mdev->state.conn < C_WF_REPORT_PARAMS)
		return; /* Recurring timer stopped */

	spin_lock_irq(&mdev->req_lock);
	le = &mdev->oldest_tle->requests;
	if (list_empty(le)) {
		spin_unlock_irq(&mdev->req_lock);
		mod_timer(&mdev->request_timer, jiffies + et);
		return;
	}

	le = le->prev;
	req = list_entry(le, struct drbd_request, tl_requests);
	if (time_is_before_eq_jiffies(req->start_time + et)) {
		if (req->rq_state & RQ_NET_PENDING) {
			dev_warn(DEV, "Remote failed to finish a request within ko-count * timeout\n");
			_drbd_set_state(_NS(mdev, conn, C_TIMEOUT), CS_VERBOSE, NULL);
		} else {
			dev_warn(DEV, "Local backing block device frozen?\n");
			mod_timer(&mdev->request_timer, jiffies + et);
		}
	} else {
		mod_timer(&mdev->request_timer, req->start_time + et);
	}

	spin_unlock_irq(&mdev->req_lock);
}