/*
   drbd_req.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

 */

#include <linux/module.h>

#include <linux/slab.h>
#include <linux/drbd.h>
#include "drbd_int.h"
#include "drbd_req.h"


static bool drbd_may_do_local_read(struct drbd_conf *mdev, sector_t sector, int size);

/* Update disk stats at start of I/O request */
static void _drbd_start_io_acct(struct drbd_conf *mdev, struct drbd_request *req, struct bio *bio)
{
	const int rw = bio_data_dir(bio);
	int cpu;
	cpu = part_stat_lock();
	part_stat_inc(cpu, &mdev->vdisk->part0, ios[rw]);
	part_stat_add(cpu, &mdev->vdisk->part0, sectors[rw], bio_sectors(bio));
	(void) cpu; /* The macro invocations above want the cpu argument; the cast
		       silences the "set but not used" compiler warning. */
	part_inc_in_flight(&mdev->vdisk->part0, rw);
	part_stat_unlock();
}

/* Update disk stats when completing request upwards */
static void _drbd_end_io_acct(struct drbd_conf *mdev, struct drbd_request *req)
{
	int rw = bio_data_dir(req->master_bio);
	unsigned long duration = jiffies - req->start_time;
	int cpu;
	cpu = part_stat_lock();
	part_stat_add(cpu, &mdev->vdisk->part0, ticks[rw], duration);
	part_round_stats(cpu, &mdev->vdisk->part0);
	part_dec_in_flight(&mdev->vdisk->part0, rw);
	part_stat_unlock();
}

static struct drbd_request *drbd_req_new(struct drbd_conf *mdev,
					       struct bio *bio_src)
{
	struct drbd_request *req;

	req = mempool_alloc(drbd_request_mempool, GFP_NOIO);
	if (!req)
		return NULL;

	drbd_req_make_private_bio(req, bio_src);
	req->rq_state    = bio_data_dir(bio_src) == WRITE ? RQ_WRITE : 0;
	req->w.mdev      = mdev;
	req->master_bio  = bio_src;
	req->epoch       = 0;

	drbd_clear_interval(&req->i);
	req->i.sector     = bio_src->bi_sector;
	req->i.size      = bio_src->bi_size;
	req->i.local = true;
	req->i.waiting = false;
	INIT_LIST_HEAD(&req->tl_requests);
	INIT_LIST_HEAD(&req->w.list);

	return req;
}

static void drbd_req_free(struct drbd_request *req)
{
	mempool_free(req, drbd_request_mempool);
}

/* rw is bio_data_dir(), only READ or WRITE */
static void _req_is_done(struct drbd_conf *mdev, struct drbd_request *req, const int rw)
{
	const unsigned long s = req->rq_state;

	/* remove it from the transfer log.
	 * well, only if it had been there in the first
	 * place... if it had not (local only or conflicting
	 * and never sent), it should still be "empty" as
	 * initialized in drbd_req_new(), so we can list_del() it
	 * here unconditionally */
	list_del(&req->tl_requests);

	/* if it was a write, we may have to set the corresponding
	 * bit(s) out-of-sync first. If it had a local part, we need to
	 * release the reference to the activity log. */
	if (rw == WRITE) {
		/* Set out-of-sync unless both OK flags are set
		 * (local only or remote failed).
		 * Other places where we set out-of-sync:
		 * READ with local io-error */
		if (!(s & RQ_NET_OK) || !(s & RQ_LOCAL_OK))
			drbd_set_out_of_sync(mdev, req->i.sector, req->i.size);

		if ((s & RQ_NET_OK) && (s & RQ_LOCAL_OK) && (s & RQ_NET_SIS))
			drbd_set_in_sync(mdev, req->i.sector, req->i.size);

		/* one might be tempted to move the drbd_al_complete_io
		 * to the local io completion callback drbd_request_endio.
		 * but, if this was a mirror write, we may only
		 * drbd_al_complete_io after this is RQ_NET_DONE,
		 * otherwise the extent could be dropped from the al
		 * before it has actually been written on the peer.
		 * if we crash before our peer knows about the request,
		 * but after the extent has been dropped from the al,
		 * we would forget to resync the corresponding extent.
		 */
		if (s & RQ_LOCAL_MASK) {
			if (get_ldev_if_state(mdev, D_FAILED)) {
				if (s & RQ_IN_ACT_LOG)
					drbd_al_complete_io(mdev, &req->i);
				put_ldev(mdev);
			} else if (__ratelimit(&drbd_ratelimit_state)) {
				dev_warn(DEV, "Should have called drbd_al_complete_io(, %llu, %u), "
					 "but my Disk seems to have failed :(\n",
					 (unsigned long long) req->i.sector, req->i.size);
			}
		}
	}

	drbd_req_free(req);
}

static void queue_barrier(struct drbd_conf *mdev)
{
	struct drbd_tl_epoch *b;
	struct drbd_tconn *tconn = mdev->tconn;

	/* We are within the req_lock. Once we queued the barrier for sending,
	 * we set the CREATE_BARRIER bit. It is cleared as soon as a new
	 * barrier/epoch object is added. This is the only place this bit is
	 * set. It indicates that the barrier for this epoch is already queued,
	 * and no new epoch has been created yet. */
	if (test_bit(CREATE_BARRIER, &tconn->flags))
		return;

	b = tconn->newest_tle;
	b->w.cb = w_send_barrier;
	b->w.mdev = mdev;
	/* inc_ap_pending done here, so we won't
	 * get imbalanced on connection loss.
	 * dec_ap_pending will be done in got_BarrierAck
	 * or (on connection loss) in tl_clear.  */
	inc_ap_pending(mdev);
	drbd_queue_work(&tconn->data.work, &b->w);
	set_bit(CREATE_BARRIER, &tconn->flags);
}

static void _about_to_complete_local_write(struct drbd_conf *mdev,
	struct drbd_request *req)
{
	const unsigned long s = req->rq_state;

	/* Before we can signal completion to the upper layers,
	 * we may need to close the current epoch.
	 * We can skip this, if this request has not even been sent, because we
	 * did not have a fully established connection yet/anymore, during
	 * bitmap exchange, or while we are C_AHEAD due to congestion policy.
	 */
	if (mdev->state.conn >= C_CONNECTED &&
	    (s & RQ_NET_SENT) != 0 &&
	    req->epoch == mdev->tconn->newest_tle->br_number)
		queue_barrier(mdev);
}

void complete_master_bio(struct drbd_conf *mdev,
		struct bio_and_error *m)
{
	bio_endio(m->bio, m->error);
	dec_ap_bio(mdev);
}


static void drbd_remove_request_interval(struct rb_root *root,
					 struct drbd_request *req)
{
	struct drbd_conf *mdev = req->w.mdev;
	struct drbd_interval *i = &req->i;

	drbd_remove_interval(root, i);

	/* Wake up any processes waiting for this request to complete.  */
	if (i->waiting)
		wake_up(&mdev->misc_wait);
}

/* Helper for __req_mod().
 * Set m->bio to the master bio, if it is fit to be completed,
 * or leave it alone (it is initialized to NULL in __req_mod),
 * if it has already been completed, or cannot be completed yet.
 * If m->bio is set, the error status to be returned is placed in m->error.
 */
void _req_may_be_done(struct drbd_request *req, struct bio_and_error *m)
{
	const unsigned long s = req->rq_state;
	struct drbd_conf *mdev = req->w.mdev;
	int rw = req->rq_state & RQ_WRITE ? WRITE : READ;

	/* we must not complete the master bio, while it is
	 *	still being processed by _drbd_send_zc_bio (drbd_send_dblock)
	 *	not yet acknowledged by the peer
	 *	not yet completed by the local io subsystem
	 * these flags may get cleared in any order by
	 *	the worker,
	 *	the receiver,
	 *	the bio_endio completion callbacks.
	 */
	if (s & RQ_LOCAL_PENDING && !(s & RQ_LOCAL_ABORTED))
		return;
	if (req->i.waiting) {
		/* Retry all conflicting peer requests.  */
		wake_up(&mdev->misc_wait);
	}
	if (s & RQ_NET_QUEUED)
		return;
	if (s & RQ_NET_PENDING)
		return;

	if (req->master_bio) {
		/* this is DATA_RECEIVED (remote read)
		 * or protocol C P_WRITE_ACK
		 * or protocol B P_RECV_ACK
		 * or protocol A "HANDED_OVER_TO_NETWORK" (SendAck)
		 * or canceled or failed,
		 * or killed from the transfer log due to connection loss.
		 */

		/*
		 * figure out whether to report success or failure.
		 *
		 * report success when at least one of the operations succeeded.
		 * or, to put the other way,
		 * only report failure, when both operations failed.
		 *
		 * what to do about the failures is handled elsewhere.
		 * what we need to do here is just: complete the master_bio.
		 *
		 * local completion error, if any, has been stored as ERR_PTR
		 * in private_bio within drbd_request_endio.
		 */
		int ok = (s & RQ_LOCAL_OK) || (s & RQ_NET_OK);
		int error = PTR_ERR(req->private_bio);

		/* remove the request from the conflict detection
		 * respective block_id verification hash */
		if (!drbd_interval_empty(&req->i)) {
			struct rb_root *root;

			if (rw == WRITE)
				root = &mdev->write_requests;
			else
				root = &mdev->read_requests;
			drbd_remove_request_interval(root, req);
		} else if (!(s & RQ_POSTPONED))
			D_ASSERT((s & (RQ_NET_MASK & ~RQ_NET_DONE)) == 0);

		/* for writes we need to do some extra housekeeping */
		if (rw == WRITE)
			_about_to_complete_local_write(mdev, req);

		/* Update disk stats */
		_drbd_end_io_acct(mdev, req);

		if (!(s & RQ_POSTPONED)) {
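			/* "(error ?: -EIO)" is the GNU "x ?: y" shorthand:
			 * report the stored local error code if it is
			 * nonzero, -EIO otherwise. */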
			m->error = ok ? 0 : (error ?: -EIO);
			m->bio = req->master_bio;
		}
		req->master_bio = NULL;
	}

	if (s & RQ_LOCAL_PENDING)
		return;

	if ((s & RQ_NET_MASK) == 0 || (s & RQ_NET_DONE)) {
		/* this is disconnected (local only) operation,
		 * or protocol A, B, or C P_BARRIER_ACK,
		 * or killed from the transfer log due to connection loss. */
		_req_is_done(mdev, req, rw);
	}
	/* else: network part and not DONE yet. that is
	 * protocol A, B, or C, barrier ack still pending... */
}

static void _req_may_be_done_not_susp(struct drbd_request *req, struct bio_and_error *m)
{
	struct drbd_conf *mdev = req->w.mdev;

	if (!drbd_suspended(mdev))
		_req_may_be_done(req, m);
}

/* obviously this could be coded as many single functions
 * instead of one huge switch,
 * or by putting the code directly in the respective locations
 * (as it has been before).
 *
 * but having it this way
 *  enforces that it is all in this one place, where it is easier to audit,
 *  it makes it obvious that whatever "event" "happens" to a request should
 *  happen "atomically" within the req_lock,
 *  and it enforces that we have to think in a very structured manner
 *  about the "events" that may happen to a request during its life time ...
 */
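/* A sketch of the typical calling pattern (cf. the _req_mod() wrapper):
 * feed an event to __req_mod() under req_lock, drop the lock, and only
 * then complete the master bio, if one was handed back:
 *
 *	struct bio_and_error m;
 *
 *	spin_lock_irq(&mdev->tconn->req_lock);
 *	__req_mod(req, what, &m);
 *	spin_unlock_irq(&mdev->tconn->req_lock);
 *	if (m.bio)
 *		complete_master_bio(mdev, &m);
 */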
int __req_mod(struct drbd_request *req, enum drbd_req_event what,
		struct bio_and_error *m)
{
	struct drbd_conf *mdev = req->w.mdev;
	struct net_conf *nc;
	int p, rv = 0;

	if (m)
		m->bio = NULL;

	switch (what) {
	default:
		dev_err(DEV, "LOGIC BUG in %s:%u\n", __FILE__ , __LINE__);
		break;

	/* does not happen...
	 * initialization done in drbd_req_new
	case CREATED:
		break;
		*/

	case TO_BE_SENT: /* via network */
		/* reached via __drbd_make_request
		 * and from w_read_retry_remote */
		D_ASSERT(!(req->rq_state & RQ_NET_MASK));
		req->rq_state |= RQ_NET_PENDING;
		rcu_read_lock();
		nc = rcu_dereference(mdev->tconn->net_conf);
		p = nc->wire_protocol;
		rcu_read_unlock();
		req->rq_state |=
			p == DRBD_PROT_C ? RQ_EXP_WRITE_ACK :
			p == DRBD_PROT_B ? RQ_EXP_RECEIVE_ACK : 0;
		inc_ap_pending(mdev);
		break;

	case TO_BE_SUBMITTED: /* locally */
		/* reached via __drbd_make_request */
		D_ASSERT(!(req->rq_state & RQ_LOCAL_MASK));
		req->rq_state |= RQ_LOCAL_PENDING;
		break;

	case COMPLETED_OK:
		if (req->rq_state & RQ_WRITE)
			mdev->writ_cnt += req->i.size >> 9;
		else
			mdev->read_cnt += req->i.size >> 9;

		req->rq_state |= (RQ_LOCAL_COMPLETED|RQ_LOCAL_OK);
		req->rq_state &= ~RQ_LOCAL_PENDING;

		_req_may_be_done_not_susp(req, m);
		put_ldev(mdev);
		break;

	case ABORT_DISK_IO:
		req->rq_state |= RQ_LOCAL_ABORTED;
		if (req->rq_state & RQ_WRITE)
			_req_may_be_done_not_susp(req, m);
		else
			goto goto_queue_for_net_read;
		break;

	case WRITE_COMPLETED_WITH_ERROR:
		req->rq_state |= RQ_LOCAL_COMPLETED;
		req->rq_state &= ~RQ_LOCAL_PENDING;

		__drbd_chk_io_error(mdev, false);
		_req_may_be_done_not_susp(req, m);
		put_ldev(mdev);
		break;

	case READ_AHEAD_COMPLETED_WITH_ERROR:
		/* it is legal to fail READA */
		req->rq_state |= RQ_LOCAL_COMPLETED;
		req->rq_state &= ~RQ_LOCAL_PENDING;
		_req_may_be_done_not_susp(req, m);
		put_ldev(mdev);
		break;

	case READ_COMPLETED_WITH_ERROR:
		drbd_set_out_of_sync(mdev, req->i.sector, req->i.size);

		req->rq_state |= RQ_LOCAL_COMPLETED;
		req->rq_state &= ~RQ_LOCAL_PENDING;

		D_ASSERT(!(req->rq_state & RQ_NET_MASK));

		__drbd_chk_io_error(mdev, false);
		put_ldev(mdev);

	goto_queue_for_net_read:

		/* no point in retrying if there is no good remote data,
		 * or we have no connection. */
		if (mdev->state.pdsk != D_UP_TO_DATE) {
			_req_may_be_done_not_susp(req, m);
			break;
		}

		/* _req_mod(req,TO_BE_SENT); oops, recursion... */
		req->rq_state |= RQ_NET_PENDING;
		inc_ap_pending(mdev);
		/* fall through: _req_mod(req,QUEUE_FOR_NET_READ); */
	case QUEUE_FOR_NET_READ:
		/* READ or READA, and
		 * no local disk,
		 * or target area marked as invalid,
		 * or just got an io-error. */
		/* from __drbd_make_request
		 * or from bio_endio during read io-error recovery */

		/* so we can verify the handle in the answer packet;
		 * the matching drbd_remove_request_interval() is in
		 * _req_may_be_done() */
		D_ASSERT(drbd_interval_empty(&req->i));
		drbd_insert_interval(&mdev->read_requests, &req->i);
		set_bit(UNPLUG_REMOTE, &mdev->flags);

		D_ASSERT(req->rq_state & RQ_NET_PENDING);
		req->rq_state |= RQ_NET_QUEUED;
		req->w.cb = (req->rq_state & RQ_LOCAL_MASK)
			? w_read_retry_remote
			: w_send_read_req;
		drbd_queue_work(&mdev->tconn->data.work, &req->w);
		break;

	case QUEUE_FOR_NET_WRITE:
		/* assert something? */
		/* from __drbd_make_request only */

		/* the matching drbd_remove_request_interval() is in
		 * _req_may_be_done() */
		D_ASSERT(drbd_interval_empty(&req->i));
		drbd_insert_interval(&mdev->write_requests, &req->i);

		/* NOTE
		 * In case the req ended up on the transfer log before being
		 * queued on the worker, it could lead to this request being
		 * missed during cleanup after connection loss.
		 * So we have to do both operations here,
		 * within the same lock that protects the transfer log.
		 *
		 * _req_add_to_epoch(req); this has to be after the
		 * _maybe_start_new_epoch(req); which happened in
		 * __drbd_make_request, because we now may set the bit
		 * again ourselves to close the current epoch.
		 *
		 * Add req to the (now) current epoch (barrier). */

		/* otherwise we may lose an unplug, which may cause some remote
		 * io-scheduler timeout to expire, increasing maximum latency,
		 * hurting performance. */
		set_bit(UNPLUG_REMOTE, &mdev->flags);

		/* see __drbd_make_request,
		 * just after it grabs the req_lock */
		D_ASSERT(test_bit(CREATE_BARRIER, &mdev->tconn->flags) == 0);
		req->epoch = mdev->tconn->newest_tle->br_number;

		/* increment size of current epoch */
		mdev->tconn->newest_tle->n_writes++;

		/* queue work item to send data */
		D_ASSERT(req->rq_state & RQ_NET_PENDING);
		req->rq_state |= RQ_NET_QUEUED;
		req->w.cb =  w_send_dblock;
		drbd_queue_work(&mdev->tconn->data.work, &req->w);

		/* close the epoch, in case it outgrew the limit */
		rcu_read_lock();
		nc = rcu_dereference(mdev->tconn->net_conf);
		p = nc->max_epoch_size;
		rcu_read_unlock();
		if (mdev->tconn->newest_tle->n_writes >= p)
			queue_barrier(mdev);

		break;

	case QUEUE_FOR_SEND_OOS:
		req->rq_state |= RQ_NET_QUEUED;
		req->w.cb =  w_send_out_of_sync;
		drbd_queue_work(&mdev->tconn->data.work, &req->w);
		break;

	case OOS_HANDED_TO_NETWORK:
		/* actually the same */
	case SEND_CANCELED:
		/* treat it the same */
	case SEND_FAILED:
		/* real cleanup will be done from tl_clear.  just update flags
		 * so it is no longer marked as on the worker queue */
		req->rq_state &= ~RQ_NET_QUEUED;
		/* if we did it right, tl_clear should be scheduled only after
		 * this, so this should not be necessary! */
		_req_may_be_done_not_susp(req, m);
		break;

	case HANDED_OVER_TO_NETWORK:
		/* assert something? */
		if (bio_data_dir(req->master_bio) == WRITE)
			atomic_add(req->i.size >> 9, &mdev->ap_in_flight);

		if (bio_data_dir(req->master_bio) == WRITE &&
		    !(req->rq_state & (RQ_EXP_RECEIVE_ACK | RQ_EXP_WRITE_ACK))) {
			/* this is what is dangerous about protocol A:
			 * pretend it was successfully written on the peer. */
			if (req->rq_state & RQ_NET_PENDING) {
				dec_ap_pending(mdev);
				req->rq_state &= ~RQ_NET_PENDING;
				req->rq_state |= RQ_NET_OK;
			} /* else: neg-ack was faster... */
			/* it is still not yet RQ_NET_DONE until the
			 * corresponding epoch barrier got acked as well,
			 * so we know what to dirty on connection loss */
		}
		req->rq_state &= ~RQ_NET_QUEUED;
		req->rq_state |= RQ_NET_SENT;
		/* because _drbd_send_zc_bio could sleep, and may want to
		 * dereference the bio even after the "WRITE_ACKED_BY_PEER" and
		 * "COMPLETED_OK" events came in, once we return from
		 * _drbd_send_zc_bio (drbd_send_dblock), we have to check
		 * whether it is done already, and end it.  */
		_req_may_be_done_not_susp(req, m);
		break;

	case READ_RETRY_REMOTE_CANCELED:
		req->rq_state &= ~RQ_NET_QUEUED;
		/* fall through, in case we raced with drbd_disconnect */
	case CONNECTION_LOST_WHILE_PENDING:
		/* transfer log cleanup after connection loss */
		/* assert something? */
		if (req->rq_state & RQ_NET_PENDING)
			dec_ap_pending(mdev);

		p = !(req->rq_state & RQ_WRITE) && req->rq_state & RQ_NET_PENDING;

		req->rq_state &= ~(RQ_NET_OK|RQ_NET_PENDING);
		req->rq_state |= RQ_NET_DONE;
		if (req->rq_state & RQ_NET_SENT && req->rq_state & RQ_WRITE)
			atomic_sub(req->i.size >> 9, &mdev->ap_in_flight);

		/* if it is still queued, we may not complete it here.
		 * it will be canceled soon. */
		if (!(req->rq_state & RQ_NET_QUEUED)) {
			if (p)
				goto goto_read_retry_local;
			_req_may_be_done(req, m); /* Allowed while state.susp */
		}
		break;

	case WRITE_ACKED_BY_PEER_AND_SIS:
		req->rq_state |= RQ_NET_SIS;
	case DISCARD_WRITE:
		/* for discarded conflicting writes of multiple primaries,
		 * there is no need to keep anything in the tl, potential
		 * node crashes are covered by the activity log. */
		req->rq_state |= RQ_NET_DONE;
		/* fall through */
	case WRITE_ACKED_BY_PEER:
		D_ASSERT(req->rq_state & RQ_EXP_WRITE_ACK);
		/* protocol C; successfully written on peer.
		 * Nothing to do here.
		 * We want to keep the tl in place for all protocols, to cater
		 * for volatile write-back caches on lower level devices.
		 *
		 * A barrier request is expected to have forced all prior
		 * requests onto stable storage, so completion of a barrier
		 * request could set NET_DONE right here, and not wait for the
		 * P_BARRIER_ACK, but that is an unnecessary optimization. */

		goto ack_common;
		/* this makes it effectively the same as for: */
	case RECV_ACKED_BY_PEER:
		D_ASSERT(req->rq_state & RQ_EXP_RECEIVE_ACK);
		/* protocol B; pretends to be successfully written on peer.
		 * see also notes above in HANDED_OVER_TO_NETWORK about
		 * protocol != C */
	ack_common:
		req->rq_state |= RQ_NET_OK;
		D_ASSERT(req->rq_state & RQ_NET_PENDING);
		dec_ap_pending(mdev);
		atomic_sub(req->i.size >> 9, &mdev->ap_in_flight);
		req->rq_state &= ~RQ_NET_PENDING;
		_req_may_be_done_not_susp(req, m);
		break;

	case POSTPONE_WRITE:
		D_ASSERT(req->rq_state & RQ_EXP_WRITE_ACK);
		/* If this node has already detected the write conflict, the
		 * worker will be waiting on misc_wait.  Wake it up once this
		 * request has completed locally.
		 */
		D_ASSERT(req->rq_state & RQ_NET_PENDING);
		req->rq_state |= RQ_POSTPONED;
		_req_may_be_done_not_susp(req, m);
		break;

	case NEG_ACKED:
		/* assert something? */
		if (req->rq_state & RQ_NET_PENDING) {
			dec_ap_pending(mdev);
			if (req->rq_state & RQ_WRITE)
				atomic_sub(req->i.size >> 9, &mdev->ap_in_flight);
		}
		req->rq_state &= ~(RQ_NET_OK|RQ_NET_PENDING);

		req->rq_state |= RQ_NET_DONE;
		if (!(req->rq_state & RQ_WRITE))
			goto goto_read_retry_local;

		_req_may_be_done_not_susp(req, m);
		/* else: done by HANDED_OVER_TO_NETWORK */
		break;

	goto_read_retry_local:
		if (!drbd_may_do_local_read(mdev, req->i.sector, req->i.size)) {
			_req_may_be_done_not_susp(req, m);
			break;
		}
		D_ASSERT(!(req->rq_state & RQ_LOCAL_PENDING));
		req->rq_state |= RQ_LOCAL_PENDING;

		get_ldev(mdev);
		req->w.cb = w_restart_disk_io;
		drbd_queue_work(&mdev->tconn->data.work, &req->w);
		break;

	case FAIL_FROZEN_DISK_IO:
		if (!(req->rq_state & RQ_LOCAL_COMPLETED))
			break;

		_req_may_be_done(req, m); /* Allowed while state.susp */
		break;

	case RESTART_FROZEN_DISK_IO:
		if (!(req->rq_state & RQ_LOCAL_COMPLETED))
			break;

		req->rq_state &= ~RQ_LOCAL_COMPLETED;

		rv = MR_READ;
		if (bio_data_dir(req->master_bio) == WRITE)
			rv = MR_WRITE;

		get_ldev(mdev);
		req->w.cb = w_restart_disk_io;
		drbd_queue_work(&mdev->tconn->data.work, &req->w);
		break;

	case RESEND:
		/* If RQ_NET_OK is already set, we got a P_WRITE_ACK or P_RECV_ACK
		   before the connection loss (B&C only); only P_BARRIER_ACK was missing.
		   Throw them out of the TL here by pretending we got a BARRIER_ACK.
		   During connection handshake, we ensure that the peer was not rebooted. */
		if (!(req->rq_state & RQ_NET_OK)) {
			if (req->w.cb) {
				drbd_queue_work(&mdev->tconn->data.work, &req->w);
				rv = req->rq_state & RQ_WRITE ? MR_WRITE : MR_READ;
			}
			break;
		}
		/* else, fall through to BARRIER_ACKED */

	case BARRIER_ACKED:
		if (!(req->rq_state & RQ_WRITE))
			break;

		if (req->rq_state & RQ_NET_PENDING) {
			/* barrier came in before all requests were acked.
			 * this is bad, because if the connection is lost now,
			 * we won't be able to clean them up... */
			dev_err(DEV, "FIXME (BARRIER_ACKED but pending)\n");
			list_move(&req->tl_requests, &mdev->tconn->out_of_sequence_requests);
		}
		if ((req->rq_state & RQ_NET_MASK) != 0) {
			req->rq_state |= RQ_NET_DONE;
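			/* Only requests not expecting an explicit ack (protocol
			 * A writes) still have their size accounted in
			 * ap_in_flight here; B and C were already subtracted
			 * in ack_common. */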
			if (!(req->rq_state & (RQ_EXP_RECEIVE_ACK | RQ_EXP_WRITE_ACK)))
				atomic_sub(req->i.size>>9, &mdev->ap_in_flight);
		}
		_req_may_be_done(req, m); /* Allowed while state.susp */
		break;

	case DATA_RECEIVED:
		D_ASSERT(req->rq_state & RQ_NET_PENDING);
		dec_ap_pending(mdev);
		req->rq_state &= ~RQ_NET_PENDING;
		req->rq_state |= (RQ_NET_OK|RQ_NET_DONE);
		_req_may_be_done_not_susp(req, m);
		break;
	}

	return rv;
}

/* we may do a local read if:
 * - we are consistent (of course),
 * - or we are generally inconsistent,
 *   BUT we are still/already IN SYNC for this area.
 *   since size may be bigger than BM_BLOCK_SIZE,
 *   we may need to check several bits.
 */
static bool drbd_may_do_local_read(struct drbd_conf *mdev, sector_t sector, int size)
{
	unsigned long sbnr, ebnr;
	sector_t esector, nr_sectors;

	if (mdev->state.disk == D_UP_TO_DATE)
		return true;
	if (mdev->state.disk != D_INCONSISTENT)
		return false;
	esector = sector + (size >> 9) - 1;
	nr_sectors = drbd_get_capacity(mdev->this_bdev);
	D_ASSERT(sector  < nr_sectors);
	D_ASSERT(esector < nr_sectors);

	sbnr = BM_SECT_TO_BIT(sector);
	ebnr = BM_SECT_TO_BIT(esector);

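	/* Each bitmap bit covers one BM_BLOCK_SIZE chunk (4 KiB, i.e. 8
	 * sectors), so e.g. a 16 KiB read starting at sector 0 checks
	 * bits 0..3; it may be served locally only if all of them are
	 * clear (in sync). */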
	return drbd_bm_count_bits(mdev, sbnr, ebnr) == 0;
}

static bool remote_due_to_read_balancing(struct drbd_conf *mdev, sector_t sector)
{
	enum drbd_read_balancing rbm;
	struct backing_dev_info *bdi;
	int stripe_shift;

	if (mdev->state.pdsk < D_UP_TO_DATE)
		return false;

	rcu_read_lock();
	rbm = rcu_dereference(mdev->ldev->disk_conf)->read_balancing;
	rcu_read_unlock();

	switch (rbm) {
	case RB_CONGESTED_REMOTE:
		bdi = &mdev->ldev->backing_bdev->bd_disk->queue->backing_dev_info;
		return bdi_read_congested(bdi);
	case RB_LEAST_PENDING:
		return atomic_read(&mdev->local_cnt) >
			atomic_read(&mdev->ap_pending_cnt) + atomic_read(&mdev->rs_pending_cnt);
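	/* Striping: stripe_shift is log2 of the stripe size in bytes
	 * (15 for 32K ... 20 for 1M); sectors are 512 = 1<<9 bytes, so
	 * sector >> (stripe_shift - 9) is the stripe number, and its
	 * lowest bit alternates reads between the local and remote disk.
	 * E.g. with 32K stripes, sectors 0..63 are read locally,
	 * 64..127 remotely, and so on. */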
	case RB_32K_STRIPING:  /* stripe_shift = 15 */
	case RB_64K_STRIPING:
	case RB_128K_STRIPING:
	case RB_256K_STRIPING:
	case RB_512K_STRIPING:
	case RB_1M_STRIPING:   /* stripe_shift = 20 */
		stripe_shift = (rbm - RB_32K_STRIPING + 15);
		return (sector >> (stripe_shift - 9)) & 1;
	case RB_ROUND_ROBIN:
		return test_and_change_bit(READ_BALANCE_RR, &mdev->flags);
	case RB_PREFER_REMOTE:
		return true;
	case RB_PREFER_LOCAL:
	default:
		return false;
	}
}

/*
 * complete_conflicting_writes  -  wait for any conflicting write requests
 *
 * The write_requests tree contains all active write requests which we
 * currently know about.  Wait for any requests to complete which conflict with
 * the new one.
 */
static int complete_conflicting_writes(struct drbd_conf *mdev,
				       sector_t sector, int size)
{
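	/* Note: drbd_wait_misc() drops req_lock while it sleeps and
	 * re-acquires it before returning, so the overlap search must be
	 * repeated on each iteration. */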
	for(;;) {
		struct drbd_interval *i;
		int err;

		i = drbd_find_overlap(&mdev->write_requests, sector, size);
		if (!i)
			return 0;
		err = drbd_wait_misc(mdev, i);
		if (err)
			return err;
	}
}

int __drbd_make_request(struct drbd_conf *mdev, struct bio *bio, unsigned long start_time)
{
	const int rw = bio_rw(bio);
	const int size = bio->bi_size;
	const sector_t sector = bio->bi_sector;
	struct drbd_tl_epoch *b = NULL;
	struct drbd_request *req;
	struct net_conf *nc;
	int local, remote, send_oos = 0;
	int err;
	int ret = 0;

	/* allocate outside of all locks; */
	req = drbd_req_new(mdev, bio);
	if (!req) {
		dec_ap_bio(mdev);
		/* only pass the error to the upper layers.
		 * if user cannot handle io errors, that's not our business. */
		dev_err(DEV, "could not kmalloc() req\n");
		bio_endio(bio, -ENOMEM);
		return 0;
	}
	req->start_time = start_time;

	local = get_ldev(mdev);
	if (!local) {
		bio_put(req->private_bio); /* or we get a bio leak */
		req->private_bio = NULL;
	}
	if (rw == WRITE) {
		remote = 1;
	} else {
		/* READ || READA */
		if (local) {
			if (!drbd_may_do_local_read(mdev, sector, size) ||
			    remote_due_to_read_balancing(mdev, sector)) {
				/* we could kick the syncer to
				 * sync this extent asap, wait for
				 * it, then continue locally.
				 * Or just issue the request remotely.
				 */
				local = 0;
				bio_put(req->private_bio);
				req->private_bio = NULL;
				put_ldev(mdev);
			}
		}
		remote = !local && mdev->state.pdsk >= D_UP_TO_DATE;
	}

	/* If we have a disk, but a READA request is mapped to remote,
	 * we are R_PRIMARY, D_INCONSISTENT, SyncTarget.
	 * Just fail that READA request right here.
	 *
	 * THINK: maybe fail all READA when not local?
	 *        or make this configurable...
	 *        if network is slow, READA won't do any good.
	 */
	if (rw == READA && mdev->state.disk >= D_INCONSISTENT && !local) {
		err = -EWOULDBLOCK;
		goto fail_and_free_req;
	}

	/* For WRITES going to the local disk, grab a reference on the target
	 * extent.  This waits for any resync activity in the corresponding
	 * resync extent to finish, and, if necessary, pulls in the target
	 * extent into the activity log, which involves further disk io because
	 * of transactional on-disk meta data updates. */
	if (rw == WRITE && local && !test_bit(AL_SUSPENDED, &mdev->flags)) {
		req->rq_state |= RQ_IN_ACT_LOG;
		drbd_al_begin_io(mdev, &req->i);
	}

	remote = remote && drbd_should_do_remote(mdev->state);
	send_oos = rw == WRITE && drbd_should_send_out_of_sync(mdev->state);
	D_ASSERT(!(remote && send_oos));

	if (!(local || remote) && !drbd_suspended(mdev)) {
		if (__ratelimit(&drbd_ratelimit_state))
			dev_err(DEV, "IO ERROR: neither local nor remote disk\n");
		err = -EIO;
		goto fail_free_complete;
	}

	/* For WRITE request, we have to make sure that we have an
	 * unused_spare_tle, in case we need to start a new epoch.
	 * I try to be smart and avoid to pre-allocate always "just in case",
	 * but there is a race between testing the bit and pointer outside the
	 * spinlock, and grabbing the spinlock.
	 * if we lost that race, we retry.  */
	if (rw == WRITE && (remote || send_oos) &&
	    mdev->tconn->unused_spare_tle == NULL &&
	    test_bit(CREATE_BARRIER, &mdev->tconn->flags)) {
allocate_barrier:
		b = kmalloc(sizeof(struct drbd_tl_epoch), GFP_NOIO);
		if (!b) {
			dev_err(DEV, "Failed to alloc barrier.\n");
			err = -ENOMEM;
			goto fail_free_complete;
		}
	}

	/* GOOD, everything prepared, grab the spin_lock */
	spin_lock_irq(&mdev->tconn->req_lock);

	if (rw == WRITE) {
		err = complete_conflicting_writes(mdev, sector, size);
		if (err) {
			if (err != -ERESTARTSYS)
				_conn_request_state(mdev->tconn,
						    NS(conn, C_TIMEOUT),
						    CS_HARD);
			spin_unlock_irq(&mdev->tconn->req_lock);
			err = -EIO;
			goto fail_free_complete;
		}
	}

	if (drbd_suspended(mdev)) {
		/* If we got suspended, use the retry mechanism in
		   drbd_make_request() to restart processing of this
		   bio. In the next call to drbd_make_request
		   we sleep in inc_ap_bio() */
		ret = 1;
		spin_unlock_irq(&mdev->tconn->req_lock);
		goto fail_free_complete;
	}

	if (remote || send_oos) {
		remote = drbd_should_do_remote(mdev->state);
		send_oos = rw == WRITE && drbd_should_send_out_of_sync(mdev->state);
		D_ASSERT(!(remote && send_oos));

		if (!(remote || send_oos))
			dev_warn(DEV, "lost connection while grabbing the req_lock!\n");
		if (!(local || remote)) {
			dev_err(DEV, "IO ERROR: neither local nor remote disk\n");
			spin_unlock_irq(&mdev->tconn->req_lock);
			err = -EIO;
			goto fail_free_complete;
		}
	}

	if (b && mdev->tconn->unused_spare_tle == NULL) {
		mdev->tconn->unused_spare_tle = b;
		b = NULL;
	}
	if (rw == WRITE && (remote || send_oos) &&
	    mdev->tconn->unused_spare_tle == NULL &&
	    test_bit(CREATE_BARRIER, &mdev->tconn->flags)) {
		/* someone closed the current epoch
		 * while we were grabbing the spinlock */
		spin_unlock_irq(&mdev->tconn->req_lock);
		goto allocate_barrier;
	}


	/* Update disk stats */
	_drbd_start_io_acct(mdev, req, bio);

	/* _maybe_start_new_epoch(mdev);
	 * If we need to generate a write barrier packet, we have to add the
	 * new epoch (barrier) object, and queue the barrier packet for sending,
	 * and queue the req's data after it _within the same lock_, otherwise
	 * we have race conditions where the reorder domains could be mixed up,
	 *
	 * Even read requests may start a new epoch and queue the corresponding
	 * barrier packet.  To get the write ordering right, we only have to
	 * make sure that, if this is a write request and it triggered a
	 * barrier packet, this request is queued within the same spinlock. */
	if ((remote || send_oos) && mdev->tconn->unused_spare_tle &&
	    test_and_clear_bit(CREATE_BARRIER, &mdev->tconn->flags)) {
		_tl_add_barrier(mdev->tconn, mdev->tconn->unused_spare_tle);
		mdev->tconn->unused_spare_tle = NULL;
	} else {
		D_ASSERT(!(remote && rw == WRITE &&
			   test_bit(CREATE_BARRIER, &mdev->tconn->flags)));
	}

	/* NOTE
	 * Actually, 'local' may be wrong here already, since we may have failed
	 * to write to the meta data, and may become wrong anytime because of
	 * local io-error for some other request, which would lead to us
	 * "detaching" the local disk.
	 *
	 * 'remote' may become wrong any time because the network could fail.
	 *
	 * This is a harmless race condition, though, since it is handled
	 * correctly at the appropriate places; so it just defers the failure
	 * of the respective operation.
	 */

	/* mark them early for readability.
	 * this just sets some state flags. */
	if (remote)
		_req_mod(req, TO_BE_SENT);
	if (local)
		_req_mod(req, TO_BE_SUBMITTED);

	list_add_tail(&req->tl_requests, &mdev->tconn->newest_tle->requests);

	/* NOTE remote first: to get the concurrent write detection right,
	 * we must register the request before start of local IO.  */
	if (remote) {
		/* either WRITE and C_CONNECTED,
		 * or READ, and no local disk,
		 * or READ, but not in sync.
		 */
		_req_mod(req, (rw == WRITE)
				? QUEUE_FOR_NET_WRITE
				: QUEUE_FOR_NET_READ);
	}
	if (send_oos && drbd_set_out_of_sync(mdev, sector, size))
		_req_mod(req, QUEUE_FOR_SEND_OOS);

	rcu_read_lock();
	nc = rcu_dereference(mdev->tconn->net_conf);
	if (remote &&
	    nc->on_congestion != OC_BLOCK && mdev->tconn->agreed_pro_version >= 96) {
		int congested = 0;

		if (nc->cong_fill &&
		    atomic_read(&mdev->ap_in_flight) >= nc->cong_fill) {
			dev_info(DEV, "Congestion-fill threshold reached\n");
			congested = 1;
		}

		if (mdev->act_log->used >= nc->cong_extents) {
			dev_info(DEV, "Congestion-extents threshold reached\n");
			congested = 1;
		}

		if (congested) {
			queue_barrier(mdev); /* last barrier, after mirrored writes */

			if (nc->on_congestion == OC_PULL_AHEAD)
				_drbd_set_state(_NS(mdev, conn, C_AHEAD), 0, NULL);
			else  /*nc->on_congestion == OC_DISCONNECT */
				_drbd_set_state(_NS(mdev, conn, C_DISCONNECTING), 0, NULL);
		}
	}
	rcu_read_unlock();
	spin_unlock_irq(&mdev->tconn->req_lock);
	kfree(b); /* if someone else has beaten us to it... */

	if (local) {
		req->private_bio->bi_bdev = mdev->ldev->backing_bdev;

		/* State may have changed since we grabbed our reference on the
		 * mdev->ldev member. Double check, and short-circuit to endio.
		 * In case the last activity log transaction failed to get on
		 * stable storage, and this is a WRITE, we may not even submit
		 * this bio. */
		if (get_ldev(mdev)) {
			if (drbd_insert_fault(mdev,   rw == WRITE ? DRBD_FAULT_DT_WR
						    : rw == READ  ? DRBD_FAULT_DT_RD
						    :               DRBD_FAULT_DT_RA))
				bio_endio(req->private_bio, -EIO);
			else
				generic_make_request(req->private_bio);
			put_ldev(mdev);
		} else
			bio_endio(req->private_bio, -EIO);
	}

	return 0;

fail_free_complete:
	if (req->rq_state & RQ_IN_ACT_LOG)
		drbd_al_complete_io(mdev, &req->i);
fail_and_free_req:
	if (local) {
		bio_put(req->private_bio);
		req->private_bio = NULL;
		put_ldev(mdev);
	}
	if (!ret)
		bio_endio(bio, err);

	drbd_req_free(req);
	dec_ap_bio(mdev);
	kfree(b);

	return ret;
}

int drbd_make_request(struct request_queue *q, struct bio *bio)
{
	struct drbd_conf *mdev = (struct drbd_conf *) q->queuedata;
	unsigned long start_time;
	start_time = jiffies;

	/*
	 * what we "blindly" assume:
	 */
	D_ASSERT(bio->bi_size > 0);
	D_ASSERT(IS_ALIGNED(bio->bi_size, 512));

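	/* __drbd_make_request() returns nonzero only if the request was
	 * suspended and must be retried; inc_ap_bio() then blocks until
	 * IO may continue. */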
	do {
		inc_ap_bio(mdev);
	} while (__drbd_make_request(mdev, bio, start_time));

	return 0;
}

/* This is called by bio_add_page().
 *
 * q->max_hw_sectors and other global limits are already enforced there.
 *
 * We need to call down to our lower level device,
 * in case it has special restrictions.
 *
 * We also may need to enforce configured max-bio-bvecs limits.
 *
 * As long as the BIO is empty we have to allow at least one bvec,
 * regardless of size and offset, so no need to ask lower levels.
 */
int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct bio_vec *bvec)
{
	struct drbd_conf *mdev = (struct drbd_conf *) q->queuedata;
	unsigned int bio_size = bvm->bi_size;
	int limit = DRBD_MAX_BIO_SIZE;
	int backing_limit;

	if (bio_size && get_ldev(mdev)) {
		struct request_queue * const b =
			mdev->ldev->backing_bdev->bd_disk->queue;
		if (b->merge_bvec_fn) {
			backing_limit = b->merge_bvec_fn(b, bvm, bvec);
			limit = min(limit, backing_limit);
		}
		put_ldev(mdev);
	}
	return limit;
}

void request_timer_fn(unsigned long data)
{
	struct drbd_conf *mdev = (struct drbd_conf *) data;
	struct drbd_tconn *tconn = mdev->tconn;
	struct drbd_request *req; /* oldest request */
	struct block_device *bdev;
	struct list_head *le;
	struct net_conf *nc;
	unsigned long ent = 0, dt = 0, et, nt; /* effective timeout = ko_count * timeout */

	rcu_read_lock();
	nc = rcu_dereference(tconn->net_conf);
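	/* The net timeout is configured in tenths of a second, hence the
	 * "* HZ/10" conversion to jiffies; e.g. timeout = 60 (6 seconds)
	 * with ko_count = 7 gives an effective timeout of 42 seconds. */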
	ent = nc ? nc->timeout * HZ/10 * nc->ko_count : 0;

	if (get_ldev(mdev)) {
		dt = rcu_dereference(mdev->ldev->disk_conf)->disk_timeout * HZ / 10;
		bdev = mdev->ldev->backing_bdev;
		put_ldev(mdev);
	}
	rcu_read_unlock();

	et = min_not_zero(dt, ent);

	if (!et || (mdev->state.conn < C_WF_REPORT_PARAMS && mdev->state.disk <= D_FAILED))
		return; /* Recurring timer stopped */

	spin_lock_irq(&tconn->req_lock);
	le = &tconn->oldest_tle->requests;
	if (list_empty(le)) {
		spin_unlock_irq(&tconn->req_lock);
		mod_timer(&mdev->request_timer, jiffies + et);
		return;
	}

	le = le->prev;
	req = list_entry(le, struct drbd_request, tl_requests);
	if (ent && req->rq_state & RQ_NET_PENDING) {
		if (time_is_before_eq_jiffies(req->start_time + ent)) {
			dev_warn(DEV, "Remote failed to finish a request within ko-count * timeout\n");
			_drbd_set_state(_NS(mdev, conn, C_TIMEOUT), CS_VERBOSE | CS_HARD, NULL);
		}
	}
	if (dt && req->rq_state & RQ_LOCAL_PENDING && req->private_bio->bi_bdev == bdev) {
		if (time_is_before_eq_jiffies(req->start_time + dt)) {
			dev_warn(DEV, "Local backing device failed to meet the disk-timeout\n");
			__drbd_chk_io_error(mdev, 1);
		}
	}
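	/* Re-arm the timer: one effective timeout after the oldest
	 * request's start time, or one timeout from now if that deadline
	 * already passed (and was handled above). */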
	nt = (time_is_before_eq_jiffies(req->start_time + et) ? jiffies : req->start_time) + et;
	spin_unlock_irq(&tconn->req_lock);
	mod_timer(&mdev->request_timer, nt);
}