// SPDX-License-Identifier: GPL-2.0
/*
 *  Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 *  Basic Transport Functions exploiting Infiniband API
 *
 *  Copyright IBM Corp. 2016
 *
 *  Author(s):  Ursula Braun <ubraun@linux.vnet.ibm.com>
 */

#include <linux/socket.h>
#include <linux/if_vlan.h>
#include <linux/random.h>
#include <linux/workqueue.h>
#include <net/tcp.h>
#include <net/sock.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>

#include "smc.h"
#include "smc_clc.h"
#include "smc_core.h"
#include "smc_ib.h"
#include "smc_wr.h"
#include "smc_llc.h"
#include "smc_cdc.h"
#include "smc_close.h"
#include "smc_ism.h"

#define SMC_LGR_NUM_INCR		256
#define SMC_LGR_FREE_DELAY_SERV		(600 * HZ)
#define SMC_LGR_FREE_DELAY_CLNT		(SMC_LGR_FREE_DELAY_SERV + 10 * HZ)
#define SMC_LGR_FREE_DELAY_FAST		(8 * HZ)

static struct smc_lgr_list smc_lgr_list = {	/* established link groups */
	.lock = __SPIN_LOCK_UNLOCKED(smc_lgr_list.lock),
	.list = LIST_HEAD_INIT(smc_lgr_list.list),
	.num = 0,
};

static void smc_buf_free(struct smc_link_group *lgr, bool is_rmb,
			 struct smc_buf_desc *buf_desc);
44

45 46 47 48 49 50 51 52 53 54 55 56 57
/* return head of link group list and its lock for a given link group */
static inline struct list_head *smc_lgr_list_head(struct smc_link_group *lgr,
						  spinlock_t **lgr_lock)
{
	if (lgr->is_smcd) {
		*lgr_lock = &lgr->smcd->lgr_lock;
		return &lgr->smcd->lgr_list;
	}

	*lgr_lock = &smc_lgr_list.lock;
	return &smc_lgr_list.list;
}

58 59 60 61 62 63
static void smc_lgr_schedule_free_work(struct smc_link_group *lgr)
{
	/* client link group creation always follows the server link group
	 * creation. For client use a somewhat higher removal delay time,
	 * otherwise there is a risk of out-of-sync link groups.
	 */
U
Ursula Braun 已提交
64 65 66 67 68 69
	if (!lgr->freeing && !lgr->freefast) {
		mod_delayed_work(system_wq, &lgr->free_work,
				 (!lgr->is_smcd && lgr->role == SMC_CLNT) ?
						SMC_LGR_FREE_DELAY_CLNT :
						SMC_LGR_FREE_DELAY_SERV);
	}
70 71
}

72 73
void smc_lgr_schedule_free_work_fast(struct smc_link_group *lgr)
{
U
Ursula Braun 已提交
74 75 76 77 78
	if (!lgr->freeing && !lgr->freefast) {
		lgr->freefast = 1;
		mod_delayed_work(system_wq, &lgr->free_work,
				 SMC_LGR_FREE_DELAY_FAST);
	}
79 80
}

81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143
/* Register connection's alert token in our lookup structure.
 * To use rbtrees we have to implement our own insert core.
 * Requires @conns_lock
 * @smc		connection to register
 * Returns 0 on success, != otherwise.
 */
static void smc_lgr_add_alert_token(struct smc_connection *conn)
{
	struct rb_node **link, *parent = NULL;
	u32 token = conn->alert_token_local;

	link = &conn->lgr->conns_all.rb_node;
	while (*link) {
		struct smc_connection *cur = rb_entry(*link,
					struct smc_connection, alert_node);

		parent = *link;
		if (cur->alert_token_local > token)
			link = &parent->rb_left;
		else
			link = &parent->rb_right;
	}
	/* Put the new node there */
	rb_link_node(&conn->alert_node, parent, link);
	rb_insert_color(&conn->alert_node, &conn->lgr->conns_all);
}

/* Register connection in link group by assigning an alert token
 * registered in a search tree.
 * Requires @conns_lock
 * Note that '0' is a reserved value and not assigned.
 */
static void smc_lgr_register_conn(struct smc_connection *conn)
{
	struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
	static atomic_t nexttoken = ATOMIC_INIT(0);

	/* find a new alert_token_local value not yet used by some connection
	 * in this link group
	 */
	sock_hold(&smc->sk); /* sock_put in smc_lgr_unregister_conn() */
	while (!conn->alert_token_local) {
		conn->alert_token_local = atomic_inc_return(&nexttoken);
		/* on collision reset to 0, which forces another iteration */
		if (smc_lgr_find_conn(conn->alert_token_local, conn->lgr))
			conn->alert_token_local = 0;
	}
	smc_lgr_add_alert_token(conn);
	conn->lgr->conns_num++;
}

/* Unregister connection and reset the alert token of the given connection.
 * Caller holds lgr->conns_lock for writing (see smc_lgr_unregister_conn()).
 */
static void __smc_lgr_unregister_conn(struct smc_connection *conn)
{
	struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
	struct smc_link_group *lgr = conn->lgr;

	rb_erase(&conn->alert_node, &lgr->conns_all);
	lgr->conns_num--;
	conn->alert_token_local = 0;
	sock_put(&smc->sk); /* sock_hold in smc_lgr_register_conn() */
}

144
/* Unregister connection from lgr
145 146 147 148 149
 */
static void smc_lgr_unregister_conn(struct smc_connection *conn)
{
	struct smc_link_group *lgr = conn->lgr;

150 151
	if (!lgr)
		return;
152 153 154 155 156
	write_lock_bh(&lgr->conns_lock);
	if (conn->alert_token_local) {
		__smc_lgr_unregister_conn(conn);
	}
	write_unlock_bh(&lgr->conns_lock);
157
	conn->lgr = NULL;
158 159
}

160 161 162 163 164 165 166 167 168 169 170 171 172 173
/* Send delete link, either as client to request the initiation
 * of the DELETE LINK sequence from server; or as server to
 * initiate the delete processing. See smc_llc_rx_delete_link().
 */
static int smc_link_send_delete(struct smc_link *lnk)
{
	if (lnk->state == SMC_LNK_ACTIVE &&
	    !smc_llc_send_delete_link(lnk, SMC_LLC_REQ, true)) {
		smc_llc_link_deleting(lnk);
		return 0;
	}
	return -ENOTCONN;
}

static void smc_lgr_free(struct smc_link_group *lgr);

176 177 178 179 180
static void smc_lgr_free_work(struct work_struct *work)
{
	struct smc_link_group *lgr = container_of(to_delayed_work(work),
						  struct smc_link_group,
						  free_work);
181
	spinlock_t *lgr_lock;
U
Ursula Braun 已提交
182
	struct smc_link *lnk;
183 184
	bool conns;

185 186
	smc_lgr_list_head(lgr, &lgr_lock);
	spin_lock_bh(lgr_lock);
U
Ursula Braun 已提交
187 188 189 190
	if (lgr->freeing) {
		spin_unlock_bh(lgr_lock);
		return;
	}
191 192 193 194
	read_lock_bh(&lgr->conns_lock);
	conns = RB_EMPTY_ROOT(&lgr->conns_all);
	read_unlock_bh(&lgr->conns_lock);
	if (!conns) { /* number of lgr connections is no longer zero */
195
		spin_unlock_bh(lgr_lock);
196 197
		return;
	}
198
	list_del_init(&lgr->list); /* remove from smc_lgr_list */
199

U
Ursula Braun 已提交
200
	lnk = &lgr->lnk[SMC_SINGLE_LINK];
201 202
	if (!lgr->is_smcd && !lgr->terminating)	{
		/* try to send del link msg, on error free lgr immediately */
203 204
		if (lnk->state == SMC_LNK_ACTIVE &&
		    !smc_link_send_delete(lnk)) {
205 206
			/* reschedule in case we never receive a response */
			smc_lgr_schedule_free_work(lgr);
U
Ursula Braun 已提交
207
			spin_unlock_bh(lgr_lock);
208 209 210
			return;
		}
	}
U
Ursula Braun 已提交
211 212 213
	lgr->freeing = 1; /* this instance does the freeing, no new schedule */
	spin_unlock_bh(lgr_lock);
	cancel_delayed_work(&lgr->free_work);
214

U
Ursula Braun 已提交
215 216
	if (!lgr->is_smcd && lnk->state != SMC_LNK_INACTIVE)
		smc_llc_link_inactive(lnk);
217
	if (lgr->is_smcd && !lgr->terminating)
U
Ursula Braun 已提交
218 219
		smc_ism_signal_shutdown(lgr);
	smc_lgr_free(lgr);
220 221
}

222 223 224 225 226
static void smc_lgr_terminate_work(struct work_struct *work)
{
	struct smc_link_group *lgr = container_of(work, struct smc_link_group,
						  terminate_work);

227
	smc_lgr_terminate(lgr, true);
228 229
}

230
/* create a new SMC link group */
231
static int smc_lgr_create(struct smc_sock *smc, struct smc_init_info *ini)
232 233
{
	struct smc_link_group *lgr;
234
	struct list_head *lgr_list;
235
	struct smc_link *lnk;
236
	spinlock_t *lgr_lock;
237 238
	u8 rndvec[3];
	int rc = 0;
U
Ursula Braun 已提交
239
	int i;
240

241
	if (ini->is_smcd && ini->vlan_id) {
242 243
		if (smc_ism_get_vlan(ini->ism_dev, ini->vlan_id)) {
			rc = SMC_CLC_DECL_ISMVLANERR;
244
			goto out;
245
		}
246 247
	}

248 249
	lgr = kzalloc(sizeof(*lgr), GFP_KERNEL);
	if (!lgr) {
250
		rc = SMC_CLC_DECL_MEM;
251
		goto ism_put_vlan;
252
	}
253
	lgr->is_smcd = ini->is_smcd;
254
	lgr->sync_err = 0;
U
Ursula Braun 已提交
255 256 257
	lgr->terminating = 0;
	lgr->freefast = 0;
	lgr->freeing = 0;
258
	lgr->vlan_id = ini->vlan_id;
U
Ursula Braun 已提交
259 260
	rwlock_init(&lgr->sndbufs_lock);
	rwlock_init(&lgr->rmbs_lock);
261
	rwlock_init(&lgr->conns_lock);
U
Ursula Braun 已提交
262 263 264 265
	for (i = 0; i < SMC_RMBE_SIZES; i++) {
		INIT_LIST_HEAD(&lgr->sndbufs[i]);
		INIT_LIST_HEAD(&lgr->rmbs[i]);
	}
266 267
	smc_lgr_list.num += SMC_LGR_NUM_INCR;
	memcpy(&lgr->id, (u8 *)&smc_lgr_list.num, SMC_LGR_ID_SIZE);
268
	INIT_DELAYED_WORK(&lgr->free_work, smc_lgr_free_work);
269
	INIT_WORK(&lgr->terminate_work, smc_lgr_terminate_work);
270
	lgr->conns_all = RB_ROOT;
271
	if (ini->is_smcd) {
272
		/* SMC-D specific settings */
273
		get_device(&ini->ism_dev->dev);
274 275
		lgr->peer_gid = ini->ism_gid;
		lgr->smcd = ini->ism_dev;
276
		lgr_list = &ini->ism_dev->lgr_list;
277
		lgr_lock = &lgr->smcd->lgr_lock;
278
		lgr->peer_shutdown = 0;
279
		atomic_inc(&ini->ism_dev->lgr_cnt);
280 281
	} else {
		/* SMC-R specific settings */
282
		get_device(&ini->ib_dev->ibdev->dev);
283
		lgr->role = smc->listen_smc ? SMC_SERV : SMC_CLNT;
284 285
		memcpy(lgr->peer_systemid, ini->ib_lcl->id_for_peer,
		       SMC_SYSTEMID_LEN);
286 287 288 289 290

		lnk = &lgr->lnk[SMC_SINGLE_LINK];
		/* initialize link */
		lnk->state = SMC_LNK_ACTIVATING;
		lnk->link_id = SMC_SINGLE_LINK;
291 292
		lnk->smcibdev = ini->ib_dev;
		lnk->ibport = ini->ib_port;
293
		lgr_list = &smc_lgr_list.list;
294
		lgr_lock = &smc_lgr_list.lock;
295 296 297 298
		lnk->path_mtu =
			ini->ib_dev->pattr[ini->ib_port - 1].active_mtu;
		if (!ini->ib_dev->initialized)
			smc_ib_setup_per_ibdev(ini->ib_dev);
299 300 301
		get_random_bytes(rndvec, sizeof(rndvec));
		lnk->psn_initial = rndvec[0] + (rndvec[1] << 8) +
			(rndvec[2] << 16);
302
		rc = smc_ib_determine_gid(lnk->smcibdev, lnk->ibport,
303 304
					  ini->vlan_id, lnk->gid,
					  &lnk->sgid_index);
305 306
		if (rc)
			goto free_lgr;
307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322
		rc = smc_llc_link_init(lnk);
		if (rc)
			goto free_lgr;
		rc = smc_wr_alloc_link_mem(lnk);
		if (rc)
			goto clear_llc_lnk;
		rc = smc_ib_create_protection_domain(lnk);
		if (rc)
			goto free_link_mem;
		rc = smc_ib_create_queue_pair(lnk);
		if (rc)
			goto dealloc_pd;
		rc = smc_wr_create_link(lnk);
		if (rc)
			goto destroy_qp;
	}
323
	smc->conn.lgr = lgr;
324
	spin_lock_bh(lgr_lock);
325
	list_add(&lgr->list, lgr_list);
326
	spin_unlock_bh(lgr_lock);
327 328
	return 0;

329 330 331 332 333 334
destroy_qp:
	smc_ib_destroy_queue_pair(lnk);
dealloc_pd:
	smc_ib_dealloc_protection_domain(lnk);
free_link_mem:
	smc_wr_free_link_mem(lnk);
335 336
clear_llc_lnk:
	smc_llc_link_clear(lnk);
337 338
free_lgr:
	kfree(lgr);
339 340 341
ism_put_vlan:
	if (ini->is_smcd && ini->vlan_id)
		smc_ism_put_vlan(ini->ism_dev, ini->vlan_id);
342
out:
343 344 345 346 347 348
	if (rc < 0) {
		if (rc == -ENOMEM)
			rc = SMC_CLC_DECL_MEM;
		else
			rc = SMC_CLC_DECL_INTERR;
	}
349 350 351
	return rc;
}

352 353
static void smc_buf_unuse(struct smc_connection *conn,
			  struct smc_link_group *lgr)
U
Ursula Braun 已提交
354
{
355
	if (conn->sndbuf_desc)
U
Ursula Braun 已提交
356 357
		conn->sndbuf_desc->used = 0;
	if (conn->rmb_desc) {
358
		if (!conn->rmb_desc->regerr) {
359
			if (!lgr->is_smcd && !list_empty(&lgr->list)) {
360 361 362 363 364
				/* unregister rmb with peer */
				smc_llc_do_delete_rkey(
						&lgr->lnk[SMC_SINGLE_LINK],
						conn->rmb_desc);
			}
365
			conn->rmb_desc->used = 0;
366 367 368 369 370 371
		} else {
			/* buf registration failed, reuse not possible */
			write_lock_bh(&lgr->rmbs_lock);
			list_del(&conn->rmb_desc->list);
			write_unlock_bh(&lgr->rmbs_lock);

372
			smc_buf_free(lgr, true, conn->rmb_desc);
373
		}
U
Ursula Braun 已提交
374 375 376
	}
}

377 378 379
/* remove a finished connection from its link group */
void smc_conn_free(struct smc_connection *conn)
{
380 381 382
	struct smc_link_group *lgr = conn->lgr;

	if (!lgr)
383
		return;
384
	if (lgr->is_smcd) {
385 386
		if (!list_empty(&lgr->list))
			smc_ism_unset_conn(conn);
387 388
		tasklet_kill(&conn->rx_tsklet);
	} else {
389
		smc_cdc_tx_dismiss_slots(conn);
390
	}
391 392 393 394
	if (!list_empty(&lgr->list)) {
		smc_lgr_unregister_conn(conn);
		smc_buf_unuse(conn, lgr); /* allow buffer reuse */
	}
395 396 397

	if (!lgr->conns_num)
		smc_lgr_schedule_free_work(lgr);
398 399 400 401 402
}

/* tear down an SMC-R link: LLC state, QP, PD and work request memory */
static void smc_link_clear(struct smc_link *lnk)
{
	lnk->peer_qpn = 0;
	smc_llc_link_clear(lnk);
	smc_ib_modify_qp_reset(lnk);
	smc_wr_free_link(lnk);
	smc_ib_destroy_queue_pair(lnk);
	smc_ib_dealloc_protection_domain(lnk);
	smc_wr_free_link_mem(lnk);
}

411 412
static void smcr_buf_free(struct smc_link_group *lgr, bool is_rmb,
			  struct smc_buf_desc *buf_desc)
U
Ursula Braun 已提交
413
{
414 415
	struct smc_link *lnk = &lgr->lnk[SMC_SINGLE_LINK];

416 417 418 419 420 421 422 423 424
	if (is_rmb) {
		if (buf_desc->mr_rx[SMC_SINGLE_LINK])
			smc_ib_put_memory_region(
					buf_desc->mr_rx[SMC_SINGLE_LINK]);
		smc_ib_buf_unmap_sg(lnk->smcibdev, buf_desc,
				    DMA_FROM_DEVICE);
	} else {
		smc_ib_buf_unmap_sg(lnk->smcibdev, buf_desc,
				    DMA_TO_DEVICE);
U
Ursula Braun 已提交
425
	}
426
	sg_free_table(&buf_desc->sgt[SMC_SINGLE_LINK]);
427 428
	if (buf_desc->pages)
		__free_pages(buf_desc->pages, buf_desc->order);
429
	kfree(buf_desc);
U
Ursula Braun 已提交
430 431
}

432 433 434
static void smcd_buf_free(struct smc_link_group *lgr, bool is_dmb,
			  struct smc_buf_desc *buf_desc)
{
435 436 437
	if (is_dmb) {
		/* restore original buf len */
		buf_desc->len += sizeof(struct smcd_cdc_msg);
438
		smc_ism_unregister_dmb(lgr->smcd, buf_desc);
439
	} else {
440
		kfree(buf_desc->cpu_addr);
441
	}
442 443 444 445 446 447 448 449 450 451 452 453
	kfree(buf_desc);
}

/* dispatch buffer freeing to the transport-specific routine */
static void smc_buf_free(struct smc_link_group *lgr, bool is_rmb,
			 struct smc_buf_desc *buf_desc)
{
	if (!lgr->is_smcd)
		smcr_buf_free(lgr, is_rmb, buf_desc);
	else
		smcd_buf_free(lgr, is_rmb, buf_desc);
}

454
static void __smc_lgr_free_bufs(struct smc_link_group *lgr, bool is_rmb)
U
Ursula Braun 已提交
455
{
456 457
	struct smc_buf_desc *buf_desc, *bf_desc;
	struct list_head *buf_list;
U
Ursula Braun 已提交
458 459 460
	int i;

	for (i = 0; i < SMC_RMBE_SIZES; i++) {
461 462 463 464 465
		if (is_rmb)
			buf_list = &lgr->rmbs[i];
		else
			buf_list = &lgr->sndbufs[i];
		list_for_each_entry_safe(buf_desc, bf_desc, buf_list,
U
Ursula Braun 已提交
466
					 list) {
467
			list_del(&buf_desc->list);
468
			smc_buf_free(lgr, is_rmb, buf_desc);
U
Ursula Braun 已提交
469 470 471 472
		}
	}
}

473 474 475 476 477 478 479 480
/* free all send buffers and rmbs of a link group */
static void smc_lgr_free_bufs(struct smc_link_group *lgr)
{
	/* free send buffers */
	__smc_lgr_free_bufs(lgr, false);
	/* free rmbs */
	__smc_lgr_free_bufs(lgr, true);
}

481
/* remove a link group */
U
Ursula Braun 已提交
482
static void smc_lgr_free(struct smc_link_group *lgr)
483
{
484
	smc_lgr_free_bufs(lgr);
485
	if (lgr->is_smcd) {
486 487 488 489
		if (!lgr->terminating) {
			smc_ism_put_vlan(lgr->smcd, lgr->vlan_id);
			put_device(&lgr->smcd->dev);
		}
490 491
		if (!atomic_dec_return(&lgr->smcd->lgr_cnt))
			wake_up(&lgr->smcd->lgrs_deleted);
492
	} else {
493
		smc_link_clear(&lgr->lnk[SMC_SINGLE_LINK]);
494 495
		put_device(&lgr->lnk[SMC_SINGLE_LINK].smcibdev->ibdev->dev);
	}
496 497 498
	kfree(lgr);
}

499 500
void smc_lgr_forget(struct smc_link_group *lgr)
{
501 502 503 504 505
	struct list_head *lgr_list;
	spinlock_t *lgr_lock;

	lgr_list = smc_lgr_list_head(lgr, &lgr_lock);
	spin_lock_bh(lgr_lock);
506
	/* do not use this link group for new connections */
507 508 509
	if (!list_empty(lgr_list))
		list_del_init(lgr_list);
	spin_unlock_bh(lgr_lock);
510 511
}

512 513 514 515 516 517 518 519 520 521 522 523 524 525
static void smcd_unregister_all_dmbs(struct smc_link_group *lgr)
{
	int i;

	for (i = 0; i < SMC_RMBE_SIZES; i++) {
		struct smc_buf_desc *buf_desc;

		list_for_each_entry(buf_desc, &lgr->rmbs[i], list) {
			buf_desc->len += sizeof(struct smcd_cdc_msg);
			smc_ism_unregister_dmb(lgr->smcd, buf_desc);
		}
	}
}

526 527 528 529 530 531 532 533
static void smc_sk_wake_ups(struct smc_sock *smc)
{
	smc->sk.sk_write_space(&smc->sk);
	smc->sk.sk_data_ready(&smc->sk);
	smc->sk.sk_state_change(&smc->sk);
}

/* kill a connection */
534
static void smc_conn_kill(struct smc_connection *conn, bool soft)
535 536 537
{
	struct smc_sock *smc = container_of(conn, struct smc_sock, conn);

538 539 540 541
	if (conn->lgr->is_smcd && conn->lgr->peer_shutdown)
		conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1;
	else
		smc_close_abort(conn);
542
	conn->killed = 1;
543
	smc->sk.sk_err = ECONNABORTED;
544
	smc_sk_wake_ups(smc);
545 546
	if (conn->lgr->is_smcd) {
		smc_ism_unset_conn(conn);
547 548 549 550
		if (soft)
			tasklet_kill(&conn->rx_tsklet);
		else
			tasklet_unlock_wait(&conn->rx_tsklet);
551
	}
552
	smc_lgr_unregister_conn(conn);
U
Ursula Braun 已提交
553
	smc_close_active_abort(smc);
554 555
}

556 557 558 559 560 561 562 563 564 565 566 567 568 569
static void smc_lgr_cleanup(struct smc_link_group *lgr)
{
	if (lgr->is_smcd) {
		smc_ism_signal_shutdown(lgr);
		smcd_unregister_all_dmbs(lgr);
		smc_ism_put_vlan(lgr->smcd, lgr->vlan_id);
		put_device(&lgr->smcd->dev);
	} else {
		struct smc_link *lnk = &lgr->lnk[SMC_SINGLE_LINK];

		wake_up(&lnk->wr_reg_wait);
	}
}

570
/* terminate link group */
571
static void __smc_lgr_terminate(struct smc_link_group *lgr, bool soft)
572 573
{
	struct smc_connection *conn;
574
	struct smc_sock *smc;
575 576
	struct rb_node *node;

577 578
	if (lgr->terminating)
		return;	/* lgr already terminating */
579 580
	if (!soft)
		cancel_delayed_work_sync(&lgr->free_work);
581
	lgr->terminating = 1;
582 583
	if (!lgr->is_smcd)
		smc_llc_link_inactive(&lgr->lnk[SMC_SINGLE_LINK]);
584

585 586
	/* kill remaining link group connections */
	read_lock_bh(&lgr->conns_lock);
587 588
	node = rb_first(&lgr->conns_all);
	while (node) {
589
		read_unlock_bh(&lgr->conns_lock);
590
		conn = rb_entry(node, struct smc_connection, alert_node);
591
		smc = container_of(conn, struct smc_sock, conn);
U
Ursula Braun 已提交
592
		sock_hold(&smc->sk); /* sock_put below */
593
		lock_sock(&smc->sk);
594
		smc_conn_kill(conn, soft);
595
		release_sock(&smc->sk);
U
Ursula Braun 已提交
596
		sock_put(&smc->sk); /* sock_hold above */
597
		read_lock_bh(&lgr->conns_lock);
598 599
		node = rb_first(&lgr->conns_all);
	}
600
	read_unlock_bh(&lgr->conns_lock);
601
	smc_lgr_cleanup(lgr);
602 603 604 605
	if (soft)
		smc_lgr_schedule_free_work_fast(lgr);
	else
		smc_lgr_free(lgr);
606 607
}

608 609 610 611 612
/* unlink and terminate link group
 * @soft: true if link group shutdown can take its time
 *	  false if immediate link group shutdown is required
 */
void smc_lgr_terminate(struct smc_link_group *lgr, bool soft)
613
{
614 615 616 617
	spinlock_t *lgr_lock;

	smc_lgr_list_head(lgr, &lgr_lock);
	spin_lock_bh(lgr_lock);
618 619 620 621
	if (lgr->terminating) {
		spin_unlock_bh(lgr_lock);
		return;	/* lgr already terminating */
	}
622 623
	if (!soft)
		lgr->freeing = 1;
624
	list_del_init(&lgr->list);
625
	spin_unlock_bh(lgr_lock);
626
	__smc_lgr_terminate(lgr, soft);
627 628
}

629 630 631 632
/* Called when IB port is terminated */
void smc_port_terminate(struct smc_ib_device *smcibdev, u8 ibport)
{
	struct smc_link_group *lgr, *l;
633
	LIST_HEAD(lgr_free_list);
634

635
	spin_lock_bh(&smc_lgr_list.lock);
636
	list_for_each_entry_safe(lgr, l, &smc_lgr_list.list, list) {
637 638
		if (!lgr->is_smcd &&
		    lgr->lnk[SMC_SINGLE_LINK].smcibdev == smcibdev &&
639
		    lgr->lnk[SMC_SINGLE_LINK].ibport == ibport)
640
			list_move(&lgr->list, &lgr_free_list);
641
	}
642
	spin_unlock_bh(&smc_lgr_list.lock);
643 644 645

	list_for_each_entry_safe(lgr, l, &lgr_free_list, list) {
		list_del_init(&lgr->list);
646
		__smc_lgr_terminate(lgr, true);
647
	}
648 649
}

650
/* Called when peer lgr shutdown (regularly or abnormally) is received */
H
Hans Wippel 已提交
651
void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid, unsigned short vlan)
652 653 654 655 656
{
	struct smc_link_group *lgr, *l;
	LIST_HEAD(lgr_free_list);

	/* run common cleanup function and build free list */
657
	spin_lock_bh(&dev->lgr_lock);
658 659
	list_for_each_entry_safe(lgr, l, &dev->lgr_list, list) {
		if ((!peer_gid || lgr->peer_gid == peer_gid) &&
H
Hans Wippel 已提交
660
		    (vlan == VLAN_VID_MASK || lgr->vlan_id == vlan)) {
661 662
			if (peer_gid) /* peer triggered termination */
				lgr->peer_shutdown = 1;
663 664 665
			list_move(&lgr->list, &lgr_free_list);
		}
	}
666
	spin_unlock_bh(&dev->lgr_lock);
667 668 669 670

	/* cancel the regular free workers and actually free lgrs */
	list_for_each_entry_safe(lgr, l, &lgr_free_list, list) {
		list_del_init(&lgr->list);
671
		schedule_work(&lgr->terminate_work);
672 673 674
	}
}

675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690
/* Called when an SMCD device is removed or the smc module is unloaded */
void smc_smcd_terminate_all(struct smcd_dev *smcd)
{
	struct smc_link_group *lgr, *lg;
	LIST_HEAD(lgr_free_list);

	spin_lock_bh(&smcd->lgr_lock);
	list_splice_init(&smcd->lgr_list, &lgr_free_list);
	list_for_each_entry(lgr, &lgr_free_list, list)
		lgr->freeing = 1;
	spin_unlock_bh(&smcd->lgr_lock);

	list_for_each_entry_safe(lgr, lg, &lgr_free_list, list) {
		list_del_init(&lgr->list);
		__smc_lgr_terminate(lgr, false);
	}
691 692 693

	if (atomic_read(&smcd->lgr_cnt))
		wait_event(smcd->lgrs_deleted, !atomic_read(&smcd->lgr_cnt));
694 695
}

696 697 698
/* Determine vlan of internal TCP socket.
 * @vlan_id: address to store the determined vlan id into
 */
699
int smc_vlan_by_tcpsk(struct socket *clcsock, struct smc_init_info *ini)
700 701
{
	struct dst_entry *dst = sk_dst_get(clcsock->sk);
702 703
	struct net_device *ndev;
	int i, nest_lvl, rc = 0;
704

705
	ini->vlan_id = 0;
706 707 708 709 710 711 712 713 714
	if (!dst) {
		rc = -ENOTCONN;
		goto out;
	}
	if (!dst->dev) {
		rc = -ENODEV;
		goto out_rel;
	}

715 716
	ndev = dst->dev;
	if (is_vlan_dev(ndev)) {
717
		ini->vlan_id = vlan_dev_vlan_id(ndev);
718 719 720 721
		goto out_rel;
	}

	rtnl_lock();
722
	nest_lvl = ndev->lower_level;
723 724 725 726 727 728 729 730
	for (i = 0; i < nest_lvl; i++) {
		struct list_head *lower = &ndev->adj_list.lower;

		if (list_empty(lower))
			break;
		lower = lower->next;
		ndev = (struct net_device *)netdev_lower_get_next(ndev, &lower);
		if (is_vlan_dev(ndev)) {
731
			ini->vlan_id = vlan_dev_vlan_id(ndev);
732 733 734 735
			break;
		}
	}
	rtnl_unlock();
736 737 738 739 740 741 742

out_rel:
	dst_release(dst);
out:
	return rc;
}

743 744
static bool smcr_lgr_match(struct smc_link_group *lgr,
			   struct smc_clc_msg_local *lcl,
745
			   enum smc_lgr_role role, u32 clcqpn)
746
{
747 748 749 750 751 752
	return !memcmp(lgr->peer_systemid, lcl->id_for_peer,
		       SMC_SYSTEMID_LEN) &&
		!memcmp(lgr->lnk[SMC_SINGLE_LINK].peer_gid, &lcl->gid,
			SMC_GID_SIZE) &&
		!memcmp(lgr->lnk[SMC_SINGLE_LINK].peer_mac, lcl->mac,
			sizeof(lcl->mac)) &&
753 754 755
		lgr->role == role &&
		(lgr->role == SMC_SERV ||
		 lgr->lnk[SMC_SINGLE_LINK].peer_qpn == clcqpn);
756
}
757

758 759 760 761
static bool smcd_lgr_match(struct smc_link_group *lgr,
			   struct smcd_dev *smcismdev, u64 peer_gid)
{
	return lgr->peer_gid == peer_gid && lgr->smcd == smcismdev;
762 763 764
}

/* create a new SMC connection (and a new link group if necessary) */
765
int smc_conn_create(struct smc_sock *smc, struct smc_init_info *ini)
766 767
{
	struct smc_connection *conn = &smc->conn;
768
	struct list_head *lgr_list;
769 770
	struct smc_link_group *lgr;
	enum smc_lgr_role role;
771
	spinlock_t *lgr_lock;
772 773
	int rc = 0;

774
	lgr_list = ini->is_smcd ? &ini->ism_dev->lgr_list : &smc_lgr_list.list;
775
	lgr_lock = ini->is_smcd ? &ini->ism_dev->lgr_lock : &smc_lgr_list.lock;
776
	ini->cln_first_contact = SMC_FIRST_CONTACT;
777
	role = smc->listen_smc ? SMC_SERV : SMC_CLNT;
778
	if (role == SMC_CLNT && ini->srv_first_contact)
779 780 781 782
		/* create new link group as well */
		goto create;

	/* determine if an existing link group can be reused */
783
	spin_lock_bh(lgr_lock);
784
	list_for_each_entry(lgr, lgr_list, list) {
785
		write_lock_bh(&lgr->conns_lock);
786 787 788
		if ((ini->is_smcd ?
		     smcd_lgr_match(lgr, ini->ism_dev, ini->ism_gid) :
		     smcr_lgr_match(lgr, ini->ib_lcl, role, ini->ib_clcqpn)) &&
789
		    !lgr->sync_err &&
790
		    lgr->vlan_id == ini->vlan_id &&
791 792
		    (role == SMC_CLNT ||
		     lgr->conns_num < SMC_RMBS_PER_LGR_MAX)) {
793
			/* link group found */
794
			ini->cln_first_contact = SMC_REUSE_CONTACT;
795 796
			conn->lgr = lgr;
			smc_lgr_register_conn(conn); /* add smc conn to lgr */
797 798
			if (delayed_work_pending(&lgr->free_work))
				cancel_delayed_work(&lgr->free_work);
799 800 801 802 803
			write_unlock_bh(&lgr->conns_lock);
			break;
		}
		write_unlock_bh(&lgr->conns_lock);
	}
804
	spin_unlock_bh(lgr_lock);
805

806
	if (role == SMC_CLNT && !ini->srv_first_contact &&
807
	    ini->cln_first_contact == SMC_FIRST_CONTACT) {
808 809 810 811
		/* Server reuses a link group, but Client wants to start
		 * a new one
		 * send out_of_sync decline, reason synchr. error
		 */
812
		return SMC_CLC_DECL_SYNCERR;
813 814 815
	}

create:
816
	if (ini->cln_first_contact == SMC_FIRST_CONTACT) {
817
		rc = smc_lgr_create(smc, ini);
818 819
		if (rc)
			goto out;
820 821
		lgr = conn->lgr;
		write_lock_bh(&lgr->conns_lock);
822
		smc_lgr_register_conn(conn); /* add smc conn to lgr */
823
		write_unlock_bh(&lgr->conns_lock);
824
	}
825
	conn->local_tx_ctrl.common.type = SMC_CDC_MSG_TYPE;
826
	conn->local_tx_ctrl.len = SMC_WR_TX_SIZE;
S
Stefan Raspl 已提交
827
	conn->urg_state = SMC_URG_READ;
828
	if (ini->is_smcd) {
829 830 831
		conn->rx_off = sizeof(struct smcd_cdc_msg);
		smcd_cdc_rx_init(conn); /* init tasklet for this conn */
	}
832 833 834
#ifndef KERNEL_HAS_ATOMIC64
	spin_lock_init(&conn->acurs_lock);
#endif
835 836

out:
837
	return rc;
838
}
U
Ursula Braun 已提交
839

840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866
/* convert the RMB size into the compressed notation - minimum 16K.
 * In contrast to plain ilog2, this rounds towards the next power of 2,
 * so the socket application gets at least its desired sndbuf / rcvbuf size.
 */
static u8 smc_compress_bufsize(int size)
{
	u8 compressed;

	if (size <= SMC_BUF_MIN_SIZE)
		return 0;

	size = (size - 1) >> 14;
	compressed = ilog2(size) + 1;
	if (compressed >= SMC_RMBE_SIZES)
		compressed = SMC_RMBE_SIZES - 1;
	return compressed;
}

/* convert the RMB size from compressed notation into integer */
int smc_uncompress_bufsize(u8 compressed)
{
	u32 nbytes = 1U << (compressed + 14); /* 0 -> 16KB, 1 -> 32KB, ... */

	return (int)nbytes;
}

867 868
/* try to reuse a sndbuf or rmb description slot for a certain
 * buffer size; if not available, return NULL
U
Ursula Braun 已提交
869
 */
870 871 872
static struct smc_buf_desc *smc_buf_get_slot(int compressed_bufsize,
					     rwlock_t *lock,
					     struct list_head *buf_list)
U
Ursula Braun 已提交
873
{
874
	struct smc_buf_desc *buf_slot;
U
Ursula Braun 已提交
875

876 877 878 879 880
	read_lock_bh(lock);
	list_for_each_entry(buf_slot, buf_list, list) {
		if (cmpxchg(&buf_slot->used, 0, 1) == 0) {
			read_unlock_bh(lock);
			return buf_slot;
U
Ursula Braun 已提交
881 882
		}
	}
883
	read_unlock_bh(lock);
U
Ursula Braun 已提交
884 885 886
	return NULL;
}

U
Ursula Braun 已提交
887 888 889 890 891 892 893 894 895
/* one of the conditions for announcing a receiver's current window size is
 * that it "results in a minimum increase in the window size of 10% of the
 * receive buffer space" [RFC7609]
 */
static inline int smc_rmb_wnd_update_limit(int rmbe_size)
{
	return min_t(int, rmbe_size / 10, SOCK_MIN_SNDBUF / 2);
}

896 897
static struct smc_buf_desc *smcr_new_buf_create(struct smc_link_group *lgr,
						bool is_rmb, int bufsize)
898 899 900 901 902 903 904 905 906 907
{
	struct smc_buf_desc *buf_desc;
	struct smc_link *lnk;
	int rc;

	/* try to alloc a new buffer */
	buf_desc = kzalloc(sizeof(*buf_desc), GFP_KERNEL);
	if (!buf_desc)
		return ERR_PTR(-ENOMEM);

908 909 910 911 912 913
	buf_desc->order = get_order(bufsize);
	buf_desc->pages = alloc_pages(GFP_KERNEL | __GFP_NOWARN |
				      __GFP_NOMEMALLOC | __GFP_COMP |
				      __GFP_NORETRY | __GFP_ZERO,
				      buf_desc->order);
	if (!buf_desc->pages) {
914 915 916
		kfree(buf_desc);
		return ERR_PTR(-EAGAIN);
	}
917
	buf_desc->cpu_addr = (void *)page_address(buf_desc->pages);
918 919 920 921 922 923

	/* build the sg table from the pages */
	lnk = &lgr->lnk[SMC_SINGLE_LINK];
	rc = sg_alloc_table(&buf_desc->sgt[SMC_SINGLE_LINK], 1,
			    GFP_KERNEL);
	if (rc) {
924
		smc_buf_free(lgr, is_rmb, buf_desc);
925 926 927 928 929 930 931 932 933 934
		return ERR_PTR(rc);
	}
	sg_set_buf(buf_desc->sgt[SMC_SINGLE_LINK].sgl,
		   buf_desc->cpu_addr, bufsize);

	/* map sg table to DMA address */
	rc = smc_ib_buf_map_sg(lnk->smcibdev, buf_desc,
			       is_rmb ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
	/* SMC protocol depends on mapping to one DMA address only */
	if (rc != 1)  {
935
		smc_buf_free(lgr, is_rmb, buf_desc);
936 937 938 939 940 941 942 943 944 945
		return ERR_PTR(-EAGAIN);
	}

	/* create a new memory region for the RMB */
	if (is_rmb) {
		rc = smc_ib_get_memory_region(lnk->roce_pd,
					      IB_ACCESS_REMOTE_WRITE |
					      IB_ACCESS_LOCAL_WRITE,
					      buf_desc);
		if (rc) {
946
			smc_buf_free(lgr, is_rmb, buf_desc);
947 948 949 950
			return ERR_PTR(rc);
		}
	}

951
	buf_desc->len = bufsize;
952 953 954
	return buf_desc;
}

955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975
#define SMCD_DMBE_SIZES		7 /* 0 -> 16KB, 1 -> 32KB, .. 6 -> 1MB */

static struct smc_buf_desc *smcd_new_buf_create(struct smc_link_group *lgr,
						bool is_dmb, int bufsize)
{
	struct smc_buf_desc *buf_desc;
	int rc;

	if (smc_compress_bufsize(bufsize) > SMCD_DMBE_SIZES)
		return ERR_PTR(-EAGAIN);

	/* try to alloc a new DMB */
	buf_desc = kzalloc(sizeof(*buf_desc), GFP_KERNEL);
	if (!buf_desc)
		return ERR_PTR(-ENOMEM);
	if (is_dmb) {
		rc = smc_ism_register_dmb(lgr, bufsize, buf_desc);
		if (rc) {
			kfree(buf_desc);
			return ERR_PTR(-EAGAIN);
		}
976 977 978
		buf_desc->pages = virt_to_page(buf_desc->cpu_addr);
		/* CDC header stored in buf. So, pretend it was smaller */
		buf_desc->len = bufsize - sizeof(struct smcd_cdc_msg);
979 980 981 982 983 984 985 986 987 988 989 990 991 992
	} else {
		buf_desc->cpu_addr = kzalloc(bufsize, GFP_KERNEL |
					     __GFP_NOWARN | __GFP_NORETRY |
					     __GFP_NOMEMALLOC);
		if (!buf_desc->cpu_addr) {
			kfree(buf_desc);
			return ERR_PTR(-EAGAIN);
		}
		buf_desc->len = bufsize;
	}
	return buf_desc;
}

/* create and register a send buffer (!is_rmb) or receive buffer (is_rmb)
 * for a connection; starts at a size derived from the socket buffer size
 * and retries with halved (compressed-index decremented) sizes until an
 * allocation succeeds or all sizes are exhausted
 */
static int __smc_buf_create(struct smc_sock *smc, bool is_smcd, bool is_rmb)
{
	struct smc_buf_desc *buf_desc = ERR_PTR(-ENOMEM);
	struct smc_connection *conn = &smc->conn;
	struct smc_link_group *lgr = conn->lgr;
	struct list_head *buf_list;
	int bufsize, bufsize_short;
	int sk_buf_size;
	rwlock_t *lock;

	if (is_rmb)
		/* use socket recv buffer size (w/o overhead) as start value */
		sk_buf_size = smc->sk.sk_rcvbuf / 2;
	else
		/* use socket send buffer size (w/o overhead) as start value */
		sk_buf_size = smc->sk.sk_sndbuf / 2;

	/* walk from the requested size down to the smallest supported one */
	for (bufsize_short = smc_compress_bufsize(sk_buf_size);
	     bufsize_short >= 0; bufsize_short--) {

		/* per-size buffer list of the link group, with its lock */
		if (is_rmb) {
			lock = &lgr->rmbs_lock;
			buf_list = &lgr->rmbs[bufsize_short];
		} else {
			lock = &lgr->sndbufs_lock;
			buf_list = &lgr->sndbufs[bufsize_short];
		}
		bufsize = smc_uncompress_bufsize(bufsize_short);
		/* skip sizes that cannot be covered by one sg entry */
		if ((1 << get_order(bufsize)) > SG_MAX_SINGLE_ALLOC)
			continue;

		/* check for reusable slot in the link group */
		buf_desc = smc_buf_get_slot(bufsize_short, lock, buf_list);
		if (buf_desc) {
			/* clear stale data of the previous user */
			memset(buf_desc->cpu_addr, 0, bufsize);
			break; /* found reusable slot */
		}

		if (is_smcd)
			buf_desc = smcd_new_buf_create(lgr, is_rmb, bufsize);
		else
			buf_desc = smcr_new_buf_create(lgr, is_rmb, bufsize);

		/* -ENOMEM: no point in trying smaller sizes either */
		if (PTR_ERR(buf_desc) == -ENOMEM)
			break;
		/* other errors (e.g. -EAGAIN): retry with a smaller size */
		if (IS_ERR(buf_desc))
			continue;

		buf_desc->used = 1;
		write_lock_bh(lock);
		list_add(&buf_desc->list, buf_list);
		write_unlock_bh(lock);
		break; /* found */
	}

	if (IS_ERR(buf_desc))
		return -ENOMEM;

	if (is_rmb) {
		conn->rmb_desc = buf_desc;
		conn->rmbe_size_short = bufsize_short;
		smc->sk.sk_rcvbuf = bufsize * 2;
		atomic_set(&conn->bytes_to_rcv, 0);
		conn->rmbe_update_limit =
			smc_rmb_wnd_update_limit(buf_desc->len);
		if (is_smcd)
			smc_ism_set_conn(conn); /* map RMB/smcd_dev to conn */
	} else {
		conn->sndbuf_desc = buf_desc;
		smc->sk.sk_sndbuf = bufsize * 2;
		atomic_set(&conn->sndbuf_space, bufsize);
	}
	return 0;
}

1067 1068 1069 1070
void smc_sndbuf_sync_sg_for_cpu(struct smc_connection *conn)
{
	struct smc_link_group *lgr = conn->lgr;

1071 1072
	if (!conn->lgr || conn->lgr->is_smcd)
		return;
1073 1074 1075 1076 1077 1078 1079 1080
	smc_ib_sync_sg_for_cpu(lgr->lnk[SMC_SINGLE_LINK].smcibdev,
			       conn->sndbuf_desc, DMA_TO_DEVICE);
}

void smc_sndbuf_sync_sg_for_device(struct smc_connection *conn)
{
	struct smc_link_group *lgr = conn->lgr;

1081 1082
	if (!conn->lgr || conn->lgr->is_smcd)
		return;
1083 1084 1085 1086 1087 1088 1089 1090
	smc_ib_sync_sg_for_device(lgr->lnk[SMC_SINGLE_LINK].smcibdev,
				  conn->sndbuf_desc, DMA_TO_DEVICE);
}

void smc_rmb_sync_sg_for_cpu(struct smc_connection *conn)
{
	struct smc_link_group *lgr = conn->lgr;

1091 1092
	if (!conn->lgr || conn->lgr->is_smcd)
		return;
1093 1094 1095 1096 1097 1098 1099 1100
	smc_ib_sync_sg_for_cpu(lgr->lnk[SMC_SINGLE_LINK].smcibdev,
			       conn->rmb_desc, DMA_FROM_DEVICE);
}

void smc_rmb_sync_sg_for_device(struct smc_connection *conn)
{
	struct smc_link_group *lgr = conn->lgr;

1101 1102
	if (!conn->lgr || conn->lgr->is_smcd)
		return;
1103 1104 1105 1106
	smc_ib_sync_sg_for_device(lgr->lnk[SMC_SINGLE_LINK].smcibdev,
				  conn->rmb_desc, DMA_FROM_DEVICE);
}

1107 1108 1109 1110 1111 1112
/* create the send and receive buffer for an SMC socket;
 * receive buffers are called RMBs;
 * (even though the SMC protocol allows more than one RMB-element per RMB,
 * the Linux implementation uses just one RMB-element per RMB, i.e. uses an
 * extra RMB for every connection in a link group
 */
1113
int smc_buf_create(struct smc_sock *smc, bool is_smcd)
1114 1115 1116 1117
{
	int rc;

	/* create send buffer */
1118
	rc = __smc_buf_create(smc, is_smcd, false);
1119 1120 1121
	if (rc)
		return rc;
	/* create rmb */
1122
	rc = __smc_buf_create(smc, is_smcd, true);
1123
	if (rc)
1124
		smc_buf_free(smc->conn.lgr, false, smc->conn.sndbuf_desc);
1125
	return rc;
U
Ursula Braun 已提交
1126
}
1127 1128 1129 1130 1131 1132 1133 1134 1135 1136 1137 1138

/* find and atomically claim a free rtoken slot of the link group;
 * returns the claimed index, or -ENOSPC if all slots are in use
 */
static inline int smc_rmb_reserve_rtoken_idx(struct smc_link_group *lgr)
{
	int i;

	for_each_clear_bit(i, lgr->rtokens_used_mask, SMC_RMBS_PER_LGR_MAX) {
		/* test_and_set_bit guards against a concurrent claim of
		 * the same slot; on a race, continue with the next one
		 */
		if (!test_and_set_bit(i, lgr->rtokens_used_mask))
			return i;
	}
	return -ENOSPC;
}

1139 1140
/* add a new rtoken from peer */
int smc_rtoken_add(struct smc_link_group *lgr, __be64 nw_vaddr, __be32 nw_rkey)
{
	u64 dma_addr = be64_to_cpu(nw_vaddr);
	u32 rkey = ntohl(nw_rkey);
	int idx;

	/* the peer may resend a known rtoken - reuse its slot then */
	for (idx = 0; idx < SMC_RMBS_PER_LGR_MAX; idx++) {
		if (test_bit(idx, lgr->rtokens_used_mask) &&
		    lgr->rtokens[idx][SMC_SINGLE_LINK].rkey == rkey &&
		    lgr->rtokens[idx][SMC_SINGLE_LINK].dma_addr == dma_addr)
			return idx; /* already in list */
	}
	/* otherwise claim a free slot and store the new rtoken */
	idx = smc_rmb_reserve_rtoken_idx(lgr);
	if (idx < 0)
		return idx;
	lgr->rtokens[idx][SMC_SINGLE_LINK].rkey = rkey;
	lgr->rtokens[idx][SMC_SINGLE_LINK].dma_addr = dma_addr;
	return idx;
}

/* delete an rtoken */
int smc_rtoken_delete(struct smc_link_group *lgr, __be32 nw_rkey)
{
	u32 rkey = ntohl(nw_rkey);
	int idx;

	for (idx = 0; idx < SMC_RMBS_PER_LGR_MAX; idx++) {
		if (!test_bit(idx, lgr->rtokens_used_mask) ||
		    lgr->rtokens[idx][SMC_SINGLE_LINK].rkey != rkey)
			continue;
		/* invalidate the entry before releasing its slot */
		lgr->rtokens[idx][SMC_SINGLE_LINK].rkey = 0;
		lgr->rtokens[idx][SMC_SINGLE_LINK].dma_addr = 0;
		clear_bit(idx, lgr->rtokens_used_mask);
		return 0;
	}
	return -ENOENT;
}

/* save rkey and dma_addr received from peer during clc handshake */
int smc_rmb_rtoken_handling(struct smc_connection *conn,
			    struct smc_clc_msg_accept_confirm *clc)
{
	int idx;

	idx = smc_rtoken_add(conn->lgr, clc->rmb_dma_addr, clc->rmb_rkey);
	conn->rtoken_idx = idx;
	/* propagate an error code, otherwise report success */
	return idx < 0 ? idx : 0;
}
1191

1192 1193 1194 1195 1196 1197 1198 1199 1200 1201 1202 1203 1204 1205 1206 1207 1208 1209 1210 1211 1212
static void smc_core_going_away(void)
{
	struct smc_ib_device *smcibdev;
	struct smcd_dev *smcd;

	spin_lock(&smc_ib_devices.lock);
	list_for_each_entry(smcibdev, &smc_ib_devices.list, list) {
		int i;

		for (i = 0; i < SMC_MAX_PORTS; i++)
			set_bit(i, smcibdev->ports_going_away);
	}
	spin_unlock(&smc_ib_devices.lock);

	spin_lock(&smcd_dev_list.lock);
	list_for_each_entry(smcd, &smcd_dev_list.list, list) {
		smcd->going_away = 1;
	}
	spin_unlock(&smcd_dev_list.lock);
}

1213 1214
/* Clean up all SMC link groups */
static void smc_lgrs_shutdown(void)
{
	struct smc_link_group *lgr, *lg;
	LIST_HEAD(lgr_freeing_list);
	struct smcd_dev *smcd;

	/* first stop creation of new link groups on all devices */
	smc_core_going_away();

	/* move all link groups to a private list, so they can be freed
	 * without holding the global list lock
	 */
	spin_lock_bh(&smc_lgr_list.lock);
	list_splice_init(&smc_lgr_list.list, &lgr_freeing_list);
	spin_unlock_bh(&smc_lgr_list.lock);

	/* terminate all SMC-D usage on every ISM device */
	spin_lock(&smcd_dev_list.lock);
	list_for_each_entry(smcd, &smcd_dev_list.list, list)
		smc_smcd_terminate_all(smcd);
	spin_unlock(&smcd_dev_list.lock);

	list_for_each_entry_safe(lgr, lg, &lgr_freeing_list, list) {
		list_del_init(&lgr->list);
		if (!lgr->is_smcd) {
			struct smc_link *lnk = &lgr->lnk[SMC_SINGLE_LINK];

			/* tell the peer the link goes away, then take the
			 * link down locally
			 */
			if (lnk->state == SMC_LNK_ACTIVE)
				smc_llc_send_delete_link(lnk, SMC_LLC_REQ,
							 false);
			smc_llc_link_inactive(lnk);
		}
		/* make sure no deferred free is still pending before the
		 * link group memory is released
		 */
		cancel_delayed_work_sync(&lgr->free_work);
		smc_lgr_free(lgr); /* free link group */
	}
}
1245 1246 1247 1248 1249 1250

/* Called (from smc_exit) when module is removed */
void smc_core_exit(void)
{
	/* tear down all remaining link groups and their resources */
	smc_lgrs_shutdown();
}