// SPDX-License-Identifier: GPL-2.0
/*
 *  Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 *  Basic Transport Functions exploiting Infiniband API
 *
 *  Copyright IBM Corp. 2016
 *
 *  Author(s):  Ursula Braun <ubraun@linux.vnet.ibm.com>
 */

#include <linux/socket.h>
#include <linux/if_vlan.h>
#include <linux/random.h>
#include <linux/workqueue.h>
#include <linux/wait.h>
#include <linux/reboot.h>
#include <net/tcp.h>
#include <net/sock.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>

#include "smc.h"
#include "smc_clc.h"
#include "smc_core.h"
#include "smc_ib.h"
#include "smc_wr.h"
#include "smc_llc.h"
#include "smc_cdc.h"
#include "smc_close.h"
#include "smc_ism.h"

#define SMC_LGR_NUM_INCR		256
#define SMC_LGR_FREE_DELAY_SERV		(600 * HZ)
#define SMC_LGR_FREE_DELAY_CLNT		(SMC_LGR_FREE_DELAY_SERV + 10 * HZ)
#define SMC_LGR_FREE_DELAY_FAST		(8 * HZ)

static struct smc_lgr_list smc_lgr_list = {	/* established link groups */
	.lock = __SPIN_LOCK_UNLOCKED(smc_lgr_list.lock),
	.list = LIST_HEAD_INIT(smc_lgr_list.list),
	.num = 0,
};

static atomic_t lgr_cnt = ATOMIC_INIT(0); /* number of existing link groups */
static DECLARE_WAIT_QUEUE_HEAD(lgrs_deleted);

struct smc_ib_up_work {
	struct work_struct	work;
	struct smc_link_group	*lgr;
	struct smc_ib_device	*smcibdev;
	u8			ibport;
};

static void smc_buf_free(struct smc_link_group *lgr, bool is_rmb,
			 struct smc_buf_desc *buf_desc);
static void __smc_lgr_terminate(struct smc_link_group *lgr, bool soft);

static void smc_link_up_work(struct work_struct *work);

/* return head of link group list and its lock for a given link group */
static inline struct list_head *smc_lgr_list_head(struct smc_link_group *lgr,
						  spinlock_t **lgr_lock)
{
	if (lgr->is_smcd) {
		*lgr_lock = &lgr->smcd->lgr_lock;
		return &lgr->smcd->lgr_list;
	}

	*lgr_lock = &smc_lgr_list.lock;
	return &smc_lgr_list.list;
}

static void smc_lgr_schedule_free_work(struct smc_link_group *lgr)
{
	/* client link group creation always follows the server link group
	 * creation. For client use a somewhat higher removal delay time,
	 * otherwise there is a risk of out-of-sync link groups.
	 */
	if (!lgr->freeing && !lgr->freefast) {
		mod_delayed_work(system_wq, &lgr->free_work,
				 (!lgr->is_smcd && lgr->role == SMC_CLNT) ?
						SMC_LGR_FREE_DELAY_CLNT :
						SMC_LGR_FREE_DELAY_SERV);
	}
}

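/* schedule the free worker with the short SMC_LGR_FREE_DELAY_FAST delay,
 * used when the link group should be cleaned up without the regular
 * removal delay
 */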
void smc_lgr_schedule_free_work_fast(struct smc_link_group *lgr)
{
	if (!lgr->freeing && !lgr->freefast) {
		lgr->freefast = 1;
		mod_delayed_work(system_wq, &lgr->free_work,
				 SMC_LGR_FREE_DELAY_FAST);
	}
}

/* Register connection's alert token in our lookup structure.
 * To use rbtrees we have to implement our own insert core.
 * Requires @conns_lock
 * @smc		connection to register
 * Returns 0 on success, != 0 otherwise.
 */
static void smc_lgr_add_alert_token(struct smc_connection *conn)
{
	struct rb_node **link, *parent = NULL;
	u32 token = conn->alert_token_local;

	link = &conn->lgr->conns_all.rb_node;
	while (*link) {
		struct smc_connection *cur = rb_entry(*link,
					struct smc_connection, alert_node);

		parent = *link;
		if (cur->alert_token_local > token)
			link = &parent->rb_left;
		else
			link = &parent->rb_right;
	}
	/* Put the new node there */
	rb_link_node(&conn->alert_node, parent, link);
	rb_insert_color(&conn->alert_node, &conn->lgr->conns_all);
}

/* Register connection in link group by assigning an alert token
 * registered in a search tree.
 * Requires @conns_lock
 * Note that '0' is a reserved value and not assigned.
 */
static int smc_lgr_register_conn(struct smc_connection *conn)
{
	struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
	static atomic_t nexttoken = ATOMIC_INIT(0);

	/* find a new alert_token_local value not yet used by some connection
	 * in this link group
	 */
	sock_hold(&smc->sk); /* sock_put in smc_lgr_unregister_conn() */
	while (!conn->alert_token_local) {
		conn->alert_token_local = atomic_inc_return(&nexttoken);
		if (smc_lgr_find_conn(conn->alert_token_local, conn->lgr))
			conn->alert_token_local = 0;
	}
	smc_lgr_add_alert_token(conn);

	/* assign the new connection to a link */
	if (!conn->lgr->is_smcd) {
		struct smc_link *lnk;
		int i;

		/* tbd - link balancing */
		for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
			lnk = &conn->lgr->lnk[i];
			if (lnk->state == SMC_LNK_ACTIVATING ||
			    lnk->state == SMC_LNK_ACTIVE)
				conn->lnk = lnk;
		}
		if (!conn->lnk)
			return SMC_CLC_DECL_NOACTLINK;
	}
	conn->lgr->conns_num++;
	return 0;
}

/* Unregister connection and reset the alert token of the given connection
 */
static void __smc_lgr_unregister_conn(struct smc_connection *conn)
{
	struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
	struct smc_link_group *lgr = conn->lgr;

	rb_erase(&conn->alert_node, &lgr->conns_all);
	lgr->conns_num--;
	conn->alert_token_local = 0;
	sock_put(&smc->sk); /* sock_hold in smc_lgr_register_conn() */
}

/* Unregister connection from lgr
 */
static void smc_lgr_unregister_conn(struct smc_connection *conn)
{
	struct smc_link_group *lgr = conn->lgr;

	if (!lgr)
		return;
	write_lock_bh(&lgr->conns_lock);
	if (conn->alert_token_local) {
		__smc_lgr_unregister_conn(conn);
	}
	write_unlock_bh(&lgr->conns_lock);
	conn->lgr = NULL;
}

void smc_lgr_cleanup_early(struct smc_connection *conn)
{
	struct smc_link_group *lgr = conn->lgr;

	if (!lgr)
		return;

	smc_conn_free(conn);
	smc_lgr_forget(lgr);
	smc_lgr_schedule_free_work_fast(lgr);
}

/* Send delete link, either as client to request the initiation
 * of the DELETE LINK sequence from server; or as server to
 * initiate the delete processing. See smc_llc_rx_delete_link().
 */
static int smcr_link_send_delete(struct smc_link *lnk, bool orderly)
{
	if (lnk->state == SMC_LNK_ACTIVE &&
	    !smc_llc_send_delete_link(lnk, 0, SMC_LLC_REQ, orderly,
				      SMC_LLC_DEL_PROG_INIT_TERM)) {
		return 0;
	}
	return -ENOTCONN;
}

static void smc_lgr_free(struct smc_link_group *lgr);

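/* delayed worker: free the link group unless connections were added again
 * in the meantime
 */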
static void smc_lgr_free_work(struct work_struct *work)
{
	struct smc_link_group *lgr = container_of(to_delayed_work(work),
						  struct smc_link_group,
						  free_work);
	spinlock_t *lgr_lock;
	bool conns;
	int i;

	smc_lgr_list_head(lgr, &lgr_lock);
	spin_lock_bh(lgr_lock);
	if (lgr->freeing) {
		spin_unlock_bh(lgr_lock);
		return;
	}
	read_lock_bh(&lgr->conns_lock);
	conns = RB_EMPTY_ROOT(&lgr->conns_all);
	read_unlock_bh(&lgr->conns_lock);
	if (!conns) { /* number of lgr connections is no longer zero */
		spin_unlock_bh(lgr_lock);
		return;
	}
	list_del_init(&lgr->list); /* remove from smc_lgr_list */

	if (!lgr->is_smcd && !lgr->terminating)	{
		bool do_wait = false;

		for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
			struct smc_link *lnk = &lgr->lnk[i];
			/* try to send del link msg, on err free immediately */
			if (lnk->state == SMC_LNK_ACTIVE &&
			    !smcr_link_send_delete(lnk, true)) {
				/* reschedule in case we never receive a resp */
				smc_lgr_schedule_free_work(lgr);
				do_wait = true;
			}
		}
		if (do_wait) {
			spin_unlock_bh(lgr_lock);
			return; /* wait for resp, see smc_llc_rx_delete_link */
		}
	}
	lgr->freeing = 1; /* this instance does the freeing, no new schedule */
	spin_unlock_bh(lgr_lock);
	cancel_delayed_work(&lgr->free_work);

	if (lgr->is_smcd && !lgr->terminating)
		smc_ism_signal_shutdown(lgr);
	if (!lgr->is_smcd) {
		for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
			struct smc_link *lnk = &lgr->lnk[i];

			if (smc_link_usable(lnk))
				lnk->state = SMC_LNK_INACTIVE;
		}
		wake_up_interruptible_all(&lgr->llc_waiter);
	}
	smc_lgr_free(lgr);
}

static void smc_lgr_terminate_work(struct work_struct *work)
{
	struct smc_link_group *lgr = container_of(work, struct smc_link_group,
						  terminate_work);

	__smc_lgr_terminate(lgr, true);
}

/* return next unique link id for the lgr */
static u8 smcr_next_link_id(struct smc_link_group *lgr)
{
	u8 link_id;
	int i;

	while (1) {
		link_id = ++lgr->next_link_id;
		if (!link_id)	/* skip zero as link_id */
			link_id = ++lgr->next_link_id;
		for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
			if (smc_link_usable(&lgr->lnk[i]) &&
			    lgr->lnk[i].link_id == link_id)
				continue;
		}
		break;
	}
	return link_id;
}

static int smcr_link_init(struct smc_link_group *lgr, struct smc_link *lnk,
			  u8 link_idx, struct smc_init_info *ini)
{
	u8 rndvec[3];
	int rc;

	get_device(&ini->ib_dev->ibdev->dev);
	atomic_inc(&ini->ib_dev->lnk_cnt);
	lnk->state = SMC_LNK_ACTIVATING;
	lnk->link_id = smcr_next_link_id(lgr);
	lnk->lgr = lgr;
	lnk->link_idx = link_idx;
	lnk->smcibdev = ini->ib_dev;
	lnk->ibport = ini->ib_port;
	lnk->path_mtu = ini->ib_dev->pattr[ini->ib_port - 1].active_mtu;
	if (!ini->ib_dev->initialized) {
		rc = (int)smc_ib_setup_per_ibdev(ini->ib_dev);
		if (rc)
			goto out;
	}
	get_random_bytes(rndvec, sizeof(rndvec));
	lnk->psn_initial = rndvec[0] + (rndvec[1] << 8) +
		(rndvec[2] << 16);
	rc = smc_ib_determine_gid(lnk->smcibdev, lnk->ibport,
				  ini->vlan_id, lnk->gid, &lnk->sgid_index);
	if (rc)
		goto out;
	rc = smc_llc_link_init(lnk);
	if (rc)
		goto out;
	rc = smc_wr_alloc_link_mem(lnk);
	if (rc)
		goto clear_llc_lnk;
	rc = smc_ib_create_protection_domain(lnk);
	if (rc)
		goto free_link_mem;
	rc = smc_ib_create_queue_pair(lnk);
	if (rc)
		goto dealloc_pd;
	rc = smc_wr_create_link(lnk);
	if (rc)
		goto destroy_qp;
	return 0;

destroy_qp:
	smc_ib_destroy_queue_pair(lnk);
dealloc_pd:
	smc_ib_dealloc_protection_domain(lnk);
free_link_mem:
	smc_wr_free_link_mem(lnk);
clear_llc_lnk:
	smc_llc_link_clear(lnk);
out:
	put_device(&ini->ib_dev->ibdev->dev);
	memset(lnk, 0, sizeof(struct smc_link));
	lnk->state = SMC_LNK_UNUSED;
	if (!atomic_dec_return(&ini->ib_dev->lnk_cnt))
		wake_up(&ini->ib_dev->lnks_deleted);
	return rc;
}

/* create a new SMC link group */
static int smc_lgr_create(struct smc_sock *smc, struct smc_init_info *ini)
{
	struct smc_link_group *lgr;
	struct list_head *lgr_list;
	struct smc_link *lnk;
	spinlock_t *lgr_lock;
	u8 link_idx;
	int rc = 0;
	int i;

	if (ini->is_smcd && ini->vlan_id) {
		if (smc_ism_get_vlan(ini->ism_dev, ini->vlan_id)) {
			rc = SMC_CLC_DECL_ISMVLANERR;
			goto out;
		}
	}

	lgr = kzalloc(sizeof(*lgr), GFP_KERNEL);
	if (!lgr) {
		rc = SMC_CLC_DECL_MEM;
		goto ism_put_vlan;
	}
	lgr->is_smcd = ini->is_smcd;
	lgr->sync_err = 0;
	lgr->terminating = 0;
	lgr->freefast = 0;
	lgr->freeing = 0;
	lgr->vlan_id = ini->vlan_id;
	mutex_init(&lgr->sndbufs_lock);
	mutex_init(&lgr->rmbs_lock);
	rwlock_init(&lgr->conns_lock);
	for (i = 0; i < SMC_RMBE_SIZES; i++) {
		INIT_LIST_HEAD(&lgr->sndbufs[i]);
		INIT_LIST_HEAD(&lgr->rmbs[i]);
	}
	lgr->next_link_id = 0;
	smc_lgr_list.num += SMC_LGR_NUM_INCR;
	memcpy(&lgr->id, (u8 *)&smc_lgr_list.num, SMC_LGR_ID_SIZE);
	INIT_DELAYED_WORK(&lgr->free_work, smc_lgr_free_work);
	INIT_WORK(&lgr->terminate_work, smc_lgr_terminate_work);
	lgr->conns_all = RB_ROOT;
	if (ini->is_smcd) {
		/* SMC-D specific settings */
		get_device(&ini->ism_dev->dev);
		lgr->peer_gid = ini->ism_gid;
		lgr->smcd = ini->ism_dev;
		lgr_list = &ini->ism_dev->lgr_list;
		lgr_lock = &lgr->smcd->lgr_lock;
		lgr->peer_shutdown = 0;
		atomic_inc(&ini->ism_dev->lgr_cnt);
	} else {
		/* SMC-R specific settings */
		lgr->role = smc->listen_smc ? SMC_SERV : SMC_CLNT;
		memcpy(lgr->peer_systemid, ini->ib_lcl->id_for_peer,
		       SMC_SYSTEMID_LEN);
		memcpy(lgr->pnet_id, ini->ib_dev->pnetid[ini->ib_port - 1],
		       SMC_MAX_PNETID_LEN);
		smc_llc_lgr_init(lgr, smc);

		link_idx = SMC_SINGLE_LINK;
		lnk = &lgr->lnk[link_idx];
		rc = smcr_link_init(lgr, lnk, link_idx, ini);
		if (rc)
			goto free_lgr;
		lgr_list = &smc_lgr_list.list;
		lgr_lock = &smc_lgr_list.lock;
		atomic_inc(&lgr_cnt);
	}
	smc->conn.lgr = lgr;
	spin_lock_bh(lgr_lock);
	list_add(&lgr->list, lgr_list);
	spin_unlock_bh(lgr_lock);
	return 0;

free_lgr:
	kfree(lgr);
ism_put_vlan:
	if (ini->is_smcd && ini->vlan_id)
		smc_ism_put_vlan(ini->ism_dev, ini->vlan_id);
out:
	if (rc < 0) {
		if (rc == -ENOMEM)
			rc = SMC_CLC_DECL_MEM;
		else
			rc = SMC_CLC_DECL_INTERR;
	}
	return rc;
}

static void smcr_buf_unuse(struct smc_buf_desc *rmb_desc,
			   struct smc_link_group *lgr)
{
	int rc;

	if (rmb_desc->is_conf_rkey && !list_empty(&lgr->list)) {
		/* unregister rmb with peer */
		rc = smc_llc_flow_initiate(lgr, SMC_LLC_FLOW_RKEY);
		if (!rc) {
			/* protect against smc_llc_cli_rkey_exchange() */
			mutex_lock(&lgr->llc_conf_mutex);
			smc_llc_do_delete_rkey(lgr, rmb_desc);
			rmb_desc->is_conf_rkey = false;
			mutex_unlock(&lgr->llc_conf_mutex);
			smc_llc_flow_stop(lgr, &lgr->llc_flow_lcl);
		}
	}

	if (rmb_desc->is_reg_err) {
		/* buf registration failed, reuse not possible */
		mutex_lock(&lgr->rmbs_lock);
		list_del(&rmb_desc->list);
		mutex_unlock(&lgr->rmbs_lock);

		smc_buf_free(lgr, true, rmb_desc);
	} else {
		rmb_desc->used = 0;
	}
}

static void smc_buf_unuse(struct smc_connection *conn,
			  struct smc_link_group *lgr)
{
	if (conn->sndbuf_desc)
		conn->sndbuf_desc->used = 0;
	if (conn->rmb_desc && lgr->is_smcd)
		conn->rmb_desc->used = 0;
	else if (conn->rmb_desc)
		smcr_buf_unuse(conn->rmb_desc, lgr);
}

/* remove a finished connection from its link group */
void smc_conn_free(struct smc_connection *conn)
{
	struct smc_link_group *lgr = conn->lgr;

	if (!lgr)
		return;
	if (lgr->is_smcd) {
		if (!list_empty(&lgr->list))
			smc_ism_unset_conn(conn);
		tasklet_kill(&conn->rx_tsklet);
	} else {
		smc_cdc_tx_dismiss_slots(conn);
	}
	if (!list_empty(&lgr->list)) {
		smc_lgr_unregister_conn(conn);
		smc_buf_unuse(conn, lgr); /* allow buffer reuse */
	}

	if (!lgr->conns_num)
		smc_lgr_schedule_free_work(lgr);
}

/* unregister a link from a buf_desc */
static void smcr_buf_unmap_link(struct smc_buf_desc *buf_desc, bool is_rmb,
				struct smc_link *lnk)
{
	if (is_rmb)
		buf_desc->is_reg_mr[lnk->link_idx] = false;
	if (!buf_desc->is_map_ib[lnk->link_idx])
		return;
	if (is_rmb) {
		if (buf_desc->mr_rx[lnk->link_idx]) {
			smc_ib_put_memory_region(
					buf_desc->mr_rx[lnk->link_idx]);
			buf_desc->mr_rx[lnk->link_idx] = NULL;
		}
		smc_ib_buf_unmap_sg(lnk, buf_desc, DMA_FROM_DEVICE);
	} else {
		smc_ib_buf_unmap_sg(lnk, buf_desc, DMA_TO_DEVICE);
	}
	sg_free_table(&buf_desc->sgt[lnk->link_idx]);
	buf_desc->is_map_ib[lnk->link_idx] = false;
}

/* unmap all buffers of lgr for a deleted link */
static void smcr_buf_unmap_lgr(struct smc_link *lnk)
{
	struct smc_link_group *lgr = lnk->lgr;
	struct smc_buf_desc *buf_desc, *bf;
	int i;

	for (i = 0; i < SMC_RMBE_SIZES; i++) {
		mutex_lock(&lgr->rmbs_lock);
		list_for_each_entry_safe(buf_desc, bf, &lgr->rmbs[i], list)
			smcr_buf_unmap_link(buf_desc, true, lnk);
		mutex_unlock(&lgr->rmbs_lock);
		mutex_lock(&lgr->sndbufs_lock);
		list_for_each_entry_safe(buf_desc, bf, &lgr->sndbufs[i],
					 list)
			smcr_buf_unmap_link(buf_desc, false, lnk);
		mutex_unlock(&lgr->sndbufs_lock);
	}
}

static void smcr_rtoken_clear_link(struct smc_link *lnk)
{
	struct smc_link_group *lgr = lnk->lgr;
	int i;

	for (i = 0; i < SMC_RMBS_PER_LGR_MAX; i++) {
		lgr->rtokens[i][lnk->link_idx].rkey = 0;
		lgr->rtokens[i][lnk->link_idx].dma_addr = 0;
	}
}

/* must be called under lgr->llc_conf_mutex lock */
void smcr_link_clear(struct smc_link *lnk)
{
	struct smc_ib_device *smcibdev;

	if (!lnk->lgr || lnk->state == SMC_LNK_UNUSED)
		return;
	lnk->peer_qpn = 0;
	smc_llc_link_clear(lnk);
	smcr_buf_unmap_lgr(lnk);
	smcr_rtoken_clear_link(lnk);
	smc_ib_modify_qp_reset(lnk);
	smc_wr_free_link(lnk);
	smc_ib_destroy_queue_pair(lnk);
	smc_ib_dealloc_protection_domain(lnk);
	smc_wr_free_link_mem(lnk);
	put_device(&lnk->smcibdev->ibdev->dev);
	smcibdev = lnk->smcibdev;
	memset(lnk, 0, sizeof(struct smc_link));
	lnk->state = SMC_LNK_UNUSED;
	if (!atomic_dec_return(&smcibdev->lnk_cnt))
		wake_up(&smcibdev->lnks_deleted);
}

static void smcr_buf_free(struct smc_link_group *lgr, bool is_rmb,
			  struct smc_buf_desc *buf_desc)
{
	int i;

	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++)
		smcr_buf_unmap_link(buf_desc, is_rmb, &lgr->lnk[i]);

	if (buf_desc->pages)
		__free_pages(buf_desc->pages, buf_desc->order);
	kfree(buf_desc);
}

static void smcd_buf_free(struct smc_link_group *lgr, bool is_dmb,
			  struct smc_buf_desc *buf_desc)
{
	if (is_dmb) {
		/* restore original buf len */
		buf_desc->len += sizeof(struct smcd_cdc_msg);
		smc_ism_unregister_dmb(lgr->smcd, buf_desc);
	} else {
		kfree(buf_desc->cpu_addr);
	}
	kfree(buf_desc);
}

static void smc_buf_free(struct smc_link_group *lgr, bool is_rmb,
			 struct smc_buf_desc *buf_desc)
{
	if (lgr->is_smcd)
		smcd_buf_free(lgr, is_rmb, buf_desc);
	else
		smcr_buf_free(lgr, is_rmb, buf_desc);
}

static void __smc_lgr_free_bufs(struct smc_link_group *lgr, bool is_rmb)
{
	struct smc_buf_desc *buf_desc, *bf_desc;
	struct list_head *buf_list;
	int i;

	for (i = 0; i < SMC_RMBE_SIZES; i++) {
		if (is_rmb)
			buf_list = &lgr->rmbs[i];
		else
			buf_list = &lgr->sndbufs[i];
		list_for_each_entry_safe(buf_desc, bf_desc, buf_list,
					 list) {
			list_del(&buf_desc->list);
			smc_buf_free(lgr, is_rmb, buf_desc);
		}
	}
}

static void smc_lgr_free_bufs(struct smc_link_group *lgr)
{
	/* free send buffers */
	__smc_lgr_free_bufs(lgr, false);
	/* free rmbs */
	__smc_lgr_free_bufs(lgr, true);
}

/* remove a link group */
static void smc_lgr_free(struct smc_link_group *lgr)
{
	int i;

	smc_lgr_free_bufs(lgr);
	if (lgr->is_smcd) {
		if (!lgr->terminating) {
			smc_ism_put_vlan(lgr->smcd, lgr->vlan_id);
			put_device(&lgr->smcd->dev);
		}
		if (!atomic_dec_return(&lgr->smcd->lgr_cnt))
			wake_up(&lgr->smcd->lgrs_deleted);
	} else {
		for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
			if (lgr->lnk[i].state != SMC_LNK_UNUSED)
				smcr_link_clear(&lgr->lnk[i]);
		}
		smc_llc_lgr_clear(lgr);
		if (!atomic_dec_return(&lgr_cnt))
			wake_up(&lgrs_deleted);
	}
	kfree(lgr);
}

void smc_lgr_forget(struct smc_link_group *lgr)
{
	struct list_head *lgr_list;
	spinlock_t *lgr_lock;

	lgr_list = smc_lgr_list_head(lgr, &lgr_lock);
	spin_lock_bh(lgr_lock);
	/* do not use this link group for new connections */
	if (!list_empty(lgr_list))
		list_del_init(lgr_list);
	spin_unlock_bh(lgr_lock);
}

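/* unregister all DMBs of an SMC-D link group with the ISM device */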
static void smcd_unregister_all_dmbs(struct smc_link_group *lgr)
{
	int i;

	for (i = 0; i < SMC_RMBE_SIZES; i++) {
		struct smc_buf_desc *buf_desc;

		list_for_each_entry(buf_desc, &lgr->rmbs[i], list) {
			buf_desc->len += sizeof(struct smcd_cdc_msg);
			smc_ism_unregister_dmb(lgr->smcd, buf_desc);
		}
	}
}

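/* wake up waiters of an smc socket: write space, data ready, state change */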
static void smc_sk_wake_ups(struct smc_sock *smc)
{
	smc->sk.sk_write_space(&smc->sk);
	smc->sk.sk_data_ready(&smc->sk);
	smc->sk.sk_state_change(&smc->sk);
}

/* kill a connection */
static void smc_conn_kill(struct smc_connection *conn, bool soft)
{
	struct smc_sock *smc = container_of(conn, struct smc_sock, conn);

	if (conn->lgr->is_smcd && conn->lgr->peer_shutdown)
		conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1;
	else
		smc_close_abort(conn);
	conn->killed = 1;
	smc->sk.sk_err = ECONNABORTED;
	smc_sk_wake_ups(smc);
	if (conn->lgr->is_smcd) {
		smc_ism_unset_conn(conn);
		if (soft)
			tasklet_kill(&conn->rx_tsklet);
		else
			tasklet_unlock_wait(&conn->rx_tsklet);
	} else {
		smc_cdc_tx_dismiss_slots(conn);
	}
	smc_lgr_unregister_conn(conn);
	smc_close_active_abort(smc);
}

static void smc_lgr_cleanup(struct smc_link_group *lgr)
{
	int i;

	if (lgr->is_smcd) {
		smc_ism_signal_shutdown(lgr);
		smcd_unregister_all_dmbs(lgr);
		smc_ism_put_vlan(lgr->smcd, lgr->vlan_id);
		put_device(&lgr->smcd->dev);
	} else {
		for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
			struct smc_link *lnk = &lgr->lnk[i];

			if (smc_link_usable(lnk))
				lnk->state = SMC_LNK_INACTIVE;
		}
		wake_up_interruptible_all(&lgr->llc_waiter);
	}
}

/* terminate link group
 * @soft: true if link group shutdown can take its time
 *	  false if immediate link group shutdown is required
 */
static void __smc_lgr_terminate(struct smc_link_group *lgr, bool soft)
{
	struct smc_connection *conn;
	struct smc_sock *smc;
	struct rb_node *node;

	if (lgr->terminating)
		return;	/* lgr already terminating */
	if (!soft)
		cancel_delayed_work_sync(&lgr->free_work);
	lgr->terminating = 1;

	/* kill remaining link group connections */
	read_lock_bh(&lgr->conns_lock);
	node = rb_first(&lgr->conns_all);
	while (node) {
		read_unlock_bh(&lgr->conns_lock);
		conn = rb_entry(node, struct smc_connection, alert_node);
		smc = container_of(conn, struct smc_sock, conn);
		sock_hold(&smc->sk); /* sock_put below */
		lock_sock(&smc->sk);
		smc_conn_kill(conn, soft);
		release_sock(&smc->sk);
		sock_put(&smc->sk); /* sock_hold above */
		read_lock_bh(&lgr->conns_lock);
		node = rb_first(&lgr->conns_all);
	}
	read_unlock_bh(&lgr->conns_lock);
	smc_lgr_cleanup(lgr);
	if (soft)
		smc_lgr_schedule_free_work_fast(lgr);
	else
		smc_lgr_free(lgr);
}

/* unlink link group and schedule termination */
void smc_lgr_terminate_sched(struct smc_link_group *lgr)
{
	spinlock_t *lgr_lock;

	smc_lgr_list_head(lgr, &lgr_lock);
	spin_lock_bh(lgr_lock);
	if (list_empty(&lgr->list) || lgr->terminating || lgr->freeing) {
		spin_unlock_bh(lgr_lock);
		return;	/* lgr already terminating */
	}
	list_del_init(&lgr->list);
	spin_unlock_bh(lgr_lock);
	schedule_work(&lgr->terminate_work);
}

/* Called when IB port is terminated */
void smc_port_terminate(struct smc_ib_device *smcibdev, u8 ibport)
{
	struct smc_link_group *lgr, *l;
	LIST_HEAD(lgr_free_list);
	int i;

	spin_lock_bh(&smc_lgr_list.lock);
	list_for_each_entry_safe(lgr, l, &smc_lgr_list.list, list) {
		if (lgr->is_smcd)
			continue;
		/* tbd - terminate only when no more links are active */
		for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
			if (!smc_link_usable(&lgr->lnk[i]))
				continue;
			if (lgr->lnk[i].smcibdev == smcibdev &&
			    lgr->lnk[i].ibport == ibport) {
				list_move(&lgr->list, &lgr_free_list);
				lgr->freeing = 1;
			}
		}
	}
	spin_unlock_bh(&smc_lgr_list.lock);

	list_for_each_entry_safe(lgr, l, &lgr_free_list, list) {
		list_del_init(&lgr->list);
		__smc_lgr_terminate(lgr, false);
	}
}

/* Called when peer lgr shutdown (regularly or abnormally) is received */
void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid, unsigned short vlan)
{
	struct smc_link_group *lgr, *l;
	LIST_HEAD(lgr_free_list);

	/* run common cleanup function and build free list */
	spin_lock_bh(&dev->lgr_lock);
	list_for_each_entry_safe(lgr, l, &dev->lgr_list, list) {
		if ((!peer_gid || lgr->peer_gid == peer_gid) &&
		    (vlan == VLAN_VID_MASK || lgr->vlan_id == vlan)) {
			if (peer_gid) /* peer triggered termination */
				lgr->peer_shutdown = 1;
			list_move(&lgr->list, &lgr_free_list);
		}
	}
	spin_unlock_bh(&dev->lgr_lock);

	/* cancel the regular free workers and actually free lgrs */
	list_for_each_entry_safe(lgr, l, &lgr_free_list, list) {
		list_del_init(&lgr->list);
		schedule_work(&lgr->terminate_work);
	}
}

/* Called when an SMCD device is removed or the smc module is unloaded */
void smc_smcd_terminate_all(struct smcd_dev *smcd)
{
	struct smc_link_group *lgr, *lg;
	LIST_HEAD(lgr_free_list);

	spin_lock_bh(&smcd->lgr_lock);
	list_splice_init(&smcd->lgr_list, &lgr_free_list);
	list_for_each_entry(lgr, &lgr_free_list, list)
		lgr->freeing = 1;
	spin_unlock_bh(&smcd->lgr_lock);

	list_for_each_entry_safe(lgr, lg, &lgr_free_list, list) {
		list_del_init(&lgr->list);
		__smc_lgr_terminate(lgr, false);
	}

	if (atomic_read(&smcd->lgr_cnt))
		wait_event(smcd->lgrs_deleted, !atomic_read(&smcd->lgr_cnt));
}

/* Called when an SMCR device is removed or the smc module is unloaded.
 * If smcibdev is given, all SMCR link groups using this device are terminated.
 * If smcibdev is NULL, all SMCR link groups are terminated.
 */
void smc_smcr_terminate_all(struct smc_ib_device *smcibdev)
{
	struct smc_link_group *lgr, *lg;
	LIST_HEAD(lgr_free_list);
	int i;

	spin_lock_bh(&smc_lgr_list.lock);
	if (!smcibdev) {
		list_splice_init(&smc_lgr_list.list, &lgr_free_list);
		list_for_each_entry(lgr, &lgr_free_list, list)
			lgr->freeing = 1;
	} else {
		list_for_each_entry_safe(lgr, lg, &smc_lgr_list.list, list) {
			for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
				if (lgr->lnk[i].smcibdev == smcibdev) {
					list_move(&lgr->list, &lgr_free_list);
					lgr->freeing = 1;
					break;
				}
			}
		}
	}
	spin_unlock_bh(&smc_lgr_list.lock);

	list_for_each_entry_safe(lgr, lg, &lgr_free_list, list) {
		list_del_init(&lgr->list);
		__smc_lgr_terminate(lgr, false);
	}

	if (smcibdev) {
		if (atomic_read(&smcibdev->lnk_cnt))
			wait_event(smcibdev->lnks_deleted,
				   !atomic_read(&smcibdev->lnk_cnt));
	} else {
		if (atomic_read(&lgr_cnt))
			wait_event(lgrs_deleted, !atomic_read(&lgr_cnt));
	}
}

/* link is up - establish alternate link if applicable */
static void smcr_link_up(struct smc_link_group *lgr,
			 struct smc_ib_device *smcibdev, u8 ibport)
{
	struct smc_link *link = NULL;

	if (list_empty(&lgr->list) ||
	    lgr->type == SMC_LGR_SYMMETRIC ||
	    lgr->type == SMC_LGR_ASYMMETRIC_PEER)
		return;

	if (lgr->role == SMC_SERV) {
		/* trigger local add link processing */
		link = smc_llc_usable_link(lgr);
		if (!link)
			return;
		/* tbd: call smc_llc_srv_add_link_local(link); */
	} else {
		/* invite server to start add link processing */
		u8 gid[SMC_GID_SIZE];

		if (smc_ib_determine_gid(smcibdev, ibport, lgr->vlan_id, gid,
					 NULL))
			return;
		if (lgr->llc_flow_lcl.type != SMC_LLC_FLOW_NONE) {
			/* some other llc task is ongoing */
			wait_event_interruptible_timeout(lgr->llc_waiter,
				(lgr->llc_flow_lcl.type == SMC_LLC_FLOW_NONE),
				SMC_LLC_WAIT_TIME);
		}
		if (list_empty(&lgr->list) ||
		    !smc_ib_port_active(smcibdev, ibport))
			return; /* lgr or device no longer active */
		link = smc_llc_usable_link(lgr);
		if (!link)
			return;
		smc_llc_send_add_link(link, smcibdev->mac[ibport - 1], gid,
				      NULL, SMC_LLC_REQ);
	}
}

void smcr_port_add(struct smc_ib_device *smcibdev, u8 ibport)
{
	struct smc_ib_up_work *ib_work;
	struct smc_link_group *lgr, *n;

	list_for_each_entry_safe(lgr, n, &smc_lgr_list.list, list) {
		if (strncmp(smcibdev->pnetid[ibport - 1], lgr->pnet_id,
			    SMC_MAX_PNETID_LEN) ||
		    lgr->type == SMC_LGR_SYMMETRIC ||
		    lgr->type == SMC_LGR_ASYMMETRIC_PEER)
			continue;
		ib_work = kmalloc(sizeof(*ib_work), GFP_KERNEL);
		if (!ib_work)
			continue;
		INIT_WORK(&ib_work->work, smc_link_up_work);
		ib_work->lgr = lgr;
		ib_work->smcibdev = smcibdev;
		ib_work->ibport = ibport;
		schedule_work(&ib_work->work);
	}
}

static void smc_link_up_work(struct work_struct *work)
{
	struct smc_ib_up_work *ib_work = container_of(work,
						      struct smc_ib_up_work,
						      work);
	struct smc_link_group *lgr = ib_work->lgr;

	if (list_empty(&lgr->list))
		goto out;
	smcr_link_up(lgr, ib_work->smcibdev, ib_work->ibport);
out:
	kfree(ib_work);
}

/* Determine vlan of internal TCP socket.
 * @vlan_id: address to store the determined vlan id into
 */
int smc_vlan_by_tcpsk(struct socket *clcsock, struct smc_init_info *ini)
{
	struct dst_entry *dst = sk_dst_get(clcsock->sk);
	struct net_device *ndev;
	int i, nest_lvl, rc = 0;

	ini->vlan_id = 0;
	if (!dst) {
		rc = -ENOTCONN;
		goto out;
	}
	if (!dst->dev) {
		rc = -ENODEV;
		goto out_rel;
	}

	ndev = dst->dev;
	if (is_vlan_dev(ndev)) {
		ini->vlan_id = vlan_dev_vlan_id(ndev);
		goto out_rel;
	}

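	/* walk the chain of lower devices to find a VLAN device, if any */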
	rtnl_lock();
	nest_lvl = ndev->lower_level;
	for (i = 0; i < nest_lvl; i++) {
		struct list_head *lower = &ndev->adj_list.lower;

		if (list_empty(lower))
			break;
		lower = lower->next;
		ndev = (struct net_device *)netdev_lower_get_next(ndev, &lower);
		if (is_vlan_dev(ndev)) {
			ini->vlan_id = vlan_dev_vlan_id(ndev);
			break;
		}
	}
	rtnl_unlock();

out_rel:
	dst_release(dst);
out:
	return rc;
}

static bool smcr_lgr_match(struct smc_link_group *lgr,
			   struct smc_clc_msg_local *lcl,
			   enum smc_lgr_role role, u32 clcqpn)
{
	int i;

	if (memcmp(lgr->peer_systemid, lcl->id_for_peer, SMC_SYSTEMID_LEN) ||
	    lgr->role != role)
		return false;

	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
		if (lgr->lnk[i].state != SMC_LNK_ACTIVE)
			continue;
		if ((lgr->role == SMC_SERV || lgr->lnk[i].peer_qpn == clcqpn) &&
		    !memcmp(lgr->lnk[i].peer_gid, &lcl->gid, SMC_GID_SIZE) &&
		    !memcmp(lgr->lnk[i].peer_mac, lcl->mac, sizeof(lcl->mac)))
			return true;
	}
	return false;
}

static bool smcd_lgr_match(struct smc_link_group *lgr,
			   struct smcd_dev *smcismdev, u64 peer_gid)
{
	return lgr->peer_gid == peer_gid && lgr->smcd == smcismdev;
}

/* create a new SMC connection (and a new link group if necessary) */
int smc_conn_create(struct smc_sock *smc, struct smc_init_info *ini)
{
	struct smc_connection *conn = &smc->conn;
	struct list_head *lgr_list;
	struct smc_link_group *lgr;
	enum smc_lgr_role role;
	spinlock_t *lgr_lock;
	int rc = 0;

	lgr_list = ini->is_smcd ? &ini->ism_dev->lgr_list : &smc_lgr_list.list;
	lgr_lock = ini->is_smcd ? &ini->ism_dev->lgr_lock : &smc_lgr_list.lock;
	ini->cln_first_contact = SMC_FIRST_CONTACT;
	role = smc->listen_smc ? SMC_SERV : SMC_CLNT;
	if (role == SMC_CLNT && ini->srv_first_contact)
		/* create new link group as well */
		goto create;

	/* determine if an existing link group can be reused */
	spin_lock_bh(lgr_lock);
	list_for_each_entry(lgr, lgr_list, list) {
		write_lock_bh(&lgr->conns_lock);
		if ((ini->is_smcd ?
		     smcd_lgr_match(lgr, ini->ism_dev, ini->ism_gid) :
		     smcr_lgr_match(lgr, ini->ib_lcl, role, ini->ib_clcqpn)) &&
		    !lgr->sync_err &&
		    lgr->vlan_id == ini->vlan_id &&
		    (role == SMC_CLNT ||
		     lgr->conns_num < SMC_RMBS_PER_LGR_MAX)) {
			/* link group found */
			ini->cln_first_contact = SMC_REUSE_CONTACT;
			conn->lgr = lgr;
			rc = smc_lgr_register_conn(conn); /* add conn to lgr */
			write_unlock_bh(&lgr->conns_lock);
			if (!rc && delayed_work_pending(&lgr->free_work))
				cancel_delayed_work(&lgr->free_work);
			break;
		}
		write_unlock_bh(&lgr->conns_lock);
	}
	spin_unlock_bh(lgr_lock);
	if (rc)
		return rc;

	if (role == SMC_CLNT && !ini->srv_first_contact &&
	    ini->cln_first_contact == SMC_FIRST_CONTACT) {
		/* Server reuses a link group, but Client wants to start
		 * a new one
		 * send out_of_sync decline, reason synchr. error
		 */
		return SMC_CLC_DECL_SYNCERR;
	}

create:
	if (ini->cln_first_contact == SMC_FIRST_CONTACT) {
		rc = smc_lgr_create(smc, ini);
		if (rc)
			goto out;
		lgr = conn->lgr;
		write_lock_bh(&lgr->conns_lock);
		rc = smc_lgr_register_conn(conn); /* add smc conn to lgr */
		write_unlock_bh(&lgr->conns_lock);
		if (rc)
			goto out;
	}
	conn->local_tx_ctrl.common.type = SMC_CDC_MSG_TYPE;
	conn->local_tx_ctrl.len = SMC_WR_TX_SIZE;
	conn->urg_state = SMC_URG_READ;
	if (ini->is_smcd) {
		conn->rx_off = sizeof(struct smcd_cdc_msg);
		smcd_cdc_rx_init(conn); /* init tasklet for this conn */
	}
#ifndef KERNEL_HAS_ATOMIC64
	spin_lock_init(&conn->acurs_lock);
#endif

out:
	return rc;
}

/* convert the RMB size into the compressed notation - minimum 16K.
 * In contrast to plain ilog2, this rounds towards the next power of 2,
 * so the socket application gets at least its desired sndbuf / rcvbuf size.
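 * E.g. 16384 -> 0, 32768 -> 1, 65536 -> 2, 65537 -> 3 (i.e. 128KB).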
 */
static u8 smc_compress_bufsize(int size)
{
	u8 compressed;

	if (size <= SMC_BUF_MIN_SIZE)
		return 0;

	size = (size - 1) >> 14;
	compressed = ilog2(size) + 1;
	if (compressed >= SMC_RMBE_SIZES)
		compressed = SMC_RMBE_SIZES - 1;
	return compressed;
}

/* convert the RMB size from compressed notation into integer */
int smc_uncompress_bufsize(u8 compressed)
{
	u32 size;

	size = 0x00000001 << (((int)compressed) + 14);
	return (int)size;
}

/* try to reuse a sndbuf or rmb description slot for a certain
 * buffer size; if not available, return NULL
 */
static struct smc_buf_desc *smc_buf_get_slot(int compressed_bufsize,
					     struct mutex *lock,
					     struct list_head *buf_list)
{
	struct smc_buf_desc *buf_slot;

	mutex_lock(lock);
	list_for_each_entry(buf_slot, buf_list, list) {
		if (cmpxchg(&buf_slot->used, 0, 1) == 0) {
			mutex_unlock(lock);
			return buf_slot;
		}
	}
	mutex_unlock(lock);
	return NULL;
}

/* one of the conditions for announcing a receiver's current window size is
 * that it "results in a minimum increase in the window size of 10% of the
 * receive buffer space" [RFC7609]
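 * E.g. a 65536 byte RMB yields min(6553, SOCK_MIN_SNDBUF / 2).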
 */
static inline int smc_rmb_wnd_update_limit(int rmbe_size)
{
	return min_t(int, rmbe_size / 10, SOCK_MIN_SNDBUF / 2);
}

/* map an rmb buf to a link */
static int smcr_buf_map_link(struct smc_buf_desc *buf_desc, bool is_rmb,
			     struct smc_link *lnk)
{
	int rc;

	if (buf_desc->is_map_ib[lnk->link_idx])
		return 0;

	rc = sg_alloc_table(&buf_desc->sgt[lnk->link_idx], 1, GFP_KERNEL);
	if (rc)
		return rc;
	sg_set_buf(buf_desc->sgt[lnk->link_idx].sgl,
		   buf_desc->cpu_addr, buf_desc->len);

	/* map sg table to DMA address */
	rc = smc_ib_buf_map_sg(lnk, buf_desc,
			       is_rmb ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
	/* SMC protocol depends on mapping to one DMA address only */
	if (rc != 1) {
		rc = -EAGAIN;
		goto free_table;
	}

	/* create a new memory region for the RMB */
	if (is_rmb) {
		rc = smc_ib_get_memory_region(lnk->roce_pd,
					      IB_ACCESS_REMOTE_WRITE |
					      IB_ACCESS_LOCAL_WRITE,
					      buf_desc, lnk->link_idx);
		if (rc)
			goto buf_unmap;
		smc_ib_sync_sg_for_device(lnk, buf_desc, DMA_FROM_DEVICE);
	}
	buf_desc->is_map_ib[lnk->link_idx] = true;
	return 0;

buf_unmap:
	smc_ib_buf_unmap_sg(lnk, buf_desc,
			    is_rmb ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
free_table:
	sg_free_table(&buf_desc->sgt[lnk->link_idx]);
	return rc;
}

/* register a new rmb on IB device,
 * must be called under lgr->llc_conf_mutex lock
 */
int smcr_link_reg_rmb(struct smc_link *link, struct smc_buf_desc *rmb_desc)
{
	if (list_empty(&link->lgr->list))
		return -ENOLINK;
	if (!rmb_desc->is_reg_mr[link->link_idx]) {
		/* register memory region for new rmb */
		if (smc_wr_reg_send(link, rmb_desc->mr_rx[link->link_idx])) {
			rmb_desc->is_reg_err = true;
			return -EFAULT;
		}
		rmb_desc->is_reg_mr[link->link_idx] = true;
	}
	return 0;
}

static int _smcr_buf_map_lgr(struct smc_link *lnk, struct mutex *lock,
			     struct list_head *lst, bool is_rmb)
{
	struct smc_buf_desc *buf_desc, *bf;
	int rc = 0;

	mutex_lock(lock);
	list_for_each_entry_safe(buf_desc, bf, lst, list) {
		if (!buf_desc->used)
			continue;
		rc = smcr_buf_map_link(buf_desc, is_rmb, lnk);
		if (rc)
			goto out;
	}
out:
	mutex_unlock(lock);
	return rc;
}

/* map all used buffers of lgr for a new link */
int smcr_buf_map_lgr(struct smc_link *lnk)
{
	struct smc_link_group *lgr = lnk->lgr;
	int i, rc = 0;

	for (i = 0; i < SMC_RMBE_SIZES; i++) {
		rc = _smcr_buf_map_lgr(lnk, &lgr->rmbs_lock,
				       &lgr->rmbs[i], true);
		if (rc)
			return rc;
		rc = _smcr_buf_map_lgr(lnk, &lgr->sndbufs_lock,
				       &lgr->sndbufs[i], false);
		if (rc)
			return rc;
	}
	return 0;
}

/* register all used buffers of lgr for a new link,
 * must be called under lgr->llc_conf_mutex lock
 */
int smcr_buf_reg_lgr(struct smc_link *lnk)
{
	struct smc_link_group *lgr = lnk->lgr;
	struct smc_buf_desc *buf_desc, *bf;
	int i, rc = 0;

	mutex_lock(&lgr->rmbs_lock);
	for (i = 0; i < SMC_RMBE_SIZES; i++) {
		list_for_each_entry_safe(buf_desc, bf, &lgr->rmbs[i], list) {
			if (!buf_desc->used)
				continue;
			rc = smcr_link_reg_rmb(lnk, buf_desc);
			if (rc)
				goto out;
		}
	}
out:
	mutex_unlock(&lgr->rmbs_lock);
	return rc;
}

static struct smc_buf_desc *smcr_new_buf_create(struct smc_link_group *lgr,
						bool is_rmb, int bufsize)
{
	struct smc_buf_desc *buf_desc;

	/* try to alloc a new buffer */
	buf_desc = kzalloc(sizeof(*buf_desc), GFP_KERNEL);
	if (!buf_desc)
		return ERR_PTR(-ENOMEM);

	buf_desc->order = get_order(bufsize);
	buf_desc->pages = alloc_pages(GFP_KERNEL | __GFP_NOWARN |
				      __GFP_NOMEMALLOC | __GFP_COMP |
				      __GFP_NORETRY | __GFP_ZERO,
				      buf_desc->order);
	if (!buf_desc->pages) {
		kfree(buf_desc);
		return ERR_PTR(-EAGAIN);
	}
	buf_desc->cpu_addr = (void *)page_address(buf_desc->pages);
	buf_desc->len = bufsize;
	return buf_desc;
}

/* map buf_desc on all usable links,
 * unused buffers stay mapped as long as the link is up
 */
static int smcr_buf_map_usable_links(struct smc_link_group *lgr,
				     struct smc_buf_desc *buf_desc, bool is_rmb)
{
	int i, rc = 0;

	/* protect against parallel link reconfiguration */
	mutex_lock(&lgr->llc_conf_mutex);
	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
		struct smc_link *lnk = &lgr->lnk[i];

		if (!smc_link_usable(lnk))
			continue;
		if (smcr_buf_map_link(buf_desc, is_rmb, lnk)) {
			rc = -ENOMEM;
			goto out;
		}
	}
out:
	mutex_unlock(&lgr->llc_conf_mutex);
	return rc;
}

#define SMCD_DMBE_SIZES		7 /* 0 -> 16KB, 1 -> 32KB, .. 6 -> 1MB */

static struct smc_buf_desc *smcd_new_buf_create(struct smc_link_group *lgr,
						bool is_dmb, int bufsize)
{
	struct smc_buf_desc *buf_desc;
	int rc;

	if (smc_compress_bufsize(bufsize) > SMCD_DMBE_SIZES)
		return ERR_PTR(-EAGAIN);

	/* try to alloc a new DMB */
	buf_desc = kzalloc(sizeof(*buf_desc), GFP_KERNEL);
	if (!buf_desc)
		return ERR_PTR(-ENOMEM);
	if (is_dmb) {
		rc = smc_ism_register_dmb(lgr, bufsize, buf_desc);
		if (rc) {
			kfree(buf_desc);
			return ERR_PTR(-EAGAIN);
		}
		buf_desc->pages = virt_to_page(buf_desc->cpu_addr);
		/* CDC header stored in buf. So, pretend it was smaller */
		buf_desc->len = bufsize - sizeof(struct smcd_cdc_msg);
	} else {
		buf_desc->cpu_addr = kzalloc(bufsize, GFP_KERNEL |
					     __GFP_NOWARN | __GFP_NORETRY |
					     __GFP_NOMEMALLOC);
		if (!buf_desc->cpu_addr) {
			kfree(buf_desc);
			return ERR_PTR(-EAGAIN);
		}
		buf_desc->len = bufsize;
	}
	return buf_desc;
}

static int __smc_buf_create(struct smc_sock *smc, bool is_smcd, bool is_rmb)
{
	struct smc_buf_desc *buf_desc = ERR_PTR(-ENOMEM);
	struct smc_connection *conn = &smc->conn;
	struct smc_link_group *lgr = conn->lgr;
	struct list_head *buf_list;
	int bufsize, bufsize_short;
	struct mutex *lock;	/* lock buffer list */
	int sk_buf_size;

	if (is_rmb)
		/* use socket recv buffer size (w/o overhead) as start value */
		sk_buf_size = smc->sk.sk_rcvbuf / 2;
	else
		/* use socket send buffer size (w/o overhead) as start value */
		sk_buf_size = smc->sk.sk_sndbuf / 2;

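	/* try the compressed start size first, then fall back to smaller
	 * sizes until a buffer can be reused or allocated
	 */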
	for (bufsize_short = smc_compress_bufsize(sk_buf_size);
	     bufsize_short >= 0; bufsize_short--) {

		if (is_rmb) {
			lock = &lgr->rmbs_lock;
			buf_list = &lgr->rmbs[bufsize_short];
		} else {
			lock = &lgr->sndbufs_lock;
			buf_list = &lgr->sndbufs[bufsize_short];
		}
		bufsize = smc_uncompress_bufsize(bufsize_short);
		if ((1 << get_order(bufsize)) > SG_MAX_SINGLE_ALLOC)
			continue;

		/* check for reusable slot in the link group */
		buf_desc = smc_buf_get_slot(bufsize_short, lock, buf_list);
		if (buf_desc) {
			memset(buf_desc->cpu_addr, 0, bufsize);
			break; /* found reusable slot */
		}

		if (is_smcd)
			buf_desc = smcd_new_buf_create(lgr, is_rmb, bufsize);
		else
			buf_desc = smcr_new_buf_create(lgr, is_rmb, bufsize);

		if (PTR_ERR(buf_desc) == -ENOMEM)
			break;
		if (IS_ERR(buf_desc))
			continue;

		buf_desc->used = 1;
		mutex_lock(lock);
		list_add(&buf_desc->list, buf_list);
		mutex_unlock(lock);
		break; /* found */
	}

	if (IS_ERR(buf_desc))
		return -ENOMEM;

	if (!is_smcd) {
		if (smcr_buf_map_usable_links(lgr, buf_desc, is_rmb)) {
			smcr_buf_unuse(buf_desc, lgr);
			return -ENOMEM;
		}
	}

	if (is_rmb) {
		conn->rmb_desc = buf_desc;
		conn->rmbe_size_short = bufsize_short;
		smc->sk.sk_rcvbuf = bufsize * 2;
		atomic_set(&conn->bytes_to_rcv, 0);
		conn->rmbe_update_limit =
			smc_rmb_wnd_update_limit(buf_desc->len);
		if (is_smcd)
			smc_ism_set_conn(conn); /* map RMB/smcd_dev to conn */
	} else {
		conn->sndbuf_desc = buf_desc;
		smc->sk.sk_sndbuf = bufsize * 2;
		atomic_set(&conn->sndbuf_space, bufsize);
	}
	return 0;
}

void smc_sndbuf_sync_sg_for_cpu(struct smc_connection *conn)
{
	if (!conn->lgr || conn->lgr->is_smcd || !smc_link_usable(conn->lnk))
		return;
	smc_ib_sync_sg_for_cpu(conn->lnk, conn->sndbuf_desc, DMA_TO_DEVICE);
}

void smc_sndbuf_sync_sg_for_device(struct smc_connection *conn)
{
	if (!conn->lgr || conn->lgr->is_smcd || !smc_link_usable(conn->lnk))
		return;
	smc_ib_sync_sg_for_device(conn->lnk, conn->sndbuf_desc, DMA_TO_DEVICE);
}

void smc_rmb_sync_sg_for_cpu(struct smc_connection *conn)
{
	int i;

	if (!conn->lgr || conn->lgr->is_smcd)
		return;
	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
		if (!smc_link_usable(&conn->lgr->lnk[i]))
			continue;
		smc_ib_sync_sg_for_cpu(&conn->lgr->lnk[i], conn->rmb_desc,
				       DMA_FROM_DEVICE);
	}
}

void smc_rmb_sync_sg_for_device(struct smc_connection *conn)
{
	int i;

	if (!conn->lgr || conn->lgr->is_smcd)
		return;
	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
		if (!smc_link_usable(&conn->lgr->lnk[i]))
			continue;
		smc_ib_sync_sg_for_device(&conn->lgr->lnk[i], conn->rmb_desc,
					  DMA_FROM_DEVICE);
	}
}

/* create the send and receive buffer for an SMC socket;
 * receive buffers are called RMBs;
 * (even though the SMC protocol allows more than one RMB-element per RMB,
 * the Linux implementation uses just one RMB-element per RMB, i.e. uses an
 * extra RMB for every connection in a link group)
 */
int smc_buf_create(struct smc_sock *smc, bool is_smcd)
{
	int rc;

	/* create send buffer */
	rc = __smc_buf_create(smc, is_smcd, false);
	if (rc)
		return rc;
	/* create rmb */
	rc = __smc_buf_create(smc, is_smcd, true);
	if (rc)
		smc_buf_free(smc->conn.lgr, false, smc->conn.sndbuf_desc);
	return rc;
}

static inline int smc_rmb_reserve_rtoken_idx(struct smc_link_group *lgr)
{
	int i;

	for_each_clear_bit(i, lgr->rtokens_used_mask, SMC_RMBS_PER_LGR_MAX) {
		if (!test_and_set_bit(i, lgr->rtokens_used_mask))
			return i;
	}
	return -ENOSPC;
}

static int smc_rtoken_find_by_link(struct smc_link_group *lgr, int lnk_idx,
				   u32 rkey)
{
	int i;

	for (i = 0; i < SMC_RMBS_PER_LGR_MAX; i++) {
		if (test_bit(i, lgr->rtokens_used_mask) &&
		    lgr->rtokens[i][lnk_idx].rkey == rkey)
			return i;
	}
	return -ENOENT;
}

/* set rtoken for a new link to an existing rmb */
void smc_rtoken_set(struct smc_link_group *lgr, int link_idx, int link_idx_new,
		    __be32 nw_rkey_known, __be64 nw_vaddr, __be32 nw_rkey)
{
	int rtok_idx;

	rtok_idx = smc_rtoken_find_by_link(lgr, link_idx, ntohl(nw_rkey_known));
	if (rtok_idx == -ENOENT)
		return;
	lgr->rtokens[rtok_idx][link_idx_new].rkey = ntohl(nw_rkey);
	lgr->rtokens[rtok_idx][link_idx_new].dma_addr = be64_to_cpu(nw_vaddr);
}

/* set rtoken for a new link whose link_id is given */
void smc_rtoken_set2(struct smc_link_group *lgr, int rtok_idx, int link_id,
		     __be64 nw_vaddr, __be32 nw_rkey)
{
	u64 dma_addr = be64_to_cpu(nw_vaddr);
	u32 rkey = ntohl(nw_rkey);
	bool found = false;
	int link_idx;

	for (link_idx = 0; link_idx < SMC_LINKS_PER_LGR_MAX; link_idx++) {
		if (lgr->lnk[link_idx].link_id == link_id) {
			found = true;
			break;
		}
	}
	if (!found)
		return;
	lgr->rtokens[rtok_idx][link_idx].rkey = rkey;
	lgr->rtokens[rtok_idx][link_idx].dma_addr = dma_addr;
}

/* add a new rtoken from peer */
int smc_rtoken_add(struct smc_link *lnk, __be64 nw_vaddr, __be32 nw_rkey)
{
	struct smc_link_group *lgr = smc_get_lgr(lnk);
	u64 dma_addr = be64_to_cpu(nw_vaddr);
	u32 rkey = ntohl(nw_rkey);
	int i;

	for (i = 0; i < SMC_RMBS_PER_LGR_MAX; i++) {
		if (lgr->rtokens[i][lnk->link_idx].rkey == rkey &&
		    lgr->rtokens[i][lnk->link_idx].dma_addr == dma_addr &&
		    test_bit(i, lgr->rtokens_used_mask)) {
			/* already in list */
			return i;
		}
	}
	i = smc_rmb_reserve_rtoken_idx(lgr);
	if (i < 0)
		return i;
	lgr->rtokens[i][lnk->link_idx].rkey = rkey;
	lgr->rtokens[i][lnk->link_idx].dma_addr = dma_addr;
	return i;
}

/* delete an rtoken from all links */
int smc_rtoken_delete(struct smc_link *lnk, __be32 nw_rkey)
{
	struct smc_link_group *lgr = smc_get_lgr(lnk);
	u32 rkey = ntohl(nw_rkey);
	int i, j;

	for (i = 0; i < SMC_RMBS_PER_LGR_MAX; i++) {
		if (lgr->rtokens[i][lnk->link_idx].rkey == rkey &&
		    test_bit(i, lgr->rtokens_used_mask)) {
			for (j = 0; j < SMC_LINKS_PER_LGR_MAX; j++) {
				lgr->rtokens[i][j].rkey = 0;
				lgr->rtokens[i][j].dma_addr = 0;
			}
			clear_bit(i, lgr->rtokens_used_mask);
			return 0;
		}
	}
	return -ENOENT;
}

/* save rkey and dma_addr received from peer during clc handshake */
int smc_rmb_rtoken_handling(struct smc_connection *conn,
			    struct smc_link *lnk,
			    struct smc_clc_msg_accept_confirm *clc)
{
	conn->rtoken_idx = smc_rtoken_add(lnk, clc->rmb_dma_addr,
					  clc->rmb_rkey);
	if (conn->rtoken_idx < 0)
		return conn->rtoken_idx;
	return 0;
}

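/* mark all IB ports and ISM devices as going away so that they are no longer
 * picked for new link groups
 */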
static void smc_core_going_away(void)
{
	struct smc_ib_device *smcibdev;
	struct smcd_dev *smcd;

	spin_lock(&smc_ib_devices.lock);
	list_for_each_entry(smcibdev, &smc_ib_devices.list, list) {
		int i;

		for (i = 0; i < SMC_MAX_PORTS; i++)
			set_bit(i, smcibdev->ports_going_away);
	}
	spin_unlock(&smc_ib_devices.lock);

	spin_lock(&smcd_dev_list.lock);
	list_for_each_entry(smcd, &smcd_dev_list.list, list) {
		smcd->going_away = 1;
	}
	spin_unlock(&smcd_dev_list.lock);
}

/* Clean up all SMC link groups */
static void smc_lgrs_shutdown(void)
{
	struct smcd_dev *smcd;

	smc_core_going_away();

	smc_smcr_terminate_all(NULL);

	spin_lock(&smcd_dev_list.lock);
	list_for_each_entry(smcd, &smcd_dev_list.list, list)
		smc_smcd_terminate_all(smcd);
	spin_unlock(&smcd_dev_list.lock);
}

static int smc_core_reboot_event(struct notifier_block *this,
				 unsigned long event, void *ptr)
{
	smc_lgrs_shutdown();
	smc_ib_unregister_client();
	return 0;
}

static struct notifier_block smc_reboot_notifier = {
	.notifier_call = smc_core_reboot_event,
};

int __init smc_core_init(void)
{
	return register_reboot_notifier(&smc_reboot_notifier);
}

/* Called (from smc_exit) when module is removed */
void smc_core_exit(void)
{
	unregister_reboot_notifier(&smc_reboot_notifier);
	smc_lgrs_shutdown();
}