/*
 *  IBM eServer eHCA Infiniband device driver for Linux on POWER
 *
 *  HCA query functions
 *
 *  Authors: Heiko J Schick <schickhj@de.ibm.com>
 *           Christoph Raisch <raisch@de.ibm.com>
 *
 *  Copyright (c) 2005 IBM Corporation
 *
 *  All rights reserved.
 *
 *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
 *  BSD.
 *
 * OpenIB BSD License
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials
 * provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "ehca_tools.h"
#include "ehca_iverbs.h"
#include "hcp_if.h"

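/*
 * Clamp an unsigned firmware value to INT_MAX, since most fields in
 * struct ib_device_attr are signed ints.
 */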
static unsigned int limit_uint(unsigned int value)
{
	return min_t(unsigned int, value, INT_MAX);
}

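/* report device-wide limits and capabilities of this HCA */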
int ehca_query_device(struct ib_device *ibdev, struct ib_device_attr *props)
{
	int i, ret = 0;
	struct ehca_shca *shca = container_of(ibdev, struct ehca_shca,
					      ib_device);
	struct hipz_query_hca *rblock;

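	/*
	 * Flat list of pairs: an IB verbs device capability flag followed
	 * by the corresponding eHCA firmware capability bit. Scanned below
	 * when building device_cap_flags.
	 */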
	static const u32 cap_mapping[] = {
		IB_DEVICE_RESIZE_MAX_WR,      HCA_CAP_WQE_RESIZE,
		IB_DEVICE_BAD_PKEY_CNTR,      HCA_CAP_BAD_P_KEY_CTR,
		IB_DEVICE_BAD_QKEY_CNTR,      HCA_CAP_Q_KEY_VIOL_CTR,
		IB_DEVICE_RAW_MULTI,          HCA_CAP_RAW_PACKET_MCAST,
		IB_DEVICE_AUTO_PATH_MIG,      HCA_CAP_AUTO_PATH_MIG,
		IB_DEVICE_CHANGE_PHY_PORT,    HCA_CAP_SQD_RTS_PORT_CHANGE,
		IB_DEVICE_UD_AV_PORT_ENFORCE, HCA_CAP_AH_PORT_NR_CHECK,
		IB_DEVICE_CURR_QP_STATE_MOD,  HCA_CAP_CUR_QP_STATE_MOD,
		IB_DEVICE_SHUTDOWN_PORT,      HCA_CAP_SHUTDOWN_PORT,
		IB_DEVICE_INIT_TYPE,          HCA_CAP_INIT_TYPE,
		IB_DEVICE_PORT_ACTIVE_EVENT,  HCA_CAP_PORT_ACTIVE_EVENT,
	};

	rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
	if (!rblock) {
		ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
		return -ENOMEM;
	}

	if (hipz_h_query_hca(shca->ipz_hca_handle, rblock) != H_SUCCESS) {
		ehca_err(&shca->ib_device, "Can't query device properties");
		ret = -EINVAL;
		goto query_device1;
	}

	memset(props, 0, sizeof(struct ib_device_attr));
	props->page_size_cap   = shca->hca_cap_mr_pgsize;
	props->fw_ver          = rblock->hw_ver;
	props->max_mr_size     = rblock->max_mr_size;
	props->vendor_id       = rblock->vendor_id >> 8;
	props->vendor_part_id  = rblock->vendor_part_id >> 16;
	props->hw_ver          = rblock->hw_ver;
	props->max_qp          = limit_uint(rblock->max_qp);
	props->max_qp_wr       = limit_uint(rblock->max_wqes_wq);
	props->max_sge         = limit_uint(rblock->max_sge);
	props->max_sge_rd      = limit_uint(rblock->max_sge_rd);
	props->max_cq          = limit_uint(rblock->max_cq);
	props->max_cqe         = limit_uint(rblock->max_cqe);
	props->max_mr          = limit_uint(rblock->max_mr);
	props->max_mw          = limit_uint(rblock->max_mw);
	props->max_pd          = limit_uint(rblock->max_pd);
	props->max_ah          = limit_uint(rblock->max_ah);
	props->max_ee          = limit_uint(rblock->max_rd_ee_context);
	props->max_rdd         = limit_uint(rblock->max_rd_domain);
	props->max_fmr         = limit_uint(rblock->max_mr);
	props->local_ca_ack_delay  = limit_uint(rblock->local_ca_ack_delay);
	props->max_qp_rd_atom  = limit_uint(rblock->max_rr_qp);
	props->max_ee_rd_atom  = limit_uint(rblock->max_rr_ee_context);
	props->max_res_rd_atom = limit_uint(rblock->max_rr_hca);
	props->max_qp_init_rd_atom = limit_uint(rblock->max_act_wqs_qp);
	props->max_ee_init_rd_atom = limit_uint(rblock->max_act_wqs_ee_context);

	if (EHCA_BMASK_GET(HCA_CAP_SRQ, shca->hca_cap)) {
		props->max_srq         = limit_uint(props->max_qp);
		props->max_srq_wr      = limit_uint(props->max_qp_wr);
		props->max_srq_sge     = 3;
	}

	props->max_pkeys           = 16;
	props->local_ca_ack_delay  = limit_uint(rblock->local_ca_ack_delay);
	props->max_raw_ipv6_qp     = limit_uint(rblock->max_raw_ipv6_qp);
	props->max_raw_ethy_qp     = limit_uint(rblock->max_raw_ethy_qp);
	props->max_mcast_grp       = limit_uint(rblock->max_mcast_grp);
	props->max_mcast_qp_attach = limit_uint(rblock->max_mcast_qp_attach);
	props->max_total_mcast_qp_attach
		= limit_uint(rblock->max_total_mcast_qp_attach);

	/* translate device capabilities */
	props->device_cap_flags = IB_DEVICE_SYS_IMAGE_GUID |
		IB_DEVICE_RC_RNR_NAK_GEN | IB_DEVICE_N_NOTIFY_CQ;
	for (i = 0; i < ARRAY_SIZE(cap_mapping); i += 2)
		if (rblock->hca_cap_indicators & cap_mapping[i + 1])
			props->device_cap_flags |= cap_mapping[i];

query_device1:
	ehca_free_fw_ctrlblock(rblock);

	return ret;
}

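/* translate the firmware MTU encoding into the IB MTU enum */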
static int map_mtu(struct ehca_shca *shca, u32 fw_mtu)
{
	switch (fw_mtu) {
	case 0x1:
		return IB_MTU_256;
	case 0x2:
		return IB_MTU_512;
	case 0x3:
		return IB_MTU_1024;
	case 0x4:
		return IB_MTU_2048;
	case 0x5:
		return IB_MTU_4096;
	default:
		ehca_err(&shca->ib_device, "Unknown MTU size: %x.",
			 fw_mtu);
		return 0;
	}
}

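/* translate the firmware VL capability encoding into the number of VLs */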
static int map_number_of_vls(struct ehca_shca *shca, u32 vl_cap)
{
	switch (vl_cap) {
	case 0x1:
		return 1;
	case 0x2:
		return 2;
	case 0x3:
		return 4;
	case 0x4:
		return 8;
	case 0x5:
		return 15;
	default:
		ehca_err(&shca->ib_device, "Invalid VL capability: %x.",
			 vl_cap);
		return 0;
	}
}

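/* report per-port attributes (MTU, LIDs, capability mask, link state) */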
int ehca_query_port(struct ib_device *ibdev,
		    u8 port, struct ib_port_attr *props)
{
	int ret = 0;
	u64 h_ret;
	struct ehca_shca *shca = container_of(ibdev, struct ehca_shca,
					      ib_device);
	struct hipz_query_port *rblock;

	rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
	if (!rblock) {
		ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
		return -ENOMEM;
	}

	h_ret = hipz_h_query_port(shca->ipz_hca_handle, port, rblock);
	if (h_ret != H_SUCCESS) {
		ehca_err(&shca->ib_device, "Can't query port properties");
		ret = -EINVAL;
		goto query_port1;
	}

	memset(props, 0, sizeof(struct ib_port_attr));

	props->active_mtu = props->max_mtu = map_mtu(shca, rblock->max_mtu);
	props->port_cap_flags  = rblock->capability_mask;
	props->gid_tbl_len     = rblock->gid_tbl_len;
	if (rblock->max_msg_sz)
		props->max_msg_sz      = rblock->max_msg_sz;
	else
		props->max_msg_sz      = 0x1 << 31;
	props->bad_pkey_cntr   = rblock->bad_pkey_cntr;
	props->qkey_viol_cntr  = rblock->qkey_viol_cntr;
	props->pkey_tbl_len    = rblock->pkey_tbl_len;
	props->lid             = rblock->lid;
	props->sm_lid          = rblock->sm_lid;
	props->lmc             = rblock->lmc;
	props->sm_sl           = rblock->sm_sl;
	props->subnet_timeout  = rblock->subnet_timeout;
	props->init_type_reply = rblock->init_type_reply;
	props->max_vl_num      = map_number_of_vls(shca, rblock->vl_cap);

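	/* newer firmware reports the physical port state directly */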
	if (rblock->state && rblock->phys_width) {
		props->phys_state      = rblock->phys_pstate;
		props->state           = rblock->phys_state;
		props->active_width    = rblock->phys_width;
		props->active_speed    = rblock->phys_speed;
	} else {
		/* old firmware releases don't report physical
		 * port info, so use default values
		 */
		props->phys_state      = 5;
		props->state           = rblock->state;
		props->active_width    = IB_WIDTH_12X;
		props->active_speed    = 0x1;
	}

query_port1:
	ehca_free_fw_ctrlblock(rblock);

	return ret;
}

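/*
 * Snapshot the SMA-relevant port attributes (LIDs, LMC, SM SL, P_Key
 * table). Allocates the control block with GFP_ATOMIC so it can be
 * called from contexts that must not sleep.
 */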
int ehca_query_sma_attr(struct ehca_shca *shca,
			u8 port, struct ehca_sma_attr *attr)
{
	int ret = 0;
	u64 h_ret;
	struct hipz_query_port *rblock;

	rblock = ehca_alloc_fw_ctrlblock(GFP_ATOMIC);
	if (!rblock) {
		ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
		return -ENOMEM;
	}

	h_ret = hipz_h_query_port(shca->ipz_hca_handle, port, rblock);
	if (h_ret != H_SUCCESS) {
		ehca_err(&shca->ib_device, "Can't query port properties");
		ret = -EINVAL;
		goto query_sma_attr1;
	}

	memset(attr, 0, sizeof(struct ehca_sma_attr));

	attr->lid    = rblock->lid;
	attr->lmc    = rblock->lmc;
	attr->sm_sl  = rblock->sm_sl;
	attr->sm_lid = rblock->sm_lid;

	attr->pkey_tbl_len = rblock->pkey_tbl_len;
	memcpy(attr->pkeys, rblock->pkey_entries, sizeof(attr->pkeys));

query_sma_attr1:
	ehca_free_fw_ctrlblock(rblock);

	return ret;
}

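/* return a single P_Key table entry of the given port */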
int ehca_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
{
	int ret = 0;
	u64 h_ret;
	struct ehca_shca *shca;
	struct hipz_query_port *rblock;

	shca = container_of(ibdev, struct ehca_shca, ib_device);
	if (index > 16) {
		ehca_err(&shca->ib_device, "Invalid index: %x.", index);
		return -EINVAL;
	}

	rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
	if (!rblock) {
		ehca_err(&shca->ib_device,  "Can't allocate rblock memory.");
		return -ENOMEM;
	}

	h_ret = hipz_h_query_port(shca->ipz_hca_handle, port, rblock);
	if (h_ret != H_SUCCESS) {
		ehca_err(&shca->ib_device, "Can't query port properties");
		ret = -EINVAL;
		goto query_pkey1;
	}

	memcpy(pkey, &rblock->pkey_entries[index], sizeof(u16));

query_pkey1:
	ehca_free_fw_ctrlblock(rblock);

	return ret;
}

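/* a GID consists of the 64-bit subnet prefix and the port GUID entry */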
int ehca_query_gid(struct ib_device *ibdev, u8 port,
		   int index, union ib_gid *gid)
{
	int ret = 0;
	u64 h_ret;
	struct ehca_shca *shca = container_of(ibdev, struct ehca_shca,
					      ib_device);
	struct hipz_query_port *rblock;

	if (index > 255) {
		ehca_err(&shca->ib_device, "Invalid index: %x.", index);
		return -EINVAL;
	}

	rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
	if (!rblock) {
		ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
		return -ENOMEM;
	}

	h_ret = hipz_h_query_port(shca->ipz_hca_handle, port, rblock);
	if (h_ret != H_SUCCESS) {
		ehca_err(&shca->ib_device, "Can't query port properties");
		ret = -EINVAL;
		goto query_gid1;
	}

	memcpy(&gid->raw[0], &rblock->gid_prefix, sizeof(u64));
	memcpy(&gid->raw[8], &rblock->guid_entries[index], sizeof(u64));

query_gid1:
	ehca_free_fw_ctrlblock(rblock);

	return ret;
}

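/* port capability bits that may be set or cleared via ehca_modify_port() */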
static const u32 allowed_port_caps = (
	IB_PORT_SM | IB_PORT_LED_INFO_SUP | IB_PORT_CM_SUP |
	IB_PORT_SNMP_TUNNEL_SUP | IB_PORT_DEVICE_MGMT_SUP |
	IB_PORT_VENDOR_CLASS_SUP);

int ehca_modify_port(struct ib_device *ibdev,
		     u8 port, int port_modify_mask,
		     struct ib_port_modify *props)
{
	int ret = 0;
	struct ehca_shca *shca;
	struct hipz_query_port *rblock;
	u32 cap;
	u64 hret;

	shca = container_of(ibdev, struct ehca_shca, ib_device);
	if ((props->set_port_cap_mask | props->clr_port_cap_mask)
	    & ~allowed_port_caps) {
		ehca_err(&shca->ib_device, "Non-changeable bits set in masks  "
			 "set=%x  clr=%x  allowed=%x", props->set_port_cap_mask,
			 props->clr_port_cap_mask, allowed_port_caps);
		return -EINVAL;
	}

	if (mutex_lock_interruptible(&shca->modify_mutex))
		return -ERESTARTSYS;

	rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
	if (!rblock) {
		ehca_err(&shca->ib_device,  "Can't allocate rblock memory.");
		ret = -ENOMEM;
		goto modify_port1;
	}

	hret = hipz_h_query_port(shca->ipz_hca_handle, port, rblock);
	if (hret != H_SUCCESS) {
		ehca_err(&shca->ib_device, "Can't query port properties");
		ret = -EINVAL;
		goto modify_port2;
	}

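	/* apply the set/clear masks to the current capability mask */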
	cap = (rblock->capability_mask | props->set_port_cap_mask)
		& ~props->clr_port_cap_mask;

	hret = hipz_h_modify_port(shca->ipz_hca_handle, port,
				  cap, props->init_type, port_modify_mask);
	if (hret != H_SUCCESS) {
		ehca_err(&shca->ib_device, "Modify port failed  h_ret=%li",
			 hret);
		ret = -EINVAL;
	}

modify_port2:
	ehca_free_fw_ctrlblock(rblock);

modify_port1:
	mutex_unlock(&shca->modify_mutex);

	return ret;
}