/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef _CORE_PRIV_H
#define _CORE_PRIV_H

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/cgroup_rdma.h>

#include <rdma/ib_verbs.h>
#include <rdma/opa_addr.h>
#include <rdma/ib_mad.h>
#include "mad_priv.h"

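/*
 * Tracks the QPs that currently use a given pkey_index on a port; these
 * entries are consulted by the InfiniBand security code when P_Key table
 * contents change.
 */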
struct pkey_index_qp_list {
	struct list_head    pkey_index_list;
	u16                 pkey_index;
	/* Lock to hold while iterating the qp_list. */
	spinlock_t          qp_list_lock;
	struct list_head    qp_list;
};

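/*
 * RDMA CM configfs interface; compiled down to no-op stubs when
 * CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS is not enabled.
 */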
#if IS_ENABLED(CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS)
int cma_configfs_init(void);
void cma_configfs_exit(void);
#else
static inline int cma_configfs_init(void)
{
	return 0;
}

static inline void cma_configfs_exit(void)
{
}
#endif
struct cma_device;
void cma_ref_dev(struct cma_device *cma_dev);
void cma_deref_dev(struct cma_device *cma_dev);
typedef bool (*cma_device_filter)(struct ib_device *, void *);
struct cma_device *cma_enum_devices_by_ibdev(cma_device_filter	filter,
					     void		*cookie);
int cma_get_default_gid_type(struct cma_device *cma_dev,
			     unsigned int port);
int cma_set_default_gid_type(struct cma_device *cma_dev,
			     unsigned int port,
			     enum ib_gid_type default_gid_type);
int cma_get_default_roce_tos(struct cma_device *cma_dev, unsigned int port);
int cma_set_default_roce_tos(struct cma_device *cma_dev, unsigned int port,
			     u8 default_roce_tos);
struct ib_device *cma_get_ib_dev(struct cma_device *cma_dev);

int  ib_device_register_sysfs(struct ib_device *device,
			      int (*port_callback)(struct ib_device *,
						   u8, struct kobject *));
void ib_device_unregister_sysfs(struct ib_device *device);

void ib_cache_setup(void);
void ib_cache_cleanup(void);

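/*
 * Helpers for walking the net_devices associated with the RoCE ports of a
 * single ib_device (ib_enum_roce_netdev) or of every registered device
 * (ib_enum_all_roce_netdevs).  The filter decides, per <device, port,
 * netdev> tuple, whether the callback runs.
 *
 * Minimal usage sketch -- my_filter, my_cb and target_ndev are hypothetical
 * names used for illustration only, not existing kernel symbols:
 *
 *	static int my_filter(struct ib_device *device, u8 port,
 *			     struct net_device *idev, void *cookie)
 *	{
 *		return idev == cookie;		// match one specific netdev
 *	}
 *
 *	static void my_cb(struct ib_device *device, u8 port,
 *			  struct net_device *idev, void *cookie)
 *	{
 *		// act on each matching <device, port, netdev> tuple
 *	}
 *
 *	ib_enum_all_roce_netdevs(my_filter, target_ndev, my_cb, NULL);
 */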
typedef void (*roce_netdev_callback)(struct ib_device *device, u8 port,
	      struct net_device *idev, void *cookie);

typedef int (*roce_netdev_filter)(struct ib_device *device, u8 port,
	     struct net_device *idev, void *cookie);

void ib_enum_roce_netdev(struct ib_device *ib_dev,
			 roce_netdev_filter filter,
			 void *filter_cookie,
			 roce_netdev_callback cb,
			 void *cookie);
void ib_enum_all_roce_netdevs(roce_netdev_filter filter,
			      void *filter_cookie,
			      roce_netdev_callback cb,
			      void *cookie);

typedef int (*nldev_callback)(struct ib_device *device,
			      struct sk_buff *skb,
			      struct netlink_callback *cb,
			      unsigned int idx);

int ib_enum_all_devs(nldev_callback nldev_cb, struct sk_buff *skb,
		     struct netlink_callback *cb);

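/*
 * Per-device callback used by ib_enum_all_devs() when servicing an RDMA
 * netlink dump request; @idx is the position of the device in the dump.
 */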
enum ib_cache_gid_default_mode {
	IB_CACHE_GID_DEFAULT_MODE_SET,
	IB_CACHE_GID_DEFAULT_MODE_DELETE
};

int ib_cache_gid_parse_type_str(const char *buf);

const char *ib_cache_gid_type_str(enum ib_gid_type gid_type);

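/*
 * RoCE GID table cache management: default GIDs are set or deleted per
 * port, and individual entries are added/removed as the associated
 * net_devices change (driven by roce_gid_mgmt).
 */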
void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
				  struct net_device *ndev,
				  unsigned long gid_type_mask,
				  enum ib_cache_gid_default_mode mode);

int ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
		     union ib_gid *gid, struct ib_gid_attr *attr);

int ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
		     union ib_gid *gid, struct ib_gid_attr *attr);

int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
				     struct net_device *ndev);

int roce_gid_mgmt_init(void);
void roce_gid_mgmt_cleanup(void);

unsigned long roce_gid_type_mask_support(struct ib_device *ib_dev, u8 port);

int ib_cache_setup_one(struct ib_device *device);
void ib_cache_cleanup_one(struct ib_device *device);
void ib_cache_release_one(struct ib_device *device);

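/*
 * RDMA cgroup accounting.  When CONFIG_CGROUP_RDMA is disabled the stubs
 * below make every charge succeed and every uncharge a no-op.
 */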
#ifdef CONFIG_CGROUP_RDMA
int ib_device_register_rdmacg(struct ib_device *device);
void ib_device_unregister_rdmacg(struct ib_device *device);

int ib_rdmacg_try_charge(struct ib_rdmacg_object *cg_obj,
			 struct ib_device *device,
			 enum rdmacg_resource_type resource_index);

void ib_rdmacg_uncharge(struct ib_rdmacg_object *cg_obj,
			struct ib_device *device,
			enum rdmacg_resource_type resource_index);
#else
static inline int ib_device_register_rdmacg(struct ib_device *device)
{ return 0; }

static inline void ib_device_unregister_rdmacg(struct ib_device *device)
{ }

static inline int ib_rdmacg_try_charge(struct ib_rdmacg_object *cg_obj,
				       struct ib_device *device,
				       enum rdmacg_resource_type resource_index)
{ return 0; }

static inline void ib_rdmacg_uncharge(struct ib_rdmacg_object *cg_obj,
				      struct ib_device *device,
				      enum rdmacg_resource_type resource_index)
{ }
#endif

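/* Caller must hold rcu_read_lock(); see netdev_has_upper_dev_all_rcu(). */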
static inline bool rdma_is_upper_dev_rcu(struct net_device *dev,
					 struct net_device *upper)
{
	return netdev_has_upper_dev_all_rcu(dev, upper);
}

int addr_init(void);
void addr_cleanup(void);

int ib_mad_init(void);
void ib_mad_cleanup(void);

int ib_sa_init(void);
void ib_sa_cleanup(void);

int rdma_nl_init(void);
void rdma_nl_exit(void);

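/*
 * Handlers for RDMA netlink messages: SA path resolution responses, SA
 * timeout configuration, and IP address resolution responses.
 */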
int ib_nl_handle_resolve_resp(struct sk_buff *skb,
			      struct nlmsghdr *nlh,
			      struct netlink_ext_ack *extack);
int ib_nl_handle_set_timeout(struct sk_buff *skb,
			     struct nlmsghdr *nlh,
			     struct netlink_ext_ack *extack);
int ib_nl_handle_ip_res_resp(struct sk_buff *skb,
			     struct nlmsghdr *nlh,
			     struct netlink_ext_ack *extack);

int ib_get_cached_subnet_prefix(struct ib_device *device,
				u8                port_num,
				u64              *sn_pfx);

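/*
 * InfiniBand LSM security hooks.  Without CONFIG_SECURITY_INFINIBAND these
 * collapse to stubs, and ib_security_modify_qp() simply forwards to the
 * driver's modify_qp verb.
 */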
#ifdef CONFIG_SECURITY_INFINIBAND
void ib_security_destroy_port_pkey_list(struct ib_device *device);

void ib_security_cache_change(struct ib_device *device,
			      u8 port_num,
			      u64 subnet_prefix);

int ib_security_modify_qp(struct ib_qp *qp,
			  struct ib_qp_attr *qp_attr,
			  int qp_attr_mask,
			  struct ib_udata *udata);

int ib_create_qp_security(struct ib_qp *qp, struct ib_device *dev);
void ib_destroy_qp_security_begin(struct ib_qp_security *sec);
void ib_destroy_qp_security_abort(struct ib_qp_security *sec);
void ib_destroy_qp_security_end(struct ib_qp_security *sec);
int ib_open_shared_qp_security(struct ib_qp *qp, struct ib_device *dev);
void ib_close_shared_qp_security(struct ib_qp_security *sec);
int ib_mad_agent_security_setup(struct ib_mad_agent *agent,
				enum ib_qp_type qp_type);
void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent);
int ib_mad_enforce_security(struct ib_mad_agent_private *map, u16 pkey_index);
#else
static inline void ib_security_destroy_port_pkey_list(struct ib_device *device)
{
}

static inline void ib_security_cache_change(struct ib_device *device,
					    u8 port_num,
					    u64 subnet_prefix)
{
}

static inline int ib_security_modify_qp(struct ib_qp *qp,
					struct ib_qp_attr *qp_attr,
					int qp_attr_mask,
					struct ib_udata *udata)
{
	return qp->device->modify_qp(qp->real_qp,
				     qp_attr,
				     qp_attr_mask,
				     udata);
}

static inline int ib_create_qp_security(struct ib_qp *qp,
					struct ib_device *dev)
{
	return 0;
}

static inline void ib_destroy_qp_security_begin(struct ib_qp_security *sec)
{
}

static inline void ib_destroy_qp_security_abort(struct ib_qp_security *sec)
{
}

static inline void ib_destroy_qp_security_end(struct ib_qp_security *sec)
{
}

static inline int ib_open_shared_qp_security(struct ib_qp *qp,
					     struct ib_device *dev)
{
	return 0;
}

static inline void ib_close_shared_qp_security(struct ib_qp_security *sec)
{
}

static inline int ib_mad_agent_security_setup(struct ib_mad_agent *agent,
					      enum ib_qp_type qp_type)
{
	return 0;
}

static inline void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent)
{
}

static inline int ib_mad_enforce_security(struct ib_mad_agent_private *map,
					  u16 pkey_index)
{
	return 0;
}
#endif

struct ib_device *__ib_device_get_by_index(u32 ifindex);
/* RDMA device netlink */
void nldev_init(void);
void nldev_exit(void);
#endif /* _CORE_PRIV_H */