/* SPDX-License-Identifier: GPL-2.0-only */
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2013 Solarflare Communications Inc.
 */

#ifndef EFX_EFX_H
#define EFX_EFX_H

#include "net_driver.h"
#include "filter.h"

int efx_net_open(struct net_device *net_dev);
int efx_net_stop(struct net_device *net_dev);

/* TX */
int efx_probe_tx_queue(struct efx_tx_queue *tx_queue);
void efx_remove_tx_queue(struct efx_tx_queue *tx_queue);
void efx_init_tx_queue(struct efx_tx_queue *tx_queue);
void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue);
void efx_fini_tx_queue(struct efx_tx_queue *tx_queue);
netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
				struct net_device *net_dev);
netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
int efx_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
		 void *type_data);
unsigned int efx_tx_max_skb_descs(struct efx_nic *efx);
extern unsigned int efx_piobuf_size;
extern bool efx_separate_tx_channels;

/* RX */
void efx_set_default_rx_indir_table(struct efx_nic *efx,
				    struct efx_rss_context *ctx);
void efx_rx_config_page_split(struct efx_nic *efx);
int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
void efx_remove_rx_queue(struct efx_rx_queue *rx_queue);
void efx_init_rx_queue(struct efx_rx_queue *rx_queue);
void efx_fini_rx_queue(struct efx_rx_queue *rx_queue);
void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic);
void efx_rx_slow_fill(struct timer_list *t);
void __efx_rx_packet(struct efx_channel *channel);
void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
		   unsigned int n_frags, unsigned int len, u16 flags);
static inline void efx_rx_flush_packet(struct efx_channel *channel)
{
	if (channel->rx_pkt_n_frags)
		__efx_rx_packet(channel);
}
void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);

#define EFX_MAX_DMAQ_SIZE 4096UL
#define EFX_DEFAULT_DMAQ_SIZE 1024UL
#define EFX_MIN_DMAQ_SIZE 512UL

#define EFX_MAX_EVQ_SIZE 16384UL
#define EFX_MIN_EVQ_SIZE 512UL

/* Maximum number of TCP segments we support for soft-TSO */
#define EFX_TSO_MAX_SEGS	100

/* The smallest [rt]xq_entries that the driver supports.  RX minimum
 * is a bit arbitrary.  For TX, we must have space for at least 2
 * TSO skbs.
 */
#define EFX_RXQ_MIN_ENT		128U
#define EFX_TXQ_MIN_ENT(efx)	(2 * efx_tx_max_skb_descs(efx))
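
/* For example, if efx_tx_max_skb_descs() reported 24 descriptors for a
 * worst-case TSO skb (an illustrative figure, not a datasheet value), the
 * TX minimum would come to 48 entries.
 */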

/* All EF10 architecture NICs steal one bit of the DMAQ size for various
 * other purposes when counting TxQ entries, so we halve the queue size.
 */
#define EFX_TXQ_MAX_ENT(efx)	(EFX_WORKAROUND_EF10(efx) ? \
				 EFX_MAX_DMAQ_SIZE / 2 : EFX_MAX_DMAQ_SIZE)
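
/* With EFX_MAX_DMAQ_SIZE at 4096, that works out to 2048 usable TX queue
 * entries on NICs needing the EF10 workaround and the full 4096 elsewhere.
 */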

static inline bool efx_rss_enabled(struct efx_nic *efx)
{
	return efx->rss_spread > 1;
}

/* Filters */

void efx_mac_reconfigure(struct efx_nic *efx);

/**
 * efx_filter_insert_filter - add or replace a filter
 * @efx: NIC in which to insert the filter
 * @spec: Specification for the filter
 * @replace_equal: Flag for whether the specified filter may replace an
 *	existing filter with equal priority
 *
 * On success, return the filter ID.
 * On failure, return a negative error code.
 *
 * If existing filters have equal match values to the new filter spec,
 * then the new filter might replace them or the function might fail,
 * as follows.
 *
 * 1. If the existing filters have lower priority, or @replace_equal
 *    is set and they have equal priority, replace them.
 *
 * 2. If the existing filters have higher priority, return -%EPERM.
 *
 * 3. If !efx_filter_is_mc_recipient(@spec), or the NIC does not
 *    support delivery to multiple recipients, return -%EEXIST.
 *
 * This implies that filters for multiple multicast recipients must
 * all be inserted with the same priority and @replace_equal = %false.
 */
static inline s32 efx_filter_insert_filter(struct efx_nic *efx,
					   struct efx_filter_spec *spec,
					   bool replace_equal)
{
	return efx->type->filter_insert(efx, spec, replace_equal);
}
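
/* Usage sketch (illustrative, not part of the driver API): build a spec
 * with the helpers declared in filter.h and insert it.  rxq_index and
 * ip_addr are placeholders supplied by the caller.
 *
 *	struct efx_filter_spec spec;
 *	s32 rc;
 *
 *	efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL, 0, rxq_index);
 *	efx_filter_set_ipv4_local(&spec, IPPROTO_TCP, ip_addr, htons(80));
 *	rc = efx_filter_insert_filter(efx, &spec, false);
 *	(on success rc is the filter ID; on failure a negative errno)
 */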

/**
 * efx_filter_remove_id_safe - remove a filter by ID, carefully
 * @efx: NIC from which to remove the filter
 * @priority: Priority of filter, as passed to @efx_filter_insert_filter
 * @filter_id: ID of filter, as returned by @efx_filter_insert_filter
 *
 * This function will range-check @filter_id, so it is safe to call
 * with a value passed from userland.
 */
static inline int efx_filter_remove_id_safe(struct efx_nic *efx,
					    enum efx_filter_priority priority,
					    u32 filter_id)
{
	return efx->type->filter_remove_safe(efx, priority, filter_id);
}

/**
 * efx_filter_get_filter_safe - retrieve a filter by ID, carefully
 * @efx: NIC from which to retrieve the filter
 * @priority: Priority of filter, as passed to @efx_filter_insert_filter
 * @filter_id: ID of filter, as returned by @efx_filter_insert_filter
 * @spec: Buffer in which to store filter specification
 *
 * This function will range-check @filter_id, so it is safe to call
 * with a value passed from userland.
 */
static inline int
efx_filter_get_filter_safe(struct efx_nic *efx,
			   enum efx_filter_priority priority,
			   u32 filter_id, struct efx_filter_spec *spec)
{
	return efx->type->filter_get_safe(efx, priority, filter_id, spec);
}
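
/* Illustrative follow-up to the insert sketch above: a filter ID, even one
 * received from userland, can be read back or removed safely because both
 * helpers range-check it.
 *
 *	struct efx_filter_spec spec;
 *
 *	if (efx_filter_get_filter_safe(efx, EFX_FILTER_PRI_MANUAL, id,
 *				       &spec) == 0)
 *		efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_MANUAL, id);
 */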

static inline u32 efx_filter_count_rx_used(struct efx_nic *efx,
					   enum efx_filter_priority priority)
{
	return efx->type->filter_count_rx_used(efx, priority);
}
static inline u32 efx_filter_get_rx_id_limit(struct efx_nic *efx)
{
	return efx->type->filter_get_rx_id_limit(efx);
}
static inline s32 efx_filter_get_rx_ids(struct efx_nic *efx,
					enum efx_filter_priority priority,
					u32 *buf, u32 size)
{
	return efx->type->filter_get_rx_ids(efx, priority, buf, size);
}
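
/* Sketch of enumerating the RX filter IDs in use at one priority, along the
 * lines of the driver's ethtool hooks (buffer handling is illustrative):
 *
 *	u32 count = efx_filter_count_rx_used(efx, EFX_FILTER_PRI_MANUAL);
 *	u32 *buf = kcalloc(count, sizeof(*buf), GFP_KERNEL);
 *	s32 n;
 *
 *	if (buf) {
 *		n = efx_filter_get_rx_ids(efx, EFX_FILTER_PRI_MANUAL,
 *					  buf, count);
 *		(n is the number of IDs written, or a negative error if
 *		 the buffer was too small)
 *		kfree(buf);
 *	}
 */
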
#ifdef CONFIG_RFS_ACCEL
int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
		   u16 rxq_index, u32 flow_id);
bool __efx_filter_rfs_expire(struct efx_channel *channel, unsigned int quota);
static inline void efx_filter_rfs_expire(struct work_struct *data)
{
	struct efx_channel *channel = container_of(data, struct efx_channel,
						   filter_work);
	unsigned int time = jiffies - channel->rfs_last_expiry, quota;

	/* Scale the expiry-scan quota so that the channel's whole filter
	 * table is examined roughly once every 30 seconds, and skip the
	 * pass entirely until at least ~20 filters' worth of work has
	 * accrued since the last successful pass.
	 */
	quota = channel->rfs_filter_count * time / (30 * HZ);
	if (quota > 20 &&
	    __efx_filter_rfs_expire(channel, min(channel->rfs_filter_count, quota)))
		channel->rfs_last_expiry += time;
}
#define efx_filter_rfs_enabled() 1
#else
static inline void efx_filter_rfs_expire(struct work_struct *data) {}
#define efx_filter_rfs_enabled() 0
#endif
bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec);

bool efx_filter_spec_equal(const struct efx_filter_spec *left,
			   const struct efx_filter_spec *right);
u32 efx_filter_spec_hash(const struct efx_filter_spec *spec);

#ifdef CONFIG_RFS_ACCEL
bool efx_rps_check_rule(struct efx_arfs_rule *rule, unsigned int filter_idx,
			bool *force);

struct efx_arfs_rule *efx_rps_hash_find(struct efx_nic *efx,
					const struct efx_filter_spec *spec);

/* @new is written to indicate if entry was newly added (true) or if an old
 * entry was found and returned (false).
 */
struct efx_arfs_rule *efx_rps_hash_add(struct efx_nic *efx,
				       const struct efx_filter_spec *spec,
				       bool *new);

void efx_rps_hash_del(struct efx_nic *efx, const struct efx_filter_spec *spec);
#endif

/* RSS contexts */
struct efx_rss_context *efx_alloc_rss_context_entry(struct efx_nic *efx);
struct efx_rss_context *efx_find_rss_context_entry(struct efx_nic *efx, u32 id);
void efx_free_rss_context_entry(struct efx_rss_context *ctx);
static inline bool efx_rss_active(struct efx_rss_context *ctx)
{
	return ctx->context_id != EFX_EF10_RSS_CONTEXT_INVALID;
}
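
/* Lifecycle sketch (illustrative; locking elided, the context list is
 * normally manipulated under the driver's RSS lock): allocate an entry,
 * let the NIC configuration path assign ctx->context_id, and free the
 * entry if it never reached hardware.
 *
 *	struct efx_rss_context *ctx = efx_alloc_rss_context_entry(efx);
 *
 *	if (ctx) {
 *		(program the NIC; success sets ctx->context_id)
 *		if (!efx_rss_active(ctx))
 *			efx_free_rss_context_entry(ctx);
 *	}
 */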

/* Channels */
int efx_channel_dummy_op_int(struct efx_channel *channel);
void efx_channel_dummy_op_void(struct efx_channel *channel);
int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries);

/* Ports */
int efx_reconfigure_port(struct efx_nic *efx);
int __efx_reconfigure_port(struct efx_nic *efx);

/* Ethtool support */
extern const struct ethtool_ops efx_ethtool_ops;

/* Reset handling */
int efx_reset(struct efx_nic *efx, enum reset_type method);
void efx_reset_down(struct efx_nic *efx, enum reset_type method);
int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok);
int efx_try_recovery(struct efx_nic *efx);

/* Global */
void efx_schedule_reset(struct efx_nic *efx, enum reset_type type);
unsigned int efx_usecs_to_ticks(struct efx_nic *efx, unsigned int usecs);
unsigned int efx_ticks_to_usecs(struct efx_nic *efx, unsigned int ticks);
int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
			    unsigned int rx_usecs, bool rx_adaptive,
			    bool rx_may_override_tx);
void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
			    unsigned int *rx_usecs, bool *rx_adaptive);
void efx_stop_eventq(struct efx_channel *channel);
void efx_start_eventq(struct efx_channel *channel);

/* Dummy PHY ops for PHY drivers */
int efx_port_dummy_op_int(struct efx_nic *efx);
void efx_port_dummy_op_void(struct efx_nic *efx);

/* Update the generic software stats in the passed stats array */
void efx_update_sw_stats(struct efx_nic *efx, u64 *stats);

/* MTD */
#ifdef CONFIG_SFC_MTD
int efx_mtd_add(struct efx_nic *efx, struct efx_mtd_partition *parts,
		size_t n_parts, size_t sizeof_part);
static inline int efx_mtd_probe(struct efx_nic *efx)
{
	return efx->type->mtd_probe(efx);
}
void efx_mtd_rename(struct efx_nic *efx);
void efx_mtd_remove(struct efx_nic *efx);
#else
static inline int efx_mtd_probe(struct efx_nic *efx) { return 0; }
static inline void efx_mtd_rename(struct efx_nic *efx) {}
static inline void efx_mtd_remove(struct efx_nic *efx) {}
#endif

#ifdef CONFIG_SFC_SRIOV
/* Number of virtual interfaces (VIs) allocated to each VF */
static inline unsigned int efx_vf_size(struct efx_nic *efx)
{
	return 1 << efx->vi_scale;
}
#endif

static inline void efx_schedule_channel(struct efx_channel *channel)
{
	netif_vdbg(channel->efx, intr, channel->efx->net_dev,
		   "channel %d scheduling NAPI poll on CPU%d\n",
		   channel->channel, raw_smp_processor_id());

	napi_schedule(&channel->napi_str);
}

static inline void efx_schedule_channel_irq(struct efx_channel *channel)
{
	/* Record the CPU handling the interrupt, for the event self-test */
	channel->event_test_cpu = raw_smp_processor_id();
	efx_schedule_channel(channel);
}

void efx_link_status_changed(struct efx_nic *efx);
void efx_link_set_advertising(struct efx_nic *efx,
			      const unsigned long *advertising);
void efx_link_clear_advertising(struct efx_nic *efx);
void efx_link_set_wanted_fc(struct efx_nic *efx, u8);

static inline void efx_device_detach_sync(struct efx_nic *efx)
{
	struct net_device *dev = efx->net_dev;

	/* Lock/freeze all TX queues so that we can be sure the
	 * TX scheduler is stopped when we're done and before
	 * netif_device_present() becomes false.
	 */
	netif_tx_lock_bh(dev);
	netif_device_detach(dev);
	netif_tx_unlock_bh(dev);
}

static inline void efx_device_attach_if_not_resetting(struct efx_nic *efx)
{
	if ((efx->state != STATE_DISABLED) && !efx->reset_pending)
		netif_device_attach(efx->net_dev);
}
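
/* Typical pairing during reconfiguration (illustrative): quiesce the TX
 * scheduler, do the work, then re-attach unless a reset is in progress.
 *
 *	efx_device_detach_sync(efx);
 *	(reconfigure the hardware)
 *	efx_device_attach_if_not_resetting(efx);
 */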

/* If a read lock can be taken here, the semaphore was not held for write,
 * so warn and report failure; otherwise trust that the caller holds it
 * for write as required.
 */
static inline bool efx_rwsem_assert_write_locked(struct rw_semaphore *sem)
{
	if (WARN_ON(down_read_trylock(sem))) {
		up_read(sem);
		return false;
	}
	return true;
}

int efx_xdp_tx_buffers(struct efx_nic *efx, int n, struct xdp_frame **xdpfs,
		       bool flush);

#endif /* EFX_EFX_H */