gve_utils.c
// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Google virtual Ethernet (gve) driver
 *
 * Copyright (C) 2015-2021 Google, Inc.
 */

#include "gve.h"
#include "gve_adminq.h"
#include "gve_utils.h"

void gve_tx_remove_from_block(struct gve_priv *priv, int queue_idx)
{
	struct gve_notify_block *block =
			&priv->ntfy_blocks[gve_tx_idx_to_ntfy(priv, queue_idx)];

	block->tx = NULL;
}

void gve_tx_add_to_block(struct gve_priv *priv, int queue_idx)
{
	unsigned int active_cpus = min_t(int, priv->num_ntfy_blks / 2,
					 num_online_cpus());
	int ntfy_idx = gve_tx_idx_to_ntfy(priv, queue_idx);
	struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
	struct gve_tx_ring *tx = &priv->tx[queue_idx];

	block->tx = tx;
	tx->ntfy_id = ntfy_idx;
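
	/* Use XPS to map this queue to the CPU derived from its notification
	 * block index, keeping transmit work and completion handling aligned
	 * on the same CPU.
	 */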
	netif_set_xps_queue(priv->dev, get_cpu_mask(ntfy_idx % active_cpus),
			    queue_idx);
}

void gve_rx_remove_from_block(struct gve_priv *priv, int queue_idx)
{
	struct gve_notify_block *block =
			&priv->ntfy_blocks[gve_rx_idx_to_ntfy(priv, queue_idx)];

	block->rx = NULL;
}

void gve_rx_add_to_block(struct gve_priv *priv, int queue_idx)
{
	u32 ntfy_idx = gve_rx_idx_to_ntfy(priv, queue_idx);
	struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
	struct gve_rx_ring *rx = &priv->rx[queue_idx];

	block->rx = rx;
	rx->ntfy_id = ntfy_idx;
}

struct sk_buff *gve_rx_copy(struct net_device *dev, struct napi_struct *napi,
			    struct gve_rx_slot_page_info *page_info, u16 len,
			    u16 padding, struct gve_rx_ctx *ctx)
{
	void *va = page_info->page_address + padding + page_info->page_offset;
	int skb_linear_offset = 0;
	bool set_protocol = false;
	struct sk_buff *skb;

	if (ctx) {
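		/* Multi-descriptor packet: copy this fragment into the shared
		 * skb_head; the protocol is set only once the final expected
		 * fragment has been copied.
		 */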
		if (!ctx->skb_head)
			ctx->skb_head = napi_alloc_skb(napi, ctx->total_expected_size);

		if (unlikely(!ctx->skb_head))
			return NULL;
		skb = ctx->skb_head;
		skb_linear_offset = skb->len;
		set_protocol = ctx->curr_frag_cnt == ctx->expected_frag_cnt - 1;
	} else {
		skb = napi_alloc_skb(napi, len);

		if (unlikely(!skb))
			return NULL;
		set_protocol = true;
	}
	__skb_put(skb, len);
	skb_copy_to_linear_data_offset(skb, skb_linear_offset, va, len);

	if (set_protocol)
		skb->protocol = eth_type_trans(skb, dev);

	return skb;
}

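/* Consume one reference from the page's pre-charged refcount bias,
 * topping the bias and the page refcount back up once it is exhausted.
 */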
void gve_dec_pagecnt_bias(struct gve_rx_slot_page_info *page_info)
{
	page_info->pagecnt_bias--;
	if (page_info->pagecnt_bias == 0) {
		int pagecount = page_count(page_info->page);

		/* If we have run out of bias - set it back up to INT_MAX
		 * minus the existing refs.
		 */
		page_info->pagecnt_bias = INT_MAX - pagecount;

		/* Set pagecount back up to max. */
		page_ref_add(page_info->page, INT_MAX - pagecount);
	}
}