/*
 * Copyright (C) 2016 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General License Version 2,
 * June 1991 as shown in the file COPYING in the top-level directory of this
 * source tree or the BSD 2-Clause License provided below.  You have the
 * option to license this software under the complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt)	"NFP net bpf: " fmt

#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/kernel.h>
#include <linux/pkt_cls.h>

#include "main.h"

/* Analyzer/verifier definitions */
struct nfp_bpf_analyzer_priv {
	struct nfp_prog *prog;
	struct nfp_insn_meta *meta;
};

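/* Walk the instruction meta list to the entry for @insn_idx.  Start from
 * whichever known point is closest -- the previously visited meta, the list
 * head or the list tail -- and step forward or backward to the target.
 * forward/backward are unsigned and may wrap; only the smaller of the two
 * is a meaningful distance.
 */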
static struct nfp_insn_meta *
nfp_bpf_goto_meta(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
		  unsigned int insn_idx, unsigned int n_insns)
{
	unsigned int forward, backward, i;

	backward = meta->n - insn_idx;
	forward = insn_idx - meta->n;

	if (min(forward, backward) > n_insns - insn_idx - 1) {
		backward = n_insns - insn_idx - 1;
		meta = nfp_prog_last_meta(nfp_prog);
	}
	if (min(forward, backward) > insn_idx && backward > insn_idx) {
		forward = insn_idx;
		meta = nfp_prog_first_meta(nfp_prog);
	}

	if (forward < backward)
		for (i = 0; i < forward; i++)
			meta = nfp_meta_next(meta);
	else
		for (i = 0; i < backward; i++)
			meta = nfp_meta_prev(meta);

	return meta;
}

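/* Validate the value returned in R0 when the program exits.  R0 must hold a
 * known constant.  XDP programs are not restricted here; legacy TC modes may
 * only return 0 or -1 (all ones in the lower 32 bits); TC direct-action mode
 * rejects TC action codes other than SHOT, STOLEN, QUEUED and REDIRECT
 * (values above TC_ACT_REDIRECT are let through).
 */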
static int
nfp_bpf_check_exit(struct nfp_prog *nfp_prog,
		   struct bpf_verifier_env *env)
{
	const struct bpf_reg_state *reg0 = cur_regs(env) + BPF_REG_0;
	u64 imm;

	if (nfp_prog->act == NN_ACT_XDP)
		return 0;

	if (!(reg0->type == SCALAR_VALUE && tnum_is_const(reg0->var_off))) {
		char tn_buf[48];

		tnum_strn(tn_buf, sizeof(tn_buf), reg0->var_off);
		pr_info("unsupported exit state: %d, var_off: %s\n",
			reg0->type, tn_buf);
		return -EINVAL;
	}

	imm = reg0->var_off.value;
	if (nfp_prog->act != NN_ACT_DIRECT && imm != 0 && (imm & ~0U) != ~0U) {
		pr_info("unsupported exit state: %d, imm: %llx\n",
			reg0->type, imm);
		return -EINVAL;
	}

	if (nfp_prog->act == NN_ACT_DIRECT && imm <= TC_ACT_REDIRECT &&
	    imm != TC_ACT_SHOT && imm != TC_ACT_STOLEN &&
	    imm != TC_ACT_QUEUED && imm != TC_ACT_REDIRECT) {
		pr_info("unsupported exit state: %d, imm: %llx\n",
			reg0->type, imm);
		return -EINVAL;
	}

	return 0;
}

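/* Stack pointers must be constant, and a given load/store instruction must
 * use the same stack offset -- or at least the same offset modulo 4 -- on
 * every verifier path that reaches it.
 */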
static int
nfp_bpf_check_stack_access(struct nfp_prog *nfp_prog,
			   struct nfp_insn_meta *meta,
			   const struct bpf_reg_state *reg)
{
	s32 old_off, new_off;

	if (!tnum_is_const(reg->var_off)) {
		pr_info("variable ptr stack access\n");
		return -EINVAL;
	}

	if (meta->ptr.type == NOT_INIT)
		return 0;

	old_off = meta->ptr.off + meta->ptr.var_off.value;
	new_off = reg->off + reg->var_off.value;

	meta->ptr_not_const |= old_off != new_off;

	if (!meta->ptr_not_const)
		return 0;

	if (old_off % 4 == new_off % 4)
		return 0;

	pr_info("stack access changed location was:%d is:%d\n",
		old_off, new_off);
	return -EINVAL;
}

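/* Memory accesses may only go through context, stack or packet pointers,
 * and the pointer type seen by a given instruction must not change between
 * verifier paths.  The register state is cached in the instruction meta for
 * later use by the JIT.
 */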
static int
nfp_bpf_check_ptr(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
		  struct bpf_verifier_env *env, u8 reg_no)
{
	const struct bpf_reg_state *reg = cur_regs(env) + reg_no;
	int err;

	if (reg->type != PTR_TO_CTX &&
	    reg->type != PTR_TO_STACK &&
	    reg->type != PTR_TO_PACKET) {
		pr_info("unsupported ptr type: %d\n", reg->type);
		return -EINVAL;
	}

	if (reg->type == PTR_TO_STACK) {
		err = nfp_bpf_check_stack_access(nfp_prog, meta, reg);
		if (err)
			return err;
	}

	if (meta->ptr.type != NOT_INIT && meta->ptr.type != reg->type) {
		pr_info("ptr type changed for instruction %d -> %d\n",
			meta->ptr.type, reg->type);
		return -EINVAL;
	}

	meta->ptr = *reg;

	return 0;
}

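/* Per-instruction hook called by the kernel verifier.  Locate our meta for
 * the instruction being verified and apply the extra NFP-specific checks on
 * exits, loads and stores.
 */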
static int
nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx)
{
	struct nfp_bpf_analyzer_priv *priv = env->analyzer_priv;
	struct nfp_insn_meta *meta = priv->meta;

	meta = nfp_bpf_goto_meta(priv->prog, meta, insn_idx, env->prog->len);
	priv->meta = meta;

	if (meta->insn.src_reg >= MAX_BPF_REG ||
	    meta->insn.dst_reg >= MAX_BPF_REG) {
		pr_err("program uses extended registers - jit hardening?\n");
		return -EINVAL;
	}

	if (meta->insn.code == (BPF_JMP | BPF_EXIT))
		return nfp_bpf_check_exit(priv->prog, env);

	if ((meta->insn.code & ~BPF_SIZE_MASK) == (BPF_LDX | BPF_MEM))
		return nfp_bpf_check_ptr(priv->prog, meta, env,
					 meta->insn.src_reg);
	if ((meta->insn.code & ~BPF_SIZE_MASK) == (BPF_STX | BPF_MEM))
		return nfp_bpf_check_ptr(priv->prog, meta, env,
					 meta->insn.dst_reg);

	return 0;
}

static const struct bpf_ext_analyzer_ops nfp_bpf_analyzer_ops = {
	.insn_hook = nfp_verify_insn,
};

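/* Run the kernel BPF analyzer over @prog with the NFP instruction hook
 * attached, recording the stack depth the program requires.
 */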
int nfp_prog_verify(struct nfp_prog *nfp_prog, struct bpf_prog *prog)
{
	struct nfp_bpf_analyzer_priv *priv;
	int ret;

	nfp_prog->stack_depth = prog->aux->stack_depth;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->prog = nfp_prog;
	priv->meta = nfp_prog_first_meta(nfp_prog);

	ret = bpf_analyzer(prog, &nfp_bpf_analyzer_ops, priv);

	kfree(priv);

	return ret;
}