/*
 * Copyright (C) 2016-2017 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General License Version 2,
 * June 1991 as shown in the file COPYING in the top-level directory of this
 * source tree or the BSD 2-Clause License provided below.  You have the
 * option to license this software under the complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/kernel.h>
#include <linux/pkt_cls.h>

#include "fw.h"
#include "main.h"

#define pr_vlog(env, fmt, ...)	\
	bpf_verifier_log_write(env, "[nfp] " fmt, ##__VA_ARGS__)

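/* Find the meta for instruction @insn_idx.  Instruction metas form a
 * doubly linked list, so start from whichever of the current meta, the
 * first meta or the last meta is closest and walk towards the target.
 */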
struct nfp_insn_meta *
nfp_bpf_goto_meta(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
		  unsigned int insn_idx, unsigned int n_insns)
{
	unsigned int forward, backward, i;

	backward = meta->n - insn_idx;
	forward = insn_idx - meta->n;

	if (min(forward, backward) > n_insns - insn_idx - 1) {
		backward = n_insns - insn_idx - 1;
		meta = nfp_prog_last_meta(nfp_prog);
	}
	if (min(forward, backward) > insn_idx && backward > insn_idx) {
		forward = insn_idx;
		meta = nfp_prog_first_meta(nfp_prog);
	}

	if (forward < backward)
		for (i = 0; i < forward; i++)
			meta = nfp_meta_next(meta);
	else
		for (i = 0; i < backward; i++)
			meta = nfp_meta_prev(meta);

	return meta;
}

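/* Record the single adjust_head call site whose constant offset the
 * datapath can handle without runtime checks; any other pattern resets
 * the recorded location to UINT_MAX.
 */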
static void
nfp_record_adjust_head(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog,
		       struct nfp_insn_meta *meta,
		       const struct bpf_reg_state *reg2)
{
	unsigned int location = UINT_MAX;
	int imm;

	/* Datapath usually can give us guarantees on how much adjust head
	 * can be done without the need for any checks.  Optimize the simple
	 * case where there is only one adjust head by a constant.
	 */
	if (reg2->type != SCALAR_VALUE || !tnum_is_const(reg2->var_off))
		goto exit_set_location;
	imm = reg2->var_off.value;
	/* Translator will skip all checks, we need to guarantee min pkt len */
	if (imm > ETH_ZLEN - ETH_HLEN)
		goto exit_set_location;
	if (imm > (int)bpf->adjust_head.guaranteed_add ||
	    imm < -bpf->adjust_head.guaranteed_sub)
		goto exit_set_location;

	if (nfp_prog->adjust_head_location) {
		/* Only one call per program allowed */
		if (nfp_prog->adjust_head_location != meta->n)
			goto exit_set_location;

		if (meta->arg2.reg.var_off.value != imm)
			goto exit_set_location;
	}

	location = meta->n;
exit_set_location:
	nfp_prog->adjust_head_location = location;
}

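/* A helper's stack argument must be a constant pointer to the stack,
 * aligned to 4 bytes.  When the same call is re-parsed, remember
 * whether the offset changed between verifier walks.
 */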
static bool
nfp_bpf_stack_arg_ok(const char *fname, struct bpf_verifier_env *env,
		     const struct bpf_reg_state *reg,
		     struct nfp_bpf_reg_state *old_arg)
{
	s64 off, old_off;

	if (reg->type != PTR_TO_STACK) {
		pr_vlog(env, "%s: unsupported ptr type %d\n",
			fname, reg->type);
		return false;
	}
	if (!tnum_is_const(reg->var_off)) {
		pr_vlog(env, "%s: variable pointer\n", fname);
		return false;
	}

	off = reg->var_off.value + reg->off;
	if (-off % 4) {
		pr_vlog(env, "%s: unaligned stack pointer %lld\n", fname, -off);
		return false;
	}

	/* Rest of the checks is only if we re-parse the same insn */
	if (!old_arg)
		return true;

	old_off = old_arg->reg.var_off.value + old_arg->reg.off;
	old_arg->var_off |= off != old_off;

	return true;
}

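/* Validate a helper call against the capabilities advertised by the FW
 * and record the call's argument state in the instruction meta.
 */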
static int
nfp_bpf_check_call(struct nfp_prog *nfp_prog, struct bpf_verifier_env *env,
		   struct nfp_insn_meta *meta)
{
	const struct bpf_reg_state *reg1 = cur_regs(env) + BPF_REG_1;
	const struct bpf_reg_state *reg2 = cur_regs(env) + BPF_REG_2;
	struct nfp_app_bpf *bpf = nfp_prog->bpf;
	u32 func_id = meta->insn.imm;

	switch (func_id) {
	case BPF_FUNC_xdp_adjust_head:
		if (!bpf->adjust_head.off_max) {
			pr_vlog(env, "adjust_head not supported by FW\n");
			return -EOPNOTSUPP;
		}
		if (!(bpf->adjust_head.flags & NFP_BPF_ADJUST_HEAD_NO_META)) {
			pr_vlog(env, "adjust_head: FW requires shifting metadata, not supported by the driver\n");
			return -EOPNOTSUPP;
		}

		nfp_record_adjust_head(bpf, nfp_prog, meta, reg2);
		break;

	case BPF_FUNC_map_lookup_elem:
		if (!bpf->helpers.map_lookup) {
			pr_vlog(env, "map_lookup: not supported by FW\n");
			return -EOPNOTSUPP;
		}

		if (!nfp_bpf_stack_arg_ok("map_lookup", env, reg2,
					  meta->func_id ? &meta->arg2 : NULL))
			return -EOPNOTSUPP;

		/* Rest of the checks is only if we re-parse the same insn */
		if (!meta->func_id)
			break;

		if (meta->arg1.map_ptr != reg1->map_ptr) {
			pr_vlog(env, "map_lookup: called for different map\n");
			return -EOPNOTSUPP;
		}
		break;
	default:
		pr_vlog(env, "unsupported function id: %d\n", func_id);
		return -EOPNOTSUPP;
	}

	meta->func_id = func_id;
	meta->arg1 = *reg1;
	meta->arg2.reg = *reg2;

	return 0;
}

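/* Exit values are not checked for XDP programs.  Other programs must
 * return a constant, and cls_bpf may only return TC actions the
 * datapath can represent.
 */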
static int
nfp_bpf_check_exit(struct nfp_prog *nfp_prog,
		   struct bpf_verifier_env *env)
{
	const struct bpf_reg_state *reg0 = cur_regs(env) + BPF_REG_0;
	u64 imm;

	if (nfp_prog->type == BPF_PROG_TYPE_XDP)
		return 0;

	if (!(reg0->type == SCALAR_VALUE && tnum_is_const(reg0->var_off))) {
		char tn_buf[48];

		tnum_strn(tn_buf, sizeof(tn_buf), reg0->var_off);
		pr_vlog(env, "unsupported exit state: %d, var_off: %s\n",
			reg0->type, tn_buf);
		return -EINVAL;
	}

	imm = reg0->var_off.value;
	if (nfp_prog->type == BPF_PROG_TYPE_SCHED_CLS &&
	    imm <= TC_ACT_REDIRECT &&
	    imm != TC_ACT_SHOT && imm != TC_ACT_STOLEN &&
	    imm != TC_ACT_QUEUED && imm != TC_ACT_OK) {
		pr_vlog(env, "unsupported exit state: %d, imm: %llx\n",
			reg0->type, imm);
		return -EINVAL;
	}

	return 0;
}

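/* Stack pointers must be constant.  If different verifier walks see the
 * same instruction access different stack offsets, the offsets must at
 * least share the same alignment within a word.
 */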
static int
nfp_bpf_check_stack_access(struct nfp_prog *nfp_prog,
			   struct nfp_insn_meta *meta,
			   const struct bpf_reg_state *reg,
			   struct bpf_verifier_env *env)
{
	s32 old_off, new_off;

	if (!tnum_is_const(reg->var_off)) {
		pr_vlog(env, "variable ptr stack access\n");
		return -EINVAL;
	}

	if (meta->ptr.type == NOT_INIT)
		return 0;

	old_off = meta->ptr.off + meta->ptr.var_off.value;
	new_off = reg->off + reg->var_off.value;

	meta->ptr_not_const |= old_off != new_off;

	if (!meta->ptr_not_const)
		return 0;

	if (old_off % 4 == new_off % 4)
		return 0;

	pr_vlog(env, "stack access changed location was:%d is:%d\n",
		old_off, new_off);
	return -EINVAL;
}

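/* Validate the pointer operand of a load or store: only ctx, stack, map
 * value and packet pointers are supported, map values may not be
 * written, and the pointer type seen for an instruction may not change
 * between verifier walks.
 */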
static int
nfp_bpf_check_ptr(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
		  struct bpf_verifier_env *env, u8 reg_no)
{
	const struct bpf_reg_state *reg = cur_regs(env) + reg_no;
	int err;

	if (reg->type != PTR_TO_CTX &&
	    reg->type != PTR_TO_STACK &&
	    reg->type != PTR_TO_MAP_VALUE &&
	    reg->type != PTR_TO_PACKET) {
		pr_vlog(env, "unsupported ptr type: %d\n", reg->type);
		return -EINVAL;
	}

	if (reg->type == PTR_TO_STACK) {
		err = nfp_bpf_check_stack_access(nfp_prog, meta, reg, env);
		if (err)
			return err;
	}

	if (reg->type == PTR_TO_MAP_VALUE) {
		if (is_mbpf_store(meta)) {
			pr_vlog(env, "map writes not supported\n");
			return -EOPNOTSUPP;
		}
	}

	if (meta->ptr.type != NOT_INIT && meta->ptr.type != reg->type) {
		pr_vlog(env, "ptr type changed for instruction %d -> %d\n",
			meta->ptr.type, reg->type);
		return -EINVAL;
	}

	meta->ptr = *reg;

	return 0;
}

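/* Per-instruction hook called by the kernel verifier.  Find the meta
 * for the instruction being verified and apply the NFP-specific checks
 * for calls, exits and memory accesses.
 */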
static int
nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx)
{
	struct nfp_prog *nfp_prog = env->prog->aux->offload->dev_priv;
	struct nfp_insn_meta *meta = nfp_prog->verifier_meta;

	meta = nfp_bpf_goto_meta(nfp_prog, meta, insn_idx, env->prog->len);
	nfp_prog->verifier_meta = meta;

	if (!nfp_bpf_supported_opcode(meta->insn.code)) {
		pr_vlog(env, "instruction %#02x not supported\n",
			meta->insn.code);
		return -EINVAL;
	}

	if (meta->insn.src_reg >= MAX_BPF_REG ||
	    meta->insn.dst_reg >= MAX_BPF_REG) {
		pr_vlog(env, "program uses extended registers - jit hardening?\n");
		return -EINVAL;
	}

	if (meta->insn.code == (BPF_JMP | BPF_CALL))
		return nfp_bpf_check_call(nfp_prog, env, meta);
	if (meta->insn.code == (BPF_JMP | BPF_EXIT))
		return nfp_bpf_check_exit(nfp_prog, env);

	if (is_mbpf_load(meta))
		return nfp_bpf_check_ptr(nfp_prog, meta, env,
					 meta->insn.src_reg);
	if (is_mbpf_store(meta))
		return nfp_bpf_check_ptr(nfp_prog, meta, env,
					 meta->insn.dst_reg);

	return 0;
}

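/* Verifier callbacks used while a program is being offloaded to the NFP */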
const struct bpf_prog_offload_ops nfp_bpf_analyzer_ops = {
	.insn_hook = nfp_verify_insn,
};