/*
 * Copyright (C) 2016-2017 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General License Version 2,
 * June 1991 as shown in the file COPYING in the top-level directory of this
 * source tree or the BSD 2-Clause License provided below.  You have the
 * option to license this software under the complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/kernel.h>
#include <linux/pkt_cls.h>

#include "fw.h"
#include "main.h"

/* Write a message to the BPF verifier log, prefixed with "[nfp] " so
 * driver-side diagnostics are distinguishable from core verifier output.
 */
#define pr_vlog(env, fmt, ...)	\
	bpf_verifier_log_write(env, "[nfp] " fmt, ##__VA_ARGS__)

45
struct nfp_insn_meta *
46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72
nfp_bpf_goto_meta(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
		  unsigned int insn_idx, unsigned int n_insns)
{
	unsigned int forward, backward, i;

	backward = meta->n - insn_idx;
	forward = insn_idx - meta->n;

	if (min(forward, backward) > n_insns - insn_idx - 1) {
		backward = n_insns - insn_idx - 1;
		meta = nfp_prog_last_meta(nfp_prog);
	}
	if (min(forward, backward) > insn_idx && backward > insn_idx) {
		forward = insn_idx;
		meta = nfp_prog_first_meta(nfp_prog);
	}

	if (forward < backward)
		for (i = 0; i < forward; i++)
			meta = nfp_meta_next(meta);
	else
		for (i = 0; i < backward; i++)
			meta = nfp_meta_prev(meta);

	return meta;
}

73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108
static void
nfp_record_adjust_head(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog,
		       struct nfp_insn_meta *meta,
		       const struct bpf_reg_state *reg2)
{
	unsigned int location =	UINT_MAX;
	int imm;

	/* Datapath usually can give us guarantees on how much adjust head
	 * can be done without the need for any checks.  Optimize the simple
	 * case where there is only one adjust head by a constant.
	 */
	if (reg2->type != SCALAR_VALUE || !tnum_is_const(reg2->var_off))
		goto exit_set_location;
	imm = reg2->var_off.value;
	/* Translator will skip all checks, we need to guarantee min pkt len */
	if (imm > ETH_ZLEN - ETH_HLEN)
		goto exit_set_location;
	if (imm > (int)bpf->adjust_head.guaranteed_add ||
	    imm < -bpf->adjust_head.guaranteed_sub)
		goto exit_set_location;

	if (nfp_prog->adjust_head_location) {
		/* Only one call per program allowed */
		if (nfp_prog->adjust_head_location != meta->n)
			goto exit_set_location;

		if (meta->arg2.var_off.value != imm)
			goto exit_set_location;
	}

	location = meta->n;
exit_set_location:
	nfp_prog->adjust_head_location = location;
}

109
static int
110 111
nfp_bpf_check_call(struct nfp_prog *nfp_prog, struct bpf_verifier_env *env,
		   struct nfp_insn_meta *meta)
112
{
113
	const struct bpf_reg_state *reg1 = cur_regs(env) + BPF_REG_1;
114
	const struct bpf_reg_state *reg2 = cur_regs(env) + BPF_REG_2;
115
	struct nfp_app_bpf *bpf = nfp_prog->bpf;
116
	u32 func_id = meta->insn.imm;
117
	s64 off, old_off;
118 119

	switch (func_id) {
120 121
	case BPF_FUNC_xdp_adjust_head:
		if (!bpf->adjust_head.off_max) {
122
			pr_vlog(env, "adjust_head not supported by FW\n");
123 124 125
			return -EOPNOTSUPP;
		}
		if (!(bpf->adjust_head.flags & NFP_BPF_ADJUST_HEAD_NO_META)) {
126
			pr_vlog(env, "adjust_head: FW requires shifting metadata, not supported by the driver\n");
127 128
			return -EOPNOTSUPP;
		}
129 130

		nfp_record_adjust_head(bpf, nfp_prog, meta, reg2);
131
		break;
132 133 134

	case BPF_FUNC_map_lookup_elem:
		if (!bpf->helpers.map_lookup) {
135
			pr_vlog(env, "map_lookup: not supported by FW\n");
136 137 138
			return -EOPNOTSUPP;
		}
		if (reg2->type != PTR_TO_STACK) {
139 140
			pr_vlog(env,
				"map_lookup: unsupported key ptr type %d\n",
141 142 143 144
				reg2->type);
			return -EOPNOTSUPP;
		}
		if (!tnum_is_const(reg2->var_off)) {
145
			pr_vlog(env, "map_lookup: variable key pointer\n");
146 147 148 149 150
			return -EOPNOTSUPP;
		}

		off = reg2->var_off.value + reg2->off;
		if (-off % 4) {
151 152
			pr_vlog(env,
				"map_lookup: unaligned stack pointer %lld\n",
153 154 155 156 157 158 159 160 161 162 163 164
				-off);
			return -EOPNOTSUPP;
		}

		/* Rest of the checks is only if we re-parse the same insn */
		if (!meta->func_id)
			break;

		old_off = meta->arg2.var_off.value + meta->arg2.off;
		meta->arg2_var_off |= off != old_off;

		if (meta->arg1.map_ptr != reg1->map_ptr) {
165
			pr_vlog(env, "map_lookup: called for different map\n");
166 167 168
			return -EOPNOTSUPP;
		}
		break;
169
	default:
170
		pr_vlog(env, "unsupported function id: %d\n", func_id);
171 172 173
		return -EOPNOTSUPP;
	}

174 175
	meta->func_id = func_id;
	meta->arg1 = *reg1;
176 177
	meta->arg2 = *reg2;

178 179 180
	return 0;
}

181 182
static int
nfp_bpf_check_exit(struct nfp_prog *nfp_prog,
183
		   struct bpf_verifier_env *env)
184
{
185
	const struct bpf_reg_state *reg0 = cur_regs(env) + BPF_REG_0;
186
	u64 imm;
187

188
	if (nfp_prog->type == BPF_PROG_TYPE_XDP)
189 190
		return 0;

191 192 193 194
	if (!(reg0->type == SCALAR_VALUE && tnum_is_const(reg0->var_off))) {
		char tn_buf[48];

		tnum_strn(tn_buf, sizeof(tn_buf), reg0->var_off);
195
		pr_vlog(env, "unsupported exit state: %d, var_off: %s\n",
196
			reg0->type, tn_buf);
197 198 199
		return -EINVAL;
	}

200
	imm = reg0->var_off.value;
201 202
	if (nfp_prog->type == BPF_PROG_TYPE_SCHED_CLS &&
	    imm <= TC_ACT_REDIRECT &&
203 204
	    imm != TC_ACT_SHOT && imm != TC_ACT_STOLEN &&
	    imm != TC_ACT_QUEUED) {
205
		pr_vlog(env, "unsupported exit state: %d, imm: %llx\n",
206
			reg0->type, imm);
207 208 209 210 211 212
		return -EINVAL;
	}

	return 0;
}

213
static int
214 215
nfp_bpf_check_stack_access(struct nfp_prog *nfp_prog,
			   struct nfp_insn_meta *meta,
216 217
			   const struct bpf_reg_state *reg,
			   struct bpf_verifier_env *env)
J
Jakub Kicinski 已提交
218
{
219 220
	s32 old_off, new_off;

J
Jakub Kicinski 已提交
221
	if (!tnum_is_const(reg->var_off)) {
222
		pr_vlog(env, "variable ptr stack access\n");
J
Jakub Kicinski 已提交
223 224 225
		return -EINVAL;
	}

226 227
	if (meta->ptr.type == NOT_INIT)
		return 0;
J
Jakub Kicinski 已提交
228

229 230 231
	old_off = meta->ptr.off + meta->ptr.var_off.value;
	new_off = reg->off + reg->var_off.value;

232 233 234 235 236 237
	meta->ptr_not_const |= old_off != new_off;

	if (!meta->ptr_not_const)
		return 0;

	if (old_off % 4 == new_off % 4)
238 239
		return 0;

240
	pr_vlog(env, "stack access changed location was:%d is:%d\n",
241 242
		old_off, new_off);
	return -EINVAL;
J
Jakub Kicinski 已提交
243 244
}

245
static int
246
nfp_bpf_check_ptr(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
247
		  struct bpf_verifier_env *env, u8 reg_no)
248
{
249
	const struct bpf_reg_state *reg = cur_regs(env) + reg_no;
J
Jakub Kicinski 已提交
250
	int err;
251 252

	if (reg->type != PTR_TO_CTX &&
J
Jakub Kicinski 已提交
253
	    reg->type != PTR_TO_STACK &&
254
	    reg->type != PTR_TO_MAP_VALUE &&
255
	    reg->type != PTR_TO_PACKET) {
256
		pr_vlog(env, "unsupported ptr type: %d\n", reg->type);
257
		return -EINVAL;
258
	}
259

J
Jakub Kicinski 已提交
260
	if (reg->type == PTR_TO_STACK) {
261
		err = nfp_bpf_check_stack_access(nfp_prog, meta, reg, env);
J
Jakub Kicinski 已提交
262 263 264 265
		if (err)
			return err;
	}

266 267
	if (reg->type == PTR_TO_MAP_VALUE) {
		if (is_mbpf_store(meta)) {
268
			pr_vlog(env, "map writes not supported\n");
269 270 271 272
			return -EOPNOTSUPP;
		}
	}

273
	if (meta->ptr.type != NOT_INIT && meta->ptr.type != reg->type) {
274
		pr_vlog(env, "ptr type changed for instruction %d -> %d\n",
275
			meta->ptr.type, reg->type);
276
		return -EINVAL;
277
	}
278

279
	meta->ptr = *reg;
280

281 282 283 284 285 286
	return 0;
}

static int
nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx)
{
287 288
	struct nfp_prog *nfp_prog = env->prog->aux->offload->dev_priv;
	struct nfp_insn_meta *meta = nfp_prog->verifier_meta;
289

290 291
	meta = nfp_bpf_goto_meta(nfp_prog, meta, insn_idx, env->prog->len);
	nfp_prog->verifier_meta = meta;
292 293 294

	if (meta->insn.src_reg >= MAX_BPF_REG ||
	    meta->insn.dst_reg >= MAX_BPF_REG) {
295
		pr_vlog(env, "program uses extended registers - jit hardening?\n");
296 297 298
		return -EINVAL;
	}

299
	if (meta->insn.code == (BPF_JMP | BPF_CALL))
300
		return nfp_bpf_check_call(nfp_prog, env, meta);
301
	if (meta->insn.code == (BPF_JMP | BPF_EXIT))
302
		return nfp_bpf_check_exit(nfp_prog, env);
303

304
	if (is_mbpf_load(meta))
305
		return nfp_bpf_check_ptr(nfp_prog, meta, env,
306
					 meta->insn.src_reg);
307
	if (is_mbpf_store(meta))
308
		return nfp_bpf_check_ptr(nfp_prog, meta, env,
309
					 meta->insn.dst_reg);
310 311 312 313

	return 0;
}

314
const struct bpf_prog_offload_ops nfp_bpf_analyzer_ops = {
315 316
	.insn_hook = nfp_verify_insn,
};