Commit 0baef373 authored by Benjamin Tissoires, committed by Jiri Kosina

HID: bpf jmp table: simplify the logic of cleaning up programs

Kind of a hack, but works for now:

Instead of listening for every close of an eBPF program, we now
decrement the refcount when we insert the program into our internal
map of fd progs.

This is safe to do because:
- we listen for any call to the destructor of programs
- when a program is being destroyed, we disable it by removing
  it from any RCU list used by any HID device (so it will never
  be called again)
- we then trigger a job to clean up the prog fd map, but we override
  the removal of elements so that it does nothing to the programs
  themselves and only frees the allocated space

This is better than before because we can drop our own map of known
programs and their usage counts. We now rely on the refcount kept by
the bpf core, which has a much better chance of being accurate (see
the lifecycle sketch below).
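
A rough sketch of the intended lifecycle (the numbers are illustrative,
not taken from the code):

/*
 *   BPF_PROG_LOAD by userspace       -> refcnt == 1  (the user's fd)
 *   insertion into hid_jmp_table     -> refcnt == 2  (the prog array takes a ref)
 *   bpf_prog_sub(prog, 1) on insert  -> refcnt == 1  (we hand that ref back)
 *   userspace closes its fd          -> refcnt == 0  (the destructor runs; our
 *                                       fentry hook disables the jmp_table entry
 *                                       and schedules the cleanup job)
 */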
Signed-off-by: Benjamin Tissoires <benjamin.tissoires@redhat.com>
Signed-off-by: Jiri Kosina <jkosina@suse.cz>
Parent dbb60c8a
@@ -7,7 +7,7 @@

 #define HID_BPF_MAX_PROGS 1024

-extern bool call_hid_bpf_prog_release(u64 prog, int table_cnt) __ksym;
+extern void call_hid_bpf_prog_put_deferred(struct work_struct *work) __ksym;

 struct {
 	__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
@@ -16,13 +16,6 @@ struct {
 	__uint(value_size, sizeof(__u32));
 } hid_jmp_table SEC(".maps");

-struct {
-	__uint(type, BPF_MAP_TYPE_HASH);
-	__uint(max_entries, HID_BPF_MAX_PROGS * HID_BPF_PROG_TYPE_MAX);
-	__type(key, void *);
-	__type(value, __u8);
-} progs_map SEC(".maps");
-
 SEC("fmod_ret/__hid_bpf_tail_call")
 int BPF_PROG(hid_tail_call, struct hid_bpf_ctx *hctx)
 {
@@ -31,35 +24,10 @@ int BPF_PROG(hid_tail_call, struct hid_bpf_ctx *hctx)
 	return 0;
 }

-static void release_prog(u64 prog)
-{
-	u8 *value;
-
-	value = bpf_map_lookup_elem(&progs_map, &prog);
-	if (!value)
-		return;
-
-	if (call_hid_bpf_prog_release(prog, *value))
-		bpf_map_delete_elem(&progs_map, &prog);
-}
-
-SEC("fexit/bpf_prog_release")
-int BPF_PROG(hid_prog_release, struct inode *inode, struct file *filp)
-{
-	u64 prog = (u64)filp->private_data;
-
-	release_prog(prog);
-
-	return 0;
-}
-
-SEC("fexit/bpf_free_inode")
-int BPF_PROG(hid_free_inode, struct inode *inode)
-{
-	u64 prog = (u64)inode->i_private;
-
-	release_prog(prog);
+SEC("fentry/bpf_prog_put_deferred")
+int BPF_PROG(hid_bpf_prog_put_deferred, struct work_struct *work)
+{
+	call_hid_bpf_prog_put_deferred(work);

 	return 0;
 }
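
The replacement entrypoint above rides on fentry: a tracing program
attached to the entry of the kernel function bpf_prog_put_deferred(),
the deferred worker that runs once a program's last reference is gone.
A minimal standalone sketch of such a probe, assuming a vmlinux.h
generated by bpftool (trace_prog_put is a made-up name, not the
in-kernel entrypoint):

// SPDX-License-Identifier: GPL-2.0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char LICENSE[] SEC("license") = "GPL";

SEC("fentry/bpf_prog_put_deferred")
int BPF_PROG(trace_prog_put, struct work_struct *work)
{
	/* runs whenever the kernel executes bpf_prog_put_deferred() */
	bpf_printk("bpf_prog_put_deferred: work=%p", work);
	return 0;
}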
...
@@ -94,7 +94,7 @@ hid_bpf_get_data(struct hid_bpf_ctx *ctx, unsigned int offset, const size_t rdwr
  * can use.
  */
 BTF_SET8_START(hid_bpf_kfunc_ids)
-BTF_ID_FLAGS(func, call_hid_bpf_prog_release)
+BTF_ID_FLAGS(func, call_hid_bpf_prog_put_deferred)
 BTF_ID_FLAGS(func, hid_bpf_get_data, KF_RET_NULL)
 BTF_SET8_END(hid_bpf_kfunc_ids)
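
For context, the kfuncs in this BTF set only become callable by BPF
programs once the set is registered with the verifier. A hedged sketch
of the usual registration pattern (the hid_bpf_kfunc_set variable name
is assumed here, not shown in this diff):

static const struct btf_kfunc_id_set hid_bpf_kfunc_set = {
	.owner = THIS_MODULE,
	.set   = &hid_bpf_kfunc_ids,
};

/* typically done once from the subsystem init path; TRACING matches
 * the fmod_ret/fentry entrypoints used by HID-BPF */
err = register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &hid_bpf_kfunc_set);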
...
@@ -22,6 +22,6 @@ int hid_bpf_prog_run(struct hid_device *hdev, enum hid_bpf_prog_type type,
 struct bpf_prog;

 /* HID-BPF internal kfuncs API */
-bool call_hid_bpf_prog_release(u64 prog, int table_cnt);
+void call_hid_bpf_prog_put_deferred(struct work_struct *work);

 #endif
...
@@ -39,7 +39,6 @@ struct hid_bpf_prog_entry {
 struct hid_bpf_jmp_table {
 	struct bpf_map *map;
-	struct bpf_map *prog_keys;
 	struct hid_bpf_prog_entry entries[HID_BPF_MAX_PROGS]; /* compacted list, circular buffer */
 	int tail, head;
 	struct bpf_prog *progs[HID_BPF_MAX_PROGS]; /* idx -> progs mapping */
@@ -286,32 +285,23 @@ static void hid_bpf_release_prog_at(int idx)
  */
 static int hid_bpf_insert_prog(int prog_fd, struct bpf_prog *prog)
 {
-	int i, cnt, index = -1, map_fd = -1, progs_map_fd = -1, err = -EINVAL;
+	int i, index = -1, map_fd = -1, err = -EINVAL;

 	/* retrieve a fd of our prog_array map in BPF */
 	map_fd = skel_map_get_fd_by_id(jmp_table.map->id);
-	/* take an fd for the table of progs we monitor with SEC("fexit/bpf_prog_release") */
-	progs_map_fd = skel_map_get_fd_by_id(jmp_table.prog_keys->id);

-	if (map_fd < 0 || progs_map_fd < 0) {
+	if (map_fd < 0) {
 		err = -EINVAL;
 		goto out;
 	}

-	cnt = 0;
-	/* find the first available index in the jmp_table
-	 * and count how many time this program has been inserted
-	 */
+	/* find the first available index in the jmp_table */
 	for (i = 0; i < HID_BPF_MAX_PROGS; i++) {
 		if (!jmp_table.progs[i] && index < 0) {
 			/* mark the index as used */
 			jmp_table.progs[i] = prog;
 			index = i;
 			__set_bit(i, jmp_table.enabled);
-			cnt++;
-		} else {
-			if (jmp_table.progs[i] == prog)
-				cnt++;
 		}
 	}

 	if (index < 0) {
@@ -324,10 +314,15 @@ static int hid_bpf_insert_prog(int prog_fd, struct bpf_prog *prog)
 	if (err)
 		goto out;

-	/* insert the program in the prog list table */
-	err = skel_map_update_elem(progs_map_fd, &prog, &cnt, 0);
-	if (err)
-		goto out;
+	/*
+	 * The program has been safely inserted, decrement the reference count
+	 * so it doesn't interfere with the number of actual user handles.
+	 * This is safe to do because:
+	 * - we overwrite the put_ptr in the prog fd map
+	 * - we also have a cleanup function that monitors when a program gets
+	 *   released and we manually do the cleanup in the prog fd map
+	 */
+	bpf_prog_sub(prog, 1);

 	/* return the index */
 	err = index;
@@ -337,8 +332,6 @@ static int hid_bpf_insert_prog(int prog_fd, struct bpf_prog *prog)
 	__hid_bpf_do_release_prog(map_fd, index);
 	if (map_fd >= 0)
 		close_fd(map_fd);
-	if (progs_map_fd >= 0)
-		close_fd(progs_map_fd);

 	return err;
 }
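
The pairing that makes this bpf_prog_sub() safe is worth spelling out.
Schematically (a simplified view of the prog fd array's usual behavior,
not the exact call chain):

/* on insert, the prog fd array takes its own reference:
 *	prog = bpf_prog_get(prog_fd);		// refcnt++
 * right after, in the code above, we hand it back:
 *	bpf_prog_sub(prog, 1);			// refcnt--
 * so only user fds keep the program alive; the overridden
 * map_fd_put_ptr() (see below) guarantees the map does not
 * drop this already-returned reference a second time.
 */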
@@ -457,41 +450,38 @@ void __hid_bpf_destroy_device(struct hid_device *hdev)
 	schedule_work(&release_work);
 }

-noinline bool
-call_hid_bpf_prog_release(u64 prog_key, int table_cnt)
+void call_hid_bpf_prog_put_deferred(struct work_struct *work)
 {
-	/* compare with how many refs are left in the bpf program */
-	struct bpf_prog *prog = (struct bpf_prog *)prog_key;
-	int idx;
+	struct bpf_prog_aux *aux;
+	struct bpf_prog *prog;
+	bool found = false;
+	int i;

-	if (!prog)
-		return false;
-
-	if (atomic64_read(&prog->aux->refcnt) != table_cnt)
-		return false;
+	aux = container_of(work, struct bpf_prog_aux, work);
+	prog = aux->prog;

 	/* we don't need locking here because the entries in the progs table
 	 * are stable:
 	 * if there are other users (and the progs entries might change), we
-	 * would return in the statement above.
+	 * would simply not have been called.
 	 */
-	for (idx = 0; idx < HID_BPF_MAX_PROGS; idx++) {
-		if (jmp_table.progs[idx] == prog) {
-			__clear_bit(idx, jmp_table.enabled);
-			break;
+	for (i = 0; i < HID_BPF_MAX_PROGS; i++) {
+		if (jmp_table.progs[i] == prog) {
+			__clear_bit(i, jmp_table.enabled);
+			found = true;
 		}
 	}

-	if (idx >= HID_BPF_MAX_PROGS) {
-		/* should never happen if we get our refcount right */
-		idx = -1;
-	}
-
-	/* schedule release of all detached progs */
-	schedule_work(&release_work);
-
-	return idx >= 0;
+	if (found)
+		/* schedule release of all detached progs */
+		schedule_work(&release_work);
+}
+
+static void hid_bpf_prog_fd_array_put_ptr(void *ptr)
+{
 }

-#define HID_BPF_PROGS_COUNT 3
+#define HID_BPF_PROGS_COUNT 2

 static struct bpf_link *links[HID_BPF_PROGS_COUNT];
 static struct entrypoints_bpf *skel;
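
The container_of() step is what makes the new hook work:
bpf_prog_put_deferred() is a work callback, and the work_struct it
receives is embedded inside struct bpf_prog_aux, so the aux (and from
it the prog) can be recovered from the work pointer alone. A
self-contained userspace demonstration of the same pattern (toy struct
names, not the kernel's):

#include <stdio.h>
#include <stddef.h>

/* userspace stand-in for the kernel's container_of() macro */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work_struct { int pending; };

struct prog_aux {			/* toy model of struct bpf_prog_aux */
	int id;
	struct work_struct work;	/* embedded, like aux->work */
};

int main(void)
{
	struct prog_aux aux = { .id = 42 };
	struct work_struct *work = &aux.work;

	/* recover the containing structure from the member pointer */
	struct prog_aux *back = container_of(work, struct prog_aux, work);

	printf("recovered id = %d\n", back->id);	/* prints 42 */
	return 0;
}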
@@ -501,9 +491,6 @@ void hid_bpf_free_links_and_skel(void)
 	int i;

 	/* the following is enough to release all programs attached to hid */
-	if (jmp_table.prog_keys)
-		bpf_map_put_with_uref(jmp_table.prog_keys);
-
 	if (jmp_table.map)
 		bpf_map_put_with_uref(jmp_table.map);
@@ -533,6 +520,8 @@ void hid_bpf_free_links_and_skel(void)
 		idx++;						\
 	} while (0)

+static struct bpf_map_ops hid_bpf_prog_fd_maps_ops;
+
 int hid_bpf_preload_skel(void)
 {
 	int err, idx = 0;
@@ -551,15 +540,14 @@ int hid_bpf_preload_skel(void)
 		goto out;
 	}

-	jmp_table.prog_keys = bpf_map_get_with_uref(skel->maps.progs_map.map_fd);
-	if (IS_ERR(jmp_table.prog_keys)) {
-		err = PTR_ERR(jmp_table.prog_keys);
-		goto out;
-	}
+	/* our jump table is stealing refs, so we should not decrement on removal of elements */
+	hid_bpf_prog_fd_maps_ops = *jmp_table.map->ops;
+	hid_bpf_prog_fd_maps_ops.map_fd_put_ptr = hid_bpf_prog_fd_array_put_ptr;
+
+	jmp_table.map->ops = &hid_bpf_prog_fd_maps_ops;

 	ATTACH_AND_STORE_LINK(hid_tail_call);
-	ATTACH_AND_STORE_LINK(hid_prog_release);
-	ATTACH_AND_STORE_LINK(hid_free_inode);
+	ATTACH_AND_STORE_LINK(hid_bpf_prog_put_deferred);

 	return 0;
 out:
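
The ops swap at the end is the crux of the "hack" from the commit
message: copy the map's entire ops table, replace only the
map_fd_put_ptr hook with a no-op, and repoint the map at the copy. A
minimal userspace model of the pattern (toy types; the real struct
bpf_map_ops has many more callbacks):

#include <stdio.h>

struct map_ops {
	void (*map_fd_put_ptr)(void *ptr);
	/* ... many more hooks in the real bpf_map_ops ... */
};

static void default_put_ptr(void *ptr)
{
	printf("default: dropping reference on %p\n", ptr);
}

static void noop_put_ptr(void *ptr)
{
	(void)ptr;	/* the reference was already handed back at insert time */
}

static struct map_ops patched_ops;

int main(void)
{
	static const struct map_ops default_ops = { .map_fd_put_ptr = default_put_ptr };
	const struct map_ops *ops = &default_ops;

	patched_ops = *ops;				/* clone the whole ops table */
	patched_ops.map_fd_put_ptr = noop_put_ptr;	/* override a single hook */
	ops = &patched_ops;

	ops->map_fd_put_ptr(NULL);			/* now a no-op */
	return 0;
}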