// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/hashtable.h>
#include <linux/io_uring.h>

#include <trace/events/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "refs.h"
#include "opdef.h"
#include "kbuf.h"
#include "poll.h"
#include "cancel.h"

struct io_poll_update {
	struct file			*file;
	u64				old_user_data;
	u64				new_user_data;
	__poll_t			events;
	bool				update_events;
	bool				update_user_data;
};

struct io_poll_table {
	struct poll_table_struct pt;
	struct io_kiocb *req;
	int nr_entries;
	int error;
};

#define IO_POLL_CANCEL_FLAG	BIT(31)
#define IO_POLL_REF_MASK	GENMASK(30, 0)
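/*
 * Bit 31 of ->poll_refs marks a cancelled request; bits 0-30 hold the
 * reference count used for the ownership scheme below.
 */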

#define IO_WQE_F_DOUBLE		1

static inline struct io_kiocb *wqe_to_req(struct wait_queue_entry *wqe)
{
	unsigned long priv = (unsigned long)wqe->private;

	return (struct io_kiocb *)(priv & ~IO_WQE_F_DOUBLE);
}

static inline bool wqe_is_double(struct wait_queue_entry *wqe)
{
	unsigned long priv = (unsigned long)wqe->private;

	return priv & IO_WQE_F_DOUBLE;
}

/*
 * If the refs part of ->poll_refs (see IO_POLL_REF_MASK) is 0, the request is
 * free and we can bump the count to acquire ownership. Modifying a request
 * without owning it is disallowed, which prevents races both when enqueueing
 * task_work and between poll arming and wakeups.
 */
static inline bool io_poll_get_ownership(struct io_kiocb *req)
{
	return !(atomic_fetch_inc(&req->poll_refs) & IO_POLL_REF_MASK);
}

static void io_poll_mark_cancelled(struct io_kiocb *req)
{
	atomic_or(IO_POLL_CANCEL_FLAG, &req->poll_refs);
}

static struct io_poll *io_poll_get_double(struct io_kiocb *req)
{
	/* pure poll stashes this in ->async_data, poll driven retry elsewhere */
	if (req->opcode == IORING_OP_POLL_ADD)
		return req->async_data;
	return req->apoll->double_poll;
}

static struct io_poll *io_poll_get_single(struct io_kiocb *req)
{
	if (req->opcode == IORING_OP_POLL_ADD)
		return io_kiocb_to_cmd(req);
	return &req->apoll->poll;
}

static void io_poll_req_insert(struct io_kiocb *req)
{
	struct io_hash_table *table = &req->ctx->cancel_table;
	u32 index = hash_long(req->cqe.user_data, table->hash_bits);
	struct io_hash_bucket *hb = &table->hbs[index];

	spin_lock(&hb->lock);
	hlist_add_head(&req->hash_node, &hb->list);
	spin_unlock(&hb->lock);
}

static void io_poll_req_delete(struct io_kiocb *req, struct io_ring_ctx *ctx)
{
	struct io_hash_table *table = &req->ctx->cancel_table;
	u32 index = hash_long(req->cqe.user_data, table->hash_bits);
	spinlock_t *lock = &table->hbs[index].lock;

	spin_lock(lock);
	hash_del(&req->hash_node);
	spin_unlock(lock);
}

static void io_poll_req_insert_locked(struct io_kiocb *req)
{
	struct io_hash_table *table = &req->ctx->cancel_table_locked;
	u32 index = hash_long(req->cqe.user_data, table->hash_bits);

	hlist_add_head(&req->hash_node, &table->hbs[index].list);
}

static void io_poll_tw_hash_eject(struct io_kiocb *req, bool *locked)
{
	struct io_ring_ctx *ctx = req->ctx;

	if (req->flags & REQ_F_HASH_LOCKED) {
		/*
		 * ->cancel_table_locked is protected by ->uring_lock, in
		 * contrast to the per-bucket spinlocks. Likely, tctx_task_work()
		 * already grabbed the mutex for us, but there is a chance it
		 * failed.
		 */
		io_tw_lock(ctx, locked);
		hash_del(&req->hash_node);
	} else {
		io_poll_req_delete(req, ctx);
	}
}

static void io_init_poll_iocb(struct io_poll *poll, __poll_t events,
			      wait_queue_func_t wake_func)
{
	poll->head = NULL;
#define IO_POLL_UNMASK	(EPOLLERR|EPOLLHUP|EPOLLNVAL|EPOLLRDHUP)
	/* mask in events that we always want/need */
	poll->events = events | IO_POLL_UNMASK;
	INIT_LIST_HEAD(&poll->wait.entry);
	init_waitqueue_func_entry(&poll->wait, wake_func);
}

static inline void io_poll_remove_entry(struct io_poll *poll)
{
	struct wait_queue_head *head = smp_load_acquire(&poll->head);

	if (head) {
		spin_lock_irq(&head->lock);
		list_del_init(&poll->wait.entry);
		poll->head = NULL;
		spin_unlock_irq(&head->lock);
	}
}

static void io_poll_remove_entries(struct io_kiocb *req)
{
	/*
	 * Nothing to do if neither of those flags is set. Avoid dipping
	 * into the poll/apoll/double cachelines if we can.
	 */
	if (!(req->flags & (REQ_F_SINGLE_POLL | REQ_F_DOUBLE_POLL)))
		return;

	/*
	 * While we hold the waitqueue lock and the waitqueue is nonempty,
	 * wake_up_pollfree() will wait for us.  However, taking the waitqueue
	 * lock in the first place can race with the waitqueue being freed.
	 *
	 * We solve this as eventpoll does: by taking advantage of the fact that
	 * all users of wake_up_pollfree() will RCU-delay the actual free.  If
	 * we enter rcu_read_lock() and see that the pointer to the queue is
	 * non-NULL, we can then lock it without the memory being freed out from
	 * under us.
	 *
	 * Keep holding rcu_read_lock() as long as we hold the queue lock, in
	 * case the caller deletes the entry from the queue, leaving it empty.
	 * In that case, only RCU prevents the queue memory from being freed.
	 */
	rcu_read_lock();
	if (req->flags & REQ_F_SINGLE_POLL)
		io_poll_remove_entry(io_poll_get_single(req));
	if (req->flags & REQ_F_DOUBLE_POLL)
		io_poll_remove_entry(io_poll_get_double(req));
	rcu_read_unlock();
}

/*
 * All poll tw should go through this. Checks for poll events, manages
 * references, does rewait, etc.
 *
 * Returns a negative error on failure. >0 when no action is required, which
 * is either a spurious wakeup or an already-served multishot CQE. 0 when
 * it's done with the request, in which case the mask is stored in
 * req->cqe.res.
 */
static int io_poll_check_events(struct io_kiocb *req, bool *locked)
{
	struct io_ring_ctx *ctx = req->ctx;
	int v, ret;

	/* req->task == current here, checking PF_EXITING is safe */
	if (unlikely(req->task->flags & PF_EXITING))
		return -ECANCELED;

	do {
		v = atomic_read(&req->poll_refs);

		/* tw handler should be the owner, and so have some references */
		if (WARN_ON_ONCE(!(v & IO_POLL_REF_MASK)))
			return 0;
		if (v & IO_POLL_CANCEL_FLAG)
			return -ECANCELED;

		if (!req->cqe.res) {
			struct poll_table_struct pt = { ._key = req->apoll_events };
			req->cqe.res = vfs_poll(req->file, &pt) & req->apoll_events;
		}

		if (unlikely(!req->cqe.res))
			continue;
		if (req->apoll_events & EPOLLONESHOT)
			return 0;

		/* multishot, just fill a CQE and proceed */
		if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
			__poll_t mask = mangle_poll(req->cqe.res &
						    req->apoll_events);

			if (!io_post_aux_cqe(ctx, req->cqe.user_data,
					     mask, IORING_CQE_F_MORE))
				return -ECANCELED;
		} else {
			ret = io_poll_issue(req, locked);
			if (ret)
				return ret;
		}

		/*
		 * Release all references, retry if someone tried to restart
		 * task_work while we were executing it.
		 */
	} while (atomic_sub_return(v & IO_POLL_REF_MASK, &req->poll_refs));

	return 1;
}
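
/*
 * The ownership pattern used above, shown in isolation: a minimal userspace
 * sketch (not kernel code, all names are made up) relying only on C11
 * <stdatomic.h>. Any number of wakers may call try_claim(); only the caller
 * that bumps the count from zero becomes the owner and runs process().
 * Claims arriving while the owner is busy make the release step loop again,
 * so no wakeup is ever lost:
 *
 *	#include <stdatomic.h>
 *
 *	static atomic_int refs;
 *
 *	static void handle_events(void);	// whatever the owner must do
 *
 *	static void process(void)
 *	{
 *		int v;
 *
 *		do {
 *			v = atomic_load(&refs);
 *			handle_events();
 *		} while (atomic_fetch_sub(&refs, v) != v);	// new claims arrived? retry
 *	}
 *
 *	static void try_claim(void)
 *	{
 *		if (atomic_fetch_add(&refs, 1) == 0)
 *			process();	// we just became the owner
 *	}
 *
 * The real code additionally reserves bit 31 for IO_POLL_CANCEL_FLAG, only
 * counts the bits in IO_POLL_REF_MASK, and does the process() part from the
 * task_work that runs io_poll_check_events().
 */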

static void io_poll_task_func(struct io_kiocb *req, bool *locked)
{
	int ret;

	ret = io_poll_check_events(req, locked);
	if (ret > 0)
		return;

	if (!ret) {
		struct io_poll *poll = io_kiocb_to_cmd(req);

		req->cqe.res = mangle_poll(req->cqe.res & poll->events);
	} else {
		req->cqe.res = ret;
		req_set_fail(req);
	}

	io_poll_remove_entries(req);
	io_poll_tw_hash_eject(req, locked);

	io_req_set_res(req, req->cqe.res, 0);
	io_req_task_complete(req, locked);
}

static void io_apoll_task_func(struct io_kiocb *req, bool *locked)
{
	int ret;

	ret = io_poll_check_events(req, locked);
	if (ret > 0)
		return;

	io_poll_remove_entries(req);
	io_poll_tw_hash_eject(req, locked);

	if (!ret)
		io_req_task_submit(req, locked);
	else
		io_req_complete_failed(req, ret);
}

static void __io_poll_execute(struct io_kiocb *req, int mask)
{
	io_req_set_res(req, mask, 0);
	/*
	 * This is useful for a poll that is armed on behalf of another
	 * request, where the wakeup path could be on a different CPU. We
	 * want to avoid pulling in req->apoll->events for that case.
	 */
	if (req->opcode == IORING_OP_POLL_ADD)
		req->io_task_work.func = io_poll_task_func;
	else
		req->io_task_work.func = io_apoll_task_func;

	trace_io_uring_task_add(req, mask);
	io_req_task_work_add(req);
}

static inline void io_poll_execute(struct io_kiocb *req, int res)
{
	if (io_poll_get_ownership(req))
		__io_poll_execute(req, res);
}

static void io_poll_cancel_req(struct io_kiocb *req)
{
	io_poll_mark_cancelled(req);
	/* kick tw, which should complete the request */
	io_poll_execute(req, 0);
}

#define IO_ASYNC_POLL_COMMON	(EPOLLONESHOT | EPOLLPRI)
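/* these bits alone never count as an event match in io_poll_wake() */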

static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
			void *key)
{
	struct io_kiocb *req = wqe_to_req(wait);
	struct io_poll *poll = container_of(wait, struct io_poll, wait);
	__poll_t mask = key_to_poll(key);

	if (unlikely(mask & POLLFREE)) {
		io_poll_mark_cancelled(req);
		/* we have to kick tw in case it's not already */
		io_poll_execute(req, 0);

		/*
		 * If the waitqueue is being freed early but someone already
		 * holds ownership over it, we have to tear down the request as
		 * best we can. That means immediately removing the request from
		 * its waitqueue and preventing all further accesses to the
		 * waitqueue via the request.
		 */
		list_del_init(&poll->wait.entry);

		/*
		 * Careful: this *must* be the last step, since as soon
		 * as req->head is NULL'ed out, the request can be
		 * completed and freed, since aio_poll_complete_work()
		 * will no longer need to take the waitqueue lock.
		 */
		smp_store_release(&poll->head, NULL);
		return 1;
	}

	/* for instances that support it, check for an event match first */
	if (mask && !(mask & (poll->events & ~IO_ASYNC_POLL_COMMON)))
		return 0;

	if (io_poll_get_ownership(req)) {
		/* optional, saves extra locking for removal in tw handler */
		if (mask && poll->events & EPOLLONESHOT) {
			list_del_init(&poll->wait.entry);
			poll->head = NULL;
			if (wqe_is_double(wait))
				req->flags &= ~REQ_F_DOUBLE_POLL;
			else
				req->flags &= ~REQ_F_SINGLE_POLL;
		}
		__io_poll_execute(req, mask);
	}
	return 1;
}

static void __io_queue_proc(struct io_poll *poll, struct io_poll_table *pt,
			    struct wait_queue_head *head,
			    struct io_poll **poll_ptr)
{
	struct io_kiocb *req = pt->req;
	unsigned long wqe_private = (unsigned long) req;

	/*
	 * The file being polled uses multiple waitqueues for poll handling
	 * (e.g. one for read, one for write). Setup a separate io_poll
	 * if this happens.
	 */
	if (unlikely(pt->nr_entries)) {
		struct io_poll *first = poll;

		/* double add on the same waitqueue head, ignore */
		if (first->head == head)
			return;
		/* already have a 2nd entry, fail a third attempt */
		if (*poll_ptr) {
			if ((*poll_ptr)->head == head)
				return;
			pt->error = -EINVAL;
			return;
		}

		poll = kmalloc(sizeof(*poll), GFP_ATOMIC);
		if (!poll) {
			pt->error = -ENOMEM;
			return;
		}
		/* mark as double wq entry */
		wqe_private |= IO_WQE_F_DOUBLE;
		req->flags |= REQ_F_DOUBLE_POLL;
		io_init_poll_iocb(poll, first->events, first->wait.func);
		*poll_ptr = poll;
		if (req->opcode == IORING_OP_POLL_ADD)
			req->flags |= REQ_F_ASYNC_DATA;
	}

	req->flags |= REQ_F_SINGLE_POLL;
	pt->nr_entries++;
	poll->head = head;
	poll->wait.private = (void *) wqe_private;

	if (poll->events & EPOLLEXCLUSIVE)
		add_wait_queue_exclusive(head, &poll->wait);
	else
		add_wait_queue(head, &poll->wait);
}
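
/*
 * A request grows the second (double) entry above when the polled file's
 * ->poll() method hands the poll table more than one waitqueue, e.g. separate
 * read and write queues. Illustrative driver-style sketch, all names
 * hypothetical:
 *
 *	static __poll_t demo_poll(struct file *file, poll_table *pt)
 *	{
 *		struct demo_dev *dev = file->private_data;
 *		__poll_t mask = 0;
 *
 *		poll_wait(file, &dev->read_wq, pt);	// 1st __io_queue_proc() call
 *		poll_wait(file, &dev->write_wq, pt);	// 2nd call allocates the double entry
 *
 *		if (dev->have_data)
 *			mask |= EPOLLIN | EPOLLRDNORM;
 *		if (dev->have_space)
 *			mask |= EPOLLOUT | EPOLLWRNORM;
 *		return mask;
 *	}
 */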

static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
			       struct poll_table_struct *p)
{
	struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
	struct io_poll *poll = io_kiocb_to_cmd(pt->req);

	__io_queue_proc(poll, pt, head,
			(struct io_poll **) &pt->req->async_data);
}

static int __io_arm_poll_handler(struct io_kiocb *req,
				 struct io_poll *poll,
				 struct io_poll_table *ipt, __poll_t mask)
{
	struct io_ring_ctx *ctx = req->ctx;
	int v;

	INIT_HLIST_NODE(&req->hash_node);
	req->work.cancel_seq = atomic_read(&ctx->cancel_seq);
	io_init_poll_iocb(poll, mask, io_poll_wake);
	poll->file = req->file;

	req->apoll_events = poll->events;

	ipt->pt._key = mask;
	ipt->req = req;
	ipt->error = 0;
	ipt->nr_entries = 0;

	/*
	 * Take ownership to delay any tw execution until we're done with
	 * poll arming; see io_poll_get_ownership().
	 */
	atomic_set(&req->poll_refs, 1);
	mask = vfs_poll(req->file, &ipt->pt) & poll->events;

	if (mask &&
	   ((poll->events & (EPOLLET|EPOLLONESHOT)) == (EPOLLET|EPOLLONESHOT))) {
		io_poll_remove_entries(req);
		/* no one else has access to the req, forget about the ref */
		return mask;
	}

	if (!mask && unlikely(ipt->error || !ipt->nr_entries)) {
		io_poll_remove_entries(req);
		if (!ipt->error)
			ipt->error = -EINVAL;
		return 0;
	}

	if (req->flags & REQ_F_HASH_LOCKED)
		io_poll_req_insert_locked(req);
	else
		io_poll_req_insert(req);

	if (mask && (poll->events & EPOLLET)) {
		/* can't multishot if failed, just queue the event we've got */
		if (unlikely(ipt->error || !ipt->nr_entries)) {
			poll->events |= EPOLLONESHOT;
			req->apoll_events |= EPOLLONESHOT;
			ipt->error = 0;
		}
		__io_poll_execute(req, mask);
		return 0;
	}

	/*
	 * Release ownership. If someone tried to queue a tw while it was
	 * locked, kick it off for them.
	 */
	v = atomic_dec_return(&req->poll_refs);
	if (unlikely(v & IO_POLL_REF_MASK))
		__io_poll_execute(req, 0);
	return 0;
}

static void io_async_queue_proc(struct file *file, struct wait_queue_head *head,
			       struct poll_table_struct *p)
{
	struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
	struct async_poll *apoll = pt->req->apoll;

	__io_queue_proc(&apoll->poll, pt, head, &apoll->double_poll);
}

int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags)
{
	const struct io_op_def *def = &io_op_defs[req->opcode];
	struct io_ring_ctx *ctx = req->ctx;
	struct async_poll *apoll;
	struct io_poll_table ipt;
	__poll_t mask = POLLPRI | POLLERR | EPOLLET;
	int ret;

	/*
	 * apoll requests already grab the mutex to complete in the tw handler,
	 * so removal from the mutex-backed hash is free; use it by default.
	 */
	if (issue_flags & IO_URING_F_UNLOCKED)
		req->flags &= ~REQ_F_HASH_LOCKED;
	else
		req->flags |= REQ_F_HASH_LOCKED;

	if (!def->pollin && !def->pollout)
		return IO_APOLL_ABORTED;
	if (!file_can_poll(req->file))
		return IO_APOLL_ABORTED;
	if ((req->flags & (REQ_F_POLLED|REQ_F_PARTIAL_IO)) == REQ_F_POLLED)
		return IO_APOLL_ABORTED;
	if (!(req->flags & REQ_F_APOLL_MULTISHOT))
		mask |= EPOLLONESHOT;

	if (def->pollin) {
		mask |= EPOLLIN | EPOLLRDNORM;

		/* If reading from MSG_ERRQUEUE using recvmsg, ignore POLLIN */
		if (req->flags & REQ_F_CLEAR_POLLIN)
			mask &= ~EPOLLIN;
	} else {
		mask |= EPOLLOUT | EPOLLWRNORM;
	}
	if (def->poll_exclusive)
		mask |= EPOLLEXCLUSIVE;
	if (req->flags & REQ_F_POLLED) {
		apoll = req->apoll;
		kfree(apoll->double_poll);
	} else if (!(issue_flags & IO_URING_F_UNLOCKED) &&
		   !list_empty(&ctx->apoll_cache)) {
		apoll = list_first_entry(&ctx->apoll_cache, struct async_poll,
						poll.wait.entry);
		list_del_init(&apoll->poll.wait.entry);
	} else {
		apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
		if (unlikely(!apoll))
			return IO_APOLL_ABORTED;
	}
	apoll->double_poll = NULL;
	req->apoll = apoll;
	req->flags |= REQ_F_POLLED;
	ipt.pt._qproc = io_async_queue_proc;

	io_kbuf_recycle(req, issue_flags);

	ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask);
	if (ret || ipt.error)
		return ret ? IO_APOLL_READY : IO_APOLL_ABORTED;

	trace_io_uring_poll_arm(req, mask, apoll->poll.events);
	return IO_APOLL_OK;
}
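
/*
 * Caller's view of the return codes above (schematic only, the actual
 * dispatch lives in io_uring.c): IO_APOLL_OK means poll was armed and a
 * wakeup will re-issue the request later, IO_APOLL_READY means the file is
 * already ready so the issue should be retried straight away, and
 * IO_APOLL_ABORTED means arming was not possible and the caller has to fall
 * back to its blocking (io-wq) path.
 */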

static __cold bool io_poll_remove_all_table(struct task_struct *tsk,
					    struct io_hash_table *table,
					    bool cancel_all)
{
	unsigned nr_buckets = 1U << table->hash_bits;
	struct hlist_node *tmp;
	struct io_kiocb *req;
	bool found = false;
	int i;

	for (i = 0; i < nr_buckets; i++) {
		struct io_hash_bucket *hb = &table->hbs[i];

		spin_lock(&hb->lock);
		hlist_for_each_entry_safe(req, tmp, &hb->list, hash_node) {
			if (io_match_task_safe(req, tsk, cancel_all)) {
				hlist_del_init(&req->hash_node);
				io_poll_cancel_req(req);
				found = true;
			}
		}
		spin_unlock(&hb->lock);
	}
	return found;
}

/*
 * Returns true if we found and killed one or more poll requests
 */
__cold bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk,
			       bool cancel_all)
	__must_hold(&ctx->uring_lock)
{
	bool ret;

	ret = io_poll_remove_all_table(tsk, &ctx->cancel_table, cancel_all);
	ret |= io_poll_remove_all_table(tsk, &ctx->cancel_table_locked, cancel_all);
	return ret;
}

static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, bool poll_only,
				     struct io_cancel_data *cd,
				     struct io_hash_table *table,
				     struct io_hash_bucket **out_bucket)
{
	struct io_kiocb *req;
	u32 index = hash_long(cd->data, table->hash_bits);
	struct io_hash_bucket *hb = &table->hbs[index];

	*out_bucket = NULL;

	spin_lock(&hb->lock);
	hlist_for_each_entry(req, &hb->list, hash_node) {
		if (cd->data != req->cqe.user_data)
			continue;
		if (poll_only && req->opcode != IORING_OP_POLL_ADD)
			continue;
		if (cd->flags & IORING_ASYNC_CANCEL_ALL) {
			if (cd->seq == req->work.cancel_seq)
				continue;
			req->work.cancel_seq = cd->seq;
		}
		*out_bucket = hb;
		return req;
	}
	spin_unlock(&hb->lock);
	return NULL;
}

static struct io_kiocb *io_poll_file_find(struct io_ring_ctx *ctx,
					  struct io_cancel_data *cd,
					  struct io_hash_table *table,
					  struct io_hash_bucket **out_bucket)
{
	unsigned nr_buckets = 1U << table->hash_bits;
	struct io_kiocb *req;
	int i;

	*out_bucket = NULL;

	for (i = 0; i < nr_buckets; i++) {
		struct io_hash_bucket *hb = &table->hbs[i];

		spin_lock(&hb->lock);
		hlist_for_each_entry(req, &hb->list, hash_node) {
			if (!(cd->flags & IORING_ASYNC_CANCEL_ANY) &&
			    req->file != cd->file)
				continue;
			if (cd->seq == req->work.cancel_seq)
				continue;
			req->work.cancel_seq = cd->seq;
			*out_bucket = hb;
			return req;
		}
		spin_unlock(&hb->lock);
	}
	return NULL;
}

static int io_poll_disarm(struct io_kiocb *req)
{
	if (!req)
		return -ENOENT;
	if (!io_poll_get_ownership(req))
		return -EALREADY;
	io_poll_remove_entries(req);
	hash_del(&req->hash_node);
	return 0;
}

static int __io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
			    struct io_hash_table *table)
{
	struct io_hash_bucket *bucket;
	struct io_kiocb *req;

	if (cd->flags & (IORING_ASYNC_CANCEL_FD|IORING_ASYNC_CANCEL_ANY))
		req = io_poll_file_find(ctx, cd, table, &bucket);
	else
		req = io_poll_find(ctx, false, cd, table, &bucket);

	if (req)
		io_poll_cancel_req(req);
	if (bucket)
		spin_unlock(&bucket->lock);
	return req ? 0 : -ENOENT;
}

int io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
		   unsigned issue_flags)
{
	int ret;

	ret = __io_poll_cancel(ctx, cd, &ctx->cancel_table);
	if (ret != -ENOENT)
		return ret;

	io_ring_submit_lock(ctx, issue_flags);
	ret = __io_poll_cancel(ctx, cd, &ctx->cancel_table_locked);
	io_ring_submit_unlock(ctx, issue_flags);
	return ret;
}

static __poll_t io_poll_parse_events(const struct io_uring_sqe *sqe,
				     unsigned int flags)
{
	u32 events;

	events = READ_ONCE(sqe->poll32_events);
#ifdef __BIG_ENDIAN
	events = swahw32(events);
#endif
	if (!(flags & IORING_POLL_ADD_MULTI))
		events |= EPOLLONESHOT;
	if (!(flags & IORING_POLL_ADD_LEVEL))
		events |= EPOLLET;
	return demangle_poll(events) |
		(events & (EPOLLEXCLUSIVE|EPOLLONESHOT|EPOLLET));
}

int io_poll_remove_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_poll_update *upd = io_kiocb_to_cmd(req);
	u32 flags;

	if (sqe->buf_index || sqe->splice_fd_in)
		return -EINVAL;
	flags = READ_ONCE(sqe->len);
	if (flags & ~(IORING_POLL_UPDATE_EVENTS | IORING_POLL_UPDATE_USER_DATA |
		      IORING_POLL_ADD_MULTI))
		return -EINVAL;
	/* meaningless without update */
	if (flags == IORING_POLL_ADD_MULTI)
		return -EINVAL;

	upd->old_user_data = READ_ONCE(sqe->addr);
	upd->update_events = flags & IORING_POLL_UPDATE_EVENTS;
	upd->update_user_data = flags & IORING_POLL_UPDATE_USER_DATA;

	upd->new_user_data = READ_ONCE(sqe->off);
	if (!upd->update_user_data && upd->new_user_data)
		return -EINVAL;
	if (upd->update_events)
		upd->events = io_poll_parse_events(sqe, flags);
	else if (sqe->poll32_events)
		return -EINVAL;

	return 0;
}

int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_poll *poll = io_kiocb_to_cmd(req);
	u32 flags;

	if (sqe->buf_index || sqe->off || sqe->addr)
		return -EINVAL;
	flags = READ_ONCE(sqe->len);
	if (flags & ~(IORING_POLL_ADD_MULTI|IORING_POLL_ADD_LEVEL))
		return -EINVAL;
	if ((flags & IORING_POLL_ADD_MULTI) && (req->flags & REQ_F_CQE_SKIP))
		return -EINVAL;

	poll->events = io_poll_parse_events(sqe, flags);
	return 0;
}
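
/*
 * Userspace counterpart of the prep above: a sketch using liburing's
 * io_uring_prep_poll_add()/io_uring_prep_poll_multishot() helpers, which fill
 * sqe->poll32_events and the sqe->len flags parsed here. Assumes an
 * initialized 'struct io_uring ring' and a pollable 'fd'; error handling
 * omitted:
 *
 *	struct io_uring_sqe *sqe;
 *
 *	// one-shot: a single CQE carries the ready mask in cqe->res
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_poll_add(sqe, fd, POLLIN);
 *
 *	// multishot: IORING_POLL_ADD_MULTI goes into sqe->len, so EPOLLONESHOT
 *	// is not or'ed in by io_poll_parse_events() and CQEs keep arriving
 *	// with IORING_CQE_F_MORE set until the poll terminates
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_poll_multishot(sqe, fd, POLLIN);
 */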

int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_poll *poll = io_kiocb_to_cmd(req);
	struct io_poll_table ipt;
	int ret;

	ipt.pt._qproc = io_poll_queue_proc;

	/*
	 * If sqpoll or single issuer, there is no contention for ->uring_lock
	 * and we'll end up holding it in tw handlers anyway.
	 */
	if (!(issue_flags & IO_URING_F_UNLOCKED) &&
	    (req->ctx->flags & (IORING_SETUP_SQPOLL | IORING_SETUP_SINGLE_ISSUER)))
		req->flags |= REQ_F_HASH_LOCKED;
	else
		req->flags &= ~REQ_F_HASH_LOCKED;

	ret = __io_arm_poll_handler(req, poll, &ipt, poll->events);
	if (ret) {
		io_req_set_res(req, ret, 0);
		return IOU_OK;
	}
	if (ipt.error) {
		req_set_fail(req);
		return ipt.error;
	}

	return IOU_ISSUE_SKIP_COMPLETE;
}

int io_poll_remove(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_poll_update *poll_update = io_kiocb_to_cmd(req);
	struct io_cancel_data cd = { .data = poll_update->old_user_data, };
	struct io_ring_ctx *ctx = req->ctx;
	struct io_hash_bucket *bucket;
	struct io_kiocb *preq;
	int ret2, ret = 0;
	bool locked;

	preq = io_poll_find(ctx, true, &cd, &ctx->cancel_table, &bucket);
	ret2 = io_poll_disarm(preq);
	if (bucket)
		spin_unlock(&bucket->lock);
	if (!ret2)
		goto found;
	if (ret2 != -ENOENT) {
		ret = ret2;
		goto out;
	}

	io_ring_submit_lock(ctx, issue_flags);
	preq = io_poll_find(ctx, true, &cd, &ctx->cancel_table_locked, &bucket);
	ret2 = io_poll_disarm(preq);
	if (bucket)
		spin_unlock(&bucket->lock);
	io_ring_submit_unlock(ctx, issue_flags);
	if (ret2) {
		ret = ret2;
		goto out;
	}

found:
	if (WARN_ON_ONCE(preq->opcode != IORING_OP_POLL_ADD)) {
		ret = -EFAULT;
		goto out;
	}

	if (poll_update->update_events || poll_update->update_user_data) {
		/* only replace the low event mask bits, keep the behavior flags */
		if (poll_update->update_events) {
			struct io_poll *poll = io_kiocb_to_cmd(preq);

			poll->events &= ~0xffff;
			poll->events |= poll_update->events & 0xffff;
			poll->events |= IO_POLL_UNMASK;
		}
		if (poll_update->update_user_data)
			preq->cqe.user_data = poll_update->new_user_data;

		ret2 = io_poll_add(preq, issue_flags);
		/* successfully updated, don't complete poll request */
		if (!ret2 || ret2 == -EIOCBQUEUED)
			goto out;
	}

	req_set_fail(preq);
	io_req_set_res(preq, -ECANCELED, 0);
	locked = !(issue_flags & IO_URING_F_UNLOCKED);
	io_req_task_complete(preq, &locked);
out:
	if (ret < 0) {
		req_set_fail(req);
		return ret;
	}
	/* complete update request, we're done with it */
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}
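
/*
 * Userspace encoding consumed by io_poll_remove_prep()/io_poll_remove() above
 * (raw-SQE sketch with placeholder variables; liburing's
 * io_uring_prep_poll_update() helper fills the same fields). This retargets an
 * armed poll, identified by its original user_data, to a new mask and a new
 * user_data:
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	memset(sqe, 0, sizeof(*sqe));
 *	sqe->opcode = IORING_OP_POLL_REMOVE;
 *	sqe->addr = old_user_data;	// which poll request to update
 *	sqe->off = new_user_data;	// read only with IORING_POLL_UPDATE_USER_DATA
 *	sqe->len = IORING_POLL_UPDATE_EVENTS | IORING_POLL_UPDATE_USER_DATA;
 *	sqe->poll32_events = POLLOUT;	// read only with IORING_POLL_UPDATE_EVENTS
 *	sqe->user_data = update_tag;	// identifies the CQE for the update itself
 *
 * On big-endian systems poll32_events must be word-swapped to match the
 * swahw32() in io_poll_parse_events(); liburing's helper does that for you.
 */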