// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/hashtable.h>
#include <linux/io_uring.h>

#include <trace/events/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "refs.h"
#include "opdef.h"
#include "kbuf.h"
#include "poll.h"
#include "cancel.h"

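/*
 * State for IORING_OP_POLL_REMOVE: old_user_data identifies the poll request
 * to cancel or update, new_user_data optionally replaces its user_data and
 * events optionally replaces its event mask (see io_poll_remove_prep()).
 */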
struct io_poll_update {
	struct file			*file;
	u64				old_user_data;
	u64				new_user_data;
	__poll_t			events;
	bool				update_events;
	bool				update_user_data;
};

struct io_poll_table {
	struct poll_table_struct pt;
	struct io_kiocb *req;
	int nr_entries;
	int error;
	bool owning;
	/* output value, set only if arm poll returns >0 */
	__poll_t result_mask;
};

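/*
 * ->poll_refs layout: the low 31 bits (IO_POLL_REF_MASK) count ownership
 * references, and bumping them from zero acquires ownership; bit 31
 * (IO_POLL_CANCEL_FLAG) marks the request as cancelled.
 */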
#define IO_POLL_CANCEL_FLAG	BIT(31)
#define IO_POLL_REF_MASK	GENMASK(30, 0)

#define IO_WQE_F_DOUBLE		1

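/*
 * wqe->private stores the io_kiocb pointer of the owning request, with bit 0
 * (IO_WQE_F_DOUBLE) tagging the second (double) poll entry. The pointer is
 * at least pointer-aligned, so bit 0 is otherwise always clear.
 */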
static inline struct io_kiocb *wqe_to_req(struct wait_queue_entry *wqe)
{
	unsigned long priv = (unsigned long)wqe->private;

	return (struct io_kiocb *)(priv & ~IO_WQE_F_DOUBLE);
}

static inline bool wqe_is_double(struct wait_queue_entry *wqe)
{
	unsigned long priv = (unsigned long)wqe->private;

	return priv & IO_WQE_F_DOUBLE;
}

/*
 * If the refs part of ->poll_refs (see IO_POLL_REF_MASK) is 0, the request is
 * free and we can bump it to acquire ownership. Modifying a request while not
 * owning it is disallowed, which prevents races when enqueueing task_work and
 * between poll arming and wakeups.
 */
static inline bool io_poll_get_ownership(struct io_kiocb *req)
{
	return !(atomic_fetch_inc(&req->poll_refs) & IO_POLL_REF_MASK);
}

static void io_poll_mark_cancelled(struct io_kiocb *req)
{
	atomic_or(IO_POLL_CANCEL_FLAG, &req->poll_refs);
}

static struct io_poll *io_poll_get_double(struct io_kiocb *req)
{
	/* pure poll stashes this in ->async_data, poll-driven retry keeps it in ->apoll */
	if (req->opcode == IORING_OP_POLL_ADD)
		return req->async_data;
	return req->apoll->double_poll;
}

static struct io_poll *io_poll_get_single(struct io_kiocb *req)
{
	if (req->opcode == IORING_OP_POLL_ADD)
		return io_kiocb_to_cmd(req);
	return &req->apoll->poll;
}

static void io_poll_req_insert(struct io_kiocb *req)
{
	struct io_hash_table *table = &req->ctx->cancel_table;
	u32 index = hash_long(req->cqe.user_data, table->hash_bits);
	struct io_hash_bucket *hb = &table->hbs[index];

	spin_lock(&hb->lock);
	hlist_add_head(&req->hash_node, &hb->list);
	spin_unlock(&hb->lock);
}

static void io_poll_req_delete(struct io_kiocb *req, struct io_ring_ctx *ctx)
{
	struct io_hash_table *table = &req->ctx->cancel_table;
	u32 index = hash_long(req->cqe.user_data, table->hash_bits);
	spinlock_t *lock = &table->hbs[index].lock;

	spin_lock(lock);
	hash_del(&req->hash_node);
	spin_unlock(lock);
}

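/*
 * Variant for ->cancel_table_locked, which is protected by ->uring_lock
 * rather than by per-bucket spinlocks (see io_poll_tw_hash_eject()).
 */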
static void io_poll_req_insert_locked(struct io_kiocb *req)
{
	struct io_hash_table *table = &req->ctx->cancel_table_locked;
	u32 index = hash_long(req->cqe.user_data, table->hash_bits);

	hlist_add_head(&req->hash_node, &table->hbs[index].list);
}

static void io_poll_tw_hash_eject(struct io_kiocb *req, bool *locked)
{
	struct io_ring_ctx *ctx = req->ctx;

	if (req->flags & REQ_F_HASH_LOCKED) {
		/*
		 * ->cancel_table_locked is protected by ->uring_lock in
		 * contrast to per bucket spinlocks. Likely, tctx_task_work()
		 * already grabbed the mutex for us, but there is a chance it
		 * failed.
		 */
		io_tw_lock(ctx, locked);
		hash_del(&req->hash_node);
	} else {
		io_poll_req_delete(req, ctx);
	}
}

static void io_init_poll_iocb(struct io_poll *poll, __poll_t events,
			      wait_queue_func_t wake_func)
{
	poll->head = NULL;
#define IO_POLL_UNMASK	(EPOLLERR|EPOLLHUP|EPOLLNVAL|EPOLLRDHUP)
	/* mask in events that we always want/need */
	poll->events = events | IO_POLL_UNMASK;
	INIT_LIST_HEAD(&poll->wait.entry);
	init_waitqueue_func_entry(&poll->wait, wake_func);
}

static inline void io_poll_remove_entry(struct io_poll *poll)
{
	struct wait_queue_head *head = smp_load_acquire(&poll->head);

	if (head) {
		spin_lock_irq(&head->lock);
		list_del_init(&poll->wait.entry);
		poll->head = NULL;
		spin_unlock_irq(&head->lock);
	}
}

static void io_poll_remove_entries(struct io_kiocb *req)
{
	/*
	 * Nothing to do if neither of those flags are set. Avoid dipping
	 * into the poll/apoll/double cachelines if we can.
	 */
	if (!(req->flags & (REQ_F_SINGLE_POLL | REQ_F_DOUBLE_POLL)))
		return;

	/*
	 * While we hold the waitqueue lock and the waitqueue is nonempty,
	 * wake_up_pollfree() will wait for us.  However, taking the waitqueue
	 * lock in the first place can race with the waitqueue being freed.
	 *
	 * We solve this as eventpoll does: by taking advantage of the fact that
	 * all users of wake_up_pollfree() will RCU-delay the actual free.  If
	 * we enter rcu_read_lock() and see that the pointer to the queue is
	 * non-NULL, we can then lock it without the memory being freed out from
	 * under us.
	 *
	 * Keep holding rcu_read_lock() as long as we hold the queue lock, in
	 * case the caller deletes the entry from the queue, leaving it empty.
	 * In that case, only RCU prevents the queue memory from being freed.
	 */
	rcu_read_lock();
	if (req->flags & REQ_F_SINGLE_POLL)
		io_poll_remove_entry(io_poll_get_single(req));
	if (req->flags & REQ_F_DOUBLE_POLL)
		io_poll_remove_entry(io_poll_get_double(req));
	rcu_read_unlock();
}

/*
 * All poll tw should go through this. Checks for poll events, manages
 * references, does rewait, etc.
 *
 * Returns a negative error on failure. >0 when no action is required, which
 * means either a spurious wakeup or a served multishot CQE. 0 when it's done
 * with the request, in which case the mask is stored in req->cqe.res.
 */
static int io_poll_check_events(struct io_kiocb *req, bool *locked)
{
	struct io_ring_ctx *ctx = req->ctx;
	int v, ret;

	/* req->task == current here, checking PF_EXITING is safe */
	if (unlikely(req->task->flags & PF_EXITING))
		return -ECANCELED;

	do {
		v = atomic_read(&req->poll_refs);

		/* tw handler should be the owner, and so have some references */
		if (WARN_ON_ONCE(!(v & IO_POLL_REF_MASK)))
			return 0;
		if (v & IO_POLL_CANCEL_FLAG)
			return -ECANCELED;

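		/* the mask was stashed in __io_poll_execute() */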
		if (!req->cqe.res) {
			struct poll_table_struct pt = { ._key = req->apoll_events };
			req->cqe.res = vfs_poll(req->file, &pt) & req->apoll_events;
		}

		if (unlikely(!req->cqe.res))
			continue;
		if (req->apoll_events & EPOLLONESHOT)
			return 0;

		/*
		 * Plain multishot poll: just fill a CQE and proceed. Requests
		 * flagged REQ_F_APOLL_MULTISHOT are apoll-driven multishot ops
		 * and are re-issued below instead.
		 */
		if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
			__poll_t mask = mangle_poll(req->cqe.res &
						    req->apoll_events);

			if (!io_post_aux_cqe(ctx, req->cqe.user_data,
					     mask, IORING_CQE_F_MORE))
				return -ECANCELED;
		} else {
			ret = io_poll_issue(req, locked);
			if (ret)
				return ret;
		}

		/*
		 * Release all references, retry if someone tried to restart
		 * task_work while we were executing it.
		 */
	} while (atomic_sub_return(v & IO_POLL_REF_MASK, &req->poll_refs));

	return 1;
}

static void io_poll_task_func(struct io_kiocb *req, bool *locked)
{
	int ret;

	ret = io_poll_check_events(req, locked);
	if (ret > 0)
		return;

	if (!ret) {
		struct io_poll *poll = io_kiocb_to_cmd(req);

		req->cqe.res = mangle_poll(req->cqe.res & poll->events);
	} else {
		req->cqe.res = ret;
		req_set_fail(req);
	}

	io_poll_remove_entries(req);
	io_poll_tw_hash_eject(req, locked);

	io_req_set_res(req, req->cqe.res, 0);
	io_req_task_complete(req, locked);
}

static void io_apoll_task_func(struct io_kiocb *req, bool *locked)
{
	int ret;

	ret = io_poll_check_events(req, locked);
	if (ret > 0)
		return;

	io_poll_remove_entries(req);
	io_poll_tw_hash_eject(req, locked);

	if (!ret)
		io_req_task_submit(req, locked);
	else
		io_req_complete_failed(req, ret);
}

static void __io_poll_execute(struct io_kiocb *req, int mask)
{
	io_req_set_res(req, mask, 0);
	/*
	 * This is useful for poll that is armed on behalf of another
	 * request, and where the wakeup path could be on a different
	 * CPU. We want to avoid pulling in req->apoll->events for that
	 * case.
	 */
	if (req->opcode == IORING_OP_POLL_ADD)
		req->io_task_work.func = io_poll_task_func;
	else
		req->io_task_work.func = io_apoll_task_func;

	trace_io_uring_task_add(req, mask);
	io_req_task_work_add(req);
}

static inline void io_poll_execute(struct io_kiocb *req, int res)
{
	if (io_poll_get_ownership(req))
		__io_poll_execute(req, res);
}

static void io_poll_cancel_req(struct io_kiocb *req)
{
	io_poll_mark_cancelled(req);
	/* kick tw, which should complete the request */
	io_poll_execute(req, 0);
}

#define IO_ASYNC_POLL_COMMON	(EPOLLONESHOT | EPOLLPRI)

static __cold int io_pollfree_wake(struct io_kiocb *req, struct io_poll *poll)
{
	io_poll_mark_cancelled(req);
	/* we have to kick tw in case it's not already queued */
	io_poll_execute(req, 0);

	/*
	 * If the waitqueue is being freed early but someone already holds
	 * ownership over it, we have to tear down the request as best we
	 * can. That means immediately removing the request from
	 * its waitqueue and preventing all further accesses to the
	 * waitqueue via the request.
	 */
	list_del_init(&poll->wait.entry);

	/*
	 * Careful: this *must* be the last step, since as soon
	 * as poll->head is NULL'ed out, the request can be
	 * completed and freed, since io_poll_remove_entry()
	 * will no longer need to take the waitqueue lock.
	 */
	smp_store_release(&poll->head, NULL);
	return 1;
}

static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
			void *key)
{
	struct io_kiocb *req = wqe_to_req(wait);
	struct io_poll *poll = container_of(wait, struct io_poll, wait);
	__poll_t mask = key_to_poll(key);

	if (unlikely(mask & POLLFREE))
		return io_pollfree_wake(req, poll);

	/* for instances that support it, check for an event match first */
	if (mask && !(mask & (poll->events & ~IO_ASYNC_POLL_COMMON)))
		return 0;

	if (io_poll_get_ownership(req)) {
		/* optional, saves extra locking for removal in tw handler */
		if (mask && poll->events & EPOLLONESHOT) {
			list_del_init(&poll->wait.entry);
			poll->head = NULL;
			if (wqe_is_double(wait))
				req->flags &= ~REQ_F_DOUBLE_POLL;
			else
				req->flags &= ~REQ_F_SINGLE_POLL;
		}
		__io_poll_execute(req, mask);
	}
	return 1;
}

static void io_poll_double_prepare(struct io_kiocb *req)
{
	struct wait_queue_head *head;
	struct io_poll *poll = io_poll_get_single(req);

	/* head is RCU protected, see io_poll_remove_entries() comments */
	rcu_read_lock();
	head = smp_load_acquire(&poll->head);
	if (head) {
		/*
		 * poll arm may not hold ownership and so race with
		 * io_poll_wake() by modifying req->flags. There is only one
		 * poll entry queued, serialise with it by taking its head lock.
		 */
		spin_lock_irq(&head->lock);
		req->flags |= REQ_F_DOUBLE_POLL;
		spin_unlock_irq(&head->lock);
	}
	rcu_read_unlock();
}

static void __io_queue_proc(struct io_poll *poll, struct io_poll_table *pt,
			    struct wait_queue_head *head,
			    struct io_poll **poll_ptr)
{
	struct io_kiocb *req = pt->req;
	unsigned long wqe_private = (unsigned long) req;

	/*
	 * The file being polled uses multiple waitqueues for poll handling
	 * (e.g. one for read, one for write). Setup a separate io_poll
	 * if this happens.
	 */
	if (unlikely(pt->nr_entries)) {
		struct io_poll *first = poll;

		/* double add on the same waitqueue head, ignore */
		if (first->head == head)
			return;
		/* already have a 2nd entry, fail a third attempt */
		if (*poll_ptr) {
			if ((*poll_ptr)->head == head)
				return;
			pt->error = -EINVAL;
			return;
		}

		poll = kmalloc(sizeof(*poll), GFP_ATOMIC);
		if (!poll) {
			pt->error = -ENOMEM;
			return;
		}

		io_poll_double_prepare(req);
		/* mark as double wq entry */
		wqe_private |= IO_WQE_F_DOUBLE;
		io_init_poll_iocb(poll, first->events, first->wait.func);
		*poll_ptr = poll;
		if (req->opcode == IORING_OP_POLL_ADD)
			req->flags |= REQ_F_ASYNC_DATA;
	} else {
		/* fine to modify, there is no poll queued to race with us */
		req->flags |= REQ_F_SINGLE_POLL;
	}

	pt->nr_entries++;
	poll->head = head;
	poll->wait.private = (void *) wqe_private;

	if (poll->events & EPOLLEXCLUSIVE)
		add_wait_queue_exclusive(head, &poll->wait);
	else
		add_wait_queue(head, &poll->wait);
}

static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
			       struct poll_table_struct *p)
{
	struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
	struct io_poll *poll = io_kiocb_to_cmd(pt->req);

	__io_queue_proc(poll, pt, head,
			(struct io_poll **) &pt->req->async_data);
}

static bool io_poll_can_finish_inline(struct io_kiocb *req,
				      struct io_poll_table *pt)
{
	return pt->owning || io_poll_get_ownership(req);
}

/*
 * Returns 0 when it's handed over for polling, in which case the caller must
 * not touch the request. The caller owns the request if non-zero is returned:
 * negative values contain an error code, while a result >0 means the polling
 * has completed inline and ipt.result_mask is set to the mask.
 */
static int __io_arm_poll_handler(struct io_kiocb *req,
				 struct io_poll *poll,
				 struct io_poll_table *ipt, __poll_t mask,
				 unsigned issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	int v;

	INIT_HLIST_NODE(&req->hash_node);
	req->work.cancel_seq = atomic_read(&ctx->cancel_seq);
	io_init_poll_iocb(poll, mask, io_poll_wake);
	poll->file = req->file;
	req->apoll_events = poll->events;

	ipt->pt._key = mask;
	ipt->req = req;
	ipt->error = 0;
	ipt->nr_entries = 0;
	/*
	 * Polling is either completed here or via task_work, so if we're in the
	 * task context we're naturally serialised with tw by merit of running
	 * the same task. When it's io-wq, take the ownership to prevent tw
	 * from running. However, when we're in the task context, skip taking
	 * it as an optimisation.
	 *
	 * Note: even though the request won't be completed/freed, without
	 * ownership we still can race with io_poll_wake().
	 * io_poll_can_finish_inline() tries to deal with that.
	 */
	ipt->owning = issue_flags & IO_URING_F_UNLOCKED;

	atomic_set(&req->poll_refs, (int)ipt->owning);
	mask = vfs_poll(req->file, &ipt->pt) & poll->events;

	if (unlikely(ipt->error || !ipt->nr_entries)) {
		io_poll_remove_entries(req);

		if (!io_poll_can_finish_inline(req, ipt)) {
			io_poll_mark_cancelled(req);
			return 0;
		} else if (mask && (poll->events & EPOLLET)) {
			ipt->result_mask = mask;
			return 1;
		}
		return ipt->error ?: -EINVAL;
	}

	if (mask &&
	   ((poll->events & (EPOLLET|EPOLLONESHOT)) == (EPOLLET|EPOLLONESHOT))) {
		if (!io_poll_can_finish_inline(req, ipt))
			return 0;
		io_poll_remove_entries(req);
		ipt->result_mask = mask;
		/* no one else has access to the req, forget about the ref */
		return 1;
	}

	if (req->flags & REQ_F_HASH_LOCKED)
		io_poll_req_insert_locked(req);
	else
		io_poll_req_insert(req);

	if (mask && (poll->events & EPOLLET) &&
	    io_poll_can_finish_inline(req, ipt)) {
		__io_poll_execute(req, mask);
		return 0;
	}

	if (ipt->owning) {
		/*
		 * Release ownership. If someone tried to queue a tw while it was
		 * locked, kick it off for them.
		 */
		v = atomic_dec_return(&req->poll_refs);
		if (unlikely(v & IO_POLL_REF_MASK))
			__io_poll_execute(req, 0);
	}
	return 0;
}

static void io_async_queue_proc(struct file *file, struct wait_queue_head *head,
			       struct poll_table_struct *p)
{
	struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
	struct async_poll *apoll = pt->req->apoll;

	__io_queue_proc(&apoll->poll, pt, head, &apoll->double_poll);
}

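/*
 * Reuse the apoll across (re)arms if the request was already polled once,
 * otherwise take an entry from the ctx apoll cache when we hold the ring
 * lock, and fall back to an atomic allocation.
 */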
static struct async_poll *io_req_alloc_apoll(struct io_kiocb *req,
					     unsigned issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct async_poll *apoll;

	if (req->flags & REQ_F_POLLED) {
		apoll = req->apoll;
		kfree(apoll->double_poll);
	} else if (!(issue_flags & IO_URING_F_UNLOCKED) &&
		   !list_empty(&ctx->apoll_cache)) {
		apoll = list_first_entry(&ctx->apoll_cache, struct async_poll,
						poll.wait.entry);
		list_del_init(&apoll->poll.wait.entry);
	} else {
		apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
		if (unlikely(!apoll))
			return NULL;
	}
	apoll->double_poll = NULL;
	req->apoll = apoll;
	return apoll;
}

int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags)
{
	const struct io_op_def *def = &io_op_defs[req->opcode];
	struct async_poll *apoll;
	struct io_poll_table ipt;
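	/* always poll for error/priority events; EPOLLET makes apoll edge-triggered */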
	__poll_t mask = POLLPRI | POLLERR | EPOLLET;
	int ret;

	/*
	 * apoll requests already grab the mutex to complete in the tw handler,
	 * so removal from the mutex-backed hash is free, use it by default.
	 */
	if (issue_flags & IO_URING_F_UNLOCKED)
		req->flags &= ~REQ_F_HASH_LOCKED;
	else
		req->flags |= REQ_F_HASH_LOCKED;

	if (!def->pollin && !def->pollout)
		return IO_APOLL_ABORTED;
	if (!file_can_poll(req->file))
		return IO_APOLL_ABORTED;
	if ((req->flags & (REQ_F_POLLED|REQ_F_PARTIAL_IO)) == REQ_F_POLLED)
		return IO_APOLL_ABORTED;
	if (!(req->flags & REQ_F_APOLL_MULTISHOT))
		mask |= EPOLLONESHOT;

	if (def->pollin) {
		mask |= EPOLLIN | EPOLLRDNORM;

		/* If reading from MSG_ERRQUEUE using recvmsg, ignore POLLIN */
		if (req->flags & REQ_F_CLEAR_POLLIN)
			mask &= ~EPOLLIN;
	} else {
		mask |= EPOLLOUT | EPOLLWRNORM;
	}
	if (def->poll_exclusive)
		mask |= EPOLLEXCLUSIVE;

	apoll = io_req_alloc_apoll(req, issue_flags);
	if (!apoll)
		return IO_APOLL_ABORTED;
	req->flags |= REQ_F_POLLED;
	ipt.pt._qproc = io_async_queue_proc;

	io_kbuf_recycle(req, issue_flags);

	ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask, issue_flags);
	if (ret)
		return ret > 0 ? IO_APOLL_READY : IO_APOLL_ABORTED;
	trace_io_uring_poll_arm(req, mask, apoll->poll.events);
	return IO_APOLL_OK;
}

static __cold bool io_poll_remove_all_table(struct task_struct *tsk,
					    struct io_hash_table *table,
					    bool cancel_all)
{
	unsigned nr_buckets = 1U << table->hash_bits;
	struct hlist_node *tmp;
	struct io_kiocb *req;
	bool found = false;
	int i;

	for (i = 0; i < nr_buckets; i++) {
		struct io_hash_bucket *hb = &table->hbs[i];

		spin_lock(&hb->lock);
		hlist_for_each_entry_safe(req, tmp, &hb->list, hash_node) {
			if (io_match_task_safe(req, tsk, cancel_all)) {
				hlist_del_init(&req->hash_node);
				io_poll_cancel_req(req);
				found = true;
			}
		}
		spin_unlock(&hb->lock);
	}
	return found;
}

/*
 * Returns true if we found and killed one or more poll requests
 */
__cold bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk,
			       bool cancel_all)
	__must_hold(&ctx->uring_lock)
{
	bool ret;

	ret = io_poll_remove_all_table(tsk, &ctx->cancel_table, cancel_all);
	ret |= io_poll_remove_all_table(tsk, &ctx->cancel_table_locked, cancel_all);
	return ret;
}

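/*
 * On success the request is returned with its hash bucket lock held; the
 * caller must unlock it via *out_bucket once done with the request.
 */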
static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, bool poll_only,
				     struct io_cancel_data *cd,
				     struct io_hash_table *table,
				     struct io_hash_bucket **out_bucket)
{
	struct io_kiocb *req;
	u32 index = hash_long(cd->data, table->hash_bits);
	struct io_hash_bucket *hb = &table->hbs[index];

	*out_bucket = NULL;

	spin_lock(&hb->lock);
	hlist_for_each_entry(req, &hb->list, hash_node) {
		if (cd->data != req->cqe.user_data)
			continue;
		if (poll_only && req->opcode != IORING_OP_POLL_ADD)
			continue;
		if (cd->flags & IORING_ASYNC_CANCEL_ALL) {
			if (cd->seq == req->work.cancel_seq)
				continue;
			req->work.cancel_seq = cd->seq;
		}
		*out_bucket = hb;
		return req;
	}
	spin_unlock(&hb->lock);
	return NULL;
}

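/*
 * Like io_poll_find(), but scans all buckets and matches by file (or any
 * request with IORING_ASYNC_CANCEL_ANY). The same locking contract applies:
 * the bucket lock is held on successful return.
 */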
static struct io_kiocb *io_poll_file_find(struct io_ring_ctx *ctx,
					  struct io_cancel_data *cd,
					  struct io_hash_table *table,
					  struct io_hash_bucket **out_bucket)
{
	unsigned nr_buckets = 1U << table->hash_bits;
	struct io_kiocb *req;
	int i;

	*out_bucket = NULL;

	for (i = 0; i < nr_buckets; i++) {
		struct io_hash_bucket *hb = &table->hbs[i];

		spin_lock(&hb->lock);
		hlist_for_each_entry(req, &hb->list, hash_node) {
			if (!(cd->flags & IORING_ASYNC_CANCEL_ANY) &&
			    req->file != cd->file)
				continue;
			if (cd->seq == req->work.cancel_seq)
				continue;
			req->work.cancel_seq = cd->seq;
			*out_bucket = hb;
			return req;
		}
		spin_unlock(&hb->lock);
	}
	return NULL;
}

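/* called with the request's hash bucket lock held, see io_poll_remove() */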
static int io_poll_disarm(struct io_kiocb *req)
{
	if (!req)
		return -ENOENT;
	if (!io_poll_get_ownership(req))
		return -EALREADY;
	io_poll_remove_entries(req);
	hash_del(&req->hash_node);
	return 0;
}

static int __io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
			    struct io_hash_table *table)
{
	struct io_hash_bucket *bucket;
	struct io_kiocb *req;

	if (cd->flags & (IORING_ASYNC_CANCEL_FD|IORING_ASYNC_CANCEL_ANY))
		req = io_poll_file_find(ctx, cd, table, &bucket);
	else
		req = io_poll_find(ctx, false, cd, table, &bucket);

	if (req)
		io_poll_cancel_req(req);
	if (bucket)
		spin_unlock(&bucket->lock);
	return req ? 0 : -ENOENT;
}

int io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
		   unsigned issue_flags)
{
	int ret;

	ret = __io_poll_cancel(ctx, cd, &ctx->cancel_table);
	if (ret != -ENOENT)
		return ret;

	io_ring_submit_lock(ctx, issue_flags);
	ret = __io_poll_cancel(ctx, cd, &ctx->cancel_table_locked);
	io_ring_submit_unlock(ctx, issue_flags);
	return ret;
}

static __poll_t io_poll_parse_events(const struct io_uring_sqe *sqe,
				     unsigned int flags)
{
	u32 events;

	events = READ_ONCE(sqe->poll32_events);
#ifdef __BIG_ENDIAN
	events = swahw32(events);
#endif
	if (!(flags & IORING_POLL_ADD_MULTI))
		events |= EPOLLONESHOT;
	if (!(flags & IORING_POLL_ADD_LEVEL))
		events |= EPOLLET;
	return demangle_poll(events) |
		(events & (EPOLLEXCLUSIVE|EPOLLONESHOT|EPOLLET));
}

int io_poll_remove_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_poll_update *upd = io_kiocb_to_cmd(req);
	u32 flags;

	if (sqe->buf_index || sqe->splice_fd_in)
		return -EINVAL;
	flags = READ_ONCE(sqe->len);
	if (flags & ~(IORING_POLL_UPDATE_EVENTS | IORING_POLL_UPDATE_USER_DATA |
		      IORING_POLL_ADD_MULTI))
		return -EINVAL;
	/* meaningless without update */
	if (flags == IORING_POLL_ADD_MULTI)
		return -EINVAL;

	upd->old_user_data = READ_ONCE(sqe->addr);
	upd->update_events = flags & IORING_POLL_UPDATE_EVENTS;
	upd->update_user_data = flags & IORING_POLL_UPDATE_USER_DATA;

	upd->new_user_data = READ_ONCE(sqe->off);
	if (!upd->update_user_data && upd->new_user_data)
		return -EINVAL;
	if (upd->update_events)
		upd->events = io_poll_parse_events(sqe, flags);
	else if (sqe->poll32_events)
		return -EINVAL;

	return 0;
}

int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_poll *poll = io_kiocb_to_cmd(req);
	u32 flags;

	if (sqe->buf_index || sqe->off || sqe->addr)
		return -EINVAL;
	flags = READ_ONCE(sqe->len);
	if (flags & ~(IORING_POLL_ADD_MULTI|IORING_POLL_ADD_LEVEL))
		return -EINVAL;
	if ((flags & IORING_POLL_ADD_MULTI) && (req->flags & REQ_F_CQE_SKIP))
		return -EINVAL;

	poll->events = io_poll_parse_events(sqe, flags);
	return 0;
}

int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_poll *poll = io_kiocb_to_cmd(req);
	struct io_poll_table ipt;
	int ret;

	ipt.pt._qproc = io_poll_queue_proc;

	/*
	 * If sqpoll or single issuer, there is no contention for ->uring_lock
	 * and we'll end up holding it in tw handlers anyway.
	 */
	if (!(issue_flags & IO_URING_F_UNLOCKED) &&
	    (req->ctx->flags & (IORING_SETUP_SQPOLL | IORING_SETUP_SINGLE_ISSUER)))
		req->flags |= REQ_F_HASH_LOCKED;
	else
		req->flags &= ~REQ_F_HASH_LOCKED;

	ret = __io_arm_poll_handler(req, poll, &ipt, poll->events, issue_flags);
	if (ret > 0) {
		io_req_set_res(req, ipt.result_mask, 0);
		return IOU_OK;
	}
	return ret ?: IOU_ISSUE_SKIP_COMPLETE;
}

int io_poll_remove(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_poll_update *poll_update = io_kiocb_to_cmd(req);
	struct io_cancel_data cd = { .data = poll_update->old_user_data, };
	struct io_ring_ctx *ctx = req->ctx;
	struct io_hash_bucket *bucket;
	struct io_kiocb *preq;
	int ret2, ret = 0;
	bool locked;

	preq = io_poll_find(ctx, true, &cd, &ctx->cancel_table, &bucket);
	ret2 = io_poll_disarm(preq);
	if (bucket)
		spin_unlock(&bucket->lock);
	if (!ret2)
		goto found;
	if (ret2 != -ENOENT) {
		ret = ret2;
		goto out;
	}

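	/* not in the lockless table, retry the ->uring_lock protected one */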
	io_ring_submit_lock(ctx, issue_flags);
	preq = io_poll_find(ctx, true, &cd, &ctx->cancel_table_locked, &bucket);
	ret2 = io_poll_disarm(preq);
	if (bucket)
		spin_unlock(&bucket->lock);
	io_ring_submit_unlock(ctx, issue_flags);
	if (ret2) {
		ret = ret2;
		goto out;
	}

found:
	if (WARN_ON_ONCE(preq->opcode != IORING_OP_POLL_ADD)) {
		ret = -EFAULT;
		goto out;
	}

	if (poll_update->update_events || poll_update->update_user_data) {
		/* only replace the event mask bits, keep the behaviour flags */
		if (poll_update->update_events) {
			struct io_poll *poll = io_kiocb_to_cmd(preq);

			poll->events &= ~0xffff;
			poll->events |= poll_update->events & 0xffff;
			poll->events |= IO_POLL_UNMASK;
		}
		if (poll_update->update_user_data)
			preq->cqe.user_data = poll_update->new_user_data;

		ret2 = io_poll_add(preq, issue_flags);
		/* successfully updated, don't complete poll request */
		if (!ret2 || ret2 == -EIOCBQUEUED)
			goto out;
	}

	req_set_fail(preq);
	io_req_set_res(preq, -ECANCELED, 0);
	locked = !(issue_flags & IO_URING_F_UNLOCKED);
	io_req_task_complete(preq, &locked);
out:
	if (ret < 0) {
		req_set_fail(req);
		return ret;
	}
	/* complete update request, we're done with it */
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}