#ifndef IOU_CORE_H
#define IOU_CORE_H

#include <linux/errno.h>
#include <linux/lockdep.h>
#include "io_uring_types.h"

#ifndef CREATE_TRACE_POINTS
#include <trace/events/io_uring.h>
#endif

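/*
 * Return values for opcode issue handlers: IOU_OK means the request was
 * completed inline and the issue path posts its CQE, while
 * IOU_ISSUE_SKIP_COMPLETE means the completion is posted separately
 * (e.g. from task_work), so the issue path must not post one itself.
 */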
enum {
	IOU_OK			= 0,
	IOU_ISSUE_SKIP_COMPLETE	= -EIOCBQUEUED,
};

struct io_uring_cqe *__io_get_cqe(struct io_ring_ctx *ctx);
bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data, s32 res,
			      u32 cflags, u64 extra1, u64 extra2);

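/*
 * Hand out the next free CQE. The fast path serves entries from the
 * locally cached [cqe_cached, cqe_sentinel) range; for IORING_SETUP_CQE32
 * rings the index is doubled because each big CQE spans two regular-sized
 * slots. Once the cached range is exhausted, fall back to __io_get_cqe(),
 * which may return NULL if the CQ ring is full.
 */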
static inline struct io_uring_cqe *io_get_cqe(struct io_ring_ctx *ctx)
{
	if (likely(ctx->cqe_cached < ctx->cqe_sentinel)) {
		struct io_uring_cqe *cqe = ctx->cqe_cached;

		if (ctx->flags & IORING_SETUP_CQE32) {
			unsigned int off = ctx->cqe_cached - ctx->rings->cqes;

			cqe += off;
		}

		ctx->cached_cq_tail++;
		ctx->cqe_cached++;
		return cqe;
	}

	return __io_get_cqe(ctx);
}

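/*
 * Copy the completion stashed in @req into the CQ ring, tracing it first.
 * If no CQE can be obtained because the ring is full, the completion is
 * queued on the overflow list instead. Handles both regular and
 * IORING_SETUP_CQE32 rings.
 */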
static inline bool __io_fill_cqe_req(struct io_ring_ctx *ctx,
				     struct io_kiocb *req)
{
	struct io_uring_cqe *cqe;

	if (!(ctx->flags & IORING_SETUP_CQE32)) {
		trace_io_uring_complete(req->ctx, req, req->cqe.user_data,
					req->cqe.res, req->cqe.flags, 0, 0);

		/*
		 * If we can't get a cq entry, userspace overflowed the
		 * submission (by quite a lot). Increment the overflow count in
		 * the ring.
		 */
		cqe = io_get_cqe(ctx);
		if (likely(cqe)) {
			memcpy(cqe, &req->cqe, sizeof(*cqe));
			return true;
		}

		return io_cqring_event_overflow(ctx, req->cqe.user_data,
						req->cqe.res, req->cqe.flags,
						0, 0);
	} else {
		u64 extra1 = 0, extra2 = 0;

		if (req->flags & REQ_F_CQE32_INIT) {
			extra1 = req->extra1;
			extra2 = req->extra2;
		}

		trace_io_uring_complete(req->ctx, req, req->cqe.user_data,
					req->cqe.res, req->cqe.flags, extra1, extra2);

		/*
		 * If we can't get a cq entry, userspace overflowed the
		 * submission (by quite a lot). Increment the overflow count in
		 * the ring.
		 */
		cqe = io_get_cqe(ctx);
		if (likely(cqe)) {
			memcpy(cqe, &req->cqe, sizeof(struct io_uring_cqe));
			WRITE_ONCE(cqe->big_cqe[0], extra1);
			WRITE_ONCE(cqe->big_cqe[1], extra2);
			return true;
		}

		return io_cqring_event_overflow(ctx, req->cqe.user_data,
				req->cqe.res, req->cqe.flags,
				extra1, extra2);
	}
}

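/*
 * Mark a request as failed. Requests that fail always post a CQE, so a
 * pending CQE-skip is cleared here and turned into skipping the CQEs of
 * the remainder of the link instead.
 */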
static inline void req_set_fail(struct io_kiocb *req)
{
	req->flags |= REQ_F_FAIL;
	if (req->flags & REQ_F_CQE_SKIP) {
		req->flags &= ~REQ_F_CQE_SKIP;
		req->flags |= REQ_F_SKIP_LINK_CQES;
	}
}

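/* Stash the result and completion flags in the request's embedded CQE */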
static inline void io_req_set_res(struct io_kiocb *req, s32 res, u32 cflags)
{
	req->cqe.res = res;
	req->cqe.flags = cflags;
}

static inline bool req_has_async_data(struct io_kiocb *req)
{
	return req->flags & REQ_F_ASYNC_DATA;
}

static inline void io_put_file(struct file *file)
{
	if (file)
		fput(file);
}

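/*
 * Drop ctx->uring_lock, but only if this path had to take it itself
 * (IO_URING_F_UNLOCKED); see io_ring_submit_lock() below.
 */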
static inline void io_ring_submit_unlock(struct io_ring_ctx *ctx,
					 unsigned issue_flags)
{
	lockdep_assert_held(&ctx->uring_lock);
	if (issue_flags & IO_URING_F_UNLOCKED)
		mutex_unlock(&ctx->uring_lock);
}

static inline void io_ring_submit_lock(struct io_ring_ctx *ctx,
				       unsigned issue_flags)
{
	/*
	 * "Normal" inline submissions always hold the uring_lock, since we
	 * grab it from the system call. Same is true for the SQPOLL offload.
	 * The only exception is when we've detached the request and issue it
	 * from an async worker thread; grab the lock for that case.
	 */
	if (issue_flags & IO_URING_F_UNLOCKED)
		mutex_lock(&ctx->uring_lock);
	lockdep_assert_held(&ctx->uring_lock);
}

static inline void io_commit_cqring(struct io_ring_ctx *ctx)
{
	/* order cqe stores with ring update */
	smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);
}

static inline void io_cqring_wake(struct io_ring_ctx *ctx)
{
	/*
	 * wake_up_all() may seem excessive, but io_wake_function() and
	 * io_should_wake() handle the termination of the loop and only
	 * wake as many waiters as we need to.
	 */
	if (wq_has_sleeper(&ctx->cq_wait))
		wake_up_all(&ctx->cq_wait);
}

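/* true if the SQ ring is completely filled with unconsumed entries */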
static inline bool io_sqring_full(struct io_ring_ctx *ctx)
{
	struct io_rings *r = ctx->rings;

	return READ_ONCE(r->sq.tail) - ctx->cached_sq_head == ctx->sq_entries;
}

static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
{
	struct io_rings *rings = ctx->rings;

	/* make sure SQ entry isn't read before tail */
	return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
}

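/*
 * Handle a pending signal notification and run any queued task_work for
 * the current task. Returns true if there was something to process.
 */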
static inline bool io_run_task_work(void)
{
	if (test_thread_flag(TIF_NOTIFY_SIGNAL) || task_work_pending(current)) {
		__set_current_state(TASK_RUNNING);
		clear_notify_signal();
		if (task_work_pending(current))
			task_work_run();
		return true;
	}

	return false;
}

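/*
 * task_work callbacks track whether ctx->uring_lock is held via @locked;
 * take the lock here if the callback chain doesn't hold it yet.
 */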
static inline void io_tw_lock(struct io_ring_ctx *ctx, bool *locked)
{
	if (!*locked) {
		mutex_lock(&ctx->uring_lock);
		*locked = true;
	}
}

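/*
 * Add a completed request to the per-submit completion batch, to be
 * flushed to the CQ ring later. flush_cqes is only set if at least one
 * batched request actually needs a CQE posted.
 */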
static inline void io_req_add_compl_list(struct io_kiocb *req)
{
	struct io_submit_state *state = &req->ctx->submit_state;

	if (!(req->flags & REQ_F_CQE_SKIP))
		state->flush_cqes = true;
	wq_list_add_tail(&req->comp_list, &state->compl_reqs);
}

int io_run_task_work_sig(void);
void io_req_complete_failed(struct io_kiocb *req, s32 res);
void __io_req_complete(struct io_kiocb *req, unsigned issue_flags);
void io_req_complete_post(struct io_kiocb *req);
void __io_req_complete_post(struct io_kiocb *req);
bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags);
void io_cqring_ev_posted(struct io_ring_ctx *ctx);
void __io_commit_cqring_flush(struct io_ring_ctx *ctx);

struct page **io_pin_pages(unsigned long ubuf, unsigned long len, int *npages);

struct file *io_file_get_normal(struct io_kiocb *req, int fd);
struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
			       unsigned issue_flags);

bool io_is_uring_fops(struct file *file);
bool io_alloc_async_data(struct io_kiocb *req);
void io_req_task_work_add(struct io_kiocb *req);
void io_req_task_prio_work_add(struct io_kiocb *req);
void io_req_tw_post_queue(struct io_kiocb *req, s32 res, u32 cflags);
void io_req_task_queue(struct io_kiocb *req);
void io_queue_iowq(struct io_kiocb *req, bool *dont_use);
void io_req_task_complete(struct io_kiocb *req, bool *locked);
void io_req_task_queue_fail(struct io_kiocb *req, int ret);
void io_req_task_submit(struct io_kiocb *req, bool *locked);
void tctx_task_work(struct callback_head *cb);
__cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd);
int io_uring_alloc_task_context(struct task_struct *task,
				struct io_ring_ctx *ctx);

int io_poll_issue(struct io_kiocb *req, bool *locked);
int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr);
int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin);
void io_free_batch_list(struct io_ring_ctx *ctx, struct io_wq_work_node *node);
int io_req_prep_async(struct io_kiocb *req);

struct io_wq_work *io_wq_free_work(struct io_wq_work *work);
void io_wq_submit_work(struct io_wq_work *work);

void io_free_req(struct io_kiocb *req);
void io_queue_next(struct io_kiocb *req);

bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
			bool cancel_all);

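/* Iterate @head and every request linked behind it */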
#define io_for_each_link(pos, head) \
	for (pos = (head); pos; pos = pos->link)

#endif