Commit 4e8a3561 authored by Rich Felker

overhaul aio implementation for correctness

previously, aio operations were not tracked by file descriptor; each
operation was completely independent. this resulted in non-conforming
behavior for non-seekable/append-mode writes (which are required to be
ordered) and made it impossible to implement aio_cancel, which in turn
made closing file descriptors with outstanding aio operations unsafe.
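(illustration, not part of the original commit message: a minimal sketch of
the ordering requirement. two aio_write submissions on the same append-mode
fd must reach the file in submission order, which is what the new per-fd
queues enforce. the filename and buffer sizes below are arbitrary.)

#include <aio.h>
#include <errno.h>
#include <fcntl.h>
#include <signal.h>

int main(void)
{
	int fd = open("log.txt", O_WRONLY|O_CREAT|O_APPEND, 0644);
	if (fd < 0) return 1;
	struct aiocb a = {
		.aio_fildes = fd, .aio_buf = "first\n", .aio_nbytes = 6,
		.aio_sigevent.sigev_notify = SIGEV_NONE,
	};
	struct aiocb b = {
		.aio_fildes = fd, .aio_buf = "second\n", .aio_nbytes = 7,
		.aio_sigevent.sigev_notify = SIGEV_NONE,
	};
	aio_write(&a);
	aio_write(&b); /* required to land after a's data */
	while (aio_error(&a) == EINPROGRESS || aio_error(&b) == EINPROGRESS)
		; /* polling for brevity; aio_suspend would block properly */
	return 0;
}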

the new implementation is significantly heavier (roughly twice the
size, and seems to be slightly slower) and presently aims mainly at
correctness, not performance.

most of the public interfaces have been moved into a single file,
aio.c, because there is little benefit to be had from splitting them.
whenever any aio functions are used, aio_cancel and the internal
queue lifetime management and fd-to-queue mapping code must be linked,
and these functions make up the bulk of the code size.

the close function's interaction with aio is implemented with weak
alias magic, to avoid pulling in heavy aio cancellation code in
programs that don't use aio, and the expensive cancellation path
(which includes signal blocking) is optimized out when there are no
active aio queues.
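(the pattern, condensed from the close.c and aio.c pieces below:)

/* close.c: default no-op, used when no aio code is linked */
static int dummy(int fd)
{
	return fd;
}
weak_alias(dummy, __aio_close);

/* aio.c: strong definition that overrides the weak alias */
int __aio_close(int fd)
{
	a_barrier();
	if (aio_fd_cnt) aio_cancel(fd, 0);
	return fd;
}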
Parent 594ffed8
#include <aio.h>
#include <pthread.h>
#include <semaphore.h>
#include <limits.h>
#include <fcntl.h>
#include <errno.h>
#include <unistd.h>
#include <stdlib.h>
#include "syscall.h"
#include "atomic.h"
#include "libc.h"
#include "pthread_impl.h"
/* The following is a threads-based implementation of AIO with minimal
* dependence on implementation details. Most synchronization is
* performed with pthread primitives, but atomics and futex operations
* are used for notification in a couple places where the pthread
* primitives would be inefficient or impractical.
*
* For each fd with outstanding aio operations, an aio_queue structure
* is maintained. These are reference-counted and destroyed by the last
* aio worker thread to exit. Accessing any member of the aio_queue
* structure requires a lock on the aio_queue. Adding and removing aio
* queues themselves requires a write lock on the global map object,
* a 4-level table mapping file descriptor numbers to aio queues. A
* read lock on the map is used to obtain locks on existing queues by
* excluding destruction of the queue by a different thread while it is
* being locked.
*
* Each aio queue has a list of active threads/operations. Presently there
* is a one-to-one relationship between threads and operations. The only
* members of the aio_thread structure which are accessed by other threads
* are the linked list pointers, op (which is immutable), running (which
* is updated atomically), and err (which is synchronized via running),
* so no locking is necessary. Most of the other members are used for
* sharing data between the main flow of execution and the cancellation
* cleanup handler.
*
* Taking any aio locks requires having all signals blocked. This is
* necessary because aio_cancel is needed by close, and close is required
* to be async-signal safe. All aio worker threads run with all signals
* blocked permanently.
*/
struct aio_args {
struct aiocb *cb;
int op;
int err;
sem_t sem;
};
struct aio_thread {
pthread_t td;
struct aiocb *cb;
struct aio_thread *next, *prev;
struct aio_queue *q;
int running, err, op;
ssize_t ret;
};
struct aio_queue {
int fd, seekable, append, ref, init;
pthread_mutex_t lock;
pthread_cond_t cond;
struct aio_thread *head;
};
static pthread_rwlock_t maplock = PTHREAD_RWLOCK_INITIALIZER;
static struct aio_queue *****map;
static volatile int aio_fd_cnt;
volatile int __aio_fut;
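/* Note (added for illustration): an fd is decomposed into four byte-sized
 * indices, so e.g. fd 0x01020304 resolves to map[1][2][3][4]. The top
 * level has (-1U/2+1)>>24 == 128 entries, covering fds up to INT_MAX;
 * the lower levels have 256 entries each and are allocated lazily below
 * under a write lock on maplock. */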
static struct aio_queue *__aio_get_queue(int fd, int need)
{
if (fd < 0) return 0;
int a=fd>>24;
unsigned char b=fd>>16, c=fd>>8, d=fd;
struct aio_queue *q = 0;
pthread_rwlock_rdlock(&maplock);
if ((!map || !map[a] || !map[a][b] || !map[a][b][c] || !(q=map[a][b][c][d])) && need) {
pthread_rwlock_unlock(&maplock);
pthread_rwlock_wrlock(&maplock);
if (!map) map = calloc(sizeof *map, (-1U/2+1)>>24);
if (!map) goto out;
if (!map[a]) map[a] = calloc(sizeof **map, 256);
if (!map[a]) goto out;
if (!map[a][b]) map[a][b] = calloc(sizeof ***map, 256);
if (!map[a][b]) goto out;
if (!map[a][b][c]) map[a][b][c] = calloc(sizeof ****map, 256);
if (!map[a][b][c]) goto out;
if (!(q = map[a][b][c][d])) {
map[a][b][c][d] = q = calloc(sizeof *****map, 1);
if (q) {
q->fd = fd;
pthread_mutex_init(&q->lock, 0);
pthread_cond_init(&q->cond, 0);
a_inc(&aio_fd_cnt);
}
}
}
if (q) pthread_mutex_lock(&q->lock);
out:
pthread_rwlock_unlock(&maplock);
return q;
}
static void __aio_unref_queue(struct aio_queue *q)
{
if (q->ref > 1) {
q->ref--;
pthread_mutex_unlock(&q->lock);
return;
}
/* This is potentially the last reference, but a new reference
* may arrive since we cannot free the queue object without first
* taking the maplock, which requires releasing the queue lock. */
pthread_mutex_unlock(&q->lock);
pthread_rwlock_wrlock(&maplock);
pthread_mutex_lock(&q->lock);
if (q->ref == 1) {
int fd=q->fd;
int a=fd>>24;
unsigned char b=fd>>16, c=fd>>8, d=fd;
map[a][b][c][d] = 0;
a_dec(&aio_fd_cnt);
pthread_rwlock_unlock(&maplock);
pthread_mutex_unlock(&q->lock);
free(q);
} else {
q->ref--;
pthread_rwlock_unlock(&maplock);
pthread_mutex_unlock(&q->lock);
}
}
static void cleanup(void *ctx)
{
struct aio_thread *at = ctx;
struct aio_queue *q = at->q;
struct aiocb *cb = at->cb;
struct sigevent sev = cb->aio_sigevent;
/* There are four potential types of waiters we could need to wake:
* 1. Callers of aio_cancel/close.
* 2. Callers of aio_suspend with a single aiocb.
* 3. Callers of aio_suspend with a list.
* 4. AIO worker threads waiting for sequenced operations.
* Types 1-3 are notified via atomics/futexes, mainly for AS-safety
* considerations. Type 4 is notified later via a cond var. */
cb->__ret = at->ret;
if (a_swap(&at->running, 0) < 0)
__wake(&at->running, -1, 1);
if (a_swap(&cb->__err, at->err) != EINPROGRESS)
__wake(&cb->__err, -1, 1);
if (a_swap(&__aio_fut, 0))
__wake(&__aio_fut, -1, 1);
pthread_mutex_lock(&q->lock);
if (at->next) at->next->prev = at->prev;
if (at->prev) at->prev->next = at->next;
else q->head = at->next;
/* Signal aio worker threads waiting for sequenced operations. */
pthread_cond_broadcast(&q->cond);
__aio_unref_queue(q);
if (sev.sigev_notify == SIGEV_SIGNAL) {
siginfo_t si = {
.si_signo = sev.sigev_signo,
.si_value = sev.sigev_value,
.si_code = SI_ASYNCIO,
.si_pid = getpid(),
.si_uid = getuid()
};
__syscall(SYS_rt_sigqueueinfo, si.si_pid, si.si_signo, &si);
}
if (sev.sigev_notify == SIGEV_THREAD) {
a_store(&__pthread_self()->cancel, 0);
sev.sigev_notify_function(sev.sigev_value);
}
}
static void *io_thread_func(void *ctx)
{
struct aio_thread at, *p;
struct aio_args *args = ctx;
struct aiocb *cb = args->cb;
int fd = cb->aio_fildes;
int op = args->op;
void *buf = (void *)cb->aio_buf;
size_t len = cb->aio_nbytes;
off_t off = cb->aio_offset;
struct aio_queue *q = __aio_get_queue(fd, 1);
ssize_t ret;
args->err = q ? 0 : EAGAIN;
sem_post(&args->sem);
if (!q) return 0;
at.op = op;
at.running = 1;
at.ret = -1;
at.err = ECANCELED;
at.q = q;
at.td = __pthread_self();
at.cb = cb;
at.prev = 0;
if ((at.next = q->head)) at.next->prev = &at;
q->head = &at;
q->ref++;
if (!q->init) {
int seekable = lseek(fd, 0, SEEK_CUR) >= 0;
q->seekable = seekable;
q->append = !seekable || (fcntl(fd, F_GETFL) & O_APPEND);
q->init = 1;
}
pthread_cleanup_push(cleanup, &at);
/* Wait for sequenced operations. */
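	/* Reads never wait. Writes wait only on append-mode (or
	 * non-seekable) queues, where ordering is observable; fsync and
	 * fdatasync always wait for all earlier writes to complete. */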
if (op!=LIO_READ && (op!=LIO_WRITE || q->append)) {
for (;;) {
for (p=at.next; p && p->op!=LIO_WRITE; p=p->next);
if (!p) break;
pthread_cond_wait(&q->cond, &q->lock);
}
}
pthread_mutex_unlock(&q->lock);
switch (op) {
case LIO_WRITE:
ret = q->append ? write(fd, buf, len) : pwrite(fd, buf, len, off);
break;
case LIO_READ:
ret = !q->seekable ? read(fd, buf, len) : pread(fd, buf, len, off);
break;
case O_SYNC:
ret = fsync(fd);
break;
case O_DSYNC:
ret = fdatasync(fd);
break;
}
at.ret = ret;
at.err = ret<0 ? errno : 0;
pthread_cleanup_pop(1);
return 0;
}
static int submit(struct aiocb *cb, int op)
{
int ret = 0;
pthread_attr_t a;
sigset_t allmask, origmask;
pthread_t td;
struct aio_args args = { .cb = cb, .op = op };
sem_init(&args.sem, 0, 0);
if (cb->aio_sigevent.sigev_notify == SIGEV_THREAD) {
if (cb->aio_sigevent.sigev_notify_attributes)
a = *cb->aio_sigevent.sigev_notify_attributes;
else
pthread_attr_init(&a);
} else {
pthread_attr_init(&a);
pthread_attr_setstacksize(&a, PTHREAD_STACK_MIN);
pthread_attr_setguardsize(&a, 0);
}
pthread_attr_setdetachstate(&a, PTHREAD_CREATE_DETACHED);
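	/* Worker threads must run with all signals blocked (see the
	 * top-of-file comment), so block them around pthread_create and
	 * restore the caller's mask afterwards. */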
sigfillset(&allmask);
pthread_sigmask(SIG_BLOCK, &allmask, &origmask);
cb->__err = EINPROGRESS;
if (pthread_create(&td, &a, io_thread_func, &args)) {
errno = EAGAIN;
ret = -1;
}
pthread_sigmask(SIG_SETMASK, &origmask, 0);
if (!ret) {
while (sem_wait(&args.sem));
if (args.err) {
errno = args.err;
ret = -1;
}
}
return ret;
}
int aio_read(struct aiocb *cb)
{
return submit(cb, LIO_READ);
}
int aio_write(struct aiocb *cb)
{
return submit(cb, LIO_WRITE);
}
int aio_fsync(int op, struct aiocb *cb)
{
if (op != O_SYNC && op != O_DSYNC) {
errno = EINVAL;
return -1;
}
return submit(cb, op);
}
ssize_t aio_return(struct aiocb *cb)
{
return cb->__ret;
}
int aio_error(const struct aiocb *cb)
{
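	/* aio_suspend may set the high bit of __err as a waiters flag;
	 * mask it off so callers see only the plain status value. */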
a_barrier();
return cb->__err & 0x7fffffff;
}
int aio_cancel(int fd, struct aiocb *cb)
{
sigset_t allmask, origmask;
int ret = AIO_ALLDONE;
struct aio_thread *p;
struct aio_queue *q;
/* Unspecified behavior case. Report an error. */
if (cb && fd != cb->aio_fildes) {
errno = EINVAL;
return -1;
}
sigfillset(&allmask);
pthread_sigmask(SIG_BLOCK, &allmask, &origmask);
if (!(q = __aio_get_queue(fd, 0))) {
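		/* No queue means no operations are in flight; use fcntl to
		 * distinguish a bad file descriptor (error) from all-done. */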
if (fcntl(fd, F_GETFD) < 0) ret = -1;
goto done;
}
for (p = q->head; p; p = p->next) {
if (cb && cb != p->cb) continue;
/* Transition target from running to running-with-waiters */
if (a_cas(&p->running, 1, -1)) {
pthread_cancel(p->td);
__wait(&p->running, 0, -1, 1);
if (p->err == ECANCELED) ret = AIO_CANCELED;
}
}
pthread_mutex_unlock(&q->lock);
done:
pthread_sigmask(SIG_SETMASK, &origmask, 0);
return ret;
}
int __aio_close(int fd)
{
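	/* Skip the expensive cancellation path (signal blocking and queue
	 * lookup) when no aio queues are active. */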
a_barrier();
if (aio_fd_cnt) aio_cancel(fd, 0);
return fd;
}
LFS64(aio_cancel);
LFS64(aio_error);
LFS64(aio_fsync);
LFS64(aio_read);
LFS64(aio_write);
LFS64(aio_return);
#include <aio.h>
#include <pthread.h>
#include <errno.h>
#include "libc.h"
int aio_cancel(int fd, struct aiocb *cb)
{
if (!cb) {
/* FIXME: for correctness, we should return AIO_ALLDONE
* if there are no outstanding aio operations on this
* file descriptor, but that would require making aio
* much slower, and seems to have little advantage since
* we don't support cancellation anyway. */
return AIO_NOTCANCELED;
}
return cb->__err==EINPROGRESS ? AIO_NOTCANCELED : AIO_ALLDONE;
}
LFS64(aio_cancel);
#include <aio.h>
#include "libc.h"
int aio_error(const struct aiocb *cb)
{
return cb->__err;
}
LFS64(aio_error);
#include <aio.h>
#include <errno.h>
#include "libc.h"
int aio_fsync(int op, struct aiocb *cb)
{
/* FIXME: unsupported */
errno = EINVAL;
return -1;
}
LFS64(aio_fsync);
#include <aio.h>
#include <fcntl.h>
#include <errno.h>
#include <unistd.h>
#include "pthread_impl.h"
#include "libc.h"
static void dummy(void)
{
}
weak_alias(dummy, __aio_wake);
static void notify_signal(struct sigevent *sev)
{
siginfo_t si = {
.si_signo = sev->sigev_signo,
.si_value = sev->sigev_value,
.si_code = SI_ASYNCIO,
.si_pid = getpid(),
.si_uid = getuid()
};
__syscall(SYS_rt_sigqueueinfo, si.si_pid, si.si_signo, &si);
}
static void *io_thread(void *p)
{
struct aiocb *cb = p;
int fd = cb->aio_fildes;
void *buf = (void *)cb->aio_buf;
size_t len = cb->aio_nbytes;
off_t off = cb->aio_offset;
int op = cb->aio_lio_opcode;
struct sigevent sev = cb->aio_sigevent;
ssize_t ret;
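	/* Old strategy: attempt positioned I/O first and fall back to
	 * plain read/write when the fd is non-seekable (ESPIPE) or opened
	 * in append mode. */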
if (op == LIO_WRITE) {
if ( (fcntl(fd, F_GETFL) & O_APPEND)
||((ret = pwrite(fd, buf, len, off))<0 && errno==ESPIPE) )
ret = write(fd, buf, len);
} else if (op == LIO_READ) {
if ( (ret = pread(fd, buf, len, off))<0 && errno==ESPIPE )
ret = read(fd, buf, len);
} else {
ret = 0;
}
cb->__ret = ret;
if (ret < 0) a_store(&cb->__err, errno);
else a_store(&cb->__err, 0);
__aio_wake();
switch (sev.sigev_notify) {
case SIGEV_SIGNAL:
notify_signal(&sev);
break;
case SIGEV_THREAD:
sev.sigev_notify_function(sev.sigev_value);
break;
}
return 0;
}
static int new_req(struct aiocb *cb)
{
int ret = 0;
pthread_attr_t a;
sigset_t set;
pthread_t td;
if (cb->aio_sigevent.sigev_notify == SIGEV_THREAD) {
if (cb->aio_sigevent.sigev_notify_attributes)
a = *cb->aio_sigevent.sigev_notify_attributes;
else
pthread_attr_init(&a);
} else {
pthread_attr_init(&a);
pthread_attr_setstacksize(&a, PAGE_SIZE);
pthread_attr_setguardsize(&a, 0);
}
pthread_attr_setdetachstate(&a, PTHREAD_CREATE_DETACHED);
sigfillset(&set);
pthread_sigmask(SIG_BLOCK, &set, &set);
cb->__err = EINPROGRESS;
if (pthread_create(&td, &a, io_thread, cb)) {
errno = EAGAIN;
ret = -1;
}
pthread_sigmask(SIG_SETMASK, &set, 0);
cb->__td = td;
return ret;
}
int aio_read(struct aiocb *cb)
{
cb->aio_lio_opcode = LIO_READ;
return new_req(cb);
}
int aio_write(struct aiocb *cb)
{
cb->aio_lio_opcode = LIO_WRITE;
return new_req(cb);
}
LFS64(aio_read);
LFS64(aio_write);
#include <aio.h>
#include "libc.h"
ssize_t aio_return(struct aiocb *cb)
{
return cb->__ret;
}
LFS64(aio_return);
/* src/aio/aio_suspend.c, previous version: */
#include <aio.h>
#include <errno.h>
#include <time.h>
#include "atomic.h"
#include "libc.h"
#include "pthread_impl.h"

/* Due to the requirement that aio_suspend be async-signal-safe, we cannot
 * use any locks, wait queues, etc. that would make it more efficient. The
 * only obviously-correct algorithm is to generate a wakeup every time any
 * aio operation finishes and have aio_suspend re-evaluate the completion
 * status of each aiocb it was waiting on. */

static volatile int seq;

void __aio_wake(void)
{
	a_inc(&seq);
	__wake(&seq, -1, 1);
}

int aio_suspend(const struct aiocb *const cbs[], int cnt, const struct timespec *ts)
{
	int i, last, first=1, ret=0;
	struct timespec at;

	if (cnt<0) {
		errno = EINVAL;
		return -1;
	}

	for (;;) {
		last = seq;

		for (i=0; i<cnt; i++) {
			if (cbs[i] && cbs[i]->__err != EINPROGRESS)
				return 0;
		}

		if (first && ts) {
			clock_gettime(CLOCK_MONOTONIC, &at);
			at.tv_sec += ts->tv_sec;
			if ((at.tv_nsec += ts->tv_nsec) >= 1000000000) {
				at.tv_nsec -= 1000000000;
				at.tv_sec++;
			}
			first = 0;
		}

		ret = __timedwait(&seq, last, CLOCK_MONOTONIC,
			ts ? &at : 0, 0, 0, 1);

		if (ret == ETIMEDOUT) ret = EAGAIN;

		if (ret) {
			errno = ret;
			return -1;
		}
	}
}

/* src/aio/aio_suspend.c, after this commit: */
#include <aio.h>
#include <errno.h>
#include <time.h>
#include "atomic.h"
#include "libc.h"
#include "pthread_impl.h"

extern volatile int __aio_fut;

int aio_suspend(const struct aiocb *const cbs[], int cnt, const struct timespec *ts)
{
	int i, tid = 0, ret, expect = 0;
	struct timespec at;
	volatile int dummy_fut, *pfut;
	int nzcnt = 0;
	const struct aiocb *cb = 0;

	if (cnt<0) {
		errno = EINVAL;
		return -1;
	}

	for (i=0; i<cnt; i++) if (cbs[i]) {
		if (aio_error(cbs[i]) != EINPROGRESS) return 0;
		nzcnt++;
		cb = cbs[i];
	}

	if (ts) {
		clock_gettime(CLOCK_MONOTONIC, &at);
		at.tv_sec += ts->tv_sec;
		if ((at.tv_nsec += ts->tv_nsec) >= 1000000000) {
			at.tv_nsec -= 1000000000;
			at.tv_sec++;
		}
	}

	for (;;) {
		for (i=0; i<cnt; i++)
			if (cbs[i] && aio_error(cbs[i]) != EINPROGRESS)
				return 0;

		switch (nzcnt) {
		case 0:
			pfut = &dummy_fut;
			break;
		case 1:
			pfut = (void *)&cb->__err;
			expect = EINPROGRESS | 0x80000000;
			a_cas(pfut, EINPROGRESS, expect);
			break;
		default:
			pfut = &__aio_fut;
			if (!tid) tid = __pthread_self()->tid;
			expect = a_cas(pfut, 0, tid);
			if (!expect) expect = tid;
			/* Need to recheck the predicate before waiting. */
			for (i=0; i<cnt; i++)
				if (cbs[i] && aio_error(cbs[i]) != EINPROGRESS)
					return 0;
			break;
		}

		ret = __timedwait(pfut, expect, CLOCK_MONOTONIC, ts?&at:0, 0, 0, 1);

		if (ret) {
			errno = ret==ETIMEDOUT ? EAGAIN : ret;
			return -1;
		}
	}
}
......
#include "stdio_impl.h"
static int dummy(int fd)
{
return fd;
}
weak_alias(dummy, __aio_close);
int __stdio_close(FILE *f)
{
	return syscall(SYS_close, __aio_close(f->fd));
}
......
@@ -3,8 +3,16 @@
#include "syscall.h"
#include "libc.h"
static int dummy(int fd)
{
return fd;
}
weak_alias(dummy, __aio_close);
int close(int fd)
{
fd = __aio_close(fd);
int r = __syscall_cp(SYS_close, fd);
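	/* The kernel closes the fd even when SYS_close returns EINTR, so
	 * report EINPROGRESS instead to keep callers from retrying. */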
if (r == -EINTR) r = -EINPROGRESS;
	return __syscall_ret(r);
}
......