/*
 * DMA helper functions
 *
 * Copyright (c) 2009 Red Hat
 *
 * This work is licensed under the terms of the GNU General Public License
 * (GNU GPL), version 2 or later.
 */

#include "dma.h"
#include "trace.h"

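/* Prepare a scatter-gather list with room for alloc_hint entries.  */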
void qemu_sglist_init(QEMUSGList *qsg, int alloc_hint)
{
    qsg->sg = g_malloc(alloc_hint * sizeof(ScatterGatherEntry));
    qsg->nsg = 0;
    qsg->nalloc = alloc_hint;
    qsg->size = 0;
}

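/* Append a mapping, growing the entry array geometrically when it is full.  */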
void qemu_sglist_add(QEMUSGList *qsg, dma_addr_t base, dma_addr_t len)
{
    if (qsg->nsg == qsg->nalloc) {
        qsg->nalloc = 2 * qsg->nalloc + 1;
        qsg->sg = g_realloc(qsg->sg, qsg->nalloc * sizeof(ScatterGatherEntry));
    }
    qsg->sg[qsg->nsg].base = base;
    qsg->sg[qsg->nsg].len = len;
    qsg->size += len;
    ++qsg->nsg;
}

void qemu_sglist_destroy(QEMUSGList *qsg)
{
    g_free(qsg->sg);
}

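/* State of one scatter-gather AIO request, advanced piece by piece by
 * dma_bdrv_cb() as guest memory is mapped and submitted.  */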
typedef struct {
    BlockDriverAIOCB common;
    BlockDriverState *bs;
    BlockDriverAIOCB *acb;
    QEMUSGList *sg;
    uint64_t sector_num;
    bool to_dev;
    bool in_cancel;
    int sg_cur_index;
    dma_addr_t sg_cur_byte;
    QEMUIOVector iov;
    QEMUBH *bh;
    DMAIOFunc *io_func;
} DMAAIOCB;

static void dma_bdrv_cb(void *opaque, int ret);

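/* Bottom half that retries the transfer once mapping resources are free.  */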
static void reschedule_dma(void *opaque)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;

    qemu_bh_delete(dbs->bh);
    dbs->bh = NULL;
    dma_bdrv_cb(dbs, 0);
}

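/* Called when memory for mapping becomes available again; resume the
 * transfer via a bottom half.  */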
static void continue_after_map_failure(void *opaque)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;

    dbs->bh = qemu_bh_new(reschedule_dma, dbs);
    qemu_bh_schedule(dbs->bh);
}

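/* Unmap every guest page currently mapped into the request's I/O vector.  */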
static void dma_bdrv_unmap(DMAAIOCB *dbs)
{
    int i;

    for (i = 0; i < dbs->iov.niov; ++i) {
        cpu_physical_memory_unmap(dbs->iov.iov[i].iov_base,
                                  dbs->iov.iov[i].iov_len, !dbs->to_dev,
                                  dbs->iov.iov[i].iov_len);
    }
    qemu_iovec_reset(&dbs->iov);
}

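/* Final completion: unmap, invoke the caller's callback and release the
 * AIOCB unless a cancellation still holds a reference to it.  */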
static void dma_complete(DMAAIOCB *dbs, int ret)
{
    trace_dma_complete(dbs, ret, dbs->common.cb);

    dma_bdrv_unmap(dbs);
    if (dbs->common.cb) {
        dbs->common.cb(dbs->common.opaque, ret);
    }
    qemu_iovec_destroy(&dbs->iov);
    if (dbs->bh) {
        qemu_bh_delete(dbs->bh);
        dbs->bh = NULL;
    }
    if (!dbs->in_cancel) {
        /* Requests may complete while dma_aio_cancel is in progress.  In
         * this case, the AIOCB should not be released because it is still
         * referenced by dma_aio_cancel.  */
        qemu_aio_release(dbs);
    }
}

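/* Per-chunk callback: account the sectors just transferred, map as much of
 * the remaining scatter-gather list as possible and submit the next request,
 * completing or waiting for mapping resources as needed.  */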
static void dma_bdrv_cb(void *opaque, int ret)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;
    target_phys_addr_t cur_addr, cur_len;
    void *mem;

    trace_dma_bdrv_cb(dbs, ret);

    dbs->acb = NULL;
    dbs->sector_num += dbs->iov.size / 512;
    dma_bdrv_unmap(dbs);

    if (dbs->sg_cur_index == dbs->sg->nsg || ret < 0) {
        dma_complete(dbs, ret);
        return;
    }

    while (dbs->sg_cur_index < dbs->sg->nsg) {
        cur_addr = dbs->sg->sg[dbs->sg_cur_index].base + dbs->sg_cur_byte;
        cur_len = dbs->sg->sg[dbs->sg_cur_index].len - dbs->sg_cur_byte;
        mem = cpu_physical_memory_map(cur_addr, &cur_len, !dbs->to_dev);
        if (!mem) {
            break;
        }
        qemu_iovec_add(&dbs->iov, mem, cur_len);
        dbs->sg_cur_byte += cur_len;
        if (dbs->sg_cur_byte == dbs->sg->sg[dbs->sg_cur_index].len) {
            dbs->sg_cur_byte = 0;
            ++dbs->sg_cur_index;
        }
    }

    if (dbs->iov.size == 0) {
        trace_dma_map_wait(dbs);
        cpu_register_map_client(dbs, continue_after_map_failure);
        return;
    }

    dbs->acb = dbs->io_func(dbs->bs, dbs->sector_num, &dbs->iov,
                            dbs->iov.size / 512, dma_bdrv_cb, dbs);
    assert(dbs->acb);
}

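/* Cancel the in-flight request (if any), then complete without invoking the
 * caller's callback.  */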
static void dma_aio_cancel(BlockDriverAIOCB *acb)
{
    DMAAIOCB *dbs = container_of(acb, DMAAIOCB, common);

    trace_dma_aio_cancel(dbs);

    if (dbs->acb) {
        BlockDriverAIOCB *acb = dbs->acb;
        dbs->acb = NULL;
        dbs->in_cancel = true;
        bdrv_aio_cancel(acb);
        dbs->in_cancel = false;
    }
    dbs->common.cb = NULL;
    dma_complete(dbs, 0);
}

static AIOPool dma_aio_pool = {
    .aiocb_size         = sizeof(DMAAIOCB),
    .cancel             = dma_aio_cancel,
};

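/* Common entry point: set up the AIOCB and start the first iteration of
 * dma_bdrv_cb(), which maps guest memory and issues the I/O via io_func.  */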
BlockDriverAIOCB *dma_bdrv_io(
    BlockDriverState *bs, QEMUSGList *sg, uint64_t sector_num,
    DMAIOFunc *io_func, BlockDriverCompletionFunc *cb,
    void *opaque, bool to_dev)
{
    DMAAIOCB *dbs = qemu_aio_get(&dma_aio_pool, bs, cb, opaque);

    trace_dma_bdrv_io(dbs, bs, sector_num, to_dev);

    dbs->acb = NULL;
    dbs->bs = bs;
    dbs->sg = sg;
    dbs->sector_num = sector_num;
    dbs->sg_cur_index = 0;
    dbs->sg_cur_byte = 0;
    dbs->to_dev = to_dev;
    dbs->io_func = io_func;
    dbs->bh = NULL;
    qemu_iovec_init(&dbs->iov, sg->nsg);
    dma_bdrv_cb(dbs, 0);
    return &dbs->common;
}


BlockDriverAIOCB *dma_bdrv_read(BlockDriverState *bs,
                                QEMUSGList *sg, uint64_t sector,
                                void (*cb)(void *opaque, int ret), void *opaque)
{
    return dma_bdrv_io(bs, sg, sector, bdrv_aio_readv, cb, opaque, false);
}

BlockDriverAIOCB *dma_bdrv_write(BlockDriverState *bs,
                                 QEMUSGList *sg, uint64_t sector,
                                 void (*cb)(void *opaque, int ret), void *opaque)
{
    return dma_bdrv_io(bs, sg, sector, bdrv_aio_writev, cb, opaque, true);
}


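/* Bounce a linear buffer to or from the scatter-gather list, returning the
 * number of bytes of the list that were not transferred.  */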
static uint64_t dma_buf_rw(uint8_t *ptr, int32_t len, QEMUSGList *sg, bool to_dev)
{
    uint64_t resid;
    int sg_cur_index;

    resid = sg->size;
    sg_cur_index = 0;
    len = MIN(len, resid);
    while (len > 0) {
        ScatterGatherEntry entry = sg->sg[sg_cur_index++];
        int32_t xfer = MIN(len, entry.len);
        cpu_physical_memory_rw(entry.base, ptr, xfer, !to_dev);
        ptr += xfer;
        len -= xfer;
        resid -= xfer;
    }

    return resid;
}

uint64_t dma_buf_read(uint8_t *ptr, int32_t len, QEMUSGList *sg)
{
    return dma_buf_rw(ptr, len, sg, false);
}

uint64_t dma_buf_write(uint8_t *ptr, int32_t len, QEMUSGList *sg)
{
    return dma_buf_rw(ptr, len, sg, true);
}

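/* Account the whole scatter-gather transfer with the block layer.  */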
void dma_acct_start(BlockDriverState *bs, BlockAcctCookie *cookie,
                    QEMUSGList *sg, enum BlockAcctType type)
{
    bdrv_acct_start(bs, cookie, sg->size, type);
}