/*
 *  xen paravirt block device backend
 *
 *  (c) Gerd Hoffmann <kraxel@redhat.com>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; under version 2 of the License.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 *  Contributions after 2012-01-13 are licensed under the terms of the
 *  GNU GPL, version 2 or (at your option) any later version.
 */

#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include <unistd.h>
#include <signal.h>
#include <inttypes.h>
#include <time.h>
#include <fcntl.h>
#include <errno.h>
#include <sys/ioctl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <sys/uio.h>

#include "hw/hw.h"
#include "hw/xen/xen_backend.h"
#include "xen_blkif.h"
#include "sysemu/blockdev.h"

/* ------------------------------------------------------------- */

static int batch_maps   = 0;

static int max_requests = 32;

/* ------------------------------------------------------------- */

#define BLOCK_SIZE  512
#define IOCB_COUNT  (BLKIF_MAX_SEGMENTS_PER_REQUEST + 2)

struct PersistentGrant {
    void *page;
    struct XenBlkDev *blkdev;
};

typedef struct PersistentGrant PersistentGrant;

struct ioreq {
    blkif_request_t     req;
    int16_t             status;

    /* parsed request */
    off_t               start;
    QEMUIOVector        v;
    int                 presync;
    int                 postsync;
    uint8_t             mapped;

    /* grant mapping */
    uint32_t            domids[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    uint32_t            refs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    int                 prot;
    void                *page[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    void                *pages;
    int                 num_unmap;

    /* aio status */
    int                 aio_inflight;
    int                 aio_errors;

    struct XenBlkDev    *blkdev;
    QLIST_ENTRY(ioreq)   list;
    BlockAcctCookie     acct;
};

struct XenBlkDev {
    struct XenDevice    xendev;  /* must be first */
    char                *params;
    char                *mode;
    char                *type;
    char                *dev;
    char                *devtype;
    bool                directiosafe;
    const char          *fileproto;
    const char          *filename;
    int                 ring_ref;
    void                *sring;
    int64_t             file_blk;
    int64_t             file_size;
    int                 protocol;
    blkif_back_rings_t  rings;
    int                 more_work;
    int                 cnt_map;

    /* request lists */
    QLIST_HEAD(inflight_head, ioreq) inflight;
    QLIST_HEAD(finished_head, ioreq) finished;
    QLIST_HEAD(freelist_head, ioreq) freelist;
    int                 requests_total;
    int                 requests_inflight;
    int                 requests_finished;

    /* Persistent grants extension */
    gboolean            feature_discard;
    gboolean            feature_persistent;
    GTree               *persistent_gnts;
    unsigned int        persistent_gnt_count;
    unsigned int        max_grants;

    /* qemu block driver */
    DriveInfo           *dinfo;
    BlockDriverState    *bs;
    QEMUBH              *bh;
};

/* ------------------------------------------------------------- */

static void ioreq_reset(struct ioreq *ioreq)
{
    memset(&ioreq->req, 0, sizeof(ioreq->req));
    ioreq->status = 0;
    ioreq->start = 0;
    ioreq->presync = 0;
    ioreq->postsync = 0;
    ioreq->mapped = 0;

    memset(ioreq->domids, 0, sizeof(ioreq->domids));
    memset(ioreq->refs, 0, sizeof(ioreq->refs));
    ioreq->prot = 0;
    memset(ioreq->page, 0, sizeof(ioreq->page));
    ioreq->pages = NULL;

    ioreq->aio_inflight = 0;
    ioreq->aio_errors = 0;

    ioreq->blkdev = NULL;
    memset(&ioreq->list, 0, sizeof(ioreq->list));
    memset(&ioreq->acct, 0, sizeof(ioreq->acct));

    qemu_iovec_reset(&ioreq->v);
}

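/* ordering and cleanup callbacks for the persistent-grant GTree */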
static gint int_cmp(gconstpointer a, gconstpointer b, gpointer user_data)
{
    uint ua = GPOINTER_TO_UINT(a);
    uint ub = GPOINTER_TO_UINT(b);
    return (ua > ub) - (ua < ub);
}

static void destroy_grant(gpointer pgnt)
{
    PersistentGrant *grant = pgnt;
    XenGnttab gnt = grant->blkdev->xendev.gnttabdev;

    if (xc_gnttab_munmap(gnt, grant->page, 1) != 0) {
        xen_be_printf(&grant->blkdev->xendev, 0,
                      "xc_gnttab_munmap failed: %s\n",
                      strerror(errno));
    }
    grant->blkdev->persistent_gnt_count--;
    xen_be_printf(&grant->blkdev->xendev, 3,
                  "unmapped grant %p\n", grant->page);
    g_free(grant);
}

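/*
 * Get an unused ioreq from the freelist, or allocate a new one while we
 * are still below max_requests; returns NULL once the pool is exhausted.
 */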
static struct ioreq *ioreq_start(struct XenBlkDev *blkdev)
{
    struct ioreq *ioreq = NULL;

    if (QLIST_EMPTY(&blkdev->freelist)) {
        if (blkdev->requests_total >= max_requests) {
            goto out;
        }
        /* allocate new struct */
        ioreq = g_malloc0(sizeof(*ioreq));
        ioreq->blkdev = blkdev;
        blkdev->requests_total++;
        qemu_iovec_init(&ioreq->v, BLKIF_MAX_SEGMENTS_PER_REQUEST);
    } else {
        /* get one from freelist */
        ioreq = QLIST_FIRST(&blkdev->freelist);
        QLIST_REMOVE(ioreq, list);
    }
    QLIST_INSERT_HEAD(&blkdev->inflight, ioreq, list);
    blkdev->requests_inflight++;

out:
    return ioreq;
}

static void ioreq_finish(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    QLIST_REMOVE(ioreq, list);
    QLIST_INSERT_HEAD(&blkdev->finished, ioreq, list);
    blkdev->requests_inflight--;
    blkdev->requests_finished++;
}

static void ioreq_release(struct ioreq *ioreq, bool finish)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    QLIST_REMOVE(ioreq, list);
    ioreq_reset(ioreq);
    ioreq->blkdev = blkdev;
    QLIST_INSERT_HEAD(&blkdev->freelist, ioreq, list);
    if (finish) {
        blkdev->requests_finished--;
    } else {
        blkdev->requests_inflight--;
    }
}

/*
 * translate request into iovec + start offset
 * do sanity checks along the way
 */
static int ioreq_parse(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;
    uintptr_t mem;
    size_t len;
    int i;

    xen_be_printf(&blkdev->xendev, 3,
                  "op %d, nr %d, handle %d, id %" PRId64 ", sector %" PRId64 "\n",
                  ioreq->req.operation, ioreq->req.nr_segments,
                  ioreq->req.handle, ioreq->req.id, ioreq->req.sector_number);
    switch (ioreq->req.operation) {
    case BLKIF_OP_READ:
        ioreq->prot = PROT_WRITE; /* to memory */
        break;
    case BLKIF_OP_FLUSH_DISKCACHE:
        ioreq->presync = 1;
        if (!ioreq->req.nr_segments) {
            return 0;
        }
        /* fall through */
    case BLKIF_OP_WRITE:
        ioreq->prot = PROT_READ; /* from memory */
        break;
    case BLKIF_OP_DISCARD:
        return 0;
    default:
        xen_be_printf(&blkdev->xendev, 0, "error: unknown operation (%d)\n",
                      ioreq->req.operation);
        goto err;
    };

    if (ioreq->req.operation != BLKIF_OP_READ && blkdev->mode[0] != 'w') {
        xen_be_printf(&blkdev->xendev, 0, "error: write req for ro device\n");
        goto err;
    }

    ioreq->start = ioreq->req.sector_number * blkdev->file_blk;
    for (i = 0; i < ioreq->req.nr_segments; i++) {
        if (i == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
            xen_be_printf(&blkdev->xendev, 0, "error: nr_segments too big\n");
            goto err;
        }
        if (ioreq->req.seg[i].first_sect > ioreq->req.seg[i].last_sect) {
            xen_be_printf(&blkdev->xendev, 0, "error: first > last sector\n");
            goto err;
        }
        if (ioreq->req.seg[i].last_sect * BLOCK_SIZE >= XC_PAGE_SIZE) {
            xen_be_printf(&blkdev->xendev, 0, "error: page crossing\n");
            goto err;
        }

        ioreq->domids[i] = blkdev->xendev.dom;
        ioreq->refs[i]   = ioreq->req.seg[i].gref;

        mem = ioreq->req.seg[i].first_sect * blkdev->file_blk;
        len = (ioreq->req.seg[i].last_sect - ioreq->req.seg[i].first_sect + 1) * blkdev->file_blk;
        qemu_iovec_add(&ioreq->v, (void*)mem, len);
    }
    if (ioreq->start + ioreq->v.size > blkdev->file_size) {
        xen_be_printf(&blkdev->xendev, 0, "error: access beyond end of file\n");
        goto err;
    }
    return 0;

err:
    ioreq->status = BLKIF_RSP_ERROR;
    return -1;
}

static void ioreq_unmap(struct ioreq *ioreq)
{
    XenGnttab gnt = ioreq->blkdev->xendev.gnttabdev;
    int i;

    if (ioreq->num_unmap == 0 || ioreq->mapped == 0) {
        return;
    }
    if (batch_maps) {
        if (!ioreq->pages) {
            return;
        }
        if (xc_gnttab_munmap(gnt, ioreq->pages, ioreq->num_unmap) != 0) {
            xen_be_printf(&ioreq->blkdev->xendev, 0, "xc_gnttab_munmap failed: %s\n",
                          strerror(errno));
        }
        ioreq->blkdev->cnt_map -= ioreq->num_unmap;
        ioreq->pages = NULL;
    } else {
        for (i = 0; i < ioreq->num_unmap; i++) {
            if (!ioreq->page[i]) {
                continue;
            }
            if (xc_gnttab_munmap(gnt, ioreq->page[i], 1) != 0) {
                xen_be_printf(&ioreq->blkdev->xendev, 0, "xc_gnttab_munmap failed: %s\n",
                              strerror(errno));
            }
            ioreq->blkdev->cnt_map--;
            ioreq->page[i] = NULL;
        }
    }
    ioreq->mapped = 0;
}

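/*
 * Map the grant references of a parsed request and patch the iovec with
 * the resulting addresses.  When persistent grants are in use, already
 * mapped pages are reused and newly mapped ones are added to the tree
 * until max_grants is reached.
 */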
static int ioreq_map(struct ioreq *ioreq)
{
    XenGnttab gnt = ioreq->blkdev->xendev.gnttabdev;
    uint32_t domids[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    uint32_t refs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    void *page[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    int i, j, new_maps = 0;
    PersistentGrant *grant;
    /* domids and refs variables will contain the information necessary
     * to map the grants that are needed to fulfill this request.
     *
     * After mapping the needed grants, the page array will contain the
     * memory address of each granted page in the order specified in ioreq
     * (disregarding if it's a persistent grant or not).
     */

    if (ioreq->v.niov == 0 || ioreq->mapped == 1) {
        return 0;
    }
    if (ioreq->blkdev->feature_persistent) {
        for (i = 0; i < ioreq->v.niov; i++) {
            grant = g_tree_lookup(ioreq->blkdev->persistent_gnts,
                                    GUINT_TO_POINTER(ioreq->refs[i]));

            if (grant != NULL) {
                page[i] = grant->page;
                xen_be_printf(&ioreq->blkdev->xendev, 3,
                              "using persistent-grant %" PRIu32 "\n",
                              ioreq->refs[i]);
            } else {
                    /* Add the grant to the list of grants that
                     * should be mapped
                     */
                    domids[new_maps] = ioreq->domids[i];
                    refs[new_maps] = ioreq->refs[i];
                    page[i] = NULL;
                    new_maps++;
            }
        }
        /* Set the protection to RW, since grants may be reused later
         * with a different protection than the one needed for this request
         */
        ioreq->prot = PROT_WRITE | PROT_READ;
    } else {
        /* All grants in the request should be mapped */
        memcpy(refs, ioreq->refs, sizeof(refs));
        memcpy(domids, ioreq->domids, sizeof(domids));
        memset(page, 0, sizeof(page));
        new_maps = ioreq->v.niov;
    }

    if (batch_maps && new_maps) {
        ioreq->pages = xc_gnttab_map_grant_refs
            (gnt, new_maps, domids, refs, ioreq->prot);
        if (ioreq->pages == NULL) {
            xen_be_printf(&ioreq->blkdev->xendev, 0,
                          "can't map %d grant refs (%s, %d maps)\n",
                          new_maps, strerror(errno), ioreq->blkdev->cnt_map);
            return -1;
        }
        for (i = 0, j = 0; i < ioreq->v.niov; i++) {
            if (page[i] == NULL) {
                page[i] = ioreq->pages + (j++) * XC_PAGE_SIZE;
            }
        }
        ioreq->blkdev->cnt_map += new_maps;
    } else if (new_maps)  {
        for (i = 0; i < new_maps; i++) {
            ioreq->page[i] = xc_gnttab_map_grant_ref
                (gnt, domids[i], refs[i], ioreq->prot);
            if (ioreq->page[i] == NULL) {
                xen_be_printf(&ioreq->blkdev->xendev, 0,
                              "can't map grant ref %d (%s, %d maps)\n",
                              refs[i], strerror(errno), ioreq->blkdev->cnt_map);
                ioreq->mapped = 1;
                ioreq_unmap(ioreq);
                return -1;
            }
            ioreq->blkdev->cnt_map++;
        }
        for (i = 0, j = 0; i < ioreq->v.niov; i++) {
            if (page[i] == NULL) {
                page[i] = ioreq->page[j++];
            }
        }
    }
    if (ioreq->blkdev->feature_persistent) {
        while ((ioreq->blkdev->persistent_gnt_count < ioreq->blkdev->max_grants)
              && new_maps) {
            /* Go through the list of newly mapped grants and add as many
             * as possible to the list of persistently mapped grants.
             *
             * Since we start at the end of ioreq->page(s), we only need
             * to decrease new_maps to prevent these granted pages from
             * being unmapped in ioreq_unmap.
             */
            grant = g_malloc0(sizeof(*grant));
            new_maps--;
            if (batch_maps) {
                grant->page = ioreq->pages + (new_maps) * XC_PAGE_SIZE;
            } else {
                grant->page = ioreq->page[new_maps];
            }
            grant->blkdev = ioreq->blkdev;
            xen_be_printf(&ioreq->blkdev->xendev, 3,
                          "adding grant %" PRIu32 " page: %p\n",
                          refs[new_maps], grant->page);
            g_tree_insert(ioreq->blkdev->persistent_gnts,
                          GUINT_TO_POINTER(refs[new_maps]),
                          grant);
            ioreq->blkdev->persistent_gnt_count++;
        }
    }
    for (i = 0; i < ioreq->v.niov; i++) {
        ioreq->v.iov[i].iov_base += (uintptr_t)page[i];
    }
    ioreq->mapped = 1;
    ioreq->num_unmap = new_maps;
    return 0;
}

static int ioreq_runio_qemu_aio(struct ioreq *ioreq);

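/*
 * Completion callback shared by every AIO submitted for an ioreq: handle
 * the pre/post flush stages, and once the last AIO is done unmap the
 * grants, finish the request and kick the bottom half so the response is
 * sent back to the frontend.
 */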
static void qemu_aio_complete(void *opaque, int ret)
{
    struct ioreq *ioreq = opaque;

    if (ret != 0) {
        xen_be_printf(&ioreq->blkdev->xendev, 0, "%s I/O error\n",
                      ioreq->req.operation == BLKIF_OP_READ ? "read" : "write");
        ioreq->aio_errors++;
    }

    ioreq->aio_inflight--;
    if (ioreq->presync) {
        ioreq->presync = 0;
        ioreq_runio_qemu_aio(ioreq);
        return;
    }
    if (ioreq->aio_inflight > 0) {
        return;
    }
    if (ioreq->postsync) {
        ioreq->postsync = 0;
        ioreq->aio_inflight++;
        bdrv_aio_flush(ioreq->blkdev->bs, qemu_aio_complete, ioreq);
        return;
    }

    ioreq->status = ioreq->aio_errors ? BLKIF_RSP_ERROR : BLKIF_RSP_OKAY;
    ioreq_unmap(ioreq);
    ioreq_finish(ioreq);
    switch (ioreq->req.operation) {
    case BLKIF_OP_WRITE:
    case BLKIF_OP_FLUSH_DISKCACHE:
        if (!ioreq->req.nr_segments) {
            break;
        }
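        /* fall through */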
    case BLKIF_OP_READ:
        block_acct_done(bdrv_get_stats(ioreq->blkdev->bs), &ioreq->acct);
        break;
    case BLKIF_OP_DISCARD:
    default:
        break;
    }
    qemu_bh_schedule(ioreq->blkdev->bh);
}

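/*
 * Map the request's grants and submit the matching flush, read, write or
 * discard AIO; completion is handled in qemu_aio_complete().
 */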
static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    if (ioreq->req.nr_segments && ioreq_map(ioreq) == -1) {
        goto err_no_map;
    }

    ioreq->aio_inflight++;
    if (ioreq->presync) {
        bdrv_aio_flush(ioreq->blkdev->bs, qemu_aio_complete, ioreq);
        return 0;
    }

    switch (ioreq->req.operation) {
    case BLKIF_OP_READ:
        block_acct_start(bdrv_get_stats(blkdev->bs), &ioreq->acct,
                         ioreq->v.size, BLOCK_ACCT_READ);
        ioreq->aio_inflight++;
        bdrv_aio_readv(blkdev->bs, ioreq->start / BLOCK_SIZE,
                       &ioreq->v, ioreq->v.size / BLOCK_SIZE,
                       qemu_aio_complete, ioreq);
        break;
    case BLKIF_OP_WRITE:
    case BLKIF_OP_FLUSH_DISKCACHE:
        if (!ioreq->req.nr_segments) {
            break;
        }

        block_acct_start(bdrv_get_stats(blkdev->bs), &ioreq->acct,
                         ioreq->v.size, BLOCK_ACCT_WRITE);
        ioreq->aio_inflight++;
        bdrv_aio_writev(blkdev->bs, ioreq->start / BLOCK_SIZE,
                        &ioreq->v, ioreq->v.size / BLOCK_SIZE,
                        qemu_aio_complete, ioreq);
        break;
    case BLKIF_OP_DISCARD:
    {
        struct blkif_request_discard *discard_req = (void *)&ioreq->req;
        ioreq->aio_inflight++;
        bdrv_aio_discard(blkdev->bs,
                        discard_req->sector_number, discard_req->nr_sectors,
                        qemu_aio_complete, ioreq);
        break;
    }
    default:
        /* unknown operation (shouldn't happen -- parse catches this) */
        goto err;
    }

    qemu_aio_complete(ioreq, 0);

    return 0;

err:
    ioreq_unmap(ioreq);
err_no_map:
    ioreq_finish(ioreq);
    ioreq->status = BLKIF_RSP_ERROR;
    return -1;
}

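/*
 * Put one response on the shared ring in the negotiated ABI; returns
 * nonzero when the frontend needs to be notified.
 */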
static int blk_send_response_one(struct ioreq *ioreq)
{
    struct XenBlkDev  *blkdev = ioreq->blkdev;
    int               send_notify   = 0;
    int               have_requests = 0;
    blkif_response_t  resp;
    void              *dst;

    resp.id        = ioreq->req.id;
    resp.operation = ioreq->req.operation;
    resp.status    = ioreq->status;

    /* Place on the response ring for the relevant domain. */
    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
        dst = RING_GET_RESPONSE(&blkdev->rings.native, blkdev->rings.native.rsp_prod_pvt);
        break;
    case BLKIF_PROTOCOL_X86_32:
        dst = RING_GET_RESPONSE(&blkdev->rings.x86_32_part,
                                blkdev->rings.x86_32_part.rsp_prod_pvt);
        break;
    case BLKIF_PROTOCOL_X86_64:
        dst = RING_GET_RESPONSE(&blkdev->rings.x86_64_part,
                                blkdev->rings.x86_64_part.rsp_prod_pvt);
        break;
    default:
        dst = NULL;
        return 0;
    }
    memcpy(dst, &resp, sizeof(resp));
    blkdev->rings.common.rsp_prod_pvt++;

    RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blkdev->rings.common, send_notify);
    if (blkdev->rings.common.rsp_prod_pvt == blkdev->rings.common.req_cons) {
        /*
         * Tail check for pending requests. Allows frontend to avoid
         * notifications if requests are already in flight (lower
         * overheads and promotes batching).
         */
        RING_FINAL_CHECK_FOR_REQUESTS(&blkdev->rings.common, have_requests);
    } else if (RING_HAS_UNCONSUMED_REQUESTS(&blkdev->rings.common)) {
        have_requests = 1;
    }

    if (have_requests) {
        blkdev->more_work++;
    }
    return send_notify;
}

/* walk finished list, send outstanding responses, free requests */
static void blk_send_response_all(struct XenBlkDev *blkdev)
{
    struct ioreq *ioreq;
    int send_notify = 0;

    while (!QLIST_EMPTY(&blkdev->finished)) {
        ioreq = QLIST_FIRST(&blkdev->finished);
        send_notify += blk_send_response_one(ioreq);
        ioreq_release(ioreq, true);
    }
    if (send_notify) {
        xen_be_send_notify(&blkdev->xendev);
    }
}

static int blk_get_request(struct XenBlkDev *blkdev, struct ioreq *ioreq, RING_IDX rc)
{
    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
        memcpy(&ioreq->req, RING_GET_REQUEST(&blkdev->rings.native, rc),
               sizeof(ioreq->req));
        break;
    case BLKIF_PROTOCOL_X86_32:
        blkif_get_x86_32_req(&ioreq->req,
                             RING_GET_REQUEST(&blkdev->rings.x86_32_part, rc));
        break;
    case BLKIF_PROTOCOL_X86_64:
        blkif_get_x86_64_req(&ioreq->req,
                             RING_GET_REQUEST(&blkdev->rings.x86_64_part, rc));
        break;
    }
    return 0;
}

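/*
 * Bottom half: push out finished responses, then pull new requests off the
 * ring, parse them and submit the AIO.
 */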
static void blk_handle_requests(struct XenBlkDev *blkdev)
{
    RING_IDX rc, rp;
    struct ioreq *ioreq;

    blkdev->more_work = 0;

    rc = blkdev->rings.common.req_cons;
    rp = blkdev->rings.common.sring->req_prod;
    xen_rmb(); /* Ensure we see queued requests up to 'rp'. */

    blk_send_response_all(blkdev);
    while (rc != rp) {
        /* pull request from ring */
        if (RING_REQUEST_CONS_OVERFLOW(&blkdev->rings.common, rc)) {
            break;
        }
        ioreq = ioreq_start(blkdev);
        if (ioreq == NULL) {
            blkdev->more_work++;
            break;
        }
        blk_get_request(blkdev, ioreq, rc);
        blkdev->rings.common.req_cons = ++rc;

        /* parse them */
        if (ioreq_parse(ioreq) != 0) {
            if (blk_send_response_one(ioreq)) {
                xen_be_send_notify(&blkdev->xendev);
            }
            ioreq_release(ioreq, false);
            continue;
        }

        ioreq_runio_qemu_aio(ioreq);
    }

    if (blkdev->more_work && blkdev->requests_inflight < max_requests) {
        qemu_bh_schedule(blkdev->bh);
    }
}

/* ------------------------------------------------------------- */

static void blk_bh(void *opaque)
{
    struct XenBlkDev *blkdev = opaque;
    blk_handle_requests(blkdev);
}

/*
 * We need to account for the grant allocations requiring contiguous
 * chunks; the worst case number would be
 *     max_req * max_seg + (max_req - 1) * (max_seg - 1) + 1,
 * but in order to keep things simple just use
 *     2 * max_req * max_seg.
 */
#define MAX_GRANTS(max_req, max_seg) (2 * (max_req) * (max_seg))

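/*
 * XenDevOps .alloc hook: set up the request lists and the bottom half and
 * raise the gntdev limit to the worst-case number of grant mappings.
 */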
static void blk_alloc(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    QLIST_INIT(&blkdev->inflight);
    QLIST_INIT(&blkdev->finished);
    QLIST_INIT(&blkdev->freelist);
    blkdev->bh = qemu_bh_new(blk_bh, blkdev);
    if (xen_mode != XEN_EMULATE) {
        batch_maps = 1;
    }
    if (xc_gnttab_set_max_grants(xendev->gnttabdev,
            MAX_GRANTS(max_requests, BLKIF_MAX_SEGMENTS_PER_REQUEST)) < 0) {
        xen_be_printf(xendev, 0, "xc_gnttab_set_max_grants failed: %s\n",
                      strerror(errno));
    }
}

static void blk_parse_discard(struct XenBlkDev *blkdev)
{
    int enable;

    blkdev->feature_discard = true;

    if (xenstore_read_be_int(&blkdev->xendev, "discard-enable", &enable) == 0) {
        blkdev->feature_discard = !!enable;
    }

    if (blkdev->feature_discard) {
        xenstore_write_be_int(&blkdev->xendev, "feature-discard", 1);
    }
}

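/*
 * XenDevOps .init hook: read the backend configuration from xenstore and
 * advertise the supported features; the image itself is opened later, in
 * blk_connect().
 */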
static int blk_init(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
    int info = 0;
    char *directiosafe = NULL;

    /* read xenstore entries */
    if (blkdev->params == NULL) {
        char *h = NULL;

        blkdev->params = xenstore_read_be_str(&blkdev->xendev, "params");
        if (blkdev->params != NULL) {
            h = strchr(blkdev->params, ':');
        }
        if (h != NULL) {
            blkdev->fileproto = blkdev->params;
            blkdev->filename  = h+1;
            *h = 0;
        } else {
            blkdev->fileproto = "<unset>";
            blkdev->filename  = blkdev->params;
        }
    }
    if (!strcmp("aio", blkdev->fileproto)) {
        blkdev->fileproto = "raw";
    }
    if (blkdev->mode == NULL) {
        blkdev->mode = xenstore_read_be_str(&blkdev->xendev, "mode");
    }
    if (blkdev->type == NULL) {
        blkdev->type = xenstore_read_be_str(&blkdev->xendev, "type");
    }
    if (blkdev->dev == NULL) {
        blkdev->dev = xenstore_read_be_str(&blkdev->xendev, "dev");
    }
    if (blkdev->devtype == NULL) {
        blkdev->devtype = xenstore_read_be_str(&blkdev->xendev, "device-type");
    }
    directiosafe = xenstore_read_be_str(&blkdev->xendev, "direct-io-safe");
    blkdev->directiosafe = (directiosafe && atoi(directiosafe));

    /* do we have all we need? */
    if (blkdev->params == NULL ||
        blkdev->mode == NULL   ||
        blkdev->type == NULL   ||
        blkdev->dev == NULL) {
        goto out_error;
    }

    /* read-only ? */
    if (strcmp(blkdev->mode, "w")) {
        info  |= VDISK_READONLY;
    }

    /* cdrom ? */
    if (blkdev->devtype && !strcmp(blkdev->devtype, "cdrom")) {
        info  |= VDISK_CDROM;
    }

    blkdev->file_blk  = BLOCK_SIZE;

    /* fill info
     * blk_connect supplies sector-size and sectors
     */
    xenstore_write_be_int(&blkdev->xendev, "feature-flush-cache", 1);
    xenstore_write_be_int(&blkdev->xendev, "feature-persistent", 1);
    xenstore_write_be_int(&blkdev->xendev, "info", info);

    blk_parse_discard(blkdev);

    g_free(directiosafe);
    return 0;

out_error:
    g_free(blkdev->params);
    blkdev->params = NULL;
    g_free(blkdev->mode);
    blkdev->mode = NULL;
    g_free(blkdev->type);
    blkdev->type = NULL;
    g_free(blkdev->dev);
    blkdev->dev = NULL;
    g_free(blkdev->devtype);
    blkdev->devtype = NULL;
    g_free(directiosafe);
    blkdev->directiosafe = false;
    return -1;
}

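/*
 * XenDevOps .initialise hook: open (or look up) the block device, publish
 * its size in xenstore, map the shared ring and bind the event channel.
 */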
static int blk_connect(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
    int pers, index, qflags;
    bool readonly = true;

    /* read-only ? */
    if (blkdev->directiosafe) {
        qflags = BDRV_O_NOCACHE | BDRV_O_NATIVE_AIO;
    } else {
        qflags = BDRV_O_CACHE_WB;
    }
    if (strcmp(blkdev->mode, "w") == 0) {
        qflags |= BDRV_O_RDWR;
        readonly = false;
    }
    if (blkdev->feature_discard) {
        qflags |= BDRV_O_UNMAP;
    }

    /* init qemu block driver */
    index = (blkdev->xendev.dev - 202 * 256) / 16;
    blkdev->dinfo = drive_get(IF_XEN, 0, index);
    if (!blkdev->dinfo) {
        Error *local_err = NULL;
        BlockDriver *drv;

        /* setup via xenbus -> create new block driver instance */
        xen_be_printf(&blkdev->xendev, 2, "create new bdrv (xenbus setup)\n");
        blkdev->bs = bdrv_new(blkdev->dev, NULL);
        if (!blkdev->bs) {
            return -1;
        }

        drv = bdrv_find_whitelisted_format(blkdev->fileproto, readonly);
        if (bdrv_open(&blkdev->bs, blkdev->filename, NULL, NULL, qflags,
                      drv, &local_err) != 0) {
            xen_be_printf(&blkdev->xendev, 0, "error: %s\n",
                          error_get_pretty(local_err));
            error_free(local_err);
            bdrv_unref(blkdev->bs);
            blkdev->bs = NULL;
            return -1;
        }
    } else {
        /* setup via qemu cmdline -> already setup for us */
        xen_be_printf(&blkdev->xendev, 2, "get configured bdrv (cmdline setup)\n");
        blkdev->bs = blkdev->dinfo->bdrv;
        if (bdrv_is_read_only(blkdev->bs) && !readonly) {
            xen_be_printf(&blkdev->xendev, 0, "Unexpected read-only drive");
            blkdev->bs = NULL;
            return -1;
        }
        /* blkdev->bs is not created by us, we get a reference
         * so we can bdrv_unref() unconditionally */
        bdrv_ref(blkdev->bs);
    }
    bdrv_attach_dev_nofail(blkdev->bs, blkdev);
    blkdev->file_size = bdrv_getlength(blkdev->bs);
    if (blkdev->file_size < 0) {
        xen_be_printf(&blkdev->xendev, 1, "bdrv_getlength: %d (%s) | drv %s\n",
                      (int)blkdev->file_size, strerror(-blkdev->file_size),
                      bdrv_get_format_name(blkdev->bs) ?: "-");
        blkdev->file_size = 0;
    }

    xen_be_printf(xendev, 1, "type \"%s\", fileproto \"%s\", filename \"%s\","
                  " size %" PRId64 " (%" PRId64 " MB)\n",
                  blkdev->type, blkdev->fileproto, blkdev->filename,
                  blkdev->file_size, blkdev->file_size >> 20);

    /* fill in the sector size and number of sectors */
    xenstore_write_be_int(&blkdev->xendev, "sector-size", blkdev->file_blk);
    xenstore_write_be_int64(&blkdev->xendev, "sectors",
                            blkdev->file_size / blkdev->file_blk);

    if (xenstore_read_fe_int(&blkdev->xendev, "ring-ref", &blkdev->ring_ref) == -1) {
        return -1;
    }
    if (xenstore_read_fe_int(&blkdev->xendev, "event-channel",
                             &blkdev->xendev.remote_port) == -1) {
        return -1;
    }
    if (xenstore_read_fe_int(&blkdev->xendev, "feature-persistent", &pers)) {
        blkdev->feature_persistent = FALSE;
    } else {
        blkdev->feature_persistent = !!pers;
    }

    blkdev->protocol = BLKIF_PROTOCOL_NATIVE;
    if (blkdev->xendev.protocol) {
        if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_32) == 0) {
            blkdev->protocol = BLKIF_PROTOCOL_X86_32;
        }
        if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_64) == 0) {
            blkdev->protocol = BLKIF_PROTOCOL_X86_64;
        }
    }

    blkdev->sring = xc_gnttab_map_grant_ref(blkdev->xendev.gnttabdev,
                                            blkdev->xendev.dom,
                                            blkdev->ring_ref,
                                            PROT_READ | PROT_WRITE);
    if (!blkdev->sring) {
        return -1;
    }
    blkdev->cnt_map++;

    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
    {
        blkif_sring_t *sring_native = blkdev->sring;
        BACK_RING_INIT(&blkdev->rings.native, sring_native, XC_PAGE_SIZE);
        break;
    }
    case BLKIF_PROTOCOL_X86_32:
    {
        blkif_x86_32_sring_t *sring_x86_32 = blkdev->sring;

        BACK_RING_INIT(&blkdev->rings.x86_32_part, sring_x86_32, XC_PAGE_SIZE);
        break;
    }
    case BLKIF_PROTOCOL_X86_64:
    {
        blkif_x86_64_sring_t *sring_x86_64 = blkdev->sring;

        BACK_RING_INIT(&blkdev->rings.x86_64_part, sring_x86_64, XC_PAGE_SIZE);
        break;
    }
    }

    if (blkdev->feature_persistent) {
        /* Init persistent grants */
        blkdev->max_grants = max_requests * BLKIF_MAX_SEGMENTS_PER_REQUEST;
        blkdev->persistent_gnts = g_tree_new_full((GCompareDataFunc)int_cmp,
                                             NULL, NULL,
                                             (GDestroyNotify)destroy_grant);
        blkdev->persistent_gnt_count = 0;
    }

    xen_be_bind_evtchn(&blkdev->xendev);

    xen_be_printf(&blkdev->xendev, 1, "ok: proto %s, ring-ref %d, "
                  "remote port %d, local port %d\n",
                  blkdev->xendev.protocol, blkdev->ring_ref,
                  blkdev->xendev.remote_port, blkdev->xendev.local_port);
    return 0;
}

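/*
 * Drop the block driver reference, unbind the event channel and unmap the
 * shared ring.
 */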
static void blk_disconnect(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    if (blkdev->bs) {
        bdrv_detach_dev(blkdev->bs, blkdev);
        bdrv_unref(blkdev->bs);
        blkdev->bs = NULL;
    }
    xen_be_unbind_evtchn(&blkdev->xendev);

    if (blkdev->sring) {
        xc_gnttab_munmap(blkdev->xendev.gnttabdev, blkdev->sring, 1);
        blkdev->cnt_map--;
        blkdev->sring = NULL;
    }
}

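/*
 * XenDevOps .free hook: tear down any remaining connection state and
 * release the persistent-grant tree and the ioreq freelist.
 */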
static int blk_free(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
    struct ioreq *ioreq;

    if (blkdev->bs || blkdev->sring) {
        blk_disconnect(xendev);
    }

    /* Free persistent grants */
    if (blkdev->feature_persistent) {
        g_tree_destroy(blkdev->persistent_gnts);
    }

    while (!QLIST_EMPTY(&blkdev->freelist)) {
        ioreq = QLIST_FIRST(&blkdev->freelist);
        QLIST_REMOVE(ioreq, list);
        qemu_iovec_destroy(&ioreq->v);
        g_free(ioreq);
    }

    g_free(blkdev->params);
    g_free(blkdev->mode);
    g_free(blkdev->type);
    g_free(blkdev->dev);
    g_free(blkdev->devtype);
    qemu_bh_delete(blkdev->bh);
    return 0;
}

static void blk_event(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    qemu_bh_schedule(blkdev->bh);
}

struct XenDevOps xen_blkdev_ops = {
    .size       = sizeof(struct XenBlkDev),
    .flags      = DEVOPS_FLAG_NEED_GNTDEV,
    .alloc      = blk_alloc,
    .init       = blk_init,
    .initialise    = blk_connect,
    .disconnect = blk_disconnect,
    .event      = blk_event,
    .free       = blk_free,
};