/*
 * qemu_blockjob.c: helper functions for QEMU block jobs
 *
 * Copyright (C) 2006-2015 Red Hat, Inc.
 * Copyright (C) 2006 Daniel P. Berrange
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library.  If not, see
 * <http://www.gnu.org/licenses/>.
 */

#include <config.h>

#include "internal.h"

#include "qemu_blockjob.h"
#include "qemu_block.h"
#include "qemu_domain.h"

#include "conf/domain_conf.h"
#include "conf/domain_event.h"

#include "virlog.h"
#include "virstoragefile.h"
#include "virthread.h"
#include "virtime.h"
#include "locking/domain_lock.h"
#include "viralloc.h"
#include "virstring.h"
#include "qemu_security.h"

#define VIR_FROM_THIS VIR_FROM_QEMU

VIR_LOG_INIT("qemu.qemu_blockjob");

/* Note that qemuBlockjobState and qemuBlockjobType values are formatted into
 * the status XML */
VIR_ENUM_IMPL(qemuBlockjobState,
              QEMU_BLOCKJOB_STATE_LAST,
              "completed",
              "failed",
              "cancelled",
              "ready",
              "new",
              "running");

VIR_ENUM_IMPL(qemuBlockjob,
              QEMU_BLOCKJOB_TYPE_LAST,
              "",
              "pull",
              "copy",
              "commit",
              "active-commit",
              "");

static virClassPtr qemuBlockJobDataClass;


static void
qemuBlockJobDataDispose(void *obj)
{
    qemuBlockJobDataPtr job = obj;

    VIR_FREE(job->name);
    VIR_FREE(job->errmsg);
}


static int
qemuBlockJobDataOnceInit(void)
{
    if (!VIR_CLASS_NEW(qemuBlockJobData, virClassForObject()))
        return -1;

    return 0;
}


VIR_ONCE_GLOBAL_INIT(qemuBlockJobData);

qemuBlockJobDataPtr
qemuBlockJobDataNew(qemuBlockJobType type,
                    const char *name)
{
    VIR_AUTOUNREF(qemuBlockJobDataPtr) job = NULL;

    if (qemuBlockJobDataInitialize() < 0)
        return NULL;

    if (!(job = virObjectNew(qemuBlockJobDataClass)))
        return NULL;

    if (VIR_STRDUP(job->name, name) < 0)
        return NULL;

    job->state = QEMU_BLOCKJOB_STATE_NEW;
    job->newstate = -1;
    job->type = type;

    VIR_RETURN_PTR(job);
}


int
qemuBlockJobRegister(qemuBlockJobDataPtr job,
                     virDomainObjPtr vm,
                     virDomainDiskDefPtr disk)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;

    if (virHashAddEntry(priv->blockjobs, job->name, virObjectRef(job)) < 0) {
        virObjectUnref(job);
        return -1;
    }

    if (disk) {
        job->disk = disk;
        QEMU_DOMAIN_DISK_PRIVATE(disk)->blockjob = virObjectRef(job);
    }

    return 0;
}


static void
qemuBlockJobUnregister(qemuBlockJobDataPtr job,
                       virDomainObjPtr vm)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    qemuDomainDiskPrivatePtr diskPriv;

    if (job->disk) {
        diskPriv = QEMU_DOMAIN_DISK_PRIVATE(job->disk);

        if (job == diskPriv->blockjob) {
            virObjectUnref(diskPriv->blockjob);
            diskPriv->blockjob = NULL;
        }

        job->disk = NULL;
    }

    /* this may remove the last reference of 'job' */
    virHashRemoveEntry(priv->blockjobs, job->name);
}


/**
 * qemuBlockJobDiskNew:
 * @vm: domain object
 * @disk: disk definition
 * @type: block job type
 * @jobname: job name to be used in qemu
 *
 * Start/associate a new blockjob of @type with @disk.
 *
 * Returns a reference to the block job data object on success and NULL on
 * failure.
 */
qemuBlockJobDataPtr
qemuBlockJobDiskNew(virDomainObjPtr vm,
                    virDomainDiskDefPtr disk,
                    qemuBlockJobType type,
                    const char *jobname)
{
    VIR_AUTOUNREF(qemuBlockJobDataPtr) job = NULL;

    if (!(job = qemuBlockJobDataNew(type, jobname)))
        return NULL;

    if (qemuBlockJobRegister(job, vm, disk) < 0)
        return NULL;

    VIR_RETURN_PTR(job);
}
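
/* A minimal usage sketch of the startup lifecycle (assuming the caller holds
 * the domain job; the monitor call that actually starts the job in qemu is
 * elided and the job name is chosen purely for illustration):
 *
 *     qemuBlockJobDataPtr job = NULL;
 *     int ret = -1;
 *
 *     if (!(job = qemuBlockJobDiskNew(vm, disk, QEMU_BLOCKJOB_TYPE_PULL,
 *                                     disk->dst)))
 *         goto cleanup;
 *
 *     <ask qemu to start the pull job on the monitor>
 *
 *     qemuBlockJobStarted(job);
 *     ret = 0;
 *
 *  cleanup:
 *     qemuBlockJobStartupFinalize(vm, job);
 *     return ret;
 */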


/**
 * qemuBlockJobDiskGetJob:
 * @disk: disk definition
 *
 * Get a reference to the block job data object associated with @disk.
 */
qemuBlockJobDataPtr
qemuBlockJobDiskGetJob(virDomainDiskDefPtr disk)
{
    qemuBlockJobDataPtr job = QEMU_DOMAIN_DISK_PRIVATE(disk)->blockjob;

    if (!job)
        return NULL;

    return virObjectRef(job);
}
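
/* A minimal usage sketch (hypothetical caller): the returned job carries an
 * extra reference which the caller must drop when done with it:
 *
 *     qemuBlockJobDataPtr job;
 *
 *     if ((job = qemuBlockJobDiskGetJob(disk))) {
 *         if (qemuBlockJobIsRunning(job))
 *             VIR_DEBUG("disk %s has an active block job", disk->dst);
 *         virObjectUnref(job);
 *     }
 */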


/**
 * qemuBlockJobStarted:
 * @job: job data
 *
 * Mark @job as started in qemu.
 */
void
qemuBlockJobStarted(qemuBlockJobDataPtr job)
{
    if (job->state == QEMU_BLOCKJOB_STATE_NEW)
        job->state = QEMU_BLOCKJOB_STATE_RUNNING;
}


/**
 * qemuBlockJobStartupFinalize:
 * @vm: domain object
 * @job: job being started
 *
 * Cancels and clears the job private data if the job was not started with
 * qemu (see qemuBlockJobStarted), or just drops the local reference to @job
 * if it was started.
 */
void
qemuBlockJobStartupFinalize(virDomainObjPtr vm,
                            qemuBlockJobDataPtr job)
{
    if (!job)
        return;

    if (job->state == QEMU_BLOCKJOB_STATE_NEW)
        qemuBlockJobUnregister(job, vm);

    virObjectUnref(job);
}


bool
qemuBlockJobIsRunning(qemuBlockJobDataPtr job)
{
    return job->state == QEMU_BLOCKJOB_STATE_RUNNING ||
           job->state == QEMU_BLOCKJOB_STATE_READY;
}


/**
 * qemuBlockJobEmitEvents:
 *
 * Emits the VIR_DOMAIN_EVENT_ID_BLOCK_JOB and VIR_DOMAIN_EVENT_ID_BLOCK_JOB_2
 * events for a block job. The former event is emitted only for local disks.
 */
static void
qemuBlockJobEmitEvents(virQEMUDriverPtr driver,
                       virDomainObjPtr vm,
                       virDomainDiskDefPtr disk,
                       virDomainBlockJobType type,
                       virConnectDomainEventBlockJobStatus status)
{
    virObjectEventPtr event = NULL;
    virObjectEventPtr event2 = NULL;

    /* don't emit events for jobs without a disk */
    if (!disk)
        return;

    /* don't emit events for internal jobs and states */
    if (type >= VIR_DOMAIN_BLOCK_JOB_TYPE_LAST ||
        status >= VIR_DOMAIN_BLOCK_JOB_LAST)
        return;

    if (virStorageSourceIsLocalStorage(disk->src) &&
        !virStorageSourceIsEmpty(disk->src)) {
        event = virDomainEventBlockJobNewFromObj(vm, virDomainDiskGetSource(disk),
                                                 type, status);
        virObjectEventStateQueue(driver->domainEventState, event);
    }

    event2 = virDomainEventBlockJob2NewFromObj(vm, disk->dst, type, status);
    virObjectEventStateQueue(driver->domainEventState, event2);
}


static void
qemuBlockJobEventProcessLegacyCompleted(virQEMUDriverPtr driver,
                                        virDomainObjPtr vm,
                                        qemuBlockJobDataPtr job,
                                        int asyncJob)
{
    virDomainDiskDefPtr disk = job->disk;
    virDomainDiskDefPtr persistDisk = NULL;

    if (!disk)
        return;

    if (disk->mirrorState == VIR_DOMAIN_DISK_MIRROR_STATE_PIVOT) {
        if (vm->newDef) {
            virStorageSourcePtr copy = NULL;

            if ((persistDisk = virDomainDiskByName(vm->newDef,
                                                   disk->dst, false))) {
                copy = virStorageSourceCopy(disk->mirror, false);
                if (!copy ||
                    virStorageSourceInitChainElement(copy,
                                                     persistDisk->src,
                                                     true) < 0) {
                    VIR_WARN("Unable to update persistent definition "
                             "on vm %s after block job",
                             vm->def->name);
                    virObjectUnref(copy);
                    copy = NULL;
                    persistDisk = NULL;
                }
            }
            if (copy) {
                virObjectUnref(persistDisk->src);
                persistDisk->src = copy;
            }
        }

        /* XXX We want to revoke security labels as well as audit that
         * revocation, before dropping the original source.  But it gets
         * tricky if both source and mirror share common backing files (we
         * want to only revoke the non-shared portion of the chain); so for
         * now, we leak the access to the original.  */
        virDomainLockImageDetach(driver->lockManager, vm, disk->src);

        /* Move secret driver metadata */
        if (qemuSecurityMoveImageMetadata(driver, vm, disk->src, disk->mirror) < 0)
            VIR_WARN("Unable to move disk metadata on vm %s", vm->def->name);

        virObjectUnref(disk->src);
        disk->src = disk->mirror;
    } else {
        if (disk->mirror) {
            virDomainLockImageDetach(driver->lockManager, vm, disk->mirror);
            virObjectUnref(disk->mirror);
        }
    }

    /* Recompute the cached backing chain to match our
     * updates.  Better would be storing the chain ourselves
     * rather than reprobing, but we haven't quite completed
     * that conversion to use our XML tracking. */
    disk->mirror = NULL;
    disk->mirrorState = VIR_DOMAIN_DISK_MIRROR_STATE_NONE;
    disk->mirrorJob = VIR_DOMAIN_BLOCK_JOB_TYPE_UNKNOWN;
    disk->src->id = 0;
    virStorageSourceBackingStoreClear(disk->src);
    ignore_value(qemuDomainDetermineDiskChain(driver, vm, disk, NULL, true));
    ignore_value(qemuBlockNodeNamesDetect(driver, vm, asyncJob));
    qemuBlockJobUnregister(job, vm);
    qemuDomainSaveConfig(vm);
}


/**
 * qemuBlockJobEventProcessLegacy:
 * @driver: qemu driver
 * @vm: domain
 * @job: job to process events for
 * @asyncJob: current qemu asynchronous job type
 *
 * Update the disk's mirror state in response to a block job event
 * from QEMU. For mirror states that must survive libvirt restart,
 * also update the domain's status XML.
 */
static void
qemuBlockJobEventProcessLegacy(virQEMUDriverPtr driver,
                               virDomainObjPtr vm,
                               qemuBlockJobDataPtr job,
                               int asyncJob)
{
    VIR_AUTOUNREF(virQEMUDriverConfigPtr) cfg = virQEMUDriverGetConfig(driver);
    virDomainDiskDefPtr disk = job->disk;

    VIR_DEBUG("disk=%s, mirrorState=%s, type=%d, state=%d, newstate=%d",
              disk->dst,
              NULLSTR(virDomainDiskMirrorStateTypeToString(disk->mirrorState)),
              job->type,
              job->state,
              job->newstate);

    if (job->newstate == -1)
        return;

    qemuBlockJobEmitEvents(driver, vm, disk, job->type, job->newstate);

    job->state = job->newstate;
    job->newstate = -1;

    /* If we completed a block pull or commit, then update the XML
     * to match.  */
    switch ((virConnectDomainEventBlockJobStatus) job->state) {
    case VIR_DOMAIN_BLOCK_JOB_COMPLETED:
        qemuBlockJobEventProcessLegacyCompleted(driver, vm, job, asyncJob);
        break;

    case VIR_DOMAIN_BLOCK_JOB_READY:
        disk->mirrorState = VIR_DOMAIN_DISK_MIRROR_STATE_READY;
        break;

    case VIR_DOMAIN_BLOCK_JOB_FAILED:
    case VIR_DOMAIN_BLOCK_JOB_CANCELED:
        if (disk->mirror) {
            virDomainLockImageDetach(driver->lockManager, vm, disk->mirror);
            virObjectUnref(disk->mirror);
            disk->mirror = NULL;
        }
        disk->mirrorState = VIR_DOMAIN_DISK_MIRROR_STATE_NONE;
        disk->mirrorJob = VIR_DOMAIN_BLOCK_JOB_TYPE_UNKNOWN;
        qemuBlockJobUnregister(job, vm);
        break;

    case VIR_DOMAIN_BLOCK_JOB_LAST:
        break;
    }

    if (virDomainSaveStatus(driver->xmlopt, cfg->stateDir, vm, driver->caps) < 0)
        VIR_WARN("Unable to save status on vm %s after block job", vm->def->name);
}


/**
 * qemuBlockJobUpdate:
 * @vm: domain
 * @job: job data
 * @asyncJob: current qemu asynchronous job type
 *
 * Update the disk's mirror state in response to a pending block job event
 * recorded in @job by the qemuProcessHandleBlockJob event handler.
 *
 * Returns the block job event that was processed or -1 if there was no
 * pending event.
 */
int
qemuBlockJobUpdate(virDomainObjPtr vm,
                   qemuBlockJobDataPtr job,
                   int asyncJob)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;

    if (job->newstate == -1)
        return -1;

    qemuBlockJobEventProcessLegacy(priv->driver, vm, job, asyncJob);

    return job->state;
}
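
/* A minimal sketch of consuming the return value (the waiting primitive used
 * between calls is up to the caller and elided here):
 *
 *     int state = qemuBlockJobUpdate(vm, job, QEMU_ASYNC_JOB_NONE);
 *
 *     if (state == -1) {
 *         <no event was pending; wait and call qemuBlockJobUpdate again>
 *     } else if (state == QEMU_BLOCKJOB_STATE_READY) {
 *         <the job may now be pivoted or completed>
 *     } else if (!qemuBlockJobIsRunning(job)) {
 *         <the job finished: completed, failed or cancelled>
 *     }
 */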


/**
 * qemuBlockJobSyncBegin:
 * @job: block job data
 *
 * Begin a new synchronous block job for @job's disk. The synchronous
 * block job is ended by a call to qemuBlockJobSyncEnd, or by
 * the guest quitting.
 *
 * During a synchronous block job, a block job event for the disk
 * will not be processed asynchronously. Instead, it will be
 * processed only when qemuBlockJobUpdate or qemuBlockJobSyncEnd
 * is called.
 */
void
qemuBlockJobSyncBegin(qemuBlockJobDataPtr job)
{
    const char *diskdst = NULL;

    if (job->disk)
        diskdst = job->disk->dst;

    VIR_DEBUG("disk=%s", NULLSTR(diskdst));
    job->synchronous = true;
}


/**
 * qemuBlockJobSyncEnd:
 * @vm: domain
 * @job: block job data
 * @asyncJob: current qemu asynchronous job type
 *
 * End the synchronous block job for @job's disk. Any pending block job event
 * for the disk is processed. Note that it is not necessary to call this
 * function if the block job was not started successfully, as long as
 * qemuBlockJobStartupFinalize will be called.
 */
void
qemuBlockJobSyncEnd(virDomainObjPtr vm,
                    qemuBlockJobDataPtr job,
                    int asyncJob)
{
    const char *diskdst = NULL;

    if (job->disk)
        diskdst = job->disk->dst;

    VIR_DEBUG("disk=%s", NULLSTR(diskdst));
    qemuBlockJobUpdate(vm, job, asyncJob);
    job->synchronous = false;
}
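
/* A minimal sketch of the synchronous protocol (assuming the caller holds the
 * domain object lock; the monitor call that cancels or completes the job is
 * elided):
 *
 *     qemuBlockJobSyncBegin(job);
 *
 *     <ask qemu to cancel or complete the job on the monitor>
 *
 *     qemuBlockJobUpdate(vm, job, QEMU_ASYNC_JOB_NONE);
 *     while (qemuBlockJobIsRunning(job)) {
 *         if (virDomainObjWait(vm) < 0)
 *             break;
 *         qemuBlockJobUpdate(vm, job, QEMU_ASYNC_JOB_NONE);
 *     }
 *
 *     qemuBlockJobSyncEnd(vm, job, QEMU_ASYNC_JOB_NONE);
 */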


qemuBlockJobDataPtr
qemuBlockJobGetByDisk(virDomainDiskDefPtr disk)
{
    qemuBlockJobDataPtr job = QEMU_DOMAIN_DISK_PRIVATE(disk)->blockjob;

    if (!job)
        return NULL;

    return virObjectRef(job);
}