/*
 * qemu_blockjob.c: helper functions for QEMU block jobs
 *
 * Copyright (C) 2006-2015 Red Hat, Inc.
 * Copyright (C) 2006 Daniel P. Berrange
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library.  If not, see
 * <http://www.gnu.org/licenses/>.
 */

#include <config.h>

#include "internal.h"

#include "qemu_blockjob.h"
#include "qemu_block.h"
#include "qemu_domain.h"

#include "conf/domain_conf.h"
#include "conf/domain_event.h"

#include "virlog.h"
#include "virstoragefile.h"
#include "virthread.h"
#include "virtime.h"
#include "locking/domain_lock.h"
#include "viralloc.h"
#include "virstring.h"

#define VIR_FROM_THIS VIR_FROM_QEMU

VIR_LOG_INIT("qemu.qemu_blockjob");


static virClassPtr qemuBlockJobDataClass;


static void
qemuBlockJobDataDispose(void *obj)
{
    qemuBlockJobDataPtr job = obj;

    VIR_FREE(job->name);
    VIR_FREE(job->errmsg);
}


static int
qemuBlockJobDataOnceInit(void)
{
    if (!VIR_CLASS_NEW(qemuBlockJobData, virClassForObject()))
        return -1;

    return 0;
}


VIR_ONCE_GLOBAL_INIT(qemuBlockJobData)


static qemuBlockJobDataPtr
qemuBlockJobDataNew(qemuBlockJobType type,
                    const char *name)
{
    qemuBlockJobDataPtr job = NULL;
    qemuBlockJobDataPtr ret = NULL;

    if (qemuBlockJobDataInitialize() < 0)
        return NULL;

    if (!(job = virObjectNew(qemuBlockJobDataClass)))
        return NULL;

    if (VIR_STRDUP(job->name, name) < 0)
        goto cleanup;

    job->state = QEMU_BLOCKJOB_STATE_NEW;
    job->newstate = -1;
    job->type = type;

    VIR_STEAL_PTR(ret, job);

 cleanup:
    virObjectUnref(job);
    return ret;
}


/**
 * qemuBlockJobDiskNew:
 * @disk: disk definition
 * @type: block job type
 * @jobname: name of the block job
 *
 * Create a new block job object of @type and associate it with @disk.
 *
 * Returns a new block job object on success or NULL on failure.
 */
qemuBlockJobDataPtr
qemuBlockJobDiskNew(virDomainDiskDefPtr disk,
                    qemuBlockJobType type,
                    const char *jobname)
{
    qemuBlockJobDataPtr job = NULL;

    if (!(job = qemuBlockJobDataNew(type, jobname)))
        return NULL;

    job->disk = disk;
    if (disk)
        QEMU_DOMAIN_DISK_PRIVATE(disk)->blockjob = virObjectRef(job);

    return job;
}
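
/* Usage sketch (illustrative only, not part of this file; error handling,
 * locking and the actual qemu monitor interaction are abbreviated, and the
 * QEMU_BLOCKJOB_TYPE_PULL type and "job0" name are arbitrary examples). It
 * shows the intended lifecycle: create the job object, mark it started once
 * qemu has accepted it, then always finalize the local reference:
 *
 *     qemuBlockJobDataPtr job = NULL;
 *     int rc = -1;
 *
 *     if (!(job = qemuBlockJobDiskNew(disk, QEMU_BLOCKJOB_TYPE_PULL, "job0")))
 *         return -1;
 *
 *     rc = ...start the block job via the qemu monitor...;
 *
 *     if (rc == 0)
 *         qemuBlockJobStarted(job);      (job is now tracked as RUNNING)
 *
 *     qemuBlockJobStartupFinalize(job);  (drops the local reference; cancels
 *                                         the job data if it never started)
 */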


/**
 * qemuBlockJobDiskGetJob:
 * @disk: disk definition
 *
 * Get a reference to the block job data object associated with @disk.
 */
qemuBlockJobDataPtr
qemuBlockJobDiskGetJob(virDomainDiskDefPtr disk)
{
    qemuBlockJobDataPtr job = QEMU_DOMAIN_DISK_PRIVATE(disk)->blockjob;

    if (!job)
        return NULL;

    return virObjectRef(job);
}
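
/* Note (illustrative sketch, not part of this file): the returned job object
 * carries a reference for the caller, so it must be unref'd when no longer
 * needed, e.g.:
 *
 *     qemuBlockJobDataPtr job = NULL;
 *
 *     if ((job = qemuBlockJobDiskGetJob(disk))) {
 *         if (qemuBlockJobIsRunning(job))
 *             VIR_DEBUG("disk %s has an active block job", disk->dst);
 *         virObjectUnref(job);
 *     }
 */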


/**
 * qemuBlockJobStarted:
 * @job: job data
 *
 * Mark @job as started in qemu.
 */
void
qemuBlockJobStarted(qemuBlockJobDataPtr job)
{
    job->state = QEMU_BLOCKJOB_STATE_RUNNING;
}


static void
qemuBlockJobTerminate(qemuBlockJobDataPtr job)
{
    qemuDomainDiskPrivatePtr diskPriv;

    if (job->disk) {
        diskPriv = QEMU_DOMAIN_DISK_PRIVATE(job->disk);

        if (job == diskPriv->blockjob) {
            virObjectUnref(diskPriv->blockjob);
            diskPriv->blockjob = NULL;
        }
    }
}


/**
 * qemuBlockJobStartupFinalize:
 * @job: job being started
 *
 * Cancels and clears the job private data if the job was not started with
 * qemu (see qemuBlockJobStarted) or just clears up the local reference
 * to @job if it was started.
 */
void
qemuBlockJobStartupFinalize(qemuBlockJobDataPtr job)
{
    if (!job)
        return;

    if (job->state == QEMU_BLOCKJOB_STATE_NEW)
        qemuBlockJobTerminate(job);

    virObjectUnref(job);
}


bool
qemuBlockJobIsRunning(qemuBlockJobDataPtr job)
{
    return job->state == QEMU_BLOCKJOB_STATE_RUNNING ||
           job->state == QEMU_BLOCKJOB_STATE_READY;
}


/**
 * qemuBlockJobEmitEvents:
 *
 * Emits the VIR_DOMAIN_EVENT_ID_BLOCK_JOB and VIR_DOMAIN_EVENT_ID_BLOCK_JOB_2
 * events for a block job. The former event is emitted only for local disks.
 */
static void
qemuBlockJobEmitEvents(virQEMUDriverPtr driver,
                       virDomainObjPtr vm,
                       virDomainDiskDefPtr disk,
                       virDomainBlockJobType type,
                       virConnectDomainEventBlockJobStatus status)
{
    virObjectEventPtr event = NULL;
    virObjectEventPtr event2 = NULL;

    if (virStorageSourceIsLocalStorage(disk->src) &&
        !virStorageSourceIsEmpty(disk->src)) {
        event = virDomainEventBlockJobNewFromObj(vm, virDomainDiskGetSource(disk),
                                                 type, status);
        virObjectEventStateQueue(driver->domainEventState, event);
    }

    event2 = virDomainEventBlockJob2NewFromObj(vm, disk->dst, type, status);
    virObjectEventStateQueue(driver->domainEventState, event2);
}


static void
qemuBlockJobEventProcessLegacyCompleted(virQEMUDriverPtr driver,
                                        virDomainObjPtr vm,
                                        qemuBlockJobDataPtr job,
                                        int asyncJob)
{
    virDomainDiskDefPtr disk = job->disk;
    virDomainDiskDefPtr persistDisk = NULL;

    if (!disk)
        return;

    if (disk->mirrorState == VIR_DOMAIN_DISK_MIRROR_STATE_PIVOT) {
        if (vm->newDef) {
            virStorageSourcePtr copy = NULL;

            if ((persistDisk = virDomainDiskByName(vm->newDef,
                                                   disk->dst, false))) {
                copy = virStorageSourceCopy(disk->mirror, false);
                if (!copy ||
                    virStorageSourceInitChainElement(copy,
                                                     persistDisk->src,
                                                     true) < 0) {
                    VIR_WARN("Unable to update persistent definition "
                             "on vm %s after block job",
                             vm->def->name);
                    virStorageSourceFree(copy);
                    copy = NULL;
                    persistDisk = NULL;
                }
            }
            if (copy) {
                virStorageSourceFree(persistDisk->src);
                persistDisk->src = copy;
            }
        }

        /* XXX We want to revoke security labels as well as audit that
         * revocation, before dropping the original source.  But it gets
         * tricky if both source and mirror share common backing files (we
         * want to only revoke the non-shared portion of the chain); so for
         * now, we leak the access to the original.  */
        virDomainLockImageDetach(driver->lockManager, vm, disk->src);
        virStorageSourceFree(disk->src);
        disk->src = disk->mirror;
    } else {
        if (disk->mirror) {
            virDomainLockImageDetach(driver->lockManager, vm, disk->mirror);
            virStorageSourceFree(disk->mirror);
        }
    }

    /* Recompute the cached backing chain to match our
     * updates.  Better would be storing the chain ourselves
     * rather than reprobing, but we haven't quite completed
     * that conversion to use our XML tracking. */
    disk->mirror = NULL;
    disk->mirrorState = VIR_DOMAIN_DISK_MIRROR_STATE_NONE;
    disk->mirrorJob = VIR_DOMAIN_BLOCK_JOB_TYPE_UNKNOWN;
    disk->src->id = 0;
    virStorageSourceBackingStoreClear(disk->src);
    ignore_value(qemuDomainDetermineDiskChain(driver, vm, disk, true));
    ignore_value(qemuBlockNodeNamesDetect(driver, vm, asyncJob));
    qemuBlockJobTerminate(job);
}


/**
 * qemuBlockJobEventProcessLegacy:
 * @driver: qemu driver
 * @vm: domain
 * @job: job to process events for
 * @asyncJob: current qemu asynchronous job type
 *
 * Update the disk's mirror state in response to a block job event
 * from QEMU. For mirror states that must survive libvirt
 * restart, also update the domain's status XML.
 */
static void
qemuBlockJobEventProcessLegacy(virQEMUDriverPtr driver,
                               virDomainObjPtr vm,
                               qemuBlockJobDataPtr job,
                               int asyncJob)
{
    virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
    virDomainDiskDefPtr disk = job->disk;

    VIR_DEBUG("disk=%s, mirrorState=%s, type=%d, state=%d, newstate=%d",
              disk->dst,
              NULLSTR(virDomainDiskMirrorStateTypeToString(disk->mirrorState)),
              job->type,
              job->state,
              job->newstate);

    if (job->newstate == -1)
        return;

    qemuBlockJobEmitEvents(driver, vm, disk, job->type, job->newstate);

    /* If we completed a block pull or commit, then update the XML
     * to match.  */
    switch ((virConnectDomainEventBlockJobStatus) job->newstate) {
    case VIR_DOMAIN_BLOCK_JOB_COMPLETED:
        qemuBlockJobEventProcessLegacyCompleted(driver, vm, job, asyncJob);
        break;

    case VIR_DOMAIN_BLOCK_JOB_READY:
        disk->mirrorState = VIR_DOMAIN_DISK_MIRROR_STATE_READY;
        break;

    case VIR_DOMAIN_BLOCK_JOB_FAILED:
    case VIR_DOMAIN_BLOCK_JOB_CANCELED:
        if (disk->mirror) {
            virDomainLockImageDetach(driver->lockManager, vm, disk->mirror);
            virStorageSourceFree(disk->mirror);
            disk->mirror = NULL;
        }
        disk->mirrorState = VIR_DOMAIN_DISK_MIRROR_STATE_NONE;
        disk->mirrorJob = VIR_DOMAIN_BLOCK_JOB_TYPE_UNKNOWN;
        qemuBlockJobTerminate(job);
        break;

    case VIR_DOMAIN_BLOCK_JOB_LAST:
        break;
    }

    job->state = job->newstate;
    job->newstate = -1;

    if (virDomainSaveStatus(driver->xmlopt, cfg->stateDir, vm, driver->caps) < 0)
        VIR_WARN("Unable to save status on vm %s after block job", vm->def->name);

    if (job->state == VIR_DOMAIN_BLOCK_JOB_COMPLETED && vm->newDef) {
        if (virDomainSaveConfig(cfg->configDir, driver->caps, vm->newDef) < 0)
            VIR_WARN("Unable to update persistent definition on vm %s "
                     "after block job", vm->def->name);
    }

    virObjectUnref(cfg);
}


/**
 * qemuBlockJobUpdate:
 * @vm: domain
 * @job: job data
 * @asyncJob: current qemu asynchronous job type
 *
 * Update the disk's mirror state in response to a block job event recorded
 * in @job->newstate by the qemuProcessHandleBlockJob event handler.
 *
 * Returns the block job event state that was processed or -1 if there was
 * no pending event.
 */
int
qemuBlockJobUpdate(virDomainObjPtr vm,
                   qemuBlockJobDataPtr job,
                   int asyncJob)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;

    if (job->newstate == -1)
        return -1;

    qemuBlockJobEventProcessLegacy(priv->driver, vm, job, asyncJob);

    return job->state;
}
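
/* Illustrative sketch (not part of this file): a caller that only wants to
 * flush a pending event and inspect the result can check the return value;
 * the comparison uses the same QEMU_BLOCKJOB_STATE_* values consulted by
 * qemuBlockJobIsRunning above:
 *
 *     int state = qemuBlockJobUpdate(vm, job, asyncJob);
 *
 *     if (state == -1)
 *         VIR_DEBUG("no pending block job event");
 *     else if (state == QEMU_BLOCKJOB_STATE_READY)
 *         VIR_DEBUG("block job reached READY state");
 */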


/**
 * qemuBlockJobSyncBegin:
 * @job: block job data
 *
 * Begin a new synchronous block job for the disk associated with @job.
 * The synchronous block job is ended by a call to qemuBlockJobSyncEnd, or
 * by the guest quitting.
 *
 * During a synchronous block job, a block job event for the disk
 * will not be processed asynchronously. Instead, it will be
 * processed only when qemuBlockJobUpdate or qemuBlockJobSyncEnd
 * is called.
 */
void
qemuBlockJobSyncBegin(qemuBlockJobDataPtr job)
{
    const char *diskdst = NULL;

    if (job->disk)
        diskdst = job->disk->dst;

    VIR_DEBUG("disk=%s", NULLSTR(diskdst));
    job->synchronous = true;
    job->newstate = -1;
}


/**
 * qemuBlockJobSyncEnd:
 * @vm: domain
 * @job: block job data
 * @asyncJob: current qemu asynchronous job type
 *
 * End a synchronous block job for the disk associated with @job. Any pending
 * block job event for the disk is processed. Note that it is not necessary to
 * call this function if the block job was not started successfully and
 * qemuBlockJobStartupFinalize will be called.
 */
void
qemuBlockJobSyncEnd(virDomainObjPtr vm,
                    qemuBlockJobDataPtr job,
                    int asyncJob)
{
    const char *diskdst = NULL;

    if (job->disk)
        diskdst = job->disk->dst;

    VIR_DEBUG("disk=%s", NULLSTR(diskdst));
    qemuBlockJobUpdate(vm, job, asyncJob);
    job->synchronous = false;
}
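
/* Synchronous handling sketch (illustrative only, not part of this file;
 * virDomainObjWait and the start step are stand-ins for what a real caller,
 * e.g. a block copy/commit API implementation, would do):
 *
 *     qemuBlockJobSyncBegin(job);
 *     ...start the job in qemu; qemuBlockJobStarted(job) on success...
 *     qemuBlockJobUpdate(vm, job, asyncJob);
 *     while (qemuBlockJobIsRunning(job)) {
 *         if (virDomainObjWait(vm) < 0)
 *             break;
 *         qemuBlockJobUpdate(vm, job, asyncJob);
 *     }
 *     qemuBlockJobSyncEnd(vm, job, asyncJob);
 */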


qemuBlockJobDataPtr
qemuBlockJobGetByDisk(virDomainDiskDefPtr disk)
{
    qemuBlockJobDataPtr job = QEMU_DOMAIN_DISK_PRIVATE(disk)->blockjob;

    if (!job)
        return NULL;

    return virObjectRef(job);
}