提交 319992d4 编写于 作者: E Eric Blake

API: document scheduler parameter names

Document the parameter names that will be used by
virDomain{Get,Set}SchedulerParameters{,Flags}, rather than
hard-coding those names in each driver, to match what is
done with memory, blkio, and blockstats parameters.

* include/libvirt/libvirt.h.in (VIR_DOMAIN_SCHEDULER_CPU_SHARES)
(VIR_DOMAIN_SCHEDULER_VCPU_PERIOD)
(VIR_DOMAIN_SCHEDULER_VCPU_QUOTA, VIR_DOMAIN_SCHEDULER_WEIGHT)
(VIR_DOMAIN_SCHEDULER_CAP, VIR_DOMAIN_SCHEDULER_RESERVATION)
(VIR_DOMAIN_SCHEDULER_LIMIT, VIR_DOMAIN_SCHEDULER_SHARES): New
field name macros.
* src/qemu/qemu_driver.c (qemuSetSchedulerParametersFlags)
(qemuGetSchedulerParametersFlags): Use new defines.
* src/test/test_driver.c (testDomainGetSchedulerParamsFlags)
(testDomainSetSchedulerParamsFlags): Likewise.
* src/xen/xen_hypervisor.c (xenHypervisorGetSchedulerParameters)
(xenHypervisorSetSchedulerParameters): Likewise.
* src/xen/xend_internal.c (xenDaemonGetSchedulerParameters)
(xenDaemonSetSchedulerParameters): Likewise.
* src/lxc/lxc_driver.c (lxcSetSchedulerParametersFlags)
(lxcGetSchedulerParametersFlags): Likewise.
* src/esx/esx_driver.c (esxDomainGetSchedulerParametersFlags)
(esxDomainSetSchedulerParametersFlags): Likewise.
* src/libxl/libxl_driver.c (libxlDomainGetSchedulerParametersFlags)
(libxlDomainSetSchedulerParametersFlags): Likewise.
上级 26b74307
...@@ -533,6 +533,70 @@ typedef virTypedParameter *virTypedParameterPtr; ...@@ -533,6 +533,70 @@ typedef virTypedParameter *virTypedParameterPtr;
/* Management of scheduler parameters */ /* Management of scheduler parameters */
/**
* VIR_DOMAIN_SCHEDULER_CPU_SHARES:
*
* Macro represents proportional weight of the scheduler used on the
* host cpu, when using the posix scheduler, as a ullong.
*/
#define VIR_DOMAIN_SCHEDULER_CPU_SHARES "cpu_shares"
/**
* VIR_DOMAIN_SCHEDULER_VCPU_PERIOD:
*
* Macro represents the enforcement period for a quota, in microseconds,
* when using the posix scheduler, as a ullong.
*/
#define VIR_DOMAIN_SCHEDULER_VCPU_PERIOD "vcpu_period"
/**
* VIR_DOMAIN_SCHEDULER_VCPU_QUOTA:
*
* Macro represents the maximum bandwidth to be used within a period,
* when using the posix scheduler, as an llong.
*/
#define VIR_DOMAIN_SCHEDULER_VCPU_QUOTA "vcpu_quota"
/**
* VIR_DOMAIN_SCHEDULER_WEIGHT:
*
* Macro represents the relative weight, when using the credit
* scheduler, as a uint.
*/
#define VIR_DOMAIN_SCHEDULER_WEIGHT "weight"
/**
* VIR_DOMAIN_SCHEDULER_CAP:
*
* Macro represents the maximum scheduler cap, when using the credit
* scheduler, as a uint.
*/
#define VIR_DOMAIN_SCHEDULER_CAP "cap"
/**
* VIR_DOMAIN_SCHEDULER_RESERVATION:
*
* Macro represents the scheduler reservation value, when using the
* allocation scheduler, as an llong.
*/
#define VIR_DOMAIN_SCHEDULER_RESERVATION "reservation"
/**
* VIR_DOMAIN_SCHEDULER_LIMIT:
*
* Macro represents the scheduler limit value, when using the
* allocation scheduler, as an llong.
*/
#define VIR_DOMAIN_SCHEDULER_LIMIT "limit"
/**
* VIR_DOMAIN_SCHEDULER_SHARES:
*
* Macro represents the scheduler shares value, when using the
* allocation scheduler, as an int.
*/
#define VIR_DOMAIN_SCHEDULER_SHARES "shares"
/* /*
* Fetch scheduler parameters, caller allocates 'params' field of size 'nparams' * Fetch scheduler parameters, caller allocates 'params' field of size 'nparams'
*/ */
...@@ -586,15 +650,16 @@ typedef virDomainBlockStatsStruct *virDomainBlockStatsPtr; ...@@ -586,15 +650,16 @@ typedef virDomainBlockStatsStruct *virDomainBlockStatsPtr;
/** /**
* VIR_DOMAIN_BLOCK_STATS_FIELD_LENGTH: * VIR_DOMAIN_BLOCK_STATS_FIELD_LENGTH:
* *
* Macro providing the field length of virDomainBlockStatsFlagsStruct * Macro providing the field length of parameter names when using
* virDomainBlockStatsFlags().
*/ */
#define VIR_DOMAIN_BLOCK_STATS_FIELD_LENGTH 80 #define VIR_DOMAIN_BLOCK_STATS_FIELD_LENGTH VIR_TYPED_PARAM_FIELD_LENGTH
/** /**
* VIR_DOMAIN_BLOCK_STATS_READ_BYTES: * VIR_DOMAIN_BLOCK_STATS_READ_BYTES:
* *
* Macro represents the total number of read bytes of the * Macro represents the total number of read bytes of the
* block device. * block device, as an llong.
*/ */
#define VIR_DOMAIN_BLOCK_STATS_READ_BYTES "rd_bytes" #define VIR_DOMAIN_BLOCK_STATS_READ_BYTES "rd_bytes"
...@@ -602,7 +667,7 @@ typedef virDomainBlockStatsStruct *virDomainBlockStatsPtr; ...@@ -602,7 +667,7 @@ typedef virDomainBlockStatsStruct *virDomainBlockStatsPtr;
* VIR_DOMAIN_BLOCK_STATS_READ_REQ: * VIR_DOMAIN_BLOCK_STATS_READ_REQ:
* *
* Macro represents the total read requests of the * Macro represents the total read requests of the
* block device. * block device, as an llong.
*/ */
#define VIR_DOMAIN_BLOCK_STATS_READ_REQ "rd_operations" #define VIR_DOMAIN_BLOCK_STATS_READ_REQ "rd_operations"
...@@ -610,7 +675,7 @@ typedef virDomainBlockStatsStruct *virDomainBlockStatsPtr; ...@@ -610,7 +675,7 @@ typedef virDomainBlockStatsStruct *virDomainBlockStatsPtr;
* VIR_DOMAIN_BLOCK_STATS_READ_TOTAL_TIMES: * VIR_DOMAIN_BLOCK_STATS_READ_TOTAL_TIMES:
* *
 * Macro represents the total time spent on cache reads in * Macro represents the total time spent on cache reads in
* nano-seconds of the block device. * nano-seconds of the block device, as an llong.
*/ */
#define VIR_DOMAIN_BLOCK_STATS_READ_TOTAL_TIMES "rd_total_times" #define VIR_DOMAIN_BLOCK_STATS_READ_TOTAL_TIMES "rd_total_times"
...@@ -618,7 +683,7 @@ typedef virDomainBlockStatsStruct *virDomainBlockStatsPtr; ...@@ -618,7 +683,7 @@ typedef virDomainBlockStatsStruct *virDomainBlockStatsPtr;
* VIR_DOMAIN_BLOCK_STATS_WRITE_BYTES: * VIR_DOMAIN_BLOCK_STATS_WRITE_BYTES:
* *
* Macro represents the total number of write bytes of the * Macro represents the total number of write bytes of the
* block device. * block device, as an llong.
*/ */
#define VIR_DOMAIN_BLOCK_STATS_WRITE_BYTES "wr_bytes" #define VIR_DOMAIN_BLOCK_STATS_WRITE_BYTES "wr_bytes"
...@@ -626,7 +691,7 @@ typedef virDomainBlockStatsStruct *virDomainBlockStatsPtr; ...@@ -626,7 +691,7 @@ typedef virDomainBlockStatsStruct *virDomainBlockStatsPtr;
* VIR_DOMAIN_BLOCK_STATS_WRITE_REQ: * VIR_DOMAIN_BLOCK_STATS_WRITE_REQ:
* *
* Macro represents the total write requests of the * Macro represents the total write requests of the
* block device. * block device, as an llong.
*/ */
#define VIR_DOMAIN_BLOCK_STATS_WRITE_REQ "wr_operations" #define VIR_DOMAIN_BLOCK_STATS_WRITE_REQ "wr_operations"
...@@ -634,7 +699,7 @@ typedef virDomainBlockStatsStruct *virDomainBlockStatsPtr; ...@@ -634,7 +699,7 @@ typedef virDomainBlockStatsStruct *virDomainBlockStatsPtr;
* VIR_DOMAIN_BLOCK_STATS_WRITE_TOTAL_TIMES: * VIR_DOMAIN_BLOCK_STATS_WRITE_TOTAL_TIMES:
* *
 * Macro represents the total time spent on cache writes in * Macro represents the total time spent on cache writes in
* nano-seconds of the block device. * nano-seconds of the block device, as an llong.
*/ */
#define VIR_DOMAIN_BLOCK_STATS_WRITE_TOTAL_TIMES "wr_total_times" #define VIR_DOMAIN_BLOCK_STATS_WRITE_TOTAL_TIMES "wr_total_times"
...@@ -642,7 +707,7 @@ typedef virDomainBlockStatsStruct *virDomainBlockStatsPtr; ...@@ -642,7 +707,7 @@ typedef virDomainBlockStatsStruct *virDomainBlockStatsPtr;
* VIR_DOMAIN_BLOCK_STATS_FLUSH_REQ: * VIR_DOMAIN_BLOCK_STATS_FLUSH_REQ:
* *
* Macro represents the total flush requests of the * Macro represents the total flush requests of the
* block device. * block device, as an llong.
*/ */
#define VIR_DOMAIN_BLOCK_STATS_FLUSH_REQ "flush_operations" #define VIR_DOMAIN_BLOCK_STATS_FLUSH_REQ "flush_operations"
...@@ -650,14 +715,14 @@ typedef virDomainBlockStatsStruct *virDomainBlockStatsPtr; ...@@ -650,14 +715,14 @@ typedef virDomainBlockStatsStruct *virDomainBlockStatsPtr;
* VIR_DOMAIN_BLOCK_STATS_FLUSH_TOTAL_TIMES: * VIR_DOMAIN_BLOCK_STATS_FLUSH_TOTAL_TIMES:
* *
 * Macro represents the total time spent on cache flushing in * Macro represents the total time spent on cache flushing in
* nano-seconds of the block device. * nano-seconds of the block device, as an llong.
*/ */
#define VIR_DOMAIN_BLOCK_STATS_FLUSH_TOTAL_TIMES "flush_total_times" #define VIR_DOMAIN_BLOCK_STATS_FLUSH_TOTAL_TIMES "flush_total_times"
/** /**
* VIR_DOMAIN_BLOCK_STATS_ERRS: * VIR_DOMAIN_BLOCK_STATS_ERRS:
* *
* In Xen this returns the mysterious 'oo_req' * In Xen this returns the mysterious 'oo_req', as an llong.
*/ */
#define VIR_DOMAIN_BLOCK_STATS_ERRS "errs" #define VIR_DOMAIN_BLOCK_STATS_ERRS "errs"
...@@ -1136,7 +1201,7 @@ char * virDomainGetSchedulerType(virDomainPtr domain, ...@@ -1136,7 +1201,7 @@ char * virDomainGetSchedulerType(virDomainPtr domain,
* VIR_DOMAIN_BLKIO_WEIGHT: * VIR_DOMAIN_BLKIO_WEIGHT:
* *
* Macro for the Blkio tunable weight: it represents the io weight * Macro for the Blkio tunable weight: it represents the io weight
* the guest can use. * the guest can use, as a uint.
*/ */
#define VIR_DOMAIN_BLKIO_WEIGHT "weight" #define VIR_DOMAIN_BLKIO_WEIGHT "weight"
...@@ -1163,7 +1228,7 @@ int virDomainGetBlkioParameters(virDomainPtr domain, ...@@ -1163,7 +1228,7 @@ int virDomainGetBlkioParameters(virDomainPtr domain,
* VIR_DOMAIN_MEMORY_HARD_LIMIT: * VIR_DOMAIN_MEMORY_HARD_LIMIT:
* *
* Macro for the memory tunable hard_limit: it represents the maximum memory * Macro for the memory tunable hard_limit: it represents the maximum memory
* the guest can use. * the guest can use, as a ullong.
*/ */
#define VIR_DOMAIN_MEMORY_HARD_LIMIT "hard_limit" #define VIR_DOMAIN_MEMORY_HARD_LIMIT "hard_limit"
...@@ -1172,7 +1237,7 @@ int virDomainGetBlkioParameters(virDomainPtr domain, ...@@ -1172,7 +1237,7 @@ int virDomainGetBlkioParameters(virDomainPtr domain,
* VIR_DOMAIN_MEMORY_SOFT_LIMIT: * VIR_DOMAIN_MEMORY_SOFT_LIMIT:
* *
* Macro for the memory tunable soft_limit: it represents the memory upper * Macro for the memory tunable soft_limit: it represents the memory upper
* limit enforced during memory contention. * limit enforced during memory contention, as a ullong.
*/ */
#define VIR_DOMAIN_MEMORY_SOFT_LIMIT "soft_limit" #define VIR_DOMAIN_MEMORY_SOFT_LIMIT "soft_limit"
...@@ -1181,7 +1246,7 @@ int virDomainGetBlkioParameters(virDomainPtr domain, ...@@ -1181,7 +1246,7 @@ int virDomainGetBlkioParameters(virDomainPtr domain,
* VIR_DOMAIN_MEMORY_MIN_GUARANTEE: * VIR_DOMAIN_MEMORY_MIN_GUARANTEE:
* *
* Macro for the memory tunable min_guarantee: it represents the minimum * Macro for the memory tunable min_guarantee: it represents the minimum
* memory guaranteed to be reserved for the guest. * memory guaranteed to be reserved for the guest, as a ullong.
*/ */
#define VIR_DOMAIN_MEMORY_MIN_GUARANTEE "min_guarantee" #define VIR_DOMAIN_MEMORY_MIN_GUARANTEE "min_guarantee"
...@@ -1190,7 +1255,7 @@ int virDomainGetBlkioParameters(virDomainPtr domain, ...@@ -1190,7 +1255,7 @@ int virDomainGetBlkioParameters(virDomainPtr domain,
* VIR_DOMAIN_MEMORY_SWAP_HARD_LIMIT: * VIR_DOMAIN_MEMORY_SWAP_HARD_LIMIT:
* *
* Macro for the swap tunable swap_hard_limit: it represents the maximum swap * Macro for the swap tunable swap_hard_limit: it represents the maximum swap
* plus memory the guest can use. This limit has to be more than * plus memory the guest can use, as a ullong. This limit has to be more than
* VIR_DOMAIN_MEMORY_HARD_LIMIT. * VIR_DOMAIN_MEMORY_HARD_LIMIT.
*/ */
......
...@@ -3661,7 +3661,7 @@ esxDomainGetSchedulerParametersFlags(virDomainPtr domain, ...@@ -3661,7 +3661,7 @@ esxDomainGetSchedulerParametersFlags(virDomainPtr domain,
if (STREQ(dynamicProperty->name, "config.cpuAllocation.reservation") && if (STREQ(dynamicProperty->name, "config.cpuAllocation.reservation") &&
! (mask & (1 << 0))) { ! (mask & (1 << 0))) {
snprintf (params[i].field, VIR_TYPED_PARAM_FIELD_LENGTH, "%s", snprintf (params[i].field, VIR_TYPED_PARAM_FIELD_LENGTH, "%s",
"reservation"); VIR_DOMAIN_SCHEDULER_RESERVATION);
params[i].type = VIR_TYPED_PARAM_LLONG; params[i].type = VIR_TYPED_PARAM_LLONG;
...@@ -3677,7 +3677,7 @@ esxDomainGetSchedulerParametersFlags(virDomainPtr domain, ...@@ -3677,7 +3677,7 @@ esxDomainGetSchedulerParametersFlags(virDomainPtr domain,
"config.cpuAllocation.limit") && "config.cpuAllocation.limit") &&
! (mask & (1 << 1))) { ! (mask & (1 << 1))) {
snprintf (params[i].field, VIR_TYPED_PARAM_FIELD_LENGTH, "%s", snprintf (params[i].field, VIR_TYPED_PARAM_FIELD_LENGTH, "%s",
"limit"); VIR_DOMAIN_SCHEDULER_LIMIT);
params[i].type = VIR_TYPED_PARAM_LLONG; params[i].type = VIR_TYPED_PARAM_LLONG;
...@@ -3693,7 +3693,7 @@ esxDomainGetSchedulerParametersFlags(virDomainPtr domain, ...@@ -3693,7 +3693,7 @@ esxDomainGetSchedulerParametersFlags(virDomainPtr domain,
"config.cpuAllocation.shares") && "config.cpuAllocation.shares") &&
! (mask & (1 << 2))) { ! (mask & (1 << 2))) {
snprintf (params[i].field, VIR_TYPED_PARAM_FIELD_LENGTH, "%s", snprintf (params[i].field, VIR_TYPED_PARAM_FIELD_LENGTH, "%s",
"shares"); VIR_DOMAIN_SCHEDULER_SHARES);
params[i].type = VIR_TYPED_PARAM_INT; params[i].type = VIR_TYPED_PARAM_INT;
...@@ -3783,7 +3783,7 @@ esxDomainSetSchedulerParametersFlags(virDomainPtr domain, ...@@ -3783,7 +3783,7 @@ esxDomainSetSchedulerParametersFlags(virDomainPtr domain,
} }
for (i = 0; i < nparams; ++i) { for (i = 0; i < nparams; ++i) {
if (STREQ (params[i].field, "reservation") && if (STREQ (params[i].field, VIR_DOMAIN_SCHEDULER_RESERVATION) &&
params[i].type == VIR_TYPED_PARAM_LLONG) { params[i].type == VIR_TYPED_PARAM_LLONG) {
if (esxVI_Long_Alloc(&spec->cpuAllocation->reservation) < 0) { if (esxVI_Long_Alloc(&spec->cpuAllocation->reservation) < 0) {
goto cleanup; goto cleanup;
...@@ -3797,7 +3797,7 @@ esxDomainSetSchedulerParametersFlags(virDomainPtr domain, ...@@ -3797,7 +3797,7 @@ esxDomainSetSchedulerParametersFlags(virDomainPtr domain,
} }
spec->cpuAllocation->reservation->value = params[i].value.l; spec->cpuAllocation->reservation->value = params[i].value.l;
} else if (STREQ (params[i].field, "limit") && } else if (STREQ (params[i].field, VIR_DOMAIN_SCHEDULER_LIMIT) &&
params[i].type == VIR_TYPED_PARAM_LLONG) { params[i].type == VIR_TYPED_PARAM_LLONG) {
if (esxVI_Long_Alloc(&spec->cpuAllocation->limit) < 0) { if (esxVI_Long_Alloc(&spec->cpuAllocation->limit) < 0) {
goto cleanup; goto cleanup;
...@@ -3812,7 +3812,7 @@ esxDomainSetSchedulerParametersFlags(virDomainPtr domain, ...@@ -3812,7 +3812,7 @@ esxDomainSetSchedulerParametersFlags(virDomainPtr domain,
} }
spec->cpuAllocation->limit->value = params[i].value.l; spec->cpuAllocation->limit->value = params[i].value.l;
} else if (STREQ (params[i].field, "shares") && } else if (STREQ (params[i].field, VIR_DOMAIN_SCHEDULER_SHARES) &&
params[i].type == VIR_TYPED_PARAM_INT) { params[i].type == VIR_TYPED_PARAM_INT) {
if (esxVI_SharesInfo_Alloc(&sharesInfo) < 0 || if (esxVI_SharesInfo_Alloc(&sharesInfo) < 0 ||
esxVI_Int_Alloc(&sharesInfo->shares) < 0) { esxVI_Int_Alloc(&sharesInfo->shares) < 0) {
......
...@@ -3645,17 +3645,20 @@ libxlDomainGetSchedulerParametersFlags(virDomainPtr dom, ...@@ -3645,17 +3645,20 @@ libxlDomainGetSchedulerParametersFlags(virDomainPtr dom,
params[0].value.ui = sc_info.weight; params[0].value.ui = sc_info.weight;
params[0].type = VIR_TYPED_PARAM_UINT; params[0].type = VIR_TYPED_PARAM_UINT;
if (virStrcpyStatic(params[0].field, "weight") == NULL) { if (virStrcpyStatic(params[0].field,
VIR_DOMAIN_SCHEDULER_WEIGHT) == NULL) {
libxlError(VIR_ERR_INTERNAL_ERROR, libxlError(VIR_ERR_INTERNAL_ERROR,
"%s", _("Field weight too long for destination")); _("Field name '%s' too long"),
VIR_DOMAIN_SCHEDULER_WEIGHT);
goto cleanup; goto cleanup;
} }
params[1].value.ui = sc_info.cap; params[1].value.ui = sc_info.cap;
params[1].type = VIR_TYPED_PARAM_UINT; params[1].type = VIR_TYPED_PARAM_UINT;
if (virStrcpyStatic(params[1].field, "cap") == NULL) { if (virStrcpyStatic(params[1].field, VIR_DOMAIN_SCHEDULER_CAP) == NULL) {
libxlError(VIR_ERR_INTERNAL_ERROR, libxlError(VIR_ERR_INTERNAL_ERROR,
"%s", _("Field cap too long for destination")); _("Field name '%s' too long"),
VIR_DOMAIN_SCHEDULER_CAP);
goto cleanup; goto cleanup;
} }
...@@ -3730,7 +3733,7 @@ libxlDomainSetSchedulerParametersFlags(virDomainPtr dom, ...@@ -3730,7 +3733,7 @@ libxlDomainSetSchedulerParametersFlags(virDomainPtr dom,
for (i = 0; i < nparams; ++i) { for (i = 0; i < nparams; ++i) {
virTypedParameterPtr param = &params[i]; virTypedParameterPtr param = &params[i];
if (STREQ(param->field, "weight")) { if (STREQ(param->field, VIR_DOMAIN_SCHEDULER_WEIGHT)) {
if (param->type != VIR_TYPED_PARAM_UINT) { if (param->type != VIR_TYPED_PARAM_UINT) {
libxlError(VIR_ERR_INVALID_ARG, "%s", libxlError(VIR_ERR_INVALID_ARG, "%s",
_("invalid type for weight tunable, expected a 'uint'")); _("invalid type for weight tunable, expected a 'uint'"));
...@@ -3738,7 +3741,7 @@ libxlDomainSetSchedulerParametersFlags(virDomainPtr dom, ...@@ -3738,7 +3741,7 @@ libxlDomainSetSchedulerParametersFlags(virDomainPtr dom,
} }
sc_info.weight = params[i].value.ui; sc_info.weight = params[i].value.ui;
} else if (STREQ(param->field, "cap")) { } else if (STREQ(param->field, VIR_DOMAIN_SCHEDULER_CAP)) {
if (param->type != VIR_TYPED_PARAM_UINT) { if (param->type != VIR_TYPED_PARAM_UINT) {
libxlError(VIR_ERR_INVALID_ARG, "%s", libxlError(VIR_ERR_INVALID_ARG, "%s",
_("invalid type for cap tunable, expected a 'uint'")); _("invalid type for cap tunable, expected a 'uint'"));
......
...@@ -2524,7 +2524,7 @@ lxcSetSchedulerParametersFlags(virDomainPtr domain, ...@@ -2524,7 +2524,7 @@ lxcSetSchedulerParametersFlags(virDomainPtr domain,
for (i = 0; i < nparams; i++) { for (i = 0; i < nparams; i++) {
virTypedParameterPtr param = &params[i]; virTypedParameterPtr param = &params[i];
if (STRNEQ(param->field, "cpu_shares")) { if (STRNEQ(param->field, VIR_DOMAIN_SCHEDULER_CPU_SHARES)) {
lxcError(VIR_ERR_INVALID_ARG, lxcError(VIR_ERR_INVALID_ARG,
_("Invalid parameter `%s'"), param->field); _("Invalid parameter `%s'"), param->field);
goto cleanup; goto cleanup;
...@@ -2603,7 +2603,8 @@ lxcGetSchedulerParametersFlags(virDomainPtr domain, ...@@ -2603,7 +2603,8 @@ lxcGetSchedulerParametersFlags(virDomainPtr domain,
if (virCgroupGetCpuShares(group, &val) != 0) if (virCgroupGetCpuShares(group, &val) != 0)
goto cleanup; goto cleanup;
params[0].value.ul = val; params[0].value.ul = val;
if (virStrcpyStatic(params[0].field, "cpu_shares") == NULL) { if (virStrcpyStatic(params[0].field,
VIR_DOMAIN_SCHEDULER_CPU_SHARES) == NULL) {
lxcError(VIR_ERR_INTERNAL_ERROR, lxcError(VIR_ERR_INTERNAL_ERROR,
"%s", _("Field cpu_shares too big for destination")); "%s", _("Field cpu_shares too big for destination"));
goto cleanup; goto cleanup;
......
...@@ -6297,7 +6297,8 @@ static int qemuDomainSetMemoryParameters(virDomainPtr dom, ...@@ -6297,7 +6297,8 @@ static int qemuDomainSetMemoryParameters(virDomainPtr dom,
} }
} else if (STREQ(param->field, VIR_DOMAIN_MEMORY_MIN_GUARANTEE)) { } else if (STREQ(param->field, VIR_DOMAIN_MEMORY_MIN_GUARANTEE)) {
qemuReportError(VIR_ERR_INVALID_ARG, qemuReportError(VIR_ERR_INVALID_ARG,
_("Memory tunable `%s' not implemented"), param->field); _("Memory tunable `%s' not implemented"),
param->field);
ret = -1; ret = -1;
} else { } else {
qemuReportError(VIR_ERR_INVALID_ARG, qemuReportError(VIR_ERR_INVALID_ARG,
...@@ -6696,7 +6697,7 @@ static int qemuSetSchedulerParametersFlags(virDomainPtr dom, ...@@ -6696,7 +6697,7 @@ static int qemuSetSchedulerParametersFlags(virDomainPtr dom,
for (i = 0; i < nparams; i++) { for (i = 0; i < nparams; i++) {
virTypedParameterPtr param = &params[i]; virTypedParameterPtr param = &params[i];
if (STREQ(param->field, "cpu_shares")) { if (STREQ(param->field, VIR_DOMAIN_SCHEDULER_CPU_SHARES)) {
if (param->type != VIR_TYPED_PARAM_ULLONG) { if (param->type != VIR_TYPED_PARAM_ULLONG) {
qemuReportError(VIR_ERR_INVALID_ARG, "%s", qemuReportError(VIR_ERR_INVALID_ARG, "%s",
_("invalid type for cpu_shares tunable, expected a 'ullong'")); _("invalid type for cpu_shares tunable, expected a 'ullong'"));
...@@ -6717,7 +6718,7 @@ static int qemuSetSchedulerParametersFlags(virDomainPtr dom, ...@@ -6717,7 +6718,7 @@ static int qemuSetSchedulerParametersFlags(virDomainPtr dom,
if (flags & VIR_DOMAIN_AFFECT_CONFIG) { if (flags & VIR_DOMAIN_AFFECT_CONFIG) {
vmdef->cputune.shares = params[i].value.ul; vmdef->cputune.shares = params[i].value.ul;
} }
} else if (STREQ(param->field, "vcpu_period")) { } else if (STREQ(param->field, VIR_DOMAIN_SCHEDULER_VCPU_PERIOD)) {
if (param->type != VIR_TYPED_PARAM_ULLONG) { if (param->type != VIR_TYPED_PARAM_ULLONG) {
qemuReportError(VIR_ERR_INVALID_ARG, "%s", qemuReportError(VIR_ERR_INVALID_ARG, "%s",
_("invalid type for vcpu_period tunable," _("invalid type for vcpu_period tunable,"
...@@ -6737,7 +6738,7 @@ static int qemuSetSchedulerParametersFlags(virDomainPtr dom, ...@@ -6737,7 +6738,7 @@ static int qemuSetSchedulerParametersFlags(virDomainPtr dom,
if (flags & VIR_DOMAIN_AFFECT_CONFIG) { if (flags & VIR_DOMAIN_AFFECT_CONFIG) {
vmdef->cputune.period = params[i].value.ul; vmdef->cputune.period = params[i].value.ul;
} }
} else if (STREQ(param->field, "vcpu_quota")) { } else if (STREQ(param->field, VIR_DOMAIN_SCHEDULER_VCPU_QUOTA)) {
if (param->type != VIR_TYPED_PARAM_LLONG) { if (param->type != VIR_TYPED_PARAM_LLONG) {
qemuReportError(VIR_ERR_INVALID_ARG, "%s", qemuReportError(VIR_ERR_INVALID_ARG, "%s",
_("invalid type for vcpu_quota tunable," _("invalid type for vcpu_quota tunable,"
...@@ -6981,11 +6982,11 @@ qemuGetSchedulerParametersFlags(virDomainPtr dom, ...@@ -6981,11 +6982,11 @@ qemuGetSchedulerParametersFlags(virDomainPtr dom,
out: out:
params[0].value.ul = shares; params[0].value.ul = shares;
params[0].type = VIR_TYPED_PARAM_ULLONG; params[0].type = VIR_TYPED_PARAM_ULLONG;
/* XXX make these field names public in libvirt.h */ if (virStrcpyStatic(params[0].field,
if (virStrcpyStatic(params[0].field, "cpu_shares") == NULL) { VIR_DOMAIN_SCHEDULER_CPU_SHARES) == NULL) {
qemuReportError(VIR_ERR_INTERNAL_ERROR, qemuReportError(VIR_ERR_INTERNAL_ERROR,
_("Field name '%s' too long"), _("Field name '%s' too long"),
"cpu_shares"); VIR_DOMAIN_SCHEDULER_CPU_SHARES);
goto cleanup; goto cleanup;
} }
...@@ -6995,10 +6996,11 @@ out: ...@@ -6995,10 +6996,11 @@ out:
if (*nparams > saved_nparams) { if (*nparams > saved_nparams) {
params[1].value.ul = period; params[1].value.ul = period;
params[1].type = VIR_TYPED_PARAM_ULLONG; params[1].type = VIR_TYPED_PARAM_ULLONG;
if (virStrcpyStatic(params[1].field, "vcpu_period") == NULL) { if (virStrcpyStatic(params[1].field,
VIR_DOMAIN_SCHEDULER_VCPU_PERIOD) == NULL) {
qemuReportError(VIR_ERR_INTERNAL_ERROR, qemuReportError(VIR_ERR_INTERNAL_ERROR,
_("Field name '%s' too long"), _("Field name '%s' too long"),
"vcpu_period"); VIR_DOMAIN_SCHEDULER_VCPU_PERIOD);
goto cleanup; goto cleanup;
} }
saved_nparams++; saved_nparams++;
...@@ -7007,10 +7009,11 @@ out: ...@@ -7007,10 +7009,11 @@ out:
if (*nparams > saved_nparams) { if (*nparams > saved_nparams) {
params[2].value.ul = quota; params[2].value.ul = quota;
params[2].type = VIR_TYPED_PARAM_LLONG; params[2].type = VIR_TYPED_PARAM_LLONG;
if (virStrcpyStatic(params[2].field, "vcpu_quota") == NULL) { if (virStrcpyStatic(params[2].field,
VIR_DOMAIN_SCHEDULER_VCPU_QUOTA) == NULL) {
qemuReportError(VIR_ERR_INTERNAL_ERROR, qemuReportError(VIR_ERR_INTERNAL_ERROR,
_("Field name '%s' too long"), _("Field name '%s' too long"),
"vcpu_quota"); VIR_DOMAIN_SCHEDULER_VCPU_QUOTA);
goto cleanup; goto cleanup;
} }
saved_nparams++; saved_nparams++;
......
...@@ -2713,7 +2713,7 @@ testDomainGetSchedulerParamsFlags(virDomainPtr domain, ...@@ -2713,7 +2713,7 @@ testDomainGetSchedulerParamsFlags(virDomainPtr domain,
testError(VIR_ERR_INVALID_ARG, "%s", _("Invalid parameter count")); testError(VIR_ERR_INVALID_ARG, "%s", _("Invalid parameter count"));
goto cleanup; goto cleanup;
} }
strcpy(params[0].field, "weight"); strcpy(params[0].field, VIR_DOMAIN_SCHEDULER_WEIGHT);
params[0].type = VIR_TYPED_PARAM_UINT; params[0].type = VIR_TYPED_PARAM_UINT;
/* XXX */ /* XXX */
/*params[0].value.ui = privdom->weight;*/ /*params[0].value.ui = privdom->weight;*/
...@@ -2759,7 +2759,7 @@ testDomainSetSchedulerParamsFlags(virDomainPtr domain, ...@@ -2759,7 +2759,7 @@ testDomainSetSchedulerParamsFlags(virDomainPtr domain,
} }
for (i = 0; i < nparams; i++) { for (i = 0; i < nparams; i++) {
if (STRNEQ(params[i].field, "weight")) { if (STRNEQ(params[i].field, VIR_DOMAIN_SCHEDULER_WEIGHT)) {
testError(VIR_ERR_INVALID_ARG, "field"); testError(VIR_ERR_INVALID_ARG, "field");
goto cleanup; goto cleanup;
} }
......
...@@ -1203,9 +1203,6 @@ xenHypervisorGetSchedulerType(virDomainPtr domain, int *nparams) ...@@ -1203,9 +1203,6 @@ xenHypervisorGetSchedulerType(virDomainPtr domain, int *nparams)
return schedulertype; return schedulertype;
} }
static const char *str_weight = "weight";
static const char *str_cap = "cap";
/** /**
* xenHypervisorGetSchedulerParameters: * xenHypervisorGetSchedulerParameters:
* @domain: pointer to the Xen Hypervisor block * @domain: pointer to the Xen Hypervisor block
...@@ -1291,17 +1288,21 @@ xenHypervisorGetSchedulerParameters(virDomainPtr domain, ...@@ -1291,17 +1288,21 @@ xenHypervisorGetSchedulerParameters(virDomainPtr domain,
if (ret < 0) if (ret < 0)
return(-1); return(-1);
if (virStrcpyStatic(params[0].field, str_weight) == NULL) { if (virStrcpyStatic(params[0].field,
VIR_DOMAIN_SCHEDULER_WEIGHT) == NULL) {
virXenError(VIR_ERR_INTERNAL_ERROR, virXenError(VIR_ERR_INTERNAL_ERROR,
"Weight %s too big for destination", str_weight); "Weight %s too big for destination",
VIR_DOMAIN_SCHEDULER_WEIGHT);
return -1; return -1;
} }
params[0].type = VIR_TYPED_PARAM_UINT; params[0].type = VIR_TYPED_PARAM_UINT;
params[0].value.ui = op_dom.u.getschedinfo.u.credit.weight; params[0].value.ui = op_dom.u.getschedinfo.u.credit.weight;
if (virStrcpyStatic(params[1].field, str_cap) == NULL) { if (virStrcpyStatic(params[1].field,
VIR_DOMAIN_SCHEDULER_CAP) == NULL) {
virXenError(VIR_ERR_INTERNAL_ERROR, virXenError(VIR_ERR_INTERNAL_ERROR,
"Cap %s too big for destination", str_cap); "Cap %s too big for destination",
VIR_DOMAIN_SCHEDULER_CAP);
return -1; return -1;
} }
params[1].type = VIR_TYPED_PARAM_UINT; params[1].type = VIR_TYPED_PARAM_UINT;
...@@ -1402,7 +1403,7 @@ xenHypervisorSetSchedulerParameters(virDomainPtr domain, ...@@ -1402,7 +1403,7 @@ xenHypervisorSetSchedulerParameters(virDomainPtr domain,
for (i = 0; i < nparams; i++) { for (i = 0; i < nparams; i++) {
memset(&buf, 0, sizeof(buf)); memset(&buf, 0, sizeof(buf));
if (STREQ (params[i].field, str_weight) && if (STREQ (params[i].field, VIR_DOMAIN_SCHEDULER_WEIGHT) &&
params[i].type == VIR_TYPED_PARAM_UINT) { params[i].type == VIR_TYPED_PARAM_UINT) {
val = params[i].value.ui; val = params[i].value.ui;
if ((val < 1) || (val > USHRT_MAX)) { if ((val < 1) || (val > USHRT_MAX)) {
...@@ -1411,7 +1412,7 @@ xenHypervisorSetSchedulerParameters(virDomainPtr domain, ...@@ -1411,7 +1412,7 @@ xenHypervisorSetSchedulerParameters(virDomainPtr domain,
return(-1); return(-1);
} }
op_dom.u.getschedinfo.u.credit.weight = val; op_dom.u.getschedinfo.u.credit.weight = val;
} else if (STREQ (params[i].field, str_cap) && } else if (STREQ (params[i].field, VIR_DOMAIN_SCHEDULER_CAP) &&
params[i].type == VIR_TYPED_PARAM_UINT) { params[i].type == VIR_TYPED_PARAM_UINT) {
val = params[i].value.ui; val = params[i].value.ui;
if (val >= USHRT_MAX) { if (val >= USHRT_MAX) {
......
...@@ -3554,9 +3554,6 @@ error: ...@@ -3554,9 +3554,6 @@ error:
} }
static const char *str_weight = "weight";
static const char *str_cap = "cap";
/** /**
* xenDaemonGetSchedulerParameters: * xenDaemonGetSchedulerParameters:
* @domain: pointer to the Domain block * @domain: pointer to the Domain block
...@@ -3635,18 +3632,21 @@ xenDaemonGetSchedulerParameters(virDomainPtr domain, ...@@ -3635,18 +3632,21 @@ xenDaemonGetSchedulerParameters(virDomainPtr domain,
goto error; goto error;
} }
if (virStrcpyStatic(params[0].field, str_weight) == NULL) { if (virStrcpyStatic(params[0].field,
VIR_DOMAIN_SCHEDULER_WEIGHT) == NULL) {
virXendError(VIR_ERR_INTERNAL_ERROR, virXendError(VIR_ERR_INTERNAL_ERROR,
_("Weight %s too big for destination"), _("Weight %s too big for destination"),
str_weight); VIR_DOMAIN_SCHEDULER_WEIGHT);
goto error; goto error;
} }
params[0].type = VIR_TYPED_PARAM_UINT; params[0].type = VIR_TYPED_PARAM_UINT;
params[0].value.ui = sexpr_int(root, "domain/cpu_weight"); params[0].value.ui = sexpr_int(root, "domain/cpu_weight");
if (virStrcpyStatic(params[1].field, str_cap) == NULL) { if (virStrcpyStatic(params[1].field,
VIR_DOMAIN_SCHEDULER_CAP) == NULL) {
virXendError(VIR_ERR_INTERNAL_ERROR, virXendError(VIR_ERR_INTERNAL_ERROR,
_("Cap %s too big for destination"), str_cap); _("Cap %s too big for destination"),
VIR_DOMAIN_SCHEDULER_CAP);
goto error; goto error;
} }
params[1].type = VIR_TYPED_PARAM_UINT; params[1].type = VIR_TYPED_PARAM_UINT;
...@@ -3727,10 +3727,10 @@ xenDaemonSetSchedulerParameters(virDomainPtr domain, ...@@ -3727,10 +3727,10 @@ xenDaemonSetSchedulerParameters(virDomainPtr domain,
memset(&buf_weight, 0, VIR_UUID_BUFLEN); memset(&buf_weight, 0, VIR_UUID_BUFLEN);
memset(&buf_cap, 0, VIR_UUID_BUFLEN); memset(&buf_cap, 0, VIR_UUID_BUFLEN);
for (i = 0; i < nparams; i++) { for (i = 0; i < nparams; i++) {
if (STREQ (params[i].field, str_weight) && if (STREQ (params[i].field, VIR_DOMAIN_SCHEDULER_WEIGHT) &&
params[i].type == VIR_TYPED_PARAM_UINT) { params[i].type == VIR_TYPED_PARAM_UINT) {
snprintf(buf_weight, sizeof(buf_weight), "%u", params[i].value.ui); snprintf(buf_weight, sizeof(buf_weight), "%u", params[i].value.ui);
} else if (STREQ (params[i].field, str_cap) && } else if (STREQ (params[i].field, VIR_DOMAIN_SCHEDULER_CAP) &&
params[i].type == VIR_TYPED_PARAM_UINT) { params[i].type == VIR_TYPED_PARAM_UINT) {
snprintf(buf_cap, sizeof(buf_cap), "%u", params[i].value.ui); snprintf(buf_cap, sizeof(buf_cap), "%u", params[i].value.ui);
} else { } else {
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册