Commit 5bff668d authored by Daniel P. Berrangé

src: improve thread naming with human-targeted names

Historically threads have been given a name based on the C function
they run, and this name was only used inside libvirt. With OS-level
thread naming the name is now visible to debuggers, but it also has
to fit into 15 characters on Linux, so function names are too long
in some cases.
Reviewed-by: Michal Privoznik <mprivozn@redhat.com>
Signed-off-by: Daniel P. Berrangé <berrange@redhat.com>
Parent c85256b3
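
For context, the sketch below (illustrative only, not libvirt code) shows the Linux limit the commit message refers to: pthread_setname_np() accepts names of at most 15 characters (16 bytes including the trailing NUL), so names derived from long C function names cannot be applied verbatim. The short, human-targeted names introduced by this commit ("ev-<domid>", "mig-<vm name>", "rpc-worker", ...) are chosen to fit within that limit; the example name and domain id below are made up.

```c
/* Illustrative only (not libvirt code): the 15-character cap on Linux
 * thread names that motivates the short names used in this commit. */
#define _GNU_SOURCE
#include <pthread.h>
#include <stdio.h>

static void *
worker(void *opaque)
{
    return opaque;
}

int
main(void)
{
    pthread_t thr;
    char name[16];          /* 15 visible characters + NUL is the maximum */

    /* A short, human-targeted name in the style of this commit;
     * the domain id 42 is made up. */
    snprintf(name, sizeof(name), "ev-%d", 42);

    if (pthread_create(&thr, NULL, worker, NULL) != 0)
        return 1;

    /* Fails with ERANGE if the name exceeds 15 characters. */
    if (pthread_setname_np(thr, name) != 0)
        fprintf(stderr, "failed to set thread name '%s'\n", name);

    pthread_join(thr, NULL);
    return 0;
}
```

Compile with -pthread; a name longer than 15 characters would make pthread_setname_np() fail with ERANGE rather than being silently truncated, which is why the commit picks abbreviated names up front.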
@@ -664,6 +664,7 @@ libxlDomainEventHandler(void *data, VIR_LIBXL_EVENT_CONST libxl_event *event)
     virThread thread;
     g_autoptr(libxlDriverConfig) cfg = NULL;
     int ret = -1;
+    g_autofree char *name = NULL;

     if (event->type != LIBXL_EVENT_TYPE_DOMAIN_SHUTDOWN &&
         event->type != LIBXL_EVENT_TYPE_DOMAIN_DEATH) {
@@ -687,12 +688,13 @@ libxlDomainEventHandler(void *data, VIR_LIBXL_EVENT_CONST libxl_event *event)
     shutdown_info->driver = driver;
     shutdown_info->event = (libxl_event *)event;

+    name = g_strdup_printf("ev-%d", event->domid);
     if (event->type == LIBXL_EVENT_TYPE_DOMAIN_SHUTDOWN)
-        ret = virThreadCreate(&thread, false, libxlDomainShutdownThread,
-                              shutdown_info);
+        ret = virThreadCreateFull(&thread, false, libxlDomainShutdownThread,
+                                  name, false, shutdown_info);
     else if (event->type == LIBXL_EVENT_TYPE_DOMAIN_DEATH)
-        ret = virThreadCreate(&thread, false, libxlDomainDeathThread,
-                              shutdown_info);
+        ret = virThreadCreateFull(&thread, false, libxlDomainDeathThread,
+                                  name, false, shutdown_info);

     if (ret < 0) {
         /*
...
@@ -294,6 +294,7 @@ libxlMigrateDstReceive(virNetSocketPtr sock,
     virNetSocketPtr client_sock;
     int recvfd = -1;
     size_t i;
+    g_autofree char *name = NULL;

     /* Accept migration connection */
     if (virNetSocketAccept(sock, &client_sock) < 0 || !client_sock) {
@@ -314,8 +315,13 @@ libxlMigrateDstReceive(virNetSocketPtr sock,
     VIR_FREE(priv->migrationDstReceiveThr);
     if (VIR_ALLOC(priv->migrationDstReceiveThr) < 0)
         goto fail;
-    if (virThreadCreate(priv->migrationDstReceiveThr, true,
-                        libxlDoMigrateDstReceive, args) < 0) {
+
+    name = g_strdup_printf("mig-%s", args->vm->def->name);
+    if (virThreadCreateFull(priv->migrationDstReceiveThr, true,
+                            libxlDoMigrateDstReceive,
+                            name,
+                            false,
+                            args) < 0) {
         virReportError(VIR_ERR_OPERATION_FAILED, "%s",
                        _("Failed to create thread for receiving migration data"));
         goto fail;
@@ -554,6 +560,7 @@ libxlDomainMigrationDstPrepareTunnel3(virConnectPtr dconn,
     char *xmlout = NULL;
     int dataFD[2] = { -1, -1 };
     int ret = -1;
+    g_autofree char *name = NULL;

     if (libxlDomainMigrationPrepareAny(dconn, def, cookiein, cookieinlen,
                                        &mig, &xmlout, &taint_hook) < 0)
@@ -611,7 +618,10 @@ libxlDomainMigrationDstPrepareTunnel3(virConnectPtr dconn,
     VIR_FREE(priv->migrationDstReceiveThr);
     if (VIR_ALLOC(priv->migrationDstReceiveThr) < 0)
         goto error;
-    if (virThreadCreate(priv->migrationDstReceiveThr, true, libxlDoMigrateDstReceive, args) < 0) {
+    name = g_strdup_printf("mig-%s", args->vm->def->name);
+    if (virThreadCreateFull(priv->migrationDstReceiveThr, true,
+                            libxlDoMigrateDstReceive,
+                            name, false, args) < 0) {
         virReportError(VIR_ERR_OPERATION_FAILED, "%s",
                        _("Failed to create thread for receiving migration data"));
         goto endjob;
@@ -910,6 +920,7 @@ libxlMigrationSrcStartTunnel(libxlDriverPrivatePtr driver,
     struct libxlTunnelControl *tc = NULL;
     libxlTunnelMigrationThread *arg = NULL;
     int ret = -1;
+    g_autofree char *name = NULL;

     if (VIR_ALLOC(tc) < 0)
         goto out;
@@ -925,8 +936,10 @@ libxlMigrationSrcStartTunnel(libxlDriverPrivatePtr driver,
     arg->srcFD = tc->dataFD[0];
     /* Write to dest stream */
     arg->st = st;
-    if (virThreadCreate(&tc->thread, true,
-                        libxlTunnel3MigrationSrcFunc, arg) < 0) {
+    name = g_strdup_printf("mig-%s", vm->def->name);
+    if (virThreadCreateFull(&tc->thread, true,
+                            libxlTunnel3MigrationSrcFunc,
+                            name, false, arg) < 0) {
         virReportError(errno, "%s",
                        _("Unable to create tunnel migration thread"));
         goto out;
...
@@ -335,8 +335,8 @@ int lxcSetupFuse(virLXCFusePtr *f, virDomainDefPtr def)

 int lxcStartFuse(virLXCFusePtr fuse)
 {
-    if (virThreadCreate(&fuse->thread, false, lxcFuseRun,
-                        (void *)fuse) < 0) {
+    if (virThreadCreateFull(&fuse->thread, false, lxcFuseRun,
+                            "lxc-fuse", false, (void *)fuse) < 0) {
         lxcFuseDestroy(fuse);
         return -1;
     }
...
@@ -1863,7 +1863,8 @@ nodeStateInitialize(bool privileged,
     udev_monitor_set_receive_buffer_size(priv->udev_monitor,
                                          128 * 1024 * 1024);

-    if (virThreadCreate(&priv->th, true, udevEventHandleThread, NULL) < 0) {
+    if (virThreadCreateFull(&priv->th, true, udevEventHandleThread,
+                            "udev-event", false, NULL) < 0) {
         virReportSystemError(errno, "%s",
                              _("failed to create udev handler thread"));
         goto unlock;
@@ -1889,8 +1890,8 @@ nodeStateInitialize(bool privileged,
     if (udevSetupSystemDev() != 0)
         goto cleanup;

-    if (virThreadCreate(&enumThread, false, nodeStateInitializeEnumerate,
-                        udev) < 0) {
+    if (virThreadCreateFull(&enumThread, false, nodeStateInitializeEnumerate,
+                            "nodedev-init", false, udev) < 0) {
         virReportSystemError(errno, "%s",
                              _("failed to create udev enumerate thread"));
         goto cleanup;
...
@@ -1366,9 +1366,10 @@ virNWFilterDHCPSnoopThread(void *req0)
         }
         tmp = virNetDevGetIndex(req->binding->portdevname, &ifindex);
         threadkey = g_strdup(req->threadkey);
-        worker = virThreadPoolNew(1, 1, 0,
-                                  virNWFilterDHCPDecodeWorker,
-                                  req);
+        worker = virThreadPoolNewFull(1, 1, 0,
+                                      virNWFilterDHCPDecodeWorker,
+                                      "dhcp-decode",
+                                      req);
     }

     /* let creator know how well we initialized */
@@ -1638,8 +1639,8 @@ virNWFilterDHCPSnoopReq(virNWFilterTechDriverPtr techdriver,
     /* prevent thread from holding req */
     virNWFilterSnoopReqLock(req);

-    if (virThreadCreate(&thread, false, virNWFilterDHCPSnoopThread,
-                        req) != 0) {
+    if (virThreadCreateFull(&thread, false, virNWFilterDHCPSnoopThread,
+                            "dhcp-snoop", false, req) != 0) {
         virReportError(VIR_ERR_INTERNAL_ERROR,
                        _("virNWFilterDHCPSnoopReq virThreadCreate "
                          "failed on interface '%s'"), binding->portdevname);
...
@@ -734,10 +734,12 @@ virNWFilterLearnIPAddress(virNWFilterTechDriverPtr techdriver,
     if (rc < 0)
         goto err_free_req;

-    if (virThreadCreate(&thread,
-                        false,
-                        learnIPAddressThread,
-                        req) != 0)
+    if (virThreadCreateFull(&thread,
+                            false,
+                            learnIPAddressThread,
+                            "ip-learn",
+                            false,
+                            req) != 0)
         goto err_dereg_req;

     return 0;
...
@@ -999,7 +999,8 @@ qemuStateInitialize(bool privileged,
     /* must be initialized before trying to reconnect to all the
      * running domains since there might occur some QEMU monitor
      * events that will be dispatched to the worker pool */
-    qemu_driver->workerPool = virThreadPoolNew(0, 1, 0, qemuProcessEventHandler, qemu_driver);
+    qemu_driver->workerPool = virThreadPoolNewFull(0, 1, 0, qemuProcessEventHandler,
+                                                   "qemu-event", qemu_driver);
     if (!qemu_driver->workerPool)
         goto error;
...
@@ -3309,9 +3309,11 @@ qemuMigrationSrcStartTunnel(virStreamPtr st,
     io->wakeupRecvFD = wakeupFD[0];
     io->wakeupSendFD = wakeupFD[1];

-    if (virThreadCreate(&io->thread, true,
-                        qemuMigrationSrcIOFunc,
-                        io) < 0) {
+    if (virThreadCreateFull(&io->thread, true,
+                            qemuMigrationSrcIOFunc,
+                            "qemu-mig-tunnel",
+                            false,
+                            io) < 0) {
         virReportSystemError(errno, "%s",
                              _("Unable to create migration thread"));
         goto error;
...
@@ -516,13 +516,16 @@ qemuProcessShutdownOrReboot(virQEMUDriverPtr driver,
     qemuDomainObjPrivatePtr priv = vm->privateData;

     if (priv->fakeReboot) {
+        g_autofree char *name = g_strdup_printf("reboot-%s", vm->def->name);
         qemuDomainSetFakeReboot(driver, vm, false);
         virObjectRef(vm);
         virThread th;
-        if (virThreadCreate(&th,
-                            false,
-                            qemuProcessFakeReboot,
-                            vm) < 0) {
+        if (virThreadCreateFull(&th,
+                                false,
+                                qemuProcessFakeReboot,
+                                name,
+                                false,
+                                vm) < 0) {
             VIR_ERROR(_("Failed to create reboot thread, killing domain"));
             ignore_value(qemuProcessKill(vm, VIR_QEMU_PROCESS_KILL_NOWAIT));
             priv->pausedShutdown = false;
@@ -8223,6 +8226,7 @@ qemuProcessReconnectHelper(virDomainObjPtr obj,
     virThread thread;
     struct qemuProcessReconnectData *src = opaque;
     struct qemuProcessReconnectData *data;
+    g_autofree char *name = NULL;

     /* If the VM was inactive, we don't need to reconnect */
     if (!obj->pid)
@@ -8242,7 +8246,10 @@ qemuProcessReconnectHelper(virDomainObjPtr obj,
     virObjectLock(obj);
     virObjectRef(obj);
-    if (virThreadCreate(&thread, false, qemuProcessReconnect, data) < 0) {
+
+    name = g_strdup_printf("init-%s", obj->def->name);
+    if (virThreadCreateFull(&thread, false, qemuProcessReconnect,
+                            name, false, data) < 0) {
         virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                        _("Could not create thread. QEMU initialization "
                          "might be incomplete"));
...
@@ -713,7 +713,8 @@ static void daemonReloadHandler(virNetDaemonPtr dmn G_GNUC_UNUSED,
         return;
     }

-    if (virThreadCreate(&thr, false, daemonReloadHandlerThread, NULL) < 0) {
+    if (virThreadCreateFull(&thr, false, daemonReloadHandlerThread,
+                            "daemon-reload", false, NULL) < 0) {
         /*
          * Not much we can do on error here except log it.
          */
@@ -770,7 +771,8 @@ static void daemonStop(virNetDaemonPtr dmn)
 {
     virThread thr;
     virObjectRef(dmn);
-    if (virThreadCreate(&thr, false, daemonStopWorker, dmn) < 0)
+    if (virThreadCreateFull(&thr, false, daemonStopWorker,
+                            "daemon-stop", false, dmn) < 0)
         virObjectUnref(dmn);
 }

@@ -876,7 +878,8 @@ static int daemonStateInit(virNetDaemonPtr dmn)
 {
     virThread thr;
     virObjectRef(dmn);
-    if (virThreadCreate(&thr, false, daemonRunStateInit, dmn) < 0) {
+    if (virThreadCreateFull(&thr, false, daemonRunStateInit,
+                            "daemon-init", false, dmn) < 0) {
         virObjectUnref(dmn);
         return -1;
     }
...
@@ -367,10 +367,11 @@ virNetServerPtr virNetServerNew(const char *name,
     if (!(srv = virObjectLockableNew(virNetServerClass)))
         return NULL;

-    if (!(srv->workers = virThreadPoolNew(min_workers, max_workers,
-                                          priority_workers,
-                                          virNetServerHandleJob,
-                                          srv)))
+    if (!(srv->workers = virThreadPoolNewFull(min_workers, max_workers,
+                                              priority_workers,
+                                              virNetServerHandleJob,
+                                              "rpc-worker",
+                                              srv)))
         goto error;

     srv->name = g_strdup(name);
...
@@ -334,8 +334,8 @@ createVport(virStoragePoolDefPtr def,
     memcpy(cbdata->pool_uuid, def->uuid, VIR_UUID_BUFLEN);
     cbdata->fchost_name = g_steal_pointer(&name);

-    if (virThreadCreate(&thread, false, virStoragePoolFCRefreshThread,
-                        cbdata) < 0) {
+    if (virThreadCreateFull(&thread, false, virStoragePoolFCRefreshThread,
+                            "scsi-refresh", false, cbdata) < 0) {
         /* Oh well - at least someone can still refresh afterwards */
         VIR_DEBUG("Failed to create FC Pool Refresh Thread");
         virStoragePoolFCRefreshDataFree(cbdata);
...
@@ -2367,8 +2367,8 @@ virStorageVolFDStreamCloseCb(virStreamPtr st G_GNUC_UNUSED,
 {
     virThread thread;

-    if (virThreadCreate(&thread, false, virStorageVolPoolRefreshThread,
-                        opaque) < 0) {
+    if (virThreadCreateFull(&thread, false, virStorageVolPoolRefreshThread,
+                            "vol-refresh", false, opaque) < 0) {
         /* Not much else can be done */
         VIR_ERROR(_("Failed to create thread to handle pool refresh"));
         goto error;
...
@@ -2620,8 +2620,9 @@ virCommandRunAsync(virCommandPtr cmd, pid_t *pid)
         /* clear any error so we can catch if the helper thread reports one */
         cmd->has_error = 0;
         if (VIR_ALLOC(cmd->asyncioThread) < 0 ||
-            virThreadCreate(cmd->asyncioThread, true,
-                            virCommandDoAsyncIOHelper, cmd) < 0) {
+            virThreadCreateFull(cmd->asyncioThread, true,
+                                virCommandDoAsyncIOHelper,
+                                "cmd-async-io", false, cmd) < 0) {
             virReportSystemError(errno, "%s",
                                  _("Unable to create thread "
                                    "to process command's IO"));
...
@@ -1134,10 +1134,12 @@ static int virFDStreamOpenInternal(virStreamPtr st,
             goto error;
         }

-        if (virThreadCreate(fdst->thread,
-                            true,
-                            virFDStreamThread,
-                            threadData) < 0)
+        if (virThreadCreateFull(fdst->thread,
+                                true,
+                                virFDStreamThread,
+                                "fd-stream",
+                                false,
+                                threadData) < 0)
             goto error;
     }
...
@@ -220,9 +220,11 @@ int virNodeSuspend(unsigned int target,
     if (virNodeSuspendSetNodeWakeup(duration) < 0)
         goto cleanup;

-    if (virThreadCreate(&thread, false,
-                        virNodeSuspendHelper,
-                        (void *)cmdString) < 0) {
+    if (virThreadCreateFull(&thread, false,
+                            virNodeSuspendHelper,
+                            "node-suspend",
+                            false,
+                            (void *)cmdString) < 0) {
         virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                        _("Failed to create thread to suspend the host"));
         goto cleanup;
...
@@ -54,7 +54,7 @@ struct _virThreadPool {
     bool quit;

     virThreadPoolJobFunc jobFunc;
-    const char *jobFuncName;
+    const char *jobName;
     void *jobOpaque;
     virThreadPoolJobList jobList;
     size_t jobQueueDepth;
@@ -187,6 +187,7 @@ virThreadPoolExpand(virThreadPoolPtr pool, size_t gain, bool priority)
         return -1;

     for (i = 0; i < gain; i++) {
+        g_autofree char *name = NULL;
         if (VIR_ALLOC(data) < 0)
             goto error;

@@ -194,10 +195,15 @@ virThreadPoolExpand(virThreadPoolPtr pool, size_t gain, bool priority)
         data->cond = priority ? &pool->prioCond : &pool->cond;
         data->priority = priority;

+        if (priority)
+            name = g_strdup_printf("prio-%s", pool->jobName);
+        else
+            name = g_strdup(pool->jobName);
+
         if (virThreadCreateFull(&(*workers)[i],
                                 false,
                                 virThreadPoolWorker,
-                                pool->jobFuncName,
+                                name,
                                 true,
                                 data) < 0) {
             VIR_FREE(data);
@@ -218,7 +224,7 @@ virThreadPoolNewFull(size_t minWorkers,
                      size_t maxWorkers,
                      size_t prioWorkers,
                      virThreadPoolJobFunc func,
-                     const char *funcName,
+                     const char *name,
                      void *opaque)
 {
     virThreadPoolPtr pool;
@@ -232,7 +238,7 @@ virThreadPoolNewFull(size_t minWorkers,
     pool->jobList.tail = pool->jobList.head = NULL;

     pool->jobFunc = func;
-    pool->jobFuncName = funcName;
+    pool->jobName = name;
     pool->jobOpaque = opaque;

     if (virMutexInit(&pool->mutex) < 0)
...
@@ -35,7 +35,7 @@ virThreadPoolPtr virThreadPoolNewFull(size_t minWorkers,
                                       size_t maxWorkers,
                                       size_t prioWorkers,
                                       virThreadPoolJobFunc func,
-                                      const char *funcName,
+                                      const char *name,
                                       void *opaque) ATTRIBUTE_NONNULL(4);

 size_t virThreadPoolGetMinWorkers(virThreadPoolPtr pool);
...
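
As a usage note, here is a hedged sketch of a caller of the updated virThreadPoolNewFull() declaration shown in the header hunk above. The pool name "my-job" and the handler myPoolNew/myJobHandler are hypothetical, and the job-function signature is assumed to be the usual (jobdata, opaque) pair; per the virthreadpool.c change, ordinary workers would be named "my-job" and the priority worker "prio-my-job".

```c
/* Hypothetical caller of virThreadPoolNewFull() after this commit;
 * "my-job" and myJobHandler are made-up names, not part of the change.
 * Assumes libvirt's internal build environment for the header below. */
#include "virthreadpool.h"

static void
myJobHandler(void *jobdata, void *opaque)
{
    (void)jobdata;   /* a real handler would process the queued job */
    (void)opaque;
}

static virThreadPoolPtr
myPoolNew(void *opaque)
{
    /* 1 to 4 ordinary workers named "my-job", plus 1 priority worker
     * that virThreadPoolExpand() would name "prio-my-job". */
    return virThreadPoolNewFull(1, 4, 1, myJobHandler, "my-job", opaque);
}
```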