// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2010, Microsoft Corporation.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
9 10
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/reboot.h>
#include <linux/hyperv.h>
#include <linux/clockchips.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/err.h>

#include <clocksource/hyperv_timer.h>

#include <asm/mshyperv.h>

#include "hyperv_vmbus.h"

25 26
/*
 * Framework/message protocol versions for the Shutdown (SD), TimeSync (TS)
 * and Heartbeat (HB) integration services. A version word packs
 * (major << 16 | minor).
 */
#define SD_MAJOR	3
#define SD_MINOR	0
#define SD_MINOR_1	1
#define SD_MINOR_2	2
#define SD_VERSION_3_1	(SD_MAJOR << 16 | SD_MINOR_1)
#define SD_VERSION_3_2	(SD_MAJOR << 16 | SD_MINOR_2)
#define SD_VERSION	(SD_MAJOR << 16 | SD_MINOR)

#define SD_MAJOR_1	1
#define SD_VERSION_1	(SD_MAJOR_1 << 16 | SD_MINOR)

#define TS_MAJOR	4
#define TS_MINOR	0
#define TS_VERSION	(TS_MAJOR << 16 | TS_MINOR)

#define TS_MAJOR_1	1
#define TS_VERSION_1	(TS_MAJOR_1 << 16 | TS_MINOR)

#define TS_MAJOR_3	3
#define TS_VERSION_3	(TS_MAJOR_3 << 16 | TS_MINOR)

#define HB_MAJOR	3
#define HB_MINOR	0
#define HB_VERSION	(HB_MAJOR << 16 | HB_MINOR)

#define HB_MAJOR_1	1
#define HB_VERSION_1	(HB_MAJOR_1 << 16 | HB_MINOR)

/* Versions actually negotiated with the host; 0 until negotiation runs. */
static int sd_srv_version;
static int ts_srv_version;
static int hb_srv_version;

/*
 * Candidate version lists, newest first, passed to
 * vmbus_prep_negotiate_resp() during ICMSGTYPE_NEGOTIATE handling.
 */
#define SD_VER_COUNT 4
static const int sd_versions[] = {
	SD_VERSION_3_2,
	SD_VERSION_3_1,
	SD_VERSION,
	SD_VERSION_1
};

#define TS_VER_COUNT 3
static const int ts_versions[] = {
	TS_VERSION,
	TS_VERSION_3,
	TS_VERSION_1
};

#define HB_VER_COUNT 2
static const int hb_versions[] = {
	HB_VERSION,
	HB_VERSION_1
};

#define FW_VER_COUNT 2
static const int fw_versions[] = {
	UTIL_FW_VERSION,
	UTIL_WS2K8_FW_VERSION
};
83

84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118
/*
 * Send the "hibernate" udev event in a thread context.
 */
struct hibernate_work_context {
	struct work_struct work;
	struct hv_device *dev;	/* device whose kobject receives the uevent */
};

/* Work item + target device for the deferred hibernate uevent. */
static struct hibernate_work_context hibernate_context;
/* Set once at init from hv_is_hibernation_supported(). */
static bool hibernation_supported;

/* Work handler: emit an EVENT=hibernate udev event on the hv device. */
static void send_hibernate_uevent(struct work_struct *work)
{
	struct hibernate_work_context *hwc =
		container_of(work, struct hibernate_work_context, work);
	char *envp[2] = { "EVENT=hibernate", NULL };

	kobject_uevent_env(&hwc->dev->device.kobj, KOBJ_CHANGE, envp);

	pr_info("Sent hibernation uevent\n");
}

/*
 * Shutdown service init: wire up the hibernate uevent work item and
 * record whether this guest is able to hibernate at all.
 */
static int hv_shutdown_init(struct hv_util_service *srv)
{
	hibernate_context.dev = srv->channel->device_obj;
	INIT_WORK(&hibernate_context.work, send_hibernate_uevent);

	hibernation_supported = hv_is_hibernation_supported();

	return 0;
}

119 120 121
static void shutdown_onchannelcallback(void *context);
static struct hv_util_service util_shutdown = {
	.util_cb = shutdown_onchannelcallback,
122
	.util_init = hv_shutdown_init,
123 124
};

125
static int hv_timesync_init(struct hv_util_service *srv);
126
static int hv_timesync_pre_suspend(void);
127 128
static void hv_timesync_deinit(void);

129 130 131
static void timesync_onchannelcallback(void *context);
static struct hv_util_service util_timesynch = {
	.util_cb = timesync_onchannelcallback,
132
	.util_init = hv_timesync_init,
133
	.util_pre_suspend = hv_timesync_pre_suspend,
134
	.util_deinit = hv_timesync_deinit,
135 136 137 138 139 140 141 142 143 144
};

static void heartbeat_onchannelcallback(void *context);
/* Heartbeat integration service descriptor (no init/deinit hooks needed). */
static struct hv_util_service util_heartbeat = {
	.util_cb = heartbeat_onchannelcallback,
};

static struct hv_util_service util_kvp = {
	.util_cb = hv_kvp_onchannelcallback,
	.util_init = hv_kvp_init,
145 146
	.util_pre_suspend = hv_kvp_pre_suspend,
	.util_pre_resume = hv_kvp_pre_resume,
147 148
	.util_deinit = hv_kvp_deinit,
};
149

150 151 152
static struct hv_util_service util_vss = {
	.util_cb = hv_vss_onchannelcallback,
	.util_init = hv_vss_init,
153 154
	.util_pre_suspend = hv_vss_pre_suspend,
	.util_pre_resume = hv_vss_pre_resume,
155 156 157
	.util_deinit = hv_vss_deinit,
};

158 159 160
static struct hv_util_service util_fcopy = {
	.util_cb = hv_fcopy_onchannelcallback,
	.util_init = hv_fcopy_init,
161 162
	.util_pre_suspend = hv_fcopy_pre_suspend,
	.util_pre_resume = hv_fcopy_pre_resume,
163 164 165
	.util_deinit = hv_fcopy_deinit,
};

166 167 168 169 170
/* Work handler: power the machine off cleanly (forced, per host request). */
static void perform_shutdown(struct work_struct *dummy)
{
	orderly_poweroff(true);
}

171 172 173 174 175
/* Work handler: reboot the machine cleanly. */
static void perform_restart(struct work_struct *dummy)
{
	orderly_reboot();
}

176 177 178 179 180
/*
 * Perform the shutdown operation in a thread context
 * (the channel callback must not block).
 */
static DECLARE_WORK(shutdown_work, perform_shutdown);

181 182 183 184 185
/*
 * Perform the restart operation in a thread context
 * (the channel callback must not block).
 */
static DECLARE_WORK(restart_work, perform_restart);

186
static void shutdown_onchannelcallback(void *context)
187 188
{
	struct vmbus_channel *channel = context;
189
	struct work_struct *work = NULL;
190
	u32 recvlen;
191
	u64 requestid;
192
	u8  *shut_txf_buf = util_shutdown.recv_buffer;
193 194 195 196 197

	struct shutdown_msg_data *shutdown_msg;

	struct icmsg_hdr *icmsghdrp;

198
	vmbus_recvpacket(channel, shut_txf_buf,
199
			 HV_HYP_PAGE_SIZE, &recvlen, &requestid);
200 201

	if (recvlen > 0) {
202
		icmsghdrp = (struct icmsg_hdr *)&shut_txf_buf[
203 204 205
			sizeof(struct vmbuspipe_hdr)];

		if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
206 207 208 209 210 211 212 213
			if (vmbus_prep_negotiate_resp(icmsghdrp, shut_txf_buf,
					fw_versions, FW_VER_COUNT,
					sd_versions, SD_VER_COUNT,
					NULL, &sd_srv_version)) {
				pr_info("Shutdown IC version %d.%d\n",
					sd_srv_version >> 16,
					sd_srv_version & 0xFFFF);
			}
214
		} else {
215 216 217 218
			shutdown_msg =
				(struct shutdown_msg_data *)&shut_txf_buf[
					sizeof(struct vmbuspipe_hdr) +
					sizeof(struct icmsg_hdr)];
219

220 221 222 223 224 225
			/*
			 * shutdown_msg->flags can be 0(shut down), 2(reboot),
			 * or 4(hibernate). It may bitwise-OR 1, which means
			 * performing the request by force. Linux always tries
			 * to perform the request by force.
			 */
226 227 228 229
			switch (shutdown_msg->flags) {
			case 0:
			case 1:
				icmsghdrp->status = HV_S_OK;
230
				work = &shutdown_work;
231
				pr_info("Shutdown request received -"
232
					    " graceful shutdown initiated\n");
233
				break;
234 235 236 237 238 239 240
			case 2:
			case 3:
				icmsghdrp->status = HV_S_OK;
				work = &restart_work;
				pr_info("Restart request received -"
					    " graceful restart initiated\n");
				break;
241 242 243 244 245 246 247 248
			case 4:
			case 5:
				pr_info("Hibernation request received\n");
				icmsghdrp->status = hibernation_supported ?
					HV_S_OK : HV_E_FAIL;
				if (hibernation_supported)
					work = &hibernate_context.work;
				break;
249 250
			default:
				icmsghdrp->status = HV_E_FAIL;
251 252
				pr_info("Shutdown request received -"
					    " Invalid request\n");
253
				break;
254
			}
255 256 257 258 259
		}

		icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION
			| ICMSGHDRFLAG_RESPONSE;

260
		vmbus_sendpacket(channel, shut_txf_buf,
261
				       recvlen, requestid,
262
				       VM_PKT_DATA_INBAND, 0);
263 264
	}

265 266
	if (work)
		schedule_work(work);
267 268
}

269 270 271
/*
 * Set the host time in a process context.
 */
272
static struct work_struct adj_time_work;
273

274 275 276 277 278 279 280 281 282 283
/*
 * The last time sample, received from the host. PTP device responds to
 * requests by using this data and the current partition-wide time reference
 * count.
 */
static struct {
	u64				host_time;
	u64				ref_time;
	spinlock_t			lock;
} host_ts;
284

285
static struct timespec64 hv_get_adj_host_time(void)
286
{
287 288 289
	struct timespec64 ts;
	u64 newtime, reftime;
	unsigned long flags;
290

291
	spin_lock_irqsave(&host_ts.lock, flags);
292
	reftime = hv_read_reference_counter();
293 294 295
	newtime = host_ts.host_time + (reftime - host_ts.ref_time);
	ts = ns_to_timespec64((newtime - WLTIMEDELTA) * 100);
	spin_unlock_irqrestore(&host_ts.lock, flags);
296

297 298 299 300 301 302 303 304
	return ts;
}

static void hv_set_host_time(struct work_struct *work)
{
	struct timespec64 ts = hv_get_adj_host_time();

	do_settimeofday64(&ts);
305 306
}

307 308 309 310 311 312
/*
 * Synchronize time with host after reboot, restore, etc.
 *
 * ICTIMESYNCFLAG_SYNC flag bit indicates reboot, restore events of the VM.
 * After reboot the flag ICTIMESYNCFLAG_SYNC is included in the first time
 * message after the timesync channel is opened. Since the hv_utils module is
313 314 315 316 317 318
 * loaded after hv_vmbus, the first message is usually missed. This bit is
 * considered a hard request to discipline the clock.
 *
 * ICTIMESYNCFLAG_SAMPLE bit indicates a time sample from host. This is
 * typically used as a hint to the guest. The guest is under no obligation
 * to discipline the clock.
319
 */
320
static inline void adj_guesttime(u64 hosttime, u64 reftime, u8 adj_flags)
321
{
322 323
	unsigned long flags;
	u64 cur_reftime;
324

325
	/*
326 327
	 * Save the adjusted time sample from the host and the snapshot
	 * of the current system time.
328
	 */
329 330
	spin_lock_irqsave(&host_ts.lock, flags);

331
	cur_reftime = hv_read_reference_counter();
332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348
	host_ts.host_time = hosttime;
	host_ts.ref_time = cur_reftime;

	/*
	 * TimeSync v4 messages contain reference time (guest's Hyper-V
	 * clocksource read when the time sample was generated), we can
	 * improve the precision by adding the delta between now and the
	 * time of generation. For older protocols we set
	 * reftime == cur_reftime on call.
	 */
	host_ts.host_time += (cur_reftime - reftime);

	spin_unlock_irqrestore(&host_ts.lock, flags);

	/* Schedule work to do do_settimeofday64() */
	if (adj_flags & ICTIMESYNCFLAG_SYNC)
		schedule_work(&adj_time_work);
349 350 351 352 353 354 355 356
}

/*
 * Time Sync Channel message handler.
 */
static void timesync_onchannelcallback(void *context)
{
	struct vmbus_channel *channel = context;
357
	u32 recvlen;
358 359 360
	u64 requestid;
	struct icmsg_hdr *icmsghdrp;
	struct ictimesync_data *timedatap;
361
	struct ictimesync_ref_data *refdata;
362
	u8 *time_txf_buf = util_timesynch.recv_buffer;
363

364
	vmbus_recvpacket(channel, time_txf_buf,
365
			 HV_HYP_PAGE_SIZE, &recvlen, &requestid);
366 367

	if (recvlen > 0) {
368
		icmsghdrp = (struct icmsg_hdr *)&time_txf_buf[
369 370 371
				sizeof(struct vmbuspipe_hdr)];

		if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
372 373 374 375
			if (vmbus_prep_negotiate_resp(icmsghdrp, time_txf_buf,
						fw_versions, FW_VER_COUNT,
						ts_versions, TS_VER_COUNT,
						NULL, &ts_srv_version)) {
376
				pr_info("TimeSync IC version %d.%d\n",
377 378 379
					ts_srv_version >> 16,
					ts_srv_version & 0xFFFF);
			}
380
		} else {
381 382 383 384 385 386 387 388 389 390 391 392 393 394 395
			if (ts_srv_version > TS_VERSION_3) {
				refdata = (struct ictimesync_ref_data *)
					&time_txf_buf[
					sizeof(struct vmbuspipe_hdr) +
					sizeof(struct icmsg_hdr)];

				adj_guesttime(refdata->parenttime,
						refdata->vmreferencetime,
						refdata->flags);
			} else {
				timedatap = (struct ictimesync_data *)
					&time_txf_buf[
					sizeof(struct vmbuspipe_hdr) +
					sizeof(struct icmsg_hdr)];
				adj_guesttime(timedatap->parenttime,
396
					      hv_read_reference_counter(),
397
					      timedatap->flags);
398
			}
399 400 401 402 403
		}

		icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION
			| ICMSGHDRFLAG_RESPONSE;

404
		vmbus_sendpacket(channel, time_txf_buf,
405
				recvlen, requestid,
406
				VM_PKT_DATA_INBAND, 0);
407 408 409
	}
}

410 411 412 413 414 415 416 417
/*
 * Heartbeat functionality.
 * Every two seconds, Hyper-V send us a heartbeat request message.
 * we respond to this message, and Hyper-V knows we are alive.
 */
static void heartbeat_onchannelcallback(void *context)
{
	struct vmbus_channel *channel = context;
418
	u32 recvlen;
419 420 421
	u64 requestid;
	struct icmsg_hdr *icmsghdrp;
	struct heartbeat_msg_data *heartbeat_msg;
422
	u8 *hbeat_txf_buf = util_heartbeat.recv_buffer;
423

424 425 426
	while (1) {

		vmbus_recvpacket(channel, hbeat_txf_buf,
427
				 HV_HYP_PAGE_SIZE, &recvlen, &requestid);
428 429 430

		if (!recvlen)
			break;
431

432
		icmsghdrp = (struct icmsg_hdr *)&hbeat_txf_buf[
433 434 435
				sizeof(struct vmbuspipe_hdr)];

		if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
436 437 438 439 440 441
			if (vmbus_prep_negotiate_resp(icmsghdrp,
					hbeat_txf_buf,
					fw_versions, FW_VER_COUNT,
					hb_versions, HB_VER_COUNT,
					NULL, &hb_srv_version)) {

442
				pr_info("Heartbeat IC version %d.%d\n",
443 444 445
					hb_srv_version >> 16,
					hb_srv_version & 0xFFFF);
			}
446
		} else {
447 448 449 450
			heartbeat_msg =
				(struct heartbeat_msg_data *)&hbeat_txf_buf[
					sizeof(struct vmbuspipe_hdr) +
					sizeof(struct icmsg_hdr)];
451 452 453 454 455 456 457

			heartbeat_msg->seq_num += 1;
		}

		icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION
			| ICMSGHDRFLAG_RESPONSE;

458
		vmbus_sendpacket(channel, hbeat_txf_buf,
459
				       recvlen, requestid,
460
				       VM_PKT_DATA_INBAND, 0);
461 462
	}
}
463

464 465
static int util_probe(struct hv_device *dev,
			const struct hv_vmbus_device_id *dev_id)
466
{
467 468 469 470
	struct hv_util_service *srv =
		(struct hv_util_service *)dev_id->driver_data;
	int ret;

471
	srv->recv_buffer = kmalloc(HV_HYP_PAGE_SIZE * 4, GFP_KERNEL);
472 473
	if (!srv->recv_buffer)
		return -ENOMEM;
474
	srv->channel = dev->channel;
475 476 477
	if (srv->util_init) {
		ret = srv->util_init(srv);
		if (ret) {
478 479
			ret = -ENODEV;
			goto error1;
480 481 482
		}
	}

483 484 485 486 487 488 489
	/*
	 * The set of services managed by the util driver are not performance
	 * critical and do not need batched reading. Furthermore, some services
	 * such as KVP can only handle one message from the host at a time.
	 * Turn off batched reading for all util drivers before we open the
	 * channel.
	 */
490
	set_channel_read_mode(dev->channel, HV_CALL_DIRECT);
491

492
	hv_set_drvdata(dev, srv);
493

494 495 496
	ret = vmbus_open(dev->channel, 4 * HV_HYP_PAGE_SIZE,
			 4 * HV_HYP_PAGE_SIZE, NULL, 0, srv->util_cb,
			 dev->channel);
497 498 499
	if (ret)
		goto error;

500
	return 0;
501 502 503 504 505 506 507

error:
	if (srv->util_deinit)
		srv->util_deinit();
error1:
	kfree(srv->recv_buffer);
	return ret;
508 509 510 511
}

static int util_remove(struct hv_device *dev)
{
512 513 514 515
	struct hv_util_service *srv = hv_get_drvdata(dev);

	if (srv->util_deinit)
		srv->util_deinit();
516
	vmbus_close(dev->channel);
517 518
	kfree(srv->recv_buffer);

519 520 521
	return 0;
}

522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559
/*
 * When we're in util_suspend(), all the userspace processes have been frozen
 * (refer to hibernate() -> freeze_processes()). The userspace is thawed only
 * after the whole resume procedure, including util_resume(), finishes.
 */
static int util_suspend(struct hv_device *dev)
{
	struct hv_util_service *srv = hv_get_drvdata(dev);
	int ret;

	/* Give the service a chance to quiesce before the channel goes away. */
	if (srv->util_pre_suspend) {
		ret = srv->util_pre_suspend();
		if (ret)
			return ret;
	}

	vmbus_close(dev->channel);

	return 0;
}

/* Resume: run the pre-resume hook, then reopen the VMBus channel. */
static int util_resume(struct hv_device *dev)
{
	struct hv_util_service *srv = hv_get_drvdata(dev);
	int ret;

	if (srv->util_pre_resume) {
		ret = srv->util_pre_resume();
		if (ret)
			return ret;
	}

	return vmbus_open(dev->channel, 4 * HV_HYP_PAGE_SIZE,
			  4 * HV_HYP_PAGE_SIZE, NULL, 0, srv->util_cb,
			  dev->channel);
}

560
static const struct hv_vmbus_device_id id_table[] = {
561
	/* Shutdown guid */
562 563 564
	{ HV_SHUTDOWN_GUID,
	  .driver_data = (unsigned long)&util_shutdown
	},
565
	/* Time synch guid */
566 567 568
	{ HV_TS_GUID,
	  .driver_data = (unsigned long)&util_timesynch
	},
569
	/* Heartbeat guid */
570 571 572
	{ HV_HEART_BEAT_GUID,
	  .driver_data = (unsigned long)&util_heartbeat
	},
573
	/* KVP guid */
574 575 576
	{ HV_KVP_GUID,
	  .driver_data = (unsigned long)&util_kvp
	},
577 578 579 580
	/* VSS GUID */
	{ HV_VSS_GUID,
	  .driver_data = (unsigned long)&util_vss
	},
581 582 583 584
	/* File copy GUID */
	{ HV_FCOPY_GUID,
	  .driver_data = (unsigned long)&util_fcopy
	},
585
	{ },
586 587 588 589 590 591
};

MODULE_DEVICE_TABLE(vmbus, id_table);

/* The one and only one */
static  struct hv_driver util_drv = {
592
	.name = "hv_utils",
593 594 595
	.id_table = id_table,
	.probe =  util_probe,
	.remove =  util_remove,
596 597
	.suspend = util_suspend,
	.resume =  util_resume,
598 599 600
	.driver = {
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
601 602
};

603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624
/*
 * The Hyper-V PTP device is read-only: every mutating operation is
 * rejected with -EOPNOTSUPP.
 */
static int hv_ptp_enable(struct ptp_clock_info *info,
			 struct ptp_clock_request *request, int on)
{
	return -EOPNOTSUPP;
}

static int hv_ptp_settime(struct ptp_clock_info *p, const struct timespec64 *ts)
{
	return -EOPNOTSUPP;
}

static int hv_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta)
{
	return -EOPNOTSUPP;
}

static int hv_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	return -EOPNOTSUPP;
}

static int hv_ptp_gettime(struct ptp_clock_info *info, struct timespec64 *ts)
{
625
	*ts = hv_get_adj_host_time();
626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641

	return 0;
}

/* PTP clock description; only gettime64 does real work (see stubs above). */
static struct ptp_clock_info ptp_hyperv_info = {
	.name		= "hyperv",
	.enable         = hv_ptp_enable,
	.adjtime        = hv_ptp_adjtime,
	.adjfreq        = hv_ptp_adjfreq,
	.gettime64      = hv_ptp_gettime,
	.settime64      = hv_ptp_settime,
	.owner		= THIS_MODULE,
};

/* NULL when PTP registration failed or was compiled out. */
static struct ptp_clock *hv_ptp_clock;

642 643
static int hv_timesync_init(struct hv_util_service *srv)
{
644
	/* TimeSync requires Hyper-V clocksource. */
645
	if (!hv_read_reference_counter)
646 647
		return -ENODEV;

648 649
	spin_lock_init(&host_ts.lock);

650
	INIT_WORK(&adj_time_work, hv_set_host_time);
651 652 653 654 655 656 657 658 659 660 661 662 663

	/*
	 * ptp_clock_register() returns NULL when CONFIG_PTP_1588_CLOCK is
	 * disabled but the driver is still useful without the PTP device
	 * as it still handles the ICTIMESYNCFLAG_SYNC case.
	 */
	hv_ptp_clock = ptp_clock_register(&ptp_hyperv_info, NULL);
	if (IS_ERR_OR_NULL(hv_ptp_clock)) {
		pr_err("cannot register PTP clock: %ld\n",
		       PTR_ERR(hv_ptp_clock));
		hv_ptp_clock = NULL;
	}

664 665 666
	return 0;
}

667 668 669 670 671 672 673 674 675 676 677
/* Wait for any in-flight clock-setting work to finish. */
static void hv_timesync_cancel_work(void)
{
	cancel_work_sync(&adj_time_work);
}

/* Called from util_suspend(): quiesce before the channel is closed. */
static int hv_timesync_pre_suspend(void)
{
	hv_timesync_cancel_work();
	return 0;
}

678 679
static void hv_timesync_deinit(void)
{
680 681
	if (hv_ptp_clock)
		ptp_clock_unregister(hv_ptp_clock);
682 683

	hv_timesync_cancel_work();
684 685
}

686 687
/* Module entry point: register the util driver with the VMBus core. */
static int __init init_hyperv_utils(void)
{
	pr_info("Registering HyperV Utility Driver\n");

	return vmbus_driver_register(&util_drv);
}

static void exit_hyperv_utils(void)
{
695
	pr_info("De-Registered HyperV Utility Driver\n");
696

697
	vmbus_driver_unregister(&util_drv);
698 699 700 701 702 703 704
}

/* Module registration and metadata. */
module_init(init_hyperv_utils);
module_exit(exit_hyperv_utils);

MODULE_DESCRIPTION("Hyper-V Utilities");
MODULE_LICENSE("GPL");