diff --git a/drivers/gpu/drm/i915/gvt/Makefile b/drivers/gpu/drm/i915/gvt/Makefile
index b123c20e209740d4f3c3af36dd3ca6835972995e..f5486cb94818c335a78f74cb5662471ece98c3f9 100644
--- a/drivers/gpu/drm/i915/gvt/Makefile
+++ b/drivers/gpu/drm/i915/gvt/Makefile
@@ -3,6 +3,6 @@ GVT_SOURCE := gvt.o aperture_gm.o handlers.o vgpu.o trace_points.o firmware.o \
 	interrupt.o gtt.o cfg_space.o opregion.o mmio.o display.o edid.o \
 	execlist.o scheduler.o sched_policy.o render.o cmd_parser.o
 
-ccflags-y				+= -I$(src) -I$(src)/$(GVT_DIR) -Wall
+ccflags-y				+= -I$(src) -I$(src)/$(GVT_DIR)
 i915-y					+= $(addprefix $(GVT_DIR)/, $(GVT_SOURCE))
 obj-$(CONFIG_DRM_I915_GVT_KVMGT)	+= $(GVT_DIR)/kvmgt.o
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 41b2c3aaa04a5ab82fd72da44369a78b15a0dfa4..51241de5e7a74ce68f7fdf732165c5ec1373f491 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -2414,53 +2414,13 @@ static void add_cmd_entry(struct intel_gvt *gvt, struct cmd_entry *e)
 	hash_add(gvt->cmd_table, &e->hlist, e->info->opcode);
 }
 
-#define GVT_MAX_CMD_LENGTH     20  /* In Dword */
-
-static void trace_cs_command(struct parser_exec_state *s,
-		cycles_t cost_pre_cmd_handler, cycles_t cost_cmd_handler)
-{
-	/* This buffer is used by ftrace to store all commands copied from
-	 * guest gma space. Sometimes commands can cross pages, this should
-	 * not be handled in ftrace logic. So this is just used as a
-	 * 'bounce buffer'
-	 */
-	u32 cmd_trace_buf[GVT_MAX_CMD_LENGTH];
-	int i;
-	u32 cmd_len = cmd_length(s);
-	/* The chosen value of GVT_MAX_CMD_LENGTH are just based on
-	 * following two considerations:
-	 * 1) From observation, most common ring commands is not that long.
-	 *    But there are execeptions. So it indeed makes sence to observe
-	 *    longer commands.
-	 * 2) From the performance and debugging point of view, dumping all
-	 *    contents of very commands is not necessary.
-	 * We mgith shrink GVT_MAX_CMD_LENGTH or remove this trace event in
-	 * future for performance considerations.
-	 */
-	if (unlikely(cmd_len > GVT_MAX_CMD_LENGTH)) {
-		gvt_dbg_cmd("cmd length exceed tracing limitation!\n");
-		cmd_len = GVT_MAX_CMD_LENGTH;
-	}
-
-	for (i = 0; i < cmd_len; i++)
-		cmd_trace_buf[i] = cmd_val(s, i);
-
-	trace_gvt_command(s->vgpu->id, s->ring_id, s->ip_gma, cmd_trace_buf,
-			cmd_len, s->buf_type == RING_BUFFER_INSTRUCTION,
-			cost_pre_cmd_handler, cost_cmd_handler);
-}
-
 /* call the cmd handler, and advance ip */
 static int cmd_parser_exec(struct parser_exec_state *s)
 {
+	struct intel_vgpu *vgpu = s->vgpu;
 	struct cmd_info *info;
 	u32 cmd;
 	int ret = 0;
-	cycles_t t0, t1, t2;
-	struct parser_exec_state s_before_advance_custom;
-	struct intel_vgpu *vgpu = s->vgpu;
-
-	t0 = get_cycles();
 
 	cmd = cmd_val(s, 0);
 
@@ -2471,13 +2431,10 @@ static int cmd_parser_exec(struct parser_exec_state *s)
 		return -EINVAL;
 	}
 
-	gvt_dbg_cmd("%s\n", info->name);
-
 	s->info = info;
 
-	t1 = get_cycles();
-
-	s_before_advance_custom = *s;
+	trace_gvt_command(vgpu->id, s->ring_id, s->ip_gma, s->ip_va,
+			  cmd_length(s), s->buf_type);
 
 	if (info->handler) {
 		ret = info->handler(s);
@@ -2486,9 +2443,6 @@ static int cmd_parser_exec(struct parser_exec_state *s)
 			return ret;
 		}
 	}
-	t2 = get_cycles();
-
-	trace_cs_command(&s_before_advance_custom, t1 - t0, t2 - t1);
 
 	if (!(info->flag & F_IP_ADVANCE_CUSTOM)) {
 		ret = cmd_advance_default(s);
@@ -2522,8 +2476,6 @@ static int command_scan(struct parser_exec_state *s,
 	gma_tail = rb_start + rb_tail;
 	gma_bottom = rb_start +  rb_len;
 
-	gvt_dbg_cmd("scan_start: start=%lx end=%lx\n", gma_head, gma_tail);
-
 	while (s->ip_gma != gma_tail) {
 		if (s->buf_type == RING_BUFFER_INSTRUCTION) {
 			if (!(s->ip_gma >= rb_start) ||
@@ -2552,8 +2504,6 @@ static int command_scan(struct parser_exec_state *s,
 		}
 	}
 
-	gvt_dbg_cmd("scan_end\n");
-
 	return ret;
 }
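
The parser now hands s->ip_va and the dword count straight to trace_gvt_command, whose __dynamic_array (defined in trace.h further down in this patch) replaces the 20-dword bounce buffer and the get_cycles() timing removed above. A minimal userspace sketch of the idea — sizing the record to the command instead of capping it — where the record layout and record_event() are hypothetical stand-ins, not part of the patch:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical variable-length trace record: header plus cmd_len dwords. */
struct cmd_record {
	uint32_t ip_gma;
	uint32_t cmd_len;
	uint32_t raw_cmd[];	/* flexible array, like __dynamic_array() */
};

static struct cmd_record *record_event(uint32_t ip_gma,
				       const uint32_t *cmd_va, uint32_t cmd_len)
{
	struct cmd_record *r = malloc(sizeof(*r) + cmd_len * sizeof(*cmd_va));

	if (!r)
		return NULL;
	r->ip_gma = ip_gma;
	r->cmd_len = cmd_len;
	/* One copy, sized to the command; no fixed 20-dword cap. */
	memcpy(r->raw_cmd, cmd_va, cmd_len * sizeof(*cmd_va));
	return r;
}

int main(void)
{
	uint32_t cmd[3] = { 0x11000005, 0x1, 0x2 };	/* fake MI command */
	struct cmd_record *r = record_event(0x1000, cmd, 3);

	for (uint32_t i = 0; r && i < r->cmd_len; i++)
		printf("%08x ", r->raw_cmd[i]);
	printf("\n");
	free(r);
	return 0;
}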
 
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
index dca989eb2d42ed48f6c13c15fe9d3f8a9cbfaab2..8bba38fa19b8e35d346a6bceca6cb6f78ad41fe4 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -708,53 +708,43 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id,
 int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id)
 {
 	struct intel_vgpu_execlist *execlist = &vgpu->execlist[ring_id];
-	struct execlist_ctx_descriptor_format *desc[2], valid_desc[2];
-	unsigned long valid_desc_bitmap = 0;
-	bool emulate_schedule_in = true;
-	int ret;
-	int i;
+	struct execlist_ctx_descriptor_format desc[2];
+	int i, ret;
 
-	memset(valid_desc, 0, sizeof(valid_desc));
+	desc[0] = *get_desc_from_elsp_dwords(&execlist->elsp_dwords, 1);
+	desc[1] = *get_desc_from_elsp_dwords(&execlist->elsp_dwords, 0);
 
-	desc[0] = get_desc_from_elsp_dwords(&execlist->elsp_dwords, 1);
-	desc[1] = get_desc_from_elsp_dwords(&execlist->elsp_dwords, 0);
+	if (!desc[0].valid) {
+		gvt_vgpu_err("invalid elsp submission, desc0 is invalid\n");
+		goto inv_desc;
+	}
 
-	for (i = 0; i < 2; i++) {
-		if (!desc[i]->valid)
+	for (i = 0; i < ARRAY_SIZE(desc); i++) {
+		if (!desc[i].valid)
 			continue;
-
-		if (!desc[i]->privilege_access) {
+		if (!desc[i].privilege_access) {
 			gvt_vgpu_err("unexpected GGTT elsp submission\n");
-			return -EINVAL;
+			goto inv_desc;
 		}
-
-		/* TODO: add another guest context checks here. */
-		set_bit(i, &valid_desc_bitmap);
-		valid_desc[i] = *desc[i];
-	}
-
-	if (!valid_desc_bitmap) {
-		gvt_vgpu_err("no valid desc in a elsp submission\n");
-		return -EINVAL;
-	}
-
-	if (!test_bit(0, (void *)&valid_desc_bitmap) &&
-			test_bit(1, (void *)&valid_desc_bitmap)) {
-		gvt_vgpu_err("weird elsp submission, desc 0 is not valid\n");
-		return -EINVAL;
 	}
 
 	/* submit workload */
-	for_each_set_bit(i, (void *)&valid_desc_bitmap, 2) {
-		ret = submit_context(vgpu, ring_id, &valid_desc[i],
-				emulate_schedule_in);
+	for (i = 0; i < ARRAY_SIZE(desc); i++) {
+		if (!desc[i].valid)
+			continue;
+		ret = submit_context(vgpu, ring_id, &desc[i], i == 0);
 		if (ret) {
-			gvt_vgpu_err("fail to schedule workload\n");
+			gvt_vgpu_err("failed to submit desc %d\n", i);
 			return ret;
 		}
-		emulate_schedule_in = false;
 	}
+
 	return 0;
+
+inv_desc:
+	gvt_vgpu_err("descriptors content: desc0 %08x %08x desc1 %08x %08x\n",
+		     desc[0].udw, desc[0].ldw, desc[1].udw, desc[1].ldw);
+	return -EINVAL;
 }
 
 static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id)
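
The rewritten intel_vgpu_submit_execlist() copies both ELSP descriptors by value, insists that port 0 carries a valid context, and submits each valid slot in order, emulating the schedule-in event only for the first one. A self-contained sketch of that flow, with simplified stand-ins for the descriptor and the submit path:

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for execlist_ctx_descriptor_format. */
struct desc {
	bool valid;
	bool privilege_access;
};

static int submit_context(const struct desc *d, bool emulate_schedule_in)
{
	printf("submit ctx (valid=%d), emulate_schedule_in=%d\n",
	       d->valid, emulate_schedule_in);
	return 0;
}

static int submit_execlist(struct desc desc[2])
{
	/* The first port must carry a valid context. */
	if (!desc[0].valid)
		return -1;

	for (int i = 0; i < 2; i++) {
		if (!desc[i].valid)
			continue;
		if (!desc[i].privilege_access)	/* reject GGTT submissions */
			return -1;
		/* Only the first context emulates the schedule-in event. */
		if (submit_context(&desc[i], i == 0))
			return -1;
	}
	return 0;
}

int main(void)
{
	struct desc d[2] = { { true, true }, { false, false } };

	return submit_execlist(d);
}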
diff --git a/drivers/gpu/drm/i915/gvt/firmware.c b/drivers/gpu/drm/i915/gvt/firmware.c
index dce8d15f706f58b4cf1019ddc1d5b609c903980c..5dad9298b2d5dbbe7b626895806e6008047bbd6a 100644
--- a/drivers/gpu/drm/i915/gvt/firmware.c
+++ b/drivers/gpu/drm/i915/gvt/firmware.c
@@ -102,13 +102,8 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt)
 
 	p = firmware + h->mmio_offset;
 
-	hash_for_each(gvt->mmio.mmio_info_table, i, e, node) {
-		int j;
-
-		for (j = 0; j < e->length; j += 4)
-			*(u32 *)(p + e->offset + j) =
-				I915_READ_NOTRACE(_MMIO(e->offset + j));
-	}
+	hash_for_each(gvt->mmio.mmio_info_table, i, e, node)
+		*(u32 *)(p + e->offset) = I915_READ_NOTRACE(_MMIO(e->offset));
 
 	memcpy(gvt->firmware.mmio, p, info->mmio_size);
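
With the length field dropped from struct intel_gvt_mmio_info (see mmio.h below), every tracked entry describes exactly one 32-bit register, so the snapshot loop above reads a single dword per entry. A small sketch of the simplified snapshot, with an assumed offset list and a stubbed register read:

#include <stdint.h>
#include <stdio.h>

/* Stub for I915_READ_NOTRACE(): returns a fake register value. */
static uint32_t mmio_read(uint32_t offset)
{
	return offset ^ 0xdeadbeefu;
}

int main(void)
{
	/* Hypothetical tracked offsets; each entry is one 4-byte register. */
	const uint32_t tracked[] = { 0x2030, 0x2358, 0x44200 };
	static uint8_t snapshot[0x50000];	/* assumed snapshot size */

	for (unsigned int i = 0; i < sizeof(tracked) / sizeof(tracked[0]); i++)
		*(uint32_t *)(snapshot + tracked[i]) = mmio_read(tracked[i]);

	printf("0x2030 -> %08x\n", *(uint32_t *)(snapshot + 0x2030));
	return 0;
}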
 
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index c6f0077f590d206e1b65ea0176a5a8645eb58d06..66374dba3b1a29800f5a03eda75d4282c0852337 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -244,15 +244,19 @@ static u64 read_pte64(struct drm_i915_private *dev_priv, unsigned long index)
 	return readq(addr);
 }
 
+static void gtt_invalidate(struct drm_i915_private *dev_priv)
+{
+	mmio_hw_access_pre(dev_priv);
+	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
+	mmio_hw_access_post(dev_priv);
+}
+
 static void write_pte64(struct drm_i915_private *dev_priv,
 		unsigned long index, u64 pte)
 {
 	void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index;
 
 	writeq(pte, addr);
-
-	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
-	POSTING_READ(GFX_FLSH_CNTL_GEN6);
 }
 
 static inline struct intel_gvt_gtt_entry *gtt_get_entry64(void *pt,
@@ -1849,6 +1853,7 @@ static int emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
 	}
 
 	ggtt_set_shadow_entry(ggtt_mm, &m, g_gtt_index);
+	gtt_invalidate(gvt->dev_priv);
 	ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);
 	return 0;
 }
@@ -2301,8 +2306,6 @@ void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu)
 	u32 num_entries;
 	struct intel_gvt_gtt_entry e;
 
-	intel_runtime_pm_get(dev_priv);
-
 	memset(&e, 0, sizeof(struct intel_gvt_gtt_entry));
 	e.type = GTT_TYPE_GGTT_PTE;
 	ops->set_pfn(&e, gvt->gtt.scratch_ggtt_mfn);
@@ -2318,7 +2321,7 @@ void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu)
 	for (offset = 0; offset < num_entries; offset++)
 		ops->set_entry(NULL, &e, index + offset, false, 0, vgpu);
 
-	intel_runtime_pm_put(dev_priv);
+	gtt_invalidate(dev_priv);
 }
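
write_pte64() no longer flushes after every PTE write; callers batch their updates and issue one gtt_invalidate() — a single GFX_FLSH_CNTL_GEN6 write bracketed by the runtime-PM get/put helpers — at the end, as intel_vgpu_reset_ggtt() does above. A sketch of the batching pattern with stubbed primitives:

#include <stdint.h>
#include <stdio.h>

static uint64_t fake_ggtt[256];
static unsigned int flush_count;

static void write_pte64(unsigned long index, uint64_t pte)
{
	fake_ggtt[index] = pte;	/* no per-write flush any more */
}

static void gtt_invalidate(void)
{
	flush_count++;	/* stands in for the GFX_FLSH_CNTL_GEN6 write */
}

int main(void)
{
	/* Point a whole range at a scratch page, then flush once. */
	for (unsigned long i = 0; i < 256; i++)
		write_pte64(i, 0x1000 | 1);
	gtt_invalidate();

	printf("flushes: %u (was 256 with per-PTE flushing)\n", flush_count);
	return 0;
}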
 
 /**
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
index 7dea5e5d556793c4cbf0fbb974819752ae211a69..c27c6838eacae08d03b3551fc7b62b8bceba1273 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.c
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -147,7 +147,9 @@ static int gvt_service_thread(void *data)
 			mutex_unlock(&gvt->lock);
 		}
 
-		if (test_and_clear_bit(INTEL_GVT_REQUEST_SCHED,
+		if (test_bit(INTEL_GVT_REQUEST_SCHED,
+				(void *)&gvt->service_request) ||
+			test_bit(INTEL_GVT_REQUEST_EVENT_SCHED,
 					(void *)&gvt->service_request)) {
 			intel_gvt_schedule(gvt);
 		}
@@ -244,7 +246,7 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
 	gvt_dbg_core("init gvt device\n");
 
 	idr_init(&gvt->vgpu_idr);
-
+	spin_lock_init(&gvt->scheduler.mmio_context_lock);
 	mutex_init(&gvt->lock);
 	gvt->dev_priv = dev_priv;
 
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 930732e5c7806d8970183f5d389c52d388bf2fc3..3a74e79eac2f6c13fef32e1611b539db7b8f46c3 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -165,7 +165,6 @@ struct intel_vgpu {
 	struct list_head workload_q_head[I915_NUM_ENGINES];
 	struct kmem_cache *workloads;
 	atomic_t running_workload_num;
-	ktime_t last_ctx_submit_time;
 	DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES);
 	struct i915_gem_context *shadow_ctx;
 
@@ -196,11 +195,27 @@ struct intel_gvt_fence {
 	unsigned long vgpu_allocated_fence_num;
 };
 
-#define INTEL_GVT_MMIO_HASH_BITS 9
+#define INTEL_GVT_MMIO_HASH_BITS 11
 
 struct intel_gvt_mmio {
-	u32 *mmio_attribute;
+	u8 *mmio_attribute;
+/* Register contains RO bits */
+#define F_RO		(1 << 0)
+/* Register contains graphics address */
+#define F_GMADR		(1 << 1)
+/* Mode mask registers with high 16 bits as the mask bits */
+#define F_MODE_MASK	(1 << 2)
+/* This reg can be accessed by GPU commands */
+#define F_CMD_ACCESS	(1 << 3)
+/* This reg has been accessed by a VM */
+#define F_ACCESSED	(1 << 4)
+/* This reg has been accessed through GPU commands */
+#define F_CMD_ACCESSED	(1 << 5)
+/* This reg could be accessed by unaligned address */
+#define F_UNALIGN	(1 << 6)
+
 	DECLARE_HASHTABLE(mmio_info_table, INTEL_GVT_MMIO_HASH_BITS);
+	unsigned int num_tracked_mmio;
 };
 
 struct intel_gvt_firmware {
@@ -257,7 +272,12 @@ static inline struct intel_gvt *to_gvt(struct drm_i915_private *i915)
 
 enum {
 	INTEL_GVT_REQUEST_EMULATE_VBLANK = 0,
+
+	/* Scheduling triggered by timer */
 	INTEL_GVT_REQUEST_SCHED = 1,
+
+	/* Scheduling triggered by event */
+	INTEL_GVT_REQUEST_EVENT_SCHED = 2,
 };
 
 static inline void intel_gvt_request_service(struct intel_gvt *gvt,
@@ -473,6 +493,80 @@ enum {
 	GVT_FAILSAFE_INSUFFICIENT_RESOURCE,
 };
 
+static inline void mmio_hw_access_pre(struct drm_i915_private *dev_priv)
+{
+	intel_runtime_pm_get(dev_priv);
+}
+
+static inline void mmio_hw_access_post(struct drm_i915_private *dev_priv)
+{
+	intel_runtime_pm_put(dev_priv);
+}
+
+/**
+ * intel_gvt_mmio_set_accessed - mark an MMIO as accessed
+ * @gvt: a GVT device
+ * @offset: register offset
+ *
+ */
+static inline void intel_gvt_mmio_set_accessed(
+			struct intel_gvt *gvt, unsigned int offset)
+{
+	gvt->mmio.mmio_attribute[offset >> 2] |= F_ACCESSED;
+}
+
+/**
+ * intel_gvt_mmio_is_cmd_access - check if an MMIO can be accessed by GPU commands
+ * @gvt: a GVT device
+ * @offset: register offset
+ *
+ */
+static inline bool intel_gvt_mmio_is_cmd_access(
+			struct intel_gvt *gvt, unsigned int offset)
+{
+	return gvt->mmio.mmio_attribute[offset >> 2] & F_CMD_ACCESS;
+}
+
+/**
+ * intel_gvt_mmio_is_unalign - check if an MMIO can be accessed unaligned
+ * @gvt: a GVT device
+ * @offset: register offset
+ *
+ */
+static inline bool intel_gvt_mmio_is_unalign(
+			struct intel_gvt *gvt, unsigned int offset)
+{
+	return gvt->mmio.mmio_attribute[offset >> 2] & F_UNALIGN;
+}
+
+/**
+ * intel_gvt_mmio_set_cmd_accessed - mark an MMIO as accessed by GPU commands
+ * @gvt: a GVT device
+ * @offset: register offset
+ *
+ */
+static inline void intel_gvt_mmio_set_cmd_accessed(
+			struct intel_gvt *gvt, unsigned int offset)
+{
+	gvt->mmio.mmio_attribute[offset >> 2] |= F_CMD_ACCESSED;
+}
+
+/**
+ * intel_gvt_mmio_has_mode_mask - check if an MMIO has a mode mask
+ * @gvt: a GVT device
+ * @offset: register offset
+ *
+ * Returns:
+ * True if the MMIO has a mode mask in its upper 16 bits, false otherwise.
+ *
+ */
+static inline bool intel_gvt_mmio_has_mode_mask(
+			struct intel_gvt *gvt, unsigned int offset)
+{
+	return gvt->mmio.mmio_attribute[offset >> 2] & F_MODE_MASK;
+}
+
+#include "trace.h"
 #include "mpt.h"
 
 #endif
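
Moving the F_* flags next to mmio_attribute makes the layout explicit: one byte of flags per 4-byte register, indexed by offset >> 2, instead of the old u32 per register. A self-contained model of the table and its helpers (the 2 MB BAR size is assumed for illustration):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define F_RO		(1 << 0)
#define F_GMADR		(1 << 1)
#define F_MODE_MASK	(1 << 2)
#define F_CMD_ACCESS	(1 << 3)
#define F_ACCESSED	(1 << 4)
#define F_CMD_ACCESSED	(1 << 5)
#define F_UNALIGN	(1 << 6)

#define MMIO_SIZE	(2 * 1024 * 1024)	/* assumed BAR0 size */

int main(void)
{
	/* One byte of flags per 4-byte register, as in the patch. */
	uint8_t *attr = calloc(MMIO_SIZE / 4, sizeof(*attr));
	unsigned int offset = 0x2358;	/* arbitrary register offset */

	if (!attr)
		return 1;
	attr[offset >> 2] |= F_CMD_ACCESS;	/* new_mmio_info() */
	attr[offset >> 2] |= F_ACCESSED;	/* set_accessed() */

	printf("table size: %u KB (was %u KB as u32)\n",
	       MMIO_SIZE / 4 / 1024, MMIO_SIZE / 1024);
	printf("cmd-accessible: %d\n", !!(attr[offset >> 2] & F_CMD_ACCESS));
	free(attr);
	return 0;
}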
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index c995e540ff96e1f8a18a9232de2b26794fa03aa2..35a9bff6b0cff91fab12d448544192cc26c36286 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -47,21 +47,6 @@
 #define PCH_PP_OFF_DELAYS _MMIO(0xc720c)
 #define PCH_PP_DIVISOR _MMIO(0xc7210)
 
-/* Register contains RO bits */
-#define F_RO		(1 << 0)
-/* Register contains graphics address */
-#define F_GMADR		(1 << 1)
-/* Mode mask registers with high 16 bits as the mask bits */
-#define F_MODE_MASK	(1 << 2)
-/* This reg can be accessed by GPU commands */
-#define F_CMD_ACCESS	(1 << 3)
-/* This reg has been accessed by a VM */
-#define F_ACCESSED	(1 << 4)
-/* This reg has been accessed through GPU commands */
-#define F_CMD_ACCESSED	(1 << 5)
-/* This reg could be accessed by unaligned address */
-#define F_UNALIGN	(1 << 6)
-
 unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt)
 {
 	if (IS_BROADWELL(gvt->dev_priv))
@@ -92,11 +77,22 @@ static void write_vreg(struct intel_vgpu *vgpu, unsigned int offset,
 	memcpy(&vgpu_vreg(vgpu, offset), p_data, bytes);
 }
 
+static struct intel_gvt_mmio_info *find_mmio_info(struct intel_gvt *gvt,
+						  unsigned int offset)
+{
+	struct intel_gvt_mmio_info *e;
+
+	hash_for_each_possible(gvt->mmio.mmio_info_table, e, node, offset) {
+		if (e->offset == offset)
+			return e;
+	}
+	return NULL;
+}
+
 static int new_mmio_info(struct intel_gvt *gvt,
-		u32 offset, u32 flags, u32 size,
+		u32 offset, u8 flags, u32 size,
 		u32 addr_mask, u32 ro_mask, u32 device,
-		int (*read)(struct intel_vgpu *, unsigned int, void *, unsigned int),
-		int (*write)(struct intel_vgpu *, unsigned int, void *, unsigned int))
+		gvt_mmio_func read, gvt_mmio_func write)
 {
 	struct intel_gvt_mmio_info *info, *p;
 	u32 start, end, i;
@@ -116,13 +112,11 @@ static int new_mmio_info(struct intel_gvt *gvt,
 			return -ENOMEM;
 
 		info->offset = i;
-		p = intel_gvt_find_mmio_info(gvt, info->offset);
+		p = find_mmio_info(gvt, info->offset);
 		if (p)
 			gvt_err("dup mmio definition offset %x\n",
 				info->offset);
-		info->size = size;
-		info->length = (i + 4) < end ? 4 : (end - i);
-		info->addr_mask = addr_mask;
+
 		info->ro_mask = ro_mask;
 		info->device = device;
 		info->read = read ? read : intel_vgpu_default_mmio_read;
@@ -130,6 +124,7 @@ static int new_mmio_info(struct intel_gvt *gvt,
 		gvt->mmio.mmio_attribute[info->offset / 4] = flags;
 		INIT_HLIST_NODE(&info->node);
 		hash_add(gvt->mmio.mmio_info_table, &info->node, info->offset);
+		gvt->mmio.num_tracked_mmio++;
 	}
 	return 0;
 }
@@ -209,6 +204,7 @@ static int fence_mmio_read(struct intel_vgpu *vgpu, unsigned int off,
 static int fence_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
 		void *p_data, unsigned int bytes)
 {
+	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 	unsigned int fence_num = offset_to_fence_num(off);
 	int ret;
 
@@ -217,8 +213,10 @@ static int fence_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
 		return ret;
 	write_vreg(vgpu, off, p_data, bytes);
 
+	mmio_hw_access_pre(dev_priv);
 	intel_vgpu_write_fence(vgpu, fence_num,
 			vgpu_vreg64(vgpu, fence_num_to_offset(fence_num)));
+	mmio_hw_access_post(dev_priv);
 	return 0;
 }
 
@@ -300,6 +298,9 @@ static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 
 	intel_gvt_reset_vgpu_locked(vgpu, false, engine_mask);
 
+	/* sw will wait for the device to ack the reset request */
+	vgpu_vreg(vgpu, offset) = 0;
+
 	return 0;
 }
 
@@ -1265,7 +1266,10 @@ static int gen9_trtte_write(struct intel_vgpu *vgpu, unsigned int offset,
 	}
 	write_vreg(vgpu, offset, p_data, bytes);
 	/* TRTTE is not per-context */
+
+	mmio_hw_access_pre(dev_priv);
 	I915_WRITE(_MMIO(offset), vgpu_vreg(vgpu, offset));
+	mmio_hw_access_post(dev_priv);
 
 	return 0;
 }
@@ -1278,7 +1282,9 @@ static int gen9_trtt_chicken_write(struct intel_vgpu *vgpu, unsigned int offset,
 
 	if (val & 1) {
 		/* unblock hw logic */
+		mmio_hw_access_pre(dev_priv);
 		I915_WRITE(_MMIO(offset), val);
+		mmio_hw_access_post(dev_priv);
 	}
 	write_vreg(vgpu, offset, p_data, bytes);
 	return 0;
@@ -1405,7 +1411,20 @@ static int ring_timestamp_mmio_read(struct intel_vgpu *vgpu,
 {
 	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 
+	mmio_hw_access_pre(dev_priv);
 	vgpu_vreg(vgpu, offset) = I915_READ(_MMIO(offset));
+	mmio_hw_access_post(dev_priv);
+	return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
+}
+
+static int instdone_mmio_read(struct intel_vgpu *vgpu,
+		unsigned int offset, void *p_data, unsigned int bytes)
+{
+	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+
+	mmio_hw_access_pre(dev_priv);
+	vgpu_vreg(vgpu, offset) = I915_READ(_MMIO(offset));
+	mmio_hw_access_post(dev_priv);
 	return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
 }
 
@@ -1424,7 +1443,6 @@ static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 
 	execlist->elsp_dwords.data[execlist->elsp_dwords.index] = data;
 	if (execlist->elsp_dwords.index == 3) {
-		vgpu->last_ctx_submit_time = ktime_get();
 		ret = intel_vgpu_submit_execlist(vgpu, ring_id);
 		if(ret)
 			gvt_vgpu_err("fail submit workload on ring %d\n",
@@ -1593,6 +1611,12 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
 	MMIO_RING_DFH(RING_REG, D_ALL, F_CMD_ACCESS, NULL, NULL);
 #undef RING_REG
 
+#define RING_REG(base) (base + 0x6c)
+	MMIO_RING_DFH(RING_REG, D_ALL, 0, instdone_mmio_read, NULL);
+	MMIO_DH(RING_REG(GEN8_BSD2_RING_BASE), D_ALL, instdone_mmio_read, NULL);
+#undef RING_REG
+	MMIO_DH(GEN7_SC_INSTDONE, D_BDW_PLUS, instdone_mmio_read, NULL);
+
 	MMIO_GM_RDR(0x2148, D_ALL, NULL, NULL);
 	MMIO_GM_RDR(CCID, D_ALL, NULL, NULL);
 	MMIO_GM_RDR(0x12198, D_ALL, NULL, NULL);
@@ -1768,10 +1792,6 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
 	MMIO_D(SPRSCALE(PIPE_C), D_ALL);
 	MMIO_D(SPRSURFLIVE(PIPE_C), D_ALL);
 
-	MMIO_F(LGC_PALETTE(PIPE_A, 0), 4 * 256, 0, 0, 0, D_ALL, NULL, NULL);
-	MMIO_F(LGC_PALETTE(PIPE_B, 0), 4 * 256, 0, 0, 0, D_ALL, NULL, NULL);
-	MMIO_F(LGC_PALETTE(PIPE_C, 0), 4 * 256, 0, 0, 0, D_ALL, NULL, NULL);
-
 	MMIO_D(HTOTAL(TRANSCODER_A), D_ALL);
 	MMIO_D(HBLANK(TRANSCODER_A), D_ALL);
 	MMIO_D(HSYNC(TRANSCODER_A), D_ALL);
@@ -2176,7 +2196,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
 	MMIO_DFH(GTFIFODBG, D_ALL, F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(GTFIFOCTL, D_ALL, F_CMD_ACCESS, NULL, NULL);
 	MMIO_DH(FORCEWAKE_MT, D_PRE_SKL, NULL, mul_force_wake_write);
-	MMIO_DH(FORCEWAKE_ACK_HSW, D_HSW | D_BDW, NULL, NULL);
+	MMIO_DH(FORCEWAKE_ACK_HSW, D_BDW, NULL, NULL);
 	MMIO_D(ECOBUS, D_ALL);
 	MMIO_DH(GEN6_RC_CONTROL, D_ALL, NULL, NULL);
 	MMIO_DH(GEN6_RC_STATE, D_ALL, NULL, NULL);
@@ -2208,22 +2228,19 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
 	MMIO_D(GEN6_RC6p_THRESHOLD, D_ALL);
 	MMIO_D(GEN6_RC6pp_THRESHOLD, D_ALL);
 	MMIO_D(GEN6_PMINTRMSK, D_ALL);
-	MMIO_DH(HSW_PWR_WELL_BIOS, D_HSW | D_BDW, NULL, power_well_ctl_mmio_write);
-	MMIO_DH(HSW_PWR_WELL_DRIVER, D_HSW | D_BDW, NULL, power_well_ctl_mmio_write);
-	MMIO_DH(HSW_PWR_WELL_KVMR, D_HSW | D_BDW, NULL, power_well_ctl_mmio_write);
-	MMIO_DH(HSW_PWR_WELL_DEBUG, D_HSW | D_BDW, NULL, power_well_ctl_mmio_write);
-	MMIO_DH(HSW_PWR_WELL_CTL5, D_HSW | D_BDW, NULL, power_well_ctl_mmio_write);
-	MMIO_DH(HSW_PWR_WELL_CTL6, D_HSW | D_BDW, NULL, power_well_ctl_mmio_write);
+	MMIO_DH(HSW_PWR_WELL_BIOS, D_BDW, NULL, power_well_ctl_mmio_write);
+	MMIO_DH(HSW_PWR_WELL_DRIVER, D_BDW, NULL, power_well_ctl_mmio_write);
+	MMIO_DH(HSW_PWR_WELL_KVMR, D_BDW, NULL, power_well_ctl_mmio_write);
+	MMIO_DH(HSW_PWR_WELL_DEBUG, D_BDW, NULL, power_well_ctl_mmio_write);
+	MMIO_DH(HSW_PWR_WELL_CTL5, D_BDW, NULL, power_well_ctl_mmio_write);
+	MMIO_DH(HSW_PWR_WELL_CTL6, D_BDW, NULL, power_well_ctl_mmio_write);
 
 	MMIO_D(RSTDBYCTL, D_ALL);
 
 	MMIO_DH(GEN6_GDRST, D_ALL, NULL, gdrst_mmio_write);
 	MMIO_F(FENCE_REG_GEN6_LO(0), 0x80, 0, 0, 0, D_ALL, fence_mmio_read, fence_mmio_write);
-	MMIO_F(VGT_PVINFO_PAGE, VGT_PVINFO_SIZE, F_UNALIGN, 0, 0, D_ALL, pvinfo_mmio_read, pvinfo_mmio_write);
 	MMIO_DH(CPU_VGACNTRL, D_ALL, NULL, vga_control_mmio_write);
 
-	MMIO_F(MCHBAR_MIRROR_BASE_SNB, 0x40000, 0, 0, 0, D_ALL, NULL, NULL);
-
 	MMIO_D(TILECTL, D_ALL);
 
 	MMIO_D(GEN6_UCGCTL1, D_ALL);
@@ -2231,7 +2248,6 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
 
 	MMIO_F(0x4f000, 0x90, 0, 0, 0, D_ALL, NULL, NULL);
 
-	MMIO_D(GEN6_PCODE_MAILBOX, D_PRE_BDW);
 	MMIO_D(GEN6_PCODE_DATA, D_ALL);
 	MMIO_D(0x13812c, D_ALL);
 	MMIO_DH(GEN7_ERR_INT, D_ALL, NULL, NULL);
@@ -2310,14 +2326,13 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
 	MMIO_D(0x1a054, D_ALL);
 
 	MMIO_D(0x44070, D_ALL);
-	MMIO_DFH(0x215c, D_HSW_PLUS, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(0x215c, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(0x2178, D_ALL, F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(0x217c, D_ALL, F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(0x12178, D_ALL, F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(0x1217c, D_ALL, F_CMD_ACCESS, NULL, NULL);
 
-	MMIO_F(0x2290, 8, F_CMD_ACCESS, 0, 0, D_HSW_PLUS, NULL, NULL);
-	MMIO_DFH(GEN7_OACONTROL, D_HSW, F_CMD_ACCESS, NULL, NULL);
+	MMIO_F(0x2290, 8, F_CMD_ACCESS, 0, 0, D_BDW_PLUS, NULL, NULL);
 	MMIO_D(0x2b00, D_BDW_PLUS);
 	MMIO_D(0x2360, D_BDW_PLUS);
 	MMIO_F(0x5200, 32, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
@@ -2754,7 +2769,6 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
 	MMIO_D(0x72380, D_SKL_PLUS);
 	MMIO_D(0x7039c, D_SKL_PLUS);
 
-	MMIO_F(0x80000, 0x3000, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
 	MMIO_D(0x8f074, D_SKL | D_KBL);
 	MMIO_D(0x8f004, D_SKL | D_KBL);
 	MMIO_D(0x8f034, D_SKL | D_KBL);
@@ -2828,26 +2842,36 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
 	return 0;
 }
 
-/**
- * intel_gvt_find_mmio_info - find MMIO information entry by aligned offset
- * @gvt: GVT device
- * @offset: register offset
- *
- * This function is used to find the MMIO information entry from hash table
- *
- * Returns:
- * pointer to MMIO information entry, NULL if not exists
- */
-struct intel_gvt_mmio_info *intel_gvt_find_mmio_info(struct intel_gvt *gvt,
-	unsigned int offset)
-{
-	struct intel_gvt_mmio_info *e;
+/* Special MMIO blocks. */
+static struct gvt_mmio_block {
+	unsigned int device;
+	i915_reg_t   offset;
+	unsigned int size;
+	gvt_mmio_func read;
+	gvt_mmio_func write;
+} gvt_mmio_blocks[] = {
+	{D_SKL_PLUS, _MMIO(CSR_MMIO_START_RANGE), 0x3000, NULL, NULL},
+	{D_ALL, _MMIO(MCHBAR_MIRROR_BASE_SNB), 0x40000, NULL, NULL},
+	{D_ALL, _MMIO(VGT_PVINFO_PAGE), VGT_PVINFO_SIZE,
+		pvinfo_mmio_read, pvinfo_mmio_write},
+	{D_ALL, LGC_PALETTE(PIPE_A, 0), 1024, NULL, NULL},
+	{D_ALL, LGC_PALETTE(PIPE_B, 0), 1024, NULL, NULL},
+	{D_ALL, LGC_PALETTE(PIPE_C, 0), 1024, NULL, NULL},
+};
 
-	WARN_ON(!IS_ALIGNED(offset, 4));
+static struct gvt_mmio_block *find_mmio_block(struct intel_gvt *gvt,
+					      unsigned int offset)
+{
+	unsigned long device = intel_gvt_get_device_type(gvt);
+	struct gvt_mmio_block *block = gvt_mmio_blocks;
+	int i;
 
-	hash_for_each_possible(gvt->mmio.mmio_info_table, e, node, offset) {
-		if (e->offset == offset)
-			return e;
+	for (i = 0; i < ARRAY_SIZE(gvt_mmio_blocks); i++, block++) {
+		if (!(device & block->device))
+			continue;
+		if (offset >= INTEL_GVT_MMIO_OFFSET(block->offset) &&
+		    offset < INTEL_GVT_MMIO_OFFSET(block->offset) + block->size)
+			return block;
 	}
 	return NULL;
 }
@@ -2887,9 +2911,10 @@ int intel_gvt_setup_mmio_info(struct intel_gvt *gvt)
 {
 	struct intel_gvt_device_info *info = &gvt->device_info;
 	struct drm_i915_private *dev_priv = gvt->dev_priv;
+	int size = info->mmio_size / 4 * sizeof(*gvt->mmio.mmio_attribute);
 	int ret;
 
-	gvt->mmio.mmio_attribute = vzalloc(info->mmio_size);
+	gvt->mmio.mmio_attribute = vzalloc(size);
 	if (!gvt->mmio.mmio_attribute)
 		return -ENOMEM;
 
@@ -2910,77 +2935,15 @@ int intel_gvt_setup_mmio_info(struct intel_gvt *gvt)
 		if (ret)
 			goto err;
 	}
+
+	gvt_dbg_mmio("traced %u virtual mmio registers\n",
+		     gvt->mmio.num_tracked_mmio);
 	return 0;
 err:
 	intel_gvt_clean_mmio_info(gvt);
 	return ret;
 }
 
-/**
- * intel_gvt_mmio_set_accessed - mark a MMIO has been accessed
- * @gvt: a GVT device
- * @offset: register offset
- *
- */
-void intel_gvt_mmio_set_accessed(struct intel_gvt *gvt, unsigned int offset)
-{
-	gvt->mmio.mmio_attribute[offset >> 2] |=
-		F_ACCESSED;
-}
-
-/**
- * intel_gvt_mmio_is_cmd_accessed - mark a MMIO could be accessed by command
- * @gvt: a GVT device
- * @offset: register offset
- *
- */
-bool intel_gvt_mmio_is_cmd_access(struct intel_gvt *gvt,
-		unsigned int offset)
-{
-	return gvt->mmio.mmio_attribute[offset >> 2] &
-		F_CMD_ACCESS;
-}
-
-/**
- * intel_gvt_mmio_is_unalign - mark a MMIO could be accessed unaligned
- * @gvt: a GVT device
- * @offset: register offset
- *
- */
-bool intel_gvt_mmio_is_unalign(struct intel_gvt *gvt,
-		unsigned int offset)
-{
-	return gvt->mmio.mmio_attribute[offset >> 2] &
-		F_UNALIGN;
-}
-
-/**
- * intel_gvt_mmio_set_cmd_accessed - mark a MMIO has been accessed by command
- * @gvt: a GVT device
- * @offset: register offset
- *
- */
-void intel_gvt_mmio_set_cmd_accessed(struct intel_gvt *gvt,
-		unsigned int offset)
-{
-	gvt->mmio.mmio_attribute[offset >> 2] |=
-		F_CMD_ACCESSED;
-}
-
-/**
- * intel_gvt_mmio_has_mode_mask - if a MMIO has a mode mask
- * @gvt: a GVT device
- * @offset: register offset
- *
- * Returns:
- * True if a MMIO has a mode mask in its higher 16 bits, false if it isn't.
- *
- */
-bool intel_gvt_mmio_has_mode_mask(struct intel_gvt *gvt, unsigned int offset)
-{
-	return gvt->mmio.mmio_attribute[offset >> 2] &
-		F_MODE_MASK;
-}
 
 /**
  * intel_vgpu_default_mmio_read - default MMIO read handler
@@ -3032,3 +2995,91 @@ bool intel_gvt_in_force_nonpriv_whitelist(struct intel_gvt *gvt,
 {
 	return in_whitelist(offset);
 }
+
+/**
+ * intel_vgpu_mmio_reg_rw - emulate tracked mmio registers
+ * @vgpu: a vGPU
+ * @offset: register offset
+ * @pdata: data buffer
+ * @bytes: data length
+ * @is_read: true for a read access, false for a write
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset,
+			   void *pdata, unsigned int bytes, bool is_read)
+{
+	struct intel_gvt *gvt = vgpu->gvt;
+	struct intel_gvt_mmio_info *mmio_info;
+	struct gvt_mmio_block *mmio_block;
+	gvt_mmio_func func;
+	int ret;
+
+	if (WARN_ON(bytes > 4))
+		return -EINVAL;
+
+	/*
+	 * Handle special MMIO blocks.
+	 */
+	mmio_block = find_mmio_block(gvt, offset);
+	if (mmio_block) {
+		func = is_read ? mmio_block->read : mmio_block->write;
+		if (func)
+			return func(vgpu, offset, pdata, bytes);
+		goto default_rw;
+	}
+
+	/*
+	 * Normal tracked MMIOs.
+	 */
+	mmio_info = find_mmio_info(gvt, offset);
+	if (!mmio_info) {
+		if (!vgpu->mmio.disable_warn_untrack)
+			gvt_vgpu_err("untracked MMIO %08x len %d\n",
+				     offset, bytes);
+		goto default_rw;
+	}
+
+	if (is_read)
+		return mmio_info->read(vgpu, offset, pdata, bytes);
+	else {
+		u64 ro_mask = mmio_info->ro_mask;
+		u32 old_vreg = 0, old_sreg = 0;
+		u64 data = 0;
+
+		if (intel_gvt_mmio_has_mode_mask(gvt, mmio_info->offset)) {
+			old_vreg = vgpu_vreg(vgpu, offset);
+			old_sreg = vgpu_sreg(vgpu, offset);
+		}
+
+		if (likely(!ro_mask))
+			ret = mmio_info->write(vgpu, offset, pdata, bytes);
+		else if (!~ro_mask) {
+			gvt_vgpu_err("try to write RO reg %x\n", offset);
+			return 0;
+		} else {
+			/* keep the RO bits in the virtual register */
+			memcpy(&data, pdata, bytes);
+			data &= ~ro_mask;
+			data |= vgpu_vreg(vgpu, offset) & ro_mask;
+			ret = mmio_info->write(vgpu, offset, &data, bytes);
+		}
+
+		/* higher 16bits of mode ctl regs are mask bits for change */
+		if (intel_gvt_mmio_has_mode_mask(gvt, mmio_info->offset)) {
+			u32 mask = vgpu_vreg(vgpu, offset) >> 16;
+
+			vgpu_vreg(vgpu, offset) = (old_vreg & ~mask)
+					| (vgpu_vreg(vgpu, offset) & mask);
+			vgpu_sreg(vgpu, offset) = (old_sreg & ~mask)
+					| (vgpu_sreg(vgpu, offset) & mask);
+		}
+	}
+
+	return ret;
+
+default_rw:
+	return is_read ?
+		intel_vgpu_default_mmio_read(vgpu, offset, pdata, bytes) :
+		intel_vgpu_default_mmio_write(vgpu, offset, pdata, bytes);
+}
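
intel_vgpu_mmio_reg_rw() funnels every access through one path: special blocks first, then tracked registers, then the default handlers. Two pieces of arithmetic in its write branch are worth seeing in isolation: RO bits are preserved by merging the current vreg through ro_mask, and mode-control registers treat their upper 16 bits as a write-enable mask for the lower 16. A standalone sketch of both:

#include <stdint.h>
#include <stdio.h>

/* Keep read-only bits from the current value, take the rest from 'new'. */
static uint32_t apply_ro_mask(uint32_t vreg, uint32_t new, uint32_t ro_mask)
{
	return (new & ~ro_mask) | (vreg & ro_mask);
}

/* Mode registers: bits 31:16 select which of bits 15:0 actually change. */
static uint32_t apply_mode_mask(uint32_t old_vreg, uint32_t written)
{
	uint32_t mask = written >> 16;

	return (old_vreg & ~mask) | (written & mask);
}

int main(void)
{
	/* bits 7:0 are RO: they survive an all-ones write untouched */
	printf("%08x\n", apply_ro_mask(0x000000ab, 0xffffffff, 0x000000ff));
	/* only bit 0 is enabled by the mask, so only bit 0 changes */
	printf("%08x\n", apply_mode_mask(0x00000006, 0x00010001));
	return 0;
}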
diff --git a/drivers/gpu/drm/i915/gvt/interrupt.c b/drivers/gpu/drm/i915/gvt/interrupt.c
index 9d6812f0957f04333ffde7ebbfe153f9a1c2391e..7a041b368f68861e552b590a88ca633416802cc3 100644
--- a/drivers/gpu/drm/i915/gvt/interrupt.c
+++ b/drivers/gpu/drm/i915/gvt/interrupt.c
@@ -31,6 +31,7 @@
 
 #include "i915_drv.h"
 #include "gvt.h"
+#include "trace.h"
 
 /* common offset among interrupt control registers */
 #define regbase_to_isr(base)	(base)
@@ -178,8 +179,8 @@ int intel_vgpu_reg_imr_handler(struct intel_vgpu *vgpu,
 	struct intel_gvt_irq_ops *ops = gvt->irq.ops;
 	u32 imr = *(u32 *)p_data;
 
-	gvt_dbg_irq("write IMR %x, new %08x, old %08x, changed %08x\n",
-		    reg, imr, vgpu_vreg(vgpu, reg), vgpu_vreg(vgpu, reg) ^ imr);
+	trace_write_ir(vgpu->id, "IMR", reg, imr, vgpu_vreg(vgpu, reg),
+		       (vgpu_vreg(vgpu, reg) ^ imr));
 
 	vgpu_vreg(vgpu, reg) = imr;
 
@@ -209,8 +210,8 @@ int intel_vgpu_reg_master_irq_handler(struct intel_vgpu *vgpu,
 	u32 ier = *(u32 *)p_data;
 	u32 virtual_ier = vgpu_vreg(vgpu, reg);
 
-	gvt_dbg_irq("write MASTER_IRQ %x, new %08x, old %08x, changed %08x\n",
-		    reg, ier, virtual_ier, virtual_ier ^ ier);
+	trace_write_ir(vgpu->id, "MASTER_IRQ", reg, ier, virtual_ier,
+		       (virtual_ier ^ ier));
 
 	/*
 	 * GEN8_MASTER_IRQ is a special irq register,
@@ -248,8 +249,8 @@ int intel_vgpu_reg_ier_handler(struct intel_vgpu *vgpu,
 	struct intel_gvt_irq_info *info;
 	u32 ier = *(u32 *)p_data;
 
-	gvt_dbg_irq("write IER %x, new %08x, old %08x, changed %08x\n",
-		    reg, ier, vgpu_vreg(vgpu, reg), vgpu_vreg(vgpu, reg) ^ ier);
+	trace_write_ir(vgpu->id, "IER", reg, ier, vgpu_vreg(vgpu, reg),
+		       (vgpu_vreg(vgpu, reg) ^ ier));
 
 	vgpu_vreg(vgpu, reg) = ier;
 
@@ -285,8 +286,8 @@ int intel_vgpu_reg_iir_handler(struct intel_vgpu *vgpu, unsigned int reg,
 		iir_to_regbase(reg));
 	u32 iir = *(u32 *)p_data;
 
-	gvt_dbg_irq("write IIR %x, new %08x, old %08x, changed %08x\n",
-		    reg, iir, vgpu_vreg(vgpu, reg), vgpu_vreg(vgpu, reg) ^ iir);
+	trace_write_ir(vgpu->id, "IIR", reg, iir, vgpu_vreg(vgpu, reg),
+		       (vgpu_vreg(vgpu, reg) ^ iir));
 
 	if (WARN_ON(!info))
 		return -EINVAL;
@@ -411,8 +412,7 @@ static void propagate_event(struct intel_gvt_irq *irq,
 
 	if (!test_bit(bit, (void *)&vgpu_vreg(vgpu,
 					regbase_to_imr(reg_base)))) {
-		gvt_dbg_irq("set bit (%d) for (%s) for vgpu (%d)\n",
-				bit, irq_name[event], vgpu->id);
+		trace_propagate_event(vgpu->id, irq_name[event], bit);
 		set_bit(bit, (void *)&vgpu_vreg(vgpu,
 					regbase_to_iir(reg_base)));
 	}
diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c
index 1ba3bdb093416674c2f44014942971bcdcd8ea9e..980ec8906b1e9f0bac48f6bc2ff92c48aeb6d146 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.c
+++ b/drivers/gpu/drm/i915/gvt/mmio.c
@@ -123,7 +123,6 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
 		void *p_data, unsigned int bytes)
 {
 	struct intel_gvt *gvt = vgpu->gvt;
-	struct intel_gvt_mmio_info *mmio;
 	unsigned int offset = 0;
 	int ret = -EINVAL;
 
@@ -187,32 +186,8 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
 			goto err;
 	}
 
-	mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4));
-	if (mmio) {
-		if (!intel_gvt_mmio_is_unalign(gvt, mmio->offset)) {
-			if (WARN_ON(offset + bytes > mmio->offset + mmio->size))
-				goto err;
-			if (WARN_ON(mmio->offset != offset))
-				goto err;
-		}
-		ret = mmio->read(vgpu, offset, p_data, bytes);
-	} else {
-		ret = intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
-
-		if (!vgpu->mmio.disable_warn_untrack) {
-			gvt_vgpu_err("read untracked MMIO %x(%dB) val %x\n",
-				offset, bytes, *(u32 *)p_data);
-
-			if (offset == 0x206c) {
-				gvt_vgpu_err("------------------------------------------\n");
-				gvt_vgpu_err("likely triggers a gfx reset\n");
-				gvt_vgpu_err("------------------------------------------\n");
-				vgpu->mmio.disable_warn_untrack = true;
-			}
-		}
-	}
-
-	if (ret)
+	ret = intel_vgpu_mmio_reg_rw(vgpu, offset, p_data, bytes, true);
+	if (ret < 0)
 		goto err;
 
 	intel_gvt_mmio_set_accessed(gvt, offset);
@@ -239,9 +214,7 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
 		void *p_data, unsigned int bytes)
 {
 	struct intel_gvt *gvt = vgpu->gvt;
-	struct intel_gvt_mmio_info *mmio;
 	unsigned int offset = 0;
-	u32 old_vreg = 0, old_sreg = 0;
 	int ret = -EINVAL;
 
 	if (vgpu->failsafe) {
@@ -296,66 +269,10 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
 		return ret;
 	}
 
-	mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4));
-	if (!mmio && !vgpu->mmio.disable_warn_untrack)
-		gvt_dbg_mmio("vgpu%d: write untracked MMIO %x len %d val %x\n",
-				vgpu->id, offset, bytes, *(u32 *)p_data);
-
-	if (!intel_gvt_mmio_is_unalign(gvt, offset)) {
-		if (WARN_ON(!IS_ALIGNED(offset, bytes)))
-			goto err;
-	}
-
-	if (mmio) {
-		u64 ro_mask = mmio->ro_mask;
-
-		if (!intel_gvt_mmio_is_unalign(gvt, mmio->offset)) {
-			if (WARN_ON(offset + bytes > mmio->offset + mmio->size))
-				goto err;
-			if (WARN_ON(mmio->offset != offset))
-				goto err;
-		}
-
-		if (intel_gvt_mmio_has_mode_mask(gvt, mmio->offset)) {
-			old_vreg = vgpu_vreg(vgpu, offset);
-			old_sreg = vgpu_sreg(vgpu, offset);
-		}
-
-		if (!ro_mask) {
-			ret = mmio->write(vgpu, offset, p_data, bytes);
-		} else {
-			/* Protect RO bits like HW */
-			u64 data = 0;
-
-			/* all register bits are RO. */
-			if (ro_mask == ~(u64)0) {
-				gvt_vgpu_err("try to write RO reg %x\n",
-					offset);
-				ret = 0;
-				goto out;
-			}
-			/* keep the RO bits in the virtual register */
-			memcpy(&data, p_data, bytes);
-			data &= ~mmio->ro_mask;
-			data |= vgpu_vreg(vgpu, offset) & mmio->ro_mask;
-			ret = mmio->write(vgpu, offset, &data, bytes);
-		}
-
-		/* higher 16bits of mode ctl regs are mask bits for change */
-		if (intel_gvt_mmio_has_mode_mask(gvt, mmio->offset)) {
-			u32 mask = vgpu_vreg(vgpu, offset) >> 16;
-
-			vgpu_vreg(vgpu, offset) = (old_vreg & ~mask)
-				| (vgpu_vreg(vgpu, offset) & mask);
-			vgpu_sreg(vgpu, offset) = (old_sreg & ~mask)
-				| (vgpu_sreg(vgpu, offset) & mask);
-		}
-	} else
-		ret = intel_vgpu_default_mmio_write(vgpu, offset, p_data,
-				bytes);
-	if (ret)
+	ret = intel_vgpu_mmio_reg_rw(vgpu, offset, p_data, bytes, false);
+	if (ret < 0)
 		goto err;
-out:
+
 	intel_gvt_mmio_set_accessed(gvt, offset);
 	mutex_unlock(&gvt->lock);
 	return 0;
@@ -372,20 +289,32 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
  * @vgpu: a vGPU
  *
  */
-void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu)
+void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr)
 {
 	struct intel_gvt *gvt = vgpu->gvt;
 	const struct intel_gvt_device_info *info = &gvt->device_info;
+	void  *mmio = gvt->firmware.mmio;
+
+	if (dmlr) {
+		memcpy(vgpu->mmio.vreg, mmio, info->mmio_size);
+		memcpy(vgpu->mmio.sreg, mmio, info->mmio_size);
 
-	memcpy(vgpu->mmio.vreg, gvt->firmware.mmio, info->mmio_size);
-	memcpy(vgpu->mmio.sreg, gvt->firmware.mmio, info->mmio_size);
+		vgpu_vreg(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0;
 
-	vgpu_vreg(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0;
+		/* set bits 0:2 (Core C-State) to C0 */
+		vgpu_vreg(vgpu, GEN6_GT_CORE_STATUS) = 0;
 
-	/* set the bit 0:2(Core C-State ) to C0 */
-	vgpu_vreg(vgpu, GEN6_GT_CORE_STATUS) = 0;
+		vgpu->mmio.disable_warn_untrack = false;
+	} else {
+#define GVT_GEN8_MMIO_RESET_OFFSET		(0x44200)
+		/* Only reset the engine-related state: registers below
+		 * 0x44200 are restored, so interrupt (DE) and display
+		 * related MMIOs are left untouched.
+		 */
+		memcpy(vgpu->mmio.vreg, mmio, GVT_GEN8_MMIO_RESET_OFFSET);
+		memcpy(vgpu->mmio.sreg, mmio, GVT_GEN8_MMIO_RESET_OFFSET);
+	}
 
-	vgpu->mmio.disable_warn_untrack = false;
 }
 
 /**
@@ -405,7 +334,7 @@ int intel_vgpu_init_mmio(struct intel_vgpu *vgpu)
 
 	vgpu->mmio.sreg = vgpu->mmio.vreg + info->mmio_size;
 
-	intel_vgpu_reset_mmio(vgpu);
+	intel_vgpu_reset_mmio(vgpu, true);
 
 	return 0;
 }
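
intel_vgpu_reset_mmio() now distinguishes a device-model-level reset (dmlr), which restores the whole firmware snapshot, from a GPU reset, which restores only engine state below 0x44200 so display and interrupt registers survive. A sketch of the split, with buffer sizes assumed for illustration:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MMIO_SIZE		0x50000	/* assumed, for illustration */
#define GEN8_MMIO_RESET_OFFSET	0x44200

static uint8_t firmware[MMIO_SIZE];	/* pristine snapshot */
static uint8_t vreg[MMIO_SIZE];		/* vGPU's virtual registers */

static void reset_mmio(bool dmlr)
{
	if (dmlr) {
		/* Device-model reset: everything back to the snapshot. */
		memcpy(vreg, firmware, MMIO_SIZE);
	} else {
		/* GPU reset: engine state only; interrupt and display
		 * registers at and above 0x44200 keep their values.
		 */
		memcpy(vreg, firmware, GEN8_MMIO_RESET_OFFSET);
	}
}

int main(void)
{
	vreg[0x44200] = 0x5a;	/* pretend the guest programmed display */
	reset_mmio(false);
	printf("display reg after GPU reset: %02x\n", vreg[0x44200]);
	reset_mmio(true);
	printf("display reg after dmlr: %02x\n", vreg[0x44200]);
	return 0;
}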
diff --git a/drivers/gpu/drm/i915/gvt/mmio.h b/drivers/gpu/drm/i915/gvt/mmio.h
index 7edd66f38ef987499d7bb2abe44efe960127e102..32cd64ddad2668ed88d707c68f856f4b968bb3c4 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.h
+++ b/drivers/gpu/drm/i915/gvt/mmio.h
@@ -39,36 +39,28 @@
 struct intel_gvt;
 struct intel_vgpu;
 
-#define D_SNB   (1 << 0)
-#define D_IVB   (1 << 1)
-#define D_HSW   (1 << 2)
-#define D_BDW   (1 << 3)
-#define D_SKL	(1 << 4)
-#define D_KBL	(1 << 5)
+#define D_BDW   (1 << 0)
+#define D_SKL	(1 << 1)
+#define D_KBL	(1 << 2)
 
 #define D_GEN9PLUS	(D_SKL | D_KBL)
 #define D_GEN8PLUS	(D_BDW | D_SKL | D_KBL)
-#define D_GEN75PLUS	(D_HSW | D_BDW | D_SKL | D_KBL)
-#define D_GEN7PLUS	(D_IVB | D_HSW | D_BDW | D_SKL | D_KBL)
 
 #define D_SKL_PLUS	(D_SKL | D_KBL)
 #define D_BDW_PLUS	(D_BDW | D_SKL | D_KBL)
-#define D_HSW_PLUS	(D_HSW | D_BDW | D_SKL | D_KBL)
-#define D_IVB_PLUS	(D_IVB | D_HSW | D_BDW | D_SKL | D_KBL)
 
-#define D_PRE_BDW	(D_SNB | D_IVB | D_HSW)
-#define D_PRE_SKL	(D_SNB | D_IVB | D_HSW | D_BDW)
-#define D_ALL		(D_SNB | D_IVB | D_HSW | D_BDW | D_SKL | D_KBL)
+#define D_PRE_SKL	(D_BDW)
+#define D_ALL		(D_BDW | D_SKL | D_KBL)
+
+typedef int (*gvt_mmio_func)(struct intel_vgpu *, unsigned int, void *,
+			     unsigned int);
 
 struct intel_gvt_mmio_info {
 	u32 offset;
-	u32 size;
-	u32 length;
-	u32 addr_mask;
 	u64 ro_mask;
 	u32 device;
-	int (*read)(struct intel_vgpu *, unsigned int, void *, unsigned int);
-	int (*write)(struct intel_vgpu *, unsigned int, void *, unsigned int);
+	gvt_mmio_func read;
+	gvt_mmio_func write;
 	u32 addr_range;
 	struct hlist_node node;
 };
@@ -79,8 +71,6 @@ bool intel_gvt_match_device(struct intel_gvt *gvt, unsigned long device);
 int intel_gvt_setup_mmio_info(struct intel_gvt *gvt);
 void intel_gvt_clean_mmio_info(struct intel_gvt *gvt);
 
-struct intel_gvt_mmio_info *intel_gvt_find_mmio_info(struct intel_gvt *gvt,
-						     unsigned int offset);
 #define INTEL_GVT_MMIO_OFFSET(reg) ({ \
 	typeof(reg) __reg = reg; \
 	u32 *offset = (u32 *)&__reg; \
@@ -88,7 +78,7 @@ struct intel_gvt_mmio_info *intel_gvt_find_mmio_info(struct intel_gvt *gvt,
 })
 
 int intel_vgpu_init_mmio(struct intel_vgpu *vgpu);
-void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu);
+void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr);
 void intel_vgpu_clean_mmio(struct intel_vgpu *vgpu);
 
 int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa);
@@ -97,13 +87,7 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, u64 pa,
 				void *p_data, unsigned int bytes);
 int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, u64 pa,
 				void *p_data, unsigned int bytes);
-bool intel_gvt_mmio_is_cmd_access(struct intel_gvt *gvt,
-				  unsigned int offset);
-bool intel_gvt_mmio_is_unalign(struct intel_gvt *gvt, unsigned int offset);
-void intel_gvt_mmio_set_accessed(struct intel_gvt *gvt, unsigned int offset);
-void intel_gvt_mmio_set_cmd_accessed(struct intel_gvt *gvt,
-				     unsigned int offset);
-bool intel_gvt_mmio_has_mode_mask(struct intel_gvt *gvt, unsigned int offset);
+
 int intel_vgpu_default_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
 				 void *p_data, unsigned int bytes);
 int intel_vgpu_default_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
@@ -111,4 +95,8 @@ int intel_vgpu_default_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 
 bool intel_gvt_in_force_nonpriv_whitelist(struct intel_gvt *gvt,
 					  unsigned int offset);
+
+int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset,
+			   void *pdata, unsigned int bytes, bool is_read);
+
 #endif
diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h
index 419353624c5a490f8d76f511ac98adfaa6b9e9cf..f0e5487e668865342002f27adfb70012091c152f 100644
--- a/drivers/gpu/drm/i915/gvt/mpt.h
+++ b/drivers/gpu/drm/i915/gvt/mpt.h
@@ -133,8 +133,7 @@ static inline int intel_gvt_hypervisor_inject_msi(struct intel_vgpu *vgpu)
 	if (WARN(control & GENMASK(15, 1), "only support one MSI format\n"))
 		return -EINVAL;
 
-	gvt_dbg_irq("vgpu%d: inject msi address %x data%x\n", vgpu->id, addr,
-		    data);
+	trace_inject_msi(vgpu->id, addr, data);
 
 	ret = intel_gvt_host.mpt->inject_msi(vgpu->handle, addr, data);
 	if (ret)
diff --git a/drivers/gpu/drm/i915/gvt/render.c b/drivers/gpu/drm/i915/gvt/render.c
index a5e11d89df2f86d13dc546f7b3e1a9e9c8ba7d97..504e57c3bc23c824036dff1bf51cd1168f5b15f3 100644
--- a/drivers/gpu/drm/i915/gvt/render.c
+++ b/drivers/gpu/drm/i915/gvt/render.c
@@ -35,6 +35,7 @@
 
 #include "i915_drv.h"
 #include "gvt.h"
+#include "trace.h"
 
 struct render_mmio {
 	int ring_id;
@@ -260,7 +261,8 @@ static void restore_mocs(struct intel_vgpu *vgpu, int ring_id)
 
 #define CTX_CONTEXT_CONTROL_VAL	0x03
 
-void intel_gvt_load_render_mmio(struct intel_vgpu *vgpu, int ring_id)
+/* Switch ring mmio values (context) from host to a vgpu. */
+static void switch_mmio_to_vgpu(struct intel_vgpu *vgpu, int ring_id)
 {
 	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 	struct render_mmio *mmio;
@@ -305,14 +307,15 @@ void intel_gvt_load_render_mmio(struct intel_vgpu *vgpu, int ring_id)
 		I915_WRITE(mmio->reg, v);
 		POSTING_READ(mmio->reg);
 
-		gvt_dbg_render("load reg %x old %x new %x\n",
-				i915_mmio_reg_offset(mmio->reg),
-				mmio->value, v);
+		trace_render_mmio(vgpu->id, "load",
+				  i915_mmio_reg_offset(mmio->reg),
+				  mmio->value, v);
 	}
 	handle_tlb_pending_event(vgpu, ring_id);
 }
 
-void intel_gvt_restore_render_mmio(struct intel_vgpu *vgpu, int ring_id)
+/* Switch ring mmio values (context) from vgpu to host. */
+static void switch_mmio_to_host(struct intel_vgpu *vgpu, int ring_id)
 {
 	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 	struct render_mmio *mmio;
@@ -346,8 +349,37 @@ void intel_gvt_restore_render_mmio(struct intel_vgpu *vgpu, int ring_id)
 		I915_WRITE(mmio->reg, v);
 		POSTING_READ(mmio->reg);
 
-		gvt_dbg_render("restore reg %x old %x new %x\n",
-				i915_mmio_reg_offset(mmio->reg),
-				mmio->value, v);
+		trace_render_mmio(vgpu->id, "restore",
+				  i915_mmio_reg_offset(mmio->reg),
+				  mmio->value, v);
 	}
 }
+
+/**
+ * intel_gvt_switch_mmio - switch the mmio context of a specific engine
+ * @pre: the last vGPU that own the engine
+ * @next: the vGPU to switch to
+ * @ring_id: specify the engine
+ *
+ * If pre is null, the host owns the engine. If next is null,
+ * we are switching to a host workload.
+ */
+void intel_gvt_switch_mmio(struct intel_vgpu *pre,
+			   struct intel_vgpu *next, int ring_id)
+{
+	if (WARN_ON(!pre && !next))
+		return;
+
+	gvt_dbg_render("switch ring %d from %s to %s\n", ring_id,
+		       pre ? "vGPU" : "host", next ? "vGPU" : "HOST");
+
+	/*
+	 * TODO: Optimize for vGPU to vGPU switch by merging
+	 * switch_mmio_to_host() and switch_mmio_to_vgpu().
+	 */
+	if (pre)
+		switch_mmio_to_host(pre, ring_id);
+
+	if (next)
+		switch_mmio_to_vgpu(next, ring_id);
+}
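
intel_gvt_switch_mmio() is now the single entry point for engine MMIO context switches: a NULL pre means the host currently owns the engine, a NULL next means it is being handed back. A compact model of the dispatch, with the two directional helpers stubbed out:

#include <assert.h>
#include <stdio.h>

struct vgpu { int id; };

static void switch_mmio_to_host(struct vgpu *pre, int ring_id)
{
	printf("ring %d: vgpu%d -> host\n", ring_id, pre->id);
}

static void switch_mmio_to_vgpu(struct vgpu *next, int ring_id)
{
	printf("ring %d: host -> vgpu%d\n", ring_id, next->id);
}

static void switch_mmio(struct vgpu *pre, struct vgpu *next, int ring_id)
{
	assert(pre || next);	/* at least one side must be a vGPU */

	/* vGPU-to-vGPU goes through host state for now (see TODO above). */
	if (pre)
		switch_mmio_to_host(pre, ring_id);
	if (next)
		switch_mmio_to_vgpu(next, ring_id);
}

int main(void)
{
	struct vgpu a = { 1 }, b = { 2 };

	switch_mmio(NULL, &a, 0);	/* host -> vgpu1 */
	switch_mmio(&a, &b, 0);		/* vgpu1 -> vgpu2, via host */
	switch_mmio(&b, NULL, 0);	/* vgpu2 -> host */
	return 0;
}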
diff --git a/drivers/gpu/drm/i915/gvt/render.h b/drivers/gpu/drm/i915/gvt/render.h
index dac1a3cc458b0e312e94136f7e52759b058c9007..91db1d39d28f65e7ee42d17cbcc9e09cabe0ddf0 100644
--- a/drivers/gpu/drm/i915/gvt/render.h
+++ b/drivers/gpu/drm/i915/gvt/render.h
@@ -36,8 +36,8 @@
 #ifndef __GVT_RENDER_H__
 #define __GVT_RENDER_H__
 
-void intel_gvt_load_render_mmio(struct intel_vgpu *vgpu, int ring_id);
+void intel_gvt_switch_mmio(struct intel_vgpu *pre,
+			   struct intel_vgpu *next, int ring_id);
 
-void intel_gvt_restore_render_mmio(struct intel_vgpu *vgpu, int ring_id);
 
 #endif
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
index f25ff133865f1327936483ac166b97c41e0baa60..436377da41baced8e81352587cc3e035d0dc1ba3 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.c
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -202,11 +202,6 @@ static void tbs_sched_func(struct gvt_sched_data *sched_data)
 	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
 	struct vgpu_sched_data *vgpu_data;
 	struct intel_vgpu *vgpu = NULL;
-	static uint64_t timer_check;
-
-	if (!(timer_check++ % GVT_TS_BALANCE_PERIOD_MS))
-		gvt_balance_timeslice(sched_data);
-
 	/* no active vgpu or has already had a target */
 	if (list_empty(&sched_data->lru_runq_head) || scheduler->next_vgpu)
 		goto out;
@@ -231,9 +226,19 @@ static void tbs_sched_func(struct gvt_sched_data *sched_data)
 void intel_gvt_schedule(struct intel_gvt *gvt)
 {
 	struct gvt_sched_data *sched_data = gvt->scheduler.sched_data;
+	static uint64_t timer_check;
 
 	mutex_lock(&gvt->lock);
+
+	if (test_and_clear_bit(INTEL_GVT_REQUEST_SCHED,
+				(void *)&gvt->service_request)) {
+		if (!(timer_check++ % GVT_TS_BALANCE_PERIOD_MS))
+			gvt_balance_timeslice(sched_data);
+	}
+	clear_bit(INTEL_GVT_REQUEST_EVENT_SCHED, (void *)&gvt->service_request);
+
 	tbs_sched_func(sched_data);
+
 	mutex_unlock(&gvt->lock);
 }
 
@@ -303,8 +308,20 @@ static int tbs_sched_init_vgpu(struct intel_vgpu *vgpu)
 
 static void tbs_sched_clean_vgpu(struct intel_vgpu *vgpu)
 {
+	struct intel_gvt_workload_scheduler *scheduler = &vgpu->gvt->scheduler;
+	int ring_id;
+
 	kfree(vgpu->sched_data);
 	vgpu->sched_data = NULL;
+
+	spin_lock_bh(&scheduler->mmio_context_lock);
+	for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) {
+		if (scheduler->engine_owner[ring_id] == vgpu) {
+			intel_gvt_switch_mmio(vgpu, NULL, ring_id);
+			scheduler->engine_owner[ring_id] = NULL;
+		}
+	}
+	spin_unlock_bh(&scheduler->mmio_context_lock);
 }
 
 static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
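
intel_gvt_schedule() now serves two request bits: the timer-driven INTEL_GVT_REQUEST_SCHED, which alone advances the timeslice-balance counter, and the event-driven INTEL_GVT_REQUEST_EVENT_SCHED raised on workload completion. A minimal single-threaded model of that separation (the real code uses atomic bitops under gvt->lock):

#include <stdbool.h>
#include <stdio.h>

#define REQ_SCHED	(1u << 1)
#define REQ_EVENT_SCHED	(1u << 2)
#define BALANCE_PERIOD	100	/* stands in for GVT_TS_BALANCE_PERIOD_MS */

static unsigned int service_request;
static unsigned long long timer_check;

static bool test_and_clear(unsigned int bit)
{
	bool was_set = service_request & bit;

	service_request &= ~bit;
	return was_set;
}

static void schedule(void)
{
	/* Only timer ticks advance the timeslice-balance counter. */
	if (test_and_clear(REQ_SCHED)) {
		if (!(timer_check++ % BALANCE_PERIOD))
			printf("balance timeslices\n");
	}
	test_and_clear(REQ_EVENT_SCHED);

	printf("pick next vgpu\n");	/* tbs_sched_func() */
}

int main(void)
{
	service_request |= REQ_EVENT_SCHED;	/* a workload completed */
	schedule();
	service_request |= REQ_SCHED;		/* the timer fired */
	schedule();
	return 0;
}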
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 6ae286cb5804aee4342b30dfeda64159d54acf7d..488fdea348a979e411258d6317748fe2bdd5e242 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -138,21 +138,42 @@ static int shadow_context_status_change(struct notifier_block *nb,
 	struct intel_gvt *gvt = container_of(nb, struct intel_gvt,
 				shadow_ctx_notifier_block[req->engine->id]);
 	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
-	struct intel_vgpu_workload *workload =
-		scheduler->current_workload[req->engine->id];
+	enum intel_engine_id ring_id = req->engine->id;
+	struct intel_vgpu_workload *workload;
+
+	if (!is_gvt_request(req)) {
+		spin_lock_bh(&scheduler->mmio_context_lock);
+		if (action == INTEL_CONTEXT_SCHEDULE_IN &&
+		    scheduler->engine_owner[ring_id]) {
+			/* Switch ring from vGPU to host. */
+			intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
+					      NULL, ring_id);
+			scheduler->engine_owner[ring_id] = NULL;
+		}
+		spin_unlock_bh(&scheduler->mmio_context_lock);
 
-	if (!is_gvt_request(req) || unlikely(!workload))
+		return NOTIFY_OK;
+	}
+
+	workload = scheduler->current_workload[ring_id];
+	if (unlikely(!workload))
 		return NOTIFY_OK;
 
 	switch (action) {
 	case INTEL_CONTEXT_SCHEDULE_IN:
-		intel_gvt_load_render_mmio(workload->vgpu,
-					   workload->ring_id);
+		spin_lock_bh(&scheduler->mmio_context_lock);
+		if (workload->vgpu != scheduler->engine_owner[ring_id]) {
+			/* Switch ring from host to vGPU or vGPU to vGPU. */
+			intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
+					      workload->vgpu, ring_id);
+			scheduler->engine_owner[ring_id] = workload->vgpu;
+		} else
+			gvt_dbg_sched("skip ring %d mmio switch for vgpu%d\n",
+				      ring_id, workload->vgpu->id);
+		spin_unlock_bh(&scheduler->mmio_context_lock);
 		atomic_set(&workload->shadow_ctx_active, 1);
 		break;
 	case INTEL_CONTEXT_SCHEDULE_OUT:
-		intel_gvt_restore_render_mmio(workload->vgpu,
-					      workload->ring_id);
 		/* If the status is -EINPROGRESS means this workload
 		 * doesn't meet any issue during dispatching so when
 		 * get the SCHEDULE_OUT set the status to be zero for
@@ -431,6 +452,10 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
 
 	atomic_dec(&vgpu->running_workload_num);
 	wake_up(&scheduler->workload_complete_wq);
+
+	if (gvt->scheduler.need_reschedule)
+		intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EVENT_SCHED);
+
 	mutex_unlock(&gvt->lock);
 }
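
The notifier tracks per-engine ownership in scheduler->engine_owner so the MMIO switch happens lazily: a host request scheduling in on a vGPU-owned engine switches the state back to host, and back-to-back workloads of the same vGPU skip the switch entirely. A small single-engine model of that decision (locking omitted):

#include <stdio.h>

struct vgpu { int id; };

static struct vgpu *engine_owner;	/* NULL means the host owns it */

/* Called on INTEL_CONTEXT_SCHEDULE_IN; vgpu is NULL for host requests. */
static void schedule_in(struct vgpu *vgpu)
{
	if (vgpu == engine_owner) {
		printf("skip mmio switch\n");	/* same owner, no work */
		return;
	}
	printf("switch mmio: %d -> %d\n",
	       engine_owner ? engine_owner->id : 0,
	       vgpu ? vgpu->id : 0);		/* 0 stands for host */
	engine_owner = vgpu;
}

int main(void)
{
	struct vgpu a = { 1 };

	schedule_in(&a);	/* host -> vgpu1: switch */
	schedule_in(&a);	/* vgpu1 again: skipped */
	schedule_in(NULL);	/* host request: switch back */
	return 0;
}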
 
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
index 2cd725c0573e75924b3afc49603b7d9b6ba0df16..9b6bf51e9b9b0b1f20ec609e50131261c1f4dec0 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.h
+++ b/drivers/gpu/drm/i915/gvt/scheduler.h
@@ -42,6 +42,10 @@ struct intel_gvt_workload_scheduler {
 	struct intel_vgpu_workload *current_workload[I915_NUM_ENGINES];
 	bool need_reschedule;
 
+	spinlock_t mmio_context_lock;
+	/* can be null when owner is host */
+	struct intel_vgpu *engine_owner[I915_NUM_ENGINES];
+
 	wait_queue_head_t workload_complete_wq;
 	struct task_struct *thread[I915_NUM_ENGINES];
 	wait_queue_head_t waitq[I915_NUM_ENGINES];
diff --git a/drivers/gpu/drm/i915/gvt/trace.h b/drivers/gpu/drm/i915/gvt/trace.h
index 53a2d10cf3f12c02da61e23e9d62e6d0f1d4b0e9..8c150381d9a4ef9f9c014305ead6f000aed963f1 100644
--- a/drivers/gpu/drm/i915/gvt/trace.h
+++ b/drivers/gpu/drm/i915/gvt/trace.h
@@ -224,58 +224,138 @@ TRACE_EVENT(oos_sync,
 	TP_printk("%s", __entry->buf)
 );
 
-#define MAX_CMD_STR_LEN	256
 TRACE_EVENT(gvt_command,
-		TP_PROTO(u8 vm_id, u8 ring_id, u32 ip_gma, u32 *cmd_va, u32 cmd_len, bool ring_buffer_cmd, cycles_t cost_pre_cmd_handler, cycles_t cost_cmd_handler),
-
-		TP_ARGS(vm_id, ring_id, ip_gma, cmd_va, cmd_len, ring_buffer_cmd, cost_pre_cmd_handler, cost_cmd_handler),
-
-		TP_STRUCT__entry(
-			__field(u8, vm_id)
-			__field(u8, ring_id)
-			__field(int, i)
-			__array(char, tmp_buf, MAX_CMD_STR_LEN)
-			__array(char, cmd_str, MAX_CMD_STR_LEN)
-			),
-
-		TP_fast_assign(
-			__entry->vm_id = vm_id;
-			__entry->ring_id = ring_id;
-			__entry->cmd_str[0] = '\0';
-			snprintf(__entry->tmp_buf, MAX_CMD_STR_LEN, "VM(%d) Ring(%d): %s ip(%08x) pre handler cost (%llu), handler cost (%llu) ", vm_id, ring_id, ring_buffer_cmd ? "RB":"BB", ip_gma, cost_pre_cmd_handler, cost_cmd_handler);
-			strcat(__entry->cmd_str, __entry->tmp_buf);
-			entry->i = 0;
-			while (cmd_len > 0) {
-				if (cmd_len >= 8) {
-					snprintf(__entry->tmp_buf, MAX_CMD_STR_LEN, "%08x %08x %08x %08x %08x %08x %08x %08x ",
-						cmd_va[__entry->i], cmd_va[__entry->i+1], cmd_va[__entry->i+2], cmd_va[__entry->i+3],
-						cmd_va[__entry->i+4], cmd_va[__entry->i+5], cmd_va[__entry->i+6], cmd_va[__entry->i+7]);
-					__entry->i += 8;
-					cmd_len -= 8;
-					strcat(__entry->cmd_str, __entry->tmp_buf);
-				} else if (cmd_len >= 4) {
-					snprintf(__entry->tmp_buf, MAX_CMD_STR_LEN, "%08x %08x %08x %08x ",
-						cmd_va[__entry->i], cmd_va[__entry->i+1], cmd_va[__entry->i+2], cmd_va[__entry->i+3]);
-					__entry->i += 4;
-					cmd_len -= 4;
-					strcat(__entry->cmd_str, __entry->tmp_buf);
-				} else if (cmd_len >= 2) {
-					snprintf(__entry->tmp_buf, MAX_CMD_STR_LEN, "%08x %08x ", cmd_va[__entry->i], cmd_va[__entry->i+1]);
-					__entry->i += 2;
-					cmd_len -= 2;
-					strcat(__entry->cmd_str, __entry->tmp_buf);
-				} else if (cmd_len == 1) {
-					snprintf(__entry->tmp_buf, MAX_CMD_STR_LEN, "%08x ", cmd_va[__entry->i]);
-					__entry->i += 1;
-					cmd_len -= 1;
-					strcat(__entry->cmd_str, __entry->tmp_buf);
-				}
-			}
-			strcat(__entry->cmd_str, "\n");
-		),
+	TP_PROTO(u8 vgpu_id, u8 ring_id, u32 ip_gma, u32 *cmd_va, u32 cmd_len,
+		 u32 buf_type),
+
+	TP_ARGS(vgpu_id, ring_id, ip_gma, cmd_va, cmd_len, buf_type),
+
+	TP_STRUCT__entry(
+		__field(u8, vgpu_id)
+		__field(u8, ring_id)
+		__field(u32, ip_gma)
+		__field(u32, buf_type)
+		__field(u32, cmd_len)
+		__dynamic_array(u32, raw_cmd, cmd_len)
+	),
+
+	TP_fast_assign(
+		__entry->vgpu_id = vgpu_id;
+		__entry->ring_id = ring_id;
+		__entry->ip_gma = ip_gma;
+		__entry->buf_type = buf_type;
+		__entry->cmd_len = cmd_len;
+		memcpy(__get_dynamic_array(raw_cmd), cmd_va, cmd_len * sizeof(*cmd_va));
+	),
+
+	TP_printk("vgpu%d ring %d: buf_type %u, ip_gma %08x, raw cmd %s",
+		__entry->vgpu_id,
+		__entry->ring_id,
+		__entry->buf_type,
+		__entry->ip_gma,
+		__print_array(__get_dynamic_array(raw_cmd), __entry->cmd_len, 4))
+);
+
+#define GVT_TEMP_STR_LEN 16
+TRACE_EVENT(write_ir,
+	TP_PROTO(int id, char *reg_name, unsigned int reg, unsigned int new_val,
+		 unsigned int old_val, unsigned int changed),
+
+	TP_ARGS(id, reg_name, reg, new_val, old_val, changed),
+
+	TP_STRUCT__entry(
+		__field(int, id)
+		__array(char, buf, GVT_TEMP_STR_LEN)
+		__field(unsigned int, reg)
+		__field(unsigned int, new_val)
+		__field(unsigned int, old_val)
+		__field(unsigned int, changed)
+	),
+
+	TP_fast_assign(
+		__entry->id = id;
+		snprintf(__entry->buf, GVT_TEMP_STR_LEN, "%s", reg_name);
+		__entry->reg = reg;
+		__entry->new_val = new_val;
+		__entry->old_val = old_val;
+		__entry->changed = changed;
+	),
+
+	TP_printk("VM%u write [%s] %x, new %08x, old %08x, changed %08x\n",
+		  __entry->id, __entry->buf, __entry->reg, __entry->new_val,
+		  __entry->old_val, __entry->changed)
+);
+
+TRACE_EVENT(propagate_event,
+	TP_PROTO(int id, const char *irq_name, int bit),
+
+	TP_ARGS(id, irq_name, bit),
+
+	TP_STRUCT__entry(
+		__field(int, id)
+		__array(char, buf, GVT_TEMP_STR_LEN)
+		__field(int, bit)
+	),
 
-		TP_printk("%s", __entry->cmd_str)
+	TP_fast_assign(
+		__entry->id = id;
+		snprintf(__entry->buf, GVT_TEMP_STR_LEN, "%s", irq_name);
+		__entry->bit = bit;
+	),
+
+	TP_printk("Set bit (%d) for (%s) for vgpu (%d)\n",
+		  __entry->bit, __entry->buf, __entry->id)
 );
+
+TRACE_EVENT(inject_msi,
+	TP_PROTO(int id, unsigned int address, unsigned int data),
+
+	TP_ARGS(id, address, data),
+
+	TP_STRUCT__entry(
+		__field(int, id)
+		__field(unsigned int, address)
+		__field(unsigned int, data)
+	),
+
+	TP_fast_assign(
+		__entry->id = id;
+		__entry->address = address;
+		__entry->data = data;
+	),
+
+	TP_printk("vgpu%d:inject msi address %x data %x\n",
+		  __entry->id, __entry->address, __entry->data)
+);
+
+TRACE_EVENT(render_mmio,
+	TP_PROTO(int id, char *action, unsigned int reg,
+		 unsigned int old_val, unsigned int new_val),
+
+	TP_ARGS(id, action, reg, old_val, new_val),
+
+	TP_STRUCT__entry(
+		__field(int, id)
+		__array(char, buf, GVT_TEMP_STR_LEN)
+		__field(unsigned int, reg)
+		__field(unsigned int, old_val)
+		__field(unsigned int, new_val)
+	),
+
+	TP_fast_assign(
+		__entry->id = id;
+		snprintf(__entry->buf, GVT_TEMP_STR_LEN, "%s", action);
+		__entry->reg = reg;
+		__entry->old_val = old_val;
+		__entry->new_val = new_val;
+	),
+
+	TP_printk("VM%u %s reg %x, old %08x new %08x\n",
+		  __entry->id, __entry->buf, __entry->reg,
+		  __entry->old_val, __entry->new_val)
+);
+
 #endif /* _GVT_TRACE_H_ */
 
 /* This part must be out of protection */
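
The new gvt_command event stores the raw dwords in a __dynamic_array and defers formatting to __print_array at read time, so the snprintf/strcat string building is gone from the submission path. A rough userspace equivalent of the 4-byte-wide rendering that __print_array produces:

#include <stdint.h>
#include <stdio.h>

/* Roughly what __print_array(buf, count, 4) renders for the raw_cmd field. */
static void print_u32_array(const uint32_t *buf, unsigned int count)
{
	printf("{");
	for (unsigned int i = 0; i < count; i++)
		printf("%s0x%x", i ? "," : "", buf[i]);
	printf("}\n");
}

int main(void)
{
	uint32_t cmd[4] = { 0x18800080, 0x1000, 0x0, 0x5 };	/* fake cmd */

	print_u32_array(cmd, 4);
	return 0;
}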
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 6e3cbd8caec26c029e162c865346a8e0ac28b001..90c14e6e3ea06b8de36d90284132659eb80f72c6 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -501,9 +501,14 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
 
 	/* full GPU reset or device model level reset */
 	if (engine_mask == ALL_ENGINES || dmlr) {
+
 		intel_vgpu_reset_gtt(vgpu, dmlr);
-		intel_vgpu_reset_resource(vgpu);
-		intel_vgpu_reset_mmio(vgpu);
+
+		/* fence will not be reset during virtual reset */
+		if (dmlr)
+			intel_vgpu_reset_resource(vgpu);
+
+		intel_vgpu_reset_mmio(vgpu, dmlr);
 		populate_pvinfo_page(vgpu);
 		intel_vgpu_reset_display(vgpu);