Commit 8f89743b authored by Jike Song, committed by Zhenyu Wang

drm/i915/gvt: remove obsolete code for old kvmgt opregion

The current GVT code still contains obsolete logic that was originally
added to support the old, non-vfio kvmgt; it is essentially a set of
workarounds. That path is no longer supported, so it is safe to remove
this code and build a cleaner framework.
Signed-off-by: Jike Song <jike.song@intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
Parent 1f31c829
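After this patch, callers of the mapping helper pass only whether to map or unmap; the old GVT_MAP_* type argument is gone. A minimal sketch of how a caller looks with the simplified signature, assuming the gvt/mpt.h declarations changed below; the function name and the gfn/mfn/npages values are illustrative placeholders, not taken from this commit:

	/*
	 * Hedged sketch of a caller after this change. Only the wrapper
	 * prototype (vgpu, gfn, mfn, nr, map) comes from the diff below.
	 */
	static int example_map_range(struct intel_vgpu *vgpu,
				     unsigned long gfn, unsigned long mfn,
				     unsigned int npages)
	{
		int ret;

		/* map: the former GVT_MAP_* type argument no longer exists */
		ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu, gfn, mfn,
							  npages, true);
		if (ret)
			return ret;

		/* unmap uses the same hook with map == false */
		return intel_gvt_hypervisor_map_gfn_to_mfn(vgpu, gfn, mfn,
							   npages, false);
	}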
@@ -82,9 +82,8 @@ static int map_aperture(struct intel_vgpu *vgpu, bool map)
 	ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu, first_gfn,
 						  first_mfn,
-						  vgpu_aperture_sz(vgpu)
-						  >> PAGE_SHIFT, map,
-						  GVT_MAP_APERTURE);
+						  vgpu_aperture_sz(vgpu) >>
+						  PAGE_SHIFT, map);
 	if (ret)
 		return ret;
...
@@ -60,8 +60,7 @@ struct intel_gvt_mpt {
 			      unsigned long len);
 	unsigned long (*gfn_to_mfn)(unsigned long handle, unsigned long gfn);
 	int (*map_gfn_to_mfn)(unsigned long handle, unsigned long gfn,
-			      unsigned long mfn, unsigned int nr, bool map,
-			      int type);
+			      unsigned long mfn, unsigned int nr, bool map);
 	int (*set_trap_area)(unsigned long handle, u64 start, u64 end,
 			     bool map);
 };
...
@@ -224,11 +224,6 @@ static inline unsigned long intel_gvt_hypervisor_gfn_to_mfn(
 	return intel_gvt_host.mpt->gfn_to_mfn(vgpu->handle, gfn);
 }
 
-enum {
-	GVT_MAP_APERTURE = 0,
-	GVT_MAP_OPREGION,
-};
-
 /**
  * intel_gvt_hypervisor_map_gfn_to_mfn - map a GFN region to MFN
  * @vgpu: a vGPU
@@ -236,7 +231,6 @@ enum {
  * @mfn: host PFN
  * @nr: amount of PFNs
  * @map: map or unmap
- * @type: map type
 *
 * Returns:
 * Zero on success, negative error code if failed.
@@ -244,10 +238,10 @@ enum {
 static inline int intel_gvt_hypervisor_map_gfn_to_mfn(
 		struct intel_vgpu *vgpu, unsigned long gfn,
 		unsigned long mfn, unsigned int nr,
-		bool map, int type)
+		bool map)
 {
 	return intel_gvt_host.mpt->map_gfn_to_mfn(vgpu->handle, gfn, mfn, nr,
-						  map, type);
+						  map);
 }
 
 /**
...
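With the type argument dropped from struct intel_gvt_mpt, a hypervisor backend implementing this hook only has to distinguish map from unmap. A minimal sketch of such a backend hook, assuming the prototype above; the function name, the body, and the struct instance are hypothetical and not part of this commit:

	/*
	 * Hypothetical backend stub: only the prototype
	 * (handle, gfn, mfn, nr, map) comes from the patch above.
	 */
	static int example_map_gfn_to_mfn(unsigned long handle, unsigned long gfn,
					  unsigned long mfn, unsigned int nr,
					  bool map)
	{
		/*
		 * A real backend would translate "handle" to its per-VM state
		 * and install (map == true) or remove (map == false) the
		 * gfn -> mfn mapping for "nr" pages; this stub only shows the
		 * shape of the simplified hook.
		 */
		return 0;
	}

	/*
	 * Hypothetical registration; the field name .map_gfn_to_mfn is from
	 * the patch, the surrounding instance is not.
	 */
	static const struct intel_gvt_mpt example_mpt = {
		.map_gfn_to_mfn = example_map_gfn_to_mfn,
	};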
@@ -73,7 +73,7 @@ static int map_vgpu_opregion(struct intel_vgpu *vgpu, bool map)
 		}
 		ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu,
 				vgpu_opregion(vgpu)->gfn[i],
-				mfn, 1, map, GVT_MAP_OPREGION);
+				mfn, 1, map);
 		if (ret) {
 			gvt_err("fail to map GFN to MFN, errno: %d\n", ret);
 			return ret;
@@ -89,28 +89,18 @@ static int map_vgpu_opregion(struct intel_vgpu *vgpu, bool map)
  */
 void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu)
 {
-	int i;
-
 	gvt_dbg_core("vgpu%d: clean vgpu opregion\n", vgpu->id);
 
 	if (!vgpu_opregion(vgpu)->va)
 		return;
 
-	if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_KVM) {
-		vunmap(vgpu_opregion(vgpu)->va);
-		for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++) {
-			if (vgpu_opregion(vgpu)->pages[i]) {
-				put_page(vgpu_opregion(vgpu)->pages[i]);
-				vgpu_opregion(vgpu)->pages[i] = NULL;
-			}
-		}
-	} else {
+	if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_XEN) {
 		map_vgpu_opregion(vgpu, false);
 		free_pages((unsigned long)vgpu_opregion(vgpu)->va,
 				INTEL_GVT_OPREGION_PORDER);
-	}
 
-	vgpu_opregion(vgpu)->va = NULL;
+		vgpu_opregion(vgpu)->va = NULL;
+	}
 }
 
 /**
@@ -137,22 +127,8 @@ int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa)
 		ret = map_vgpu_opregion(vgpu, true);
 		if (ret)
 			return ret;
-	} else {
-		gvt_dbg_core("emulate opregion from userspace\n");
-
-		/*
-		 * If opregion pages are not allocated from host kenrel,
-		 * most of the params are meaningless
-		 */
-		ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu,
-				0, /* not used */
-				0, /* not used */
-				2, /* not used */
-				1,
-				GVT_MAP_OPREGION);
-		if (ret)
-			return ret;
 	}
 
 	return 0;
 }
...
@@ -315,15 +315,9 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
 	if (ret)
 		goto out_detach_hypervisor_vgpu;
 
-	if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_KVM) {
-		ret = intel_vgpu_init_opregion(vgpu, 0);
-		if (ret)
-			goto out_clean_gtt;
-	}
-
 	ret = intel_vgpu_init_display(vgpu);
 	if (ret)
-		goto out_clean_opregion;
+		goto out_clean_gtt;
 
 	ret = intel_vgpu_init_execlist(vgpu);
 	if (ret)
@@ -348,8 +342,6 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
 	intel_vgpu_clean_execlist(vgpu);
 out_clean_display:
 	intel_vgpu_clean_display(vgpu);
-out_clean_opregion:
-	intel_vgpu_clean_opregion(vgpu);
 out_clean_gtt:
 	intel_vgpu_clean_gtt(vgpu);
 out_detach_hypervisor_vgpu:
...