/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Author: Monk.liu@amd.com
 */
#ifndef AMDGPU_VIRT_H
#define AMDGPU_VIRT_H

#define AMDGPU_SRIOV_CAPS_SRIOV_VBIOS  (1 << 0) /* vBIOS is sr-iov ready */
#define AMDGPU_SRIOV_CAPS_ENABLE_IOV   (1 << 1) /* sr-iov is enabled on this GPU */
#define AMDGPU_SRIOV_CAPS_IS_VF        (1 << 2) /* this GPU is a virtual function */
#define AMDGPU_PASSTHROUGH_MODE        (1 << 3) /* the whole GPU is passed through to the VM */
#define AMDGPU_SRIOV_CAPS_RUNTIME      (1 << 4) /* is out of full access mode */

/**
 * struct amdgpu_virt_ops - amdgpu device virt operations
 * @req_full_gpu: request exclusive (full) GPU access from the host
 * @rel_full_gpu: release exclusive GPU access back to the host
 * @reset_gpu: request a GPU reset from the host
 */
struct amdgpu_virt_ops {
	int (*req_full_gpu)(struct amdgpu_device *adev, bool init);
	int (*rel_full_gpu)(struct amdgpu_device *adev, bool init);
	int (*reset_gpu)(struct amdgpu_device *adev);
};
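
/*
 * Illustrative sketch (not part of this header): a chip specific mailbox
 * backend is expected to implement these callbacks and hook them up through
 * adev->virt.ops, roughly like:
 *
 *	static const struct amdgpu_virt_ops xgpu_example_virt_ops = {
 *		.req_full_gpu	= xgpu_example_request_full_gpu_access,
 *		.rel_full_gpu	= xgpu_example_release_full_gpu_access,
 *		.reset_gpu	= xgpu_example_request_reset,
 *	};
 *
 * The xgpu_example_* names are placeholders, not real symbols.
 */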

/* GPU virtualization */
struct amdgpu_virt {
	uint32_t			caps;
	struct amdgpu_bo		*csa_obj;
	uint64_t			csa_vmid0_addr;
	bool				chained_ib_support;
	uint32_t			reg_val_offs;
	struct mutex			lock_kiq;
	struct mutex			lock_reset;
	struct amdgpu_irq_src		ack_irq;
	struct amdgpu_irq_src		rcv_irq;
	struct work_struct		flr_work;
	const struct amdgpu_virt_ops	*ops;
};

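/*
 * Static context save area (CSA) used for world switch under SR-IOV:
 * an 8 KB buffer mapped at a fixed offset inside the driver-reserved
 * GPU virtual address range.
 */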
#define AMDGPU_CSA_SIZE    (8 * 1024)
#define AMDGPU_CSA_VADDR   (AMDGPU_VA_RESERVED_SIZE - AMDGPU_CSA_SIZE)

#define amdgpu_sriov_enabled(adev) \
((adev)->virt.caps & AMDGPU_SRIOV_CAPS_ENABLE_IOV)

#define amdgpu_sriov_vf(adev) \
((adev)->virt.caps & AMDGPU_SRIOV_CAPS_IS_VF)

#define amdgpu_sriov_bios(adev) \
((adev)->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS)

#define amdgpu_sriov_runtime(adev) \
((adev)->virt.caps & AMDGPU_SRIOV_CAPS_RUNTIME)

#define amdgpu_passthrough(adev) \
((adev)->virt.caps & AMDGPU_PASSTHROUGH_MODE)
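
/*
 * Illustrative use of the helpers above (sketch only, real call sites live
 * in the IP block and device code): a VF typically routes privileged
 * register access through the KIQ once it has left full-access mode, e.g.
 *
 *	if (amdgpu_sriov_vf(adev) && amdgpu_sriov_runtime(adev))
 *		return amdgpu_virt_kiq_rreg(adev, reg);
 *
 * amdgpu_virt_kiq_rreg() is declared later in this header.
 */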

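/*
 * is_virtual_machine - best-effort guest detection: on x86 this checks the
 * CPUID hypervisor feature bit, on other architectures it conservatively
 * reports bare metal.
 */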
static inline bool is_virtual_machine(void)
{
#ifdef CONFIG_X86
	return boot_cpu_has(X86_FEATURE_HYPERVISOR);
#else
	return false;
#endif
}

struct amdgpu_vm;
int amdgpu_allocate_static_csa(struct amdgpu_device *adev);
int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_virt_init_setting(struct amdgpu_device *adev);
uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg);
void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v);
int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init);
int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init);
int amdgpu_virt_reset_gpu(struct amdgpu_device *adev);
int amdgpu_sriov_gpu_reset(struct amdgpu_device *adev, bool voluntary);
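
/*
 * Typical calling pattern (illustrative sketch only): full GPU access
 * brackets the hardware init/teardown paths on a VF, e.g.
 *
 *	r = amdgpu_virt_request_full_gpu(adev, true);
 *	if (r)
 *		return r;
 *	... program the hardware ...
 *	amdgpu_virt_release_full_gpu(adev, true);
 *
 * Outside that window the VF is in runtime mode (AMDGPU_SRIOV_CAPS_RUNTIME)
 * and register access goes through the KIQ helpers above.
 */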

#endif