Commit 7ec27233 authored by Dave Airlie

Merge branch 'drm-next-4.12' of git://people.freedesktop.org/~agd5f/linux into drm-next

Fixes for 4.12.  This is a bit bigger than usual since it's 3 weeks
worth of fixes and most of these changes are for vega10 which is
new for 4.12 and still in a fair amount of flux.  It looks like
you missed my last pull request, so those patches are included here
as well.  Highlights:
- Lots of vega10 fixes
- Fix interruptible wait mixup
- Fan control method fixes
- Misc display fixes for radeon and amdgpu
- Misc bug fixes

* 'drm-next-4.12' of git://people.freedesktop.org/~agd5f/linux: (132 commits)
  drm/amd/powerplay: refine pwm1_enable callback functions for CI.
  drm/amd/powerplay: refine pwm1_enable callback functions for vi.
  drm/amd/powerplay: refine pwm1_enable callback functions for Vega10.
  drm/amdgpu: refine amdgpu pwm1_enable sysfs interface.
  drm/amdgpu: add amd fan ctrl mode enums.
  drm/amd/powerplay: add more smu message on Vega10.
  drm/amdgpu: fix dependency issue
  drm/amd: fix init order of sched job
  drm/amdgpu: add some additional vega10 pci ids
  drm/amdgpu/soc15: use atomfirmware for setting bios scratch for reset
  drm/amdgpu/atomfirmware: add function to update engine hang status
  drm/radeon: only warn once in radeon_ttm_bo_destroy if va list not empty
  drm/amdgpu: fix mutex list null pointer reference
  drm/amd/powerplay: fix bug sclk/mclk level can't be set on vega10.
  drm/amd/powerplay: Setup sw CTF to allow graceful exit when temperature exceeds maximum.
  drm/amd/powerplay: delete dead code in powerplay.
  drm/amdgpu: Use less generic enum definitions
  drm/amdgpu/gfx9: derive tile pipes from golden settings
  drm/amdgpu/gfx: drop max_gs_waves_per_vgt
  drm/amd/powerplay: disable engine spread spectrum feature on Vega10.
  ...
@@ -110,6 +110,7 @@ extern int amdgpu_pos_buf_per_se;
 extern int amdgpu_cntl_sb_buf_per_se;
 extern int amdgpu_param_buf_per_se;
 
+#define AMDGPU_DEFAULT_GTT_SIZE_MB    3072ULL /* 3GB by default */
 #define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS    3000
 #define AMDGPU_MAX_USEC_TIMEOUT    100000 /* 100 ms */
 #define AMDGPU_FENCE_JIFFIES_TIMEOUT    (HZ / 2)
@@ -966,6 +967,8 @@ struct amdgpu_gfx_config {
     unsigned mc_arb_ramcfg;
     unsigned gb_addr_config;
     unsigned num_rbs;
+    unsigned gs_vgt_table_depth;
+    unsigned gs_prim_buffer_depth;
 
     uint32_t tile_mode_array[32];
     uint32_t macrotile_mode_array[16];
@@ -980,6 +983,7 @@ struct amdgpu_gfx_config {
 struct amdgpu_cu_info {
     uint32_t number; /* total active CU number */
     uint32_t ao_cu_mask;
+    uint32_t wave_front_size;
     uint32_t bitmap[4][4];
 };
@@ -1000,10 +1004,10 @@ struct amdgpu_ngg_buf {
 };
 
 enum {
-    PRIM = 0,
-    POS,
-    CNTL,
-    PARAM,
+    NGG_PRIM = 0,
+    NGG_POS,
+    NGG_CNTL,
+    NGG_PARAM,
     NGG_BUF_MAX
 };
@@ -1125,6 +1129,7 @@ struct amdgpu_job {
     void *owner;
     uint64_t fence_ctx; /* the fence_context this job uses */
     bool vm_needs_flush;
+    bool need_pipeline_sync;
     unsigned vm_id;
     uint64_t vm_pd_addr;
     uint32_t gds_base, gds_size;
@@ -1704,9 +1709,6 @@ void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v);
 #define WREG32_FIELD_OFFSET(reg, offset, field, val) \
     WREG32(mm##reg + offset, (RREG32(mm##reg + offset) & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field))
 
-#define WREG32_FIELD15(ip, idx, reg, field, val) \
-    WREG32(SOC15_REG_OFFSET(ip, idx, mm##reg), (RREG32(SOC15_REG_OFFSET(ip, idx, mm##reg)) & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field))
-
 /*
  * BIOS helpers.
  */
...
@@ -1727,6 +1727,12 @@ void amdgpu_atombios_scratch_regs_restore(struct amdgpu_device *adev)
 {
     int i;
 
+    /*
+     * VBIOS will check ASIC_INIT_COMPLETE bit to decide if
+     * execute ASIC_Init posting via driver
+     */
+    adev->bios_scratch[7] &= ~ATOM_S7_ASIC_INIT_COMPLETE_MASK;
+
     for (i = 0; i < AMDGPU_BIOS_NUM_SCRATCH; i++)
         WREG32(mmBIOS_SCRATCH_0 + i, adev->bios_scratch[i]);
 }
...
@@ -26,6 +26,7 @@
 #include "atomfirmware.h"
 #include "amdgpu_atomfirmware.h"
 #include "atom.h"
+#include "atombios.h"
 
 #define get_index_into_master_table(master_table, table_name) (offsetof(struct master_table, table_name) / sizeof(uint16_t))
@@ -77,10 +78,29 @@ void amdgpu_atomfirmware_scratch_regs_restore(struct amdgpu_device *adev)
 {
     int i;
 
+    /*
+     * VBIOS will check ASIC_INIT_COMPLETE bit to decide if
+     * execute ASIC_Init posting via driver
+     */
+    adev->bios_scratch[7] &= ~ATOM_S7_ASIC_INIT_COMPLETE_MASK;
+
     for (i = 0; i < AMDGPU_BIOS_NUM_SCRATCH; i++)
         WREG32(adev->bios_scratch_reg_offset + i, adev->bios_scratch[i]);
 }
 
+void amdgpu_atomfirmware_scratch_regs_engine_hung(struct amdgpu_device *adev,
+                                                  bool hung)
+{
+    u32 tmp = RREG32(adev->bios_scratch_reg_offset + 3);
+
+    if (hung)
+        tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG;
+    else
+        tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG;
+
+    WREG32(adev->bios_scratch_reg_offset + 3, tmp);
+}
+
 int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev)
 {
     struct atom_context *ctx = adev->mode_info.atom_context;
...
@@ -28,6 +28,8 @@ bool amdgpu_atomfirmware_gpu_supports_virtualization(struct amdgpu_device *adev)
 void amdgpu_atomfirmware_scratch_regs_init(struct amdgpu_device *adev);
 void amdgpu_atomfirmware_scratch_regs_save(struct amdgpu_device *adev);
 void amdgpu_atomfirmware_scratch_regs_restore(struct amdgpu_device *adev);
+void amdgpu_atomfirmware_scratch_regs_engine_hung(struct amdgpu_device *adev,
+                                                  bool hung);
 int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev);
 
 #endif
@@ -117,8 +117,13 @@ static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size,
     }
 
 out_cleanup:
+    /* Check error value now. The value can be overwritten when clean up.*/
+    if (r) {
+        DRM_ERROR("Error while benchmarking BO move.\n");
+    }
+
     if (sobj) {
-        r = amdgpu_bo_reserve(sobj, false);
+        r = amdgpu_bo_reserve(sobj, true);
         if (likely(r == 0)) {
             amdgpu_bo_unpin(sobj);
             amdgpu_bo_unreserve(sobj);
@@ -126,17 +131,13 @@ static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size,
         amdgpu_bo_unref(&sobj);
     }
     if (dobj) {
-        r = amdgpu_bo_reserve(dobj, false);
+        r = amdgpu_bo_reserve(dobj, true);
         if (likely(r == 0)) {
             amdgpu_bo_unpin(dobj);
             amdgpu_bo_unreserve(dobj);
         }
         amdgpu_bo_unref(&dobj);
     }
-
-    if (r) {
-        DRM_ERROR("Error while benchmarking BO move.\n");
-    }
 }
 
 void amdgpu_benchmark(struct amdgpu_device *adev, int test_number)
...
@@ -42,82 +42,6 @@ struct amdgpu_cgs_device {
     struct amdgpu_device *adev = \
         ((struct amdgpu_cgs_device *)cgs_device)->adev
 
-static int amdgpu_cgs_gpu_mem_info(struct cgs_device *cgs_device, enum cgs_gpu_mem_type type,
-                                   uint64_t *mc_start, uint64_t *mc_size,
-                                   uint64_t *mem_size)
-{
-    CGS_FUNC_ADEV;
-    switch(type) {
-    case CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB:
-    case CGS_GPU_MEM_TYPE__VISIBLE_FB:
-        *mc_start = 0;
-        *mc_size = adev->mc.visible_vram_size;
-        *mem_size = adev->mc.visible_vram_size - adev->vram_pin_size;
-        break;
-    case CGS_GPU_MEM_TYPE__INVISIBLE_CONTIG_FB:
-    case CGS_GPU_MEM_TYPE__INVISIBLE_FB:
-        *mc_start = adev->mc.visible_vram_size;
-        *mc_size = adev->mc.real_vram_size - adev->mc.visible_vram_size;
-        *mem_size = *mc_size;
-        break;
-    case CGS_GPU_MEM_TYPE__GART_CACHEABLE:
-    case CGS_GPU_MEM_TYPE__GART_WRITECOMBINE:
-        *mc_start = adev->mc.gtt_start;
-        *mc_size = adev->mc.gtt_size;
-        *mem_size = adev->mc.gtt_size - adev->gart_pin_size;
-        break;
-    default:
-        return -EINVAL;
-    }
-
-    return 0;
-}
-
-static int amdgpu_cgs_gmap_kmem(struct cgs_device *cgs_device, void *kmem,
-                                uint64_t size,
-                                uint64_t min_offset, uint64_t max_offset,
-                                cgs_handle_t *kmem_handle, uint64_t *mcaddr)
-{
-    CGS_FUNC_ADEV;
-    int ret;
-    struct amdgpu_bo *bo;
-    struct page *kmem_page = vmalloc_to_page(kmem);
-    int npages = ALIGN(size, PAGE_SIZE) >> PAGE_SHIFT;
-
-    struct sg_table *sg = drm_prime_pages_to_sg(&kmem_page, npages);
-    ret = amdgpu_bo_create(adev, size, PAGE_SIZE, false,
-                           AMDGPU_GEM_DOMAIN_GTT, 0, sg, NULL, &bo);
-    if (ret)
-        return ret;
-    ret = amdgpu_bo_reserve(bo, false);
-    if (unlikely(ret != 0))
-        return ret;
-
-    /* pin buffer into GTT */
-    ret = amdgpu_bo_pin_restricted(bo, AMDGPU_GEM_DOMAIN_GTT,
-                                   min_offset, max_offset, mcaddr);
-    amdgpu_bo_unreserve(bo);
-
-    *kmem_handle = (cgs_handle_t)bo;
-    return ret;
-}
-
-static int amdgpu_cgs_gunmap_kmem(struct cgs_device *cgs_device, cgs_handle_t kmem_handle)
-{
-    struct amdgpu_bo *obj = (struct amdgpu_bo *)kmem_handle;
-
-    if (obj) {
-        int r = amdgpu_bo_reserve(obj, false);
-        if (likely(r == 0)) {
-            amdgpu_bo_unpin(obj);
-            amdgpu_bo_unreserve(obj);
-        }
-        amdgpu_bo_unref(&obj);
-    }
-    return 0;
-}
-
 static int amdgpu_cgs_alloc_gpu_mem(struct cgs_device *cgs_device,
                                     enum cgs_gpu_mem_type type,
                                     uint64_t size, uint64_t align,
@@ -215,7 +139,7 @@ static int amdgpu_cgs_free_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle)
     struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
 
     if (obj) {
-        int r = amdgpu_bo_reserve(obj, false);
+        int r = amdgpu_bo_reserve(obj, true);
         if (likely(r == 0)) {
             amdgpu_bo_kunmap(obj);
             amdgpu_bo_unpin(obj);
@@ -239,7 +163,7 @@ static int amdgpu_cgs_gmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle,
     min_offset = obj->placements[0].fpfn << PAGE_SHIFT;
     max_offset = obj->placements[0].lpfn << PAGE_SHIFT;
 
-    r = amdgpu_bo_reserve(obj, false);
+    r = amdgpu_bo_reserve(obj, true);
     if (unlikely(r != 0))
         return r;
     r = amdgpu_bo_pin_restricted(obj, obj->prefered_domains,
@@ -252,7 +176,7 @@ static int amdgpu_cgs_gunmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle)
 {
     int r;
     struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
-    r = amdgpu_bo_reserve(obj, false);
+    r = amdgpu_bo_reserve(obj, true);
     if (unlikely(r != 0))
         return r;
     r = amdgpu_bo_unpin(obj);
@@ -265,7 +189,7 @@ static int amdgpu_cgs_kmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle,
 {
     int r;
     struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
-    r = amdgpu_bo_reserve(obj, false);
+    r = amdgpu_bo_reserve(obj, true);
     if (unlikely(r != 0))
         return r;
     r = amdgpu_bo_kmap(obj, map);
@@ -277,7 +201,7 @@ static int amdgpu_cgs_kunmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle)
 {
     int r;
     struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
-    r = amdgpu_bo_reserve(obj, false);
+    r = amdgpu_bo_reserve(obj, true);
     if (unlikely(r != 0))
         return r;
     amdgpu_bo_kunmap(obj);
@@ -349,62 +273,6 @@ static void amdgpu_cgs_write_ind_register(struct cgs_device *cgs_device,
     WARN(1, "Invalid indirect register space");
 }
 
-static uint8_t amdgpu_cgs_read_pci_config_byte(struct cgs_device *cgs_device, unsigned addr)
-{
-    CGS_FUNC_ADEV;
-    uint8_t val;
-    int ret = pci_read_config_byte(adev->pdev, addr, &val);
-    if (WARN(ret, "pci_read_config_byte error"))
-        return 0;
-    return val;
-}
-
-static uint16_t amdgpu_cgs_read_pci_config_word(struct cgs_device *cgs_device, unsigned addr)
-{
-    CGS_FUNC_ADEV;
-    uint16_t val;
-    int ret = pci_read_config_word(adev->pdev, addr, &val);
-    if (WARN(ret, "pci_read_config_word error"))
-        return 0;
-    return val;
-}
-
-static uint32_t amdgpu_cgs_read_pci_config_dword(struct cgs_device *cgs_device,
-                                                 unsigned addr)
-{
-    CGS_FUNC_ADEV;
-    uint32_t val;
-    int ret = pci_read_config_dword(adev->pdev, addr, &val);
-    if (WARN(ret, "pci_read_config_dword error"))
-        return 0;
-    return val;
-}
-
-static void amdgpu_cgs_write_pci_config_byte(struct cgs_device *cgs_device, unsigned addr,
-                                             uint8_t value)
-{
-    CGS_FUNC_ADEV;
-    int ret = pci_write_config_byte(adev->pdev, addr, value);
-    WARN(ret, "pci_write_config_byte error");
-}
-
-static void amdgpu_cgs_write_pci_config_word(struct cgs_device *cgs_device, unsigned addr,
-                                             uint16_t value)
-{
-    CGS_FUNC_ADEV;
-    int ret = pci_write_config_word(adev->pdev, addr, value);
-    WARN(ret, "pci_write_config_word error");
-}
-
-static void amdgpu_cgs_write_pci_config_dword(struct cgs_device *cgs_device, unsigned addr,
-                                              uint32_t value)
-{
-    CGS_FUNC_ADEV;
-    int ret = pci_write_config_dword(adev->pdev, addr, value);
-    WARN(ret, "pci_write_config_dword error");
-}
-
 static int amdgpu_cgs_get_pci_resource(struct cgs_device *cgs_device,
                                        enum cgs_resource_type resource_type,
                                        uint64_t size,
@@ -477,56 +345,6 @@ static int amdgpu_cgs_atom_exec_cmd_table(struct cgs_device *cgs_device, unsigned table,
         adev->mode_info.atom_context, table, args);
 }
 
-static int amdgpu_cgs_create_pm_request(struct cgs_device *cgs_device, cgs_handle_t *request)
-{
-    /* TODO */
-    return 0;
-}
-
-static int amdgpu_cgs_destroy_pm_request(struct cgs_device *cgs_device, cgs_handle_t request)
-{
-    /* TODO */
-    return 0;
-}
-
-static int amdgpu_cgs_set_pm_request(struct cgs_device *cgs_device, cgs_handle_t request,
-                                     int active)
-{
-    /* TODO */
-    return 0;
-}
-
-static int amdgpu_cgs_pm_request_clock(struct cgs_device *cgs_device, cgs_handle_t request,
-                                       enum cgs_clock clock, unsigned freq)
-{
-    /* TODO */
-    return 0;
-}
-
-static int amdgpu_cgs_pm_request_engine(struct cgs_device *cgs_device, cgs_handle_t request,
-                                        enum cgs_engine engine, int powered)
-{
-    /* TODO */
-    return 0;
-}
-
-static int amdgpu_cgs_pm_query_clock_limits(struct cgs_device *cgs_device,
-                                            enum cgs_clock clock,
-                                            struct cgs_clock_limits *limits)
-{
-    /* TODO */
-    return 0;
-}
-
-static int amdgpu_cgs_set_camera_voltages(struct cgs_device *cgs_device, uint32_t mask,
-                                          const uint32_t *voltages)
-{
-    DRM_ERROR("not implemented");
-    return -EPERM;
-}
-
 struct cgs_irq_params {
     unsigned src_id;
     cgs_irq_source_set_func_t set;
@@ -1269,9 +1087,6 @@ static int amdgpu_cgs_call_acpi_method(struct cgs_device *cgs_device,
 }
 
 static const struct cgs_ops amdgpu_cgs_ops = {
-    .gpu_mem_info = amdgpu_cgs_gpu_mem_info,
-    .gmap_kmem = amdgpu_cgs_gmap_kmem,
-    .gunmap_kmem = amdgpu_cgs_gunmap_kmem,
     .alloc_gpu_mem = amdgpu_cgs_alloc_gpu_mem,
     .free_gpu_mem = amdgpu_cgs_free_gpu_mem,
     .gmap_gpu_mem = amdgpu_cgs_gmap_gpu_mem,
@@ -1282,23 +1097,10 @@ static const struct cgs_ops amdgpu_cgs_ops = {
     .write_register = amdgpu_cgs_write_register,
     .read_ind_register = amdgpu_cgs_read_ind_register,
     .write_ind_register = amdgpu_cgs_write_ind_register,
-    .read_pci_config_byte = amdgpu_cgs_read_pci_config_byte,
-    .read_pci_config_word = amdgpu_cgs_read_pci_config_word,
-    .read_pci_config_dword = amdgpu_cgs_read_pci_config_dword,
-    .write_pci_config_byte = amdgpu_cgs_write_pci_config_byte,
-    .write_pci_config_word = amdgpu_cgs_write_pci_config_word,
-    .write_pci_config_dword = amdgpu_cgs_write_pci_config_dword,
     .get_pci_resource = amdgpu_cgs_get_pci_resource,
     .atom_get_data_table = amdgpu_cgs_atom_get_data_table,
     .atom_get_cmd_table_revs = amdgpu_cgs_atom_get_cmd_table_revs,
     .atom_exec_cmd_table = amdgpu_cgs_atom_exec_cmd_table,
-    .create_pm_request = amdgpu_cgs_create_pm_request,
-    .destroy_pm_request = amdgpu_cgs_destroy_pm_request,
-    .set_pm_request = amdgpu_cgs_set_pm_request,
-    .pm_request_clock = amdgpu_cgs_pm_request_clock,
-    .pm_request_engine = amdgpu_cgs_pm_request_engine,
-    .pm_query_clock_limits = amdgpu_cgs_pm_query_clock_limits,
-    .set_camera_voltages = amdgpu_cgs_set_camera_voltages,
     .get_firmware_info = amdgpu_cgs_get_firmware_info,
     .rel_firmware = amdgpu_cgs_rel_firmware,
     .set_powergating_state = amdgpu_cgs_set_powergating_state,
...
@@ -1074,6 +1074,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
     cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring, p->fence);
     job->uf_sequence = cs->out.handle;
     amdgpu_job_free_resources(job);
+    amdgpu_cs_parser_fini(p, 0, true);
 
     trace_amdgpu_cs_ioctl(job);
     amd_sched_entity_push_job(&job->base);
@@ -1129,7 +1130,10 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
         goto out;
 
     r = amdgpu_cs_submit(&parser, cs);
+    if (r)
+        goto out;
 
+    return 0;
 out:
     amdgpu_cs_parser_fini(&parser, r, reserved_buffers);
     return r;
...
@@ -273,6 +273,9 @@ struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
 
     spin_lock(&ctx->ring_lock);
 
+    if (seq == ~0ull)
+        seq = ctx->rings[ring->idx].sequence - 1;
+
     if (seq >= cring->sequence) {
         spin_unlock(&ctx->ring_lock);
         return ERR_PTR(-EINVAL);
...
@@ -53,7 +53,6 @@
 #include "bif/bif_4_1_d.h"
 #include <linux/pci.h>
 #include <linux/firmware.h>
-#include "amdgpu_pm.h"
 
 static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev);
 static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev);
@@ -350,7 +349,7 @@ static void amdgpu_vram_scratch_fini(struct amdgpu_device *adev)
     if (adev->vram_scratch.robj == NULL) {
         return;
     }
-    r = amdgpu_bo_reserve(adev->vram_scratch.robj, false);
+    r = amdgpu_bo_reserve(adev->vram_scratch.robj, true);
     if (likely(r == 0)) {
         amdgpu_bo_kunmap(adev->vram_scratch.robj);
         amdgpu_bo_unpin(adev->vram_scratch.robj);
@@ -422,12 +421,11 @@ static int amdgpu_doorbell_init(struct amdgpu_device *adev)
     if (adev->doorbell.num_doorbells == 0)
         return -EINVAL;
 
-    adev->doorbell.ptr = ioremap(adev->doorbell.base, adev->doorbell.num_doorbells * sizeof(u32));
-    if (adev->doorbell.ptr == NULL) {
+    adev->doorbell.ptr = ioremap(adev->doorbell.base,
+                                 adev->doorbell.num_doorbells *
+                                 sizeof(u32));
+    if (adev->doorbell.ptr == NULL)
         return -ENOMEM;
-    }
-    DRM_INFO("doorbell mmio base: 0x%08X\n", (uint32_t)adev->doorbell.base);
-    DRM_INFO("doorbell mmio size: %u\n", (unsigned)adev->doorbell.size);
 
     return 0;
 }
@@ -1584,9 +1582,6 @@ static int amdgpu_late_init(struct amdgpu_device *adev)
         }
     }
 
-    amdgpu_dpm_enable_uvd(adev, false);
-    amdgpu_dpm_enable_vce(adev, false);
-
     return 0;
 }
@@ -1854,7 +1849,6 @@ int amdgpu_device_init(struct amdgpu_device *adev,
     /* mutex initialization are all done here so we
      * can recall function without having locking issues */
-    mutex_init(&adev->vm_manager.lock);
     atomic_set(&adev->irq.ih.lock, 0);
     mutex_init(&adev->firmware.mutex);
     mutex_init(&adev->pm.mutex);
@@ -2071,7 +2065,8 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
     DRM_INFO("amdgpu: finishing device.\n");
     adev->shutdown = true;
-    drm_crtc_force_disable_all(adev->ddev);
+    if (adev->mode_info.mode_config_initialized)
+        drm_crtc_force_disable_all(adev->ddev);
     /* evict vram memory */
     amdgpu_bo_evict_vram(adev);
     amdgpu_ib_pool_fini(adev);
@@ -2146,7 +2141,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
         if (amdgpu_crtc->cursor_bo) {
             struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
-            r = amdgpu_bo_reserve(aobj, false);
+            r = amdgpu_bo_reserve(aobj, true);
             if (r == 0) {
                 amdgpu_bo_unpin(aobj);
                 amdgpu_bo_unreserve(aobj);
@@ -2159,7 +2154,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
         robj = gem_to_amdgpu_bo(rfb->obj);
         /* don't unpin kernel fb objects */
         if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
-            r = amdgpu_bo_reserve(robj, false);
+            r = amdgpu_bo_reserve(robj, true);
             if (r == 0) {
                 amdgpu_bo_unpin(robj);
                 amdgpu_bo_unreserve(robj);
@@ -2216,7 +2211,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
     struct drm_connector *connector;
     struct amdgpu_device *adev = dev->dev_private;
     struct drm_crtc *crtc;
-    int r;
+    int r = 0;
 
     if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
         return 0;
@@ -2228,11 +2223,8 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
         pci_set_power_state(dev->pdev, PCI_D0);
         pci_restore_state(dev->pdev);
         r = pci_enable_device(dev->pdev);
-        if (r) {
-            if (fbcon)
-                console_unlock();
-            return r;
-        }
+        if (r)
+            goto unlock;
     }
     if (adev->is_atom_fw)
         amdgpu_atomfirmware_scratch_regs_restore(adev);
@@ -2249,7 +2241,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
     r = amdgpu_resume(adev);
     if (r) {
         DRM_ERROR("amdgpu_resume failed (%d).\n", r);
-        return r;
+        goto unlock;
     }
     amdgpu_fence_driver_resume(adev);
 
@@ -2260,11 +2252,8 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
     }
 
     r = amdgpu_late_init(adev);
-    if (r) {
-        if (fbcon)
-            console_unlock();
-        return r;
-    }
+    if (r)
+        goto unlock;
 
     /* pin cursors */
     list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
@@ -2272,7 +2261,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
         if (amdgpu_crtc->cursor_bo) {
             struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
-            r = amdgpu_bo_reserve(aobj, false);
+            r = amdgpu_bo_reserve(aobj, true);
             if (r == 0) {
                 r = amdgpu_bo_pin(aobj,
                                   AMDGPU_GEM_DOMAIN_VRAM,
@@ -2314,12 +2303,14 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
     dev->dev->power.disable_depth--;
 #endif
 
-    if (fbcon) {
+    if (fbcon)
         amdgpu_fbdev_set_suspend(adev, 0);
+
+unlock:
+    if (fbcon)
         console_unlock();
-    }
 
-    return 0;
+    return r;
 }
 
 static bool amdgpu_check_soft_reset(struct amdgpu_device *adev)
@@ -2430,25 +2421,37 @@ static int amdgpu_recover_vram_from_shadow(struct amdgpu_device *adev,
     uint32_t domain;
     int r;
 
     if (!bo->shadow)
         return 0;
 
-    r = amdgpu_bo_reserve(bo, false);
+    r = amdgpu_bo_reserve(bo, true);
     if (r)
         return r;
     domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
     /* if bo has been evicted, then no need to recover */
     if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
+        r = amdgpu_bo_validate(bo->shadow);
+        if (r) {
+            DRM_ERROR("bo validate failed!\n");
+            goto err;
+        }
+
+        r = amdgpu_ttm_bind(&bo->shadow->tbo, &bo->shadow->tbo.mem);
+        if (r) {
+            DRM_ERROR("%p bind failed\n", bo->shadow);
+            goto err;
+        }
+
         r = amdgpu_bo_restore_from_shadow(adev, ring, bo,
                                           NULL, fence, true);
         if (r) {
             DRM_ERROR("recover page table failed!\n");
             goto err;
         }
     }
 err:
     amdgpu_bo_unreserve(bo);
     return r;
 }
 
 /**
@@ -2520,6 +2523,7 @@ int amdgpu_sriov_gpu_reset(struct amdgpu_device *adev, bool voluntary)
     ring = adev->mman.buffer_funcs_ring;
 
     mutex_lock(&adev->shadow_list_lock);
     list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
+        next = NULL;
         amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
         if (fence) {
             r = dma_fence_wait(fence, false);
@@ -2593,7 +2597,7 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
     for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
         struct amdgpu_ring *ring = adev->rings[i];
 
-        if (!ring)
+        if (!ring || !ring->sched.thread)
             continue;
         kthread_park(ring->sched.thread);
         amd_sched_hw_job_reset(&ring->sched);
@@ -2666,6 +2670,7 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
             DRM_INFO("recover vram bo from shadow\n");
             mutex_lock(&adev->shadow_list_lock);
             list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
+                next = NULL;
                 amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
                 if (fence) {
                     r = dma_fence_wait(fence, false);
@@ -2688,7 +2693,8 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
         }
         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
             struct amdgpu_ring *ring = adev->rings[i];
-            if (!ring)
+
+            if (!ring || !ring->sched.thread)
                 continue;
 
             amd_sched_job_recovery(&ring->sched);
@@ -2697,7 +2703,7 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
     } else {
         dev_err(adev->dev, "asic resume failed (%d).\n", r);
         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
-            if (adev->rings[i]) {
+            if (adev->rings[i] && adev->rings[i]->sched.thread) {
                 kthread_unpark(adev->rings[i]->sched.thread);
             }
         }
...
@@ -123,7 +123,7 @@ static void amdgpu_unpin_work_func(struct work_struct *__work)
     int r;
 
     /* unpin of the old buffer */
-    r = amdgpu_bo_reserve(work->old_abo, false);
+    r = amdgpu_bo_reserve(work->old_abo, true);
     if (likely(r == 0)) {
         r = amdgpu_bo_unpin(work->old_abo);
         if (unlikely(r != 0)) {
@@ -138,52 +138,11 @@ static void amdgpu_unpin_work_func(struct work_struct *__work)
     kfree(work);
 }
 
-static void amdgpu_flip_work_cleanup(struct amdgpu_flip_work *work)
-{
-    int i;
-
-    amdgpu_bo_unref(&work->old_abo);
-    dma_fence_put(work->excl);
-    for (i = 0; i < work->shared_count; ++i)
-        dma_fence_put(work->shared[i]);
-    kfree(work->shared);
-    kfree(work);
-}
-
-static void amdgpu_flip_cleanup_unreserve(struct amdgpu_flip_work *work,
-                                          struct amdgpu_bo *new_abo)
-{
-    amdgpu_bo_unreserve(new_abo);
-    amdgpu_flip_work_cleanup(work);
-}
-
-static void amdgpu_flip_cleanup_unpin(struct amdgpu_flip_work *work,
-                                      struct amdgpu_bo *new_abo)
-{
-    if (unlikely(amdgpu_bo_unpin(new_abo) != 0))
-        DRM_ERROR("failed to unpin new abo in error path\n");
-    amdgpu_flip_cleanup_unreserve(work, new_abo);
-}
-
-void amdgpu_crtc_cleanup_flip_ctx(struct amdgpu_flip_work *work,
-                                  struct amdgpu_bo *new_abo)
-{
-    if (unlikely(amdgpu_bo_reserve(new_abo, false) != 0)) {
-        DRM_ERROR("failed to reserve new abo in error path\n");
-        amdgpu_flip_work_cleanup(work);
-        return;
-    }
-    amdgpu_flip_cleanup_unpin(work, new_abo);
-}
-
-int amdgpu_crtc_prepare_flip(struct drm_crtc *crtc,
-                             struct drm_framebuffer *fb,
-                             struct drm_pending_vblank_event *event,
-                             uint32_t page_flip_flags,
-                             uint32_t target,
-                             struct amdgpu_flip_work **work_p,
-                             struct amdgpu_bo **new_abo_p)
+int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
+                                 struct drm_framebuffer *fb,
+                                 struct drm_pending_vblank_event *event,
+                                 uint32_t page_flip_flags, uint32_t target,
+                                 struct drm_modeset_acquire_ctx *ctx)
 {
     struct drm_device *dev = crtc->dev;
     struct amdgpu_device *adev = dev->dev_private;
@@ -196,7 +155,7 @@ int amdgpu_crtc_prepare_flip(struct drm_crtc *crtc,
     unsigned long flags;
     u64 tiling_flags;
     u64 base;
-    int r;
+    int i, r;
 
     work = kzalloc(sizeof *work, GFP_KERNEL);
     if (work == NULL)
@@ -257,80 +216,41 @@ int amdgpu_crtc_prepare_flip(struct drm_crtc *crtc,
         spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
         r = -EBUSY;
         goto pflip_cleanup;
     }
-    spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
-
-    *work_p = work;
-    *new_abo_p = new_abo;
-
-    return 0;
-
-pflip_cleanup:
-    amdgpu_crtc_cleanup_flip_ctx(work, new_abo);
-    return r;
-
-unpin:
-    amdgpu_flip_cleanup_unpin(work, new_abo);
-    return r;
-
-unreserve:
-    amdgpu_flip_cleanup_unreserve(work, new_abo);
-    return r;
-
-cleanup:
-    amdgpu_flip_work_cleanup(work);
-    return r;
-}
-
-void amdgpu_crtc_submit_flip(struct drm_crtc *crtc,
-                             struct drm_framebuffer *fb,
-                             struct amdgpu_flip_work *work,
-                             struct amdgpu_bo *new_abo)
-{
-    unsigned long flags;
-    struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-
-    spin_lock_irqsave(&crtc->dev->event_lock, flags);
     amdgpu_crtc->pflip_status = AMDGPU_FLIP_PENDING;
     amdgpu_crtc->pflip_works = work;
 
+
+    DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_PENDING, work: %p,\n",
+                     amdgpu_crtc->crtc_id, amdgpu_crtc, work);
     /* update crtc fb */
     crtc->primary->fb = fb;
     spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
-
-    DRM_DEBUG_DRIVER(
-        "crtc:%d[%p], pflip_stat:AMDGPU_FLIP_PENDING, work: %p,\n",
-        amdgpu_crtc->crtc_id, amdgpu_crtc, work);
-
     amdgpu_flip_work_func(&work->flip_work.work);
-}
-
-int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
-                                 struct drm_framebuffer *fb,
-                                 struct drm_pending_vblank_event *event,
-                                 uint32_t page_flip_flags,
-                                 uint32_t target,
-                                 struct drm_modeset_acquire_ctx *ctx)
-{
-    struct amdgpu_bo *new_abo;
-    struct amdgpu_flip_work *work;
-    int r;
+    return 0;
 
-    r = amdgpu_crtc_prepare_flip(crtc,
-                                 fb,
-                                 event,
-                                 page_flip_flags,
-                                 target,
-                                 &work,
-                                 &new_abo);
-    if (r)
-        return r;
+pflip_cleanup:
+    if (unlikely(amdgpu_bo_reserve(new_abo, false) != 0)) {
+        DRM_ERROR("failed to reserve new abo in error path\n");
+        goto cleanup;
+    }
+unpin:
+    if (unlikely(amdgpu_bo_unpin(new_abo) != 0)) {
+        DRM_ERROR("failed to unpin new abo in error path\n");
+    }
+unreserve:
+    amdgpu_bo_unreserve(new_abo);
 
-    amdgpu_crtc_submit_flip(crtc, fb, work, new_abo);
+cleanup:
+    amdgpu_bo_unref(&work->old_abo);
+    dma_fence_put(work->excl);
+    for (i = 0; i < work->shared_count; ++i)
+        dma_fence_put(work->shared[i]);
+    kfree(work->shared);
+    kfree(work);
 
-    return 0;
+    return r;
 }
 
 int amdgpu_crtc_set_config(struct drm_mode_set *set,
...
@@ -63,9 +63,11 @@
  * - 3.11.0 - Add support for sensor query info (clocks, temp, etc).
  * - 3.12.0 - Add query for double offchip LDS buffers
  * - 3.13.0 - Add PRT support
+ * - 3.14.0 - Fix race in amdgpu_ctx_get_fence() and note new functionality
+ * - 3.15.0 - Export more gpu info for gfx9
  */
 #define KMS_DRIVER_MAJOR	3
-#define KMS_DRIVER_MINOR	13
+#define KMS_DRIVER_MINOR	15
 #define KMS_DRIVER_PATCHLEVEL	0
 
 int amdgpu_vram_limit = 0;
@@ -453,7 +455,9 @@ static const struct pci_device_id pciidlist[] = {
     {0x1002, 0x6861, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
     {0x1002, 0x6862, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
     {0x1002, 0x6863, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
+    {0x1002, 0x6864, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
     {0x1002, 0x6867, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
+    {0x1002, 0x6868, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
     {0x1002, 0x686c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
     {0x1002, 0x687f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
     {0, 0, 0}
...
@@ -112,7 +112,7 @@ static void amdgpufb_destroy_pinned_object(struct drm_gem_object *gobj)
     struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
     int ret;
 
-    ret = amdgpu_bo_reserve(abo, false);
+    ret = amdgpu_bo_reserve(abo, true);
     if (likely(ret == 0)) {
         amdgpu_bo_kunmap(abo);
         amdgpu_bo_unpin(abo);
...
@@ -186,7 +186,7 @@ void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev)
     if (adev->gart.robj == NULL) {
         return;
     }
-    r = amdgpu_bo_reserve(adev->gart.robj, false);
+    r = amdgpu_bo_reserve(adev->gart.robj, true);
     if (likely(r == 0)) {
         amdgpu_bo_kunmap(adev->gart.robj);
         amdgpu_bo_unpin(adev->gart.robj);
...
@@ -139,6 +139,35 @@ int amdgpu_gem_object_open(struct drm_gem_object *obj,
     return 0;
 }
 
+static int amdgpu_gem_vm_check(void *param, struct amdgpu_bo *bo)
+{
+    /* if anything is swapped out don't swap it in here,
+       just abort and wait for the next CS */
+    if (!amdgpu_bo_gpu_accessible(bo))
+        return -ERESTARTSYS;
+
+    if (bo->shadow && !amdgpu_bo_gpu_accessible(bo->shadow))
+        return -ERESTARTSYS;
+
+    return 0;
+}
+
+static bool amdgpu_gem_vm_ready(struct amdgpu_device *adev,
+                                struct amdgpu_vm *vm,
+                                struct list_head *list)
+{
+    struct ttm_validate_buffer *entry;
+
+    list_for_each_entry(entry, list, head) {
+        struct amdgpu_bo *bo =
+            container_of(entry->bo, struct amdgpu_bo, tbo);
+        if (amdgpu_gem_vm_check(NULL, bo))
+            return false;
+    }
+
+    return !amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_gem_vm_check, NULL);
+}
+
 void amdgpu_gem_object_close(struct drm_gem_object *obj,
                              struct drm_file *file_priv)
 {
@@ -148,15 +177,13 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
     struct amdgpu_vm *vm = &fpriv->vm;
 
     struct amdgpu_bo_list_entry vm_pd;
-    struct list_head list, duplicates;
+    struct list_head list;
     struct ttm_validate_buffer tv;
     struct ww_acquire_ctx ticket;
     struct amdgpu_bo_va *bo_va;
-    struct dma_fence *fence = NULL;
     int r;
 
     INIT_LIST_HEAD(&list);
-    INIT_LIST_HEAD(&duplicates);
 
     tv.bo = &bo->tbo;
     tv.shared = true;
@@ -164,16 +191,18 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
     amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);
 
-    r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
+    r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
     if (r) {
         dev_err(adev->dev, "leaking bo va because "
             "we fail to reserve bo (%d)\n", r);
         return;
     }
     bo_va = amdgpu_vm_bo_find(vm, bo);
-    if (bo_va) {
-        if (--bo_va->ref_count == 0) {
-            amdgpu_vm_bo_rmv(adev, bo_va);
+    if (bo_va && --bo_va->ref_count == 0) {
+        amdgpu_vm_bo_rmv(adev, bo_va);
+
+        if (amdgpu_gem_vm_ready(adev, vm, &list)) {
+            struct dma_fence *fence = NULL;
 
             r = amdgpu_vm_clear_freed(adev, vm, &fence);
             if (unlikely(r)) {
@@ -502,19 +531,6 @@ int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
     return r;
 }
 
-static int amdgpu_gem_va_check(void *param, struct amdgpu_bo *bo)
-{
-    /* if anything is swapped out don't swap it in here,
-       just abort and wait for the next CS */
-    if (!amdgpu_bo_gpu_accessible(bo))
-        return -ERESTARTSYS;
-
-    if (bo->shadow && !amdgpu_bo_gpu_accessible(bo->shadow))
-        return -ERESTARTSYS;
-
-    return 0;
-}
-
 /**
  * amdgpu_gem_va_update_vm -update the bo_va in its VM
  *
@@ -533,19 +549,9 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
                                     struct list_head *list,
                                     uint32_t operation)
 {
-    struct ttm_validate_buffer *entry;
     int r = -ERESTARTSYS;
 
-    list_for_each_entry(entry, list, head) {
-        struct amdgpu_bo *bo =
-            container_of(entry->bo, struct amdgpu_bo, tbo);
-        if (amdgpu_gem_va_check(NULL, bo))
-            goto error;
-    }
-
-    r = amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_gem_va_check,
-                                  NULL);
-    if (r)
+    if (!amdgpu_gem_vm_ready(adev, vm, list))
         goto error;
 
     r = amdgpu_vm_update_directories(adev, vm);
...
@@ -134,6 +134,15 @@ int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
     return r;
 }
 
+void amdgpu_gtt_mgr_print(struct seq_file *m, struct ttm_mem_type_manager *man)
+{
+    struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
+    struct amdgpu_gtt_mgr *mgr = man->priv;
+
+    seq_printf(m, "man size:%llu pages, gtt available:%llu pages, usage:%lluMB\n",
+               man->size, mgr->available, (u64)atomic64_read(&adev->gtt_usage) >> 20);
+}
+
 /**
  * amdgpu_gtt_mgr_new - allocate a new node
  *
...
@@ -160,6 +160,8 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
         dev_err(adev->dev, "scheduling IB failed (%d).\n", r);
         return r;
     }
+    if (ring->funcs->emit_pipeline_sync && job && job->need_pipeline_sync)
+        amdgpu_ring_emit_pipeline_sync(ring);
 
     if (vm) {
         r = amdgpu_vm_flush(ring, job);
@@ -217,7 +219,8 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
     if (r) {
         dev_err(adev->dev, "failed to emit fence (%d)\n", r);
         if (job && job->vm_id)
-            amdgpu_vm_reset_id(adev, job->vm_id);
+            amdgpu_vm_reset_id(adev, ring->funcs->vmhub,
+                               job->vm_id);
         amdgpu_ring_undo(ring);
         return r;
     }
...
@@ -57,6 +57,7 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
     (*job)->vm = vm;
     (*job)->ibs = (void *)&(*job)[1];
     (*job)->num_ibs = num_ibs;
+    (*job)->need_pipeline_sync = false;
 
     amdgpu_sync_create(&(*job)->sync);
 
@@ -139,7 +140,7 @@ static struct dma_fence *amdgpu_job_dependency(struct amd_sched_job *sched_job)
     struct dma_fence *fence = amdgpu_sync_get_fence(&job->sync);
 
-    if (fence == NULL && vm && !job->vm_id) {
+    while (fence == NULL && vm && !job->vm_id) {
         struct amdgpu_ring *ring = job->ring;
         int r;
 
@@ -152,6 +153,9 @@ static struct dma_fence *amdgpu_job_dependency(struct amd_sched_job *sched_job)
         fence = amdgpu_sync_get_fence(&job->sync);
     }
 
+    if (amd_sched_dependency_optimized(fence, sched_job->s_entity))
+        job->need_pipeline_sync = true;
+
     return fence;
 }
...
@@ -545,11 +545,22 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
             adev->gfx.config.double_offchip_lds_buf;
 
         if (amdgpu_ngg) {
-            dev_info.prim_buf_gpu_addr = adev->gfx.ngg.buf[PRIM].gpu_addr;
-            dev_info.pos_buf_gpu_addr = adev->gfx.ngg.buf[POS].gpu_addr;
-            dev_info.cntl_sb_buf_gpu_addr = adev->gfx.ngg.buf[CNTL].gpu_addr;
-            dev_info.param_buf_gpu_addr = adev->gfx.ngg.buf[PARAM].gpu_addr;
+            dev_info.prim_buf_gpu_addr = adev->gfx.ngg.buf[NGG_PRIM].gpu_addr;
+            dev_info.prim_buf_size = adev->gfx.ngg.buf[NGG_PRIM].size;
+            dev_info.pos_buf_gpu_addr = adev->gfx.ngg.buf[NGG_POS].gpu_addr;
+            dev_info.pos_buf_size = adev->gfx.ngg.buf[NGG_POS].size;
+            dev_info.cntl_sb_buf_gpu_addr = adev->gfx.ngg.buf[NGG_CNTL].gpu_addr;
+            dev_info.cntl_sb_buf_size = adev->gfx.ngg.buf[NGG_CNTL].size;
+            dev_info.param_buf_gpu_addr = adev->gfx.ngg.buf[NGG_PARAM].gpu_addr;
+            dev_info.param_buf_size = adev->gfx.ngg.buf[NGG_PARAM].size;
         }
+        dev_info.wave_front_size = adev->gfx.cu_info.wave_front_size;
+        dev_info.num_shader_visible_vgprs = adev->gfx.config.max_gprs;
+        dev_info.num_cu_per_sh = adev->gfx.config.max_cu_per_sh;
+        dev_info.num_tcc_blocks = adev->gfx.config.max_texture_channel_caches;
+        dev_info.gs_vgt_table_depth = adev->gfx.config.gs_vgt_table_depth;
+        dev_info.gs_prim_buffer_depth = adev->gfx.config.gs_prim_buffer_depth;
+        dev_info.max_gs_waves_per_vgt = adev->gfx.config.max_gs_threads;
 
         return copy_to_user(out, &dev_info,
                             min((size_t)size, sizeof(dev_info))) ? -EFAULT : 0;
@@ -810,7 +821,7 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
     if (amdgpu_sriov_vf(adev)) {
         /* TODO: how to handle reserve failure */
-        BUG_ON(amdgpu_bo_reserve(adev->virt.csa_obj, false));
+        BUG_ON(amdgpu_bo_reserve(adev->virt.csa_obj, true));
         amdgpu_vm_bo_rmv(adev, fpriv->vm.csa_bo_va);
         fpriv->vm.csa_bo_va = NULL;
         amdgpu_bo_unreserve(adev->virt.csa_obj);
...
@@ -597,21 +597,6 @@ int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
                                  struct drm_pending_vblank_event *event,
                                  uint32_t page_flip_flags, uint32_t target,
                                  struct drm_modeset_acquire_ctx *ctx);
-void amdgpu_crtc_cleanup_flip_ctx(struct amdgpu_flip_work *work,
-                                  struct amdgpu_bo *new_abo);
-int amdgpu_crtc_prepare_flip(struct drm_crtc *crtc,
-                             struct drm_framebuffer *fb,
-                             struct drm_pending_vblank_event *event,
-                             uint32_t page_flip_flags,
-                             uint32_t target,
-                             struct amdgpu_flip_work **work,
-                             struct amdgpu_bo **new_abo);
-void amdgpu_crtc_submit_flip(struct drm_crtc *crtc,
-                             struct drm_framebuffer *fb,
-                             struct amdgpu_flip_work *work,
-                             struct amdgpu_bo *new_abo);
-
 extern const struct drm_mode_config_funcs amdgpu_mode_funcs;
 
 #endif
@@ -295,7 +295,7 @@ void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
     if (*bo == NULL)
         return;
 
-    if (likely(amdgpu_bo_reserve(*bo, false) == 0)) {
+    if (likely(amdgpu_bo_reserve(*bo, true) == 0)) {
         if (cpu_addr)
             amdgpu_bo_kunmap(*bo);
 
@@ -543,6 +543,27 @@ int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
     return r;
 }
 
+int amdgpu_bo_validate(struct amdgpu_bo *bo)
+{
+    uint32_t domain;
+    int r;
+
+    if (bo->pin_count)
+        return 0;
+
+    domain = bo->prefered_domains;
+
+retry:
+    amdgpu_ttm_placement_from_domain(bo, domain);
+    r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+    if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
+        domain = bo->allowed_domains;
+        goto retry;
+    }
+
+    return r;
+}
+
 int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev,
                                   struct amdgpu_ring *ring,
                                   struct amdgpu_bo *bo,
...
@@ -175,6 +175,7 @@ int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
                                struct amdgpu_bo *bo,
                                struct reservation_object *resv,
                                struct dma_fence **fence, bool direct);
+int amdgpu_bo_validate(struct amdgpu_bo *bo);
 int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev,
                                   struct amdgpu_ring *ring,
                                   struct amdgpu_bo *bo,
...
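The new amdgpu_bo_validate() first tries the BO's preferred domains and falls back to the allowed domains on -ENOMEM. A minimal caller sketch, modeled on the shadow-recovery path earlier in this diff; it assumes the BO and its shadow already exist and trims error reporting:

/* Illustrative only: revalidate a shadow BO under its parent's reservation. */
static int example_revalidate_shadow(struct amdgpu_bo *bo)
{
    int r;

    r = amdgpu_bo_reserve(bo, true); /* no_intr = true, as in the cleanup paths above */
    if (r)
        return r;

    r = amdgpu_bo_validate(bo->shadow); /* retries with allowed_domains on -ENOMEM */

    amdgpu_bo_unreserve(bo);
    return r;
}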
@@ -867,8 +867,7 @@ static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
 
     pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
 
-    /* never 0 (full-speed), fuse or smc-controlled always */
-    return sprintf(buf, "%i\n", pwm_mode == FDO_PWM_MODE_STATIC ? 1 : 2);
+    return sprintf(buf, "%i\n", pwm_mode);
 }
 
 static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
@@ -887,14 +886,7 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
     if (err)
         return err;
 
-    switch (value) {
-    case 1: /* manual, percent-based */
-        amdgpu_dpm_set_fan_control_mode(adev, FDO_PWM_MODE_STATIC);
-        break;
-    default: /* disable */
-        amdgpu_dpm_set_fan_control_mode(adev, 0);
-        break;
-    }
+    amdgpu_dpm_set_fan_control_mode(adev, value);
 
     return count;
 }
...
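With the refined callbacks, pwm1_enable now passes the dpm fan-control mode through directly instead of collapsing it to 1 or 2. Under the standard hwmon sysfs convention (1 = manual PWM, 2 = automatic), a user-space reader might look like the sketch below; the hwmon0 node path is an assumption and must be resolved per device in real code.

/* Illustrative only: read the amdgpu fan-control mode from sysfs. */
#include <stdio.h>

int main(void)
{
    const char *path = "/sys/class/hwmon/hwmon0/pwm1_enable"; /* assumed node */
    FILE *f = fopen(path, "r");
    int mode = -1;

    if (!f) {
        perror("pwm1_enable");
        return 1;
    }
    if (fscanf(f, "%d", &mode) != 1)
        mode = -1;
    fclose(f);

    /* 1 = manual PWM control, 2 = automatic (hwmon convention) */
    printf("fan control mode: %d (%s)\n", mode,
           mode == 1 ? "manual" : mode == 2 ? "automatic" : "other");
    return 0;
}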
@@ -113,7 +113,7 @@ void amdgpu_gem_prime_unpin(struct drm_gem_object *obj)
     struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
     int ret = 0;
 
-    ret = amdgpu_bo_reserve(bo, false);
+    ret = amdgpu_bo_reserve(bo, true);
     if (unlikely(ret != 0))
         return;
...
...@@ -55,6 +55,8 @@ static int psp_sw_init(void *handle) ...@@ -55,6 +55,8 @@ static int psp_sw_init(void *handle)
psp->bootloader_load_sos = psp_v3_1_bootloader_load_sos; psp->bootloader_load_sos = psp_v3_1_bootloader_load_sos;
psp->prep_cmd_buf = psp_v3_1_prep_cmd_buf; psp->prep_cmd_buf = psp_v3_1_prep_cmd_buf;
psp->ring_init = psp_v3_1_ring_init; psp->ring_init = psp_v3_1_ring_init;
psp->ring_create = psp_v3_1_ring_create;
psp->ring_destroy = psp_v3_1_ring_destroy;
psp->cmd_submit = psp_v3_1_cmd_submit; psp->cmd_submit = psp_v3_1_cmd_submit;
psp->compare_sram_data = psp_v3_1_compare_sram_data; psp->compare_sram_data = psp_v3_1_compare_sram_data;
psp->smu_reload_quirk = psp_v3_1_smu_reload_quirk; psp->smu_reload_quirk = psp_v3_1_smu_reload_quirk;
@@ -152,11 +154,6 @@ static void psp_prep_tmr_cmd_buf(struct psp_gfx_cmd_resp *cmd,
static int psp_tmr_init(struct psp_context *psp)
{
    int ret;
-   struct psp_gfx_cmd_resp *cmd;
-
-   cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
-   if (!cmd)
-       return -ENOMEM;
    /*
     * Allocate 3M memory aligned to 1M from Frame Buffer (local
@@ -168,22 +165,30 @@ static int psp_tmr_init(struct psp_context *psp)
    ret = amdgpu_bo_create_kernel(psp->adev, 0x300000, 0x100000,
                                  AMDGPU_GEM_DOMAIN_VRAM,
                                  &psp->tmr_bo, &psp->tmr_mc_addr, &psp->tmr_buf);
-   if (ret)
-       goto failed;
+   return ret;
+}
+
+static int psp_tmr_load(struct psp_context *psp)
+{
+   int ret;
+   struct psp_gfx_cmd_resp *cmd;
+
+   cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
+   if (!cmd)
+       return -ENOMEM;
    psp_prep_tmr_cmd_buf(cmd, psp->tmr_mc_addr, 0x300000);
    ret = psp_cmd_submit_buf(psp, NULL, cmd,
                             psp->fence_buf_mc_addr, 1);
    if (ret)
-       goto failed_mem;
+       goto failed;
    kfree(cmd);
    return 0;
-failed_mem:
-   amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, &psp->tmr_buf);
failed:
    kfree(cmd);
    return ret;
@@ -203,104 +208,78 @@ static void psp_prep_asd_cmd_buf(struct psp_gfx_cmd_resp *cmd,
    cmd->cmd.cmd_load_ta.cmd_buf_len = shared_size;
}
-static int psp_asd_load(struct psp_context *psp)
+static int psp_asd_init(struct psp_context *psp)
{
    int ret;
-   struct amdgpu_bo *asd_bo, *asd_shared_bo;
-   uint64_t asd_mc_addr, asd_shared_mc_addr;
-   void *asd_buf, *asd_shared_buf;
-   struct psp_gfx_cmd_resp *cmd;
-
-   cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
-   if (!cmd)
-       return -ENOMEM;
    /*
     * Allocate 16k memory aligned to 4k from Frame Buffer (local
     * physical) for shared ASD <-> Driver
     */
-   ret = amdgpu_bo_create_kernel(psp->adev, PSP_ASD_SHARED_MEM_SIZE, PAGE_SIZE,
-                                 AMDGPU_GEM_DOMAIN_VRAM,
-                                 &asd_shared_bo, &asd_shared_mc_addr, &asd_buf);
-   if (ret)
-       goto failed;
-
-   /*
-    * Allocate 256k memory aligned to 4k from Frame Buffer (local
-    * physical) for ASD firmware
-    */
-   ret = amdgpu_bo_create_kernel(psp->adev, PSP_ASD_BIN_SIZE, PAGE_SIZE,
-                                 AMDGPU_GEM_DOMAIN_VRAM,
-                                 &asd_bo, &asd_mc_addr, &asd_buf);
-   if (ret)
-       goto failed_mem;
-
-   memcpy(asd_buf, psp->asd_start_addr, psp->asd_ucode_size);
+   ret = amdgpu_bo_create_kernel(psp->adev, PSP_ASD_SHARED_MEM_SIZE,
+                                 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
+                                 &psp->asd_shared_bo,
+                                 &psp->asd_shared_mc_addr,
+                                 &psp->asd_shared_buf);
+   return ret;
+}
+
+static int psp_asd_load(struct psp_context *psp)
+{
+   int ret;
+   struct psp_gfx_cmd_resp *cmd;
+
+   cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
+   if (!cmd)
+       return -ENOMEM;
+
+   memset(psp->fw_pri_buf, 0, PSP_1_MEG);
+   memcpy(psp->fw_pri_buf, psp->asd_start_addr, psp->asd_ucode_size);
-   psp_prep_asd_cmd_buf(cmd, asd_mc_addr, asd_shared_mc_addr,
+   psp_prep_asd_cmd_buf(cmd, psp->fw_pri_mc_addr, psp->asd_shared_mc_addr,
                         psp->asd_ucode_size, PSP_ASD_SHARED_MEM_SIZE);
    ret = psp_cmd_submit_buf(psp, NULL, cmd,
                             psp->fence_buf_mc_addr, 2);
-   if (ret)
-       goto failed_mem1;
-
-   amdgpu_bo_free_kernel(&asd_bo, &asd_mc_addr, &asd_buf);
-   amdgpu_bo_free_kernel(&asd_shared_bo, &asd_shared_mc_addr, &asd_shared_buf);
    kfree(cmd);
-   return 0;
-
-failed_mem1:
-   amdgpu_bo_free_kernel(&asd_bo, &asd_mc_addr, &asd_buf);
-failed_mem:
-   amdgpu_bo_free_kernel(&asd_shared_bo, &asd_shared_mc_addr, &asd_shared_buf);
-failed:
-   kfree(cmd);
    return ret;
}
-static int psp_load_fw(struct amdgpu_device *adev)
+static int psp_hw_start(struct psp_context *psp)
{
    int ret;
-   struct psp_gfx_cmd_resp *cmd;
-   int i;
-   struct amdgpu_firmware_info *ucode;
-   struct psp_context *psp = &adev->psp;
-
-   cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
-   if (!cmd)
-       return -ENOMEM;
    ret = psp_bootloader_load_sysdrv(psp);
    if (ret)
-       goto failed;
+       return ret;
    ret = psp_bootloader_load_sos(psp);
    if (ret)
-       goto failed;
-
-   ret = psp_ring_init(psp, PSP_RING_TYPE__KM);
-   if (ret)
-       goto failed;
+       return ret;
-   ret = amdgpu_bo_create_kernel(adev, PSP_FENCE_BUFFER_SIZE, PAGE_SIZE,
-                                 AMDGPU_GEM_DOMAIN_VRAM,
-                                 &psp->fence_buf_bo,
-                                 &psp->fence_buf_mc_addr,
-                                 &psp->fence_buf);
+   ret = psp_ring_create(psp, PSP_RING_TYPE__KM);
    if (ret)
-       goto failed;
-
-   memset(psp->fence_buf, 0, PSP_FENCE_BUFFER_SIZE);
+       return ret;
-   ret = psp_tmr_init(psp);
+   ret = psp_tmr_load(psp);
    if (ret)
-       goto failed_mem;
+       return ret;
    ret = psp_asd_load(psp);
    if (ret)
-       goto failed_mem;
+       return ret;
+
+   return 0;
+}
+
+static int psp_np_fw_load(struct psp_context *psp)
+{
+   int i, ret;
+   struct amdgpu_firmware_info *ucode;
+   struct amdgpu_device* adev = psp->adev;
    for (i = 0; i < adev->firmware.max_ucodes; i++) {
        ucode = &adev->firmware.ucode[i];
@@ -310,15 +289,21 @@ static int psp_load_fw(struct amdgpu_device *adev)
        if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&
            psp_smu_reload_quirk(psp))
            continue;
+       if (amdgpu_sriov_vf(adev) &&
+          (ucode->ucode_id == AMDGPU_UCODE_ID_SDMA0
+           || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA1
+           || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_G))
+           /*skip ucode loading in SRIOV VF */
+           continue;
-       ret = psp_prep_cmd_buf(ucode, cmd);
+       ret = psp_prep_cmd_buf(ucode, psp->cmd);
        if (ret)
-           goto failed_mem;
+           return ret;
-       ret = psp_cmd_submit_buf(psp, ucode, cmd,
+       ret = psp_cmd_submit_buf(psp, ucode, psp->cmd,
                                 psp->fence_buf_mc_addr, i + 3);
        if (ret)
-           goto failed_mem;
+           return ret;
#if 0
        /* check if firmware loaded sucessfully */
@@ -327,8 +312,59 @@ static int psp_load_fw(struct amdgpu_device *adev)
#endif
    }
-   amdgpu_bo_free_kernel(&psp->fence_buf_bo,
-                         &psp->fence_buf_mc_addr, &psp->fence_buf);
+   return 0;
+}
static int psp_load_fw(struct amdgpu_device *adev)
{
int ret;
struct psp_context *psp = &adev->psp;
struct psp_gfx_cmd_resp *cmd;
cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
if (!cmd)
return -ENOMEM;
psp->cmd = cmd;
ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG,
AMDGPU_GEM_DOMAIN_GTT,
&psp->fw_pri_bo,
&psp->fw_pri_mc_addr,
&psp->fw_pri_buf);
if (ret)
goto failed;
ret = amdgpu_bo_create_kernel(adev, PSP_FENCE_BUFFER_SIZE, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM,
&psp->fence_buf_bo,
&psp->fence_buf_mc_addr,
&psp->fence_buf);
if (ret)
goto failed_mem1;
memset(psp->fence_buf, 0, PSP_FENCE_BUFFER_SIZE);
ret = psp_ring_init(psp, PSP_RING_TYPE__KM);
if (ret)
goto failed_mem1;
ret = psp_tmr_init(psp);
if (ret)
goto failed_mem;
ret = psp_asd_init(psp);
if (ret)
goto failed_mem;
ret = psp_hw_start(psp);
if (ret)
goto failed_mem;
ret = psp_np_fw_load(psp);
if (ret)
goto failed_mem;
kfree(cmd); kfree(cmd);
return 0; return 0;
...@@ -336,6 +372,9 @@ static int psp_load_fw(struct amdgpu_device *adev) ...@@ -336,6 +372,9 @@ static int psp_load_fw(struct amdgpu_device *adev)
failed_mem: failed_mem:
amdgpu_bo_free_kernel(&psp->fence_buf_bo, amdgpu_bo_free_kernel(&psp->fence_buf_bo,
&psp->fence_buf_mc_addr, &psp->fence_buf); &psp->fence_buf_mc_addr, &psp->fence_buf);
failed_mem1:
amdgpu_bo_free_kernel(&psp->fw_pri_bo,
&psp->fw_pri_mc_addr, &psp->fw_pri_buf);
failed: failed:
kfree(cmd); kfree(cmd);
return ret; return ret;
...@@ -379,12 +418,24 @@ static int psp_hw_fini(void *handle) ...@@ -379,12 +418,24 @@ static int psp_hw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct psp_context *psp = &adev->psp; struct psp_context *psp = &adev->psp;
-   if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)
-       amdgpu_ucode_fini_bo(adev);
+   if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
+       return 0;
+
+   amdgpu_ucode_fini_bo(adev);
+   psp_ring_destroy(psp, PSP_RING_TYPE__KM);
if (psp->tmr_buf) if (psp->tmr_buf)
amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, &psp->tmr_buf); amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, &psp->tmr_buf);
if (psp->fw_pri_buf)
amdgpu_bo_free_kernel(&psp->fw_pri_bo,
&psp->fw_pri_mc_addr, &psp->fw_pri_buf);
if (psp->fence_buf_bo)
amdgpu_bo_free_kernel(&psp->fence_buf_bo,
&psp->fence_buf_mc_addr, &psp->fence_buf);
return 0; return 0;
} }
...@@ -397,18 +448,30 @@ static int psp_resume(void *handle) ...@@ -397,18 +448,30 @@ static int psp_resume(void *handle)
{ {
int ret; int ret;
struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct psp_context *psp = &adev->psp;
if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
return 0; return 0;
DRM_INFO("PSP is resuming...\n");
mutex_lock(&adev->firmware.mutex); mutex_lock(&adev->firmware.mutex);
-   ret = psp_load_fw(adev);
+   ret = psp_hw_start(psp);
    if (ret)
-       DRM_ERROR("PSP resume failed\n");
+       goto failed;
+
+   ret = psp_np_fw_load(psp);
+   if (ret)
+       goto failed;
    mutex_unlock(&adev->firmware.mutex);
+   return 0;
+failed:
+   DRM_ERROR("PSP resume failed\n");
+   mutex_unlock(&adev->firmware.mutex);
    return ret;
}
......
...@@ -30,8 +30,8 @@ ...@@ -30,8 +30,8 @@
#define PSP_FENCE_BUFFER_SIZE 0x1000 #define PSP_FENCE_BUFFER_SIZE 0x1000
#define PSP_CMD_BUFFER_SIZE 0x1000 #define PSP_CMD_BUFFER_SIZE 0x1000
-#define PSP_ASD_BIN_SIZE 0x40000
#define PSP_ASD_SHARED_MEM_SIZE 0x4000
+#define PSP_1_MEG 0x100000
enum psp_ring_type enum psp_ring_type
{ {
...@@ -57,6 +57,7 @@ struct psp_context ...@@ -57,6 +57,7 @@ struct psp_context
{ {
struct amdgpu_device *adev; struct amdgpu_device *adev;
struct psp_ring km_ring; struct psp_ring km_ring;
struct psp_gfx_cmd_resp *cmd;
int (*init_microcode)(struct psp_context *psp); int (*init_microcode)(struct psp_context *psp);
int (*bootloader_load_sysdrv)(struct psp_context *psp); int (*bootloader_load_sysdrv)(struct psp_context *psp);
...@@ -64,6 +65,9 @@ struct psp_context ...@@ -64,6 +65,9 @@ struct psp_context
int (*prep_cmd_buf)(struct amdgpu_firmware_info *ucode, int (*prep_cmd_buf)(struct amdgpu_firmware_info *ucode,
struct psp_gfx_cmd_resp *cmd); struct psp_gfx_cmd_resp *cmd);
int (*ring_init)(struct psp_context *psp, enum psp_ring_type ring_type); int (*ring_init)(struct psp_context *psp, enum psp_ring_type ring_type);
int (*ring_create)(struct psp_context *psp, enum psp_ring_type ring_type);
int (*ring_destroy)(struct psp_context *psp,
enum psp_ring_type ring_type);
int (*cmd_submit)(struct psp_context *psp, struct amdgpu_firmware_info *ucode, int (*cmd_submit)(struct psp_context *psp, struct amdgpu_firmware_info *ucode,
uint64_t cmd_buf_mc_addr, uint64_t fence_mc_addr, int index); uint64_t cmd_buf_mc_addr, uint64_t fence_mc_addr, int index);
bool (*compare_sram_data)(struct psp_context *psp, bool (*compare_sram_data)(struct psp_context *psp,
...@@ -71,6 +75,11 @@ struct psp_context ...@@ -71,6 +75,11 @@ struct psp_context
enum AMDGPU_UCODE_ID ucode_type); enum AMDGPU_UCODE_ID ucode_type);
bool (*smu_reload_quirk)(struct psp_context *psp); bool (*smu_reload_quirk)(struct psp_context *psp);
/* fence buffer */
struct amdgpu_bo *fw_pri_bo;
uint64_t fw_pri_mc_addr;
void *fw_pri_buf;
/* sos firmware */ /* sos firmware */
const struct firmware *sos_fw; const struct firmware *sos_fw;
uint32_t sos_fw_version; uint32_t sos_fw_version;
...@@ -85,12 +94,15 @@ struct psp_context ...@@ -85,12 +94,15 @@ struct psp_context
uint64_t tmr_mc_addr; uint64_t tmr_mc_addr;
void *tmr_buf; void *tmr_buf;
-   /* asd firmware */
+   /* asd firmware and buffer */
const struct firmware *asd_fw; const struct firmware *asd_fw;
uint32_t asd_fw_version; uint32_t asd_fw_version;
uint32_t asd_feature_version; uint32_t asd_feature_version;
uint32_t asd_ucode_size; uint32_t asd_ucode_size;
uint8_t *asd_start_addr; uint8_t *asd_start_addr;
struct amdgpu_bo *asd_shared_bo;
uint64_t asd_shared_mc_addr;
void *asd_shared_buf;
/* fence buffer */ /* fence buffer */
struct amdgpu_bo *fence_buf_bo; struct amdgpu_bo *fence_buf_bo;
...@@ -105,6 +117,8 @@ struct amdgpu_psp_funcs { ...@@ -105,6 +117,8 @@ struct amdgpu_psp_funcs {
#define psp_prep_cmd_buf(ucode, type) (psp)->prep_cmd_buf((ucode), (type)) #define psp_prep_cmd_buf(ucode, type) (psp)->prep_cmd_buf((ucode), (type))
#define psp_ring_init(psp, type) (psp)->ring_init((psp), (type)) #define psp_ring_init(psp, type) (psp)->ring_init((psp), (type))
#define psp_ring_create(psp, type) (psp)->ring_create((psp), (type))
#define psp_ring_destroy(psp, type) ((psp)->ring_destroy((psp), (type)))
#define psp_cmd_submit(psp, ucode, cmd_mc, fence_mc, index) \ #define psp_cmd_submit(psp, ucode, cmd_mc, fence_mc, index) \
(psp)->cmd_submit((psp), (ucode), (cmd_mc), (fence_mc), (index)) (psp)->cmd_submit((psp), (ucode), (cmd_mc), (fence_mc), (index))
#define psp_compare_sram_data(psp, ucode, type) \ #define psp_compare_sram_data(psp, ucode, type) \
......
...@@ -99,6 +99,7 @@ struct amdgpu_ring_funcs { ...@@ -99,6 +99,7 @@ struct amdgpu_ring_funcs {
uint32_t align_mask; uint32_t align_mask;
u32 nop; u32 nop;
bool support_64bit_ptrs; bool support_64bit_ptrs;
unsigned vmhub;
/* ring read/write ptr handling */ /* ring read/write ptr handling */
u64 (*get_rptr)(struct amdgpu_ring *ring); u64 (*get_rptr)(struct amdgpu_ring *ring);
...@@ -178,6 +179,7 @@ struct amdgpu_ring { ...@@ -178,6 +179,7 @@ struct amdgpu_ring {
unsigned cond_exe_offs; unsigned cond_exe_offs;
u64 cond_exe_gpu_addr; u64 cond_exe_gpu_addr;
volatile u32 *cond_exe_cpu_addr; volatile u32 *cond_exe_cpu_addr;
unsigned vm_inv_eng;
#if defined(CONFIG_DEBUG_FS) #if defined(CONFIG_DEBUG_FS)
struct dentry *ent; struct dentry *ent;
#endif #endif
......
...@@ -130,7 +130,7 @@ int amdgpu_sa_bo_manager_suspend(struct amdgpu_device *adev, ...@@ -130,7 +130,7 @@ int amdgpu_sa_bo_manager_suspend(struct amdgpu_device *adev,
return -EINVAL; return -EINVAL;
} }
-   r = amdgpu_bo_reserve(sa_manager->bo, false);
+   r = amdgpu_bo_reserve(sa_manager->bo, true);
if (!r) { if (!r) {
amdgpu_bo_kunmap(sa_manager->bo); amdgpu_bo_kunmap(sa_manager->bo);
amdgpu_bo_unpin(sa_manager->bo); amdgpu_bo_unpin(sa_manager->bo);
......
@@ -190,26 +190,29 @@ TRACE_EVENT(amdgpu_sched_run_job,
TRACE_EVENT(amdgpu_vm_grab_id,
-       TP_PROTO(struct amdgpu_vm *vm, int ring, struct amdgpu_job *job),
+       TP_PROTO(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
+                struct amdgpu_job *job),
        TP_ARGS(vm, ring, job),
        TP_STRUCT__entry(
                 __field(struct amdgpu_vm *, vm)
                 __field(u32, ring)
-                __field(u32, vmid)
+                __field(u32, vm_id)
+                __field(u32, vm_hub)
                 __field(u64, pd_addr)
                 __field(u32, needs_flush)
                 ),
        TP_fast_assign(
               __entry->vm = vm;
-              __entry->ring = ring;
-              __entry->vmid = job->vm_id;
+              __entry->ring = ring->idx;
+              __entry->vm_id = job->vm_id;
+              __entry->vm_hub = ring->funcs->vmhub,
               __entry->pd_addr = job->vm_pd_addr;
               __entry->needs_flush = job->vm_needs_flush;
               ),
-       TP_printk("vm=%p, ring=%u, id=%u, pd_addr=%010Lx needs_flush=%u",
-                 __entry->vm, __entry->ring, __entry->vmid,
-                 __entry->pd_addr, __entry->needs_flush)
+       TP_printk("vm=%p, ring=%u, id=%u, hub=%u, pd_addr=%010Lx needs_flush=%u",
+                 __entry->vm, __entry->ring, __entry->vm_id,
+                 __entry->vm_hub, __entry->pd_addr, __entry->needs_flush)
);
TRACE_EVENT(amdgpu_vm_bo_map,
@@ -331,21 +334,25 @@ TRACE_EVENT(amdgpu_vm_copy_ptes,
);
TRACE_EVENT(amdgpu_vm_flush,
-       TP_PROTO(uint64_t pd_addr, unsigned ring, unsigned id),
-       TP_ARGS(pd_addr, ring, id),
+       TP_PROTO(struct amdgpu_ring *ring, unsigned vm_id,
+                uint64_t pd_addr),
+       TP_ARGS(ring, vm_id, pd_addr),
        TP_STRUCT__entry(
-                __field(u64, pd_addr)
                 __field(u32, ring)
-                __field(u32, id)
+                __field(u32, vm_id)
+                __field(u32, vm_hub)
+                __field(u64, pd_addr)
                 ),
        TP_fast_assign(
+              __entry->ring = ring->idx;
+              __entry->vm_id = vm_id;
+              __entry->vm_hub = ring->funcs->vmhub;
               __entry->pd_addr = pd_addr;
-              __entry->ring = ring;
-              __entry->id = id;
               ),
-       TP_printk("ring=%u, id=%u, pd_addr=%010Lx",
-                 __entry->ring, __entry->id, __entry->pd_addr)
+       TP_printk("ring=%u, id=%u, hub=%u, pd_addr=%010Lx",
+                 __entry->ring, __entry->vm_id,
+                 __entry->vm_hub,__entry->pd_addr)
);
TRACE_EVENT(amdgpu_bo_list_set,
......
...@@ -203,7 +203,9 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo, ...@@ -203,7 +203,9 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
abo = container_of(bo, struct amdgpu_bo, tbo); abo = container_of(bo, struct amdgpu_bo, tbo);
switch (bo->mem.mem_type) { switch (bo->mem.mem_type) {
case TTM_PL_VRAM: case TTM_PL_VRAM:
-       if (adev->mman.buffer_funcs_ring->ready == false) {
+       if (adev->mman.buffer_funcs &&
+           adev->mman.buffer_funcs_ring &&
+           adev->mman.buffer_funcs_ring->ready == false) {
amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU); amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
} else { } else {
amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT); amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
...@@ -763,7 +765,7 @@ int amdgpu_ttm_recover_gart(struct amdgpu_device *adev) ...@@ -763,7 +765,7 @@ int amdgpu_ttm_recover_gart(struct amdgpu_device *adev)
{ {
struct amdgpu_ttm_tt *gtt, *tmp; struct amdgpu_ttm_tt *gtt, *tmp;
struct ttm_mem_reg bo_mem; struct ttm_mem_reg bo_mem;
-   uint32_t flags;
+   uint64_t flags;
int r; int r;
bo_mem.mem_type = TTM_PL_TT; bo_mem.mem_type = TTM_PL_TT;
...@@ -1038,11 +1040,17 @@ uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm, ...@@ -1038,11 +1040,17 @@ uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo, static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
const struct ttm_place *place) const struct ttm_place *place)
{ {
-   if (bo->mem.mem_type == TTM_PL_VRAM &&
-       bo->mem.start == AMDGPU_BO_INVALID_OFFSET) {
-       unsigned long num_pages = bo->mem.num_pages;
-       struct drm_mm_node *node = bo->mem.mm_node;
+   unsigned long num_pages = bo->mem.num_pages;
+   struct drm_mm_node *node = bo->mem.mm_node;
+
+   if (bo->mem.start != AMDGPU_BO_INVALID_OFFSET)
+       return ttm_bo_eviction_valuable(bo, place);
+
+   switch (bo->mem.mem_type) {
+   case TTM_PL_TT:
+       return true;
+   case TTM_PL_VRAM:
        /* Check each drm MM node individually */
        while (num_pages) {
            if (place->fpfn < (node->start + node->size) &&
@@ -1052,8 +1060,10 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
            num_pages -= node->size;
            ++node;
        }
+       break;
-       return false;
+   default:
+       break;
    }
    return ttm_bo_eviction_valuable(bo, place);
...@@ -1188,7 +1198,7 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev) ...@@ -1188,7 +1198,7 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
return; return;
amdgpu_ttm_debugfs_fini(adev); amdgpu_ttm_debugfs_fini(adev);
if (adev->stollen_vga_memory) { if (adev->stollen_vga_memory) {
-       r = amdgpu_bo_reserve(adev->stollen_vga_memory, false);
+       r = amdgpu_bo_reserve(adev->stollen_vga_memory, true);
if (r == 0) { if (r == 0) {
amdgpu_bo_unpin(adev->stollen_vga_memory); amdgpu_bo_unpin(adev->stollen_vga_memory);
amdgpu_bo_unreserve(adev->stollen_vga_memory); amdgpu_bo_unreserve(adev->stollen_vga_memory);
...@@ -1401,6 +1411,8 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo, ...@@ -1401,6 +1411,8 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
#if defined(CONFIG_DEBUG_FS) #if defined(CONFIG_DEBUG_FS)
extern void amdgpu_gtt_mgr_print(struct seq_file *m, struct ttm_mem_type_manager
*man);
static int amdgpu_mm_dump_table(struct seq_file *m, void *data) static int amdgpu_mm_dump_table(struct seq_file *m, void *data)
{ {
struct drm_info_node *node = (struct drm_info_node *)m->private; struct drm_info_node *node = (struct drm_info_node *)m->private;
...@@ -1414,11 +1426,17 @@ static int amdgpu_mm_dump_table(struct seq_file *m, void *data) ...@@ -1414,11 +1426,17 @@ static int amdgpu_mm_dump_table(struct seq_file *m, void *data)
spin_lock(&glob->lru_lock); spin_lock(&glob->lru_lock);
drm_mm_print(mm, &p); drm_mm_print(mm, &p);
spin_unlock(&glob->lru_lock); spin_unlock(&glob->lru_lock);
-   if (ttm_pl == TTM_PL_VRAM)
+   switch (ttm_pl) {
+   case TTM_PL_VRAM:
        seq_printf(m, "man size:%llu pages, ram usage:%lluMB, vis usage:%lluMB\n",
               adev->mman.bdev.man[ttm_pl].size,
               (u64)atomic64_read(&adev->vram_usage) >> 20,
               (u64)atomic64_read(&adev->vram_vis_usage) >> 20);
+       break;
+   case TTM_PL_TT:
+       amdgpu_gtt_mgr_print(m, &adev->mman.bdev.man[TTM_PL_TT]);
+       break;
+   }
return 0; return 0;
} }
......
...@@ -382,10 +382,14 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev) ...@@ -382,10 +382,14 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
* if SMU loaded firmware, it needn't add SMC, UVD, and VCE * if SMU loaded firmware, it needn't add SMC, UVD, and VCE
* ucode info here * ucode info here
*/ */
-   if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
-       adev->firmware.max_ucodes = AMDGPU_UCODE_ID_MAXIMUM - 4;
-   else
+   if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
+       if (amdgpu_sriov_vf(adev))
+           adev->firmware.max_ucodes = AMDGPU_UCODE_ID_MAXIMUM - 3;
+       else
+           adev->firmware.max_ucodes = AMDGPU_UCODE_ID_MAXIMUM - 4;
+   } else {
        adev->firmware.max_ucodes = AMDGPU_UCODE_ID_MAXIMUM;
+   }
for (i = 0; i < adev->firmware.max_ucodes; i++) { for (i = 0; i < adev->firmware.max_ucodes; i++) {
ucode = &adev->firmware.ucode[i]; ucode = &adev->firmware.ucode[i];
......
...@@ -955,11 +955,11 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring) ...@@ -955,11 +955,11 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
struct amdgpu_device *adev = ring->adev; struct amdgpu_device *adev = ring->adev;
uint32_t rptr = amdgpu_ring_get_rptr(ring); uint32_t rptr = amdgpu_ring_get_rptr(ring);
unsigned i; unsigned i;
-   int r;
+   int r, timeout = adev->usec_timeout;
-   /* TODO: remove it if VCE can work for sriov */
+   /* workaround VCE ring test slow issue for sriov*/
    if (amdgpu_sriov_vf(adev))
-       return 0;
+       timeout *= 10;
r = amdgpu_ring_alloc(ring, 16); r = amdgpu_ring_alloc(ring, 16);
if (r) { if (r) {
...@@ -970,13 +970,13 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring) ...@@ -970,13 +970,13 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, VCE_CMD_END); amdgpu_ring_write(ring, VCE_CMD_END);
amdgpu_ring_commit(ring); amdgpu_ring_commit(ring);
-   for (i = 0; i < adev->usec_timeout; i++) {
+   for (i = 0; i < timeout; i++) {
if (amdgpu_ring_get_rptr(ring) != rptr) if (amdgpu_ring_get_rptr(ring) != rptr)
break; break;
DRM_UDELAY(1); DRM_UDELAY(1);
} }
-   if (i < adev->usec_timeout) {
+   if (i < timeout) {
DRM_INFO("ring test on %d succeeded in %d usecs\n", DRM_INFO("ring test on %d succeeded in %d usecs\n",
ring->idx, i); ring->idx, i);
} else { } else {
...@@ -999,10 +999,6 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout) ...@@ -999,10 +999,6 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
struct dma_fence *fence = NULL; struct dma_fence *fence = NULL;
long r; long r;
-   /* TODO: remove it if VCE can work for sriov */
-   if (amdgpu_sriov_vf(ring->adev))
-       return 0;
/* skip vce ring1/2 ib test for now, since it's not reliable */ /* skip vce ring1/2 ib test for now, since it's not reliable */
if (ring != &ring->adev->vce.ring[0]) if (ring != &ring->adev->vce.ring[0])
return 0; return 0;
......
...@@ -225,3 +225,49 @@ int amdgpu_virt_reset_gpu(struct amdgpu_device *adev) ...@@ -225,3 +225,49 @@ int amdgpu_virt_reset_gpu(struct amdgpu_device *adev)
return 0; return 0;
} }
/**
* amdgpu_virt_alloc_mm_table() - alloc memory for mm table
* @amdgpu: amdgpu device.
* MM table is used by UVD and VCE for its initialization
* Return: Zero if allocate success.
*/
int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev)
{
int r;
if (!amdgpu_sriov_vf(adev) || adev->virt.mm_table.gpu_addr)
return 0;
r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM,
&adev->virt.mm_table.bo,
&adev->virt.mm_table.gpu_addr,
(void *)&adev->virt.mm_table.cpu_addr);
if (r) {
DRM_ERROR("failed to alloc mm table and error = %d.\n", r);
return r;
}
memset((void *)adev->virt.mm_table.cpu_addr, 0, PAGE_SIZE);
DRM_INFO("MM table gpu addr = 0x%llx, cpu addr = %p.\n",
adev->virt.mm_table.gpu_addr,
adev->virt.mm_table.cpu_addr);
return 0;
}
/**
* amdgpu_virt_free_mm_table() - free mm table memory
* @amdgpu: amdgpu device.
* Free MM table memory
*/
void amdgpu_virt_free_mm_table(struct amdgpu_device *adev)
{
if (!amdgpu_sriov_vf(adev) || !adev->virt.mm_table.gpu_addr)
return;
amdgpu_bo_free_kernel(&adev->virt.mm_table.bo,
&adev->virt.mm_table.gpu_addr,
(void *)&adev->virt.mm_table.cpu_addr);
adev->virt.mm_table.gpu_addr = 0;
}
...@@ -98,5 +98,7 @@ int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init); ...@@ -98,5 +98,7 @@ int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init);
int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init); int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init);
int amdgpu_virt_reset_gpu(struct amdgpu_device *adev); int amdgpu_virt_reset_gpu(struct amdgpu_device *adev);
int amdgpu_sriov_gpu_reset(struct amdgpu_device *adev, bool voluntary); int amdgpu_sriov_gpu_reset(struct amdgpu_device *adev, bool voluntary);
int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev);
void amdgpu_virt_free_mm_table(struct amdgpu_device *adev);
#endif #endif
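amdgpu_virt_alloc_mm_table() and amdgpu_virt_free_mm_table() above are meant to be called as a pair from an IP block's setup and teardown paths; both check amdgpu_sriov_vf() themselves and are no-ops on bare metal. A minimal sketch of that pairing, using hypothetical callback names (my_ip_sw_init/my_ip_sw_fini are illustrative and not taken from the patch):

/* Illustrative only: pairing the MM-table helpers in an IP block. */
static int my_ip_sw_init(void *handle)
{
    struct amdgpu_device *adev = (struct amdgpu_device *)handle;

    /* Allocates one page of VRAM under SR-IOV, does nothing otherwise. */
    return amdgpu_virt_alloc_mm_table(adev);
}

static int my_ip_sw_fini(void *handle)
{
    struct amdgpu_device *adev = (struct amdgpu_device *)handle;

    amdgpu_virt_free_mm_table(adev);
    return 0;
}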
@@ -406,6 +406,8 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
              struct amdgpu_job *job)
{
    struct amdgpu_device *adev = ring->adev;
+   unsigned vmhub = ring->funcs->vmhub;
+   struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
    uint64_t fence_context = adev->fence_context + ring->idx;
    struct dma_fence *updates = sync->last_vm_update;
    struct amdgpu_vm_id *id, *idle;
@@ -413,16 +415,15 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
    unsigned i;
    int r = 0;
-   fences = kmalloc_array(sizeof(void *), adev->vm_manager.num_ids,
-                          GFP_KERNEL);
+   fences = kmalloc_array(sizeof(void *), id_mgr->num_ids, GFP_KERNEL);
    if (!fences)
        return -ENOMEM;
-   mutex_lock(&adev->vm_manager.lock);
+   mutex_lock(&id_mgr->lock);
    /* Check if we have an idle VMID */
    i = 0;
-   list_for_each_entry(idle, &adev->vm_manager.ids_lru, list) {
+   list_for_each_entry(idle, &id_mgr->ids_lru, list) {
        fences[i] = amdgpu_sync_peek_fence(&idle->active, ring);
        if (!fences[i])
            break;
@@ -430,7 +431,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
    }
    /* If we can't find a idle VMID to use, wait till one becomes available */
-   if (&idle->list == &adev->vm_manager.ids_lru) {
+   if (&idle->list == &id_mgr->ids_lru) {
        u64 fence_context = adev->vm_manager.fence_context + ring->idx;
        unsigned seqno = ++adev->vm_manager.seqno[ring->idx];
        struct dma_fence_array *array;
@@ -455,25 +456,19 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
        if (r)
            goto error;
-       mutex_unlock(&adev->vm_manager.lock);
+       mutex_unlock(&id_mgr->lock);
        return 0;
    }
    kfree(fences);
-   job->vm_needs_flush = true;
+   job->vm_needs_flush = false;
    /* Check if we can use a VMID already assigned to this VM */
-   i = ring->idx;
-   do {
+   list_for_each_entry_reverse(id, &id_mgr->ids_lru, list) {
        struct dma_fence *flushed;
+       bool needs_flush = false;
-       id = vm->ids[i++];
-       if (i == AMDGPU_MAX_RINGS)
-           i = 0;
        /* Check all the prerequisites to using this VMID */
-       if (!id)
-           continue;
        if (amdgpu_vm_had_gpu_reset(adev, id))
            continue;
@@ -483,16 +478,17 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
        if (job->vm_pd_addr != id->pd_gpu_addr)
            continue;
-       if (!id->last_flush)
-           continue;
-
-       if (id->last_flush->context != fence_context &&
-           !dma_fence_is_signaled(id->last_flush))
-           continue;
+       if (!id->last_flush ||
+           (id->last_flush->context != fence_context &&
+            !dma_fence_is_signaled(id->last_flush)))
+           needs_flush = true;
        flushed = id->flushed_updates;
-       if (updates &&
-           (!flushed || dma_fence_is_later(updates, flushed)))
+       if (updates && (!flushed || dma_fence_is_later(updates, flushed)))
+           needs_flush = true;
+
+       /* Concurrent flushes are only possible starting with Vega10 */
+       if (adev->asic_type < CHIP_VEGA10 && needs_flush)
            continue;
        /* Good we can use this VMID. Remember this submission as
@@ -502,17 +498,17 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
        if (r)
            goto error;
-       list_move_tail(&id->list, &adev->vm_manager.ids_lru);
-       vm->ids[ring->idx] = id;
-
-       job->vm_id = id - adev->vm_manager.ids;
-       job->vm_needs_flush = false;
-       trace_amdgpu_vm_grab_id(vm, ring->idx, job);
+       if (updates && (!flushed || dma_fence_is_later(updates, flushed))) {
+           dma_fence_put(id->flushed_updates);
+           id->flushed_updates = dma_fence_get(updates);
+       }
-       mutex_unlock(&adev->vm_manager.lock);
-       return 0;
+       if (needs_flush)
+           goto needs_flush;
+       else
+           goto no_flush_needed;
-   } while (i != ring->idx);
+   };
    /* Still no ID to use? Then use the idle one found earlier */
    id = idle;
@@ -522,23 +518,25 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
    if (r)
        goto error;
-   dma_fence_put(id->last_flush);
-   id->last_flush = NULL;
+   id->pd_gpu_addr = job->vm_pd_addr;
    dma_fence_put(id->flushed_updates);
    id->flushed_updates = dma_fence_get(updates);
-   id->pd_gpu_addr = job->vm_pd_addr;
    id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter);
-   list_move_tail(&id->list, &adev->vm_manager.ids_lru);
    atomic64_set(&id->owner, vm->client_id);
-   vm->ids[ring->idx] = id;
-
-   job->vm_id = id - adev->vm_manager.ids;
-   trace_amdgpu_vm_grab_id(vm, ring->idx, job);
+needs_flush:
+   job->vm_needs_flush = true;
+   dma_fence_put(id->last_flush);
+   id->last_flush = NULL;
+
+no_flush_needed:
+   list_move_tail(&id->list, &id_mgr->ids_lru);
+
+   job->vm_id = id - id_mgr->ids;
+   trace_amdgpu_vm_grab_id(vm, ring, job);
error:
-   mutex_unlock(&adev->vm_manager.lock);
+   mutex_unlock(&id_mgr->lock);
    return r;
}
@@ -590,7 +588,9 @@ static u64 amdgpu_vm_adjust_mc_addr(struct amdgpu_device *adev, u64 mc_addr)
int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job)
{
    struct amdgpu_device *adev = ring->adev;
-   struct amdgpu_vm_id *id = &adev->vm_manager.ids[job->vm_id];
+   unsigned vmhub = ring->funcs->vmhub;
+   struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
+   struct amdgpu_vm_id *id = &id_mgr->ids[job->vm_id];
    bool gds_switch_needed = ring->funcs->emit_gds_switch && (
        id->gds_base != job->gds_base ||
        id->gds_size != job->gds_size ||
@@ -614,24 +614,24 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job)
    if (ring->funcs->init_cond_exec)
        patch_offset = amdgpu_ring_init_cond_exec(ring);
-   if (ring->funcs->emit_pipeline_sync)
+   if (ring->funcs->emit_pipeline_sync && !job->need_pipeline_sync)
        amdgpu_ring_emit_pipeline_sync(ring);
    if (ring->funcs->emit_vm_flush && vm_flush_needed) {
        u64 pd_addr = amdgpu_vm_adjust_mc_addr(adev, job->vm_pd_addr);
        struct dma_fence *fence;
-       trace_amdgpu_vm_flush(pd_addr, ring->idx, job->vm_id);
+       trace_amdgpu_vm_flush(ring, job->vm_id, pd_addr);
        amdgpu_ring_emit_vm_flush(ring, job->vm_id, pd_addr);
        r = amdgpu_fence_emit(ring, &fence);
        if (r)
            return r;
-       mutex_lock(&adev->vm_manager.lock);
+       mutex_lock(&id_mgr->lock);
        dma_fence_put(id->last_flush);
        id->last_flush = fence;
-       mutex_unlock(&adev->vm_manager.lock);
+       mutex_unlock(&id_mgr->lock);
    }
    if (gds_switch_needed) {
@@ -666,9 +666,11 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job)
 *
 * Reset saved GDW, GWS and OA to force switch on next flush.
 */
-void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id)
+void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vmhub,
+                        unsigned vmid)
{
-   struct amdgpu_vm_id *id = &adev->vm_manager.ids[vm_id];
+   struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
+   struct amdgpu_vm_id *id = &id_mgr->ids[vmid];
    id->gds_base = 0;
    id->gds_size = 0;
...@@ -1336,6 +1338,12 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev, ...@@ -1336,6 +1338,12 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
flags &= ~AMDGPU_PTE_MTYPE_MASK; flags &= ~AMDGPU_PTE_MTYPE_MASK;
flags |= (mapping->flags & AMDGPU_PTE_MTYPE_MASK); flags |= (mapping->flags & AMDGPU_PTE_MTYPE_MASK);
if ((mapping->flags & AMDGPU_PTE_PRT) &&
(adev->asic_type >= CHIP_VEGA10)) {
flags |= AMDGPU_PTE_PRT;
flags &= ~AMDGPU_PTE_VALID;
}
trace_amdgpu_vm_bo_update(mapping); trace_amdgpu_vm_bo_update(mapping);
pfn = mapping->offset >> PAGE_SHIFT; pfn = mapping->offset >> PAGE_SHIFT;
...@@ -1629,8 +1637,9 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev, ...@@ -1629,8 +1637,9 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
struct amdgpu_bo_va_mapping, list); struct amdgpu_bo_va_mapping, list);
list_del(&mapping->list); list_del(&mapping->list);
-       r = amdgpu_vm_bo_split_mapping(adev, NULL, 0, NULL, vm, mapping,
-                                      0, 0, &f);
+       r = amdgpu_vm_bo_update_mapping(adev, NULL, 0, NULL, vm,
+                                       mapping->start, mapping->last,
+                                       0, 0, &f);
amdgpu_vm_free_mapping(adev, vm, mapping, f); amdgpu_vm_free_mapping(adev, vm, mapping, f);
if (r) { if (r) {
dma_fence_put(f); dma_fence_put(f);
...@@ -2117,10 +2126,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm) ...@@ -2117,10 +2126,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
unsigned ring_instance; unsigned ring_instance;
struct amdgpu_ring *ring; struct amdgpu_ring *ring;
struct amd_sched_rq *rq; struct amd_sched_rq *rq;
-   int i, r;
+   int r;
-   for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
-       vm->ids[i] = NULL;
vm->va = RB_ROOT; vm->va = RB_ROOT;
vm->client_id = atomic64_inc_return(&adev->vm_manager.client_counter); vm->client_id = atomic64_inc_return(&adev->vm_manager.client_counter);
spin_lock_init(&vm->status_lock); spin_lock_init(&vm->status_lock);
@@ -2241,16 +2248,21 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 */
void amdgpu_vm_manager_init(struct amdgpu_device *adev)
{
-   unsigned i;
+   unsigned i, j;
+
+   for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
+       struct amdgpu_vm_id_manager *id_mgr =
+           &adev->vm_manager.id_mgr[i];
-   INIT_LIST_HEAD(&adev->vm_manager.ids_lru);
+       mutex_init(&id_mgr->lock);
+       INIT_LIST_HEAD(&id_mgr->ids_lru);
    /* skip over VMID 0, since it is the system VM */
-   for (i = 1; i < adev->vm_manager.num_ids; ++i) {
-       amdgpu_vm_reset_id(adev, i);
-       amdgpu_sync_create(&adev->vm_manager.ids[i].active);
-       list_add_tail(&adev->vm_manager.ids[i].list,
-                     &adev->vm_manager.ids_lru);
+       for (j = 1; j < id_mgr->num_ids; ++j) {
+           amdgpu_vm_reset_id(adev, i, j);
+           amdgpu_sync_create(&id_mgr->ids[i].active);
+           list_add_tail(&id_mgr->ids[j].list, &id_mgr->ids_lru);
+       }
    }
adev->vm_manager.fence_context = adev->vm_manager.fence_context =
...@@ -2258,6 +2270,7 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev) ...@@ -2258,6 +2270,7 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
adev->vm_manager.seqno[i] = 0; adev->vm_manager.seqno[i] = 0;
atomic_set(&adev->vm_manager.vm_pte_next_ring, 0); atomic_set(&adev->vm_manager.vm_pte_next_ring, 0);
atomic64_set(&adev->vm_manager.client_counter, 0); atomic64_set(&adev->vm_manager.client_counter, 0);
spin_lock_init(&adev->vm_manager.prt_lock); spin_lock_init(&adev->vm_manager.prt_lock);
...@@ -2273,13 +2286,19 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev) ...@@ -2273,13 +2286,19 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
*/ */
void amdgpu_vm_manager_fini(struct amdgpu_device *adev) void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
{ {
-   unsigned i;
+   unsigned i, j;
-   for (i = 0; i < AMDGPU_NUM_VM; ++i) {
-       struct amdgpu_vm_id *id = &adev->vm_manager.ids[i];
-
-       amdgpu_sync_free(&adev->vm_manager.ids[i].active);
-       dma_fence_put(id->flushed_updates);
-       dma_fence_put(id->last_flush);
+   for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
+       struct amdgpu_vm_id_manager *id_mgr =
+           &adev->vm_manager.id_mgr[i];
+
+       mutex_destroy(&id_mgr->lock);
+       for (j = 0; j < AMDGPU_NUM_VM; ++j) {
+           struct amdgpu_vm_id *id = &id_mgr->ids[j];
+
+           amdgpu_sync_free(&id->active);
+           dma_fence_put(id->flushed_updates);
+           dma_fence_put(id->last_flush);
+       }
    }
} }
...@@ -65,7 +65,8 @@ struct amdgpu_bo_list_entry; ...@@ -65,7 +65,8 @@ struct amdgpu_bo_list_entry;
#define AMDGPU_PTE_FRAG(x) ((x & 0x1fULL) << 7) #define AMDGPU_PTE_FRAG(x) ((x & 0x1fULL) << 7)
-#define AMDGPU_PTE_PRT (1ULL << 63)
+/* TILED for VEGA10, reserved for older ASICs */
+#define AMDGPU_PTE_PRT (1ULL << 51)
/* VEGA10 only */ /* VEGA10 only */
#define AMDGPU_PTE_MTYPE(a) ((uint64_t)a << 57) #define AMDGPU_PTE_MTYPE(a) ((uint64_t)a << 57)
...@@ -114,9 +115,6 @@ struct amdgpu_vm { ...@@ -114,9 +115,6 @@ struct amdgpu_vm {
struct dma_fence *last_dir_update; struct dma_fence *last_dir_update;
uint64_t last_eviction_counter; uint64_t last_eviction_counter;
-   /* for id and flush management per ring */
-   struct amdgpu_vm_id *ids[AMDGPU_MAX_RINGS];
/* protecting freed */ /* protecting freed */
spinlock_t freed_lock; spinlock_t freed_lock;
...@@ -149,12 +147,16 @@ struct amdgpu_vm_id { ...@@ -149,12 +147,16 @@ struct amdgpu_vm_id {
uint32_t oa_size; uint32_t oa_size;
}; };
+struct amdgpu_vm_id_manager {
+   struct mutex lock;
+   unsigned num_ids;
+   struct list_head ids_lru;
+   struct amdgpu_vm_id ids[AMDGPU_NUM_VM];
+};
struct amdgpu_vm_manager {
    /* Handling of VMIDs */
-   struct mutex lock;
-   unsigned num_ids;
-   struct list_head ids_lru;
-   struct amdgpu_vm_id ids[AMDGPU_NUM_VM];
+   struct amdgpu_vm_id_manager id_mgr[AMDGPU_MAX_VMHUBS];
/* Handling of VM fences */ /* Handling of VM fences */
u64 fence_context; u64 fence_context;
...@@ -200,7 +202,8 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, ...@@ -200,7 +202,8 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
struct amdgpu_sync *sync, struct dma_fence *fence, struct amdgpu_sync *sync, struct dma_fence *fence,
struct amdgpu_job *job); struct amdgpu_job *job);
int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job); int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job);
-void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id);
+void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vmhub,
+                        unsigned vmid);
int amdgpu_vm_update_directories(struct amdgpu_device *adev, int amdgpu_vm_update_directories(struct amdgpu_device *adev,
struct amdgpu_vm *vm); struct amdgpu_vm *vm);
int amdgpu_vm_clear_freed(struct amdgpu_device *adev, int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
......
@@ -1267,30 +1267,33 @@ static int ci_dpm_set_fan_speed_percent(struct amdgpu_device *adev,
static void ci_dpm_set_fan_control_mode(struct amdgpu_device *adev, u32 mode)
{
-   if (mode) {
-       /* stop auto-manage */
+   switch (mode) {
+   case AMD_FAN_CTRL_NONE:
        if (adev->pm.dpm.fan.ucode_fan_control)
            ci_fan_ctrl_stop_smc_fan_control(adev);
-       ci_fan_ctrl_set_static_mode(adev, mode);
-   } else {
-       /* restart auto-manage */
+       ci_dpm_set_fan_speed_percent(adev, 100);
+       break;
+   case AMD_FAN_CTRL_MANUAL:
+       if (adev->pm.dpm.fan.ucode_fan_control)
+           ci_fan_ctrl_stop_smc_fan_control(adev);
+       break;
+   case AMD_FAN_CTRL_AUTO:
        if (adev->pm.dpm.fan.ucode_fan_control)
            ci_thermal_start_smc_fan_control(adev);
-       else
-           ci_fan_ctrl_set_default_mode(adev);
+       break;
+   default:
+       break;
    }
}
static u32 ci_dpm_get_fan_control_mode(struct amdgpu_device *adev)
{
    struct ci_power_info *pi = ci_get_pi(adev);
-   u32 tmp;
    if (pi->fan_is_controlled_by_smc)
-       return 0;
-
-   tmp = RREG32_SMC(ixCG_FDO_CTRL2) & CG_FDO_CTRL2__FDO_PWM_MODE_MASK;
-   return (tmp >> CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT);
+       return AMD_FAN_CTRL_AUTO;
+   else
+       return AMD_FAN_CTRL_MANUAL;
}
#if 0 #if 0
...@@ -3036,6 +3039,7 @@ static int ci_populate_single_memory_level(struct amdgpu_device *adev, ...@@ -3036,6 +3039,7 @@ static int ci_populate_single_memory_level(struct amdgpu_device *adev,
memory_clock, memory_clock,
&memory_level->MinVddcPhases); &memory_level->MinVddcPhases);
memory_level->EnabledForActivity = 1;
memory_level->EnabledForThrottle = 1; memory_level->EnabledForThrottle = 1;
memory_level->UpH = 0; memory_level->UpH = 0;
memory_level->DownH = 100; memory_level->DownH = 100;
...@@ -3468,8 +3472,6 @@ static int ci_populate_all_memory_levels(struct amdgpu_device *adev) ...@@ -3468,8 +3472,6 @@ static int ci_populate_all_memory_levels(struct amdgpu_device *adev)
return ret; return ret;
} }
pi->smc_state_table.MemoryLevel[0].EnabledForActivity = 1;
if ((dpm_table->mclk_table.count >= 2) && if ((dpm_table->mclk_table.count >= 2) &&
((adev->pdev->device == 0x67B0) || (adev->pdev->device == 0x67B1))) { ((adev->pdev->device == 0x67B0) || (adev->pdev->device == 0x67B1))) {
pi->smc_state_table.MemoryLevel[1].MinVddc = pi->smc_state_table.MemoryLevel[1].MinVddc =
......
...@@ -2230,7 +2230,7 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc, ...@@ -2230,7 +2230,7 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc,
if (!atomic && fb && fb != crtc->primary->fb) { if (!atomic && fb && fb != crtc->primary->fb) {
amdgpu_fb = to_amdgpu_framebuffer(fb); amdgpu_fb = to_amdgpu_framebuffer(fb);
abo = gem_to_amdgpu_bo(amdgpu_fb->obj); abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
-       r = amdgpu_bo_reserve(abo, false);
+       r = amdgpu_bo_reserve(abo, true);
if (unlikely(r != 0)) if (unlikely(r != 0))
return r; return r;
amdgpu_bo_unpin(abo); amdgpu_bo_unpin(abo);
...@@ -2589,7 +2589,7 @@ static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc, ...@@ -2589,7 +2589,7 @@ static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc,
unpin: unpin:
if (amdgpu_crtc->cursor_bo) { if (amdgpu_crtc->cursor_bo) {
struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo); struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
-       ret = amdgpu_bo_reserve(aobj, false);
+       ret = amdgpu_bo_reserve(aobj, true);
if (likely(ret == 0)) { if (likely(ret == 0)) {
amdgpu_bo_unpin(aobj); amdgpu_bo_unpin(aobj);
amdgpu_bo_unreserve(aobj); amdgpu_bo_unreserve(aobj);
...@@ -2720,7 +2720,7 @@ static void dce_v10_0_crtc_disable(struct drm_crtc *crtc) ...@@ -2720,7 +2720,7 @@ static void dce_v10_0_crtc_disable(struct drm_crtc *crtc)
amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb); amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
abo = gem_to_amdgpu_bo(amdgpu_fb->obj); abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
-       r = amdgpu_bo_reserve(abo, false);
+       r = amdgpu_bo_reserve(abo, true);
if (unlikely(r)) if (unlikely(r))
DRM_ERROR("failed to reserve abo before unpin\n"); DRM_ERROR("failed to reserve abo before unpin\n");
else { else {
......
...@@ -2214,7 +2214,7 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc, ...@@ -2214,7 +2214,7 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc,
if (!atomic && fb && fb != crtc->primary->fb) { if (!atomic && fb && fb != crtc->primary->fb) {
amdgpu_fb = to_amdgpu_framebuffer(fb); amdgpu_fb = to_amdgpu_framebuffer(fb);
abo = gem_to_amdgpu_bo(amdgpu_fb->obj); abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
-       r = amdgpu_bo_reserve(abo, false);
+       r = amdgpu_bo_reserve(abo, true);
if (unlikely(r != 0)) if (unlikely(r != 0))
return r; return r;
amdgpu_bo_unpin(abo); amdgpu_bo_unpin(abo);
...@@ -2609,7 +2609,7 @@ static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc, ...@@ -2609,7 +2609,7 @@ static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc,
unpin: unpin:
if (amdgpu_crtc->cursor_bo) { if (amdgpu_crtc->cursor_bo) {
struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo); struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
-       ret = amdgpu_bo_reserve(aobj, false);
+       ret = amdgpu_bo_reserve(aobj, true);
if (likely(ret == 0)) { if (likely(ret == 0)) {
amdgpu_bo_unpin(aobj); amdgpu_bo_unpin(aobj);
amdgpu_bo_unreserve(aobj); amdgpu_bo_unreserve(aobj);
...@@ -2740,7 +2740,7 @@ static void dce_v11_0_crtc_disable(struct drm_crtc *crtc) ...@@ -2740,7 +2740,7 @@ static void dce_v11_0_crtc_disable(struct drm_crtc *crtc)
amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb); amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
abo = gem_to_amdgpu_bo(amdgpu_fb->obj); abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
-       r = amdgpu_bo_reserve(abo, false);
+       r = amdgpu_bo_reserve(abo, true);
if (unlikely(r)) if (unlikely(r))
DRM_ERROR("failed to reserve abo before unpin\n"); DRM_ERROR("failed to reserve abo before unpin\n");
else { else {
......
...@@ -979,7 +979,7 @@ static void dce_v6_0_program_watermarks(struct amdgpu_device *adev, ...@@ -979,7 +979,7 @@ static void dce_v6_0_program_watermarks(struct amdgpu_device *adev,
u32 priority_a_mark = 0, priority_b_mark = 0; u32 priority_a_mark = 0, priority_b_mark = 0;
u32 priority_a_cnt = PRIORITY_OFF; u32 priority_a_cnt = PRIORITY_OFF;
u32 priority_b_cnt = PRIORITY_OFF; u32 priority_b_cnt = PRIORITY_OFF;
-   u32 tmp, arb_control3;
+   u32 tmp, arb_control3, lb_vblank_lead_lines = 0;
fixed20_12 a, b, c; fixed20_12 a, b, c;
if (amdgpu_crtc->base.enabled && num_heads && mode) { if (amdgpu_crtc->base.enabled && num_heads && mode) {
...@@ -1091,6 +1091,8 @@ static void dce_v6_0_program_watermarks(struct amdgpu_device *adev, ...@@ -1091,6 +1091,8 @@ static void dce_v6_0_program_watermarks(struct amdgpu_device *adev,
c.full = dfixed_div(c, a); c.full = dfixed_div(c, a);
priority_b_mark = dfixed_trunc(c); priority_b_mark = dfixed_trunc(c);
priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK; priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK;
lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
} }
/* select wm A */ /* select wm A */
...@@ -1120,6 +1122,9 @@ static void dce_v6_0_program_watermarks(struct amdgpu_device *adev, ...@@ -1120,6 +1122,9 @@ static void dce_v6_0_program_watermarks(struct amdgpu_device *adev,
/* save values for DPM */ /* save values for DPM */
amdgpu_crtc->line_time = line_time; amdgpu_crtc->line_time = line_time;
amdgpu_crtc->wm_high = latency_watermark_a; amdgpu_crtc->wm_high = latency_watermark_a;
/* Save number of lines the linebuffer leads before the scanout */
amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines;
} }
/* watermark setup */ /* watermark setup */
...@@ -1640,7 +1645,7 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc, ...@@ -1640,7 +1645,7 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
if (!atomic && fb && fb != crtc->primary->fb) { if (!atomic && fb && fb != crtc->primary->fb) {
amdgpu_fb = to_amdgpu_framebuffer(fb); amdgpu_fb = to_amdgpu_framebuffer(fb);
abo = gem_to_amdgpu_bo(amdgpu_fb->obj); abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
-       r = amdgpu_bo_reserve(abo, false);
+       r = amdgpu_bo_reserve(abo, true);
if (unlikely(r != 0)) if (unlikely(r != 0))
return r; return r;
amdgpu_bo_unpin(abo); amdgpu_bo_unpin(abo);
...@@ -1957,7 +1962,7 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc, ...@@ -1957,7 +1962,7 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
unpin: unpin:
if (amdgpu_crtc->cursor_bo) { if (amdgpu_crtc->cursor_bo) {
struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo); struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
-       ret = amdgpu_bo_reserve(aobj, false);
+       ret = amdgpu_bo_reserve(aobj, true);
if (likely(ret == 0)) { if (likely(ret == 0)) {
amdgpu_bo_unpin(aobj); amdgpu_bo_unpin(aobj);
amdgpu_bo_unreserve(aobj); amdgpu_bo_unreserve(aobj);
...@@ -2083,7 +2088,7 @@ static void dce_v6_0_crtc_disable(struct drm_crtc *crtc) ...@@ -2083,7 +2088,7 @@ static void dce_v6_0_crtc_disable(struct drm_crtc *crtc)
amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb); amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
abo = gem_to_amdgpu_bo(amdgpu_fb->obj); abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
-       r = amdgpu_bo_reserve(abo, false);
+       r = amdgpu_bo_reserve(abo, true);
if (unlikely(r)) if (unlikely(r))
DRM_ERROR("failed to reserve abo before unpin\n"); DRM_ERROR("failed to reserve abo before unpin\n");
else { else {
......
...@@ -2089,7 +2089,7 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc, ...@@ -2089,7 +2089,7 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
if (!atomic && fb && fb != crtc->primary->fb) { if (!atomic && fb && fb != crtc->primary->fb) {
amdgpu_fb = to_amdgpu_framebuffer(fb); amdgpu_fb = to_amdgpu_framebuffer(fb);
abo = gem_to_amdgpu_bo(amdgpu_fb->obj); abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
-       r = amdgpu_bo_reserve(abo, false);
+       r = amdgpu_bo_reserve(abo, true);
if (unlikely(r != 0)) if (unlikely(r != 0))
return r; return r;
amdgpu_bo_unpin(abo); amdgpu_bo_unpin(abo);
...@@ -2440,7 +2440,7 @@ static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc, ...@@ -2440,7 +2440,7 @@ static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc,
unpin: unpin:
if (amdgpu_crtc->cursor_bo) { if (amdgpu_crtc->cursor_bo) {
struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo); struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
-       ret = amdgpu_bo_reserve(aobj, false);
+       ret = amdgpu_bo_reserve(aobj, true);
if (likely(ret == 0)) { if (likely(ret == 0)) {
amdgpu_bo_unpin(aobj); amdgpu_bo_unpin(aobj);
amdgpu_bo_unreserve(aobj); amdgpu_bo_unreserve(aobj);
...@@ -2571,7 +2571,7 @@ static void dce_v8_0_crtc_disable(struct drm_crtc *crtc) ...@@ -2571,7 +2571,7 @@ static void dce_v8_0_crtc_disable(struct drm_crtc *crtc)
amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb); amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
abo = gem_to_amdgpu_bo(amdgpu_fb->obj); abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
-       r = amdgpu_bo_reserve(abo, false);
+       r = amdgpu_bo_reserve(abo, true);
if (unlikely(r)) if (unlikely(r))
DRM_ERROR("failed to reserve abo before unpin\n"); DRM_ERROR("failed to reserve abo before unpin\n");
else { else {
......
...@@ -248,7 +248,7 @@ static void dce_virtual_crtc_disable(struct drm_crtc *crtc) ...@@ -248,7 +248,7 @@ static void dce_virtual_crtc_disable(struct drm_crtc *crtc)
amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb); amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
abo = gem_to_amdgpu_bo(amdgpu_fb->obj); abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
-       r = amdgpu_bo_reserve(abo, false);
+       r = amdgpu_bo_reserve(abo, true);
if (unlikely(r)) if (unlikely(r))
DRM_ERROR("failed to reserve abo before unpin\n"); DRM_ERROR("failed to reserve abo before unpin\n");
else { else {
......
...@@ -1579,7 +1579,7 @@ static void gfx_v6_0_setup_spi(struct amdgpu_device *adev) ...@@ -1579,7 +1579,7 @@ static void gfx_v6_0_setup_spi(struct amdgpu_device *adev)
static void gfx_v6_0_config_init(struct amdgpu_device *adev) static void gfx_v6_0_config_init(struct amdgpu_device *adev)
{ {
-   adev->gfx.config.double_offchip_lds_buf = 1;
+   adev->gfx.config.double_offchip_lds_buf = 0;
} }
static void gfx_v6_0_gpu_init(struct amdgpu_device *adev) static void gfx_v6_0_gpu_init(struct amdgpu_device *adev)
...@@ -2437,7 +2437,7 @@ static void gfx_v6_0_rlc_fini(struct amdgpu_device *adev) ...@@ -2437,7 +2437,7 @@ static void gfx_v6_0_rlc_fini(struct amdgpu_device *adev)
int r; int r;
if (adev->gfx.rlc.save_restore_obj) { if (adev->gfx.rlc.save_restore_obj) {
-       r = amdgpu_bo_reserve(adev->gfx.rlc.save_restore_obj, false);
+       r = amdgpu_bo_reserve(adev->gfx.rlc.save_restore_obj, true);
if (unlikely(r != 0)) if (unlikely(r != 0))
dev_warn(adev->dev, "(%d) reserve RLC sr bo failed\n", r); dev_warn(adev->dev, "(%d) reserve RLC sr bo failed\n", r);
amdgpu_bo_unpin(adev->gfx.rlc.save_restore_obj); amdgpu_bo_unpin(adev->gfx.rlc.save_restore_obj);
...@@ -2448,7 +2448,7 @@ static void gfx_v6_0_rlc_fini(struct amdgpu_device *adev) ...@@ -2448,7 +2448,7 @@ static void gfx_v6_0_rlc_fini(struct amdgpu_device *adev)
} }
if (adev->gfx.rlc.clear_state_obj) { if (adev->gfx.rlc.clear_state_obj) {
-       r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
+       r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, true);
if (unlikely(r != 0)) if (unlikely(r != 0))
dev_warn(adev->dev, "(%d) reserve RLC c bo failed\n", r); dev_warn(adev->dev, "(%d) reserve RLC c bo failed\n", r);
amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj); amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
...@@ -2459,7 +2459,7 @@ static void gfx_v6_0_rlc_fini(struct amdgpu_device *adev) ...@@ -2459,7 +2459,7 @@ static void gfx_v6_0_rlc_fini(struct amdgpu_device *adev)
} }
if (adev->gfx.rlc.cp_table_obj) { if (adev->gfx.rlc.cp_table_obj) {
r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, false); r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, true);
if (unlikely(r != 0)) if (unlikely(r != 0))
dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r); dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r);
amdgpu_bo_unpin(adev->gfx.rlc.cp_table_obj); amdgpu_bo_unpin(adev->gfx.rlc.cp_table_obj);
...@@ -3292,7 +3292,7 @@ static int gfx_v6_0_sw_init(void *handle) ...@@ -3292,7 +3292,7 @@ static int gfx_v6_0_sw_init(void *handle)
ring->me = 1; ring->me = 1;
ring->pipe = i; ring->pipe = i;
ring->queue = i; ring->queue = i;
sprintf(ring->name, "comp %d.%d.%d", ring->me, ring->pipe, ring->queue); sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ring->pipe; irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ring->pipe;
r = amdgpu_ring_init(adev, ring, 1024, r = amdgpu_ring_init(adev, ring, 1024,
&adev->gfx.eop_irq, irq_type); &adev->gfx.eop_irq, irq_type);
......
...@@ -1935,7 +1935,7 @@ static void gfx_v7_0_gpu_init(struct amdgpu_device *adev) ...@@ -1935,7 +1935,7 @@ static void gfx_v7_0_gpu_init(struct amdgpu_device *adev)
INDEX_STRIDE, 3); INDEX_STRIDE, 3);
mutex_lock(&adev->srbm_mutex); mutex_lock(&adev->srbm_mutex);
for (i = 0; i < adev->vm_manager.num_ids; i++) { for (i = 0; i < adev->vm_manager.id_mgr[0].num_ids; i++) {
if (i == 0) if (i == 0)
sh_mem_base = 0; sh_mem_base = 0;
else else
...@@ -2792,7 +2792,7 @@ static void gfx_v7_0_cp_compute_fini(struct amdgpu_device *adev) ...@@ -2792,7 +2792,7 @@ static void gfx_v7_0_cp_compute_fini(struct amdgpu_device *adev)
struct amdgpu_ring *ring = &adev->gfx.compute_ring[i]; struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
if (ring->mqd_obj) { if (ring->mqd_obj) {
r = amdgpu_bo_reserve(ring->mqd_obj, false); r = amdgpu_bo_reserve(ring->mqd_obj, true);
if (unlikely(r != 0)) if (unlikely(r != 0))
dev_warn(adev->dev, "(%d) reserve MQD bo failed\n", r); dev_warn(adev->dev, "(%d) reserve MQD bo failed\n", r);
...@@ -2810,7 +2810,7 @@ static void gfx_v7_0_mec_fini(struct amdgpu_device *adev) ...@@ -2810,7 +2810,7 @@ static void gfx_v7_0_mec_fini(struct amdgpu_device *adev)
int r; int r;
if (adev->gfx.mec.hpd_eop_obj) { if (adev->gfx.mec.hpd_eop_obj) {
r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, false); r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, true);
if (unlikely(r != 0)) if (unlikely(r != 0))
dev_warn(adev->dev, "(%d) reserve HPD EOP bo failed\n", r); dev_warn(adev->dev, "(%d) reserve HPD EOP bo failed\n", r);
amdgpu_bo_unpin(adev->gfx.mec.hpd_eop_obj); amdgpu_bo_unpin(adev->gfx.mec.hpd_eop_obj);
...@@ -3359,7 +3359,7 @@ static void gfx_v7_0_rlc_fini(struct amdgpu_device *adev) ...@@ -3359,7 +3359,7 @@ static void gfx_v7_0_rlc_fini(struct amdgpu_device *adev)
/* save restore block */ /* save restore block */
if (adev->gfx.rlc.save_restore_obj) { if (adev->gfx.rlc.save_restore_obj) {
r = amdgpu_bo_reserve(adev->gfx.rlc.save_restore_obj, false); r = amdgpu_bo_reserve(adev->gfx.rlc.save_restore_obj, true);
if (unlikely(r != 0)) if (unlikely(r != 0))
dev_warn(adev->dev, "(%d) reserve RLC sr bo failed\n", r); dev_warn(adev->dev, "(%d) reserve RLC sr bo failed\n", r);
amdgpu_bo_unpin(adev->gfx.rlc.save_restore_obj); amdgpu_bo_unpin(adev->gfx.rlc.save_restore_obj);
...@@ -3371,7 +3371,7 @@ static void gfx_v7_0_rlc_fini(struct amdgpu_device *adev) ...@@ -3371,7 +3371,7 @@ static void gfx_v7_0_rlc_fini(struct amdgpu_device *adev)
/* clear state block */ /* clear state block */
if (adev->gfx.rlc.clear_state_obj) { if (adev->gfx.rlc.clear_state_obj) {
r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false); r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, true);
if (unlikely(r != 0)) if (unlikely(r != 0))
dev_warn(adev->dev, "(%d) reserve RLC c bo failed\n", r); dev_warn(adev->dev, "(%d) reserve RLC c bo failed\n", r);
amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj); amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
...@@ -3383,7 +3383,7 @@ static void gfx_v7_0_rlc_fini(struct amdgpu_device *adev) ...@@ -3383,7 +3383,7 @@ static void gfx_v7_0_rlc_fini(struct amdgpu_device *adev)
/* clear state block */ /* clear state block */
if (adev->gfx.rlc.cp_table_obj) { if (adev->gfx.rlc.cp_table_obj) {
r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, false); r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, true);
if (unlikely(r != 0)) if (unlikely(r != 0))
dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r); dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r);
amdgpu_bo_unpin(adev->gfx.rlc.cp_table_obj); amdgpu_bo_unpin(adev->gfx.rlc.cp_table_obj);
......
...@@ -1239,7 +1239,7 @@ static void gfx_v8_0_rlc_fini(struct amdgpu_device *adev) ...@@ -1239,7 +1239,7 @@ static void gfx_v8_0_rlc_fini(struct amdgpu_device *adev)
/* clear state block */ /* clear state block */
if (adev->gfx.rlc.clear_state_obj) { if (adev->gfx.rlc.clear_state_obj) {
r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false); r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, true);
if (unlikely(r != 0)) if (unlikely(r != 0))
dev_warn(adev->dev, "(%d) reserve RLC cbs bo failed\n", r); dev_warn(adev->dev, "(%d) reserve RLC cbs bo failed\n", r);
amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj); amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
...@@ -1250,7 +1250,7 @@ static void gfx_v8_0_rlc_fini(struct amdgpu_device *adev) ...@@ -1250,7 +1250,7 @@ static void gfx_v8_0_rlc_fini(struct amdgpu_device *adev)
/* jump table block */ /* jump table block */
if (adev->gfx.rlc.cp_table_obj) { if (adev->gfx.rlc.cp_table_obj) {
r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, false); r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, true);
if (unlikely(r != 0)) if (unlikely(r != 0))
dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r); dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r);
amdgpu_bo_unpin(adev->gfx.rlc.cp_table_obj); amdgpu_bo_unpin(adev->gfx.rlc.cp_table_obj);
...@@ -1363,7 +1363,7 @@ static void gfx_v8_0_mec_fini(struct amdgpu_device *adev) ...@@ -1363,7 +1363,7 @@ static void gfx_v8_0_mec_fini(struct amdgpu_device *adev)
int r; int r;
if (adev->gfx.mec.hpd_eop_obj) { if (adev->gfx.mec.hpd_eop_obj) {
r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, false); r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, true);
if (unlikely(r != 0)) if (unlikely(r != 0))
dev_warn(adev->dev, "(%d) reserve HPD EOP bo failed\n", r); dev_warn(adev->dev, "(%d) reserve HPD EOP bo failed\n", r);
amdgpu_bo_unpin(adev->gfx.mec.hpd_eop_obj); amdgpu_bo_unpin(adev->gfx.mec.hpd_eop_obj);
...@@ -1490,7 +1490,7 @@ static int gfx_v8_0_kiq_init(struct amdgpu_device *adev) ...@@ -1490,7 +1490,7 @@ static int gfx_v8_0_kiq_init(struct amdgpu_device *adev)
memset(hpd, 0, MEC_HPD_SIZE); memset(hpd, 0, MEC_HPD_SIZE);
r = amdgpu_bo_reserve(kiq->eop_obj, false); r = amdgpu_bo_reserve(kiq->eop_obj, true);
if (unlikely(r != 0)) if (unlikely(r != 0))
dev_warn(adev->dev, "(%d) reserve kiq eop bo failed\n", r); dev_warn(adev->dev, "(%d) reserve kiq eop bo failed\n", r);
amdgpu_bo_kunmap(kiq->eop_obj); amdgpu_bo_kunmap(kiq->eop_obj);
...@@ -1932,6 +1932,7 @@ static int gfx_v8_0_gpu_early_init(struct amdgpu_device *adev) ...@@ -1932,6 +1932,7 @@ static int gfx_v8_0_gpu_early_init(struct amdgpu_device *adev)
case 0xca: case 0xca:
case 0xce: case 0xce:
case 0x88: case 0x88:
case 0xe6:
/* B6 */ /* B6 */
adev->gfx.config.max_cu_per_sh = 6; adev->gfx.config.max_cu_per_sh = 6;
break; break;
...@@ -1964,17 +1965,28 @@ static int gfx_v8_0_gpu_early_init(struct amdgpu_device *adev) ...@@ -1964,17 +1965,28 @@ static int gfx_v8_0_gpu_early_init(struct amdgpu_device *adev)
adev->gfx.config.max_backends_per_se = 1; adev->gfx.config.max_backends_per_se = 1;
switch (adev->pdev->revision) { switch (adev->pdev->revision) {
case 0x80:
case 0x81:
case 0xc0: case 0xc0:
case 0xc1: case 0xc1:
case 0xc2: case 0xc2:
case 0xc4: case 0xc4:
case 0xc8: case 0xc8:
case 0xc9: case 0xc9:
case 0xd6:
case 0xda:
case 0xe9:
case 0xea:
adev->gfx.config.max_cu_per_sh = 3; adev->gfx.config.max_cu_per_sh = 3;
break; break;
case 0x83:
case 0xd0: case 0xd0:
case 0xd1: case 0xd1:
case 0xd2: case 0xd2:
case 0xd4:
case 0xdb:
case 0xe1:
case 0xe2:
default: default:
adev->gfx.config.max_cu_per_sh = 2; adev->gfx.config.max_cu_per_sh = 2;
break; break;
...@@ -3890,7 +3902,7 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev) ...@@ -3890,7 +3902,7 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
sh_static_mem_cfg = REG_SET_FIELD(sh_static_mem_cfg, SH_STATIC_MEM_CONFIG, sh_static_mem_cfg = REG_SET_FIELD(sh_static_mem_cfg, SH_STATIC_MEM_CONFIG,
INDEX_STRIDE, 3); INDEX_STRIDE, 3);
mutex_lock(&adev->srbm_mutex); mutex_lock(&adev->srbm_mutex);
for (i = 0; i < adev->vm_manager.num_ids; i++) { for (i = 0; i < adev->vm_manager.id_mgr[0].num_ids; i++) {
vi_srbm_select(adev, 0, 0, 0, i); vi_srbm_select(adev, 0, 0, 0, i);
/* CP and shaders */ /* CP and shaders */
if (i == 0) { if (i == 0) {
......
...@@ -346,7 +346,8 @@ static int gmc_v6_0_mc_init(struct amdgpu_device *adev) ...@@ -346,7 +346,8 @@ static int gmc_v6_0_mc_init(struct amdgpu_device *adev)
* size equal to the 1024 or vram, whichever is larger. * size equal to the 1024 or vram, whichever is larger.
*/ */
if (amdgpu_gart_size == -1) if (amdgpu_gart_size == -1)
adev->mc.gtt_size = max((1024ULL << 20), adev->mc.mc_vram_size); adev->mc.gtt_size = max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
adev->mc.mc_vram_size);
else else
adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20; adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20;
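The open-coded 1024ULL << 20 default is replaced by AMDGPU_DEFAULT_GTT_SIZE_MB (3072 per the new define), so when the user does not pass amdgpu.gart_size the GTT defaults to the larger of 3 GB and VRAM size; the same substitution is repeated for gmc v7, v8 and v9 below. A worked sketch of the sizing:

```c
/*
 * Sketch of the new default GTT sizing, assuming no amdgpu.gart_size override.
 * With AMDGPU_DEFAULT_GTT_SIZE_MB = 3072ULL this is max(3 GB, VRAM size):
 * a 2 GB board still gets a 3 GB GTT, an 8 GB board gets an 8 GB GTT.
 */
static u64 example_default_gtt_size(u64 mc_vram_size)
{
	return max(AMDGPU_DEFAULT_GTT_SIZE_MB << 20, mc_vram_size);
}
```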
...@@ -621,7 +622,7 @@ static int gmc_v6_0_vm_init(struct amdgpu_device *adev) ...@@ -621,7 +622,7 @@ static int gmc_v6_0_vm_init(struct amdgpu_device *adev)
* amdgpu graphics/compute will use VMIDs 1-7 * amdgpu graphics/compute will use VMIDs 1-7
* amdkfd will use VMIDs 8-15 * amdkfd will use VMIDs 8-15
*/ */
adev->vm_manager.num_ids = AMDGPU_NUM_OF_VMIDS; adev->vm_manager.id_mgr[0].num_ids = AMDGPU_NUM_OF_VMIDS;
adev->vm_manager.num_level = 1; adev->vm_manager.num_level = 1;
amdgpu_vm_manager_init(adev); amdgpu_vm_manager_init(adev);
......
...@@ -395,7 +395,8 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev) ...@@ -395,7 +395,8 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
* size equal to the 1024 or vram, whichever is larger. * size equal to the 1024 or vram, whichever is larger.
*/ */
if (amdgpu_gart_size == -1) if (amdgpu_gart_size == -1)
adev->mc.gtt_size = max((1024ULL << 20), adev->mc.mc_vram_size); adev->mc.gtt_size = max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
adev->mc.mc_vram_size);
else else
adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20; adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20;
...@@ -746,7 +747,7 @@ static int gmc_v7_0_vm_init(struct amdgpu_device *adev) ...@@ -746,7 +747,7 @@ static int gmc_v7_0_vm_init(struct amdgpu_device *adev)
* amdgpu graphics/compute will use VMIDs 1-7 * amdgpu graphics/compute will use VMIDs 1-7
* amdkfd will use VMIDs 8-15 * amdkfd will use VMIDs 8-15
*/ */
adev->vm_manager.num_ids = AMDGPU_NUM_OF_VMIDS; adev->vm_manager.id_mgr[0].num_ids = AMDGPU_NUM_OF_VMIDS;
adev->vm_manager.num_level = 1; adev->vm_manager.num_level = 1;
amdgpu_vm_manager_init(adev); amdgpu_vm_manager_init(adev);
......
...@@ -557,7 +557,8 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev) ...@@ -557,7 +557,8 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
* size equal to the 1024 or vram, whichever is larger. * size equal to the 1024 or vram, whichever is larger.
*/ */
if (amdgpu_gart_size == -1) if (amdgpu_gart_size == -1)
adev->mc.gtt_size = max((1024ULL << 20), adev->mc.mc_vram_size); adev->mc.gtt_size = max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
adev->mc.mc_vram_size);
else else
adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20; adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20;
...@@ -949,7 +950,7 @@ static int gmc_v8_0_vm_init(struct amdgpu_device *adev) ...@@ -949,7 +950,7 @@ static int gmc_v8_0_vm_init(struct amdgpu_device *adev)
* amdgpu graphics/compute will use VMIDs 1-7 * amdgpu graphics/compute will use VMIDs 1-7
* amdkfd will use VMIDs 8-15 * amdkfd will use VMIDs 8-15
*/ */
adev->vm_manager.num_ids = AMDGPU_NUM_OF_VMIDS; adev->vm_manager.id_mgr[0].num_ids = AMDGPU_NUM_OF_VMIDS;
adev->vm_manager.num_level = 1; adev->vm_manager.num_level = 1;
amdgpu_vm_manager_init(adev); amdgpu_vm_manager_init(adev);
......
...@@ -386,6 +386,23 @@ static int gmc_v9_0_early_init(void *handle) ...@@ -386,6 +386,23 @@ static int gmc_v9_0_early_init(void *handle)
static int gmc_v9_0_late_init(void *handle) static int gmc_v9_0_late_init(void *handle)
{ {
struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_device *adev = (struct amdgpu_device *)handle;
unsigned vm_inv_eng[AMDGPU_MAX_VMHUBS] = { 3, 3 };
unsigned i;
for(i = 0; i < adev->num_rings; ++i) {
struct amdgpu_ring *ring = adev->rings[i];
unsigned vmhub = ring->funcs->vmhub;
ring->vm_inv_eng = vm_inv_eng[vmhub]++;
dev_info(adev->dev, "ring %u(%s) uses VM inv eng %u on hub %u\n",
ring->idx, ring->name, ring->vm_inv_eng,
ring->funcs->vmhub);
}
/* Engine 17 is used for GART flushes */
for(i = 0; i < AMDGPU_MAX_VMHUBS; ++i)
BUG_ON(vm_inv_eng[i] > 17);
return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0); return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
} }
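gmc_v9_0_late_init() now hands every ring its own VM invalidation engine on its hub, starting at engine 3 and keeping engine 17 free for GART flushes. The payoff is that a ring can kick a TLB flush on its private engine without coordinating with other rings. A sketch of how the assigned engine maps onto the per-hub request/ack registers; the vmhub field names are taken from the ring emit paths elsewhere in this series:

```c
/*
 * Sketch only: how a ring's assigned invalidation engine is consumed when a
 * flush is requested.  The vmhub field names (vm_inv_eng0_req/_ack) mirror
 * the ones used by the ring emit paths in this series.
 */
static void example_kick_vm_invalidate(struct amdgpu_device *adev,
				       struct amdgpu_ring *ring, u32 req)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[ring->funcs->vmhub];
	u32 eng = ring->vm_inv_eng;	/* assigned once in gmc_v9_0_late_init() */

	WREG32(hub->vm_inv_eng0_req + eng, req);	/* fire the invalidation */
	/* completion is signalled in hub->vm_inv_eng0_ack + eng */
}
```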
...@@ -469,7 +486,8 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev) ...@@ -469,7 +486,8 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
* size equal to the 1024 or vram, whichever is larger. * size equal to the 1024 or vram, whichever is larger.
*/ */
if (amdgpu_gart_size == -1) if (amdgpu_gart_size == -1)
adev->mc.gtt_size = max((1024ULL << 20), adev->mc.mc_vram_size); adev->mc.gtt_size = max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
adev->mc.mc_vram_size);
else else
adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20; adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20;
...@@ -519,7 +537,8 @@ static int gmc_v9_0_vm_init(struct amdgpu_device *adev) ...@@ -519,7 +537,8 @@ static int gmc_v9_0_vm_init(struct amdgpu_device *adev)
* amdgpu graphics/compute will use VMIDs 1-7 * amdgpu graphics/compute will use VMIDs 1-7
* amdkfd will use VMIDs 8-15 * amdkfd will use VMIDs 8-15
*/ */
adev->vm_manager.num_ids = AMDGPU_NUM_OF_VMIDS; adev->vm_manager.id_mgr[AMDGPU_GFXHUB].num_ids = AMDGPU_NUM_OF_VMIDS;
adev->vm_manager.id_mgr[AMDGPU_MMHUB].num_ids = AMDGPU_NUM_OF_VMIDS;
/* TODO: fix num_level for APU when updating vm size and block size */ /* TODO: fix num_level for APU when updating vm size and block size */
if (adev->flags & AMD_IS_APU) if (adev->flags & AMD_IS_APU)
......
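gmc v6 through v8 keep a single VMID manager (id_mgr[0]) while gmc v9 sets up one per hub (AMDGPU_GFXHUB and AMDGPU_MMHUB), so code that used adev->vm_manager.num_ids now has to pick the manager that matches the ring's hub. A sketch; the manager struct name is assumed from the id_mgr[] field, and the VMID split is quoted from the comments above:

```c
/*
 * Sketch: looking up the VMID budget for a ring after the switch to per-hub
 * ID managers.  The struct name is an assumption based on the id_mgr[] field;
 * VMIDs 1-7 go to gfx/compute and 8-15 to amdkfd, per the comments above.
 */
static unsigned example_num_vmids_for_ring(struct amdgpu_device *adev,
					   struct amdgpu_ring *ring)
{
	struct amdgpu_vm_id_manager *id_mgr =
		&adev->vm_manager.id_mgr[ring->funcs->vmhub];

	return id_mgr->num_ids;
}
```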
...@@ -511,6 +511,9 @@ static int mmhub_v1_0_set_clockgating_state(void *handle, ...@@ -511,6 +511,9 @@ static int mmhub_v1_0_set_clockgating_state(void *handle,
{ {
struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (amdgpu_sriov_vf(adev))
return 0;
switch (adev->asic_type) { switch (adev->asic_type) {
case CHIP_VEGA10: case CHIP_VEGA10:
mmhub_v1_0_update_medium_grain_clock_gating(adev, mmhub_v1_0_update_medium_grain_clock_gating(adev,
......
...@@ -84,4 +84,61 @@ struct mmsch_v1_0_cmd_indirect_write { ...@@ -84,4 +84,61 @@ struct mmsch_v1_0_cmd_indirect_write {
uint32_t reg_value; uint32_t reg_value;
}; };
static inline void mmsch_v1_0_insert_direct_wt(struct mmsch_v1_0_cmd_direct_write *direct_wt,
uint32_t *init_table,
uint32_t reg_offset,
uint32_t value)
{
direct_wt->cmd_header.reg_offset = reg_offset;
direct_wt->reg_value = value;
memcpy((void *)init_table, direct_wt, sizeof(struct mmsch_v1_0_cmd_direct_write));
}
static inline void mmsch_v1_0_insert_direct_rd_mod_wt(struct mmsch_v1_0_cmd_direct_read_modify_write *direct_rd_mod_wt,
uint32_t *init_table,
uint32_t reg_offset,
uint32_t mask, uint32_t data)
{
direct_rd_mod_wt->cmd_header.reg_offset = reg_offset;
direct_rd_mod_wt->mask_value = mask;
direct_rd_mod_wt->write_data = data;
memcpy((void *)init_table, direct_rd_mod_wt,
sizeof(struct mmsch_v1_0_cmd_direct_read_modify_write));
}
static inline void mmsch_v1_0_insert_direct_poll(struct mmsch_v1_0_cmd_direct_polling *direct_poll,
uint32_t *init_table,
uint32_t reg_offset,
uint32_t mask, uint32_t wait)
{
direct_poll->cmd_header.reg_offset = reg_offset;
direct_poll->mask_value = mask;
direct_poll->wait_value = wait;
memcpy((void *)init_table, direct_poll, sizeof(struct mmsch_v1_0_cmd_direct_polling));
}
#define MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(reg, mask, data) { \
mmsch_v1_0_insert_direct_rd_mod_wt(&direct_rd_mod_wt, \
init_table, (reg), \
(mask), (data)); \
init_table += sizeof(struct mmsch_v1_0_cmd_direct_read_modify_write)/4; \
table_size += sizeof(struct mmsch_v1_0_cmd_direct_read_modify_write)/4; \
}
#define MMSCH_V1_0_INSERT_DIRECT_WT(reg, value) { \
mmsch_v1_0_insert_direct_wt(&direct_wt, \
init_table, (reg), \
(value)); \
init_table += sizeof(struct mmsch_v1_0_cmd_direct_write)/4; \
table_size += sizeof(struct mmsch_v1_0_cmd_direct_write)/4; \
}
#define MMSCH_V1_0_INSERT_DIRECT_POLL(reg, mask, wait) { \
mmsch_v1_0_insert_direct_poll(&direct_poll, \
init_table, (reg), \
(mask), (wait)); \
init_table += sizeof(struct mmsch_v1_0_cmd_direct_polling)/4; \
table_size += sizeof(struct mmsch_v1_0_cmd_direct_polling)/4; \
}
#endif #endif
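The new mmsch_v1_0 helpers and MMSCH_V1_0_INSERT_* macros append fixed-size command records to an SR-IOV init table and advance both the write pointer and the running dword count. Note that the macros rely on local variables named init_table, table_size, direct_wt, direct_rd_mod_wt and direct_poll being in scope at the call site. A usage sketch with placeholder register offsets and values:

```c
/*
 * Usage sketch for the MMSCH insert macros.  The local variable names are
 * required by the macros; register offsets and values are placeholders, and
 * the command headers are left zero-initialized for brevity (real callers
 * set the command type in each header first).
 */
static uint32_t example_build_mmsch_init_table(uint32_t *table)
{
	struct mmsch_v1_0_cmd_direct_write direct_wt = { {0} };
	struct mmsch_v1_0_cmd_direct_read_modify_write direct_rd_mod_wt = { {0} };
	struct mmsch_v1_0_cmd_direct_polling direct_poll = { {0} };
	uint32_t *init_table = table;
	uint32_t table_size = 0;

	MMSCH_V1_0_INSERT_DIRECT_WT(0x1000, 0x0);		/* write 0 to reg 0x1000 */
	MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(0x1001, 0xf, 0x1);	/* RMW the low nibble */
	MMSCH_V1_0_INSERT_DIRECT_POLL(0x1002, 0x1, 0x1);	/* poll until bit 0 is set */

	return table_size;	/* dwords consumed in the table */
}
```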
...@@ -368,9 +368,12 @@ static int xgpu_vi_mailbox_rcv_msg(struct amdgpu_device *adev, ...@@ -368,9 +368,12 @@ static int xgpu_vi_mailbox_rcv_msg(struct amdgpu_device *adev,
u32 reg; u32 reg;
u32 mask = REG_FIELD_MASK(MAILBOX_CONTROL, RCV_MSG_VALID); u32 mask = REG_FIELD_MASK(MAILBOX_CONTROL, RCV_MSG_VALID);
reg = RREG32_NO_KIQ(mmMAILBOX_CONTROL); /* workaround: host driver doesn't set VALID for CMPL now */
if (!(reg & mask)) if (event != IDH_FLR_NOTIFICATION_CMPL) {
return -ENOENT; reg = RREG32_NO_KIQ(mmMAILBOX_CONTROL);
if (!(reg & mask))
return -ENOENT;
}
reg = RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW0); reg = RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW0);
if (reg != event) if (reg != event)
......
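The mailbox workaround skips the RCV_MSG_VALID check for the FLR-completion message because the host driver does not set the valid bit for it yet; everything else still requires the bit. Callers typically wrap the receive helper in a bounded poll, sketched here with an illustrative timeout:

```c
/*
 * Sketch of the bounded poll a caller would wrap around the receive helper;
 * the timeout value is illustrative, not taken from the driver.
 */
static int example_wait_for_host_msg(struct amdgpu_device *adev,
				     enum idh_event event)
{
	int timeout_ms = 5000;

	while (xgpu_vi_mailbox_rcv_msg(adev, event)) {
		if (--timeout_ms <= 0)
			return -ETIMEDOUT;
		mdelay(1);
	}
	return 0;	/* requested message (e.g. IDH_FLR_NOTIFICATION_CMPL) arrived */
}
```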
...@@ -166,11 +166,8 @@ int psp_v3_1_bootloader_load_sysdrv(struct psp_context *psp) ...@@ -166,11 +166,8 @@ int psp_v3_1_bootloader_load_sysdrv(struct psp_context *psp)
{ {
int ret; int ret;
uint32_t psp_gfxdrv_command_reg = 0; uint32_t psp_gfxdrv_command_reg = 0;
struct amdgpu_bo *psp_sysdrv;
void *psp_sysdrv_virt = NULL;
uint64_t psp_sysdrv_mem;
struct amdgpu_device *adev = psp->adev; struct amdgpu_device *adev = psp->adev;
uint32_t size, sol_reg; uint32_t sol_reg;
/* Check sOS sign of life register to confirm sys driver and sOS /* Check sOS sign of life register to confirm sys driver and sOS
* are already been loaded. * are already been loaded.
...@@ -185,27 +182,14 @@ int psp_v3_1_bootloader_load_sysdrv(struct psp_context *psp) ...@@ -185,27 +182,14 @@ int psp_v3_1_bootloader_load_sysdrv(struct psp_context *psp)
if (ret) if (ret)
return ret; return ret;
/* memset(psp->fw_pri_buf, 0, PSP_1_MEG);
* Create a 1 meg GART memory to store the psp sys driver
* binary with a 1 meg aligned address
*/
size = (psp->sys_bin_size + (PSP_BOOTLOADER_1_MEG_ALIGNMENT - 1)) &
(~(PSP_BOOTLOADER_1_MEG_ALIGNMENT - 1));
ret = amdgpu_bo_create_kernel(adev, size, PSP_BOOTLOADER_1_MEG_ALIGNMENT,
AMDGPU_GEM_DOMAIN_GTT,
&psp_sysdrv,
&psp_sysdrv_mem,
&psp_sysdrv_virt);
if (ret)
return ret;
/* Copy PSP System Driver binary to memory */ /* Copy PSP System Driver binary to memory */
memcpy(psp_sysdrv_virt, psp->sys_start_addr, psp->sys_bin_size); memcpy(psp->fw_pri_buf, psp->sys_start_addr, psp->sys_bin_size);
/* Provide the sys driver to bootrom */ /* Provide the sys driver to bootrom */
WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_36), WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_36),
(uint32_t)(psp_sysdrv_mem >> 20)); (uint32_t)(psp->fw_pri_mc_addr >> 20));
psp_gfxdrv_command_reg = 1 << 16; psp_gfxdrv_command_reg = 1 << 16;
WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35), WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
psp_gfxdrv_command_reg); psp_gfxdrv_command_reg);
...@@ -216,8 +200,6 @@ int psp_v3_1_bootloader_load_sysdrv(struct psp_context *psp) ...@@ -216,8 +200,6 @@ int psp_v3_1_bootloader_load_sysdrv(struct psp_context *psp)
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35), ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
0x80000000, 0x80000000, false); 0x80000000, 0x80000000, false);
amdgpu_bo_free_kernel(&psp_sysdrv, &psp_sysdrv_mem, &psp_sysdrv_virt);
return ret; return ret;
} }
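Both bootloader paths stop allocating a throw-away 1 MB GART buffer and instead stage the image in the persistent psp->fw_pri_buf, handing its MC address to the boot ROM through C2PMSG_36 (in units of 1 MB, hence the >> 20) and issuing the command through C2PMSG_35. A condensed sketch of the shared pattern; it assumes fw_pri_buf/fw_pri_mc_addr are set up once by the common PSP code:

```c
/*
 * Condensed sketch of the staging pattern both loaders now share.  It assumes
 * psp->fw_pri_buf / psp->fw_pri_mc_addr were prepared by the common PSP code;
 * gfxdrv_cmd is 1 << 16 for the sys driver and 2 << 16 for the secure OS.
 */
static void example_stage_psp_image(struct psp_context *psp,
				    const void *image, uint32_t size,
				    uint32_t gfxdrv_cmd)
{
	struct amdgpu_device *adev = psp->adev;

	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
	memcpy(psp->fw_pri_buf, image, size);

	/* boot ROM expects the buffer address in units of 1 MB */
	WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_36),
	       (uint32_t)(psp->fw_pri_mc_addr >> 20));
	WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35), gfxdrv_cmd);
}
```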
...@@ -225,11 +207,8 @@ int psp_v3_1_bootloader_load_sos(struct psp_context *psp) ...@@ -225,11 +207,8 @@ int psp_v3_1_bootloader_load_sos(struct psp_context *psp)
{ {
int ret; int ret;
unsigned int psp_gfxdrv_command_reg = 0; unsigned int psp_gfxdrv_command_reg = 0;
struct amdgpu_bo *psp_sos;
void *psp_sos_virt = NULL;
uint64_t psp_sos_mem;
struct amdgpu_device *adev = psp->adev; struct amdgpu_device *adev = psp->adev;
uint32_t size, sol_reg; uint32_t sol_reg;
/* Check sOS sign of life register to confirm sys driver and sOS /* Check sOS sign of life register to confirm sys driver and sOS
* are already been loaded. * are already been loaded.
...@@ -244,23 +223,14 @@ int psp_v3_1_bootloader_load_sos(struct psp_context *psp) ...@@ -244,23 +223,14 @@ int psp_v3_1_bootloader_load_sos(struct psp_context *psp)
if (ret) if (ret)
return ret; return ret;
size = (psp->sos_bin_size + (PSP_BOOTLOADER_1_MEG_ALIGNMENT - 1)) & memset(psp->fw_pri_buf, 0, PSP_1_MEG);
(~((uint64_t)PSP_BOOTLOADER_1_MEG_ALIGNMENT - 1));
ret = amdgpu_bo_create_kernel(adev, size, PSP_BOOTLOADER_1_MEG_ALIGNMENT,
AMDGPU_GEM_DOMAIN_GTT,
&psp_sos,
&psp_sos_mem,
&psp_sos_virt);
if (ret)
return ret;
/* Copy Secure OS binary to PSP memory */ /* Copy Secure OS binary to PSP memory */
memcpy(psp_sos_virt, psp->sos_start_addr, psp->sos_bin_size); memcpy(psp->fw_pri_buf, psp->sos_start_addr, psp->sos_bin_size);
/* Provide the PSP secure OS to bootrom */ /* Provide the PSP secure OS to bootrom */
WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_36), WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_36),
(uint32_t)(psp_sos_mem >> 20)); (uint32_t)(psp->fw_pri_mc_addr >> 20));
psp_gfxdrv_command_reg = 2 << 16; psp_gfxdrv_command_reg = 2 << 16;
WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35), WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
psp_gfxdrv_command_reg); psp_gfxdrv_command_reg);
...@@ -273,8 +243,6 @@ int psp_v3_1_bootloader_load_sos(struct psp_context *psp) ...@@ -273,8 +243,6 @@ int psp_v3_1_bootloader_load_sos(struct psp_context *psp)
0, true); 0, true);
#endif #endif
amdgpu_bo_free_kernel(&psp_sos, &psp_sos_mem, &psp_sos_virt);
return ret; return ret;
} }
...@@ -300,7 +268,6 @@ int psp_v3_1_prep_cmd_buf(struct amdgpu_firmware_info *ucode, struct psp_gfx_cmd ...@@ -300,7 +268,6 @@ int psp_v3_1_prep_cmd_buf(struct amdgpu_firmware_info *ucode, struct psp_gfx_cmd
int psp_v3_1_ring_init(struct psp_context *psp, enum psp_ring_type ring_type) int psp_v3_1_ring_init(struct psp_context *psp, enum psp_ring_type ring_type)
{ {
int ret = 0; int ret = 0;
unsigned int psp_ring_reg = 0;
struct psp_ring *ring; struct psp_ring *ring;
struct amdgpu_device *adev = psp->adev; struct amdgpu_device *adev = psp->adev;
...@@ -320,6 +287,16 @@ int psp_v3_1_ring_init(struct psp_context *psp, enum psp_ring_type ring_type) ...@@ -320,6 +287,16 @@ int psp_v3_1_ring_init(struct psp_context *psp, enum psp_ring_type ring_type)
return ret; return ret;
} }
return 0;
}
int psp_v3_1_ring_create(struct psp_context *psp, enum psp_ring_type ring_type)
{
int ret = 0;
unsigned int psp_ring_reg = 0;
struct psp_ring *ring = &psp->km_ring;
struct amdgpu_device *adev = psp->adev;
/* Write low address of the ring to C2PMSG_69 */ /* Write low address of the ring to C2PMSG_69 */
psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr); psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_69), psp_ring_reg); WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_69), psp_ring_reg);
...@@ -344,6 +321,33 @@ int psp_v3_1_ring_init(struct psp_context *psp, enum psp_ring_type ring_type) ...@@ -344,6 +321,33 @@ int psp_v3_1_ring_init(struct psp_context *psp, enum psp_ring_type ring_type)
return ret; return ret;
} }
int psp_v3_1_ring_destroy(struct psp_context *psp, enum psp_ring_type ring_type)
{
int ret = 0;
struct psp_ring *ring;
unsigned int psp_ring_reg = 0;
struct amdgpu_device *adev = psp->adev;
ring = &psp->km_ring;
/* Write the ring destroy command to C2PMSG_64 */
psp_ring_reg = 3 << 16;
WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), psp_ring_reg);
/* there might be handshake issue with hardware which needs delay */
mdelay(20);
/* Wait for response flag (bit 31) in C2PMSG_64 */
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
0x80000000, 0x80000000, false);
if (ring->ring_mem)
amdgpu_bo_free_kernel(&adev->firmware.rbuf,
&ring->ring_mem_mc_addr,
(void **)&ring->ring_mem);
return ret;
}
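Ring bring-up that used to live entirely in psp_v3_1_ring_init() is split up: ring_init now only prepares the ring memory, ring_create programs C2PMSG_69/70/71 and waits for the response bit, and the new ring_destroy issues the destroy command (3 << 16) before freeing the ring buffer. A lifecycle sketch; PSP_RING_TYPE__KM is assumed from the PSP interface definitions and error handling is trimmed:

```c
/*
 * Lifecycle sketch for the split ring handling.  PSP_RING_TYPE__KM is assumed
 * from the PSP interface definitions; error handling is simplified.
 */
static int example_psp_ring_lifecycle(struct psp_context *psp)
{
	int ret;

	ret = psp_v3_1_ring_init(psp, PSP_RING_TYPE__KM);	/* allocate ring memory */
	if (ret)
		return ret;

	ret = psp_v3_1_ring_create(psp, PSP_RING_TYPE__KM);	/* register it with the PSP */
	if (ret)
		goto out;

	/* ... psp_v3_1_cmd_submit() traffic goes here ... */
out:
	psp_v3_1_ring_destroy(psp, PSP_RING_TYPE__KM);		/* destroy cmd + free buffer */
	return ret;
}
```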
int psp_v3_1_cmd_submit(struct psp_context *psp, int psp_v3_1_cmd_submit(struct psp_context *psp,
struct amdgpu_firmware_info *ucode, struct amdgpu_firmware_info *ucode,
uint64_t cmd_buf_mc_addr, uint64_t fence_mc_addr, uint64_t cmd_buf_mc_addr, uint64_t fence_mc_addr,
......
...@@ -39,6 +39,10 @@ extern int psp_v3_1_prep_cmd_buf(struct amdgpu_firmware_info *ucode, ...@@ -39,6 +39,10 @@ extern int psp_v3_1_prep_cmd_buf(struct amdgpu_firmware_info *ucode,
struct psp_gfx_cmd_resp *cmd); struct psp_gfx_cmd_resp *cmd);
extern int psp_v3_1_ring_init(struct psp_context *psp, extern int psp_v3_1_ring_init(struct psp_context *psp,
enum psp_ring_type ring_type); enum psp_ring_type ring_type);
extern int psp_v3_1_ring_create(struct psp_context *psp,
enum psp_ring_type ring_type);
extern int psp_v3_1_ring_destroy(struct psp_context *psp,
enum psp_ring_type ring_type);
extern int psp_v3_1_cmd_submit(struct psp_context *psp, extern int psp_v3_1_cmd_submit(struct psp_context *psp,
struct amdgpu_firmware_info *ucode, struct amdgpu_firmware_info *ucode,
uint64_t cmd_buf_mc_addr, uint64_t fence_mc_addr, uint64_t cmd_buf_mc_addr, uint64_t fence_mc_addr,
......
...@@ -48,8 +48,7 @@ static void sdma_v4_0_set_buffer_funcs(struct amdgpu_device *adev); ...@@ -48,8 +48,7 @@ static void sdma_v4_0_set_buffer_funcs(struct amdgpu_device *adev);
static void sdma_v4_0_set_vm_pte_funcs(struct amdgpu_device *adev); static void sdma_v4_0_set_vm_pte_funcs(struct amdgpu_device *adev);
static void sdma_v4_0_set_irq_funcs(struct amdgpu_device *adev); static void sdma_v4_0_set_irq_funcs(struct amdgpu_device *adev);
static const u32 golden_settings_sdma_4[] = static const u32 golden_settings_sdma_4[] = {
{
SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CHICKEN_BITS), 0xfe931f07, 0x02831f07, SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CHICKEN_BITS), 0xfe931f07, 0x02831f07,
SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CLK_CTRL), 0xff000ff0, 0x3f000100, SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CLK_CTRL), 0xff000ff0, 0x3f000100,
SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GFX_IB_CNTL), 0x800f0100, 0x00000100, SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GFX_IB_CNTL), 0x800f0100, 0x00000100,
...@@ -76,8 +75,7 @@ static const u32 golden_settings_sdma_4[] = ...@@ -76,8 +75,7 @@ static const u32 golden_settings_sdma_4[] =
SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_UTCL1_PAGE), 0x000003ff, 0x000003c0 SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_UTCL1_PAGE), 0x000003ff, 0x000003c0
}; };
static const u32 golden_settings_sdma_vg10[] = static const u32 golden_settings_sdma_vg10[] = {
{
SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG), 0x0018773f, 0x00104002, SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG), 0x0018773f, 0x00104002,
SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ), 0x0018773f, 0x00104002, SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ), 0x0018773f, 0x00104002,
SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG), 0x0018773f, 0x00104002, SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG), 0x0018773f, 0x00104002,
...@@ -87,16 +85,17 @@ static const u32 golden_settings_sdma_vg10[] = ...@@ -87,16 +85,17 @@ static const u32 golden_settings_sdma_vg10[] =
static u32 sdma_v4_0_get_reg_offset(u32 instance, u32 internal_offset) static u32 sdma_v4_0_get_reg_offset(u32 instance, u32 internal_offset)
{ {
u32 base = 0; u32 base = 0;
switch (instance) { switch (instance) {
case 0: case 0:
base = SDMA0_BASE.instance[0].segment[0]; base = SDMA0_BASE.instance[0].segment[0];
break; break;
case 1: case 1:
base = SDMA1_BASE.instance[0].segment[0]; base = SDMA1_BASE.instance[0].segment[0];
break; break;
default: default:
BUG(); BUG();
break; break;
} }
return base + internal_offset; return base + internal_offset;
...@@ -159,7 +158,8 @@ static int sdma_v4_0_init_microcode(struct amdgpu_device *adev) ...@@ -159,7 +158,8 @@ static int sdma_v4_0_init_microcode(struct amdgpu_device *adev)
case CHIP_VEGA10: case CHIP_VEGA10:
chip_name = "vega10"; chip_name = "vega10";
break; break;
default: BUG(); default:
BUG();
} }
for (i = 0; i < adev->sdma.num_instances; i++) { for (i = 0; i < adev->sdma.num_instances; i++) {
...@@ -179,7 +179,7 @@ static int sdma_v4_0_init_microcode(struct amdgpu_device *adev) ...@@ -179,7 +179,7 @@ static int sdma_v4_0_init_microcode(struct amdgpu_device *adev)
if (adev->sdma.instance[i].feature_version >= 20) if (adev->sdma.instance[i].feature_version >= 20)
adev->sdma.instance[i].burst_nop = true; adev->sdma.instance[i].burst_nop = true;
DRM_DEBUG("psp_load == '%s'\n", DRM_DEBUG("psp_load == '%s'\n",
adev->firmware.load_type == AMDGPU_FW_LOAD_PSP? "true": "false"); adev->firmware.load_type == AMDGPU_FW_LOAD_PSP ? "true" : "false");
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i]; info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
...@@ -192,9 +192,7 @@ static int sdma_v4_0_init_microcode(struct amdgpu_device *adev) ...@@ -192,9 +192,7 @@ static int sdma_v4_0_init_microcode(struct amdgpu_device *adev)
} }
out: out:
if (err) { if (err) {
printk(KERN_ERR DRM_ERROR("sdma_v4_0: Failed to load firmware \"%s\"\n", fw_name);
"sdma_v4_0: Failed to load firmware \"%s\"\n",
fw_name);
for (i = 0; i < adev->sdma.num_instances; i++) { for (i = 0; i < adev->sdma.num_instances; i++) {
release_firmware(adev->sdma.instance[i].fw); release_firmware(adev->sdma.instance[i].fw);
adev->sdma.instance[i].fw = NULL; adev->sdma.instance[i].fw = NULL;
...@@ -212,10 +210,10 @@ static int sdma_v4_0_init_microcode(struct amdgpu_device *adev) ...@@ -212,10 +210,10 @@ static int sdma_v4_0_init_microcode(struct amdgpu_device *adev)
*/ */
static uint64_t sdma_v4_0_ring_get_rptr(struct amdgpu_ring *ring) static uint64_t sdma_v4_0_ring_get_rptr(struct amdgpu_ring *ring)
{ {
u64* rptr; u64 *rptr;
/* XXX check if swapping is necessary on BE */ /* XXX check if swapping is necessary on BE */
rptr =((u64*)&ring->adev->wb.wb[ring->rptr_offs]); rptr = ((u64 *)&ring->adev->wb.wb[ring->rptr_offs]);
DRM_DEBUG("rptr before shift == 0x%016llx\n", *rptr); DRM_DEBUG("rptr before shift == 0x%016llx\n", *rptr);
return ((*rptr) >> 2); return ((*rptr) >> 2);
...@@ -231,19 +229,20 @@ static uint64_t sdma_v4_0_ring_get_rptr(struct amdgpu_ring *ring) ...@@ -231,19 +229,20 @@ static uint64_t sdma_v4_0_ring_get_rptr(struct amdgpu_ring *ring)
static uint64_t sdma_v4_0_ring_get_wptr(struct amdgpu_ring *ring) static uint64_t sdma_v4_0_ring_get_wptr(struct amdgpu_ring *ring)
{ {
struct amdgpu_device *adev = ring->adev; struct amdgpu_device *adev = ring->adev;
u64* wptr = NULL; u64 *wptr = NULL;
uint64_t local_wptr=0; uint64_t local_wptr = 0;
if (ring->use_doorbell) { if (ring->use_doorbell) {
/* XXX check if swapping is necessary on BE */ /* XXX check if swapping is necessary on BE */
wptr = ((u64*)&adev->wb.wb[ring->wptr_offs]); wptr = ((u64 *)&adev->wb.wb[ring->wptr_offs]);
DRM_DEBUG("wptr/doorbell before shift == 0x%016llx\n", *wptr); DRM_DEBUG("wptr/doorbell before shift == 0x%016llx\n", *wptr);
*wptr = (*wptr) >> 2; *wptr = (*wptr) >> 2;
DRM_DEBUG("wptr/doorbell after shift == 0x%016llx\n", *wptr); DRM_DEBUG("wptr/doorbell after shift == 0x%016llx\n", *wptr);
} else { } else {
u32 lowbit, highbit; u32 lowbit, highbit;
int me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1; int me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;
wptr=&local_wptr;
wptr = &local_wptr;
lowbit = RREG32(sdma_v4_0_get_reg_offset(me, mmSDMA0_GFX_RB_WPTR)) >> 2; lowbit = RREG32(sdma_v4_0_get_reg_offset(me, mmSDMA0_GFX_RB_WPTR)) >> 2;
highbit = RREG32(sdma_v4_0_get_reg_offset(me, mmSDMA0_GFX_RB_WPTR_HI)) >> 2; highbit = RREG32(sdma_v4_0_get_reg_offset(me, mmSDMA0_GFX_RB_WPTR_HI)) >> 2;
...@@ -285,12 +284,13 @@ static void sdma_v4_0_ring_set_wptr(struct amdgpu_ring *ring) ...@@ -285,12 +284,13 @@ static void sdma_v4_0_ring_set_wptr(struct amdgpu_ring *ring)
WDOORBELL64(ring->doorbell_index, ring->wptr << 2); WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
} else { } else {
int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1; int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;
DRM_DEBUG("Not using doorbell -- " DRM_DEBUG("Not using doorbell -- "
"mmSDMA%i_GFX_RB_WPTR == 0x%08x " "mmSDMA%i_GFX_RB_WPTR == 0x%08x "
"mmSDMA%i_GFX_RB_WPTR_HI == 0x%08x \n", "mmSDMA%i_GFX_RB_WPTR_HI == 0x%08x\n",
me,
me, me,
lower_32_bits(ring->wptr << 2), lower_32_bits(ring->wptr << 2),
me,
upper_32_bits(ring->wptr << 2)); upper_32_bits(ring->wptr << 2));
WREG32(sdma_v4_0_get_reg_offset(me, mmSDMA0_GFX_RB_WPTR), lower_32_bits(ring->wptr << 2)); WREG32(sdma_v4_0_get_reg_offset(me, mmSDMA0_GFX_RB_WPTR), lower_32_bits(ring->wptr << 2));
WREG32(sdma_v4_0_get_reg_offset(me, mmSDMA0_GFX_RB_WPTR_HI), upper_32_bits(ring->wptr << 2)); WREG32(sdma_v4_0_get_reg_offset(me, mmSDMA0_GFX_RB_WPTR_HI), upper_32_bits(ring->wptr << 2));
...@@ -319,22 +319,22 @@ static void sdma_v4_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) ...@@ -319,22 +319,22 @@ static void sdma_v4_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
* Schedule an IB in the DMA ring (VEGA10). * Schedule an IB in the DMA ring (VEGA10).
*/ */
static void sdma_v4_0_ring_emit_ib(struct amdgpu_ring *ring, static void sdma_v4_0_ring_emit_ib(struct amdgpu_ring *ring,
struct amdgpu_ib *ib, struct amdgpu_ib *ib,
unsigned vm_id, bool ctx_switch) unsigned vm_id, bool ctx_switch)
{ {
u32 vmid = vm_id & 0xf; u32 vmid = vm_id & 0xf;
/* IB packet must end on a 8 DW boundary */ /* IB packet must end on a 8 DW boundary */
sdma_v4_0_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8); sdma_v4_0_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8);
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) | amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
SDMA_PKT_INDIRECT_HEADER_VMID(vmid)); SDMA_PKT_INDIRECT_HEADER_VMID(vmid));
/* base must be 32 byte aligned */ /* base must be 32 byte aligned */
amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0); amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0);
amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
amdgpu_ring_write(ring, ib->length_dw); amdgpu_ring_write(ring, ib->length_dw);
amdgpu_ring_write(ring, 0); amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, 0); amdgpu_ring_write(ring, 0);
} }
...@@ -523,7 +523,7 @@ static int sdma_v4_0_gfx_resume(struct amdgpu_device *adev) ...@@ -523,7 +523,7 @@ static int sdma_v4_0_gfx_resume(struct amdgpu_device *adev)
u32 doorbell; u32 doorbell;
u32 doorbell_offset; u32 doorbell_offset;
u32 temp; u32 temp;
int i,r; int i, r;
for (i = 0; i < adev->sdma.num_instances; i++) { for (i = 0; i < adev->sdma.num_instances; i++) {
ring = &adev->sdma.instance[i].ring; ring = &adev->sdma.instance[i].ring;
...@@ -572,7 +572,7 @@ static int sdma_v4_0_gfx_resume(struct amdgpu_device *adev) ...@@ -572,7 +572,7 @@ static int sdma_v4_0_gfx_resume(struct amdgpu_device *adev)
doorbell = RREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_DOORBELL)); doorbell = RREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_DOORBELL));
doorbell_offset = RREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_DOORBELL_OFFSET)); doorbell_offset = RREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_DOORBELL_OFFSET));
if (ring->use_doorbell){ if (ring->use_doorbell) {
doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 1); doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 1);
doorbell_offset = REG_SET_FIELD(doorbell_offset, SDMA0_GFX_DOORBELL_OFFSET, doorbell_offset = REG_SET_FIELD(doorbell_offset, SDMA0_GFX_DOORBELL_OFFSET,
OFFSET, ring->doorbell_index); OFFSET, ring->doorbell_index);
...@@ -694,9 +694,7 @@ static int sdma_v4_0_load_microcode(struct amdgpu_device *adev) ...@@ -694,9 +694,7 @@ static int sdma_v4_0_load_microcode(struct amdgpu_device *adev)
for (j = 0; j < fw_size; j++) for (j = 0; j < fw_size; j++)
{
WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_UCODE_DATA), le32_to_cpup(fw_data++)); WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_UCODE_DATA), le32_to_cpup(fw_data++));
}
WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_UCODE_ADDR), adev->sdma.instance[i].fw_version); WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_UCODE_ADDR), adev->sdma.instance[i].fw_version);
} }
...@@ -744,10 +742,8 @@ static int sdma_v4_0_start(struct amdgpu_device *adev) ...@@ -744,10 +742,8 @@ static int sdma_v4_0_start(struct amdgpu_device *adev)
if (r) if (r)
return r; return r;
r = sdma_v4_0_rlc_resume(adev); r = sdma_v4_0_rlc_resume(adev);
if (r)
return r;
return 0; return r;
} }
/** /**
...@@ -797,9 +793,8 @@ static int sdma_v4_0_ring_test_ring(struct amdgpu_ring *ring) ...@@ -797,9 +793,8 @@ static int sdma_v4_0_ring_test_ring(struct amdgpu_ring *ring)
for (i = 0; i < adev->usec_timeout; i++) { for (i = 0; i < adev->usec_timeout; i++) {
tmp = le32_to_cpu(adev->wb.wb[index]); tmp = le32_to_cpu(adev->wb.wb[index]);
if (tmp == 0xDEADBEEF) { if (tmp == 0xDEADBEEF)
break; break;
}
DRM_UDELAY(1); DRM_UDELAY(1);
} }
...@@ -864,29 +859,29 @@ static int sdma_v4_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) ...@@ -864,29 +859,29 @@ static int sdma_v4_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
if (r) if (r)
goto err1; goto err1;
r = dma_fence_wait_timeout(f, false, timeout); r = dma_fence_wait_timeout(f, false, timeout);
if (r == 0) { if (r == 0) {
DRM_ERROR("amdgpu: IB test timed out\n"); DRM_ERROR("amdgpu: IB test timed out\n");
r = -ETIMEDOUT; r = -ETIMEDOUT;
goto err1; goto err1;
} else if (r < 0) { } else if (r < 0) {
DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r); DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
goto err1; goto err1;
} }
tmp = le32_to_cpu(adev->wb.wb[index]); tmp = le32_to_cpu(adev->wb.wb[index]);
if (tmp == 0xDEADBEEF) { if (tmp == 0xDEADBEEF) {
DRM_INFO("ib test on ring %d succeeded\n", ring->idx); DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
r = 0; r = 0;
} else { } else {
DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp); DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
r = -EINVAL; r = -EINVAL;
} }
err1: err1:
amdgpu_ib_free(adev, &ib, NULL); amdgpu_ib_free(adev, &ib, NULL);
dma_fence_put(f); dma_fence_put(f);
err0: err0:
amdgpu_wb_free(adev, index); amdgpu_wb_free(adev, index);
return r; return r;
} }
...@@ -1039,44 +1034,40 @@ static void sdma_v4_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring) ...@@ -1039,44 +1034,40 @@ static void sdma_v4_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
static void sdma_v4_0_ring_emit_vm_flush(struct amdgpu_ring *ring, static void sdma_v4_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned vm_id, uint64_t pd_addr) unsigned vm_id, uint64_t pd_addr)
{ {
struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id); uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
unsigned eng = ring->idx; unsigned eng = ring->vm_inv_eng;
unsigned i;
pd_addr = pd_addr | 0x1; /* valid bit */ pd_addr = pd_addr | 0x1; /* valid bit */
/* now only use physical base address of PDE and valid */ /* now only use physical base address of PDE and valid */
BUG_ON(pd_addr & 0xFFFF00000000003EULL); BUG_ON(pd_addr & 0xFFFF00000000003EULL);
for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) { amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
struct amdgpu_vmhub *hub = &ring->adev->vmhub[i]; SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
amdgpu_ring_write(ring, hub->ctx0_ptb_addr_lo32 + vm_id * 2);
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) | amdgpu_ring_write(ring, lower_32_bits(pd_addr));
SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
amdgpu_ring_write(ring, hub->ctx0_ptb_addr_lo32 + vm_id * 2); amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
amdgpu_ring_write(ring, lower_32_bits(pd_addr)); SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
amdgpu_ring_write(ring, hub->ctx0_ptb_addr_hi32 + vm_id * 2);
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) | amdgpu_ring_write(ring, upper_32_bits(pd_addr));
SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
amdgpu_ring_write(ring, hub->ctx0_ptb_addr_hi32 + vm_id * 2); /* flush TLB */
amdgpu_ring_write(ring, upper_32_bits(pd_addr)); amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
/* flush TLB */ amdgpu_ring_write(ring, hub->vm_inv_eng0_req + eng);
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) | amdgpu_ring_write(ring, req);
SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
amdgpu_ring_write(ring, hub->vm_inv_eng0_req + eng); /* wait for flush */
amdgpu_ring_write(ring, req); amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
/* wait for flush */ SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* equal */
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) | amdgpu_ring_write(ring, (hub->vm_inv_eng0_ack + eng) << 2);
SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) | amdgpu_ring_write(ring, 0);
SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* equal */ amdgpu_ring_write(ring, 1 << vm_id); /* reference */
amdgpu_ring_write(ring, (hub->vm_inv_eng0_ack + eng) << 2); amdgpu_ring_write(ring, 1 << vm_id); /* mask */
amdgpu_ring_write(ring, 0); amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
amdgpu_ring_write(ring, 1 << vm_id); /* reference */ SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10));
amdgpu_ring_write(ring, 1 << vm_id); /* mask */
amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10));
}
} }
static int sdma_v4_0_early_init(void *handle) static int sdma_v4_0_early_init(void *handle)
...@@ -1162,8 +1153,6 @@ static int sdma_v4_0_hw_init(void *handle) ...@@ -1162,8 +1153,6 @@ static int sdma_v4_0_hw_init(void *handle)
sdma_v4_0_init_golden_registers(adev); sdma_v4_0_init_golden_registers(adev);
r = sdma_v4_0_start(adev); r = sdma_v4_0_start(adev);
if (r)
return r;
return r; return r;
} }
...@@ -1199,10 +1188,12 @@ static bool sdma_v4_0_is_idle(void *handle) ...@@ -1199,10 +1188,12 @@ static bool sdma_v4_0_is_idle(void *handle)
{ {
struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 i; u32 i;
for (i = 0; i < adev->sdma.num_instances; i++) { for (i = 0; i < adev->sdma.num_instances; i++) {
u32 tmp = RREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_STATUS_REG)); u32 tmp = RREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_STATUS_REG));
if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK)) if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK))
return false; return false;
} }
return true; return true;
...@@ -1211,8 +1202,9 @@ static bool sdma_v4_0_is_idle(void *handle) ...@@ -1211,8 +1202,9 @@ static bool sdma_v4_0_is_idle(void *handle)
static int sdma_v4_0_wait_for_idle(void *handle) static int sdma_v4_0_wait_for_idle(void *handle)
{ {
unsigned i; unsigned i;
u32 sdma0,sdma1; u32 sdma0, sdma1;
struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_device *adev = (struct amdgpu_device *)handle;
for (i = 0; i < adev->usec_timeout; i++) { for (i = 0; i < adev->usec_timeout; i++) {
sdma0 = RREG32(sdma_v4_0_get_reg_offset(0, mmSDMA0_STATUS_REG)); sdma0 = RREG32(sdma_v4_0_get_reg_offset(0, mmSDMA0_STATUS_REG));
sdma1 = RREG32(sdma_v4_0_get_reg_offset(1, mmSDMA0_STATUS_REG)); sdma1 = RREG32(sdma_v4_0_get_reg_offset(1, mmSDMA0_STATUS_REG));
...@@ -1240,7 +1232,7 @@ static int sdma_v4_0_set_trap_irq_state(struct amdgpu_device *adev, ...@@ -1240,7 +1232,7 @@ static int sdma_v4_0_set_trap_irq_state(struct amdgpu_device *adev,
u32 reg_offset = (type == AMDGPU_SDMA_IRQ_TRAP0) ? u32 reg_offset = (type == AMDGPU_SDMA_IRQ_TRAP0) ?
sdma_v4_0_get_reg_offset(0, mmSDMA0_CNTL) : sdma_v4_0_get_reg_offset(0, mmSDMA0_CNTL) :
sdma_v4_0_get_reg_offset(1, mmSDMA0_CNTL); sdma_v4_0_get_reg_offset(1, mmSDMA0_CNTL);
sdma_cntl = RREG32(reg_offset); sdma_cntl = RREG32(reg_offset);
sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE,
...@@ -1332,7 +1324,7 @@ static void sdma_v4_0_update_medium_grain_clock_gating( ...@@ -1332,7 +1324,7 @@ static void sdma_v4_0_update_medium_grain_clock_gating(
SDMA1_CLK_CTRL__SOFT_OVERRIDE2_MASK | SDMA1_CLK_CTRL__SOFT_OVERRIDE2_MASK |
SDMA1_CLK_CTRL__SOFT_OVERRIDE1_MASK | SDMA1_CLK_CTRL__SOFT_OVERRIDE1_MASK |
SDMA1_CLK_CTRL__SOFT_OVERRIDE0_MASK); SDMA1_CLK_CTRL__SOFT_OVERRIDE0_MASK);
if(def != data) if (def != data)
WREG32(SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_CLK_CTRL), data); WREG32(SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_CLK_CTRL), data);
} }
} else { } else {
...@@ -1382,17 +1374,17 @@ static void sdma_v4_0_update_medium_grain_light_sleep( ...@@ -1382,17 +1374,17 @@ static void sdma_v4_0_update_medium_grain_light_sleep(
/* 1-not override: enable sdma1 mem light sleep */ /* 1-not override: enable sdma1 mem light sleep */
if (adev->asic_type == CHIP_VEGA10) { if (adev->asic_type == CHIP_VEGA10) {
def = data = RREG32(SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_POWER_CNTL)); def = data = RREG32(SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_POWER_CNTL));
data |= SDMA1_POWER_CNTL__MEM_POWER_OVERRIDE_MASK; data |= SDMA1_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
if (def != data) if (def != data)
WREG32(SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_POWER_CNTL), data); WREG32(SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_POWER_CNTL), data);
} }
} else { } else {
/* 0-override:disable sdma0 mem light sleep */ /* 0-override:disable sdma0 mem light sleep */
def = data = RREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_POWER_CNTL)); def = data = RREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_POWER_CNTL));
data &= ~SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK; data &= ~SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
if (def != data) if (def != data)
WREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_POWER_CNTL), data); WREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_POWER_CNTL), data);
/* 0-override:disable sdma1 mem light sleep */ /* 0-override:disable sdma1 mem light sleep */
if (adev->asic_type == CHIP_VEGA10) { if (adev->asic_type == CHIP_VEGA10) {
...@@ -1473,6 +1465,7 @@ static const struct amdgpu_ring_funcs sdma_v4_0_ring_funcs = { ...@@ -1473,6 +1465,7 @@ static const struct amdgpu_ring_funcs sdma_v4_0_ring_funcs = {
.align_mask = 0xf, .align_mask = 0xf,
.nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), .nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
.support_64bit_ptrs = true, .support_64bit_ptrs = true,
.vmhub = AMDGPU_MMHUB,
.get_rptr = sdma_v4_0_ring_get_rptr, .get_rptr = sdma_v4_0_ring_get_rptr,
.get_wptr = sdma_v4_0_ring_get_wptr, .get_wptr = sdma_v4_0_ring_get_wptr,
.set_wptr = sdma_v4_0_ring_set_wptr, .set_wptr = sdma_v4_0_ring_set_wptr,
...@@ -1480,7 +1473,7 @@ static const struct amdgpu_ring_funcs sdma_v4_0_ring_funcs = { ...@@ -1480,7 +1473,7 @@ static const struct amdgpu_ring_funcs sdma_v4_0_ring_funcs = {
6 + /* sdma_v4_0_ring_emit_hdp_flush */ 6 + /* sdma_v4_0_ring_emit_hdp_flush */
3 + /* sdma_v4_0_ring_emit_hdp_invalidate */ 3 + /* sdma_v4_0_ring_emit_hdp_invalidate */
6 + /* sdma_v4_0_ring_emit_pipeline_sync */ 6 + /* sdma_v4_0_ring_emit_pipeline_sync */
36 + /* sdma_v4_0_ring_emit_vm_flush */ 18 + /* sdma_v4_0_ring_emit_vm_flush */
10 + 10 + 10, /* sdma_v4_0_ring_emit_fence x3 for user fence, vm fence */ 10 + 10 + 10, /* sdma_v4_0_ring_emit_fence x3 for user fence, vm fence */
.emit_ib_size = 7 + 6, /* sdma_v4_0_ring_emit_ib */ .emit_ib_size = 7 + 6, /* sdma_v4_0_ring_emit_ib */
.emit_ib = sdma_v4_0_ring_emit_ib, .emit_ib = sdma_v4_0_ring_emit_ib,
...@@ -1606,8 +1599,7 @@ static void sdma_v4_0_set_vm_pte_funcs(struct amdgpu_device *adev) ...@@ -1606,8 +1599,7 @@ static void sdma_v4_0_set_vm_pte_funcs(struct amdgpu_device *adev)
} }
} }
const struct amdgpu_ip_block_version sdma_v4_0_ip_block = const struct amdgpu_ip_block_version sdma_v4_0_ip_block = {
{
.type = AMD_IP_BLOCK_TYPE_SDMA, .type = AMD_IP_BLOCK_TYPE_SDMA,
.major = 4, .major = 4,
.minor = 0, .minor = 0,
......
...@@ -25,7 +25,7 @@ ...@@ -25,7 +25,7 @@
#include <linux/module.h> #include <linux/module.h>
#include "drmP.h" #include "drmP.h"
#include "amdgpu.h" #include "amdgpu.h"
#include "amdgpu_atombios.h" #include "amdgpu_atomfirmware.h"
#include "amdgpu_ih.h" #include "amdgpu_ih.h"
#include "amdgpu_uvd.h" #include "amdgpu_uvd.h"
#include "amdgpu_vce.h" #include "amdgpu_vce.h"
...@@ -405,11 +405,11 @@ static void soc15_gpu_pci_config_reset(struct amdgpu_device *adev) ...@@ -405,11 +405,11 @@ static void soc15_gpu_pci_config_reset(struct amdgpu_device *adev)
static int soc15_asic_reset(struct amdgpu_device *adev) static int soc15_asic_reset(struct amdgpu_device *adev)
{ {
amdgpu_atombios_scratch_regs_engine_hung(adev, true); amdgpu_atomfirmware_scratch_regs_engine_hung(adev, true);
soc15_gpu_pci_config_reset(adev); soc15_gpu_pci_config_reset(adev);
amdgpu_atombios_scratch_regs_engine_hung(adev, false); amdgpu_atomfirmware_scratch_regs_engine_hung(adev, false);
return 0; return 0;
} }
...@@ -505,8 +505,7 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev) ...@@ -505,8 +505,7 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_ip_block_add(adev, &dce_virtual_ip_block); amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
amdgpu_ip_block_add(adev, &gfx_v9_0_ip_block); amdgpu_ip_block_add(adev, &gfx_v9_0_ip_block);
amdgpu_ip_block_add(adev, &sdma_v4_0_ip_block); amdgpu_ip_block_add(adev, &sdma_v4_0_ip_block);
if (!amdgpu_sriov_vf(adev)) amdgpu_ip_block_add(adev, &uvd_v7_0_ip_block);
amdgpu_ip_block_add(adev, &uvd_v7_0_ip_block);
amdgpu_ip_block_add(adev, &vce_v4_0_ip_block); amdgpu_ip_block_add(adev, &vce_v4_0_ip_block);
break; break;
default: default:
......
...@@ -45,13 +45,31 @@ struct nbio_pcie_index_data { ...@@ -45,13 +45,31 @@ struct nbio_pcie_index_data {
u32 index_offset; u32 index_offset;
u32 data_offset; u32 data_offset;
}; };
// Register Access Macro
/* Register Access Macros */
#define SOC15_REG_OFFSET(ip, inst, reg) (0 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG0 + reg : \ #define SOC15_REG_OFFSET(ip, inst, reg) (0 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG0 + reg : \
(1 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG1 + reg : \ (1 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG1 + reg : \
(2 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG2 + reg : \ (2 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG2 + reg : \
(3 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG3 + reg : \ (3 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG3 + reg : \
(ip##_BASE__INST##inst##_SEG4 + reg))))) (ip##_BASE__INST##inst##_SEG4 + reg)))))
#define WREG32_FIELD15(ip, idx, reg, field, val) \
WREG32(SOC15_REG_OFFSET(ip, idx, mm##reg), (RREG32(SOC15_REG_OFFSET(ip, idx, mm##reg)) & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field))
#define RREG32_SOC15(ip, inst, reg) \
RREG32( (0 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG0 + reg : \
(1 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG1 + reg : \
(2 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG2 + reg : \
(3 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG3 + reg : \
(ip##_BASE__INST##inst##_SEG4 + reg))))))
#define WREG32_SOC15(ip, inst, reg, value) \
WREG32( (0 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG0 + reg : \
(1 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG1 + reg : \
(2 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG2 + reg : \
(3 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG3 + reg : \
(ip##_BASE__INST##inst##_SEG4 + reg))))), value)
#endif #endif
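Besides tidying the comment, this adds three helpers on top of SOC15_REG_OFFSET: RREG32_SOC15/WREG32_SOC15 read and write a register through the per-IP base-address lookup, and WREG32_FIELD15 does a read-modify-write of a single field. A usage sketch; the SDMA0 register and field names are borrowed from other hunks in this series and stand in for any SOC15 block:

```c
/*
 * Usage sketch for the new SOC15 access helpers.  The SDMA0 register and
 * field names appear in other hunks of this series and are used here only
 * as an example block.
 */
static void example_soc15_reg_access(struct amdgpu_device *adev)
{
	u32 tmp;

	tmp = RREG32_SOC15(SDMA0, 0, mmSDMA0_CLK_CTRL);	/* read via base lookup */
	WREG32_SOC15(SDMA0, 0, mmSDMA0_CLK_CTRL, tmp);		/* plain write back */

	/* read-modify-write of one field without spelling out mask and shift */
	WREG32_FIELD15(SDMA0, 0, SDMA0_CLK_CTRL, SOFT_OVERRIDE0, 1);
}
```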
...@@ -138,6 +138,12 @@ struct amd_pp_profile { ...@@ -138,6 +138,12 @@ struct amd_pp_profile {
uint8_t down_hyst; uint8_t down_hyst;
}; };
enum amd_fan_ctrl_mode {
AMD_FAN_CTRL_NONE = 0,
AMD_FAN_CTRL_MANUAL = 1,
AMD_FAN_CTRL_AUTO = 2,
};
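The new enum gives powerplay and the amdgpu sysfs code one shared vocabulary for pwm1_enable, and the numeric values line up with the hwmon convention (0 = no regulation/full speed, 1 = manual, 2 = automatic). A sketch of a store handler built on it; the dpm wrappers are assumed to behave like the existing fan-control callbacks, so treat this as an illustration of the value mapping rather than the actual handler:

```c
/*
 * Sketch of a pwm1_enable store path using the new enum.  It assumes the
 * existing amdgpu_dpm_set_fan_control_mode()/set_fan_speed_percent()
 * wrappers; this is an illustration of the mapping, not the real handler.
 */
static int example_set_fan_ctrl_mode(struct amdgpu_device *adev, u32 value)
{
	switch (value) {
	case AMD_FAN_CTRL_NONE:		/* 0: no regulation, run the fan at 100% */
		return amdgpu_dpm_set_fan_speed_percent(adev, 100);
	case AMD_FAN_CTRL_MANUAL:	/* 1: user drives pwm1 directly */
	case AMD_FAN_CTRL_AUTO:		/* 2: firmware controls the fan */
		amdgpu_dpm_set_fan_control_mode(adev, value);
		return 0;
	default:
		return -EINVAL;
	}
}
```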
/* CG flags */ /* CG flags */
#define AMD_CG_SUPPORT_GFX_MGCG (1 << 0) #define AMD_CG_SUPPORT_GFX_MGCG (1 << 0)
#define AMD_CG_SUPPORT_GFX_MGLS (1 << 1) #define AMD_CG_SUPPORT_GFX_MGLS (1 << 1)
......
...@@ -251,7 +251,9 @@ static int pp_suspend(void *handle) ...@@ -251,7 +251,9 @@ static int pp_suspend(void *handle)
ret = pp_check(pp_handle); ret = pp_check(pp_handle);
if (ret != 0) if (ret == PP_DPM_DISABLED)
return 0;
else if (ret != 0)
return ret; return ret;
eventmgr = pp_handle->eventmgr; eventmgr = pp_handle->eventmgr;
......
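pp_check() reporting PP_DPM_DISABLED is no longer treated as a failure in the suspend path: if powerplay never came up there is nothing to quiesce. A sketch of the convention, with the event dispatch elided:

```c
/*
 * Sketch of the suspend-path convention this hunk establishes: a disabled
 * DPM stack is not an error when suspending.  The event dispatch is elided.
 */
static int example_pp_suspend(struct pp_instance *pp_handle)
{
	int ret = pp_check(pp_handle);

	if (ret == PP_DPM_DISABLED)
		return 0;		/* powerplay not initialized; nothing to do */
	if (ret != 0)
		return ret;

	/* ... normal path: hand the suspend event to the event manager ... */
	return 0;
}
```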
...@@ -219,7 +219,7 @@ const pem_event_action notify_smu_suspend_tasks[] = { ...@@ -219,7 +219,7 @@ const pem_event_action notify_smu_suspend_tasks[] = {
}; };
const pem_event_action disable_smc_firmware_ctf_tasks[] = { const pem_event_action disable_smc_firmware_ctf_tasks[] = {
/* PEM_Task_DisableSMCFirmwareCTF,*/ pem_task_disable_smc_firmware_ctf,
NULL NULL
}; };
......
...@@ -173,6 +173,11 @@ int pem_task_stop_asic_block_usage(struct pp_eventmgr *eventmgr, struct pem_even ...@@ -173,6 +173,11 @@ int pem_task_stop_asic_block_usage(struct pp_eventmgr *eventmgr, struct pem_even
return 0; return 0;
} }
int pem_task_disable_smc_firmware_ctf(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
{
return phm_disable_smc_firmware_ctf(eventmgr->hwmgr);
}
int pem_task_setup_asic(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) int pem_task_setup_asic(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
{ {
return phm_setup_asic(eventmgr->hwmgr); return phm_setup_asic(eventmgr->hwmgr);
......
...@@ -84,5 +84,6 @@ int pem_task_update_allowed_performance_levels(struct pp_eventmgr *eventmgr, str ...@@ -84,5 +84,6 @@ int pem_task_update_allowed_performance_levels(struct pp_eventmgr *eventmgr, str
/*thermal */ /*thermal */
int pem_task_initialize_thermal_controller(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); int pem_task_initialize_thermal_controller(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
int pem_task_uninitialize_thermal_controller(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); int pem_task_uninitialize_thermal_controller(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
int pem_task_disable_smc_firmware_ctf(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
#endif /* _EVENT_TASKS_H_ */ #endif /* _EVENT_TASKS_H_ */
...@@ -501,3 +501,13 @@ int phm_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_i ...@@ -501,3 +501,13 @@ int phm_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_i
return hwmgr->hwmgr_func->get_max_high_clocks(hwmgr, clocks); return hwmgr->hwmgr_func->get_max_high_clocks(hwmgr, clocks);
} }
int phm_disable_smc_firmware_ctf(struct pp_hwmgr *hwmgr)
{
PHM_FUNC_CHECK(hwmgr);
if (hwmgr->hwmgr_func->disable_smc_firmware_ctf == NULL)
return -EINVAL;
return hwmgr->hwmgr_func->disable_smc_firmware_ctf(hwmgr);
}
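The new event task and the phm_disable_smc_firmware_ctf() wrapper only do something once a per-ASIC backend fills in hwmgr_func->disable_smc_firmware_ctf; otherwise the wrapper returns -EINVAL. A stub showing where such an implementation plugs in; the SMU message a real backend (for example the Vega10 hwmgr) would send is left as a comment because its name is firmware-specific:

```c
/*
 * Stub showing where a per-ASIC implementation hooks in.  A real backend
 * would send the SMU a "disable CTF" message here; that message is left
 * out on purpose since its name is firmware-specific.
 */
static int example_disable_smc_firmware_ctf(struct pp_hwmgr *hwmgr)
{
	if (hwmgr == NULL)
		return -EINVAL;

	/* tell the SMU to stop software CTF (critical temperature) handling */
	return 0;
}

/* wiring it up is what makes phm_disable_smc_firmware_ctf() succeed */
static const struct pp_hwmgr_func example_hwmgr_funcs = {
	.disable_smc_firmware_ctf = example_disable_smc_firmware_ctf,
};
```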
...@@ -314,52 +314,45 @@ int pp_atomfwctrl_get_avfs_information(struct pp_hwmgr *hwmgr, ...@@ -314,52 +314,45 @@ int pp_atomfwctrl_get_avfs_information(struct pp_hwmgr *hwmgr,
le32_to_cpu(profile->gb_vdroop_table_ckson_a2); le32_to_cpu(profile->gb_vdroop_table_ckson_a2);
param->ulGbFuseTableCksoffM1 = param->ulGbFuseTableCksoffM1 =
le32_to_cpu(profile->avfsgb_fuse_table_cksoff_m1); le32_to_cpu(profile->avfsgb_fuse_table_cksoff_m1);
param->usGbFuseTableCksoffM2 = param->ulGbFuseTableCksoffM2 =
le16_to_cpu(profile->avfsgb_fuse_table_cksoff_m2); le16_to_cpu(profile->avfsgb_fuse_table_cksoff_m2);
param->ulGbFuseTableCksoffB = param->ulGbFuseTableCksoffB =
le32_to_cpu(profile->avfsgb_fuse_table_cksoff_b); le32_to_cpu(profile->avfsgb_fuse_table_cksoff_b);
param->ulGbFuseTableCksonM1 = param->ulGbFuseTableCksonM1 =
le32_to_cpu(profile->avfsgb_fuse_table_ckson_m1); le32_to_cpu(profile->avfsgb_fuse_table_ckson_m1);
param->usGbFuseTableCksonM2 = param->ulGbFuseTableCksonM2 =
le16_to_cpu(profile->avfsgb_fuse_table_ckson_m2); le16_to_cpu(profile->avfsgb_fuse_table_ckson_m2);
param->ulGbFuseTableCksonB = param->ulGbFuseTableCksonB =
le32_to_cpu(profile->avfsgb_fuse_table_ckson_b); le32_to_cpu(profile->avfsgb_fuse_table_ckson_b);
param->usMaxVoltage025mv =
le16_to_cpu(profile->max_voltage_0_25mv);
param->ucEnableGbVdroopTableCksoff =
profile->enable_gb_vdroop_table_cksoff;
param->ucEnableGbVdroopTableCkson = param->ucEnableGbVdroopTableCkson =
profile->enable_gb_vdroop_table_ckson; profile->enable_gb_vdroop_table_ckson;
param->ucEnableGbFuseTableCksoff =
profile->enable_gb_fuse_table_cksoff;
param->ucEnableGbFuseTableCkson = param->ucEnableGbFuseTableCkson =
profile->enable_gb_fuse_table_ckson; profile->enable_gb_fuse_table_ckson;
param->usPsmAgeComfactor = param->usPsmAgeComfactor =
le16_to_cpu(profile->psm_age_comfactor); le16_to_cpu(profile->psm_age_comfactor);
param->ucEnableApplyAvfsCksoffVoltage =
profile->enable_apply_avfs_cksoff_voltage;
param->ulDispclk2GfxclkM1 = param->ulDispclk2GfxclkM1 =
le32_to_cpu(profile->dispclk2gfxclk_a); le32_to_cpu(profile->dispclk2gfxclk_a);
param->usDispclk2GfxclkM2 = param->ulDispclk2GfxclkM2 =
le16_to_cpu(profile->dispclk2gfxclk_b); le16_to_cpu(profile->dispclk2gfxclk_b);
param->ulDispclk2GfxclkB = param->ulDispclk2GfxclkB =
le32_to_cpu(profile->dispclk2gfxclk_c); le32_to_cpu(profile->dispclk2gfxclk_c);
param->ulDcefclk2GfxclkM1 = param->ulDcefclk2GfxclkM1 =
le32_to_cpu(profile->dcefclk2gfxclk_a); le32_to_cpu(profile->dcefclk2gfxclk_a);
param->usDcefclk2GfxclkM2 = param->ulDcefclk2GfxclkM2 =
le16_to_cpu(profile->dcefclk2gfxclk_b); le16_to_cpu(profile->dcefclk2gfxclk_b);
param->ulDcefclk2GfxclkB = param->ulDcefclk2GfxclkB =
le32_to_cpu(profile->dcefclk2gfxclk_c); le32_to_cpu(profile->dcefclk2gfxclk_c);
param->ulPixelclk2GfxclkM1 = param->ulPixelclk2GfxclkM1 =
le32_to_cpu(profile->pixclk2gfxclk_a); le32_to_cpu(profile->pixclk2gfxclk_a);
param->usPixelclk2GfxclkM2 = param->ulPixelclk2GfxclkM2 =
le16_to_cpu(profile->pixclk2gfxclk_b); le16_to_cpu(profile->pixclk2gfxclk_b);
param->ulPixelclk2GfxclkB = param->ulPixelclk2GfxclkB =
le32_to_cpu(profile->pixclk2gfxclk_c); le32_to_cpu(profile->pixclk2gfxclk_c);
param->ulPhyclk2GfxclkM1 = param->ulPhyclk2GfxclkM1 =
le32_to_cpu(profile->phyclk2gfxclk_a); le32_to_cpu(profile->phyclk2gfxclk_a);
param->usPhyclk2GfxclkM2 = param->ulPhyclk2GfxclkM2 =
le16_to_cpu(profile->phyclk2gfxclk_b); le16_to_cpu(profile->phyclk2gfxclk_b);
param->ulPhyclk2GfxclkB = param->ulPhyclk2GfxclkB =
le32_to_cpu(profile->phyclk2gfxclk_c); le32_to_cpu(profile->phyclk2gfxclk_c);
...@@ -394,3 +387,31 @@ int pp_atomfwctrl_get_gpio_information(struct pp_hwmgr *hwmgr, ...@@ -394,3 +387,31 @@ int pp_atomfwctrl_get_gpio_information(struct pp_hwmgr *hwmgr,
return 0; return 0;
} }
int pp_atomfwctrl_get_vbios_bootup_values(struct pp_hwmgr *hwmgr,
struct pp_atomfwctrl_bios_boot_up_values *boot_values)
{
struct atom_firmware_info_v3_1 *info = NULL;
uint16_t ix;
ix = GetIndexIntoMasterDataTable(firmwareinfo);
info = (struct atom_firmware_info_v3_1 *)
cgs_atom_get_data_table(hwmgr->device,
ix, NULL, NULL, NULL);
if (!info) {
pr_info("Error retrieving BIOS firmwareinfo!");
return -EINVAL;
}
boot_values->ulRevision = info->firmware_revision;
boot_values->ulGfxClk = info->bootup_sclk_in10khz;
boot_values->ulUClk = info->bootup_mclk_in10khz;
boot_values->ulSocClk = 0;
boot_values->usVddc = info->bootup_vddc_mv;
boot_values->usVddci = info->bootup_vddci_mv;
boot_values->usMvddc = info->bootup_mvddc_mv;
boot_values->usVddGfx = info->bootup_vddgfx_mv;
return 0;
}
\ No newline at end of file
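The new pp_atomfwctrl_get_vbios_bootup_values() above looks up the firmwareinfo master data table (atom_firmware_info_v3_1) through cgs_atom_get_data_table() and copies the bootup clocks (in 10 kHz units, per the *_in10khz field names) and bootup voltages (in mV) into the caller's structure; the soc clock is left at 0 since the v3.1 table does not carry it. A hedged kernel-context usage sketch, relying only on declarations added in this diff (the wrapper name and pr_debug line are illustrative):

static int example_read_boot_values(struct pp_hwmgr *hwmgr)
{
	struct pp_atomfwctrl_bios_boot_up_values boot;
	int ret;

	ret = pp_atomfwctrl_get_vbios_bootup_values(hwmgr, &boot);
	if (ret)
		return ret;

	/* Clocks are in 10 kHz units, voltages in mV. */
	pr_debug("bootup gfxclk %u, uclk %u (x10 kHz), vddc %u mV\n",
		 boot.ulGfxClk, boot.ulUClk, boot.usVddc);
	return 0;
}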
...@@ -69,7 +69,7 @@ struct pp_atomfwctrl_clock_dividers_soc15 { ...@@ -69,7 +69,7 @@ struct pp_atomfwctrl_clock_dividers_soc15 {
struct pp_atomfwctrl_avfs_parameters { struct pp_atomfwctrl_avfs_parameters {
uint32_t ulMaxVddc; uint32_t ulMaxVddc;
uint32_t ulMinVddc; uint32_t ulMinVddc;
uint8_t ucMaxVidStep;
uint32_t ulMeanNsigmaAcontant0; uint32_t ulMeanNsigmaAcontant0;
uint32_t ulMeanNsigmaAcontant1; uint32_t ulMeanNsigmaAcontant1;
uint32_t ulMeanNsigmaAcontant2; uint32_t ulMeanNsigmaAcontant2;
...@@ -82,30 +82,30 @@ struct pp_atomfwctrl_avfs_parameters { ...@@ -82,30 +82,30 @@ struct pp_atomfwctrl_avfs_parameters {
uint32_t ulGbVdroopTableCksonA0; uint32_t ulGbVdroopTableCksonA0;
uint32_t ulGbVdroopTableCksonA1; uint32_t ulGbVdroopTableCksonA1;
uint32_t ulGbVdroopTableCksonA2; uint32_t ulGbVdroopTableCksonA2;
uint32_t ulGbFuseTableCksoffM1; uint32_t ulGbFuseTableCksoffM1;
uint16_t usGbFuseTableCksoffM2; uint32_t ulGbFuseTableCksoffM2;
uint32_t ulGbFuseTableCksoffB;\ uint32_t ulGbFuseTableCksoffB;
uint32_t ulGbFuseTableCksonM1; uint32_t ulGbFuseTableCksonM1;
uint16_t usGbFuseTableCksonM2; uint32_t ulGbFuseTableCksonM2;
uint32_t ulGbFuseTableCksonB; uint32_t ulGbFuseTableCksonB;
uint16_t usMaxVoltage025mv;
uint8_t ucEnableGbVdroopTableCksoff;
uint8_t ucEnableGbVdroopTableCkson; uint8_t ucEnableGbVdroopTableCkson;
uint8_t ucEnableGbFuseTableCksoff;
uint8_t ucEnableGbFuseTableCkson; uint8_t ucEnableGbFuseTableCkson;
uint16_t usPsmAgeComfactor; uint16_t usPsmAgeComfactor;
uint8_t ucEnableApplyAvfsCksoffVoltage;
uint32_t ulDispclk2GfxclkM1; uint32_t ulDispclk2GfxclkM1;
uint16_t usDispclk2GfxclkM2; uint32_t ulDispclk2GfxclkM2;
uint32_t ulDispclk2GfxclkB; uint32_t ulDispclk2GfxclkB;
uint32_t ulDcefclk2GfxclkM1; uint32_t ulDcefclk2GfxclkM1;
uint16_t usDcefclk2GfxclkM2; uint32_t ulDcefclk2GfxclkM2;
uint32_t ulDcefclk2GfxclkB; uint32_t ulDcefclk2GfxclkB;
uint32_t ulPixelclk2GfxclkM1; uint32_t ulPixelclk2GfxclkM1;
uint16_t usPixelclk2GfxclkM2; uint32_t ulPixelclk2GfxclkM2;
uint32_t ulPixelclk2GfxclkB; uint32_t ulPixelclk2GfxclkB;
uint32_t ulPhyclk2GfxclkM1; uint32_t ulPhyclk2GfxclkM1;
uint16_t usPhyclk2GfxclkM2; uint32_t ulPhyclk2GfxclkM2;
uint32_t ulPhyclk2GfxclkB; uint32_t ulPhyclk2GfxclkB;
}; };
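The two AVFS hunks above are the parser and struct halves of one change: every *M2 slope coefficient moves from a 16-bit us-prefixed field to a 32-bit ul-prefixed one, and the fields that no longer have a consumer (usMaxVoltage025mv, ucEnableGbVdroopTableCksoff, ucEnableGbFuseTableCksoff, ucEnableApplyAvfsCksoffVoltage) are dropped. Note the parser keeps le16_to_cpu() for the widened fields, presumably because the firmware table still stores them as 16-bit values that are simply widened on the host side. If one wanted a compile-time guard that the two halves stay in sync, a sketch (illustrative, not part of this diff; assumes the header above is included) could pin the widths with BUILD_BUG_ON:

static void __maybe_unused check_avfs_param_widths(void)
{
	/* Fails the build if the widened coefficients regress to 16 bits;
	 * the function only needs to be compiled, never called. */
	BUILD_BUG_ON(sizeof(((struct pp_atomfwctrl_avfs_parameters *)0)
			->ulGbFuseTableCksoffM2) != sizeof(uint32_t));
	BUILD_BUG_ON(sizeof(((struct pp_atomfwctrl_avfs_parameters *)0)
			->ulDispclk2GfxclkM2) != sizeof(uint32_t));
}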
...@@ -119,6 +119,18 @@ struct pp_atomfwctrl_gpio_parameters { ...@@ -119,6 +119,18 @@ struct pp_atomfwctrl_gpio_parameters {
uint8_t ucFwCtfGpio; uint8_t ucFwCtfGpio;
uint8_t ucFwCtfPolarity; uint8_t ucFwCtfPolarity;
}; };
struct pp_atomfwctrl_bios_boot_up_values {
uint32_t ulRevision;
uint32_t ulGfxClk;
uint32_t ulUClk;
uint32_t ulSocClk;
uint16_t usVddc;
uint16_t usVddci;
uint16_t usMvddc;
uint16_t usVddGfx;
};
int pp_atomfwctrl_get_gpu_pll_dividers_vega10(struct pp_hwmgr *hwmgr, int pp_atomfwctrl_get_gpu_pll_dividers_vega10(struct pp_hwmgr *hwmgr,
uint32_t clock_type, uint32_t clock_value, uint32_t clock_type, uint32_t clock_value,
struct pp_atomfwctrl_clock_dividers_soc15 *dividers); struct pp_atomfwctrl_clock_dividers_soc15 *dividers);
...@@ -136,5 +148,8 @@ int pp_atomfwctrl_get_avfs_information(struct pp_hwmgr *hwmgr, ...@@ -136,5 +148,8 @@ int pp_atomfwctrl_get_avfs_information(struct pp_hwmgr *hwmgr,
int pp_atomfwctrl_get_gpio_information(struct pp_hwmgr *hwmgr, int pp_atomfwctrl_get_gpio_information(struct pp_hwmgr *hwmgr,
struct pp_atomfwctrl_gpio_parameters *param); struct pp_atomfwctrl_gpio_parameters *param);
int pp_atomfwctrl_get_vbios_bootup_values(struct pp_hwmgr *hwmgr,
struct pp_atomfwctrl_bios_boot_up_values *boot_values);
#endif #endif
...@@ -54,6 +54,6 @@ extern int smu7_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *spe ...@@ -54,6 +54,6 @@ extern int smu7_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *spe
extern int smu7_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr); extern int smu7_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr);
extern int smu7_thermal_enable_alert(struct pp_hwmgr *hwmgr); extern int smu7_thermal_enable_alert(struct pp_hwmgr *hwmgr);
extern int smu7_thermal_disable_alert(struct pp_hwmgr *hwmgr); extern int smu7_thermal_disable_alert(struct pp_hwmgr *hwmgr);
extern int smu7_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr);
#endif #endif
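Exporting smu7_fan_ctrl_start_smc_fan_control() alongside the existing stop helper lets code outside the thermal file (e.g. the refined pwm1_enable handling in this series) hand fan control back to SMC firmware. A hedged sketch of a mode switch built on the two helpers; the wrapper name and the numeric mode values are illustrative:

static int example_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
{
	switch (mode) {
	case 1:	/* manual: take the fan away from SMC firmware */
		return smu7_fan_ctrl_stop_smc_fan_control(hwmgr);
	case 2:	/* auto: let SMC firmware drive the fan again */
		return smu7_fan_ctrl_start_smc_fan_control(hwmgr);
	default:
		return -EINVAL;
	}
}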
...@@ -177,8 +177,11 @@ struct vega10_dpmlevel_enable_mask { ...@@ -177,8 +177,11 @@ struct vega10_dpmlevel_enable_mask {
}; };
struct vega10_vbios_boot_state { struct vega10_vbios_boot_state {
bool bsoc_vddc_lock;
uint16_t vddc; uint16_t vddc;
uint16_t vddci; uint16_t vddci;
uint16_t mvddc;
uint16_t vdd_gfx;
uint32_t gfx_clock; uint32_t gfx_clock;
uint32_t mem_clock; uint32_t mem_clock;
uint32_t soc_clock; uint32_t soc_clock;
......
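vega10_vbios_boot_state grows mvddc and vdd_gfx voltages plus a bsoc_vddc_lock flag, matching the extra values exposed by pp_atomfwctrl_get_vbios_bootup_values() earlier in this diff. A hedged sketch of copying one into the other (the helper function is illustrative; the field names come from the two structs shown above):

static void example_fill_boot_state(struct vega10_vbios_boot_state *bs,
		const struct pp_atomfwctrl_bios_boot_up_values *bv)
{
	bs->vddc      = bv->usVddc;
	bs->vddci     = bv->usVddci;
	bs->mvddc     = bv->usMvddc;
	bs->vdd_gfx   = bv->usVddGfx;
	bs->gfx_clock = bv->ulGfxClk;
	bs->mem_clock = bv->ulUClk;
	bs->soc_clock = bv->ulSocClk;
}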
...@@ -60,6 +60,7 @@ int vega10_enable_smc_cac(struct pp_hwmgr *hwmgr); ...@@ -60,6 +60,7 @@ int vega10_enable_smc_cac(struct pp_hwmgr *hwmgr);
int vega10_enable_power_containment(struct pp_hwmgr *hwmgr); int vega10_enable_power_containment(struct pp_hwmgr *hwmgr);
int vega10_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n); int vega10_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n);
int vega10_power_control_set_level(struct pp_hwmgr *hwmgr); int vega10_power_control_set_level(struct pp_hwmgr *hwmgr);
int vega10_disable_power_containment(struct pp_hwmgr *hwmgr);
#endif /* _VEGA10_POWERTUNE_H_ */ #endif /* _VEGA10_POWERTUNE_H_ */
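Exporting vega10_disable_power_containment() gives the hwmgr a teardown path symmetric to vega10_enable_power_containment(). A hedged sketch of gating it on the same platform capability the enable side typically checks (the wrapper name is illustrative):

static int example_power_containment_teardown(struct pp_hwmgr *hwmgr)
{
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			    PHM_PlatformCaps_PowerContainment))
		return vega10_disable_power_containment(hwmgr);
	return 0;
}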
(The diffs for 8 additional files in this merge are collapsed in this view and not shown.)