Commit 8bb5b222 authored by Dave Airlie

Merge branch 'drm-next-4.17' of git://people.freedesktop.org/~agd5f/linux into drm-next

- Expose thermal thresholds through hwmon properly
- Rework HDP flushing for rings and CPU
- Improved dual-link DVI handling in DC
- Lots of code clean up
- Additional DC clean up
- Allow scanout from system memory on CZ/BR/ST
- Improved PASID/VM integration
- Expose GPU voltage and power via hwmon
- Initial wattman-like support
- Initial power profiles for use-case optimized performance
- Rework GPUVM TLB flushing
- Rework IP offset handling for SOC15 asics
- Add CRC support in DC
- Fixes for mmhub powergating
- Initial regamma/degamma/CTM support in DC
- ttm cleanups and simplifications
- ttm OOM avoidance fixes

* 'drm-next-4.17' of git://people.freedesktop.org/~agd5f/linux: (348 commits)
  Revert "drm/radeon/pm: autoswitch power state when in balanced mode"
  drm/radeon: use drm_gem_private_object_init
  drm/amdgpu: use drm_gem_private_object_init
  drm/amdgpu: mitigate workaround for i915
  drm/amdgpu: implement amdgpu_gem_map_(attach/detach)
  drm/amdgpu/powerplay/smu7: drop refresh rate checks for mclk switching
  drm/amdgpu/cgs: add refresh rate checking to non-DC display code
  drm/amd/powerplay/smu7: allow mclk switching with no displays
  drm/amd/powerplay/vega10: allow mclk switching with no displays
  drm/amd/powerplay: use PP_CAP macro for disable_mclk_switching_for_frame_lock
  drm/amd/powerplay: remove unused headers
  drm/amdgpu_gem: fix error handling path in amdgpu_gem_va_update_vm
  drm/amdgpu: update the PASID mapping only on demand
  drm/amdgpu: separate PASID mapping from VM flush v2
  drm/amd/display: Fix increment when sampling OTF in DCE
  drm/amd/display: De PQ implementation
  drm/amd/display: Remove unused dm_pp_ interfaces
  drm/amd/display: Add logging for aux DPCD access
  drm/amd/display: Set vsc pack revision when DPCD revision is >= 1.2
  drm/amd/display: provide an interface to query firmware version
  ...
@@ -63,7 +63,7 @@ amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \
 amdgpu-$(CONFIG_DRM_AMDGPU_SI)+= si.o gmc_v6_0.o gfx_v6_0.o si_ih.o si_dma.o dce_v6_0.o si_dpm.o si_smc.o
 amdgpu-y += \
-	vi.o mxgpu_vi.o nbio_v6_1.o soc15.o mxgpu_ai.o nbio_v7_0.o vega10_reg_init.o
+	vi.o mxgpu_vi.o nbio_v6_1.o soc15.o emu_soc.o mxgpu_ai.o nbio_v7_0.o vega10_reg_init.o
 # add GMC block
 amdgpu-y += \
...
@@ -68,6 +68,7 @@
 #include "amdgpu_vce.h"
 #include "amdgpu_vcn.h"
 #include "amdgpu_mn.h"
+#include "amdgpu_gmc.h"
 #include "amdgpu_dm.h"
 #include "amdgpu_virt.h"
 #include "amdgpu_gart.h"
@@ -127,6 +128,7 @@ extern int amdgpu_job_hang_limit;
 extern int amdgpu_lbpw;
 extern int amdgpu_compute_multipipe;
 extern int amdgpu_gpu_recovery;
+extern int amdgpu_emu_mode;
 #ifdef CONFIG_DRM_AMDGPU_SI
 extern int amdgpu_si_support;
@@ -318,13 +320,6 @@ struct amdgpu_vm_pte_funcs {
 	void (*write_pte)(struct amdgpu_ib *ib, uint64_t pe,
 			  uint64_t value, unsigned count,
 			  uint32_t incr);
-	/* maximum nums of PTEs/PDEs in a single operation */
-	uint32_t	set_max_nums_pte_pde;
-	/* number of dw to reserve per operation */
-	unsigned	set_pte_pde_num_dw;
 	/* for linear pte/pde updates without addr mapping */
 	void (*set_pte_pde)(struct amdgpu_ib *ib,
 			    uint64_t pe,
@@ -332,28 +327,6 @@ struct amdgpu_vm_pte_funcs {
 			    uint32_t incr, uint64_t flags);
 };
-/* provided by the gmc block */
-struct amdgpu_gart_funcs {
-	/* flush the vm tlb via mmio */
-	void (*flush_gpu_tlb)(struct amdgpu_device *adev,
-			      uint32_t vmid);
-	/* write pte/pde updates using the cpu */
-	int (*set_pte_pde)(struct amdgpu_device *adev,
-			   void *cpu_pt_addr, /* cpu addr of page table */
-			   uint32_t gpu_page_idx, /* pte/pde to update */
-			   uint64_t addr, /* addr to write into pte/pde */
-			   uint64_t flags); /* access flags */
-	/* enable/disable PRT support */
-	void (*set_prt)(struct amdgpu_device *adev, bool enable);
-	/* set pte flags based per asic */
-	uint64_t (*get_vm_pte_flags)(struct amdgpu_device *adev,
-				     uint32_t flags);
-	/* get the pde for a given mc addr */
-	void (*get_vm_pde)(struct amdgpu_device *adev, int level,
-			   u64 *dst, u64 *flags);
-	uint32_t (*get_invalidate_req)(unsigned int vmid);
-};
 /* provided by the ih block */
 struct amdgpu_ih_funcs {
 	/* ring read/write ptr handling, called from interrupt context */
@@ -418,8 +391,8 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
 struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
					struct drm_gem_object *gobj,
					int flags);
-int amdgpu_gem_prime_pin(struct drm_gem_object *obj);
-void amdgpu_gem_prime_unpin(struct drm_gem_object *obj);
+struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
+					       struct dma_buf *dma_buf);
 struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *);
 void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj);
 void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
@@ -493,56 +466,6 @@ int amdgpu_mode_dumb_mmap(struct drm_file *filp,
 int amdgpu_fence_slab_init(void);
 void amdgpu_fence_slab_fini(void);
-/*
- * VMHUB structures, functions & helpers
- */
-struct amdgpu_vmhub {
-	uint32_t	ctx0_ptb_addr_lo32;
-	uint32_t	ctx0_ptb_addr_hi32;
-	uint32_t	vm_inv_eng0_req;
-	uint32_t	vm_inv_eng0_ack;
-	uint32_t	vm_context0_cntl;
-	uint32_t	vm_l2_pro_fault_status;
-	uint32_t	vm_l2_pro_fault_cntl;
-};
-/*
- * GPU MC structures, functions & helpers
- */
-struct amdgpu_mc {
-	resource_size_t		aper_size;
-	resource_size_t		aper_base;
-	resource_size_t		agp_base;
-	/* for some chips with <= 32MB we need to lie
-	 * about vram size near mc fb location */
-	u64			mc_vram_size;
-	u64			visible_vram_size;
-	u64			gart_size;
-	u64			gart_start;
-	u64			gart_end;
-	u64			vram_start;
-	u64			vram_end;
-	unsigned		vram_width;
-	u64			real_vram_size;
-	int			vram_mtrr;
-	u64			mc_mask;
-	const struct firmware	*fw;	/* MC firmware */
-	uint32_t		fw_version;
-	struct amdgpu_irq_src	vm_fault;
-	uint32_t		vram_type;
-	uint32_t		srbm_soft_reset;
-	bool			prt_warning;
-	uint64_t		stolen_size;
-	/* apertures */
-	u64			shared_aperture_start;
-	u64			shared_aperture_end;
-	u64			private_aperture_start;
-	u64			private_aperture_end;
-	/* protects concurrent invalidation */
-	spinlock_t		invalidate_lock;
-	bool			translate_further;
-};
 /*
  * GPU doorbell structures, functions & helpers
  */
@@ -1125,8 +1048,9 @@ struct amdgpu_job {
 	void			*owner;
 	uint64_t		fence_ctx; /* the fence_context this job uses */
 	bool			vm_needs_flush;
-	unsigned		vmid;
 	uint64_t		vm_pd_addr;
+	unsigned		vmid;
+	unsigned		pasid;
 	uint32_t		gds_base, gds_size;
 	uint32_t		gws_base, gws_size;
 	uint32_t		oa_base, oa_size;
@@ -1288,6 +1212,11 @@ struct amdgpu_asic_funcs {
 	void (*set_pcie_lanes)(struct amdgpu_device *adev, int lanes);
 	/* get config memsize register */
 	u32 (*get_config_memsize)(struct amdgpu_device *adev);
+	/* flush hdp write queue */
+	void (*flush_hdp)(struct amdgpu_device *adev, struct amdgpu_ring *ring);
+	/* invalidate hdp read cache */
+	void (*invalidate_hdp)(struct amdgpu_device *adev,
+			       struct amdgpu_ring *ring);
 };
 /*
@@ -1431,7 +1360,7 @@ struct amdgpu_nbio_funcs {
 	u32 (*get_pcie_data_offset)(struct amdgpu_device *adev);
 	u32 (*get_rev_id)(struct amdgpu_device *adev);
 	void (*mc_access_enable)(struct amdgpu_device *adev, bool enable);
-	void (*hdp_flush)(struct amdgpu_device *adev);
+	void (*hdp_flush)(struct amdgpu_device *adev, struct amdgpu_ring *ring);
 	u32 (*get_memsize)(struct amdgpu_device *adev);
 	void (*sdma_doorbell_range)(struct amdgpu_device *adev, int instance,
				    bool use_doorbell, int doorbell_index);
@@ -1574,7 +1503,7 @@ struct amdgpu_device {
 	struct amdgpu_clock		clock;
 	/* MC */
-	struct amdgpu_mc		mc;
+	struct amdgpu_gmc		gmc;
 	struct amdgpu_gart		gart;
 	struct amdgpu_dummy_page	dummy_page;
 	struct amdgpu_vm_manager	vm_manager;
@@ -1726,6 +1655,8 @@ void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v);
 bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type);
 bool amdgpu_device_has_dc_support(struct amdgpu_device *adev);
+int emu_soc_asic_init(struct amdgpu_device *adev);
 /*
  * Registers read & write functions.
  */
@@ -1838,13 +1769,17 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
 #define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l))
 #define amdgpu_asic_read_register(adev, se, sh, offset, v)((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v)))
 #define amdgpu_asic_get_config_memsize(adev) (adev)->asic_funcs->get_config_memsize((adev))
-#define amdgpu_gart_flush_gpu_tlb(adev, vmid) (adev)->gart.gart_funcs->flush_gpu_tlb((adev), (vmid))
-#define amdgpu_gart_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gart.gart_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags))
-#define amdgpu_gart_get_vm_pde(adev, level, dst, flags) (adev)->gart.gart_funcs->get_vm_pde((adev), (level), (dst), (flags))
+#define amdgpu_asic_flush_hdp(adev, r) (adev)->asic_funcs->flush_hdp((adev), (r))
+#define amdgpu_asic_invalidate_hdp(adev, r) (adev)->asic_funcs->invalidate_hdp((adev), (r))
+#define amdgpu_gmc_flush_gpu_tlb(adev, vmid) (adev)->gmc.gmc_funcs->flush_gpu_tlb((adev), (vmid))
+#define amdgpu_gmc_emit_flush_gpu_tlb(r, vmid, addr) (r)->adev->gmc.gmc_funcs->emit_flush_gpu_tlb((r), (vmid), (addr))
+#define amdgpu_gmc_emit_pasid_mapping(r, vmid, pasid) (r)->adev->gmc.gmc_funcs->emit_pasid_mapping((r), (vmid), (pasid))
+#define amdgpu_gmc_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gmc.gmc_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags))
+#define amdgpu_gmc_get_vm_pde(adev, level, dst, flags) (adev)->gmc.gmc_funcs->get_vm_pde((adev), (level), (dst), (flags))
+#define amdgpu_gmc_get_pte_flags(adev, flags) (adev)->gmc.gmc_funcs->get_vm_pte_flags((adev),(flags))
 #define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count)))
 #define amdgpu_vm_write_pte(adev, ib, pe, value, count, incr) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (value), (count), (incr)))
 #define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags)))
-#define amdgpu_vm_get_pte_flags(adev, flags) (adev)->gart.gart_funcs->get_vm_pte_flags((adev),(flags))
 #define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib)))
 #define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r))
 #define amdgpu_ring_test_ib(r, t) (r)->funcs->test_ib((r), (t))
@@ -1857,11 +1792,11 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
 #define amdgpu_ring_emit_fence(r, addr, seq, flags) (r)->funcs->emit_fence((r), (addr), (seq), (flags))
 #define amdgpu_ring_emit_gds_switch(r, v, db, ds, wb, ws, ab, as) (r)->funcs->emit_gds_switch((r), (v), (db), (ds), (wb), (ws), (ab), (as))
 #define amdgpu_ring_emit_hdp_flush(r) (r)->funcs->emit_hdp_flush((r))
-#define amdgpu_ring_emit_hdp_invalidate(r) (r)->funcs->emit_hdp_invalidate((r))
 #define amdgpu_ring_emit_switch_buffer(r) (r)->funcs->emit_switch_buffer((r))
 #define amdgpu_ring_emit_cntxcntl(r, d) (r)->funcs->emit_cntxcntl((r), (d))
 #define amdgpu_ring_emit_rreg(r, d) (r)->funcs->emit_rreg((r), (d))
 #define amdgpu_ring_emit_wreg(r, d, v) (r)->funcs->emit_wreg((r), (d), (v))
+#define amdgpu_ring_emit_reg_wait(r, d, v, m) (r)->funcs->emit_reg_wait((r), (d), (v), (m))
 #define amdgpu_ring_emit_tmz(r, b) (r)->funcs->emit_tmz((r), (b))
 #define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib)))
 #define amdgpu_ring_init_cond_exec(r) (r)->funcs->init_cond_exec((r))
@@ -1871,7 +1806,6 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
 #define amdgpu_ih_decode_iv(adev, iv) (adev)->irq.ih_funcs->decode_iv((adev), (iv))
 #define amdgpu_ih_set_rptr(adev) (adev)->irq.ih_funcs->set_rptr((adev))
 #define amdgpu_display_vblank_get_counter(adev, crtc) (adev)->mode_info.funcs->vblank_get_counter((adev), (crtc))
-#define amdgpu_display_vblank_wait(adev, crtc) (adev)->mode_info.funcs->vblank_wait((adev), (crtc))
 #define amdgpu_display_backlight_set_level(adev, e, l) (adev)->mode_info.funcs->backlight_set_level((e), (l))
 #define amdgpu_display_backlight_get_level(adev, e) (adev)->mode_info.funcs->backlight_get_level((e))
 #define amdgpu_display_hpd_sense(adev, h) (adev)->mode_info.funcs->hpd_sense((adev), (h))
@@ -1894,16 +1828,16 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
			      struct amdgpu_job* job, bool force);
 void amdgpu_device_pci_config_reset(struct amdgpu_device *adev);
 bool amdgpu_device_need_post(struct amdgpu_device *adev);
-void amdgpu_update_display_priority(struct amdgpu_device *adev);
+void amdgpu_display_update_priority(struct amdgpu_device *adev);
 void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
				  u64 num_vis_bytes);
 void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain);
 bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
 void amdgpu_device_vram_location(struct amdgpu_device *adev,
-				 struct amdgpu_mc *mc, u64 base);
+				 struct amdgpu_gmc *mc, u64 base);
 void amdgpu_device_gart_location(struct amdgpu_device *adev,
-				 struct amdgpu_mc *mc);
+				 struct amdgpu_gmc *mc);
 int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev);
 void amdgpu_ttm_set_active_vram_size(struct amdgpu_device *adev, u64 size);
 int amdgpu_ttm_init(struct amdgpu_device *adev);
...
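The header hunks above fold the old amdgpu_gart_funcs/amdgpu_mc pair into a single amdgpu_gmc block and add per-ASIC HDP flush/invalidate callbacks (the "Rework HDP flushing" and "Rework GPUVM TLB flushing" items in the summary). A minimal sketch of how the new dispatch macros are meant to be used follows; the wrapper function and the NULL-ring MMIO assumption are illustrative, not code from this merge.

#include "amdgpu.h"

/* Illustrative helper (not part of this merge): flush pending HDP writes,
 * then invalidate the VM TLB through the renamed gmc block. Passing a NULL
 * ring is assumed to select the MMIO path rather than emitting ring packets.
 */
static void example_sync_vm_mapping(struct amdgpu_device *adev, unsigned vmid)
{
	/* dispatches to adev->asic_funcs->flush_hdp() via the macro above */
	amdgpu_asic_flush_hdp(adev, NULL);

	/* dispatches to adev->gmc.gmc_funcs->flush_gpu_tlb(), which used to
	 * live behind gart.gart_funcs before this rework */
	amdgpu_gmc_flush_gpu_tlb(adev, vmid);
}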
@@ -216,8 +216,7 @@ int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
		return -ENOMEM;
	r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_GTT,
-			     AMDGPU_GEM_CREATE_CPU_GTT_USWC, NULL, NULL, 0,
-			     &(*mem)->bo);
+			     AMDGPU_GEM_CREATE_CPU_GTT_USWC, NULL, NULL, &(*mem)->bo);
	if (r) {
		dev_err(adev->dev,
			"failed to allocate BO for amdkfd (%d)\n", r);
@@ -281,24 +280,29 @@ void get_local_mem_info(struct kgd_dev *kgd,
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	uint64_t address_mask = adev->dev->dma_mask ? ~*adev->dev->dma_mask :
					     ~((1ULL << 32) - 1);
-	resource_size_t aper_limit = adev->mc.aper_base + adev->mc.aper_size;
+	resource_size_t aper_limit = adev->gmc.aper_base + adev->gmc.aper_size;
	memset(mem_info, 0, sizeof(*mem_info));
-	if (!(adev->mc.aper_base & address_mask || aper_limit & address_mask)) {
-		mem_info->local_mem_size_public = adev->mc.visible_vram_size;
-		mem_info->local_mem_size_private = adev->mc.real_vram_size -
-				adev->mc.visible_vram_size;
+	if (!(adev->gmc.aper_base & address_mask || aper_limit & address_mask)) {
+		mem_info->local_mem_size_public = adev->gmc.visible_vram_size;
+		mem_info->local_mem_size_private = adev->gmc.real_vram_size -
+				adev->gmc.visible_vram_size;
	} else {
		mem_info->local_mem_size_public = 0;
-		mem_info->local_mem_size_private = adev->mc.real_vram_size;
+		mem_info->local_mem_size_private = adev->gmc.real_vram_size;
	}
-	mem_info->vram_width = adev->mc.vram_width;
+	mem_info->vram_width = adev->gmc.vram_width;
	pr_debug("Address base: %pap limit %pap public 0x%llx private 0x%llx\n",
-			&adev->mc.aper_base, &aper_limit,
+			&adev->gmc.aper_base, &aper_limit,
			mem_info->local_mem_size_public,
			mem_info->local_mem_size_private);
+	if (amdgpu_emu_mode == 1) {
+		mem_info->mem_clk_max = 100;
+		return;
+	}
	if (amdgpu_sriov_vf(adev))
		mem_info->mem_clk_max = adev->clock.default_mclk / 100;
	else
@@ -319,6 +323,9 @@ uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd)
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	/* the sclk is in quantas of 10kHz */
+	if (amdgpu_emu_mode == 1)
+		return 100;
	if (amdgpu_sriov_vf(adev))
		return adev->clock.default_sclk / 100;
...
@@ -568,6 +568,7 @@ static const struct amdgpu_px_quirk amdgpu_px_quirk_list[] = {
	/* HG _PR3 doesn't seem to work on this A+A weston board */
	{ 0x1002, 0x6900, 0x1002, 0x0124, AMDGPU_PX_QUIRK_FORCE_ATPX },
	{ 0x1002, 0x6900, 0x1028, 0x0812, AMDGPU_PX_QUIRK_FORCE_ATPX },
+	{ 0x1002, 0x6900, 0x1028, 0x0813, AMDGPU_PX_QUIRK_FORCE_ATPX },
	{ 0, 0, 0, 0, 0 },
 };
...
@@ -81,7 +81,7 @@ static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size,
	n = AMDGPU_BENCHMARK_ITERATIONS;
	r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, sdomain, 0, NULL,
-			     NULL, 0, &sobj);
+			     NULL, &sobj);
	if (r) {
		goto out_cleanup;
	}
@@ -94,7 +94,7 @@ static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size,
		goto out_cleanup;
	}
	r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, ddomain, 0, NULL,
-			     NULL, 0, &dobj);
+			     NULL, &dobj);
	if (r) {
		goto out_cleanup;
	}
...
@@ -233,8 +233,10 @@ void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
	for (i = 0; i < list->num_entries; i++) {
		unsigned priority = list->array[i].priority;
-		list_add_tail(&list->array[i].tv.head,
-			      &bucket[priority]);
+		if (!list->array[i].robj->parent)
+			list_add_tail(&list->array[i].tv.head,
+				      &bucket[priority]);
		list->array[i].user_pages = NULL;
	}
...
@@ -109,7 +109,7 @@ static int amdgpu_cgs_alloc_gpu_mem(struct cgs_device *cgs_device,
	*handle = 0;
	ret = amdgpu_bo_create(adev, size, align, true, domain, flags,
-			       NULL, NULL, 0, &obj);
+			       NULL, NULL, &obj);
	if (ret) {
		DRM_ERROR("(%d) bo create failed\n", ret);
		return ret;
@@ -953,6 +953,11 @@ static int amdgpu_cgs_get_active_displays_info(struct cgs_device *cgs_device,
					(amdgpu_crtc->v_border * 2);
			mode_info->vblank_time_us = vblank_lines * line_time_us;
			mode_info->refresh_rate = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
+			/* we have issues with mclk switching with refresh rates
+			 * over 120 hz on the non-DC code.
+			 */
+			if (mode_info->refresh_rate > 120)
+				mode_info->vblank_time_us = 0;
			mode_info = NULL;
		}
	}
@@ -1187,6 +1192,18 @@ static int amdgpu_cgs_call_acpi_method(struct cgs_device *cgs_device,
	return amdgpu_cgs_acpi_eval_object(cgs_device, &info);
 }
+static int amdgpu_cgs_set_temperature_range(struct cgs_device *cgs_device,
+					    int min_temperature,
+					    int max_temperature)
+{
+	CGS_FUNC_ADEV;
+	adev->pm.dpm.thermal.min_temp = min_temperature;
+	adev->pm.dpm.thermal.max_temp = max_temperature;
+	return 0;
+}
 static const struct cgs_ops amdgpu_cgs_ops = {
	.alloc_gpu_mem = amdgpu_cgs_alloc_gpu_mem,
	.free_gpu_mem = amdgpu_cgs_free_gpu_mem,
@@ -1214,6 +1231,7 @@ static const struct cgs_ops amdgpu_cgs_ops = {
	.enter_safe_mode = amdgpu_cgs_enter_safe_mode,
	.lock_grbm_idx = amdgpu_cgs_lock_grbm_idx,
	.register_pp_handle = amdgpu_cgs_register_pp_handle,
+	.set_temperature_range = amdgpu_cgs_set_temperature_range,
 };
 static const struct cgs_os_ops amdgpu_cgs_os_ops = {
...
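The new set_temperature_range CGS op above lets powerplay report the chip's safe operating range back to the base driver, where it lands in adev->pm.dpm.thermal for the hwmon code to expose ("Expose thermal thresholds through hwmon properly" in the summary). A hedged sketch of a caller follows; the caller name and the millidegree values are assumptions, not taken from this diff.

#include "cgs_common.h"

/* Illustrative only: a powerplay-side caller pushing its thermal limits
 * through the ops table defined above. The 0..80000 values are placeholders;
 * the real callers and units come from the powerplay patches in this series.
 */
static void example_report_thermal_range(struct cgs_device *cgs_device,
					 const struct cgs_ops *ops)
{
	ops->set_temperature_range(cgs_device, 0, 80000);
}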
@@ -877,7 +877,7 @@ amdgpu_connector_vga_detect(struct drm_connector *connector, bool force)
	ret = connector_status_disconnected;
	if (amdgpu_connector->ddc_bus)
-		dret = amdgpu_ddc_probe(amdgpu_connector, false);
+		dret = amdgpu_display_ddc_probe(amdgpu_connector, false);
	if (dret) {
		amdgpu_connector->detected_by_load = false;
		amdgpu_connector_free_edid(connector);
@@ -998,7 +998,7 @@ amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force)
	}
	if (amdgpu_connector->ddc_bus)
-		dret = amdgpu_ddc_probe(amdgpu_connector, false);
+		dret = amdgpu_display_ddc_probe(amdgpu_connector, false);
	if (dret) {
		amdgpu_connector->detected_by_load = false;
		amdgpu_connector_free_edid(connector);
@@ -1401,7 +1401,8 @@ amdgpu_connector_dp_detect(struct drm_connector *connector, bool force)
		/* setup ddc on the bridge */
		amdgpu_atombios_encoder_setup_ext_encoder_ddc(encoder);
		/* bridge chips are always aux */
-		if (amdgpu_ddc_probe(amdgpu_connector, true)) /* try DDC */
+		/* try DDC */
+		if (amdgpu_display_ddc_probe(amdgpu_connector, true))
			ret = connector_status_connected;
		else if (amdgpu_connector->dac_load_detect) { /* try load detection */
			const struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
@@ -1421,7 +1422,8 @@ amdgpu_connector_dp_detect(struct drm_connector *connector, bool force)
				ret = connector_status_connected;
			} else {
				/* try non-aux ddc (DP to DVI/HDMI/etc. adapter) */
-				if (amdgpu_ddc_probe(amdgpu_connector, false))
+				if (amdgpu_display_ddc_probe(amdgpu_connector,
+							     false))
					ret = connector_status_connected;
			}
		}
...
@@ -257,7 +257,7 @@ static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
		return;
	}
-	total_vram = adev->mc.real_vram_size - adev->vram_pin_size;
+	total_vram = adev->gmc.real_vram_size - adev->vram_pin_size;
	used_vram = amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
	free_vram = used_vram >= total_vram ? 0 : total_vram - used_vram;
@@ -302,8 +302,8 @@ static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
	*max_bytes = us_to_bytes(adev, adev->mm_stats.accum_us);
	/* Do the same for visible VRAM if half of it is free */
-	if (adev->mc.visible_vram_size < adev->mc.real_vram_size) {
-		u64 total_vis_vram = adev->mc.visible_vram_size;
+	if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size) {
+		u64 total_vis_vram = adev->gmc.visible_vram_size;
		u64 used_vis_vram =
			amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
@@ -359,7 +359,7 @@ static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
	 * to move it. Don't move anything if the threshold is zero.
	 */
	if (p->bytes_moved < p->bytes_moved_threshold) {
-		if (adev->mc.visible_vram_size < adev->mc.real_vram_size &&
+		if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
		    (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) {
			/* And don't move a CPU_ACCESS_REQUIRED BO to limited
			 * visible VRAM if we've depleted our allowance to do
@@ -381,9 +381,9 @@ static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	p->bytes_moved += ctx.bytes_moved;
-	if (adev->mc.visible_vram_size < adev->mc.real_vram_size &&
+	if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
	    bo->tbo.mem.mem_type == TTM_PL_VRAM &&
-	    bo->tbo.mem.start < adev->mc.visible_vram_size >> PAGE_SHIFT)
+	    bo->tbo.mem.start < adev->gmc.visible_vram_size >> PAGE_SHIFT)
		p->bytes_moved_vis += ctx.bytes_moved;
	if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
@@ -437,9 +437,9 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
		/* Good we can try to move this BO somewhere else */
		amdgpu_ttm_placement_from_domain(bo, other);
		update_bytes_moved_vis =
-			adev->mc.visible_vram_size < adev->mc.real_vram_size &&
+			adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
			bo->tbo.mem.mem_type == TTM_PL_VRAM &&
-			bo->tbo.mem.start < adev->mc.visible_vram_size >> PAGE_SHIFT;
+			bo->tbo.mem.start < adev->gmc.visible_vram_size >> PAGE_SHIFT;
		initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		bytes_moved = atomic64_read(&adev->num_bytes_moved) -
@@ -542,7 +542,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
	INIT_LIST_HEAD(&duplicates);
	amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd);
-	if (p->uf_entry.robj)
+	if (p->uf_entry.robj && !p->uf_entry.robj->parent)
		list_add(&p->uf_entry.tv.head, &p->validated);
	while (1) {
...
@@ -544,7 +544,7 @@ void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
 * as parameter.
 */
 void amdgpu_device_vram_location(struct amdgpu_device *adev,
-				 struct amdgpu_mc *mc, u64 base)
+				 struct amdgpu_gmc *mc, u64 base)
 {
	uint64_t limit = (uint64_t)amdgpu_vram_limit << 20;
@@ -570,11 +570,11 @@ void amdgpu_device_vram_location(struct amdgpu_device *adev,
 * FIXME: when reducing GTT size align new size on power of 2.
 */
 void amdgpu_device_gart_location(struct amdgpu_device *adev,
-				 struct amdgpu_mc *mc)
+				 struct amdgpu_gmc *mc)
 {
	u64 size_af, size_bf;
-	size_af = adev->mc.mc_mask - mc->vram_end;
+	size_af = adev->gmc.mc_mask - mc->vram_end;
	size_bf = mc->vram_start;
	if (size_bf > size_af) {
		if (mc->gart_size > size_bf) {
@@ -608,7 +608,7 @@ void amdgpu_device_gart_location(struct amdgpu_device *adev,
 */
 int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
 {
-	u64 space_needed = roundup_pow_of_two(adev->mc.real_vram_size);
+	u64 space_needed = roundup_pow_of_two(adev->gmc.real_vram_size);
	u32 rbar_size = order_base_2(((space_needed >> 20) | 1)) - 1;
	struct pci_bus *root;
	struct resource *res;
@@ -1036,7 +1036,7 @@ int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
	if (!ip_block_version)
		return -EINVAL;
-	DRM_DEBUG("add ip block number %d <%s>\n", adev->num_ip_blocks,
+	DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
		  ip_block_version->funcs->name);
	adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
@@ -1310,6 +1310,7 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
			return r;
		}
		adev->ip_blocks[i].status.sw = true;
		/* need to do gmc hw init early so we can allocate gpu mem */
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
			r = amdgpu_device_vram_scratch_init(adev);
@@ -1343,8 +1344,7 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.sw)
			continue;
-		/* gmc hw init is done early */
-		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC)
+		if (adev->ip_blocks[i].status.hw)
			continue;
		r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
		if (r) {
@@ -1378,6 +1378,9 @@ static int amdgpu_device_ip_late_set_cg_state(struct amdgpu_device *adev)
 {
	int i = 0, r;
+	if (amdgpu_emu_mode == 1)
+		return 0;
	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
@@ -1483,6 +1486,9 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
		adev->ip_blocks[i].status.hw = false;
	}
+	/* disable all interrupts */
+	amdgpu_irq_disable_all(adev);
	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_blocks[i].status.sw)
			continue;
@@ -1701,6 +1707,8 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
+	case CHIP_KABINI:
+	case CHIP_MULLINS:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
@@ -1711,9 +1719,6 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
 #if defined(CONFIG_DRM_AMD_DC_PRE_VEGA)
		return amdgpu_dc != 0;
 #endif
-	case CHIP_KABINI:
-	case CHIP_MULLINS:
-		return amdgpu_dc > 0;
	case CHIP_VEGA10:
 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
	case CHIP_RAVEN:
@@ -1768,14 +1773,16 @@ int amdgpu_device_init(struct amdgpu_device *adev,
	adev->flags = flags;
	adev->asic_type = flags & AMD_ASIC_MASK;
	adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
-	adev->mc.gart_size = 512 * 1024 * 1024;
+	if (amdgpu_emu_mode == 1)
+		adev->usec_timeout *= 2;
+	adev->gmc.gart_size = 512 * 1024 * 1024;
	adev->accel_working = false;
	adev->num_rings = 0;
	adev->mman.buffer_funcs = NULL;
	adev->mman.buffer_funcs_ring = NULL;
	adev->vm_manager.vm_pte_funcs = NULL;
	adev->vm_manager.vm_pte_num_rings = 0;
-	adev->gart.gart_funcs = NULL;
+	adev->gmc.gmc_funcs = NULL;
	adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
	bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
@@ -1882,6 +1889,12 @@ int amdgpu_device_init(struct amdgpu_device *adev,
	if (runtime)
		vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
+	if (amdgpu_emu_mode == 1) {
+		/* post the asic on emulation mode */
+		emu_soc_asic_init(adev);
+		goto fence_driver_init;
+	}
	/* Read BIOS */
	if (!amdgpu_get_bios(adev)) {
		r = -EINVAL;
@@ -1934,6 +1947,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
		amdgpu_atombios_i2c_init(adev);
	}
+fence_driver_init:
	/* Fence driver */
	r = amdgpu_fence_driver_init(adev);
	if (r) {
@@ -2076,7 +2090,10 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
	/* free i2c buses */
	if (!amdgpu_device_has_dc_support(adev))
		amdgpu_i2c_fini(adev);
-	amdgpu_atombios_fini(adev);
+	if (amdgpu_emu_mode != 1)
+		amdgpu_atombios_fini(adev);
	kfree(adev->bios);
	adev->bios = NULL;
	if (!pci_is_thunderbolt_attached(adev->pdev))
@@ -2284,14 +2301,6 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
				drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
			}
			drm_modeset_unlock_all(dev);
-		} else {
-			/*
-			 * There is no equivalent atomic helper to turn on
-			 * display, so we defined our own function for this,
-			 * once suspend resume is supported by the atomic
-			 * framework this will be reworked
-			 */
-			amdgpu_dm_display_resume(adev);
		}
	}
@@ -2726,7 +2735,6 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
	if (amdgpu_device_has_dc_support(adev)) {
		if (drm_atomic_helper_resume(adev->ddev, state))
			dev_info(adev->dev, "drm resume failed:%d\n", r);
-		amdgpu_dm_display_resume(adev);
	} else {
		drm_helper_resume_force_mode(adev->ddev);
	}
...
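The amdgpu_emu_mode paths above skip VBIOS posting, clock/power gating and atombios teardown when running on an emulator, posting the ASIC through emu_soc_asic_init() instead. The module-parameter plumbing is not part of this excerpt; a sketch of how such a knob is typically wired up from the driver file follows (the parameter name, description and permissions are assumptions).

#include <linux/module.h>
#include <linux/moduleparam.h>

/* Sketch only: how the amdgpu_emu_mode variable declared extern in amdgpu.h
 * is presumably exposed as a module parameter; not taken from this excerpt. */
int amdgpu_emu_mode;

MODULE_PARM_DESC(emu_mode, "Emulation mode, (1 = enable, 0 = disable)");
module_param_named(emu_mode, amdgpu_emu_mode, int, 0444);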
@@ -29,6 +29,7 @@
 #include "amdgpu_i2c.h"
 #include "atom.h"
 #include "amdgpu_connectors.h"
+#include "amdgpu_display.h"
 #include <asm/div64.h>
 #include <linux/pm_runtime.h>
@@ -36,7 +37,8 @@
 #include <drm/drm_edid.h>
 #include <drm/drm_fb_helper.h>
-static void amdgpu_flip_callback(struct dma_fence *f, struct dma_fence_cb *cb)
+static void amdgpu_display_flip_callback(struct dma_fence *f,
+					 struct dma_fence_cb *cb)
 {
	struct amdgpu_flip_work *work =
		container_of(cb, struct amdgpu_flip_work, cb);
@@ -45,8 +47,8 @@ static void amdgpu_flip_callback(struct dma_fence *f, struct dma_fence_cb *cb)
	schedule_work(&work->flip_work.work);
 }
-static bool amdgpu_flip_handle_fence(struct amdgpu_flip_work *work,
+static bool amdgpu_display_flip_handle_fence(struct amdgpu_flip_work *work,
					     struct dma_fence **f)
 {
	struct dma_fence *fence= *f;
@@ -55,14 +57,15 @@ static bool amdgpu_flip_handle_fence(struct amdgpu_flip_work *work,
	*f = NULL;
-	if (!dma_fence_add_callback(fence, &work->cb, amdgpu_flip_callback))
+	if (!dma_fence_add_callback(fence, &work->cb,
+				    amdgpu_display_flip_callback))
		return true;
	dma_fence_put(fence);
	return false;
 }
-static void amdgpu_flip_work_func(struct work_struct *__work)
+static void amdgpu_display_flip_work_func(struct work_struct *__work)
 {
	struct delayed_work *delayed_work =
		container_of(__work, struct delayed_work, work);
@@ -76,20 +79,20 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
	unsigned i;
	int vpos, hpos;
-	if (amdgpu_flip_handle_fence(work, &work->excl))
+	if (amdgpu_display_flip_handle_fence(work, &work->excl))
		return;
	for (i = 0; i < work->shared_count; ++i)
-		if (amdgpu_flip_handle_fence(work, &work->shared[i]))
+		if (amdgpu_display_flip_handle_fence(work, &work->shared[i]))
			return;
	/* Wait until we're out of the vertical blank period before the one
	 * targeted by the flip
	 */
	if (amdgpu_crtc->enabled &&
-	    (amdgpu_get_crtc_scanoutpos(adev->ddev, work->crtc_id, 0,
+	    (amdgpu_display_get_crtc_scanoutpos(adev->ddev, work->crtc_id, 0,
					&vpos, &hpos, NULL, NULL,
					&crtc->hwmode)
	     & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
	    (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
	    (int)(work->target_vblank -
@@ -117,7 +120,7 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
 /*
 * Handle unpin events outside the interrupt handler proper.
 */
-static void amdgpu_unpin_work_func(struct work_struct *__work)
+static void amdgpu_display_unpin_work_func(struct work_struct *__work)
 {
	struct amdgpu_flip_work *work =
		container_of(__work, struct amdgpu_flip_work, unpin_work);
@@ -139,11 +142,11 @@ static void amdgpu_unpin_work_func(struct work_struct *__work)
	kfree(work);
 }
-int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
-				 struct drm_framebuffer *fb,
-				 struct drm_pending_vblank_event *event,
-				 uint32_t page_flip_flags, uint32_t target,
-				 struct drm_modeset_acquire_ctx *ctx)
+int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
+				struct drm_framebuffer *fb,
+				struct drm_pending_vblank_event *event,
+				uint32_t page_flip_flags, uint32_t target,
+				struct drm_modeset_acquire_ctx *ctx)
 {
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;
@@ -162,8 +165,8 @@ int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
	if (work == NULL)
		return -ENOMEM;
-	INIT_DELAYED_WORK(&work->flip_work, amdgpu_flip_work_func);
-	INIT_WORK(&work->unpin_work, amdgpu_unpin_work_func);
+	INIT_DELAYED_WORK(&work->flip_work, amdgpu_display_flip_work_func);
+	INIT_WORK(&work->unpin_work, amdgpu_display_unpin_work_func);
	work->event = event;
	work->adev = adev;
@@ -189,7 +192,7 @@ int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
		goto cleanup;
	}
-	r = amdgpu_bo_pin(new_abo, AMDGPU_GEM_DOMAIN_VRAM, &base);
+	r = amdgpu_bo_pin(new_abo, amdgpu_display_framebuffer_domains(adev), &base);
	if (unlikely(r != 0)) {
		DRM_ERROR("failed to pin new abo buffer before flip\n");
		goto unreserve;
@@ -228,7 +231,7 @@ int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
	/* update crtc fb */
	crtc->primary->fb = fb;
	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
-	amdgpu_flip_work_func(&work->flip_work.work);
+	amdgpu_display_flip_work_func(&work->flip_work.work);
	return 0;
 pflip_cleanup:
@@ -254,8 +257,8 @@ int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
	return r;
 }
-int amdgpu_crtc_set_config(struct drm_mode_set *set,
-			   struct drm_modeset_acquire_ctx *ctx)
+int amdgpu_display_crtc_set_config(struct drm_mode_set *set,
+				   struct drm_modeset_acquire_ctx *ctx)
 {
	struct drm_device *dev;
	struct amdgpu_device *adev;
@@ -352,7 +355,7 @@ static const char *hpd_names[6] = {
	"HPD6",
 };
-void amdgpu_print_display_setup(struct drm_device *dev)
+void amdgpu_display_print_display_setup(struct drm_device *dev)
 {
	struct drm_connector *connector;
	struct amdgpu_connector *amdgpu_connector;
@@ -429,11 +432,11 @@ void amdgpu_print_display_setup(struct drm_device *dev)
 }
 /**
- * amdgpu_ddc_probe
+ * amdgpu_display_ddc_probe
 *
 */
-bool amdgpu_ddc_probe(struct amdgpu_connector *amdgpu_connector,
+bool amdgpu_display_ddc_probe(struct amdgpu_connector *amdgpu_connector,
			      bool use_aux)
 {
	u8 out = 0x0;
	u8 buf[8];
@@ -479,7 +482,7 @@ bool amdgpu_ddc_probe(struct amdgpu_connector *amdgpu_connector,
	return true;
 }
-static void amdgpu_user_framebuffer_destroy(struct drm_framebuffer *fb)
+static void amdgpu_display_user_framebuffer_destroy(struct drm_framebuffer *fb)
 {
	struct amdgpu_framebuffer *amdgpu_fb = to_amdgpu_framebuffer(fb);
@@ -488,9 +491,10 @@ static void amdgpu_user_framebuffer_destroy(struct drm_framebuffer *fb)
	kfree(amdgpu_fb);
 }
-static int amdgpu_user_framebuffer_create_handle(struct drm_framebuffer *fb,
-						 struct drm_file *file_priv,
-						 unsigned int *handle)
+static int amdgpu_display_user_framebuffer_create_handle(
			struct drm_framebuffer *fb,
			struct drm_file *file_priv,
			unsigned int *handle)
 {
	struct amdgpu_framebuffer *amdgpu_fb = to_amdgpu_framebuffer(fb);
@@ -498,15 +502,28 @@ static int amdgpu_user_framebuffer_create_handle(struct drm_framebuffer *fb,
 }
 static const struct drm_framebuffer_funcs amdgpu_fb_funcs = {
-	.destroy = amdgpu_user_framebuffer_destroy,
-	.create_handle = amdgpu_user_framebuffer_create_handle,
+	.destroy = amdgpu_display_user_framebuffer_destroy,
+	.create_handle = amdgpu_display_user_framebuffer_create_handle,
 };
-int
-amdgpu_framebuffer_init(struct drm_device *dev,
-			struct amdgpu_framebuffer *rfb,
-			const struct drm_mode_fb_cmd2 *mode_cmd,
-			struct drm_gem_object *obj)
+uint32_t amdgpu_display_framebuffer_domains(struct amdgpu_device *adev)
+{
+	uint32_t domain = AMDGPU_GEM_DOMAIN_VRAM;
+#if defined(CONFIG_DRM_AMD_DC)
+	if (adev->asic_type >= CHIP_CARRIZO && adev->asic_type < CHIP_RAVEN &&
+	    adev->flags & AMD_IS_APU &&
+	    amdgpu_device_asic_has_dc_support(adev->asic_type))
+		domain |= AMDGPU_GEM_DOMAIN_GTT;
+#endif
+	return domain;
+}
+int amdgpu_display_framebuffer_init(struct drm_device *dev,
+				    struct amdgpu_framebuffer *rfb,
+				    const struct drm_mode_fb_cmd2 *mode_cmd,
+				    struct drm_gem_object *obj)
 {
	int ret;
	rfb->obj = obj;
@@ -520,9 +537,9 @@ amdgpu_framebuffer_init(struct drm_device *dev,
 }
 struct drm_framebuffer *
-amdgpu_user_framebuffer_create(struct drm_device *dev,
+amdgpu_display_user_framebuffer_create(struct drm_device *dev,
			       struct drm_file *file_priv,
			       const struct drm_mode_fb_cmd2 *mode_cmd)
 {
	struct drm_gem_object *obj;
	struct amdgpu_framebuffer *amdgpu_fb;
@@ -547,7 +564,7 @@ amdgpu_user_framebuffer_create(struct drm_device *dev,
		return ERR_PTR(-ENOMEM);
	}
-	ret = amdgpu_framebuffer_init(dev, amdgpu_fb, mode_cmd, obj);
+	ret = amdgpu_display_framebuffer_init(dev, amdgpu_fb, mode_cmd, obj);
	if (ret) {
		kfree(amdgpu_fb);
		drm_gem_object_put_unlocked(obj);
@@ -558,7 +575,7 @@ amdgpu_user_framebuffer_create(struct drm_device *dev,
 }
 const struct drm_mode_config_funcs amdgpu_mode_funcs = {
-	.fb_create = amdgpu_user_framebuffer_create,
+	.fb_create = amdgpu_display_user_framebuffer_create,
	.output_poll_changed = drm_fb_helper_output_poll_changed,
 };
@@ -580,7 +597,7 @@ static const struct drm_prop_enum_list amdgpu_dither_enum_list[] =
	{ AMDGPU_FMT_DITHER_ENABLE, "on" },
 };
-int amdgpu_modeset_create_props(struct amdgpu_device *adev)
+int amdgpu_display_modeset_create_props(struct amdgpu_device *adev)
 {
	int sz;
@@ -629,7 +646,7 @@ int amdgpu_modeset_create_props(struct amdgpu_device *adev)
	return 0;
 }
-void amdgpu_update_display_priority(struct amdgpu_device *adev)
+void amdgpu_display_update_priority(struct amdgpu_device *adev)
 {
	/* adjustment options for the display watermarks */
	if ((amdgpu_disp_priority == 0) || (amdgpu_disp_priority > 2))
@@ -639,7 +656,7 @@ void amdgpu_update_display_priority(struct amdgpu_device *adev)
 }
-static bool is_hdtv_mode(const struct drm_display_mode *mode)
+static bool amdgpu_display_is_hdtv_mode(const struct drm_display_mode *mode)
 {
	/* try and guess if this is a tv or a monitor */
	if ((mode->vdisplay == 480 && mode->hdisplay == 720) || /* 480p */
@@ -651,9 +668,9 @@ static bool is_hdtv_mode(const struct drm_display_mode *mode)
	return false;
 }
-bool amdgpu_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
+bool amdgpu_display_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
					const struct drm_display_mode *mode,
					struct drm_display_mode *adjusted_mode)
 {
	struct drm_device *dev = crtc->dev;
	struct drm_encoder *encoder;
@@ -696,7 +713,7 @@ bool amdgpu_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
	    ((amdgpu_encoder->underscan_type == UNDERSCAN_ON) ||
	     ((amdgpu_encoder->underscan_type == UNDERSCAN_AUTO) &&
	      drm_detect_hdmi_monitor(amdgpu_connector_edid(connector)) &&
-	      is_hdtv_mode(mode)))) {
+	      amdgpu_display_is_hdtv_mode(mode)))) {
		if (amdgpu_encoder->underscan_hborder != 0)
			amdgpu_crtc->h_border = amdgpu_encoder->underscan_hborder;
		else
@@ -764,10 +781,10 @@ bool amdgpu_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
 * unknown small number of scanlines wrt. real scanout position.
 *
 */
-int amdgpu_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
-			       unsigned int flags, int *vpos, int *hpos,
-			       ktime_t *stime, ktime_t *etime,
-			       const struct drm_display_mode *mode)
+int amdgpu_display_get_crtc_scanoutpos(struct drm_device *dev,
+			unsigned int pipe, unsigned int flags, int *vpos,
+			int *hpos, ktime_t *stime, ktime_t *etime,
+			const struct drm_display_mode *mode)
 {
	u32 vbl = 0, position = 0;
	int vbl_start, vbl_end, vtotal, ret = 0;
@@ -859,7 +876,7 @@ int amdgpu_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
	return ret;
 }
-int amdgpu_crtc_idx_to_irq_type(struct amdgpu_device *adev, int crtc)
+int amdgpu_display_crtc_idx_to_irq_type(struct amdgpu_device *adev, int crtc)
 {
	if (crtc < 0 || crtc >= adev->mode_info.num_crtc)
		return AMDGPU_CRTC_IRQ_NONE;
...
...@@ -23,9 +23,10 @@ ...@@ -23,9 +23,10 @@
#ifndef __AMDGPU_DISPLAY_H__ #ifndef __AMDGPU_DISPLAY_H__
#define __AMDGPU_DISPLAY_H__ #define __AMDGPU_DISPLAY_H__
uint32_t amdgpu_display_framebuffer_domains(struct amdgpu_device *adev);
struct drm_framebuffer * struct drm_framebuffer *
amdgpu_user_framebuffer_create(struct drm_device *dev, amdgpu_display_user_framebuffer_create(struct drm_device *dev,
struct drm_file *file_priv, struct drm_file *file_priv,
const struct drm_mode_fb_cmd2 *mode_cmd); const struct drm_mode_fb_cmd2 *mode_cmd);
#endif #endif
...@@ -265,9 +265,6 @@ enum amdgpu_pcie_gen { ...@@ -265,9 +265,6 @@ enum amdgpu_pcie_gen {
#define amdgpu_dpm_read_sensor(adev, idx, value, size) \ #define amdgpu_dpm_read_sensor(adev, idx, value, size) \
((adev)->powerplay.pp_funcs->read_sensor((adev)->powerplay.pp_handle, (idx), (value), (size))) ((adev)->powerplay.pp_funcs->read_sensor((adev)->powerplay.pp_handle, (idx), (value), (size)))
#define amdgpu_dpm_get_temperature(adev) \
((adev)->powerplay.pp_funcs->get_temperature((adev)->powerplay.pp_handle))
#define amdgpu_dpm_set_fan_control_mode(adev, m) \ #define amdgpu_dpm_set_fan_control_mode(adev, m) \
((adev)->powerplay.pp_funcs->set_fan_control_mode((adev)->powerplay.pp_handle, (m))) ((adev)->powerplay.pp_funcs->set_fan_control_mode((adev)->powerplay.pp_handle, (m)))
...@@ -328,8 +325,8 @@ enum amdgpu_pcie_gen { ...@@ -328,8 +325,8 @@ enum amdgpu_pcie_gen {
#define amdgpu_dpm_set_mclk_od(adev, value) \ #define amdgpu_dpm_set_mclk_od(adev, value) \
((adev)->powerplay.pp_funcs->set_mclk_od((adev)->powerplay.pp_handle, value)) ((adev)->powerplay.pp_funcs->set_mclk_od((adev)->powerplay.pp_handle, value))
#define amdgpu_dpm_dispatch_task(adev, task_id, input, output) \ #define amdgpu_dpm_dispatch_task(adev, task_id, user_state) \
((adev)->powerplay.pp_funcs->dispatch_tasks)((adev)->powerplay.pp_handle, (task_id), (input), (output)) ((adev)->powerplay.pp_funcs->dispatch_tasks)((adev)->powerplay.pp_handle, (task_id), (user_state))
#define amdgpu_dpm_check_state_equal(adev, cps, rps, equal) \ #define amdgpu_dpm_check_state_equal(adev, cps, rps, equal) \
((adev)->powerplay.pp_funcs->check_state_equal((adev)->powerplay.pp_handle, (cps), (rps), (equal))) ((adev)->powerplay.pp_funcs->check_state_equal((adev)->powerplay.pp_handle, (cps), (rps), (equal)))
...@@ -366,6 +363,22 @@ enum amdgpu_pcie_gen { ...@@ -366,6 +363,22 @@ enum amdgpu_pcie_gen {
(adev)->powerplay.pp_handle, virtual_addr_low, \ (adev)->powerplay.pp_handle, virtual_addr_low, \
virtual_addr_hi, mc_addr_low, mc_addr_hi, size) virtual_addr_hi, mc_addr_low, mc_addr_hi, size)
#define amdgpu_dpm_get_power_profile_mode(adev, buf) \
((adev)->powerplay.pp_funcs->get_power_profile_mode(\
(adev)->powerplay.pp_handle, buf))
#define amdgpu_dpm_set_power_profile_mode(adev, parameter, size) \
((adev)->powerplay.pp_funcs->set_power_profile_mode(\
(adev)->powerplay.pp_handle, parameter, size))
#define amdgpu_dpm_odn_edit_dpm_table(adev, type, parameter, size) \
((adev)->powerplay.pp_funcs->odn_edit_dpm_table(\
(adev)->powerplay.pp_handle, type, parameter, size))
#define amdgpu_dpm_set_mmhub_powergating_by_smu(adev) \
((adev)->powerplay.pp_funcs->set_mmhub_powergating_by_smu( \
(adev)->powerplay.pp_handle))
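
The new macros follow the existing pattern of thin wrappers around pp_funcs, so callers are expected to NULL-check the callback first, since not every powerplay backend implements the power-profile or ODN interfaces. A minimal sketch of a guarded caller (the function name is hypothetical):

/* Illustrative sysfs-style handler, not from this patch: dump the power
 * profile table, guarding against backends without the callback. */
static ssize_t example_show_power_profile(struct amdgpu_device *adev, char *buf)
{
        if (!adev->powerplay.pp_funcs->get_power_profile_mode)
                return snprintf(buf, PAGE_SIZE, "\n");

        return amdgpu_dpm_get_power_profile_mode(adev, buf);
}
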
struct amdgpu_dpm { struct amdgpu_dpm {
struct amdgpu_ps *ps; struct amdgpu_ps *ps;
/* number of valid power states */ /* number of valid power states */
......
...@@ -73,9 +73,11 @@ ...@@ -73,9 +73,11 @@
* - 3.21.0 - Add DRM_AMDGPU_FENCE_TO_HANDLE ioctl * - 3.21.0 - Add DRM_AMDGPU_FENCE_TO_HANDLE ioctl
* - 3.22.0 - Add DRM_AMDGPU_SCHED ioctl * - 3.22.0 - Add DRM_AMDGPU_SCHED ioctl
* - 3.23.0 - Add query for VRAM lost counter * - 3.23.0 - Add query for VRAM lost counter
* - 3.24.0 - Add high priority compute support for gfx9
* - 3.25.0 - Add support for sensor query info (stable pstate sclk/mclk).
*/ */
#define KMS_DRIVER_MAJOR 3 #define KMS_DRIVER_MAJOR 3
#define KMS_DRIVER_MINOR 23 #define KMS_DRIVER_MINOR 25
#define KMS_DRIVER_PATCHLEVEL 0 #define KMS_DRIVER_PATCHLEVEL 0
int amdgpu_vram_limit = 0; int amdgpu_vram_limit = 0;
...@@ -119,7 +121,7 @@ uint amdgpu_pg_mask = 0xffffffff; ...@@ -119,7 +121,7 @@ uint amdgpu_pg_mask = 0xffffffff;
uint amdgpu_sdma_phase_quantum = 32; uint amdgpu_sdma_phase_quantum = 32;
char *amdgpu_disable_cu = NULL; char *amdgpu_disable_cu = NULL;
char *amdgpu_virtual_display = NULL; char *amdgpu_virtual_display = NULL;
uint amdgpu_pp_feature_mask = 0xffffffff; uint amdgpu_pp_feature_mask = 0x3fff;
int amdgpu_ngg = 0; int amdgpu_ngg = 0;
int amdgpu_prim_buf_per_se = 0; int amdgpu_prim_buf_per_se = 0;
int amdgpu_pos_buf_per_se = 0; int amdgpu_pos_buf_per_se = 0;
...@@ -129,6 +131,7 @@ int amdgpu_job_hang_limit = 0; ...@@ -129,6 +131,7 @@ int amdgpu_job_hang_limit = 0;
int amdgpu_lbpw = -1; int amdgpu_lbpw = -1;
int amdgpu_compute_multipipe = -1; int amdgpu_compute_multipipe = -1;
int amdgpu_gpu_recovery = -1; /* auto */ int amdgpu_gpu_recovery = -1; /* auto */
int amdgpu_emu_mode = 0;
MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes"); MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes");
module_param_named(vramlimit, amdgpu_vram_limit, int, 0600); module_param_named(vramlimit, amdgpu_vram_limit, int, 0600);
...@@ -284,6 +287,9 @@ module_param_named(compute_multipipe, amdgpu_compute_multipipe, int, 0444); ...@@ -284,6 +287,9 @@ module_param_named(compute_multipipe, amdgpu_compute_multipipe, int, 0444);
MODULE_PARM_DESC(gpu_recovery, "Enable GPU recovery mechanism, (1 = enable, 0 = disable, -1 = auto"); MODULE_PARM_DESC(gpu_recovery, "Enable GPU recovery mechanism, (1 = enable, 0 = disable, -1 = auto");
module_param_named(gpu_recovery, amdgpu_gpu_recovery, int, 0444); module_param_named(gpu_recovery, amdgpu_gpu_recovery, int, 0444);
MODULE_PARM_DESC(emu_mode, "Emulation mode, (1 = enable, 0 = disable)");
module_param_named(emu_mode, amdgpu_emu_mode, int, 0444);

#ifdef CONFIG_DRM_AMDGPU_SI #ifdef CONFIG_DRM_AMDGPU_SI
#if defined(CONFIG_DRM_RADEON) || defined(CONFIG_DRM_RADEON_MODULE) #if defined(CONFIG_DRM_RADEON) || defined(CONFIG_DRM_RADEON_MODULE)
...@@ -576,6 +582,11 @@ static int amdgpu_pci_probe(struct pci_dev *pdev, ...@@ -576,6 +582,11 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
struct drm_device *dev; struct drm_device *dev;
unsigned long flags = ent->driver_data; unsigned long flags = ent->driver_data;
int ret, retry = 0; int ret, retry = 0;
bool supports_atomic = false;
if (!amdgpu_virtual_display &&
amdgpu_device_asic_has_dc_support(flags & AMD_ASIC_MASK))
supports_atomic = true;
if ((flags & AMD_EXP_HW_SUPPORT) && !amdgpu_exp_hw_support) { if ((flags & AMD_EXP_HW_SUPPORT) && !amdgpu_exp_hw_support) {
DRM_INFO("This hardware requires experimental hardware support.\n" DRM_INFO("This hardware requires experimental hardware support.\n"
...@@ -596,6 +607,13 @@ static int amdgpu_pci_probe(struct pci_dev *pdev, ...@@ -596,6 +607,13 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
if (ret) if (ret)
return ret; return ret;
/* warn the user if they mix atomic and non-atomic capable GPUs */
if ((kms_driver.driver_features & DRIVER_ATOMIC) && !supports_atomic)
DRM_ERROR("Mixing atomic and non-atomic capable GPUs!\n");
/* support atomic early so the atomic debugfs stuff gets created */
if (supports_atomic)
kms_driver.driver_features |= DRIVER_ATOMIC;
dev = drm_dev_alloc(&kms_driver, &pdev->dev); dev = drm_dev_alloc(&kms_driver, &pdev->dev);
if (IS_ERR(dev)) if (IS_ERR(dev))
return PTR_ERR(dev); return PTR_ERR(dev);
...@@ -835,8 +853,8 @@ amdgpu_get_crtc_scanout_position(struct drm_device *dev, unsigned int pipe, ...@@ -835,8 +853,8 @@ amdgpu_get_crtc_scanout_position(struct drm_device *dev, unsigned int pipe,
ktime_t *stime, ktime_t *etime, ktime_t *stime, ktime_t *etime,
const struct drm_display_mode *mode) const struct drm_display_mode *mode)
{ {
return amdgpu_get_crtc_scanoutpos(dev, pipe, 0, vpos, hpos, return amdgpu_display_get_crtc_scanoutpos(dev, pipe, 0, vpos, hpos,
stime, etime, mode); stime, etime, mode);
} }
static struct drm_driver kms_driver = { static struct drm_driver kms_driver = {
...@@ -854,9 +872,6 @@ static struct drm_driver kms_driver = { ...@@ -854,9 +872,6 @@ static struct drm_driver kms_driver = {
.disable_vblank = amdgpu_disable_vblank_kms, .disable_vblank = amdgpu_disable_vblank_kms,
.get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos, .get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos,
.get_scanout_position = amdgpu_get_crtc_scanout_position, .get_scanout_position = amdgpu_get_crtc_scanout_position,
.irq_preinstall = amdgpu_irq_preinstall,
.irq_postinstall = amdgpu_irq_postinstall,
.irq_uninstall = amdgpu_irq_uninstall,
.irq_handler = amdgpu_irq_handler, .irq_handler = amdgpu_irq_handler,
.ioctls = amdgpu_ioctls_kms, .ioctls = amdgpu_ioctls_kms,
.gem_free_object_unlocked = amdgpu_gem_object_free, .gem_free_object_unlocked = amdgpu_gem_object_free,
...@@ -869,9 +884,7 @@ static struct drm_driver kms_driver = { ...@@ -869,9 +884,7 @@ static struct drm_driver kms_driver = {
.prime_handle_to_fd = drm_gem_prime_handle_to_fd, .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle, .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_export = amdgpu_gem_prime_export, .gem_prime_export = amdgpu_gem_prime_export,
.gem_prime_import = drm_gem_prime_import, .gem_prime_import = amdgpu_gem_prime_import,
.gem_prime_pin = amdgpu_gem_prime_pin,
.gem_prime_unpin = amdgpu_gem_prime_unpin,
.gem_prime_res_obj = amdgpu_gem_prime_res_obj, .gem_prime_res_obj = amdgpu_gem_prime_res_obj,
.gem_prime_get_sg_table = amdgpu_gem_prime_get_sg_table, .gem_prime_get_sg_table = amdgpu_gem_prime_get_sg_table,
.gem_prime_import_sg_table = amdgpu_gem_prime_import_sg_table, .gem_prime_import_sg_table = amdgpu_gem_prime_import_sg_table,
......
...@@ -38,6 +38,8 @@ ...@@ -38,6 +38,8 @@
#include <linux/vga_switcheroo.h> #include <linux/vga_switcheroo.h>
#include "amdgpu_display.h"
/* object hierarchy - /* object hierarchy -
this contains a helper + a amdgpu fb this contains a helper + a amdgpu fb
the helper contains a pointer to amdgpu framebuffer baseclass. the helper contains a pointer to amdgpu framebuffer baseclass.
...@@ -124,7 +126,7 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev, ...@@ -124,7 +126,7 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
struct drm_gem_object *gobj = NULL; struct drm_gem_object *gobj = NULL;
struct amdgpu_bo *abo = NULL; struct amdgpu_bo *abo = NULL;
bool fb_tiled = false; /* useful for testing */ bool fb_tiled = false; /* useful for testing */
u32 tiling_flags = 0; u32 tiling_flags = 0, domain;
int ret; int ret;
int aligned_size, size; int aligned_size, size;
int height = mode_cmd->height; int height = mode_cmd->height;
...@@ -135,12 +137,12 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev, ...@@ -135,12 +137,12 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
/* need to align pitch with crtc limits */ /* need to align pitch with crtc limits */
mode_cmd->pitches[0] = amdgpu_align_pitch(adev, mode_cmd->width, cpp, mode_cmd->pitches[0] = amdgpu_align_pitch(adev, mode_cmd->width, cpp,
fb_tiled); fb_tiled);
domain = amdgpu_display_framebuffer_domains(adev);
height = ALIGN(mode_cmd->height, 8); height = ALIGN(mode_cmd->height, 8);
size = mode_cmd->pitches[0] * height; size = mode_cmd->pitches[0] * height;
aligned_size = ALIGN(size, PAGE_SIZE); aligned_size = ALIGN(size, PAGE_SIZE);
ret = amdgpu_gem_object_create(adev, aligned_size, 0, ret = amdgpu_gem_object_create(adev, aligned_size, 0, domain,
AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS | AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
AMDGPU_GEM_CREATE_VRAM_CLEARED, AMDGPU_GEM_CREATE_VRAM_CLEARED,
...@@ -166,7 +168,7 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev, ...@@ -166,7 +168,7 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
} }
ret = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM, NULL); ret = amdgpu_bo_pin(abo, domain, NULL);
if (ret) { if (ret) {
amdgpu_bo_unreserve(abo); amdgpu_bo_unreserve(abo);
goto out_unref; goto out_unref;
...@@ -225,7 +227,8 @@ static int amdgpufb_create(struct drm_fb_helper *helper, ...@@ -225,7 +227,8 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
info->par = rfbdev; info->par = rfbdev;
info->skip_vt_switch = true; info->skip_vt_switch = true;
ret = amdgpu_framebuffer_init(adev->ddev, &rfbdev->rfb, &mode_cmd, gobj); ret = amdgpu_display_framebuffer_init(adev->ddev, &rfbdev->rfb,
&mode_cmd, gobj);
if (ret) { if (ret) {
DRM_ERROR("failed to initialize framebuffer %d\n", ret); DRM_ERROR("failed to initialize framebuffer %d\n", ret);
goto out; goto out;
...@@ -242,8 +245,8 @@ static int amdgpufb_create(struct drm_fb_helper *helper, ...@@ -242,8 +245,8 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
info->fbops = &amdgpufb_ops; info->fbops = &amdgpufb_ops;
tmp = amdgpu_bo_gpu_offset(abo) - adev->mc.vram_start; tmp = amdgpu_bo_gpu_offset(abo) - adev->gmc.vram_start;
info->fix.smem_start = adev->mc.aper_base + tmp; info->fix.smem_start = adev->gmc.aper_base + tmp;
info->fix.smem_len = amdgpu_bo_size(abo); info->fix.smem_len = amdgpu_bo_size(abo);
info->screen_base = amdgpu_bo_kptr(abo); info->screen_base = amdgpu_bo_kptr(abo);
info->screen_size = amdgpu_bo_size(abo); info->screen_size = amdgpu_bo_size(abo);
...@@ -252,7 +255,7 @@ static int amdgpufb_create(struct drm_fb_helper *helper, ...@@ -252,7 +255,7 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
/* setup aperture base/size for vesafb takeover */ /* setup aperture base/size for vesafb takeover */
info->apertures->ranges[0].base = adev->ddev->mode_config.fb_base; info->apertures->ranges[0].base = adev->ddev->mode_config.fb_base;
info->apertures->ranges[0].size = adev->mc.aper_size; info->apertures->ranges[0].size = adev->gmc.aper_size;
/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */ /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
...@@ -262,7 +265,7 @@ static int amdgpufb_create(struct drm_fb_helper *helper, ...@@ -262,7 +265,7 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
} }
DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start); DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start);
DRM_INFO("vram apper at 0x%lX\n", (unsigned long)adev->mc.aper_base); DRM_INFO("vram apper at 0x%lX\n", (unsigned long)adev->gmc.aper_base);
DRM_INFO("size %lu\n", (unsigned long)amdgpu_bo_size(abo)); DRM_INFO("size %lu\n", (unsigned long)amdgpu_bo_size(abo));
DRM_INFO("fb depth is %d\n", fb->format->depth); DRM_INFO("fb depth is %d\n", fb->format->depth);
DRM_INFO(" pitch is %d\n", fb->pitches[0]); DRM_INFO(" pitch is %d\n", fb->pitches[0]);
...@@ -319,7 +322,7 @@ int amdgpu_fbdev_init(struct amdgpu_device *adev) ...@@ -319,7 +322,7 @@ int amdgpu_fbdev_init(struct amdgpu_device *adev)
return 0; return 0;
/* select 8 bpp console on low vram cards */ /* select 8 bpp console on low vram cards */
if (adev->mc.real_vram_size <= (32*1024*1024)) if (adev->gmc.real_vram_size <= (32*1024*1024))
bpp_sel = 8; bpp_sel = 8;
rfbdev = kzalloc(sizeof(struct amdgpu_fbdev), GFP_KERNEL); rfbdev = kzalloc(sizeof(struct amdgpu_fbdev), GFP_KERNEL);
......
...@@ -120,7 +120,7 @@ int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev) ...@@ -120,7 +120,7 @@ int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev)
PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
NULL, NULL, 0, &adev->gart.robj); NULL, NULL, &adev->gart.robj);
if (r) { if (r) {
return r; return r;
} }
...@@ -241,13 +241,14 @@ int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset, ...@@ -241,13 +241,14 @@ int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
continue; continue;
for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) { for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) {
amdgpu_gart_set_pte_pde(adev, adev->gart.ptr, amdgpu_gmc_set_pte_pde(adev, adev->gart.ptr,
t, page_base, flags); t, page_base, flags);
page_base += AMDGPU_GPU_PAGE_SIZE; page_base += AMDGPU_GPU_PAGE_SIZE;
} }
} }
mb(); mb();
amdgpu_gart_flush_gpu_tlb(adev, 0); amdgpu_asic_flush_hdp(adev, NULL);
amdgpu_gmc_flush_gpu_tlb(adev, 0);
return 0; return 0;
} }
...@@ -279,7 +280,7 @@ int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset, ...@@ -279,7 +280,7 @@ int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
for (i = 0; i < pages; i++) { for (i = 0; i < pages; i++) {
page_base = dma_addr[i]; page_base = dma_addr[i];
for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) { for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) {
amdgpu_gart_set_pte_pde(adev, dst, t, page_base, flags); amdgpu_gmc_set_pte_pde(adev, dst, t, page_base, flags);
page_base += AMDGPU_GPU_PAGE_SIZE; page_base += AMDGPU_GPU_PAGE_SIZE;
} }
} }
...@@ -329,7 +330,8 @@ int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset, ...@@ -329,7 +330,8 @@ int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
return r; return r;
mb(); mb();
amdgpu_gart_flush_gpu_tlb(adev, 0); amdgpu_asic_flush_hdp(adev, NULL);
amdgpu_gmc_flush_gpu_tlb(adev, 0);
return 0; return 0;
} }
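
Both gart_bind and gart_unbind now end with the same three-step sequence, and the ordering matters: the CPU has just written PTEs through adev->gart.ptr, so the writes are ordered first, then pushed past the HDP (host data path) cache, and only then is the GPU's translation cache invalidated. As I read it (a sketch of the pattern, with VMID 0 assumed to be the GART/system context):

mb();                                 /* order the CPU writes to the table   */
amdgpu_asic_flush_hdp(adev, NULL);    /* make them visible past the HDP cache */
amdgpu_gmc_flush_gpu_tlb(adev, 0);    /* drop stale translations for VMID 0   */
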
...@@ -357,8 +359,8 @@ int amdgpu_gart_init(struct amdgpu_device *adev) ...@@ -357,8 +359,8 @@ int amdgpu_gart_init(struct amdgpu_device *adev)
if (r) if (r)
return r; return r;
/* Compute table size */ /* Compute table size */
adev->gart.num_cpu_pages = adev->mc.gart_size / PAGE_SIZE; adev->gart.num_cpu_pages = adev->gmc.gart_size / PAGE_SIZE;
adev->gart.num_gpu_pages = adev->mc.gart_size / AMDGPU_GPU_PAGE_SIZE; adev->gart.num_gpu_pages = adev->gmc.gart_size / AMDGPU_GPU_PAGE_SIZE;
DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n", DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
adev->gart.num_cpu_pages, adev->gart.num_gpu_pages); adev->gart.num_cpu_pages, adev->gart.num_gpu_pages);
......
...@@ -31,7 +31,6 @@ ...@@ -31,7 +31,6 @@
*/ */
struct amdgpu_device; struct amdgpu_device;
struct amdgpu_bo; struct amdgpu_bo;
struct amdgpu_gart_funcs;
#define AMDGPU_GPU_PAGE_SIZE 4096 #define AMDGPU_GPU_PAGE_SIZE 4096
#define AMDGPU_GPU_PAGE_MASK (AMDGPU_GPU_PAGE_SIZE - 1) #define AMDGPU_GPU_PAGE_MASK (AMDGPU_GPU_PAGE_SIZE - 1)
...@@ -52,8 +51,6 @@ struct amdgpu_gart { ...@@ -52,8 +51,6 @@ struct amdgpu_gart {
/* Asic default pte flags */ /* Asic default pte flags */
uint64_t gart_pte_flags; uint64_t gart_pte_flags;
const struct amdgpu_gart_funcs *gart_funcs;
}; };
int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev); int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev);
......
...@@ -60,7 +60,7 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size, ...@@ -60,7 +60,7 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
retry: retry:
r = amdgpu_bo_create(adev, size, alignment, kernel, initial_domain, r = amdgpu_bo_create(adev, size, alignment, kernel, initial_domain,
flags, NULL, resv, 0, &bo); flags, NULL, resv, &bo);
if (r) { if (r) {
if (r != -ERESTARTSYS) { if (r != -ERESTARTSYS) {
if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) { if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
...@@ -523,12 +523,13 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev, ...@@ -523,12 +523,13 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
goto error; goto error;
if (operation == AMDGPU_VA_OP_MAP || if (operation == AMDGPU_VA_OP_MAP ||
operation == AMDGPU_VA_OP_REPLACE) operation == AMDGPU_VA_OP_REPLACE) {
r = amdgpu_vm_bo_update(adev, bo_va, false); r = amdgpu_vm_bo_update(adev, bo_va, false);
if (r)
goto error;
}
r = amdgpu_vm_update_directories(adev, vm); r = amdgpu_vm_update_directories(adev, vm);
if (r)
goto error;
error: error:
if (r && r != -ERESTARTSYS) if (r && r != -ERESTARTSYS)
...@@ -634,7 +635,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, ...@@ -634,7 +635,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
if (r) if (r)
goto error_backoff; goto error_backoff;
va_flags = amdgpu_vm_get_pte_flags(adev, args->flags); va_flags = amdgpu_gmc_get_pte_flags(adev, args->flags);
r = amdgpu_vm_bo_map(adev, bo_va, args->va_address, r = amdgpu_vm_bo_map(adev, bo_va, args->va_address,
args->offset_in_bo, args->map_size, args->offset_in_bo, args->map_size,
va_flags); va_flags);
...@@ -654,7 +655,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, ...@@ -654,7 +655,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
if (r) if (r)
goto error_backoff; goto error_backoff;
va_flags = amdgpu_vm_get_pte_flags(adev, args->flags); va_flags = amdgpu_gmc_get_pte_flags(adev, args->flags);
r = amdgpu_vm_bo_replace_map(adev, bo_va, args->va_address, r = amdgpu_vm_bo_replace_map(adev, bo_va, args->va_address,
args->offset_in_bo, args->map_size, args->offset_in_bo, args->map_size,
va_flags); va_flags);
......
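
Two things change here: amdgpu_gem_va_update_vm now bails out before touching the page directories when the BO update fails, instead of carrying a stale error into amdgpu_vm_update_directories, and the PTE-flag translation moves behind the new GMC interface. Presumably the renamed wrapper dispatches through the gmc_funcs table introduced in amdgpu_gmc.h below; a hedged guess at its shape (not quoted from the patch):

/* Assumed shape of the renamed wrapper, mirroring the old
 * amdgpu_vm_get_pte_flags but routed via adev->gmc.gmc_funcs: */
#define amdgpu_gmc_get_pte_flags(adev, flags) \
        ((adev)->gmc.gmc_funcs->get_vm_pte_flags((adev), (flags)))
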
/*
* Copyright 2018 Advanced Micro Devices, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
*/
#ifndef __AMDGPU_GMC_H__
#define __AMDGPU_GMC_H__
#include <linux/types.h>
#include "amdgpu_irq.h"
struct firmware;
/*
* VMHUB structures, functions & helpers
*/
struct amdgpu_vmhub {
uint32_t ctx0_ptb_addr_lo32;
uint32_t ctx0_ptb_addr_hi32;
uint32_t vm_inv_eng0_req;
uint32_t vm_inv_eng0_ack;
uint32_t vm_context0_cntl;
uint32_t vm_l2_pro_fault_status;
uint32_t vm_l2_pro_fault_cntl;
};
/*
* GPU MC structures, functions & helpers
*/
struct amdgpu_gmc_funcs {
/* flush the vm tlb via mmio */
void (*flush_gpu_tlb)(struct amdgpu_device *adev,
uint32_t vmid);
/* flush the vm tlb via ring */
uint64_t (*emit_flush_gpu_tlb)(struct amdgpu_ring *ring, unsigned vmid,
uint64_t pd_addr);
/* Change the VMID -> PASID mapping */
void (*emit_pasid_mapping)(struct amdgpu_ring *ring, unsigned vmid,
unsigned pasid);
/* write pte/pde updates using the cpu */
int (*set_pte_pde)(struct amdgpu_device *adev,
void *cpu_pt_addr, /* cpu addr of page table */
uint32_t gpu_page_idx, /* pte/pde to update */
uint64_t addr, /* addr to write into pte/pde */
uint64_t flags); /* access flags */
/* enable/disable PRT support */
void (*set_prt)(struct amdgpu_device *adev, bool enable);
/* set pte flags based per asic */
uint64_t (*get_vm_pte_flags)(struct amdgpu_device *adev,
uint32_t flags);
/* get the pde for a given mc addr */
void (*get_vm_pde)(struct amdgpu_device *adev, int level,
u64 *dst, u64 *flags);
};
struct amdgpu_gmc {
resource_size_t aper_size;
resource_size_t aper_base;
/* for some chips with <= 32MB we need to lie
* about vram size near mc fb location */
u64 mc_vram_size;
u64 visible_vram_size;
u64 gart_size;
u64 gart_start;
u64 gart_end;
u64 vram_start;
u64 vram_end;
unsigned vram_width;
u64 real_vram_size;
int vram_mtrr;
u64 mc_mask;
const struct firmware *fw; /* MC firmware */
uint32_t fw_version;
struct amdgpu_irq_src vm_fault;
uint32_t vram_type;
uint32_t srbm_soft_reset;
bool prt_warning;
uint64_t stolen_size;
/* apertures */
u64 shared_aperture_start;
u64 shared_aperture_end;
u64 private_aperture_start;
u64 private_aperture_end;
/* protects concurrent invalidation */
spinlock_t invalidate_lock;
bool translate_further;
const struct amdgpu_gmc_funcs *gmc_funcs;
};
#endif
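
This new header collects the old adev->mc state and the per-ASIC gart/mc callbacks into a single struct amdgpu_gmc with an amdgpu_gmc_funcs vtable, which is why the rest of the series renames adev->mc to adev->gmc and amdgpu_gart_set_pte_pde to amdgpu_gmc_set_pte_pde. The matching dispatch helpers are not shown in this hunk; a hedged sketch of what callers likely go through (names assumed from the callbacks above, the real definitions may be arranged differently):

/* Assumed dispatch wrappers over the new vtable: */
#define amdgpu_gmc_flush_gpu_tlb(adev, vmid) \
        ((adev)->gmc.gmc_funcs->flush_gpu_tlb((adev), (vmid)))
#define amdgpu_gmc_emit_flush_gpu_tlb(r, vmid, addr) \
        ((r)->adev->gmc.gmc_funcs->emit_flush_gpu_tlb((r), (vmid), (addr)))
#define amdgpu_gmc_emit_pasid_mapping(r, vmid, pasid) \
        ((r)->adev->gmc.gmc_funcs->emit_pasid_mapping((r), (vmid), (pasid)))
#define amdgpu_gmc_set_pte_pde(adev, pt, idx, addr, flags) \
        ((adev)->gmc.gmc_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags)))
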
...@@ -56,7 +56,7 @@ static int amdgpu_gtt_mgr_init(struct ttm_mem_type_manager *man, ...@@ -56,7 +56,7 @@ static int amdgpu_gtt_mgr_init(struct ttm_mem_type_manager *man,
return -ENOMEM; return -ENOMEM;
start = AMDGPU_GTT_MAX_TRANSFER_SIZE * AMDGPU_GTT_NUM_TRANSFER_WINDOWS; start = AMDGPU_GTT_MAX_TRANSFER_SIZE * AMDGPU_GTT_NUM_TRANSFER_WINDOWS;
size = (adev->mc.gart_size >> PAGE_SHIFT) - start; size = (adev->gmc.gart_size >> PAGE_SHIFT) - start;
drm_mm_init(&mgr->mm, start, size); drm_mm_init(&mgr->mm, start, size);
spin_lock_init(&mgr->lock); spin_lock_init(&mgr->lock);
atomic64_set(&mgr->available, p_size); atomic64_set(&mgr->available, p_size);
...@@ -75,7 +75,7 @@ static int amdgpu_gtt_mgr_init(struct ttm_mem_type_manager *man, ...@@ -75,7 +75,7 @@ static int amdgpu_gtt_mgr_init(struct ttm_mem_type_manager *man,
static int amdgpu_gtt_mgr_fini(struct ttm_mem_type_manager *man) static int amdgpu_gtt_mgr_fini(struct ttm_mem_type_manager *man)
{ {
struct amdgpu_gtt_mgr *mgr = man->priv; struct amdgpu_gtt_mgr *mgr = man->priv;
spin_lock(&mgr->lock);
drm_mm_takedown(&mgr->mm); drm_mm_takedown(&mgr->mm);
spin_unlock(&mgr->lock); spin_unlock(&mgr->lock);
kfree(mgr); kfree(mgr);
......
...@@ -184,12 +184,15 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, ...@@ -184,12 +184,15 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
if (ring->funcs->init_cond_exec) if (ring->funcs->init_cond_exec)
patch_offset = amdgpu_ring_init_cond_exec(ring); patch_offset = amdgpu_ring_init_cond_exec(ring);
if (ring->funcs->emit_hdp_flush
#ifdef CONFIG_X86_64 #ifdef CONFIG_X86_64
&& !(adev->flags & AMD_IS_APU) if (!(adev->flags & AMD_IS_APU))
#endif #endif
) {
amdgpu_ring_emit_hdp_flush(ring); if (ring->funcs->emit_hdp_flush)
amdgpu_ring_emit_hdp_flush(ring);
else
amdgpu_asic_flush_hdp(adev, ring);
}
skip_preamble = ring->current_ctx == fence_ctx; skip_preamble = ring->current_ctx == fence_ctx;
need_ctx_switch = ring->current_ctx != fence_ctx; need_ctx_switch = ring->current_ctx != fence_ctx;
...@@ -219,12 +222,10 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, ...@@ -219,12 +222,10 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
if (ring->funcs->emit_tmz) if (ring->funcs->emit_tmz)
amdgpu_ring_emit_tmz(ring, false); amdgpu_ring_emit_tmz(ring, false);
if (ring->funcs->emit_hdp_invalidate
#ifdef CONFIG_X86_64 #ifdef CONFIG_X86_64
&& !(adev->flags & AMD_IS_APU) if (!(adev->flags & AMD_IS_APU))
#endif #endif
) amdgpu_asic_invalidate_hdp(adev, ring);
amdgpu_ring_emit_hdp_invalidate(ring);
r = amdgpu_fence_emit(ring, f); r = amdgpu_fence_emit(ring, f);
if (r) { if (r) {
......
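
The interleaved columns make this hunk hard to read, so here is the new control flow condensed (logic only, as I read it; the CONFIG_X86_64 guard means the APU exemption only applies there, since APUs do not go through the HDP cache): before the IBs the ring's own flush packet is preferred, with the asic-level CPU flush as fallback, and after the IBs the invalidate always goes through the asic callback.

if (!(adev->flags & AMD_IS_APU)) {
        if (ring->funcs->emit_hdp_flush)
                amdgpu_ring_emit_hdp_flush(ring);
        else
                amdgpu_asic_flush_hdp(adev, ring);
}
/* ... emit the IBs ... */
if (!(adev->flags & AMD_IS_APU))
        amdgpu_asic_invalidate_hdp(adev, ring);
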
...@@ -40,6 +40,12 @@ ...@@ -40,6 +40,12 @@
*/ */
static DEFINE_IDA(amdgpu_pasid_ida); static DEFINE_IDA(amdgpu_pasid_ida);
/* Helper to free pasid from a fence callback */
struct amdgpu_pasid_cb {
struct dma_fence_cb cb;
unsigned int pasid;
};
/** /**
* amdgpu_pasid_alloc - Allocate a PASID * amdgpu_pasid_alloc - Allocate a PASID
* @bits: Maximum width of the PASID in bits, must be at least 1 * @bits: Maximum width of the PASID in bits, must be at least 1
...@@ -63,6 +69,9 @@ int amdgpu_pasid_alloc(unsigned int bits) ...@@ -63,6 +69,9 @@ int amdgpu_pasid_alloc(unsigned int bits)
break; break;
} }
if (pasid >= 0)
trace_amdgpu_pasid_allocated(pasid);
return pasid; return pasid;
} }
...@@ -72,9 +81,86 @@ int amdgpu_pasid_alloc(unsigned int bits) ...@@ -72,9 +81,86 @@ int amdgpu_pasid_alloc(unsigned int bits)
*/ */
void amdgpu_pasid_free(unsigned int pasid) void amdgpu_pasid_free(unsigned int pasid)
{ {
trace_amdgpu_pasid_freed(pasid);
ida_simple_remove(&amdgpu_pasid_ida, pasid); ida_simple_remove(&amdgpu_pasid_ida, pasid);
} }
static void amdgpu_pasid_free_cb(struct dma_fence *fence,
struct dma_fence_cb *_cb)
{
struct amdgpu_pasid_cb *cb =
container_of(_cb, struct amdgpu_pasid_cb, cb);
amdgpu_pasid_free(cb->pasid);
dma_fence_put(fence);
kfree(cb);
}
/**
* amdgpu_pasid_free_delayed - free pasid when fences signal
*
* @resv: reservation object with the fences to wait for
* @pasid: pasid to free
*
* Free the pasid only after all the fences in resv are signaled.
*/
void amdgpu_pasid_free_delayed(struct reservation_object *resv,
unsigned int pasid)
{
struct dma_fence *fence, **fences;
struct amdgpu_pasid_cb *cb;
unsigned count;
int r;
r = reservation_object_get_fences_rcu(resv, NULL, &count, &fences);
if (r)
goto fallback;
if (count == 0) {
amdgpu_pasid_free(pasid);
return;
}
if (count == 1) {
fence = fences[0];
kfree(fences);
} else {
uint64_t context = dma_fence_context_alloc(1);
struct dma_fence_array *array;
array = dma_fence_array_create(count, fences, context,
1, false);
if (!array) {
kfree(fences);
goto fallback;
}
fence = &array->base;
}
cb = kmalloc(sizeof(*cb), GFP_KERNEL);
if (!cb) {
/* Last resort when we are OOM */
dma_fence_wait(fence, false);
dma_fence_put(fence);
amdgpu_pasid_free(pasid);
} else {
cb->pasid = pasid;
if (dma_fence_add_callback(fence, &cb->cb,
amdgpu_pasid_free_cb))
amdgpu_pasid_free_cb(fence, &cb->cb);
}
return;
fallback:
/* Not enough memory for the delayed delete, as last resort
* block for all the fences to complete.
*/
reservation_object_wait_timeout_rcu(resv, true, false,
MAX_SCHEDULE_TIMEOUT);
amdgpu_pasid_free(pasid);
}
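
The point of the helper is that a PASID must not be recycled while the GPU may still be using the old VMID-to-PASID mapping. A likely caller (an assumption based on the postclose changes at the end of this series, where a pd pointer is added; field names are from the VM code of this era) frees the file's PASID only once the fences on the root page directory's reservation object have signaled:

/* Illustrative teardown path, assuming the root PD's reservation object
 * carries the last fences that may still reference the PASID: */
unsigned int pasid = fpriv->vm.pasid;
struct amdgpu_bo *pd = amdgpu_bo_ref(fpriv->vm.root.base.bo);

amdgpu_vm_fini(adev, &fpriv->vm);
if (pasid)
        amdgpu_pasid_free_delayed(pd->tbo.resv, pasid);
amdgpu_bo_unref(&pd);
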
/* /*
* VMID manager * VMID manager
* *
...@@ -96,164 +182,185 @@ bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev, ...@@ -96,164 +182,185 @@ bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,
atomic_read(&adev->gpu_reset_counter); atomic_read(&adev->gpu_reset_counter);
} }
/* idr_mgr->lock must be held */
static int amdgpu_vmid_grab_reserved_locked(struct amdgpu_vm *vm,
struct amdgpu_ring *ring,
struct amdgpu_sync *sync,
struct dma_fence *fence,
struct amdgpu_job *job)
{
struct amdgpu_device *adev = ring->adev;
unsigned vmhub = ring->funcs->vmhub;
uint64_t fence_context = adev->fence_context + ring->idx;
struct amdgpu_vmid *id = vm->reserved_vmid[vmhub];
struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
struct dma_fence *updates = sync->last_vm_update;
int r = 0;
struct dma_fence *flushed, *tmp;
bool needs_flush = vm->use_cpu_for_update;
flushed = id->flushed_updates;
if ((amdgpu_vmid_had_gpu_reset(adev, id)) ||
(atomic64_read(&id->owner) != vm->entity.fence_context) ||
(job->vm_pd_addr != id->pd_gpu_addr) ||
(updates && (!flushed || updates->context != flushed->context ||
dma_fence_is_later(updates, flushed))) ||
(!id->last_flush || (id->last_flush->context != fence_context &&
!dma_fence_is_signaled(id->last_flush)))) {
needs_flush = true;
/* to prevent one context starved by another context */
id->pd_gpu_addr = 0;
tmp = amdgpu_sync_peek_fence(&id->active, ring);
if (tmp) {
r = amdgpu_sync_fence(adev, sync, tmp, false);
return r;
}
}
/* Good we can use this VMID. Remember this submission as
* user of the VMID.
*/
r = amdgpu_sync_fence(ring->adev, &id->active, fence, false);
if (r)
goto out;
if (updates && (!flushed || updates->context != flushed->context ||
dma_fence_is_later(updates, flushed))) {
dma_fence_put(id->flushed_updates);
id->flushed_updates = dma_fence_get(updates);
}
id->pd_gpu_addr = job->vm_pd_addr;
atomic64_set(&id->owner, vm->entity.fence_context);
job->vm_needs_flush = needs_flush;
if (needs_flush) {
dma_fence_put(id->last_flush);
id->last_flush = NULL;
}
job->vmid = id - id_mgr->ids;
trace_amdgpu_vm_grab_id(vm, ring, job);
out:
return r;
}
/** /**
* amdgpu_vm_grab_id - allocate the next free VMID * amdgpu_vm_grab_idle - grab idle VMID
* *
* @vm: vm to allocate id for * @vm: vm to allocate id for
* @ring: ring we want to submit job to * @ring: ring we want to submit job to
* @sync: sync object where we add dependencies * @sync: sync object where we add dependencies
* @fence: fence protecting ID from reuse * @idle: resulting idle VMID
* *
* Allocate an id for the vm, adding fences to the sync obj as necessary. * Try to find an idle VMID, if none is idle add a fence to wait to the sync
* object. Returns -ENOMEM when we are out of memory.
*/ */
int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring, static int amdgpu_vmid_grab_idle(struct amdgpu_vm *vm,
struct amdgpu_sync *sync, struct dma_fence *fence, struct amdgpu_ring *ring,
struct amdgpu_job *job) struct amdgpu_sync *sync,
struct amdgpu_vmid **idle)
{ {
struct amdgpu_device *adev = ring->adev; struct amdgpu_device *adev = ring->adev;
unsigned vmhub = ring->funcs->vmhub; unsigned vmhub = ring->funcs->vmhub;
struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub]; struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
uint64_t fence_context = adev->fence_context + ring->idx;
struct dma_fence *updates = sync->last_vm_update;
struct amdgpu_vmid *id, *idle;
struct dma_fence **fences; struct dma_fence **fences;
unsigned i; unsigned i;
int r = 0; int r;
if (ring->vmid_wait && !dma_fence_is_signaled(ring->vmid_wait))
return amdgpu_sync_fence(adev, sync, ring->vmid_wait, false);
mutex_lock(&id_mgr->lock);
if (vm->reserved_vmid[vmhub]) {
r = amdgpu_vmid_grab_reserved_locked(vm, ring, sync, fence, job);
mutex_unlock(&id_mgr->lock);
return r;
}
fences = kmalloc_array(sizeof(void *), id_mgr->num_ids, GFP_KERNEL); fences = kmalloc_array(sizeof(void *), id_mgr->num_ids, GFP_KERNEL);
if (!fences) { if (!fences)
mutex_unlock(&id_mgr->lock);
return -ENOMEM; return -ENOMEM;
}
/* Check if we have an idle VMID */ /* Check if we have an idle VMID */
i = 0; i = 0;
list_for_each_entry(idle, &id_mgr->ids_lru, list) { list_for_each_entry((*idle), &id_mgr->ids_lru, list) {
fences[i] = amdgpu_sync_peek_fence(&idle->active, ring); fences[i] = amdgpu_sync_peek_fence(&(*idle)->active, ring);
if (!fences[i]) if (!fences[i])
break; break;
++i; ++i;
} }
/* If we can't find a idle VMID to use, wait till one becomes available */ /* If we can't find a idle VMID to use, wait till one becomes available */
if (&idle->list == &id_mgr->ids_lru) { if (&(*idle)->list == &id_mgr->ids_lru) {
u64 fence_context = adev->vm_manager.fence_context + ring->idx; u64 fence_context = adev->vm_manager.fence_context + ring->idx;
unsigned seqno = ++adev->vm_manager.seqno[ring->idx]; unsigned seqno = ++adev->vm_manager.seqno[ring->idx];
struct dma_fence_array *array; struct dma_fence_array *array;
unsigned j; unsigned j;
*idle = NULL;
for (j = 0; j < i; ++j) for (j = 0; j < i; ++j)
dma_fence_get(fences[j]); dma_fence_get(fences[j]);
array = dma_fence_array_create(i, fences, fence_context, array = dma_fence_array_create(i, fences, fence_context,
seqno, true); seqno, true);
if (!array) { if (!array) {
for (j = 0; j < i; ++j) for (j = 0; j < i; ++j)
dma_fence_put(fences[j]); dma_fence_put(fences[j]);
kfree(fences); kfree(fences);
r = -ENOMEM; return -ENOMEM;
goto error;
} }
r = amdgpu_sync_fence(adev, sync, &array->base, false);
dma_fence_put(ring->vmid_wait);
ring->vmid_wait = &array->base;
return r;
}
kfree(fences);
r = amdgpu_sync_fence(ring->adev, sync, &array->base, false); return 0;
dma_fence_put(&array->base); }
if (r)
goto error;
mutex_unlock(&id_mgr->lock); /**
return 0; * amdgpu_vm_grab_reserved - try to assign reserved VMID
*
* @vm: vm to allocate id for
* @ring: ring we want to submit job to
* @sync: sync object where we add dependencies
* @fence: fence protecting ID from reuse
* @job: job who wants to use the VMID
*
* Try to assign a reserved VMID.
*/
static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
struct amdgpu_ring *ring,
struct amdgpu_sync *sync,
struct dma_fence *fence,
struct amdgpu_job *job,
struct amdgpu_vmid **id)
{
struct amdgpu_device *adev = ring->adev;
unsigned vmhub = ring->funcs->vmhub;
uint64_t fence_context = adev->fence_context + ring->idx;
struct dma_fence *updates = sync->last_vm_update;
bool needs_flush = vm->use_cpu_for_update;
int r = 0;
*id = vm->reserved_vmid[vmhub];
if (updates && (*id)->flushed_updates &&
updates->context == (*id)->flushed_updates->context &&
!dma_fence_is_later(updates, (*id)->flushed_updates))
updates = NULL;
if ((*id)->owner != vm->entity.fence_context ||
job->vm_pd_addr != (*id)->pd_gpu_addr ||
updates || !(*id)->last_flush ||
((*id)->last_flush->context != fence_context &&
!dma_fence_is_signaled((*id)->last_flush))) {
struct dma_fence *tmp;
/* to prevent one context starved by another context */
(*id)->pd_gpu_addr = 0;
tmp = amdgpu_sync_peek_fence(&(*id)->active, ring);
if (tmp) {
*id = NULL;
r = amdgpu_sync_fence(adev, sync, tmp, false);
return r;
}
needs_flush = true;
} }
kfree(fences);
/* Good we can use this VMID. Remember this submission as
* user of the VMID.
*/
r = amdgpu_sync_fence(ring->adev, &(*id)->active, fence, false);
if (r)
return r;
if (updates) {
dma_fence_put((*id)->flushed_updates);
(*id)->flushed_updates = dma_fence_get(updates);
}
job->vm_needs_flush = needs_flush;
return 0;
}
/**
* amdgpu_vm_grab_used - try to reuse a VMID
*
* @vm: vm to allocate id for
* @ring: ring we want to submit job to
* @sync: sync object where we add dependencies
* @fence: fence protecting ID from reuse
* @job: job who wants to use the VMID
* @id: resulting VMID
*
* Try to reuse a VMID for this submission.
*/
static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
struct amdgpu_ring *ring,
struct amdgpu_sync *sync,
struct dma_fence *fence,
struct amdgpu_job *job,
struct amdgpu_vmid **id)
{
struct amdgpu_device *adev = ring->adev;
unsigned vmhub = ring->funcs->vmhub;
struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
uint64_t fence_context = adev->fence_context + ring->idx;
struct dma_fence *updates = sync->last_vm_update;
int r;
job->vm_needs_flush = vm->use_cpu_for_update; job->vm_needs_flush = vm->use_cpu_for_update;
/* Check if we can use a VMID already assigned to this VM */ /* Check if we can use a VMID already assigned to this VM */
list_for_each_entry_reverse(id, &id_mgr->ids_lru, list) { list_for_each_entry_reverse((*id), &id_mgr->ids_lru, list) {
struct dma_fence *flushed;
bool needs_flush = vm->use_cpu_for_update; bool needs_flush = vm->use_cpu_for_update;
struct dma_fence *flushed;
/* Check all the prerequisites to using this VMID */ /* Check all the prerequisites to using this VMID */
if (amdgpu_vmid_had_gpu_reset(adev, id)) if ((*id)->owner != vm->entity.fence_context)
continue;
if (atomic64_read(&id->owner) != vm->entity.fence_context)
continue; continue;
if (job->vm_pd_addr != id->pd_gpu_addr) if ((*id)->pd_gpu_addr != job->vm_pd_addr)
continue; continue;
if (!id->last_flush || if (!(*id)->last_flush ||
(id->last_flush->context != fence_context && ((*id)->last_flush->context != fence_context &&
!dma_fence_is_signaled(id->last_flush))) !dma_fence_is_signaled((*id)->last_flush)))
needs_flush = true; needs_flush = true;
flushed = id->flushed_updates; flushed = (*id)->flushed_updates;
if (updates && (!flushed || dma_fence_is_later(updates, flushed))) if (updates && (!flushed || dma_fence_is_later(updates, flushed)))
needs_flush = true; needs_flush = true;
...@@ -261,47 +368,91 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring, ...@@ -261,47 +368,91 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
if (adev->asic_type < CHIP_VEGA10 && needs_flush) if (adev->asic_type < CHIP_VEGA10 && needs_flush)
continue; continue;
/* Good we can use this VMID. Remember this submission as /* Good, we can use this VMID. Remember this submission as
* user of the VMID. * user of the VMID.
*/ */
r = amdgpu_sync_fence(ring->adev, &id->active, fence, false); r = amdgpu_sync_fence(ring->adev, &(*id)->active, fence, false);
if (r) if (r)
goto error; return r;
if (updates && (!flushed || dma_fence_is_later(updates, flushed))) { if (updates && (!flushed || dma_fence_is_later(updates, flushed))) {
dma_fence_put(id->flushed_updates); dma_fence_put((*id)->flushed_updates);
id->flushed_updates = dma_fence_get(updates); (*id)->flushed_updates = dma_fence_get(updates);
} }
if (needs_flush) job->vm_needs_flush |= needs_flush;
goto needs_flush; return 0;
else }
goto no_flush_needed;
}; *id = NULL;
return 0;
}
/* Still no ID to use? Then use the idle one found earlier */ /**
id = idle; * amdgpu_vm_grab_id - allocate the next free VMID
*
* @vm: vm to allocate id for
* @ring: ring we want to submit job to
* @sync: sync object where we add dependencies
* @fence: fence protecting ID from reuse
* @job: job who wants to use the VMID
*
* Allocate an id for the vm, adding fences to the sync obj as necessary.
*/
int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
struct amdgpu_sync *sync, struct dma_fence *fence,
struct amdgpu_job *job)
{
struct amdgpu_device *adev = ring->adev;
unsigned vmhub = ring->funcs->vmhub;
struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
struct amdgpu_vmid *idle = NULL;
struct amdgpu_vmid *id = NULL;
int r = 0;
/* Remember this submission as user of the VMID */ mutex_lock(&id_mgr->lock);
r = amdgpu_sync_fence(ring->adev, &id->active, fence, false); r = amdgpu_vmid_grab_idle(vm, ring, sync, &idle);
if (r) if (r || !idle)
goto error; goto error;
id->pd_gpu_addr = job->vm_pd_addr; if (vm->reserved_vmid[vmhub]) {
dma_fence_put(id->flushed_updates); r = amdgpu_vmid_grab_reserved(vm, ring, sync, fence, job, &id);
id->flushed_updates = dma_fence_get(updates); if (r || !id)
atomic64_set(&id->owner, vm->entity.fence_context); goto error;
} else {
r = amdgpu_vmid_grab_used(vm, ring, sync, fence, job, &id);
if (r)
goto error;
needs_flush: if (!id) {
job->vm_needs_flush = true; struct dma_fence *updates = sync->last_vm_update;
dma_fence_put(id->last_flush);
id->last_flush = NULL;
no_flush_needed: /* Still no ID to use? Then use the idle one found earlier */
list_move_tail(&id->list, &id_mgr->ids_lru); id = idle;
/* Remember this submission as user of the VMID */
r = amdgpu_sync_fence(ring->adev, &id->active,
fence, false);
if (r)
goto error;
dma_fence_put(id->flushed_updates);
id->flushed_updates = dma_fence_get(updates);
job->vm_needs_flush = true;
}
list_move_tail(&id->list, &id_mgr->ids_lru);
}
id->pd_gpu_addr = job->vm_pd_addr;
id->owner = vm->entity.fence_context;
if (job->vm_needs_flush) {
dma_fence_put(id->last_flush);
id->last_flush = NULL;
}
job->vmid = id - id_mgr->ids; job->vmid = id - id_mgr->ids;
job->pasid = vm->pasid;
trace_amdgpu_vm_grab_id(vm, ring, job); trace_amdgpu_vm_grab_id(vm, ring, job);
error: error:
...@@ -370,13 +521,15 @@ void amdgpu_vmid_reset(struct amdgpu_device *adev, unsigned vmhub, ...@@ -370,13 +521,15 @@ void amdgpu_vmid_reset(struct amdgpu_device *adev, unsigned vmhub,
struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub]; struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
struct amdgpu_vmid *id = &id_mgr->ids[vmid]; struct amdgpu_vmid *id = &id_mgr->ids[vmid];
atomic64_set(&id->owner, 0); mutex_lock(&id_mgr->lock);
id->owner = 0;
id->gds_base = 0; id->gds_base = 0;
id->gds_size = 0; id->gds_size = 0;
id->gws_base = 0; id->gws_base = 0;
id->gws_size = 0; id->gws_size = 0;
id->oa_base = 0; id->oa_base = 0;
id->oa_size = 0; id->oa_size = 0;
mutex_unlock(&id_mgr->lock);
} }
/** /**
...@@ -454,6 +607,7 @@ void amdgpu_vmid_mgr_fini(struct amdgpu_device *adev) ...@@ -454,6 +607,7 @@ void amdgpu_vmid_mgr_fini(struct amdgpu_device *adev)
amdgpu_sync_free(&id->active); amdgpu_sync_free(&id->active);
dma_fence_put(id->flushed_updates); dma_fence_put(id->flushed_updates);
dma_fence_put(id->last_flush); dma_fence_put(id->last_flush);
dma_fence_put(id->pasid_mapping);
} }
} }
} }
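
Taken together, the refactor turns the old monolithic grab path into a fixed-order pipeline: amdgpu_vmid_grab_idle() first finds an idle VMID or adds a fence to wait for one, amdgpu_vmid_grab_reserved() is tried when the VM holds a reserved VMID for this hub, amdgpu_vmid_grab_used() otherwise tries to reuse a VMID this VM already owns, and only if all of that fails is the idle VMID from the first step assigned. The common tail records pd_gpu_addr and owner, clears last_flush when a flush is needed, and stashes the VM's PASID in the job, which pairs with the new pasid/pasid_mapping fields added to struct amdgpu_vmid below. The owner field can also drop its atomic64_t type because every writer now runs under id_mgr->lock; amdgpu_vmid_reset takes the mutex explicitly for the same reason.
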
...@@ -43,7 +43,7 @@ struct amdgpu_vmid { ...@@ -43,7 +43,7 @@ struct amdgpu_vmid {
struct list_head list; struct list_head list;
struct amdgpu_sync active; struct amdgpu_sync active;
struct dma_fence *last_flush; struct dma_fence *last_flush;
atomic64_t owner; uint64_t owner;
uint64_t pd_gpu_addr; uint64_t pd_gpu_addr;
/* last flushed PD/PT update */ /* last flushed PD/PT update */
...@@ -57,6 +57,9 @@ struct amdgpu_vmid { ...@@ -57,6 +57,9 @@ struct amdgpu_vmid {
uint32_t gws_size; uint32_t gws_size;
uint32_t oa_base; uint32_t oa_base;
uint32_t oa_size; uint32_t oa_size;
unsigned pasid;
struct dma_fence *pasid_mapping;
}; };
struct amdgpu_vmid_mgr { struct amdgpu_vmid_mgr {
...@@ -69,6 +72,8 @@ struct amdgpu_vmid_mgr { ...@@ -69,6 +72,8 @@ struct amdgpu_vmid_mgr {
int amdgpu_pasid_alloc(unsigned int bits); int amdgpu_pasid_alloc(unsigned int bits);
void amdgpu_pasid_free(unsigned int pasid); void amdgpu_pasid_free(unsigned int pasid);
void amdgpu_pasid_free_delayed(struct reservation_object *resv,
unsigned int pasid);
bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev, bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,
struct amdgpu_vmid *id); struct amdgpu_vmid *id);
......
...@@ -109,7 +109,7 @@ struct amdgpu_iv_entry { ...@@ -109,7 +109,7 @@ struct amdgpu_iv_entry {
unsigned vmid_src; unsigned vmid_src;
uint64_t timestamp; uint64_t timestamp;
unsigned timestamp_src; unsigned timestamp_src;
unsigned pas_id; unsigned pasid;
unsigned pasid_src; unsigned pasid_src;
unsigned src_data[AMDGPU_IH_SRC_DATA_MAX_SIZE_DW]; unsigned src_data[AMDGPU_IH_SRC_DATA_MAX_SIZE_DW];
const uint32_t *iv_entry; const uint32_t *iv_entry;
......
...@@ -92,7 +92,7 @@ static void amdgpu_irq_reset_work_func(struct work_struct *work) ...@@ -92,7 +92,7 @@ static void amdgpu_irq_reset_work_func(struct work_struct *work)
} }
/* Disable *all* interrupts */ /* Disable *all* interrupts */
static void amdgpu_irq_disable_all(struct amdgpu_device *adev) void amdgpu_irq_disable_all(struct amdgpu_device *adev)
{ {
unsigned long irqflags; unsigned long irqflags;
unsigned i, j, k; unsigned i, j, k;
...@@ -122,55 +122,6 @@ static void amdgpu_irq_disable_all(struct amdgpu_device *adev) ...@@ -122,55 +122,6 @@ static void amdgpu_irq_disable_all(struct amdgpu_device *adev)
spin_unlock_irqrestore(&adev->irq.lock, irqflags); spin_unlock_irqrestore(&adev->irq.lock, irqflags);
} }
/**
* amdgpu_irq_preinstall - drm irq preinstall callback
*
* @dev: drm dev pointer
*
* Gets the hw ready to enable irqs (all asics).
* This function disables all interrupt sources on the GPU.
*/
void amdgpu_irq_preinstall(struct drm_device *dev)
{
struct amdgpu_device *adev = dev->dev_private;
/* Disable *all* interrupts */
amdgpu_irq_disable_all(adev);
/* Clear bits */
amdgpu_ih_process(adev);
}
/**
* amdgpu_irq_postinstall - drm irq preinstall callback
*
* @dev: drm dev pointer
*
* Handles stuff to be done after enabling irqs (all asics).
* Returns 0 on success.
*/
int amdgpu_irq_postinstall(struct drm_device *dev)
{
dev->max_vblank_count = 0x00ffffff;
return 0;
}
/**
* amdgpu_irq_uninstall - drm irq uninstall callback
*
* @dev: drm dev pointer
*
* This function disables all interrupt sources on the GPU (all asics).
*/
void amdgpu_irq_uninstall(struct drm_device *dev)
{
struct amdgpu_device *adev = dev->dev_private;
if (adev == NULL) {
return;
}
amdgpu_irq_disable_all(adev);
}
/** /**
* amdgpu_irq_handler - irq handler * amdgpu_irq_handler - irq handler
* *
...@@ -261,6 +212,7 @@ int amdgpu_irq_init(struct amdgpu_device *adev) ...@@ -261,6 +212,7 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
cancel_work_sync(&adev->reset_work); cancel_work_sync(&adev->reset_work);
return r; return r;
} }
adev->ddev->max_vblank_count = 0x00ffffff;
DRM_DEBUG("amdgpu: irq initialized.\n"); DRM_DEBUG("amdgpu: irq initialized.\n");
return 0; return 0;
......
...@@ -78,9 +78,7 @@ struct amdgpu_irq { ...@@ -78,9 +78,7 @@ struct amdgpu_irq {
uint32_t srbm_soft_reset; uint32_t srbm_soft_reset;
}; };
void amdgpu_irq_preinstall(struct drm_device *dev); void amdgpu_irq_disable_all(struct amdgpu_device *adev);
int amdgpu_irq_postinstall(struct drm_device *dev);
void amdgpu_irq_uninstall(struct drm_device *dev);
irqreturn_t amdgpu_irq_handler(int irq, void *arg); irqreturn_t amdgpu_irq_handler(int irq, void *arg);
int amdgpu_irq_init(struct amdgpu_device *adev); int amdgpu_irq_init(struct amdgpu_device *adev);
......
...@@ -191,7 +191,7 @@ static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info, ...@@ -191,7 +191,7 @@ static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
fw_info->feature = 0; fw_info->feature = 0;
break; break;
case AMDGPU_INFO_FW_GMC: case AMDGPU_INFO_FW_GMC:
fw_info->ver = adev->mc.fw_version; fw_info->ver = adev->gmc.fw_version;
fw_info->feature = 0; fw_info->feature = 0;
break; break;
case AMDGPU_INFO_FW_GFX_ME: case AMDGPU_INFO_FW_GFX_ME:
...@@ -470,9 +470,9 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file ...@@ -470,9 +470,9 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
case AMDGPU_INFO_VRAM_GTT: { case AMDGPU_INFO_VRAM_GTT: {
struct drm_amdgpu_info_vram_gtt vram_gtt; struct drm_amdgpu_info_vram_gtt vram_gtt;
vram_gtt.vram_size = adev->mc.real_vram_size; vram_gtt.vram_size = adev->gmc.real_vram_size;
vram_gtt.vram_size -= adev->vram_pin_size; vram_gtt.vram_size -= adev->vram_pin_size;
vram_gtt.vram_cpu_accessible_size = adev->mc.visible_vram_size; vram_gtt.vram_cpu_accessible_size = adev->gmc.visible_vram_size;
vram_gtt.vram_cpu_accessible_size -= (adev->vram_pin_size - adev->invisible_pin_size); vram_gtt.vram_cpu_accessible_size -= (adev->vram_pin_size - adev->invisible_pin_size);
vram_gtt.gtt_size = adev->mman.bdev.man[TTM_PL_TT].size; vram_gtt.gtt_size = adev->mman.bdev.man[TTM_PL_TT].size;
vram_gtt.gtt_size *= PAGE_SIZE; vram_gtt.gtt_size *= PAGE_SIZE;
...@@ -484,17 +484,17 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file ...@@ -484,17 +484,17 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
struct drm_amdgpu_memory_info mem; struct drm_amdgpu_memory_info mem;
memset(&mem, 0, sizeof(mem)); memset(&mem, 0, sizeof(mem));
mem.vram.total_heap_size = adev->mc.real_vram_size; mem.vram.total_heap_size = adev->gmc.real_vram_size;
mem.vram.usable_heap_size = mem.vram.usable_heap_size =
adev->mc.real_vram_size - adev->vram_pin_size; adev->gmc.real_vram_size - adev->vram_pin_size;
mem.vram.heap_usage = mem.vram.heap_usage =
amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]); amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4; mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4;
mem.cpu_accessible_vram.total_heap_size = mem.cpu_accessible_vram.total_heap_size =
adev->mc.visible_vram_size; adev->gmc.visible_vram_size;
mem.cpu_accessible_vram.usable_heap_size = mem.cpu_accessible_vram.usable_heap_size =
adev->mc.visible_vram_size - adev->gmc.visible_vram_size -
(adev->vram_pin_size - adev->invisible_pin_size); (adev->vram_pin_size - adev->invisible_pin_size);
mem.cpu_accessible_vram.heap_usage = mem.cpu_accessible_vram.heap_usage =
amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]); amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
...@@ -580,11 +580,16 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file ...@@ -580,11 +580,16 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
dev_info.ids_flags |= AMDGPU_IDS_FLAGS_PREEMPTION; dev_info.ids_flags |= AMDGPU_IDS_FLAGS_PREEMPTION;
vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE; vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
vm_size -= AMDGPU_VA_RESERVED_SIZE;
/* Older VCE FW versions are buggy and can handle only 40bits */
if (adev->vce.fw_version < AMDGPU_VCE_FW_53_45)
vm_size = min(vm_size, 1ULL << 40);
dev_info.virtual_address_offset = AMDGPU_VA_RESERVED_SIZE; dev_info.virtual_address_offset = AMDGPU_VA_RESERVED_SIZE;
dev_info.virtual_address_max = dev_info.virtual_address_max =
min(vm_size, AMDGPU_VA_HOLE_START); min(vm_size, AMDGPU_VA_HOLE_START);
vm_size -= AMDGPU_VA_RESERVED_SIZE;
if (vm_size > AMDGPU_VA_HOLE_START) { if (vm_size > AMDGPU_VA_HOLE_START) {
dev_info.high_va_offset = AMDGPU_VA_HOLE_END; dev_info.high_va_offset = AMDGPU_VA_HOLE_END;
dev_info.high_va_max = AMDGPU_VA_HOLE_END | vm_size; dev_info.high_va_max = AMDGPU_VA_HOLE_END | vm_size;
...@@ -599,8 +604,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file ...@@ -599,8 +604,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
sizeof(adev->gfx.cu_info.ao_cu_bitmap)); sizeof(adev->gfx.cu_info.ao_cu_bitmap));
memcpy(&dev_info.cu_bitmap[0], &adev->gfx.cu_info.bitmap[0], memcpy(&dev_info.cu_bitmap[0], &adev->gfx.cu_info.bitmap[0],
sizeof(adev->gfx.cu_info.bitmap)); sizeof(adev->gfx.cu_info.bitmap));
dev_info.vram_type = adev->mc.vram_type; dev_info.vram_type = adev->gmc.vram_type;
dev_info.vram_bit_width = adev->mc.vram_width; dev_info.vram_bit_width = adev->gmc.vram_width;
dev_info.vce_harvest_config = adev->vce.harvest_config; dev_info.vce_harvest_config = adev->vce.harvest_config;
dev_info.gc_double_offchip_lds_buf = dev_info.gc_double_offchip_lds_buf =
adev->gfx.config.double_offchip_lds_buf; adev->gfx.config.double_offchip_lds_buf;
...@@ -758,6 +763,24 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file ...@@ -758,6 +763,24 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
return -EINVAL; return -EINVAL;
} }
break; break;
case AMDGPU_INFO_SENSOR_STABLE_PSTATE_GFX_SCLK:
/* get stable pstate sclk in MHz */
if (amdgpu_dpm_read_sensor(adev,
AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK,
(void *)&ui32, &ui32_size)) {
return -EINVAL;
}
ui32 /= 100;
break;
case AMDGPU_INFO_SENSOR_STABLE_PSTATE_GFX_MCLK:
/* get stable pstate mclk in MHz */
if (amdgpu_dpm_read_sensor(adev,
AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK,
(void *)&ui32, &ui32_size)) {
return -EINVAL;
}
ui32 /= 100;
break;
default: default:
DRM_DEBUG_KMS("Invalid request %d\n", DRM_DEBUG_KMS("Invalid request %d\n",
info->sensor_info.type); info->sensor_info.type);
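The two new sensor cases follow the existing calling convention: hand an opaque output buffer and a size to the DPM backend, then convert the returned 10 kHz value to MHz. A hedged, standalone sketch of that convention with invented names and fake data:

/* Illustrative sketch (not driver code): the read-sensor pattern used by
 * the new STABLE_PSTATE_*CLK queries. */
#include <stdint.h>
#include <stdio.h>

enum sensor_id { SENSOR_STABLE_SCLK, SENSOR_STABLE_MCLK };

/* backend reports clocks in 10 kHz units, as the powerplay sensors do */
static int read_sensor(enum sensor_id id, void *out, int *size)
{
	if (*size < (int)sizeof(uint32_t))
		return -1;
	*(uint32_t *)out = (id == SENSOR_STABLE_SCLK) ? 80000 : 50000;
	*size = sizeof(uint32_t);
	return 0;
}

int main(void)
{
	uint32_t ui32;
	int size = sizeof(ui32);

	if (!read_sensor(SENSOR_STABLE_SCLK, &ui32, &size))
		printf("stable sclk: %u MHz\n", ui32 / 100); /* 10 kHz -> MHz */
	return 0;
}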
...@@ -805,7 +828,7 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) ...@@ -805,7 +828,7 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
{ {
struct amdgpu_device *adev = dev->dev_private; struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_fpriv *fpriv; struct amdgpu_fpriv *fpriv;
int r; int r, pasid;
file_priv->driver_priv = NULL; file_priv->driver_priv = NULL;
...@@ -819,28 +842,25 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) ...@@ -819,28 +842,25 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
goto out_suspend; goto out_suspend;
} }
r = amdgpu_vm_init(adev, &fpriv->vm, pasid = amdgpu_pasid_alloc(16);
AMDGPU_VM_CONTEXT_GFX, 0); if (pasid < 0) {
if (r) { dev_warn(adev->dev, "No more PASIDs available!");
kfree(fpriv); pasid = 0;
goto out_suspend;
} }
r = amdgpu_vm_init(adev, &fpriv->vm, AMDGPU_VM_CONTEXT_GFX, pasid);
if (r)
goto error_pasid;
fpriv->prt_va = amdgpu_vm_bo_add(adev, &fpriv->vm, NULL); fpriv->prt_va = amdgpu_vm_bo_add(adev, &fpriv->vm, NULL);
if (!fpriv->prt_va) { if (!fpriv->prt_va) {
r = -ENOMEM; r = -ENOMEM;
amdgpu_vm_fini(adev, &fpriv->vm); goto error_vm;
kfree(fpriv);
goto out_suspend;
} }
if (amdgpu_sriov_vf(adev)) { if (amdgpu_sriov_vf(adev)) {
r = amdgpu_map_static_csa(adev, &fpriv->vm, &fpriv->csa_va); r = amdgpu_map_static_csa(adev, &fpriv->vm, &fpriv->csa_va);
if (r) { if (r)
amdgpu_vm_fini(adev, &fpriv->vm); goto error_vm;
kfree(fpriv);
goto out_suspend;
}
} }
mutex_init(&fpriv->bo_list_lock); mutex_init(&fpriv->bo_list_lock);
...@@ -849,6 +869,16 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) ...@@ -849,6 +869,16 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
amdgpu_ctx_mgr_init(&fpriv->ctx_mgr); amdgpu_ctx_mgr_init(&fpriv->ctx_mgr);
file_priv->driver_priv = fpriv; file_priv->driver_priv = fpriv;
goto out_suspend;
error_vm:
amdgpu_vm_fini(adev, &fpriv->vm);
error_pasid:
if (pasid)
amdgpu_pasid_free(pasid);
kfree(fpriv);
out_suspend: out_suspend:
pm_runtime_mark_last_busy(dev->dev); pm_runtime_mark_last_busy(dev->dev);
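The open path above is restructured from per-branch cleanup into a single unwind ladder, and the PASID is treated as optional (0 means "run without one") rather than as a fatal failure. A standalone sketch of that shape, using hypothetical helpers rather than the driver's own:

/* Illustrative sketch (not driver code): goto-based unwinding with an
 * optional resource that degrades to 0 instead of failing the open. */
#include <stdio.h>

static int alloc_id(void)     { return 42; }	/* may return < 0 */
static void free_id(int id)   { (void)id; }
static int vm_init(int pasid) { (void)pasid; return 0; }
static void vm_fini(void)     { }
static int add_prt_bo(void)   { return 0; }

static int open_device(void)
{
	int r, pasid;

	pasid = alloc_id();
	if (pasid < 0) {
		fprintf(stderr, "no more PASIDs, continuing without one\n");
		pasid = 0;		/* soft failure: run without a PASID */
	}

	r = vm_init(pasid);
	if (r)
		goto error_pasid;

	r = add_prt_bo();
	if (r)
		goto error_vm;

	return 0;			/* success */

error_vm:
	vm_fini();
error_pasid:
	if (pasid)
		free_id(pasid);
	return r;
}

int main(void) { return open_device(); }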
...@@ -871,6 +901,8 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev, ...@@ -871,6 +901,8 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
struct amdgpu_device *adev = dev->dev_private; struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_fpriv *fpriv = file_priv->driver_priv; struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
struct amdgpu_bo_list *list; struct amdgpu_bo_list *list;
struct amdgpu_bo *pd;
unsigned int pasid;
int handle; int handle;
if (!fpriv) if (!fpriv)
...@@ -895,7 +927,13 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev, ...@@ -895,7 +927,13 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
amdgpu_bo_unreserve(adev->virt.csa_obj); amdgpu_bo_unreserve(adev->virt.csa_obj);
} }
pasid = fpriv->vm.pasid;
pd = amdgpu_bo_ref(fpriv->vm.root.base.bo);
amdgpu_vm_fini(adev, &fpriv->vm); amdgpu_vm_fini(adev, &fpriv->vm);
if (pasid)
amdgpu_pasid_free_delayed(pd->tbo.resv, pasid);
amdgpu_bo_unref(&pd);
idr_for_each_entry(&fpriv->bo_list_handles, list, handle) idr_for_each_entry(&fpriv->bo_list_handles, list, handle)
amdgpu_bo_list_free(list); amdgpu_bo_list_free(list);
...@@ -947,11 +985,11 @@ u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe) ...@@ -947,11 +985,11 @@ u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe)
*/ */
do { do {
count = amdgpu_display_vblank_get_counter(adev, pipe); count = amdgpu_display_vblank_get_counter(adev, pipe);
/* Ask amdgpu_get_crtc_scanoutpos to return vpos as /* Ask amdgpu_display_get_crtc_scanoutpos to return
* distance to start of vblank, instead of regular * vpos as distance to start of vblank, instead of
* vertical scanout pos. * regular vertical scanout pos.
*/ */
stat = amdgpu_get_crtc_scanoutpos( stat = amdgpu_display_get_crtc_scanoutpos(
dev, pipe, GET_DISTANCE_TO_VBLANKSTART, dev, pipe, GET_DISTANCE_TO_VBLANKSTART,
&vpos, &hpos, NULL, NULL, &vpos, &hpos, NULL, NULL,
&adev->mode_info.crtcs[pipe]->base.hwmode); &adev->mode_info.crtcs[pipe]->base.hwmode);
...@@ -992,7 +1030,7 @@ u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe) ...@@ -992,7 +1030,7 @@ u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe)
int amdgpu_enable_vblank_kms(struct drm_device *dev, unsigned int pipe) int amdgpu_enable_vblank_kms(struct drm_device *dev, unsigned int pipe)
{ {
struct amdgpu_device *adev = dev->dev_private; struct amdgpu_device *adev = dev->dev_private;
int idx = amdgpu_crtc_idx_to_irq_type(adev, pipe); int idx = amdgpu_display_crtc_idx_to_irq_type(adev, pipe);
return amdgpu_irq_get(adev, &adev->crtc_irq, idx); return amdgpu_irq_get(adev, &adev->crtc_irq, idx);
} }
...@@ -1008,7 +1046,7 @@ int amdgpu_enable_vblank_kms(struct drm_device *dev, unsigned int pipe) ...@@ -1008,7 +1046,7 @@ int amdgpu_enable_vblank_kms(struct drm_device *dev, unsigned int pipe)
void amdgpu_disable_vblank_kms(struct drm_device *dev, unsigned int pipe) void amdgpu_disable_vblank_kms(struct drm_device *dev, unsigned int pipe)
{ {
struct amdgpu_device *adev = dev->dev_private; struct amdgpu_device *adev = dev->dev_private;
int idx = amdgpu_crtc_idx_to_irq_type(adev, pipe); int idx = amdgpu_display_crtc_idx_to_irq_type(adev, pipe);
amdgpu_irq_put(adev, &adev->crtc_irq, idx); amdgpu_irq_put(adev, &adev->crtc_irq, idx);
} }
......
...@@ -267,8 +267,6 @@ struct amdgpu_display_funcs { ...@@ -267,8 +267,6 @@ struct amdgpu_display_funcs {
void (*bandwidth_update)(struct amdgpu_device *adev); void (*bandwidth_update)(struct amdgpu_device *adev);
/* get frame count */ /* get frame count */
u32 (*vblank_get_counter)(struct amdgpu_device *adev, int crtc); u32 (*vblank_get_counter)(struct amdgpu_device *adev, int crtc);
/* wait for vblank */
void (*vblank_wait)(struct amdgpu_device *adev, int crtc);
/* set backlight level */ /* set backlight level */
void (*backlight_set_level)(struct amdgpu_encoder *amdgpu_encoder, void (*backlight_set_level)(struct amdgpu_encoder *amdgpu_encoder,
u8 level); u8 level);
...@@ -608,7 +606,7 @@ struct amdgpu_mst_connector { ...@@ -608,7 +606,7 @@ struct amdgpu_mst_connector {
#define ENCODER_MODE_IS_DP(em) (((em) == ATOM_ENCODER_MODE_DP) || \ #define ENCODER_MODE_IS_DP(em) (((em) == ATOM_ENCODER_MODE_DP) || \
((em) == ATOM_ENCODER_MODE_DP_MST)) ((em) == ATOM_ENCODER_MODE_DP_MST))
/* Driver internal use only flags of amdgpu_get_crtc_scanoutpos() */ /* Driver internal use only flags of amdgpu_display_get_crtc_scanoutpos() */
#define DRM_SCANOUTPOS_VALID (1 << 0) #define DRM_SCANOUTPOS_VALID (1 << 0)
#define DRM_SCANOUTPOS_IN_VBLANK (1 << 1) #define DRM_SCANOUTPOS_IN_VBLANK (1 << 1)
#define DRM_SCANOUTPOS_ACCURATE (1 << 2) #define DRM_SCANOUTPOS_ACCURATE (1 << 2)
...@@ -627,30 +625,31 @@ bool amdgpu_dig_monitor_is_duallink(struct drm_encoder *encoder, ...@@ -627,30 +625,31 @@ bool amdgpu_dig_monitor_is_duallink(struct drm_encoder *encoder,
u16 amdgpu_encoder_get_dp_bridge_encoder_id(struct drm_encoder *encoder); u16 amdgpu_encoder_get_dp_bridge_encoder_id(struct drm_encoder *encoder);
struct drm_encoder *amdgpu_get_external_encoder(struct drm_encoder *encoder); struct drm_encoder *amdgpu_get_external_encoder(struct drm_encoder *encoder);
bool amdgpu_ddc_probe(struct amdgpu_connector *amdgpu_connector, bool use_aux); bool amdgpu_display_ddc_probe(struct amdgpu_connector *amdgpu_connector,
bool use_aux);
void amdgpu_encoder_set_active_device(struct drm_encoder *encoder); void amdgpu_encoder_set_active_device(struct drm_encoder *encoder);
int amdgpu_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe, int amdgpu_display_get_crtc_scanoutpos(struct drm_device *dev,
unsigned int flags, int *vpos, int *hpos, unsigned int pipe, unsigned int flags, int *vpos,
ktime_t *stime, ktime_t *etime, int *hpos, ktime_t *stime, ktime_t *etime,
const struct drm_display_mode *mode); const struct drm_display_mode *mode);
int amdgpu_framebuffer_init(struct drm_device *dev, int amdgpu_display_framebuffer_init(struct drm_device *dev,
struct amdgpu_framebuffer *rfb, struct amdgpu_framebuffer *rfb,
const struct drm_mode_fb_cmd2 *mode_cmd, const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object *obj); struct drm_gem_object *obj);
int amdgpufb_remove(struct drm_device *dev, struct drm_framebuffer *fb); int amdgpufb_remove(struct drm_device *dev, struct drm_framebuffer *fb);
void amdgpu_enc_destroy(struct drm_encoder *encoder); void amdgpu_enc_destroy(struct drm_encoder *encoder);
void amdgpu_copy_fb(struct drm_device *dev, struct drm_gem_object *dst_obj); void amdgpu_copy_fb(struct drm_device *dev, struct drm_gem_object *dst_obj);
bool amdgpu_crtc_scaling_mode_fixup(struct drm_crtc *crtc, bool amdgpu_display_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
const struct drm_display_mode *mode, const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode); struct drm_display_mode *adjusted_mode);
void amdgpu_panel_mode_fixup(struct drm_encoder *encoder, void amdgpu_panel_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode); struct drm_display_mode *adjusted_mode);
int amdgpu_crtc_idx_to_irq_type(struct amdgpu_device *adev, int crtc); int amdgpu_display_crtc_idx_to_irq_type(struct amdgpu_device *adev, int crtc);
/* fbdev layer */ /* fbdev layer */
int amdgpu_fbdev_init(struct amdgpu_device *adev); int amdgpu_fbdev_init(struct amdgpu_device *adev);
...@@ -662,15 +661,15 @@ bool amdgpu_fbdev_robj_is_fb(struct amdgpu_device *adev, struct amdgpu_bo *robj) ...@@ -662,15 +661,15 @@ bool amdgpu_fbdev_robj_is_fb(struct amdgpu_device *adev, struct amdgpu_bo *robj)
int amdgpu_align_pitch(struct amdgpu_device *adev, int width, int bpp, bool tiled); int amdgpu_align_pitch(struct amdgpu_device *adev, int width, int bpp, bool tiled);
/* amdgpu_display.c */ /* amdgpu_display.c */
void amdgpu_print_display_setup(struct drm_device *dev); void amdgpu_display_print_display_setup(struct drm_device *dev);
int amdgpu_modeset_create_props(struct amdgpu_device *adev); int amdgpu_display_modeset_create_props(struct amdgpu_device *adev);
int amdgpu_crtc_set_config(struct drm_mode_set *set, int amdgpu_display_crtc_set_config(struct drm_mode_set *set,
struct drm_modeset_acquire_ctx *ctx); struct drm_modeset_acquire_ctx *ctx);
int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc, int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
struct drm_framebuffer *fb, struct drm_framebuffer *fb,
struct drm_pending_vblank_event *event, struct drm_pending_vblank_event *event,
uint32_t page_flip_flags, uint32_t target, uint32_t page_flip_flags, uint32_t target,
struct drm_modeset_acquire_ctx *ctx); struct drm_modeset_acquire_ctx *ctx);
extern const struct drm_mode_config_funcs amdgpu_mode_funcs; extern const struct drm_mode_config_funcs amdgpu_mode_funcs;
#endif #endif
...@@ -83,7 +83,7 @@ void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain) ...@@ -83,7 +83,7 @@ void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
u32 c = 0; u32 c = 0;
if (domain & AMDGPU_GEM_DOMAIN_VRAM) { if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
unsigned visible_pfn = adev->mc.visible_vram_size >> PAGE_SHIFT; unsigned visible_pfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
places[c].fpfn = 0; places[c].fpfn = 0;
places[c].lpfn = 0; places[c].lpfn = 0;
...@@ -103,7 +103,7 @@ void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain) ...@@ -103,7 +103,7 @@ void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
if (domain & AMDGPU_GEM_DOMAIN_GTT) { if (domain & AMDGPU_GEM_DOMAIN_GTT) {
places[c].fpfn = 0; places[c].fpfn = 0;
if (flags & AMDGPU_GEM_CREATE_SHADOW) if (flags & AMDGPU_GEM_CREATE_SHADOW)
places[c].lpfn = adev->mc.gart_size >> PAGE_SHIFT; places[c].lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
else else
places[c].lpfn = 0; places[c].lpfn = 0;
places[c].flags = TTM_PL_FLAG_TT; places[c].flags = TTM_PL_FLAG_TT;
...@@ -190,7 +190,7 @@ int amdgpu_bo_create_reserved(struct amdgpu_device *adev, ...@@ -190,7 +190,7 @@ int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
r = amdgpu_bo_create(adev, size, align, true, domain, r = amdgpu_bo_create(adev, size, align, true, domain,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
NULL, NULL, 0, bo_ptr); NULL, NULL, bo_ptr);
if (r) { if (r) {
dev_err(adev->dev, "(%d) failed to allocate kernel bo\n", dev_err(adev->dev, "(%d) failed to allocate kernel bo\n",
r); r);
...@@ -336,7 +336,6 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev, ...@@ -336,7 +336,6 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
bool kernel, u32 domain, u64 flags, bool kernel, u32 domain, u64 flags,
struct sg_table *sg, struct sg_table *sg,
struct reservation_object *resv, struct reservation_object *resv,
uint64_t init_value,
struct amdgpu_bo **bo_ptr) struct amdgpu_bo **bo_ptr)
{ {
struct ttm_operation_ctx ctx = { struct ttm_operation_ctx ctx = {
...@@ -372,11 +371,7 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev, ...@@ -372,11 +371,7 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
bo = kzalloc(sizeof(struct amdgpu_bo), GFP_KERNEL); bo = kzalloc(sizeof(struct amdgpu_bo), GFP_KERNEL);
if (bo == NULL) if (bo == NULL)
return -ENOMEM; return -ENOMEM;
r = drm_gem_object_init(adev->ddev, &bo->gem_base, size); drm_gem_private_object_init(adev->ddev, &bo->gem_base, size);
if (unlikely(r)) {
kfree(bo);
return r;
}
INIT_LIST_HEAD(&bo->shadow_list); INIT_LIST_HEAD(&bo->shadow_list);
INIT_LIST_HEAD(&bo->va); INIT_LIST_HEAD(&bo->va);
bo->preferred_domains = domain & (AMDGPU_GEM_DOMAIN_VRAM | bo->preferred_domains = domain & (AMDGPU_GEM_DOMAIN_VRAM |
...@@ -428,9 +423,9 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev, ...@@ -428,9 +423,9 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
if (unlikely(r != 0)) if (unlikely(r != 0))
return r; return r;
if (adev->mc.visible_vram_size < adev->mc.real_vram_size && if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
bo->tbo.mem.mem_type == TTM_PL_VRAM && bo->tbo.mem.mem_type == TTM_PL_VRAM &&
bo->tbo.mem.start < adev->mc.visible_vram_size >> PAGE_SHIFT) bo->tbo.mem.start < adev->gmc.visible_vram_size >> PAGE_SHIFT)
amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved,
ctx.bytes_moved); ctx.bytes_moved);
else else
...@@ -443,7 +438,7 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev, ...@@ -443,7 +438,7 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) { bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) {
struct dma_fence *fence; struct dma_fence *fence;
r = amdgpu_fill_buffer(bo, init_value, bo->tbo.resv, &fence); r = amdgpu_fill_buffer(bo, 0, bo->tbo.resv, &fence);
if (unlikely(r)) if (unlikely(r))
goto fail_unreserve; goto fail_unreserve;
...@@ -484,7 +479,7 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev, ...@@ -484,7 +479,7 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
AMDGPU_GEM_DOMAIN_GTT, AMDGPU_GEM_DOMAIN_GTT,
AMDGPU_GEM_CREATE_CPU_GTT_USWC | AMDGPU_GEM_CREATE_CPU_GTT_USWC |
AMDGPU_GEM_CREATE_SHADOW, AMDGPU_GEM_CREATE_SHADOW,
NULL, bo->tbo.resv, 0, NULL, bo->tbo.resv,
&bo->shadow); &bo->shadow);
if (!r) { if (!r) {
bo->shadow->parent = amdgpu_bo_ref(bo); bo->shadow->parent = amdgpu_bo_ref(bo);
...@@ -496,22 +491,18 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev, ...@@ -496,22 +491,18 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
return r; return r;
} }
/* init_value will only take effect when flags contains
* AMDGPU_GEM_CREATE_VRAM_CLEARED.
*/
int amdgpu_bo_create(struct amdgpu_device *adev, int amdgpu_bo_create(struct amdgpu_device *adev,
unsigned long size, int byte_align, unsigned long size, int byte_align,
bool kernel, u32 domain, u64 flags, bool kernel, u32 domain, u64 flags,
struct sg_table *sg, struct sg_table *sg,
struct reservation_object *resv, struct reservation_object *resv,
uint64_t init_value,
struct amdgpu_bo **bo_ptr) struct amdgpu_bo **bo_ptr)
{ {
uint64_t parent_flags = flags & ~AMDGPU_GEM_CREATE_SHADOW; uint64_t parent_flags = flags & ~AMDGPU_GEM_CREATE_SHADOW;
int r; int r;
r = amdgpu_bo_do_create(adev, size, byte_align, kernel, domain, r = amdgpu_bo_do_create(adev, size, byte_align, kernel, domain,
parent_flags, sg, resv, init_value, bo_ptr); parent_flags, sg, resv, bo_ptr);
if (r) if (r)
return r; return r;
...@@ -832,25 +823,25 @@ static const char *amdgpu_vram_names[] = { ...@@ -832,25 +823,25 @@ static const char *amdgpu_vram_names[] = {
int amdgpu_bo_init(struct amdgpu_device *adev) int amdgpu_bo_init(struct amdgpu_device *adev)
{ {
/* reserve PAT memory space to WC for VRAM */ /* reserve PAT memory space to WC for VRAM */
arch_io_reserve_memtype_wc(adev->mc.aper_base, arch_io_reserve_memtype_wc(adev->gmc.aper_base,
adev->mc.aper_size); adev->gmc.aper_size);
/* Add an MTRR for the VRAM */ /* Add an MTRR for the VRAM */
adev->mc.vram_mtrr = arch_phys_wc_add(adev->mc.aper_base, adev->gmc.vram_mtrr = arch_phys_wc_add(adev->gmc.aper_base,
adev->mc.aper_size); adev->gmc.aper_size);
DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n", DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
adev->mc.mc_vram_size >> 20, adev->gmc.mc_vram_size >> 20,
(unsigned long long)adev->mc.aper_size >> 20); (unsigned long long)adev->gmc.aper_size >> 20);
DRM_INFO("RAM width %dbits %s\n", DRM_INFO("RAM width %dbits %s\n",
adev->mc.vram_width, amdgpu_vram_names[adev->mc.vram_type]); adev->gmc.vram_width, amdgpu_vram_names[adev->gmc.vram_type]);
return amdgpu_ttm_init(adev); return amdgpu_ttm_init(adev);
} }
void amdgpu_bo_fini(struct amdgpu_device *adev) void amdgpu_bo_fini(struct amdgpu_device *adev)
{ {
amdgpu_ttm_fini(adev); amdgpu_ttm_fini(adev);
arch_phys_wc_del(adev->mc.vram_mtrr); arch_phys_wc_del(adev->gmc.vram_mtrr);
arch_io_free_memtype_wc(adev->mc.aper_base, adev->mc.aper_size); arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
} }
int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo, int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
...@@ -980,7 +971,7 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo) ...@@ -980,7 +971,7 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
size = bo->mem.num_pages << PAGE_SHIFT; size = bo->mem.num_pages << PAGE_SHIFT;
offset = bo->mem.start << PAGE_SHIFT; offset = bo->mem.start << PAGE_SHIFT;
if ((offset + size) <= adev->mc.visible_vram_size) if ((offset + size) <= adev->gmc.visible_vram_size)
return 0; return 0;
/* Can't move a pinned BO to visible VRAM */ /* Can't move a pinned BO to visible VRAM */
...@@ -1003,7 +994,7 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo) ...@@ -1003,7 +994,7 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
offset = bo->mem.start << PAGE_SHIFT; offset = bo->mem.start << PAGE_SHIFT;
/* this should never happen */ /* this should never happen */
if (bo->mem.mem_type == TTM_PL_VRAM && if (bo->mem.mem_type == TTM_PL_VRAM &&
(offset + size) > adev->mc.visible_vram_size) (offset + size) > adev->gmc.visible_vram_size)
return -EINVAL; return -EINVAL;
return 0; return 0;
......
...@@ -206,7 +206,6 @@ int amdgpu_bo_create(struct amdgpu_device *adev, ...@@ -206,7 +206,6 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
bool kernel, u32 domain, u64 flags, bool kernel, u32 domain, u64 flags,
struct sg_table *sg, struct sg_table *sg,
struct reservation_object *resv, struct reservation_object *resv,
uint64_t init_value,
struct amdgpu_bo **bo_ptr); struct amdgpu_bo **bo_ptr);
int amdgpu_bo_create_reserved(struct amdgpu_device *adev, int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
unsigned long size, int align, unsigned long size, int align,
......
...@@ -26,9 +26,12 @@ ...@@ -26,9 +26,12 @@
#include <drm/drmP.h> #include <drm/drmP.h>
#include "amdgpu.h" #include "amdgpu.h"
#include "amdgpu_display.h"
#include <drm/amdgpu_drm.h> #include <drm/amdgpu_drm.h>
#include <linux/dma-buf.h> #include <linux/dma-buf.h>
static const struct dma_buf_ops amdgpu_dmabuf_ops;
struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj) struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj)
{ {
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
...@@ -103,7 +106,7 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev, ...@@ -103,7 +106,7 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
ww_mutex_lock(&resv->lock, NULL); ww_mutex_lock(&resv->lock, NULL);
ret = amdgpu_bo_create(adev, attach->dmabuf->size, PAGE_SIZE, false, ret = amdgpu_bo_create(adev, attach->dmabuf->size, PAGE_SIZE, false,
AMDGPU_GEM_DOMAIN_GTT, 0, sg, resv, 0, &bo); AMDGPU_GEM_DOMAIN_GTT, 0, sg, resv, &bo);
ww_mutex_unlock(&resv->lock); ww_mutex_unlock(&resv->lock);
if (ret) if (ret)
return ERR_PTR(ret); return ERR_PTR(ret);
...@@ -112,49 +115,72 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev, ...@@ -112,49 +115,72 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
return &bo->gem_base; return &bo->gem_base;
} }
int amdgpu_gem_prime_pin(struct drm_gem_object *obj) static int amdgpu_gem_map_attach(struct dma_buf *dma_buf,
struct device *target_dev,
struct dma_buf_attachment *attach)
{ {
struct drm_gem_object *obj = dma_buf->priv;
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
long ret = 0; long r;
ret = amdgpu_bo_reserve(bo, false); r = drm_gem_map_attach(dma_buf, target_dev, attach);
if (unlikely(ret != 0)) if (r)
return ret; return r;
/* r = amdgpu_bo_reserve(bo, false);
* Wait for all shared fences to complete before we switch to future if (unlikely(r != 0))
* use of exclusive fence on this prime shared bo. goto error_detach;
*/
ret = reservation_object_wait_timeout_rcu(bo->tbo.resv, true, false,
MAX_SCHEDULE_TIMEOUT); if (dma_buf->ops != &amdgpu_dmabuf_ops) {
if (unlikely(ret < 0)) { /*
DRM_DEBUG_PRIME("Fence wait failed: %li\n", ret); * Wait for all shared fences to complete before we switch to future
amdgpu_bo_unreserve(bo); * use of exclusive fence on this prime shared bo.
return ret; */
r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
true, false,
MAX_SCHEDULE_TIMEOUT);
if (unlikely(r < 0)) {
DRM_DEBUG_PRIME("Fence wait failed: %li\n", r);
goto error_unreserve;
}
} }
/* pin buffer into GTT */ /* pin buffer into GTT */
ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT, NULL); r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT, NULL);
if (likely(ret == 0)) if (r)
goto error_unreserve;
if (dma_buf->ops != &amdgpu_dmabuf_ops)
bo->prime_shared_count++; bo->prime_shared_count++;
error_unreserve:
amdgpu_bo_unreserve(bo); amdgpu_bo_unreserve(bo);
return ret;
error_detach:
if (r)
drm_gem_map_detach(dma_buf, attach);
return r;
} }
void amdgpu_gem_prime_unpin(struct drm_gem_object *obj) static void amdgpu_gem_map_detach(struct dma_buf *dma_buf,
struct dma_buf_attachment *attach)
{ {
struct drm_gem_object *obj = dma_buf->priv;
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
int ret = 0; int ret = 0;
ret = amdgpu_bo_reserve(bo, true); ret = amdgpu_bo_reserve(bo, true);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
return; goto error;
amdgpu_bo_unpin(bo); amdgpu_bo_unpin(bo);
if (bo->prime_shared_count) if (dma_buf->ops != &amdgpu_dmabuf_ops && bo->prime_shared_count)
bo->prime_shared_count--; bo->prime_shared_count--;
amdgpu_bo_unreserve(bo); amdgpu_bo_unreserve(bo);
error:
drm_gem_map_detach(dma_buf, attach);
} }
struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *obj) struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *obj)
...@@ -164,6 +190,50 @@ struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *obj) ...@@ -164,6 +190,50 @@ struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *obj)
return bo->tbo.resv; return bo->tbo.resv;
} }
static int amdgpu_gem_begin_cpu_access(struct dma_buf *dma_buf,
enum dma_data_direction direction)
{
struct amdgpu_bo *bo = gem_to_amdgpu_bo(dma_buf->priv);
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
struct ttm_operation_ctx ctx = { true, false };
u32 domain = amdgpu_display_framebuffer_domains(adev);
int ret;
bool reads = (direction == DMA_BIDIRECTIONAL ||
direction == DMA_FROM_DEVICE);
if (!reads || !(domain & AMDGPU_GEM_DOMAIN_GTT))
return 0;
/* move to gtt */
ret = amdgpu_bo_reserve(bo, false);
if (unlikely(ret != 0))
return ret;
if (!bo->pin_count && (bo->allowed_domains & AMDGPU_GEM_DOMAIN_GTT)) {
amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
}
amdgpu_bo_unreserve(bo);
return ret;
}
static const struct dma_buf_ops amdgpu_dmabuf_ops = {
.attach = amdgpu_gem_map_attach,
.detach = amdgpu_gem_map_detach,
.map_dma_buf = drm_gem_map_dma_buf,
.unmap_dma_buf = drm_gem_unmap_dma_buf,
.release = drm_gem_dmabuf_release,
.begin_cpu_access = amdgpu_gem_begin_cpu_access,
.map = drm_gem_dmabuf_kmap,
.map_atomic = drm_gem_dmabuf_kmap_atomic,
.unmap = drm_gem_dmabuf_kunmap,
.unmap_atomic = drm_gem_dmabuf_kunmap_atomic,
.mmap = drm_gem_dmabuf_mmap,
.vmap = drm_gem_dmabuf_vmap,
.vunmap = drm_gem_dmabuf_vunmap,
};
struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev, struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
struct drm_gem_object *gobj, struct drm_gem_object *gobj,
int flags) int flags)
...@@ -176,7 +246,30 @@ struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev, ...@@ -176,7 +246,30 @@ struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
return ERR_PTR(-EPERM); return ERR_PTR(-EPERM);
buf = drm_gem_prime_export(dev, gobj, flags); buf = drm_gem_prime_export(dev, gobj, flags);
if (!IS_ERR(buf)) if (!IS_ERR(buf)) {
buf->file->f_mapping = dev->anon_inode->i_mapping; buf->file->f_mapping = dev->anon_inode->i_mapping;
buf->ops = &amdgpu_dmabuf_ops;
}
return buf; return buf;
} }
struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
struct dma_buf *dma_buf)
{
struct drm_gem_object *obj;
if (dma_buf->ops == &amdgpu_dmabuf_ops) {
obj = dma_buf->priv;
if (obj->dev == dev) {
/*
* Importing a dmabuf exported from our own GEM increases the
* refcount on the GEM itself instead of the dmabuf's f_count.
*/
drm_gem_object_get(obj);
return obj;
}
}
return drm_gem_prime_import(dev, dma_buf);
}
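Both the new attach/detach callbacks and amdgpu_gem_prime_import key off whether dma_buf->ops points at amdgpu's own ops table, which is how the driver tells its own exports apart from foreign ones and short-circuits a self-import into a plain reference grab. A minimal sketch of that idea with invented types (not the dma-buf API itself):

/* Illustrative sketch (not driver code): detect a buffer we exported
 * ourselves by comparing its ops pointer, and just take a reference on
 * the underlying object instead of re-importing it. */
#include <stdio.h>

struct buf_ops { const char *name; };
struct object  { int refcount; void *dev; };
struct shared_buf {
	const struct buf_ops *ops;
	struct object *priv;	/* our object when we exported it */
};

static const struct buf_ops my_ops = { "mine" };

static struct object *import(struct shared_buf *buf, void *dev)
{
	if (buf->ops == &my_ops && buf->priv->dev == dev) {
		/* our own export for the same device: just grab a ref */
		buf->priv->refcount++;
		return buf->priv;
	}
	/* otherwise fall back to the generic import path */
	return NULL;
}

int main(void)
{
	struct object obj = { .refcount = 1, .dev = (void *)1 };
	struct shared_buf buf = { .ops = &my_ops, .priv = &obj };

	if (import(&buf, (void *)1))
		printf("self-import, refcount now %d\n", obj.refcount);
	return 0;
}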
...@@ -51,29 +51,10 @@ static int psp_sw_init(void *handle) ...@@ -51,29 +51,10 @@ static int psp_sw_init(void *handle)
switch (adev->asic_type) { switch (adev->asic_type) {
case CHIP_VEGA10: case CHIP_VEGA10:
psp->init_microcode = psp_v3_1_init_microcode; psp_v3_1_set_psp_funcs(psp);
psp->bootloader_load_sysdrv = psp_v3_1_bootloader_load_sysdrv;
psp->bootloader_load_sos = psp_v3_1_bootloader_load_sos;
psp->prep_cmd_buf = psp_v3_1_prep_cmd_buf;
psp->ring_init = psp_v3_1_ring_init;
psp->ring_create = psp_v3_1_ring_create;
psp->ring_stop = psp_v3_1_ring_stop;
psp->ring_destroy = psp_v3_1_ring_destroy;
psp->cmd_submit = psp_v3_1_cmd_submit;
psp->compare_sram_data = psp_v3_1_compare_sram_data;
psp->smu_reload_quirk = psp_v3_1_smu_reload_quirk;
psp->mode1_reset = psp_v3_1_mode1_reset;
break; break;
case CHIP_RAVEN: case CHIP_RAVEN:
psp->init_microcode = psp_v10_0_init_microcode; psp_v10_0_set_psp_funcs(psp);
psp->prep_cmd_buf = psp_v10_0_prep_cmd_buf;
psp->ring_init = psp_v10_0_ring_init;
psp->ring_create = psp_v10_0_ring_create;
psp->ring_stop = psp_v10_0_ring_stop;
psp->ring_destroy = psp_v10_0_ring_destroy;
psp->cmd_submit = psp_v10_0_cmd_submit;
psp->compare_sram_data = psp_v10_0_compare_sram_data;
psp->mode1_reset = psp_v10_0_mode1_reset;
break; break;
default: default:
return -EINVAL; return -EINVAL;
...@@ -512,19 +493,8 @@ static int psp_resume(void *handle) ...@@ -512,19 +493,8 @@ static int psp_resume(void *handle)
return ret; return ret;
} }
static bool psp_check_reset(void* handle) int psp_gpu_reset(struct amdgpu_device *adev)
{ {
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (adev->flags & AMD_IS_APU)
return true;
return false;
}
static int psp_reset(void* handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
return psp_mode1_reset(&adev->psp); return psp_mode1_reset(&adev->psp);
} }
...@@ -571,9 +541,9 @@ const struct amd_ip_funcs psp_ip_funcs = { ...@@ -571,9 +541,9 @@ const struct amd_ip_funcs psp_ip_funcs = {
.suspend = psp_suspend, .suspend = psp_suspend,
.resume = psp_resume, .resume = psp_resume,
.is_idle = NULL, .is_idle = NULL,
.check_soft_reset = psp_check_reset, .check_soft_reset = NULL,
.wait_for_idle = NULL, .wait_for_idle = NULL,
.soft_reset = psp_reset, .soft_reset = NULL,
.set_clockgating_state = psp_set_clockgating_state, .set_clockgating_state = psp_set_clockgating_state,
.set_powergating_state = psp_set_powergating_state, .set_powergating_state = psp_set_powergating_state,
}; };
......
...@@ -33,6 +33,8 @@ ...@@ -33,6 +33,8 @@
#define PSP_ASD_SHARED_MEM_SIZE 0x4000 #define PSP_ASD_SHARED_MEM_SIZE 0x4000
#define PSP_1_MEG 0x100000 #define PSP_1_MEG 0x100000
struct psp_context;
enum psp_ring_type enum psp_ring_type
{ {
PSP_RING_TYPE__INVALID = 0, PSP_RING_TYPE__INVALID = 0,
...@@ -53,12 +55,8 @@ struct psp_ring ...@@ -53,12 +55,8 @@ struct psp_ring
uint32_t ring_size; uint32_t ring_size;
}; };
struct psp_context struct psp_funcs
{ {
struct amdgpu_device *adev;
struct psp_ring km_ring;
struct psp_gfx_cmd_resp *cmd;
int (*init_microcode)(struct psp_context *psp); int (*init_microcode)(struct psp_context *psp);
int (*bootloader_load_sysdrv)(struct psp_context *psp); int (*bootloader_load_sysdrv)(struct psp_context *psp);
int (*bootloader_load_sos)(struct psp_context *psp); int (*bootloader_load_sos)(struct psp_context *psp);
...@@ -77,6 +75,15 @@ struct psp_context ...@@ -77,6 +75,15 @@ struct psp_context
enum AMDGPU_UCODE_ID ucode_type); enum AMDGPU_UCODE_ID ucode_type);
bool (*smu_reload_quirk)(struct psp_context *psp); bool (*smu_reload_quirk)(struct psp_context *psp);
int (*mode1_reset)(struct psp_context *psp); int (*mode1_reset)(struct psp_context *psp);
};
struct psp_context
{
struct amdgpu_device *adev;
struct psp_ring km_ring;
struct psp_gfx_cmd_resp *cmd;
const struct psp_funcs *funcs;
/* fence buffer */ /* fence buffer */
struct amdgpu_bo *fw_pri_bo; struct amdgpu_bo *fw_pri_bo;
...@@ -123,25 +130,25 @@ struct amdgpu_psp_funcs { ...@@ -123,25 +130,25 @@ struct amdgpu_psp_funcs {
enum AMDGPU_UCODE_ID); enum AMDGPU_UCODE_ID);
}; };
#define psp_prep_cmd_buf(ucode, type) (psp)->prep_cmd_buf((ucode), (type)) #define psp_prep_cmd_buf(ucode, type) (psp)->funcs->prep_cmd_buf((ucode), (type))
#define psp_ring_init(psp, type) (psp)->ring_init((psp), (type)) #define psp_ring_init(psp, type) (psp)->funcs->ring_init((psp), (type))
#define psp_ring_create(psp, type) (psp)->ring_create((psp), (type)) #define psp_ring_create(psp, type) (psp)->funcs->ring_create((psp), (type))
#define psp_ring_stop(psp, type) (psp)->ring_stop((psp), (type)) #define psp_ring_stop(psp, type) (psp)->funcs->ring_stop((psp), (type))
#define psp_ring_destroy(psp, type) ((psp)->ring_destroy((psp), (type))) #define psp_ring_destroy(psp, type) ((psp)->funcs->ring_destroy((psp), (type)))
#define psp_cmd_submit(psp, ucode, cmd_mc, fence_mc, index) \ #define psp_cmd_submit(psp, ucode, cmd_mc, fence_mc, index) \
(psp)->cmd_submit((psp), (ucode), (cmd_mc), (fence_mc), (index)) (psp)->funcs->cmd_submit((psp), (ucode), (cmd_mc), (fence_mc), (index))
#define psp_compare_sram_data(psp, ucode, type) \ #define psp_compare_sram_data(psp, ucode, type) \
(psp)->compare_sram_data((psp), (ucode), (type)) (psp)->funcs->compare_sram_data((psp), (ucode), (type))
#define psp_init_microcode(psp) \ #define psp_init_microcode(psp) \
((psp)->init_microcode ? (psp)->init_microcode((psp)) : 0) ((psp)->funcs->init_microcode ? (psp)->funcs->init_microcode((psp)) : 0)
#define psp_bootloader_load_sysdrv(psp) \ #define psp_bootloader_load_sysdrv(psp) \
((psp)->bootloader_load_sysdrv ? (psp)->bootloader_load_sysdrv((psp)) : 0) ((psp)->funcs->bootloader_load_sysdrv ? (psp)->funcs->bootloader_load_sysdrv((psp)) : 0)
#define psp_bootloader_load_sos(psp) \ #define psp_bootloader_load_sos(psp) \
((psp)->bootloader_load_sos ? (psp)->bootloader_load_sos((psp)) : 0) ((psp)->funcs->bootloader_load_sos ? (psp)->funcs->bootloader_load_sos((psp)) : 0)
#define psp_smu_reload_quirk(psp) \ #define psp_smu_reload_quirk(psp) \
((psp)->smu_reload_quirk ? (psp)->smu_reload_quirk((psp)) : false) ((psp)->funcs->smu_reload_quirk ? (psp)->funcs->smu_reload_quirk((psp)) : false)
#define psp_mode1_reset(psp) \ #define psp_mode1_reset(psp) \
((psp)->mode1_reset ? (psp)->mode1_reset((psp)) : false) ((psp)->funcs->mode1_reset ? (psp)->funcs->mode1_reset((psp)) : false)
extern const struct amd_ip_funcs psp_ip_funcs; extern const struct amd_ip_funcs psp_ip_funcs;
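The PSP rework replaces the per-ASIC function pointers embedded in psp_context with a single const psp_funcs table installed by the set_psp_funcs call, and the accessor macros now dereference funcs and fall back when an entry is NULL. A compact sketch of the same structure with made-up names:

/* Illustrative sketch (not driver code): per-variant callbacks collected
 * into a const ops table, with wrappers that tolerate NULL entries. */
#include <stdio.h>

struct ctx;

struct ctx_funcs {
	int (*init_microcode)(struct ctx *c);
	int (*mode1_reset)(struct ctx *c);
};

struct ctx {
	const struct ctx_funcs *funcs;
};

static int variant_a_init(struct ctx *c) { (void)c; return 0; }

static const struct ctx_funcs variant_a_funcs = {
	.init_microcode = variant_a_init,
	/* .mode1_reset left NULL: not supported on this variant */
};

/* wrappers mirror the psp_* macros: call if present, else a default */
static int ctx_init_microcode(struct ctx *c)
{
	return c->funcs->init_microcode ? c->funcs->init_microcode(c) : 0;
}

static int ctx_mode1_reset(struct ctx *c)
{
	return c->funcs->mode1_reset ? c->funcs->mode1_reset(c) : 0;
}

int main(void)
{
	struct ctx c = { .funcs = &variant_a_funcs };

	printf("init=%d reset=%d\n", ctx_init_microcode(&c),
	       ctx_mode1_reset(&c));
	return 0;
}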
...@@ -151,4 +158,6 @@ extern int psp_wait_for(struct psp_context *psp, uint32_t reg_index, ...@@ -151,4 +158,6 @@ extern int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
extern const struct amdgpu_ip_block_version psp_v10_0_ip_block; extern const struct amdgpu_ip_block_version psp_v10_0_ip_block;
int psp_gpu_reset(struct amdgpu_device *adev);
#endif #endif
...@@ -360,6 +360,9 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring) ...@@ -360,6 +360,9 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring)
amdgpu_debugfs_ring_fini(ring); amdgpu_debugfs_ring_fini(ring);
dma_fence_put(ring->vmid_wait);
ring->vmid_wait = NULL;
ring->adev->rings[ring->idx] = NULL; ring->adev->rings[ring->idx] = NULL;
} }
......
...@@ -128,7 +128,6 @@ struct amdgpu_ring_funcs { ...@@ -128,7 +128,6 @@ struct amdgpu_ring_funcs {
void (*emit_vm_flush)(struct amdgpu_ring *ring, unsigned vmid, void (*emit_vm_flush)(struct amdgpu_ring *ring, unsigned vmid,
uint64_t pd_addr); uint64_t pd_addr);
void (*emit_hdp_flush)(struct amdgpu_ring *ring); void (*emit_hdp_flush)(struct amdgpu_ring *ring);
void (*emit_hdp_invalidate)(struct amdgpu_ring *ring);
void (*emit_gds_switch)(struct amdgpu_ring *ring, uint32_t vmid, void (*emit_gds_switch)(struct amdgpu_ring *ring, uint32_t vmid,
uint32_t gds_base, uint32_t gds_size, uint32_t gds_base, uint32_t gds_size,
uint32_t gws_base, uint32_t gws_size, uint32_t gws_base, uint32_t gws_size,
...@@ -151,6 +150,8 @@ struct amdgpu_ring_funcs { ...@@ -151,6 +150,8 @@ struct amdgpu_ring_funcs {
void (*emit_cntxcntl) (struct amdgpu_ring *ring, uint32_t flags); void (*emit_cntxcntl) (struct amdgpu_ring *ring, uint32_t flags);
void (*emit_rreg)(struct amdgpu_ring *ring, uint32_t reg); void (*emit_rreg)(struct amdgpu_ring *ring, uint32_t reg);
void (*emit_wreg)(struct amdgpu_ring *ring, uint32_t reg, uint32_t val); void (*emit_wreg)(struct amdgpu_ring *ring, uint32_t reg, uint32_t val);
void (*emit_reg_wait)(struct amdgpu_ring *ring, uint32_t reg,
uint32_t val, uint32_t mask);
void (*emit_tmz)(struct amdgpu_ring *ring, bool start); void (*emit_tmz)(struct amdgpu_ring *ring, bool start);
/* priority functions */ /* priority functions */
void (*set_priority) (struct amdgpu_ring *ring, void (*set_priority) (struct amdgpu_ring *ring,
...@@ -195,6 +196,7 @@ struct amdgpu_ring { ...@@ -195,6 +196,7 @@ struct amdgpu_ring {
u64 cond_exe_gpu_addr; u64 cond_exe_gpu_addr;
volatile u32 *cond_exe_cpu_addr; volatile u32 *cond_exe_cpu_addr;
unsigned vm_inv_eng; unsigned vm_inv_eng;
struct dma_fence *vmid_wait;
bool has_compute_vm_bug; bool has_compute_vm_bug;
atomic_t num_jobs[DRM_SCHED_PRIORITY_MAX]; atomic_t num_jobs[DRM_SCHED_PRIORITY_MAX];
......
...@@ -64,7 +64,7 @@ int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev, ...@@ -64,7 +64,7 @@ int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev,
INIT_LIST_HEAD(&sa_manager->flist[i]); INIT_LIST_HEAD(&sa_manager->flist[i]);
r = amdgpu_bo_create(adev, size, align, true, domain, r = amdgpu_bo_create(adev, size, align, true, domain,
0, NULL, NULL, 0, &sa_manager->bo); 0, NULL, NULL, &sa_manager->bo);
if (r) { if (r) {
dev_err(adev->dev, "(%d) failed to allocate bo for manager\n", r); dev_err(adev->dev, "(%d) failed to allocate bo for manager\n", r);
return r; return r;
......
...@@ -42,7 +42,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev) ...@@ -42,7 +42,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
/* Number of tests = /* Number of tests =
* (Total GTT - IB pool - writeback page - ring buffers) / test size * (Total GTT - IB pool - writeback page - ring buffers) / test size
*/ */
n = adev->mc.gart_size - AMDGPU_IB_POOL_SIZE*64*1024; n = adev->gmc.gart_size - AMDGPU_IB_POOL_SIZE*64*1024;
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
if (adev->rings[i]) if (adev->rings[i])
n -= adev->rings[i]->ring_size; n -= adev->rings[i]->ring_size;
...@@ -61,7 +61,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev) ...@@ -61,7 +61,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_VRAM, 0, AMDGPU_GEM_DOMAIN_VRAM, 0,
NULL, NULL, 0, &vram_obj); NULL, NULL, &vram_obj);
if (r) { if (r) {
DRM_ERROR("Failed to create VRAM object\n"); DRM_ERROR("Failed to create VRAM object\n");
goto out_cleanup; goto out_cleanup;
...@@ -82,7 +82,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev) ...@@ -82,7 +82,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_GTT, 0, NULL, AMDGPU_GEM_DOMAIN_GTT, 0, NULL,
NULL, 0, gtt_obj + i); NULL, gtt_obj + i);
if (r) { if (r) {
DRM_ERROR("Failed to create GTT object %d\n", i); DRM_ERROR("Failed to create GTT object %d\n", i);
goto out_lclean; goto out_lclean;
...@@ -142,10 +142,10 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev) ...@@ -142,10 +142,10 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
"0x%16llx/0x%16llx)\n", "0x%16llx/0x%16llx)\n",
i, *vram_start, gart_start, i, *vram_start, gart_start,
(unsigned long long) (unsigned long long)
(gart_addr - adev->mc.gart_start + (gart_addr - adev->gmc.gart_start +
(void*)gart_start - gtt_map), (void*)gart_start - gtt_map),
(unsigned long long) (unsigned long long)
(vram_addr - adev->mc.vram_start + (vram_addr - adev->gmc.vram_start +
(void*)gart_start - gtt_map)); (void*)gart_start - gtt_map));
amdgpu_bo_kunmap(vram_obj); amdgpu_bo_kunmap(vram_obj);
goto out_lclean_unpin; goto out_lclean_unpin;
...@@ -187,10 +187,10 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev) ...@@ -187,10 +187,10 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
"0x%16llx/0x%16llx)\n", "0x%16llx/0x%16llx)\n",
i, *gart_start, vram_start, i, *gart_start, vram_start,
(unsigned long long) (unsigned long long)
(vram_addr - adev->mc.vram_start + (vram_addr - adev->gmc.vram_start +
(void*)vram_start - vram_map), (void*)vram_start - vram_map),
(unsigned long long) (unsigned long long)
(gart_addr - adev->mc.gart_start + (gart_addr - adev->gmc.gart_start +
(void*)vram_start - vram_map)); (void*)vram_start - vram_map));
amdgpu_bo_kunmap(gtt_obj[i]); amdgpu_bo_kunmap(gtt_obj[i]);
goto out_lclean_unpin; goto out_lclean_unpin;
...@@ -200,7 +200,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev) ...@@ -200,7 +200,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
amdgpu_bo_kunmap(gtt_obj[i]); amdgpu_bo_kunmap(gtt_obj[i]);
DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%llx\n", DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%llx\n",
gart_addr - adev->mc.gart_start); gart_addr - adev->gmc.gart_start);
continue; continue;
out_lclean_unpin: out_lclean_unpin:
......
...@@ -86,7 +86,7 @@ TRACE_EVENT(amdgpu_iv, ...@@ -86,7 +86,7 @@ TRACE_EVENT(amdgpu_iv,
__field(unsigned, vmid_src) __field(unsigned, vmid_src)
__field(uint64_t, timestamp) __field(uint64_t, timestamp)
__field(unsigned, timestamp_src) __field(unsigned, timestamp_src)
__field(unsigned, pas_id) __field(unsigned, pasid)
__array(unsigned, src_data, 4) __array(unsigned, src_data, 4)
), ),
TP_fast_assign( TP_fast_assign(
...@@ -97,16 +97,16 @@ TRACE_EVENT(amdgpu_iv, ...@@ -97,16 +97,16 @@ TRACE_EVENT(amdgpu_iv,
__entry->vmid_src = iv->vmid_src; __entry->vmid_src = iv->vmid_src;
__entry->timestamp = iv->timestamp; __entry->timestamp = iv->timestamp;
__entry->timestamp_src = iv->timestamp_src; __entry->timestamp_src = iv->timestamp_src;
__entry->pas_id = iv->pas_id; __entry->pasid = iv->pasid;
__entry->src_data[0] = iv->src_data[0]; __entry->src_data[0] = iv->src_data[0];
__entry->src_data[1] = iv->src_data[1]; __entry->src_data[1] = iv->src_data[1];
__entry->src_data[2] = iv->src_data[2]; __entry->src_data[2] = iv->src_data[2];
__entry->src_data[3] = iv->src_data[3]; __entry->src_data[3] = iv->src_data[3];
), ),
TP_printk("client_id:%u src_id:%u ring:%u vmid:%u timestamp: %llu pas_id:%u src_data: %08x %08x %08x %08x\n", TP_printk("client_id:%u src_id:%u ring:%u vmid:%u timestamp: %llu pasid:%u src_data: %08x %08x %08x %08x\n",
__entry->client_id, __entry->src_id, __entry->client_id, __entry->src_id,
__entry->ring_id, __entry->vmid, __entry->ring_id, __entry->vmid,
__entry->timestamp, __entry->pas_id, __entry->timestamp, __entry->pasid,
__entry->src_data[0], __entry->src_data[1], __entry->src_data[0], __entry->src_data[1],
__entry->src_data[2], __entry->src_data[3]) __entry->src_data[2], __entry->src_data[3])
); );
...@@ -217,7 +217,7 @@ TRACE_EVENT(amdgpu_vm_grab_id, ...@@ -217,7 +217,7 @@ TRACE_EVENT(amdgpu_vm_grab_id,
struct amdgpu_job *job), struct amdgpu_job *job),
TP_ARGS(vm, ring, job), TP_ARGS(vm, ring, job),
TP_STRUCT__entry( TP_STRUCT__entry(
__field(struct amdgpu_vm *, vm) __field(u32, pasid)
__field(u32, ring) __field(u32, ring)
__field(u32, vmid) __field(u32, vmid)
__field(u32, vm_hub) __field(u32, vm_hub)
...@@ -226,15 +226,15 @@ TRACE_EVENT(amdgpu_vm_grab_id, ...@@ -226,15 +226,15 @@ TRACE_EVENT(amdgpu_vm_grab_id,
), ),
TP_fast_assign( TP_fast_assign(
__entry->vm = vm; __entry->pasid = vm->pasid;
__entry->ring = ring->idx; __entry->ring = ring->idx;
__entry->vmid = job->vmid; __entry->vmid = job->vmid;
__entry->vm_hub = ring->funcs->vmhub, __entry->vm_hub = ring->funcs->vmhub,
__entry->pd_addr = job->vm_pd_addr; __entry->pd_addr = job->vm_pd_addr;
__entry->needs_flush = job->vm_needs_flush; __entry->needs_flush = job->vm_needs_flush;
), ),
TP_printk("vm=%p, ring=%u, id=%u, hub=%u, pd_addr=%010Lx needs_flush=%u", TP_printk("pasid=%d, ring=%u, id=%u, hub=%u, pd_addr=%010Lx needs_flush=%u",
__entry->vm, __entry->ring, __entry->vmid, __entry->pasid, __entry->ring, __entry->vmid,
__entry->vm_hub, __entry->pd_addr, __entry->needs_flush) __entry->vm_hub, __entry->pd_addr, __entry->needs_flush)
); );
...@@ -378,6 +378,28 @@ TRACE_EVENT(amdgpu_vm_flush, ...@@ -378,6 +378,28 @@ TRACE_EVENT(amdgpu_vm_flush,
__entry->vm_hub,__entry->pd_addr) __entry->vm_hub,__entry->pd_addr)
); );
DECLARE_EVENT_CLASS(amdgpu_pasid,
TP_PROTO(unsigned pasid),
TP_ARGS(pasid),
TP_STRUCT__entry(
__field(unsigned, pasid)
),
TP_fast_assign(
__entry->pasid = pasid;
),
TP_printk("pasid=%u", __entry->pasid)
);
DEFINE_EVENT(amdgpu_pasid, amdgpu_pasid_allocated,
TP_PROTO(unsigned pasid),
TP_ARGS(pasid)
);
DEFINE_EVENT(amdgpu_pasid, amdgpu_pasid_freed,
TP_PROTO(unsigned pasid),
TP_ARGS(pasid)
);
TRACE_EVENT(amdgpu_bo_list_set, TRACE_EVENT(amdgpu_bo_list_set,
TP_PROTO(struct amdgpu_bo_list *list, struct amdgpu_bo *bo), TP_PROTO(struct amdgpu_bo_list *list, struct amdgpu_bo *bo),
TP_ARGS(list, bo), TP_ARGS(list, bo),
......
...@@ -161,7 +161,7 @@ static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, ...@@ -161,7 +161,7 @@ static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
break; break;
case TTM_PL_TT: case TTM_PL_TT:
man->func = &amdgpu_gtt_mgr_func; man->func = &amdgpu_gtt_mgr_func;
man->gpu_offset = adev->mc.gart_start; man->gpu_offset = adev->gmc.gart_start;
man->available_caching = TTM_PL_MASK_CACHING; man->available_caching = TTM_PL_MASK_CACHING;
man->default_caching = TTM_PL_FLAG_CACHED; man->default_caching = TTM_PL_FLAG_CACHED;
man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA; man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
...@@ -169,7 +169,7 @@ static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, ...@@ -169,7 +169,7 @@ static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
case TTM_PL_VRAM: case TTM_PL_VRAM:
/* "On-card" video ram */ /* "On-card" video ram */
man->func = &amdgpu_vram_mgr_func; man->func = &amdgpu_vram_mgr_func;
man->gpu_offset = adev->mc.vram_start; man->gpu_offset = adev->gmc.vram_start;
man->flags = TTM_MEMTYPE_FLAG_FIXED | man->flags = TTM_MEMTYPE_FLAG_FIXED |
TTM_MEMTYPE_FLAG_MAPPABLE; TTM_MEMTYPE_FLAG_MAPPABLE;
man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC; man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
...@@ -217,9 +217,9 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo, ...@@ -217,9 +217,9 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
adev->mman.buffer_funcs_ring && adev->mman.buffer_funcs_ring &&
adev->mman.buffer_funcs_ring->ready == false) { adev->mman.buffer_funcs_ring->ready == false) {
amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU); amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
} else if (adev->mc.visible_vram_size < adev->mc.real_vram_size && } else if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
!(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) { !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) {
unsigned fpfn = adev->mc.visible_vram_size >> PAGE_SHIFT; unsigned fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
struct drm_mm_node *node = bo->mem.mm_node; struct drm_mm_node *node = bo->mem.mm_node;
unsigned long pages_left; unsigned long pages_left;
...@@ -638,9 +638,9 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_ ...@@ -638,9 +638,9 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_
case TTM_PL_VRAM: case TTM_PL_VRAM:
mem->bus.offset = mem->start << PAGE_SHIFT; mem->bus.offset = mem->start << PAGE_SHIFT;
/* check if it's visible */ /* check if it's visible */
if ((mem->bus.offset + mem->bus.size) > adev->mc.visible_vram_size) if ((mem->bus.offset + mem->bus.size) > adev->gmc.visible_vram_size)
return -EINVAL; return -EINVAL;
mem->bus.base = adev->mc.aper_base; mem->bus.base = adev->gmc.aper_base;
mem->bus.is_iomem = true; mem->bus.is_iomem = true;
break; break;
default: default:
...@@ -891,7 +891,7 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo) ...@@ -891,7 +891,7 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
placement.num_busy_placement = 1; placement.num_busy_placement = 1;
placement.busy_placement = &placements; placement.busy_placement = &placements;
placements.fpfn = 0; placements.fpfn = 0;
placements.lpfn = adev->mc.gart_size >> PAGE_SHIFT; placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
placements.flags = (bo->mem.placement & ~TTM_PL_MASK_MEM) | placements.flags = (bo->mem.placement & ~TTM_PL_MASK_MEM) |
TTM_PL_FLAG_TT; TTM_PL_FLAG_TT;
...@@ -997,9 +997,6 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm, ...@@ -997,9 +997,6 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm,
struct amdgpu_ttm_tt *gtt = (void *)ttm; struct amdgpu_ttm_tt *gtt = (void *)ttm;
bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG); bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
if (ttm->state != tt_unpopulated)
return 0;
if (gtt && gtt->userptr) { if (gtt && gtt->userptr) {
ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL); ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
if (!ttm->sg) if (!ttm->sg)
...@@ -1212,7 +1209,7 @@ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo, ...@@ -1212,7 +1209,7 @@ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
nodes = amdgpu_find_mm_node(&abo->tbo.mem, &offset); nodes = amdgpu_find_mm_node(&abo->tbo.mem, &offset);
pos = (nodes->start << PAGE_SHIFT) + offset; pos = (nodes->start << PAGE_SHIFT) + offset;
while (len && pos < adev->mc.mc_vram_size) { while (len && pos < adev->gmc.mc_vram_size) {
uint64_t aligned_pos = pos & ~(uint64_t)3; uint64_t aligned_pos = pos & ~(uint64_t)3;
uint32_t bytes = 4 - (pos & 3); uint32_t bytes = 4 - (pos & 3);
uint32_t shift = (pos & 3) * 8; uint32_t shift = (pos & 3) * 8;
...@@ -1298,7 +1295,7 @@ static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev) ...@@ -1298,7 +1295,7 @@ static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev)
struct ttm_operation_ctx ctx = { false, false }; struct ttm_operation_ctx ctx = { false, false };
int r = 0; int r = 0;
int i; int i;
u64 vram_size = adev->mc.visible_vram_size; u64 vram_size = adev->gmc.visible_vram_size;
u64 offset = adev->fw_vram_usage.start_offset; u64 offset = adev->fw_vram_usage.start_offset;
u64 size = adev->fw_vram_usage.size; u64 size = adev->fw_vram_usage.size;
struct amdgpu_bo *bo; struct amdgpu_bo *bo;
...@@ -1312,7 +1309,7 @@ static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev) ...@@ -1312,7 +1309,7 @@ static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev)
r = amdgpu_bo_create(adev, adev->fw_vram_usage.size, r = amdgpu_bo_create(adev, adev->fw_vram_usage.size,
PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, NULL, NULL, 0, AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, NULL, NULL,
&adev->fw_vram_usage.reserved_bo); &adev->fw_vram_usage.reserved_bo);
if (r) if (r)
goto error_create; goto error_create;
...@@ -1387,8 +1384,12 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) ...@@ -1387,8 +1384,12 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
return r; return r;
} }
adev->mman.initialized = true; adev->mman.initialized = true;
/* We opt to avoid OOM on system page allocations */
adev->mman.bdev.no_retry = true;
r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_VRAM, r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_VRAM,
adev->mc.real_vram_size >> PAGE_SHIFT); adev->gmc.real_vram_size >> PAGE_SHIFT);
if (r) { if (r) {
DRM_ERROR("Failed initializing VRAM heap.\n"); DRM_ERROR("Failed initializing VRAM heap.\n");
return r; return r;
...@@ -1397,11 +1398,11 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) ...@@ -1397,11 +1398,11 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
/* Reduce size of CPU-visible VRAM if requested */ /* Reduce size of CPU-visible VRAM if requested */
vis_vram_limit = (u64)amdgpu_vis_vram_limit * 1024 * 1024; vis_vram_limit = (u64)amdgpu_vis_vram_limit * 1024 * 1024;
if (amdgpu_vis_vram_limit > 0 && if (amdgpu_vis_vram_limit > 0 &&
vis_vram_limit <= adev->mc.visible_vram_size) vis_vram_limit <= adev->gmc.visible_vram_size)
adev->mc.visible_vram_size = vis_vram_limit; adev->gmc.visible_vram_size = vis_vram_limit;
/* Change the size here instead of the init above so only lpfn is affected */ /* Change the size here instead of the init above so only lpfn is affected */
amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size); amdgpu_ttm_set_active_vram_size(adev, adev->gmc.visible_vram_size);
/* /*
*The reserved vram for firmware must be pinned to the specified *The reserved vram for firmware must be pinned to the specified
...@@ -1412,21 +1413,21 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) ...@@ -1412,21 +1413,21 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
return r; return r;
} }
r = amdgpu_bo_create_kernel(adev, adev->mc.stolen_size, PAGE_SIZE, r = amdgpu_bo_create_kernel(adev, adev->gmc.stolen_size, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM, AMDGPU_GEM_DOMAIN_VRAM,
&adev->stolen_vga_memory, &adev->stolen_vga_memory,
NULL, NULL); NULL, NULL);
if (r) if (r)
return r; return r;
DRM_INFO("amdgpu: %uM of VRAM memory ready\n", DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
(unsigned) (adev->mc.real_vram_size / (1024 * 1024))); (unsigned) (adev->gmc.real_vram_size / (1024 * 1024)));
if (amdgpu_gtt_size == -1) { if (amdgpu_gtt_size == -1) {
struct sysinfo si; struct sysinfo si;
si_meminfo(&si); si_meminfo(&si);
gtt_size = min(max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20), gtt_size = min(max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
adev->mc.mc_vram_size), adev->gmc.mc_vram_size),
((uint64_t)si.totalram * si.mem_unit * 3/4)); ((uint64_t)si.totalram * si.mem_unit * 3/4));
} }
else else
...@@ -1559,7 +1560,7 @@ static int amdgpu_map_buffer(struct ttm_buffer_object *bo, ...@@ -1559,7 +1560,7 @@ static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
BUG_ON(adev->mman.buffer_funcs->copy_max_bytes < BUG_ON(adev->mman.buffer_funcs->copy_max_bytes <
AMDGPU_GTT_MAX_TRANSFER_SIZE * 8); AMDGPU_GTT_MAX_TRANSFER_SIZE * 8);
*addr = adev->mc.gart_start; *addr = adev->gmc.gart_start;
*addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE * *addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE *
AMDGPU_GPU_PAGE_SIZE; AMDGPU_GPU_PAGE_SIZE;
...@@ -1677,13 +1678,12 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset, ...@@ -1677,13 +1678,12 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
} }
int amdgpu_fill_buffer(struct amdgpu_bo *bo, int amdgpu_fill_buffer(struct amdgpu_bo *bo,
uint64_t src_data, uint32_t src_data,
struct reservation_object *resv, struct reservation_object *resv,
struct dma_fence **fence) struct dma_fence **fence)
{ {
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
uint32_t max_bytes = 8 * uint32_t max_bytes = adev->mman.buffer_funcs->fill_max_bytes;
adev->vm_manager.vm_pte_funcs->set_max_nums_pte_pde;
struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring; struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
struct drm_mm_node *mm_node; struct drm_mm_node *mm_node;
...@@ -1714,9 +1714,7 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo, ...@@ -1714,9 +1714,7 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
num_pages -= mm_node->size; num_pages -= mm_node->size;
++mm_node; ++mm_node;
} }
num_dw = num_loops * adev->mman.buffer_funcs->fill_num_dw;
/* num of dwords for each SDMA_OP_PTEPDE cmd */
num_dw = num_loops * adev->vm_manager.vm_pte_funcs->set_pte_pde_num_dw;
/* for IB padding */ /* for IB padding */
num_dw += 64; num_dw += 64;
...@@ -1741,16 +1739,12 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo, ...@@ -1741,16 +1739,12 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
uint32_t byte_count = mm_node->size << PAGE_SHIFT; uint32_t byte_count = mm_node->size << PAGE_SHIFT;
uint64_t dst_addr; uint64_t dst_addr;
WARN_ONCE(byte_count & 0x7, "size should be a multiple of 8");
dst_addr = amdgpu_mm_node_addr(&bo->tbo, mm_node, &bo->tbo.mem); dst_addr = amdgpu_mm_node_addr(&bo->tbo, mm_node, &bo->tbo.mem);
while (byte_count) { while (byte_count) {
uint32_t cur_size_in_bytes = min(byte_count, max_bytes); uint32_t cur_size_in_bytes = min(byte_count, max_bytes);
amdgpu_vm_set_pte_pde(adev, &job->ibs[0], amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data,
dst_addr, 0, dst_addr, cur_size_in_bytes);
cur_size_in_bytes >> 3, 0,
src_data);
dst_addr += cur_size_in_bytes; dst_addr += cur_size_in_bytes;
byte_count -= cur_size_in_bytes; byte_count -= cur_size_in_bytes;
...@@ -1811,14 +1805,14 @@ static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf, ...@@ -1811,14 +1805,14 @@ static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf,
if (size & 0x3 || *pos & 0x3) if (size & 0x3 || *pos & 0x3)
return -EINVAL; return -EINVAL;
if (*pos >= adev->mc.mc_vram_size) if (*pos >= adev->gmc.mc_vram_size)
return -ENXIO; return -ENXIO;
while (size) { while (size) {
unsigned long flags; unsigned long flags;
uint32_t value; uint32_t value;
if (*pos >= adev->mc.mc_vram_size) if (*pos >= adev->gmc.mc_vram_size)
return result; return result;
spin_lock_irqsave(&adev->mmio_idx_lock, flags); spin_lock_irqsave(&adev->mmio_idx_lock, flags);
...@@ -1850,14 +1844,14 @@ static ssize_t amdgpu_ttm_vram_write(struct file *f, const char __user *buf, ...@@ -1850,14 +1844,14 @@ static ssize_t amdgpu_ttm_vram_write(struct file *f, const char __user *buf,
if (size & 0x3 || *pos & 0x3) if (size & 0x3 || *pos & 0x3)
return -EINVAL; return -EINVAL;
if (*pos >= adev->mc.mc_vram_size) if (*pos >= adev->gmc.mc_vram_size)
return -ENXIO; return -ENXIO;
while (size) { while (size) {
unsigned long flags; unsigned long flags;
uint32_t value; uint32_t value;
if (*pos >= adev->mc.mc_vram_size) if (*pos >= adev->gmc.mc_vram_size)
return result; return result;
r = get_user(value, (uint32_t *)buf); r = get_user(value, (uint32_t *)buf);
...@@ -2001,9 +1995,9 @@ static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev) ...@@ -2001,9 +1995,9 @@ static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
if (IS_ERR(ent)) if (IS_ERR(ent))
return PTR_ERR(ent); return PTR_ERR(ent);
if (ttm_debugfs_entries[count].domain == TTM_PL_VRAM) if (ttm_debugfs_entries[count].domain == TTM_PL_VRAM)
i_size_write(ent->d_inode, adev->mc.mc_vram_size); i_size_write(ent->d_inode, adev->gmc.mc_vram_size);
else if (ttm_debugfs_entries[count].domain == TTM_PL_TT) else if (ttm_debugfs_entries[count].domain == TTM_PL_TT)
i_size_write(ent->d_inode, adev->mc.gart_size); i_size_write(ent->d_inode, adev->gmc.gart_size);
adev->mman.debugfs_entries[count] = ent; adev->mman.debugfs_entries[count] = ent;
} }
......
...@@ -86,7 +86,7 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev, ...@@ -86,7 +86,7 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
struct reservation_object *resv, struct reservation_object *resv,
struct dma_fence **f); struct dma_fence **f);
int amdgpu_fill_buffer(struct amdgpu_bo *bo, int amdgpu_fill_buffer(struct amdgpu_bo *bo,
uint64_t src_data, uint32_t src_data,
struct reservation_object *resv, struct reservation_object *resv,
struct dma_fence **fence); struct dma_fence **fence);
......
...@@ -952,37 +952,28 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx) ...@@ -952,37 +952,28 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo, static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
bool direct, struct dma_fence **fence) bool direct, struct dma_fence **fence)
{ {
struct ttm_operation_ctx ctx = { true, false }; struct amdgpu_device *adev = ring->adev;
struct ttm_validate_buffer tv; struct dma_fence *f = NULL;
struct ww_acquire_ctx ticket;
struct list_head head;
struct amdgpu_job *job; struct amdgpu_job *job;
struct amdgpu_ib *ib; struct amdgpu_ib *ib;
struct dma_fence *f = NULL;
struct amdgpu_device *adev = ring->adev;
uint64_t addr;
uint32_t data[4]; uint32_t data[4];
int i, r; uint64_t addr;
long r;
memset(&tv, 0, sizeof(tv)); int i;
tv.bo = &bo->tbo;
INIT_LIST_HEAD(&head);
list_add(&tv.head, &head);
r = ttm_eu_reserve_buffers(&ticket, &head, true, NULL); amdgpu_bo_kunmap(bo);
if (r) amdgpu_bo_unpin(bo);
return r;
if (!ring->adev->uvd.address_64_bit) { if (!ring->adev->uvd.address_64_bit) {
struct ttm_operation_ctx ctx = { true, false };
amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM); amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
amdgpu_uvd_force_into_uvd_segment(bo); amdgpu_uvd_force_into_uvd_segment(bo);
r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (r)
goto err;
} }
r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (r)
goto err;
r = amdgpu_job_alloc_with_ib(adev, 64, &job); r = amdgpu_job_alloc_with_ib(adev, 64, &job);
if (r) if (r)
goto err; goto err;
...@@ -1014,6 +1005,14 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo, ...@@ -1014,6 +1005,14 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
ib->length_dw = 16; ib->length_dw = 16;
if (direct) { if (direct) {
r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
true, false,
msecs_to_jiffies(10));
if (r == 0)
r = -ETIMEDOUT;
if (r < 0)
goto err_free;
r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f); r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
job->fence = dma_fence_get(f); job->fence = dma_fence_get(f);
if (r) if (r)
...@@ -1021,17 +1020,23 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo, ...@@ -1021,17 +1020,23 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
amdgpu_job_free(job); amdgpu_job_free(job);
} else { } else {
r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.resv,
AMDGPU_FENCE_OWNER_UNDEFINED, false);
if (r)
goto err_free;
r = amdgpu_job_submit(job, ring, &adev->uvd.entity, r = amdgpu_job_submit(job, ring, &adev->uvd.entity,
AMDGPU_FENCE_OWNER_UNDEFINED, &f); AMDGPU_FENCE_OWNER_UNDEFINED, &f);
if (r) if (r)
goto err_free; goto err_free;
} }
ttm_eu_fence_buffer_objects(&ticket, &head, f); amdgpu_bo_fence(bo, f, false);
amdgpu_bo_unreserve(bo);
amdgpu_bo_unref(&bo);
if (fence) if (fence)
*fence = dma_fence_get(f); *fence = dma_fence_get(f);
amdgpu_bo_unref(&bo);
dma_fence_put(f); dma_fence_put(f);
return 0; return 0;
...@@ -1040,7 +1045,8 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo, ...@@ -1040,7 +1045,8 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
amdgpu_job_free(job); amdgpu_job_free(job);
err: err:
ttm_eu_backoff_reservation(&ticket, &head); amdgpu_bo_unreserve(bo);
amdgpu_bo_unref(&bo);
return r; return r;
} }
...@@ -1051,31 +1057,16 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, ...@@ -1051,31 +1057,16 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
struct dma_fence **fence) struct dma_fence **fence)
{ {
struct amdgpu_device *adev = ring->adev; struct amdgpu_device *adev = ring->adev;
struct amdgpu_bo *bo; struct amdgpu_bo *bo = NULL;
uint32_t *msg; uint32_t *msg;
int r, i; int r, i;
r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true, r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM, AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | &bo, NULL, (void **)&msg);
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
NULL, NULL, 0, &bo);
if (r) if (r)
return r; return r;
r = amdgpu_bo_reserve(bo, false);
if (r) {
amdgpu_bo_unref(&bo);
return r;
}
r = amdgpu_bo_kmap(bo, (void **)&msg);
if (r) {
amdgpu_bo_unreserve(bo);
amdgpu_bo_unref(&bo);
return r;
}
/* stitch together an UVD create msg */ /* stitch together an UVD create msg */
msg[0] = cpu_to_le32(0x00000de4); msg[0] = cpu_to_le32(0x00000de4);
msg[1] = cpu_to_le32(0x00000000); msg[1] = cpu_to_le32(0x00000000);
...@@ -1091,9 +1082,6 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, ...@@ -1091,9 +1082,6 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
for (i = 11; i < 1024; ++i) for (i = 11; i < 1024; ++i)
msg[i] = cpu_to_le32(0x0); msg[i] = cpu_to_le32(0x0);
amdgpu_bo_kunmap(bo);
amdgpu_bo_unreserve(bo);
return amdgpu_uvd_send_msg(ring, bo, true, fence); return amdgpu_uvd_send_msg(ring, bo, true, fence);
} }
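The message helpers now rely on amdgpu_bo_create_reserved(), which allocates, pins, reserves and kmaps the buffer in one call, so the hand-rolled reserve/kmap error ladders above disappear. A kernel-context sketch (not standalone, error handling trimmed) of the calling convention as used in this hunk; the cleanup is left to amdgpu_uvd_send_msg(), which now kunmaps, unpins and unreserves the BO itself:

/* Sketch of the allocation pattern used by amdgpu_uvd_get_create_msg()
 * after this change; kernel-internal API, shown only to make the
 * calling convention explicit. */
struct amdgpu_bo *bo = NULL;
uint32_t *msg;
int r;

r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
			      AMDGPU_GEM_DOMAIN_VRAM,
			      &bo, NULL,        /* GPU address not needed */
			      (void **)&msg);   /* CPU mapping for the msg */
if (r)
	return r;

/* bo comes back reserved and kmapped: fill the UVD message in place... */
msg[0] = cpu_to_le32(0x00000de4);

/* ...then hand it to amdgpu_uvd_send_msg(), which does the
 * kunmap/unpin/fence/unreserve/unref itself (see the hunk above). */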
...@@ -1101,31 +1089,16 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, ...@@ -1101,31 +1089,16 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
bool direct, struct dma_fence **fence) bool direct, struct dma_fence **fence)
{ {
struct amdgpu_device *adev = ring->adev; struct amdgpu_device *adev = ring->adev;
struct amdgpu_bo *bo; struct amdgpu_bo *bo = NULL;
uint32_t *msg; uint32_t *msg;
int r, i; int r, i;
r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true, r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM, AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | &bo, NULL, (void **)&msg);
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
NULL, NULL, 0, &bo);
if (r) if (r)
return r; return r;
r = amdgpu_bo_reserve(bo, false);
if (r) {
amdgpu_bo_unref(&bo);
return r;
}
r = amdgpu_bo_kmap(bo, (void **)&msg);
if (r) {
amdgpu_bo_unreserve(bo);
amdgpu_bo_unref(&bo);
return r;
}
/* stitch together an UVD destroy msg */ /* stitch together an UVD destroy msg */
msg[0] = cpu_to_le32(0x00000de4); msg[0] = cpu_to_le32(0x00000de4);
msg[1] = cpu_to_le32(0x00000002); msg[1] = cpu_to_le32(0x00000002);
...@@ -1134,9 +1107,6 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, ...@@ -1134,9 +1107,6 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
for (i = 4; i < 1024; ++i) for (i = 4; i < 1024; ++i)
msg[i] = cpu_to_le32(0x0); msg[i] = cpu_to_le32(0x0);
amdgpu_bo_kunmap(bo);
amdgpu_bo_unreserve(bo);
return amdgpu_uvd_send_msg(ring, bo, direct, fence); return amdgpu_uvd_send_msg(ring, bo, direct, fence);
} }
......
...@@ -30,6 +30,8 @@ ...@@ -30,6 +30,8 @@
#define AMDGPU_VCE_HARVEST_VCE0 (1 << 0) #define AMDGPU_VCE_HARVEST_VCE0 (1 << 0)
#define AMDGPU_VCE_HARVEST_VCE1 (1 << 1) #define AMDGPU_VCE_HARVEST_VCE1 (1 << 1)
#define AMDGPU_VCE_FW_53_45 ((53 << 24) | (45 << 16))
struct amdgpu_vce { struct amdgpu_vce {
struct amdgpu_bo *vcpu_bo; struct amdgpu_bo *vcpu_bo;
uint64_t gpu_addr; uint64_t gpu_addr;
......
...@@ -270,34 +270,17 @@ int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring) ...@@ -270,34 +270,17 @@ int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
return r; return r;
} }
static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo, static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
bool direct, struct dma_fence **fence) struct amdgpu_bo *bo, bool direct,
struct dma_fence **fence)
{ {
struct ttm_operation_ctx ctx = { true, false }; struct amdgpu_device *adev = ring->adev;
struct ttm_validate_buffer tv; struct dma_fence *f = NULL;
struct ww_acquire_ctx ticket;
struct list_head head;
struct amdgpu_job *job; struct amdgpu_job *job;
struct amdgpu_ib *ib; struct amdgpu_ib *ib;
struct dma_fence *f = NULL;
struct amdgpu_device *adev = ring->adev;
uint64_t addr; uint64_t addr;
int i, r; int i, r;
memset(&tv, 0, sizeof(tv));
tv.bo = &bo->tbo;
INIT_LIST_HEAD(&head);
list_add(&tv.head, &head);
r = ttm_eu_reserve_buffers(&ticket, &head, true, NULL);
if (r)
return r;
r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (r)
goto err;
r = amdgpu_job_alloc_with_ib(adev, 64, &job); r = amdgpu_job_alloc_with_ib(adev, 64, &job);
if (r) if (r)
goto err; goto err;
...@@ -330,11 +313,12 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *b ...@@ -330,11 +313,12 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *b
goto err_free; goto err_free;
} }
ttm_eu_fence_buffer_objects(&ticket, &head, f); amdgpu_bo_fence(bo, f, false);
amdgpu_bo_unreserve(bo);
amdgpu_bo_unref(&bo);
if (fence) if (fence)
*fence = dma_fence_get(f); *fence = dma_fence_get(f);
amdgpu_bo_unref(&bo);
dma_fence_put(f); dma_fence_put(f);
return 0; return 0;
...@@ -343,7 +327,8 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *b ...@@ -343,7 +327,8 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *b
amdgpu_job_free(job); amdgpu_job_free(job);
err: err:
ttm_eu_backoff_reservation(&ticket, &head); amdgpu_bo_unreserve(bo);
amdgpu_bo_unref(&bo);
return r; return r;
} }
...@@ -351,31 +336,16 @@ static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t hand ...@@ -351,31 +336,16 @@ static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
struct dma_fence **fence) struct dma_fence **fence)
{ {
struct amdgpu_device *adev = ring->adev; struct amdgpu_device *adev = ring->adev;
struct amdgpu_bo *bo; struct amdgpu_bo *bo = NULL;
uint32_t *msg; uint32_t *msg;
int r, i; int r, i;
r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true, r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM, AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | &bo, NULL, (void **)&msg);
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
NULL, NULL, 0, &bo);
if (r) if (r)
return r; return r;
r = amdgpu_bo_reserve(bo, false);
if (r) {
amdgpu_bo_unref(&bo);
return r;
}
r = amdgpu_bo_kmap(bo, (void **)&msg);
if (r) {
amdgpu_bo_unreserve(bo);
amdgpu_bo_unref(&bo);
return r;
}
msg[0] = cpu_to_le32(0x00000028); msg[0] = cpu_to_le32(0x00000028);
msg[1] = cpu_to_le32(0x00000038); msg[1] = cpu_to_le32(0x00000038);
msg[2] = cpu_to_le32(0x00000001); msg[2] = cpu_to_le32(0x00000001);
...@@ -393,9 +363,6 @@ static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t hand ...@@ -393,9 +363,6 @@ static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
for (i = 14; i < 1024; ++i) for (i = 14; i < 1024; ++i)
msg[i] = cpu_to_le32(0x0); msg[i] = cpu_to_le32(0x0);
amdgpu_bo_kunmap(bo);
amdgpu_bo_unreserve(bo);
return amdgpu_vcn_dec_send_msg(ring, bo, true, fence); return amdgpu_vcn_dec_send_msg(ring, bo, true, fence);
} }
...@@ -403,31 +370,16 @@ static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han ...@@ -403,31 +370,16 @@ static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
bool direct, struct dma_fence **fence) bool direct, struct dma_fence **fence)
{ {
struct amdgpu_device *adev = ring->adev; struct amdgpu_device *adev = ring->adev;
struct amdgpu_bo *bo; struct amdgpu_bo *bo = NULL;
uint32_t *msg; uint32_t *msg;
int r, i; int r, i;
r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true, r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM, AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | &bo, NULL, (void **)&msg);
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
NULL, NULL, 0, &bo);
if (r) if (r)
return r; return r;
r = amdgpu_bo_reserve(bo, false);
if (r) {
amdgpu_bo_unref(&bo);
return r;
}
r = amdgpu_bo_kmap(bo, (void **)&msg);
if (r) {
amdgpu_bo_unreserve(bo);
amdgpu_bo_unref(&bo);
return r;
}
msg[0] = cpu_to_le32(0x00000028); msg[0] = cpu_to_le32(0x00000028);
msg[1] = cpu_to_le32(0x00000018); msg[1] = cpu_to_le32(0x00000018);
msg[2] = cpu_to_le32(0x00000000); msg[2] = cpu_to_le32(0x00000000);
...@@ -437,9 +389,6 @@ static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han ...@@ -437,9 +389,6 @@ static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
for (i = 6; i < 1024; ++i) for (i = 6; i < 1024; ++i)
msg[i] = cpu_to_le32(0x0); msg[i] = cpu_to_le32(0x0);
amdgpu_bo_kunmap(bo);
amdgpu_bo_unreserve(bo);
return amdgpu_vcn_dec_send_msg(ring, bo, direct, fence); return amdgpu_vcn_dec_send_msg(ring, bo, direct, fence);
} }
......
...@@ -24,6 +24,18 @@ ...@@ -24,6 +24,18 @@
#include "amdgpu.h" #include "amdgpu.h"
#define MAX_KIQ_REG_WAIT 100000000 /* in usecs */ #define MAX_KIQ_REG_WAIT 100000000 /* in usecs */
uint64_t amdgpu_csa_vaddr(struct amdgpu_device *adev)
{
uint64_t addr = adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT;
addr -= AMDGPU_VA_RESERVED_SIZE;
if (addr >= AMDGPU_VA_HOLE_START)
addr |= AMDGPU_VA_HOLE_END;
return addr;
}
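amdgpu_csa_vaddr() places the CSA just below the top of the VM space (max_pfn pages minus the reserved area) and, when that address reaches the 48-bit hole, ORs in the upper half so it becomes a canonical high address. A standalone re-computation with assumed constants: 4 KiB GPU pages, the 1 MiB AMDGPU_VA_RESERVED_SIZE from this series, and 0xffff800000000000 for AMDGPU_VA_HOLE_END, which is an assumption here since only AMDGPU_VA_HOLE_START appears in these hunks:

#include <stdint.h>
#include <stdio.h>

#define GPU_PAGE_SHIFT     12                       /* assumed 4 KiB GPU pages */
#define VA_RESERVED_SIZE   (1ULL << 20)             /* from this diff */
#define VA_HOLE_START      0x0000800000000000ULL    /* from this diff */
#define VA_HOLE_END        0xffff800000000000ULL    /* assumed canonical value */

static uint64_t csa_vaddr(uint64_t max_pfn)
{
	uint64_t addr = max_pfn << GPU_PAGE_SHIFT;

	addr -= VA_RESERVED_SIZE;
	if (addr >= VA_HOLE_START)
		addr |= VA_HOLE_END;	/* sign-extend into the upper half */
	return addr;
}

int main(void)
{
	/* e.g. a 48-bit VM: 2^48 bytes -> 2^36 GPU pages */
	printf("CSA vaddr: 0x%llx\n",
	       (unsigned long long)csa_vaddr(1ULL << 36));
	return 0;
}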
bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev) bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev)
{ {
/* By now all MMIO pages except mailbox are blocked */ /* By now all MMIO pages except mailbox are blocked */
...@@ -55,14 +67,14 @@ void amdgpu_free_static_csa(struct amdgpu_device *adev) { ...@@ -55,14 +67,14 @@ void amdgpu_free_static_csa(struct amdgpu_device *adev) {
/* /*
* amdgpu_map_static_csa should be called during amdgpu_vm_init * amdgpu_map_static_csa should be called during amdgpu_vm_init
* it maps virtual address "AMDGPU_VA_RESERVED_SIZE - AMDGPU_CSA_SIZE" * it maps virtual address amdgpu_csa_vaddr() to this VM, and each command
* to this VM, and each command submission of GFX should use this virtual * submission of GFX should use this virtual address within META_DATA init
* address within META_DATA init package to support SRIOV gfx preemption. * package to support SRIOV gfx preemption.
*/ */
int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm, int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
struct amdgpu_bo_va **bo_va) struct amdgpu_bo_va **bo_va)
{ {
uint64_t csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_VA_HOLE_MASK;
struct ww_acquire_ctx ticket; struct ww_acquire_ctx ticket;
struct list_head list; struct list_head list;
struct amdgpu_bo_list_entry pd; struct amdgpu_bo_list_entry pd;
...@@ -90,7 +102,7 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm, ...@@ -90,7 +102,7 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
return -ENOMEM; return -ENOMEM;
} }
r = amdgpu_vm_alloc_pts(adev, (*bo_va)->base.vm, AMDGPU_CSA_VADDR, r = amdgpu_vm_alloc_pts(adev, (*bo_va)->base.vm, csa_addr,
AMDGPU_CSA_SIZE); AMDGPU_CSA_SIZE);
if (r) { if (r) {
DRM_ERROR("failed to allocate pts for static CSA, err=%d\n", r); DRM_ERROR("failed to allocate pts for static CSA, err=%d\n", r);
...@@ -99,7 +111,7 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm, ...@@ -99,7 +111,7 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
return r; return r;
} }
r = amdgpu_vm_bo_map(adev, *bo_va, AMDGPU_CSA_VADDR, 0, AMDGPU_CSA_SIZE, r = amdgpu_vm_bo_map(adev, *bo_va, csa_addr, 0, AMDGPU_CSA_SIZE,
AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE | AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
AMDGPU_PTE_EXECUTABLE); AMDGPU_PTE_EXECUTABLE);
......
...@@ -251,8 +251,7 @@ struct amdgpu_virt { ...@@ -251,8 +251,7 @@ struct amdgpu_virt {
uint32_t gim_feature; uint32_t gim_feature;
}; };
#define AMDGPU_CSA_SIZE (8 * 1024) #define AMDGPU_CSA_SIZE (8 * 1024)
#define AMDGPU_CSA_VADDR (AMDGPU_VA_RESERVED_SIZE - AMDGPU_CSA_SIZE)
#define amdgpu_sriov_enabled(adev) \ #define amdgpu_sriov_enabled(adev) \
((adev)->virt.caps & AMDGPU_SRIOV_CAPS_ENABLE_IOV) ((adev)->virt.caps & AMDGPU_SRIOV_CAPS_ENABLE_IOV)
...@@ -279,6 +278,8 @@ static inline bool is_virtual_machine(void) ...@@ -279,6 +278,8 @@ static inline bool is_virtual_machine(void)
} }
struct amdgpu_vm; struct amdgpu_vm;
uint64_t amdgpu_csa_vaddr(struct amdgpu_device *adev);
bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev); bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev);
int amdgpu_allocate_static_csa(struct amdgpu_device *adev); int amdgpu_allocate_static_csa(struct amdgpu_device *adev);
int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm, int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
......
...@@ -99,7 +99,7 @@ struct amdgpu_bo_list_entry; ...@@ -99,7 +99,7 @@ struct amdgpu_bo_list_entry;
#define AMDGPU_MMHUB 1 #define AMDGPU_MMHUB 1
/* hardcode that limit for now */ /* hardcode that limit for now */
#define AMDGPU_VA_RESERVED_SIZE (8ULL << 20) #define AMDGPU_VA_RESERVED_SIZE (1ULL << 20)
/* VA hole for 48bit addresses on Vega10 */ /* VA hole for 48bit addresses on Vega10 */
#define AMDGPU_VA_HOLE_START 0x0000800000000000ULL #define AMDGPU_VA_HOLE_START 0x0000800000000000ULL
......
...@@ -89,11 +89,11 @@ static u64 amdgpu_vram_mgr_vis_size(struct amdgpu_device *adev, ...@@ -89,11 +89,11 @@ static u64 amdgpu_vram_mgr_vis_size(struct amdgpu_device *adev,
uint64_t start = node->start << PAGE_SHIFT; uint64_t start = node->start << PAGE_SHIFT;
uint64_t end = (node->size + node->start) << PAGE_SHIFT; uint64_t end = (node->size + node->start) << PAGE_SHIFT;
if (start >= adev->mc.visible_vram_size) if (start >= adev->gmc.visible_vram_size)
return 0; return 0;
return (end > adev->mc.visible_vram_size ? return (end > adev->gmc.visible_vram_size ?
adev->mc.visible_vram_size : end) - start; adev->gmc.visible_vram_size : end) - start;
} }
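amdgpu_vram_mgr_vis_size() clamps a drm_mm node against gmc.visible_vram_size to report how many of its bytes are CPU-visible. The same clamping as a standalone program, with made-up numbers (a 256 MiB visible window and a 64 MiB node straddling its end):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

/* Mirror of the clamp in amdgpu_vram_mgr_vis_size(); values are made up. */
static uint64_t vis_size(uint64_t node_start, uint64_t node_pages,
			 uint64_t visible_vram_size)
{
	uint64_t start = node_start << PAGE_SHIFT;
	uint64_t end = (node_pages + node_start) << PAGE_SHIFT;

	if (start >= visible_vram_size)
		return 0;
	return (end > visible_vram_size ?
		visible_vram_size : end) - start;
}

int main(void)
{
	uint64_t bar = 256ULL << 20;	/* 256 MiB visible window */

	/* node: 64 MiB, starting 32 MiB below the end of the window */
	uint64_t start_page = (bar - (32ULL << 20)) >> PAGE_SHIFT;
	uint64_t pages = (64ULL << 20) >> PAGE_SHIFT;

	printf("visible bytes: %llu MiB\n",
	       (unsigned long long)(vis_size(start_page, pages, bar) >> 20));
	return 0;
}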
/** /**
......
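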
...@@ -1715,6 +1715,27 @@ static void cik_detect_hw_virtualization(struct amdgpu_device *adev) ...@@ -1715,6 +1715,27 @@ static void cik_detect_hw_virtualization(struct amdgpu_device *adev)
adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE; adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
} }
static void cik_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
if (!ring || !ring->funcs->emit_wreg) {
WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
RREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL);
} else {
amdgpu_ring_emit_wreg(ring, mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
}
}
static void cik_invalidate_hdp(struct amdgpu_device *adev,
struct amdgpu_ring *ring)
{
if (!ring || !ring->funcs->emit_wreg) {
WREG32(mmHDP_DEBUG0, 1);
RREG32(mmHDP_DEBUG0);
} else {
amdgpu_ring_emit_wreg(ring, mmHDP_DEBUG0, 1);
}
}
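The new flush_hdp/invalidate_hdp callbacks pick one of two paths: without a ring (or with a ring that cannot emit register writes) the HDP register is written over MMIO with a read-back, otherwise the write is emitted into the ring's command stream. A small userspace mock of that dispatch; none of the names or the register offset below are real amdgpu APIs, they only model the fall-back decision:

#include <stdint.h>
#include <stdio.h>

/* All names below are mock-ups, not amdgpu APIs. */
struct ring_funcs {
	void (*emit_wreg)(uint32_t reg, uint32_t val);
};

struct ring {
	const struct ring_funcs *funcs;
};

static void mmio_wreg(uint32_t reg, uint32_t val)
{
	printf("MMIO write  reg=0x%x val=%u (+ read-back)\n", reg, val);
}

static void ring_wreg(uint32_t reg, uint32_t val)
{
	printf("ring packet reg=0x%x val=%u\n", reg, val);
}

#define HDP_MEM_COHERENCY_FLUSH_CNTL 0x1520	/* placeholder offset */

/* Same decision as cik_flush_hdp(): prefer the ring, fall back to MMIO. */
static void flush_hdp(struct ring *ring)
{
	if (!ring || !ring->funcs->emit_wreg)
		mmio_wreg(HDP_MEM_COHERENCY_FLUSH_CNTL, 1);
	else
		ring->funcs->emit_wreg(HDP_MEM_COHERENCY_FLUSH_CNTL, 1);
}

int main(void)
{
	static const struct ring_funcs gfx_funcs = { .emit_wreg = ring_wreg };
	struct ring gfx = { .funcs = &gfx_funcs };

	flush_hdp(NULL);	/* CPU path: direct register write */
	flush_hdp(&gfx);	/* ring path: write emitted into the IB */
	return 0;
}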
static const struct amdgpu_asic_funcs cik_asic_funcs = static const struct amdgpu_asic_funcs cik_asic_funcs =
{ {
.read_disabled_bios = &cik_read_disabled_bios, .read_disabled_bios = &cik_read_disabled_bios,
...@@ -1726,6 +1747,8 @@ static const struct amdgpu_asic_funcs cik_asic_funcs = ...@@ -1726,6 +1747,8 @@ static const struct amdgpu_asic_funcs cik_asic_funcs =
.set_uvd_clocks = &cik_set_uvd_clocks, .set_uvd_clocks = &cik_set_uvd_clocks,
.set_vce_clocks = &cik_set_vce_clocks, .set_vce_clocks = &cik_set_vce_clocks,
.get_config_memsize = &cik_get_config_memsize, .get_config_memsize = &cik_get_config_memsize,
.flush_hdp = &cik_flush_hdp,
.invalidate_hdp = &cik_invalidate_hdp,
}; };
static int cik_common_early_init(void *handle) static int cik_common_early_init(void *handle)
......
...@@ -24,6 +24,8 @@ ...@@ -24,6 +24,8 @@
#ifndef __CIK_H__ #ifndef __CIK_H__
#define __CIK_H__ #define __CIK_H__
#define CIK_FLUSH_GPU_TLB_NUM_WREG 3
void cik_srbm_select(struct amdgpu_device *adev, void cik_srbm_select(struct amdgpu_device *adev,
u32 me, u32 pipe, u32 queue, u32 vmid); u32 me, u32 pipe, u32 queue, u32 vmid);
int cik_set_ip_blocks(struct amdgpu_device *adev); int cik_set_ip_blocks(struct amdgpu_device *adev);
......
...@@ -281,7 +281,7 @@ static void cik_ih_decode_iv(struct amdgpu_device *adev, ...@@ -281,7 +281,7 @@ static void cik_ih_decode_iv(struct amdgpu_device *adev,
entry->src_data[0] = dw[1] & 0xfffffff; entry->src_data[0] = dw[1] & 0xfffffff;
entry->ring_id = dw[2] & 0xff; entry->ring_id = dw[2] & 0xff;
entry->vmid = (dw[2] >> 8) & 0xff; entry->vmid = (dw[2] >> 8) & 0xff;
entry->pas_id = (dw[2] >> 16) & 0xffff; entry->pasid = (dw[2] >> 16) & 0xffff;
/* wptr/rptr are in bytes! */ /* wptr/rptr are in bytes! */
adev->irq.ih.rptr += 16; adev->irq.ih.rptr += 16;
......
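The pas_id -> pasid rename does not change the interrupt-vector layout: the second dword still packs the ring id, VMID and PASID into bits 7:0, 15:8 and 31:16. A standalone decode of one made-up dword, using exactly the masks and shifts from the hunk above:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Example IV dword 2 (made-up contents): ring 1, vmid 3, pasid 0x8001. */
	uint32_t dw2 = (0x8001u << 16) | (3u << 8) | 1u;

	uint32_t ring_id = dw2 & 0xff;           /* bits  7:0  */
	uint32_t vmid    = (dw2 >> 8) & 0xff;    /* bits 15:8  */
	uint32_t pasid   = (dw2 >> 16) & 0xffff; /* bits 31:16 */

	printf("ring_id=%u vmid=%u pasid=0x%x\n", ring_id, vmid, pasid);
	return 0;
}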
...@@ -260,7 +260,7 @@ static void cz_ih_decode_iv(struct amdgpu_device *adev, ...@@ -260,7 +260,7 @@ static void cz_ih_decode_iv(struct amdgpu_device *adev,
entry->src_data[0] = dw[1] & 0xfffffff; entry->src_data[0] = dw[1] & 0xfffffff;
entry->ring_id = dw[2] & 0xff; entry->ring_id = dw[2] & 0xff;
entry->vmid = (dw[2] >> 8) & 0xff; entry->vmid = (dw[2] >> 8) & 0xff;
entry->pas_id = (dw[2] >> 16) & 0xffff; entry->pasid = (dw[2] >> 16) & 0xffff;
/* wptr/rptr are in bytes! */ /* wptr/rptr are in bytes! */
adev->irq.ih.rptr += 16; adev->irq.ih.rptr += 16;
......
...@@ -260,7 +260,7 @@ static void iceland_ih_decode_iv(struct amdgpu_device *adev, ...@@ -260,7 +260,7 @@ static void iceland_ih_decode_iv(struct amdgpu_device *adev,
entry->src_data[0] = dw[1] & 0xfffffff; entry->src_data[0] = dw[1] & 0xfffffff;
entry->ring_id = dw[2] & 0xff; entry->ring_id = dw[2] & 0xff;
entry->vmid = (dw[2] >> 8) & 0xff; entry->vmid = (dw[2] >> 8) & 0xff;
entry->pas_id = (dw[2] >> 16) & 0xffff; entry->pasid = (dw[2] >> 16) & 0xffff;
/* wptr/rptr are in bytes! */ /* wptr/rptr are in bytes! */
adev->irq.ih.rptr += 16; adev->irq.ih.rptr += 16;
......
...@@ -3319,7 +3319,6 @@ const struct amd_ip_funcs kv_dpm_ip_funcs = { ...@@ -3319,7 +3319,6 @@ const struct amd_ip_funcs kv_dpm_ip_funcs = {
}; };
const struct amd_pm_funcs kv_dpm_funcs = { const struct amd_pm_funcs kv_dpm_funcs = {
.get_temperature = &kv_dpm_get_temp,
.pre_set_power_state = &kv_dpm_pre_set_power_state, .pre_set_power_state = &kv_dpm_pre_set_power_state,
.set_power_state = &kv_dpm_set_power_state, .set_power_state = &kv_dpm_set_power_state,
.post_set_power_state = &kv_dpm_post_set_power_state, .post_set_power_state = &kv_dpm_post_set_power_state,
......