Commit 4562236b authored by Harry Wentland, committed by Alex Deucher

drm/amd/dc: Add dc display driver (v2)

Supported DCE versions: 8.0, 10.0, 11.0, 11.2

v2: rebase against 4.11
Signed-off-by: Harry Wentland <harry.wentland@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Parent 9c5b2b0d
@@ -41,3 +41,4 @@ config DRM_AMDGPU_GART_DEBUGFS
pages. Uses more memory for housekeeping, enable only for debugging.
source "drivers/gpu/drm/amd/acp/Kconfig"
source "drivers/gpu/drm/amd/display/Kconfig"
@@ -3,13 +3,19 @@
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
FULL_AMD_PATH=$(src)/..
DISPLAY_FOLDER_NAME=display
FULL_AMD_DISPLAY_PATH = $(FULL_AMD_PATH)/$(DISPLAY_FOLDER_NAME)
ccflags-y := -I$(FULL_AMD_PATH)/include/asic_reg \
-I$(FULL_AMD_PATH)/include \
-I$(FULL_AMD_PATH)/amdgpu \
-I$(FULL_AMD_PATH)/scheduler \
-I$(FULL_AMD_PATH)/powerplay/inc \
-I$(FULL_AMD_PATH)/acp/include
-I$(FULL_AMD_PATH)/acp/include \
-I$(FULL_AMD_DISPLAY_PATH) \
-I$(FULL_AMD_DISPLAY_PATH)/include \
-I$(FULL_AMD_DISPLAY_PATH)/dc \
-I$(FULL_AMD_DISPLAY_PATH)/amdgpu_dm
amdgpu-y := amdgpu_drv.o
@@ -132,4 +138,13 @@ include $(FULL_AMD_PATH)/powerplay/Makefile
amdgpu-y += $(AMD_POWERPLAY_FILES)
ifneq ($(CONFIG_DRM_AMD_DC),)
RELATIVE_AMD_DISPLAY_PATH = ../$(DISPLAY_FOLDER_NAME)
include $(FULL_AMD_DISPLAY_PATH)/Makefile
amdgpu-y += $(AMD_DISPLAY_FILES)
endif
obj-$(CONFIG_DRM_AMDGPU)+= amdgpu.o
@@ -66,6 +66,7 @@
#include "amdgpu_vce.h"
#include "amdgpu_vcn.h"
#include "amdgpu_mn.h"
#include "amdgpu_dm.h"
#include "gpu_scheduler.h"
#include "amdgpu_virt.h"
@@ -101,6 +102,7 @@ extern int amdgpu_vm_fragment_size;
extern int amdgpu_vm_fault_stop;
extern int amdgpu_vm_debug;
extern int amdgpu_vm_update_mode;
extern int amdgpu_dc;
extern int amdgpu_sched_jobs;
extern int amdgpu_sched_hw_submission;
extern int amdgpu_no_evict;
@@ -1507,6 +1509,7 @@ struct amdgpu_device {
/* display */
bool enable_virtual_display;
struct amdgpu_mode_info mode_info;
/* For pre-DCE11. DCE11 and later are in "struct amdgpu_device->dm" */
struct work_struct hotplug_work;
struct amdgpu_irq_src crtc_irq;
struct amdgpu_irq_src pageflip_irq;
@@ -1563,6 +1566,9 @@ struct amdgpu_device {
/* GDS */
struct amdgpu_gds gds;
/* display related functionality */
struct amdgpu_display_manager dm;
struct amdgpu_ip_block ip_blocks[AMDGPU_MAX_IP_NUM];
int num_ip_blocks;
struct mutex mn_lock;
@@ -1624,6 +1630,9 @@ void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v);
u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index);
void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v);
bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type);
bool amdgpu_device_has_dc_support(struct amdgpu_device *adev);
/*
* Registers read & write functions.
*/
@@ -1884,5 +1893,11 @@ int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
uint64_t addr, struct amdgpu_bo **bo,
struct amdgpu_bo_va_mapping **mapping);
#if defined(CONFIG_DRM_AMD_DC)
int amdgpu_dm_display_resume(struct amdgpu_device *adev);
#else
static inline int amdgpu_dm_display_resume(struct amdgpu_device *adev) { return 0; }
#endif
#include "amdgpu_object.h"
#endif
@@ -31,6 +31,7 @@
#include <linux/debugfs.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/amdgpu_drm.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
@@ -1973,6 +1974,41 @@ static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
}
}
bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
{
switch (asic_type) {
#if defined(CONFIG_DRM_AMD_DC)
case CHIP_BONAIRE:
case CHIP_HAWAII:
case CHIP_CARRIZO:
case CHIP_STONEY:
case CHIP_POLARIS11:
case CHIP_POLARIS10:
case CHIP_TONGA:
case CHIP_FIJI:
#if defined(CONFIG_DRM_AMD_DC_PRE_VEGA)
return amdgpu_dc != 0;
#else
return amdgpu_dc > 0;
#endif
#endif
default:
return false;
}
}
/**
* amdgpu_device_has_dc_support - check if dc is supported
*
* @adev: amdgpu_device pointer
*
* Returns true for supported, false for not supported
*/
bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
{
return amdgpu_device_asic_has_dc_support(adev->asic_type);
}
/**
* amdgpu_device_init - initialize the driver
*
@@ -2168,7 +2204,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
goto failed;
}
/* init i2c buses */
amdgpu_atombios_i2c_init(adev);
if (!amdgpu_device_has_dc_support(adev))
amdgpu_atombios_i2c_init(adev);
}
/* Fence driver */
@@ -2296,7 +2333,8 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
adev->accel_working = false;
cancel_delayed_work_sync(&adev->late_init_work);
/* free i2c buses */
amdgpu_i2c_fini(adev);
if (!amdgpu_device_has_dc_support(adev))
amdgpu_i2c_fini(adev);
amdgpu_atombios_fini(adev);
kfree(adev->bios);
adev->bios = NULL;
@@ -2346,12 +2384,14 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
drm_kms_helper_poll_disable(dev);
/* turn off display hw */
drm_modeset_lock_all(dev);
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
if (!amdgpu_device_has_dc_support(adev)) {
/* turn off display hw */
drm_modeset_lock_all(dev);
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
}
drm_modeset_unlock_all(dev);
}
drm_modeset_unlock_all(dev);
amdgpu_amdkfd_suspend(adev);
@@ -2494,13 +2534,25 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
/* blat the mode back in */
if (fbcon) {
drm_helper_resume_force_mode(dev);
/* turn on display hw */
drm_modeset_lock_all(dev);
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
if (!amdgpu_device_has_dc_support(adev)) {
/* pre DCE11 */
drm_helper_resume_force_mode(dev);
/* turn on display hw */
drm_modeset_lock_all(dev);
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
}
drm_modeset_unlock_all(dev);
} else {
/*
* There is no equivalent atomic helper to turn on the
* display, so we define our own function for this.
* Once suspend/resume is supported by the atomic
* framework, this will be reworked.
*/
amdgpu_dm_display_resume(adev);
}
drm_modeset_unlock_all(dev);
}
drm_kms_helper_poll_enable(dev);
@@ -2517,7 +2569,10 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
#ifdef CONFIG_PM
dev->dev->power.disable_depth++;
#endif
drm_helper_hpd_irq_event(dev);
if (!amdgpu_device_has_dc_support(adev))
drm_helper_hpd_irq_event(dev);
else
drm_kms_helper_hotplug_event(dev);
#ifdef CONFIG_PM
dev->dev->power.disable_depth--;
#endif
@@ -2814,6 +2869,7 @@ int amdgpu_sriov_gpu_reset(struct amdgpu_device *adev, struct amdgpu_job *job)
*/
int amdgpu_gpu_reset(struct amdgpu_device *adev)
{
struct drm_atomic_state *state = NULL;
int i, r;
int resched;
bool need_full_reset, vram_lost = false;
@@ -2827,6 +2883,9 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
/* block TTM */
resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
/* store modesetting */
if (amdgpu_device_has_dc_support(adev))
state = drm_atomic_helper_suspend(adev->ddev);
/* block scheduler */
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
@@ -2944,7 +3003,11 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
}
}
drm_helper_resume_force_mode(adev->ddev);
if (amdgpu_device_has_dc_support(adev)) {
r = drm_atomic_helper_resume(adev->ddev, state);
amdgpu_dm_display_resume(adev);
} else
drm_helper_resume_force_mode(adev->ddev);
ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
if (r) {
......
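Taken together, the amdgpu_device.c hunks above follow one pattern: legacy (non-DC) paths keep the old CRTC/connector helpers, while DC paths go through the atomic helpers plus this patch's own amdgpu_dm_display_resume(). A minimal sketch of the reset-path pairing, assuming only the calls visible in this diff (example_reset_modeset is a hypothetical name, not driver code):

static int example_reset_modeset(struct amdgpu_device *adev)
{
	struct drm_atomic_state *state = NULL;
	int r = 0;

	/* store modesetting before the reset (DC path only) */
	if (amdgpu_device_has_dc_support(adev))
		state = drm_atomic_helper_suspend(adev->ddev);

	/* ... hardware reset happens here ... */

	if (amdgpu_device_has_dc_support(adev)) {
		r = drm_atomic_helper_resume(adev->ddev, state);
		/* no atomic helper to turn the display back on yet */
		amdgpu_dm_display_resume(adev);
	} else {
		drm_helper_resume_force_mode(adev->ddev);
	}
	return r;
}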
@@ -429,7 +429,7 @@ struct amdgpu_pm {
uint32_t fw_version;
uint32_t pcie_gen_mask;
uint32_t pcie_mlw_mask;
struct amd_pp_display_configuration pm_display_cfg;/* set by DAL */
struct amd_pp_display_configuration pm_display_cfg;/* set by dc */
};
#define R600_SSTU_DFLT 0
......
@@ -103,6 +103,7 @@ int amdgpu_vm_debug = 0;
int amdgpu_vram_page_split = 512;
int amdgpu_vm_update_mode = -1;
int amdgpu_exp_hw_support = 0;
int amdgpu_dc = -1;
int amdgpu_sched_jobs = 32;
int amdgpu_sched_hw_submission = 2;
int amdgpu_no_evict = 0;
@@ -207,6 +208,9 @@ module_param_named(vram_page_split, amdgpu_vram_page_split, int, 0444);
MODULE_PARM_DESC(exp_hw_support, "experimental hw support (1 = enable, 0 = disable (default))");
module_param_named(exp_hw_support, amdgpu_exp_hw_support, int, 0444);
MODULE_PARM_DESC(dc, "Display Core driver (1 = enable, 0 = disable, -1 = auto (default))");
module_param_named(dc, amdgpu_dc, int, 0444);
MODULE_PARM_DESC(sched_jobs, "the max number of jobs supported in the sw queue (default 32)");
module_param_named(sched_jobs, amdgpu_sched_jobs, int, 0444);
......
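For reference, the new amdgpu.dc parameter only feeds the amdgpu_device_asic_has_dc_support() check added above; a hedged restatement of how it resolves for a pre-Vega ASIC (example_pre_vega_dc_enabled is hypothetical, not driver code):

static bool example_pre_vega_dc_enabled(int dc_param, bool pre_vega_cfg)
{
	/* dc_param: 1 = enable, 0 = disable, -1 = auto (default) */
	if (pre_vega_cfg)
		return dc_param != 0; /* CONFIG_DRM_AMD_DC_PRE_VEGA: auto means on */
	return dc_param > 0;          /* otherwise: auto means off */
}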
@@ -42,11 +42,6 @@
this contains a helper + an amdgpu fb
the helper contains a pointer to the amdgpu framebuffer base class.
*/
struct amdgpu_fbdev {
struct drm_fb_helper helper;
struct amdgpu_framebuffer rfb;
struct amdgpu_device *adev;
};
static int
amdgpufb_open(struct fb_info *info, int user)
......
@@ -37,6 +37,10 @@
#include <linux/pm_runtime.h>
#ifdef CONFIG_DRM_AMD_DC
#include "amdgpu_dm_irq.h"
#endif
#define AMDGPU_WAIT_IDLE_TIMEOUT 200
/*
@@ -221,15 +225,6 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
spin_lock_init(&adev->irq.lock);
if (!adev->enable_virtual_display)
/* Disable vblank irqs aggressively for power-saving */
adev->ddev->vblank_disable_immediate = true;
r = drm_vblank_init(adev->ddev, adev->mode_info.num_crtc);
if (r) {
return r;
}
/* enable msi */
adev->irq.msi_enabled = false;
@@ -241,7 +236,21 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
}
}
INIT_WORK(&adev->hotplug_work, amdgpu_hotplug_work_func);
if (!amdgpu_device_has_dc_support(adev)) {
if (!adev->enable_virtual_display)
/* Disable vblank irqs aggressively for power-saving */
/* XXX: can this be enabled for DC? */
adev->ddev->vblank_disable_immediate = true;
r = drm_vblank_init(adev->ddev, adev->mode_info.num_crtc);
if (r)
return r;
/* pre DCE11 */
INIT_WORK(&adev->hotplug_work,
amdgpu_hotplug_work_func);
}
INIT_WORK(&adev->reset_work, amdgpu_irq_reset_work_func);
adev->irq.installed = true;
......
@@ -1034,7 +1034,7 @@ const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_METADATA, amdgpu_gem_metadata_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_VA, amdgpu_gem_va_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_OP, amdgpu_gem_op_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_USERPTR, amdgpu_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_USERPTR, amdgpu_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW)
};
const int amdgpu_max_kms_ioctl = ARRAY_SIZE(amdgpu_ioctls_kms);
......
@@ -38,11 +38,15 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_fb_helper.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <linux/hrtimer.h>
#include "amdgpu_irq.h"
#include <drm/drm_dp_mst_helper.h>
#include "modules/inc/mod_freesync.h"
struct amdgpu_bo;
struct amdgpu_device;
struct amdgpu_encoder;
@@ -292,6 +296,27 @@ struct amdgpu_display_funcs {
uint16_t connector_object_id,
struct amdgpu_hpd *hpd,
struct amdgpu_router *router);
/* used to enter or exit freesync mode */
int (*notify_freesync)(struct drm_device *dev, void *data,
struct drm_file *filp);
/* used to allow enabling of freesync mode */
int (*set_freesync_property)(struct drm_connector *connector,
struct drm_property *property,
uint64_t val);
};
struct amdgpu_framebuffer {
struct drm_framebuffer base;
struct drm_gem_object *obj;
};
struct amdgpu_fbdev {
struct drm_fb_helper helper;
struct amdgpu_framebuffer rfb;
struct list_head fbdev_list;
struct amdgpu_device *adev;
};
struct amdgpu_mode_info {
@@ -400,6 +425,11 @@ struct amdgpu_crtc {
/* for virtual dce */
struct hrtimer vblank_timer;
enum amdgpu_interrupt_state vsync_timer_enabled;
int otg_inst;
uint32_t flip_flags;
/* After Set Mode target will be non-NULL */
struct dc_target *target;
};
struct amdgpu_encoder_atom_dig {
@@ -489,6 +519,19 @@ enum amdgpu_connector_dither {
AMDGPU_FMT_DITHER_ENABLE = 1,
};
struct amdgpu_dm_dp_aux {
struct drm_dp_aux aux;
uint32_t link_index;
};
struct amdgpu_i2c_adapter {
struct i2c_adapter base;
struct amdgpu_display_manager *dm;
uint32_t link_index;
};
#define TO_DM_AUX(x) container_of((x), struct amdgpu_dm_dp_aux, aux)
struct amdgpu_connector {
struct drm_connector base;
uint32_t connector_id;
@@ -500,6 +543,14 @@ struct amdgpu_connector {
/* we need to mind the EDID between detect
and get modes due to analog/digital/tvencoder */
struct edid *edid;
/* number of modes generated from EDID at 'dc_sink' */
int num_modes;
/* The 'old' sink - before an HPD.
* The 'current' sink is in dc_link->sink. */
const struct dc_sink *dc_sink;
const struct dc_link *dc_link;
const struct dc_sink *dc_em_sink;
const struct dc_target *target;
void *con_priv;
bool dac_load_detect;
bool detected_by_load; /* if the connection status was determined by load */
@@ -510,11 +561,39 @@ struct amdgpu_connector {
enum amdgpu_connector_audio audio;
enum amdgpu_connector_dither dither;
unsigned pixelclock_for_modeset;
struct drm_dp_mst_topology_mgr mst_mgr;
struct amdgpu_dm_dp_aux dm_dp_aux;
struct drm_dp_mst_port *port;
struct amdgpu_connector *mst_port;
struct amdgpu_encoder *mst_encoder;
struct semaphore mst_sem;
/* TODO see if we can merge with ddc_bus or make a dm_connector */
struct amdgpu_i2c_adapter *i2c;
/* Monitor range limits */
int min_vfreq;
int max_vfreq;
int pixel_clock_mhz;
/* freesync caps */
struct mod_freesync_caps caps;
struct mutex hpd_lock;
};
struct amdgpu_framebuffer {
struct drm_framebuffer base;
struct drm_gem_object *obj;
/* TODO: start to use this struct and remove same field from base one */
struct amdgpu_mst_connector {
struct amdgpu_connector base;
struct drm_dp_mst_topology_mgr mst_mgr;
struct amdgpu_dm_dp_aux dm_dp_aux;
struct drm_dp_mst_port *port;
struct amdgpu_connector *mst_port;
bool is_mst_connector;
struct amdgpu_encoder *mst_encoder;
};
#define ENCODER_MODE_IS_DP(em) (((em) == ATOM_ENCODER_MODE_DP) || \
......
@@ -1467,7 +1467,7 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
list_for_each_entry(crtc,
&ddev->mode_config.crtc_list, head) {
amdgpu_crtc = to_amdgpu_crtc(crtc);
if (crtc->enabled) {
if (amdgpu_crtc->enabled) {
adev->pm.dpm.new_active_crtcs |= (1 << amdgpu_crtc->crtc_id);
adev->pm.dpm.new_active_crtc_count++;
}
......
@@ -65,6 +65,7 @@
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
#include "amdgpu_dm.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_powerplay.h"
#include "dce_virtual.h"
@@ -1900,6 +1901,10 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
if (adev->enable_virtual_display)
amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
else if (amdgpu_device_has_dc_support(adev))
amdgpu_ip_block_add(adev, &dm_ip_block);
#endif
else
amdgpu_ip_block_add(adev, &dce_v8_2_ip_block);
amdgpu_ip_block_add(adev, &gfx_v7_2_ip_block);
@@ -1914,6 +1919,10 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
if (adev->enable_virtual_display)
amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
else if (amdgpu_device_has_dc_support(adev))
amdgpu_ip_block_add(adev, &dm_ip_block);
#endif
else
amdgpu_ip_block_add(adev, &dce_v8_5_ip_block);
amdgpu_ip_block_add(adev, &gfx_v7_3_ip_block);
......
@@ -77,6 +77,7 @@
#endif
#include "dce_virtual.h"
#include "mxgpu_vi.h"
#include "amdgpu_dm.h"
/*
* Indirect registers accessor
@@ -1496,6 +1497,10 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
else if (amdgpu_device_has_dc_support(adev))
amdgpu_ip_block_add(adev, &dm_ip_block);
#endif
else
amdgpu_ip_block_add(adev, &dce_v10_1_ip_block);
amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
@@ -1512,6 +1517,10 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
else if (amdgpu_device_has_dc_support(adev))
amdgpu_ip_block_add(adev, &dm_ip_block);
#endif
else
amdgpu_ip_block_add(adev, &dce_v10_0_ip_block);
amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
@@ -1530,6 +1539,10 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
if (adev->enable_virtual_display)
amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
else if (amdgpu_device_has_dc_support(adev))
amdgpu_ip_block_add(adev, &dm_ip_block);
#endif
else
amdgpu_ip_block_add(adev, &dce_v11_2_ip_block);
amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
@@ -1544,6 +1557,10 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
if (adev->enable_virtual_display)
amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
else if (amdgpu_device_has_dc_support(adev))
amdgpu_ip_block_add(adev, &dm_ip_block);
#endif
else
amdgpu_ip_block_add(adev, &dce_v11_0_ip_block);
amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
@@ -1561,6 +1578,10 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
if (adev->enable_virtual_display)
amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
else if (amdgpu_device_has_dc_support(adev))
amdgpu_ip_block_add(adev, &dm_ip_block);
#endif
else
amdgpu_ip_block_add(adev, &dce_v11_0_ip_block);
amdgpu_ip_block_add(adev, &gfx_v8_1_ip_block);
......
menu "Display Engine Configuration"
depends on DRM && DRM_AMDGPU
config DRM_AMD_DC
bool "AMD DC - Enable new display engine"
default y
help
Choose this option if you want to use the new display engine
support for AMDGPU. This adds required support for Vega and
Raven ASICs.
config DRM_AMD_DC_PRE_VEGA
bool "DC support for Polaris and older ASICs"
default n
help
Choose this option to enable the new DC support for older asics
by default. This includes Polaris, Carrizo, Tonga, Bonaire,
and Hawaii.
config DEBUG_KERNEL_DC
bool "Enable kgdb break in DC"
depends on DRM_AMD_DC
help
Choose this option if you want to hit kgdb_break in assert.
endmenu
#
# Makefile for the DAL (Display Abstract Layer), which is a sub-component
# of the AMDGPU drm driver.
# It provides the HW control for display related functionalities.
AMDDALPATH = $(RELATIVE_AMD_DISPLAY_PATH)
subdir-ccflags-y += -I$(AMDDALPATH)/ -I$(AMDDALPATH)/include
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/inc/
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/inc/hw
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/inc
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/freesync
#TODO: remove when Timing Sync feature is complete
subdir-ccflags-y += -DBUILD_FEATURE_TIMING_SYNC=0
DAL_LIBS = amdgpu_dm dc modules/freesync
AMD_DAL = $(addsuffix /Makefile, $(addprefix $(FULL_AMD_DISPLAY_PATH)/,$(DAL_LIBS)))
include $(AMD_DAL)
#
# Makefile for the 'dm' sub-component of DAL.
# It provides the control and status of dm blocks.
AMDGPUDM = amdgpu_dm_types.o amdgpu_dm.o amdgpu_dm_irq.o amdgpu_dm_mst_types.o
ifneq ($(CONFIG_DRM_AMD_DC),)
AMDGPUDM += amdgpu_dm_services.o amdgpu_dm_helpers.o
endif
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc
AMDGPU_DM = $(addprefix $(AMDDALPATH)/amdgpu_dm/,$(AMDGPUDM))
AMD_DISPLAY_FILES += $(AMDGPU_DM)
(This diff is collapsed.)
/*
* Copyright 2015 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#ifndef __AMDGPU_DM_H__
#define __AMDGPU_DM_H__
/*
#include "linux/switch.h"
*/
/*
* This file contains the definition for amdgpu_display_manager
* and its API for amdgpu driver's use.
* This component provides all the display related functionality
* and this is the only component that calls DAL API.
* The API contained here is intended for amdgpu driver use.
* The API that is called directly from KMS framework is located
* in amdgpu_dm_kms.h file
*/
#define AMDGPU_DM_MAX_DISPLAY_INDEX 31
/*
#include "include/amdgpu_dal_power_if.h"
#include "amdgpu_dm_irq.h"
*/
#include "irq_types.h"
#include "signal_types.h"
/* Forward declarations */
struct amdgpu_device;
struct drm_device;
struct amdgpu_dm_irq_handler_data;
struct amdgpu_dm_prev_state {
struct drm_framebuffer *fb;
int32_t x;
int32_t y;
struct drm_display_mode mode;
};
struct common_irq_params {
struct amdgpu_device *adev;
enum dc_irq_source irq_src;
};
struct irq_list_head {
struct list_head head;
/* In case this interrupt needs post-processing, 'work' will be queued */
struct work_struct work;
};
struct amdgpu_display_manager {
struct dal *dal;
struct dc *dc;
struct cgs_device *cgs_device;
/* lock to be used when DAL is called from SYNC IRQ context */
spinlock_t dal_lock;
struct amdgpu_device *adev; /*AMD base driver*/
struct drm_device *ddev; /*DRM base driver*/
u16 display_indexes_num;
struct amdgpu_dm_prev_state prev_state;
/*
* 'irq_source_handler_table' holds a list of handlers
* per (DAL) IRQ source.
*
* Each IRQ source may need to be handled at different contexts.
* By 'context' we mean, for example:
* - The ISR context, which is the direct interrupt handler.
* - The 'deferred' context - this is the post-processing of the
* interrupt, but at a lower priority.
*
* Note that handlers are called in the same order as they were
* registered (FIFO).
*/
struct irq_list_head irq_handler_list_low_tab[DAL_IRQ_SOURCES_NUMBER];
struct list_head irq_handler_list_high_tab[DAL_IRQ_SOURCES_NUMBER];
struct common_irq_params
pflip_params[DC_IRQ_SOURCE_PFLIP_LAST - DC_IRQ_SOURCE_PFLIP_FIRST + 1];
struct common_irq_params
vupdate_params[DC_IRQ_SOURCE_VUPDATE6 - DC_IRQ_SOURCE_VUPDATE1 + 1];
/* this spin lock synchronizes access to 'irq_handler_list_table' */
spinlock_t irq_handler_list_table_lock;
/* Timer-related data. */
struct list_head timer_handler_list;
struct workqueue_struct *timer_workqueue;
/* Use dal_mutex for any activity which is NOT synchronized by
* DRM mode setting locks.
* For example: amdgpu_dm_hpd_low_irq() calls into DAL *without*
* DRM mode setting locks being acquired. This is where dal_mutex
* is acquired before calling into DAL. */
struct mutex dal_mutex;
struct backlight_device *backlight_dev;
const struct dc_link *backlight_link;
struct work_struct mst_hotplug_work;
struct mod_freesync *freesync_module;
};
/* basic init/fini API */
int amdgpu_dm_init(struct amdgpu_device *adev);
void amdgpu_dm_fini(struct amdgpu_device *adev);
void amdgpu_dm_destroy(void);
/* initializes drm_device display related structures, based on the information
* provided by DAL. The drm structures are: drm_crtc, drm_connector,
* drm_encoder, drm_mode_config
*
* Returns 0 on success
*/
int amdgpu_dm_initialize_drm_device(
struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
void amdgpu_dm_destroy_drm_device(
struct amdgpu_display_manager *dm);
/* Locking/Mutex */
bool amdgpu_dm_acquire_dal_lock(struct amdgpu_display_manager *dm);
bool amdgpu_dm_release_dal_lock(struct amdgpu_display_manager *dm);
/* Register "Backlight device" accessible by user-mode. */
void amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm);
extern const struct amdgpu_ip_block_version dm_ip_block;
void amdgpu_dm_update_connector_after_detect(
struct amdgpu_connector *aconnector);
struct amdgpu_connector *amdgpu_dm_find_first_crct_matching_connector(
struct drm_atomic_state *state,
struct drm_crtc *crtc,
bool from_state_var);
#endif /* __AMDGPU_DM_H__ */
/*
* Copyright 2015 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include <linux/string.h>
#include <linux/acpi.h>
#include <linux/version.h>
#include <linux/i2c.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_edid.h>
#include "dm_services.h"
#include "amdgpu.h"
#include "dc.h"
#include "amdgpu_dm.h"
#include "amdgpu_dm_irq.h"
#include "amdgpu_dm_types.h"
#include "dm_helpers.h"
/* dm_helpers_parse_edid_caps
*
* Parse edid caps
*
* @edid: [in] pointer to edid
* @edid_caps: [out] pointer to edid caps
* @return
* enum dc_edid_status
*/
enum dc_edid_status dm_helpers_parse_edid_caps(
struct dc_context *ctx,
const struct dc_edid *edid,
struct dc_edid_caps *edid_caps)
{
struct edid *edid_buf = (struct edid *) edid->raw_edid;
struct cea_sad *sads;
int sad_count = -1;
int sadb_count = -1;
int i = 0;
int j = 0;
uint8_t *sadb = NULL;
enum dc_edid_status result = EDID_OK;
if (!edid_caps || !edid)
return EDID_BAD_INPUT;
if (!drm_edid_is_valid(edid_buf))
result = EDID_BAD_CHECKSUM;
edid_caps->manufacturer_id = (uint16_t) edid_buf->mfg_id[0] |
((uint16_t) edid_buf->mfg_id[1])<<8;
edid_caps->product_id = (uint16_t) edid_buf->prod_code[0] |
((uint16_t) edid_buf->prod_code[1])<<8;
edid_caps->serial_number = edid_buf->serial;
edid_caps->manufacture_week = edid_buf->mfg_week;
edid_caps->manufacture_year = edid_buf->mfg_year;
/* One of the four detailed_timings stores the monitor name. It's
* stored in an array of length 13. */
for (i = 0; i < 4; i++) {
if (edid_buf->detailed_timings[i].data.other_data.type == 0xfc) {
while (j < 13 && edid_buf->detailed_timings[i].data.other_data.data.str.str[j]) {
if (edid_buf->detailed_timings[i].data.other_data.data.str.str[j] == '\n')
break;
edid_caps->display_name[j] =
edid_buf->detailed_timings[i].data.other_data.data.str.str[j];
j++;
}
}
}
edid_caps->edid_hdmi = drm_detect_hdmi_monitor(
(struct edid *) edid->raw_edid);
sad_count = drm_edid_to_sad((struct edid *) edid->raw_edid, &sads);
if (sad_count <= 0) {
DRM_INFO("SADs count is: %d, don't need to read it\n",
sad_count);
return result;
}
edid_caps->audio_mode_count = sad_count < DC_MAX_AUDIO_DESC_COUNT ? sad_count : DC_MAX_AUDIO_DESC_COUNT;
for (i = 0; i < edid_caps->audio_mode_count; ++i) {
struct cea_sad *sad = &sads[i];
edid_caps->audio_modes[i].format_code = sad->format;
edid_caps->audio_modes[i].channel_count = sad->channels;
edid_caps->audio_modes[i].sample_rate = sad->freq;
edid_caps->audio_modes[i].sample_size = sad->byte2;
}
sadb_count = drm_edid_to_speaker_allocation((struct edid *) edid->raw_edid, &sadb);
if (sadb_count < 0) {
DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sadb_count);
sadb_count = 0;
}
if (sadb_count)
edid_caps->speaker_flags = sadb[0];
else
edid_caps->speaker_flags = DEFAULT_SPEAKER_LOCATION;
kfree(sads);
kfree(sadb);
return result;
}
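A hypothetical caller of dm_helpers_parse_edid_caps(), sketched from its use above; the dc_edid length field and buffer size are assumptions about struct dc_edid (example_parse_caps is not part of this patch):

static void example_parse_caps(struct dc_context *ctx, const struct edid *drm_edid)
{
	struct dc_edid edid = { 0 };
	struct dc_edid_caps caps = { 0 };

	/* copy the raw DRM EDID into dc's buffer (field names assumed) */
	edid.length = (drm_edid->extensions + 1) * EDID_LENGTH;
	memcpy(edid.raw_edid, drm_edid, edid.length);

	if (dm_helpers_parse_edid_caps(ctx, &edid, &caps) == EDID_OK)
		DRM_INFO("monitor %s: %d audio modes\n",
			 caps.display_name, caps.audio_mode_count);
}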
static struct amdgpu_connector *get_connector_for_sink(
struct drm_device *dev,
const struct dc_sink *sink)
{
struct drm_connector *connector;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct amdgpu_connector *aconnector = to_amdgpu_connector(connector);
if (aconnector->dc_sink == sink)
return aconnector;
}
return NULL;
}
static struct amdgpu_connector *get_connector_for_link(
struct drm_device *dev,
const struct dc_link *link)
{
struct drm_connector *connector;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct amdgpu_connector *aconnector = to_amdgpu_connector(connector);
if (aconnector->dc_link == link)
return aconnector;
}
return NULL;
}
static void get_payload_table(
struct amdgpu_connector *aconnector,
struct dp_mst_stream_allocation_table *proposed_table)
{
int i;
struct drm_dp_mst_topology_mgr *mst_mgr =
&aconnector->mst_port->mst_mgr;
mutex_lock(&mst_mgr->payload_lock);
proposed_table->stream_count = 0;
/* number of active streams */
for (i = 0; i < mst_mgr->max_payloads; i++) {
if (mst_mgr->payloads[i].num_slots == 0)
break; /* end of vcp_id table */
ASSERT(mst_mgr->payloads[i].payload_state !=
DP_PAYLOAD_DELETE_LOCAL);
if (mst_mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL ||
mst_mgr->payloads[i].payload_state ==
DP_PAYLOAD_REMOTE) {
struct dp_mst_stream_allocation *sa =
&proposed_table->stream_allocations[
proposed_table->stream_count];
sa->slot_count = mst_mgr->payloads[i].num_slots;
sa->vcp_id = mst_mgr->proposed_vcpis[i]->vcpi;
proposed_table->stream_count++;
}
}
mutex_unlock(&mst_mgr->payload_lock);
}
/*
* Writes the payload allocation table to the immediate downstream device.
*/
bool dm_helpers_dp_mst_write_payload_allocation_table(
struct dc_context *ctx,
const struct dc_stream *stream,
struct dp_mst_stream_allocation_table *proposed_table,
bool enable)
{
struct amdgpu_device *adev = ctx->driver_context;
struct drm_device *dev = adev->ddev;
struct amdgpu_connector *aconnector;
struct drm_dp_mst_topology_mgr *mst_mgr;
struct drm_dp_mst_port *mst_port;
int slots = 0;
bool ret;
int clock;
int bpp = 0;
int pbn = 0;
aconnector = get_connector_for_sink(dev, stream->sink);
if (!aconnector || !aconnector->mst_port)
return false;
mst_mgr = &aconnector->mst_port->mst_mgr;
if (!mst_mgr->mst_state)
return false;
mst_port = aconnector->port;
if (enable) {
clock = stream->timing.pix_clk_khz;
switch (stream->timing.display_color_depth) {
case COLOR_DEPTH_666:
bpp = 6;
break;
case COLOR_DEPTH_888:
bpp = 8;
break;
case COLOR_DEPTH_101010:
bpp = 10;
break;
case COLOR_DEPTH_121212:
bpp = 12;
break;
case COLOR_DEPTH_141414:
bpp = 14;
break;
case COLOR_DEPTH_161616:
bpp = 16;
break;
default:
ASSERT(bpp != 0);
break;
}
bpp = bpp * 3;
/* TODO need to know link rate */
pbn = drm_dp_calc_pbn_mode(clock, bpp);
slots = drm_dp_find_vcpi_slots(mst_mgr, pbn);
ret = drm_dp_mst_allocate_vcpi(mst_mgr, mst_port, pbn, slots);
if (!ret)
return false;
} else {
drm_dp_mst_reset_vcpi_slots(mst_mgr, mst_port);
}
ret = drm_dp_update_payload_part1(mst_mgr);
/* mst_mgr->payloads holds the VC payloads used to notify the MST branch
* device via DPCD or AUX messages. Slots 1-63 are allocated sequentially
* for each stream, and the AMD ASIC stream slot allocation should follow
* the same sequence. Copy the DRM MST allocation to dc. */
get_payload_table(aconnector, proposed_table);
if (ret)
return false;
return true;
}
/*
* Polls for ACT (allocation change trigger) handled and sends
* ALLOCATE_PAYLOAD message.
*/
bool dm_helpers_dp_mst_poll_for_allocation_change_trigger(
struct dc_context *ctx,
const struct dc_stream *stream)
{
struct amdgpu_device *adev = ctx->driver_context;
struct drm_device *dev = adev->ddev;
struct amdgpu_connector *aconnector;
struct drm_dp_mst_topology_mgr *mst_mgr;
int ret;
aconnector = get_connector_for_sink(dev, stream->sink);
if (!aconnector || !aconnector->mst_port)
return false;
mst_mgr = &aconnector->mst_port->mst_mgr;
if (!mst_mgr->mst_state)
return false;
ret = drm_dp_check_act_status(mst_mgr);
if (ret)
return false;
return true;
}
bool dm_helpers_dp_mst_send_payload_allocation(
struct dc_context *ctx,
const struct dc_stream *stream,
bool enable)
{
struct amdgpu_device *adev = ctx->driver_context;
struct drm_device *dev = adev->ddev;
struct amdgpu_connector *aconnector;
struct drm_dp_mst_topology_mgr *mst_mgr;
struct drm_dp_mst_port *mst_port;
int ret;
aconnector = get_connector_for_sink(dev, stream->sink);
if (!aconnector || !aconnector->mst_port)
return false;
mst_port = aconnector->port;
mst_mgr = &aconnector->mst_port->mst_mgr;
if (!mst_mgr->mst_state)
return false;
ret = drm_dp_update_payload_part2(mst_mgr);
if (ret)
return false;
if (!enable)
drm_dp_mst_deallocate_vcpi(mst_mgr, mst_port);
return true;
}
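A hedged sketch of the order in which dc is expected to call the three MST payload helpers above when enabling a stream; the dc-side caller is not part of this patch and example_enable_mst_payload is a hypothetical name:

static bool example_enable_mst_payload(struct dc_context *ctx,
				       const struct dc_stream *stream)
{
	struct dp_mst_stream_allocation_table table = { 0 };

	/* 1. allocate a VCPI and write payload table part 1 */
	if (!dm_helpers_dp_mst_write_payload_allocation_table(ctx, stream,
							      &table, true))
		return false;

	/* 2. wait for the branch device to signal ACT */
	if (!dm_helpers_dp_mst_poll_for_allocation_change_trigger(ctx, stream))
		return false;

	/* 3. payload table part 2 / ALLOCATE_PAYLOAD */
	return dm_helpers_dp_mst_send_payload_allocation(ctx, stream, true);
}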
bool dm_helpers_dp_mst_start_top_mgr(
struct dc_context *ctx,
const struct dc_link *link,
bool boot)
{
struct amdgpu_device *adev = ctx->driver_context;
struct drm_device *dev = adev->ddev;
struct amdgpu_connector *aconnector = get_connector_for_link(dev, link);
if (!aconnector) {
DRM_ERROR("Failed to found connector for link!");
return false;
}
if (boot) {
DRM_INFO("DM_MST: Differing MST start on aconnector: %p [id: %d]\n",
aconnector, aconnector->base.base.id);
return true;
}
DRM_INFO("DM_MST: starting TM on aconnector: %p [id: %d]\n",
aconnector, aconnector->base.base.id);
return (drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true) == 0);
}
void dm_helpers_dp_mst_stop_top_mgr(
struct dc_context *ctx,
const struct dc_link *link)
{
struct amdgpu_device *adev = ctx->driver_context;
struct drm_device *dev = adev->ddev;
struct amdgpu_connector *aconnector = get_connector_for_link(dev, link);
if (!aconnector) {
DRM_ERROR("Failed to found connector for link!");
return;
}
DRM_INFO("DM_MST: stopping TM on aconnector: %p [id: %d]\n",
aconnector, aconnector->base.base.id);
if (aconnector->mst_mgr.mst_state == true)
drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, false);
}
bool dm_helpers_dp_read_dpcd(
struct dc_context *ctx,
const struct dc_link *link,
uint32_t address,
uint8_t *data,
uint32_t size)
{
struct amdgpu_device *adev = ctx->driver_context;
struct drm_device *dev = adev->ddev;
struct amdgpu_connector *aconnector = get_connector_for_link(dev, link);
if (!aconnector) {
DRM_ERROR("Failed to found connector for link!");
return false;
}
return drm_dp_dpcd_read(&aconnector->dm_dp_aux.aux, address,
data, size) > 0;
}
bool dm_helpers_dp_write_dpcd(
struct dc_context *ctx,
const struct dc_link *link,
uint32_t address,
const uint8_t *data,
uint32_t size)
{
struct amdgpu_device *adev = ctx->driver_context;
struct drm_device *dev = adev->ddev;
struct amdgpu_connector *aconnector = get_connector_for_link(dev, link);
if (!aconnector) {
DRM_ERROR("Failed to found connector for link!");
return false;
}
return drm_dp_dpcd_write(&aconnector->dm_dp_aux.aux,
address, (uint8_t *)data, size) > 0;
}
bool dm_helpers_submit_i2c(
struct dc_context *ctx,
const struct dc_link *link,
struct i2c_command *cmd)
{
struct amdgpu_device *adev = ctx->driver_context;
struct drm_device *dev = adev->ddev;
struct amdgpu_connector *aconnector = get_connector_for_link(dev, link);
struct i2c_msg *msgs;
int i = 0;
int num = cmd->number_of_payloads;
bool result;
if (!aconnector) {
DRM_ERROR("Failed to found connector for link!");
return false;
}
msgs = kzalloc(num * sizeof(struct i2c_msg), GFP_KERNEL);
if (!msgs)
return false;
for (i = 0; i < num; i++) {
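/* reads are marked I2C_M_RD; write payloads use flags == 0 */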
msgs[i].flags = cmd->payloads[i].write ? 0 : I2C_M_RD;
msgs[i].addr = cmd->payloads[i].address;
msgs[i].len = cmd->payloads[i].length;
msgs[i].buf = cmd->payloads[i].data;
}
result = i2c_transfer(&aconnector->i2c->base, msgs, num) == num;
kfree(msgs);
return result;
}
(This diff is collapsed.)
/*
* Copyright 2015 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#ifndef __AMDGPU_DM_IRQ_H__
#define __AMDGPU_DM_IRQ_H__
#include "irq_types.h" /* DAL irq definitions */
/*
* Display Manager IRQ-related interfaces (for use by DAL).
*/
/**
* amdgpu_dm_irq_init - Initialize internal structures of 'amdgpu_dm_irq'.
*
* This function should be called exactly once - during DM initialization.
*
* Returns:
* 0 - success
* non-zero - error
*/
int amdgpu_dm_irq_init(
struct amdgpu_device *adev);
/**
* amdgpu_dm_irq_fini - deallocate internal structures of 'amdgpu_dm_irq'.
*
* This function should be called exactly once - during DM destruction.
*
*/
void amdgpu_dm_irq_fini(
struct amdgpu_device *adev);
/**
* amdgpu_dm_irq_register_interrupt - register irq handler for Display block.
*
* @adev: AMD DRM device
* @int_params: parameters for the irq
* @ih: pointer to the irq hander function
* @handler_args: arguments which will be passed to ih
*
* Returns:
* IRQ Handler Index on success.
* NULL on failure.
*
* Cannot be called from an interrupt handler.
*/
void *amdgpu_dm_irq_register_interrupt(
struct amdgpu_device *adev,
struct dc_interrupt_params *int_params,
void (*ih)(void *),
void *handler_args);
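A hypothetical registration sketch for the API documented above; INTERRUPT_LOW_IRQ_CONTEXT and the int_context field appear elsewhere in this patch, while the rest of struct dc_interrupt_params is left to the caller:

static void example_handler(void *data)
{
	/* 'data' is the handler_args pointer passed at registration;
	 * this runs as low-priority post-processing, outside the ISR. */
}

static void example_register(struct amdgpu_device *adev,
			     struct dc_interrupt_params *params)
{
	void *handle;

	params->int_context = INTERRUPT_LOW_IRQ_CONTEXT;
	handle = amdgpu_dm_irq_register_interrupt(adev, params,
						  example_handler, adev);
	if (!handle)
		DRM_ERROR("DM_IRQ: failed to register handler\n");
}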
/**
* amdgpu_dm_irq_unregister_interrupt - unregister handler which was registered
* by amdgpu_dm_irq_register_interrupt().
*
* @adev: AMD DRM device.
* @ih_index: irq handler index which was returned by
* amdgpu_dm_irq_register_interrupt
*/
void amdgpu_dm_irq_unregister_interrupt(
struct amdgpu_device *adev,
enum dc_irq_source irq_source,
void *ih_index);
void amdgpu_dm_irq_register_timer(
struct amdgpu_device *adev,
struct dc_timer_interrupt_params *int_params,
interrupt_handler ih,
void *args);
/**
* amdgpu_dm_irq_handler
* Generic IRQ handler, calls all registered high irq work immediately, and
* schedules work for low irq
*/
int amdgpu_dm_irq_handler(
struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry);
void amdgpu_dm_set_irq_funcs(struct amdgpu_device *adev);
void amdgpu_dm_hpd_init(struct amdgpu_device *adev);
void amdgpu_dm_hpd_fini(struct amdgpu_device *adev);
/**
* amdgpu_dm_irq_suspend - disable ASIC interrupt during suspend.
*
*/
int amdgpu_dm_irq_suspend(struct amdgpu_device *adev);
/**
* amdgpu_dm_irq_resume_early - enable HPDRX ASIC interrupts during resume.
* amdgpu_dm_irq_resume - enable ASIC interrupt during resume.
*
*/
int amdgpu_dm_irq_resume_early(struct amdgpu_device *adev);
int amdgpu_dm_irq_resume(struct amdgpu_device *adev);
#endif /* __AMDGPU_DM_IRQ_H__ */
/*
* Copyright 2012-15 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include <linux/version.h>
#include <drm/drm_atomic_helper.h>
#include "dm_services.h"
#include "amdgpu.h"
#include "amdgpu_dm_types.h"
#include "amdgpu_dm_mst_types.h"
#include "dc.h"
#include "dm_helpers.h"
/* #define TRACE_DPCD */
#ifdef TRACE_DPCD
#define SIDE_BAND_MSG(address) (address >= DP_SIDEBAND_MSG_DOWN_REQ_BASE && address < DP_SINK_COUNT_ESI)
static inline char *side_band_msg_type_to_str(uint32_t address)
{
static char str[10] = {0};
if (address < DP_SIDEBAND_MSG_UP_REP_BASE)
strcpy(str, "DOWN_REQ");
else if (address < DP_SIDEBAND_MSG_DOWN_REP_BASE)
strcpy(str, "UP_REP");
else if (address < DP_SIDEBAND_MSG_UP_REQ_BASE)
strcpy(str, "DOWN_REP");
else
strcpy(str, "UP_REQ");
return str;
}
void log_dpcd(uint8_t type,
uint32_t address,
uint8_t *data,
uint32_t size,
bool res)
{
DRM_DEBUG_KMS("Op: %s, addr: %04x, SideBand Msg: %s, Op res: %s\n",
(type == DP_AUX_NATIVE_READ) ||
(type == DP_AUX_I2C_READ) ?
"Read" : "Write",
address,
SIDE_BAND_MSG(address) ?
side_band_msg_type_to_str(address) : "Nop",
res ? "OK" : "Fail");
if (res) {
print_hex_dump(KERN_INFO, "Body: ", DUMP_PREFIX_NONE, 16, 1, data, size, false);
}
}
#endif
static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
struct pci_dev *pdev = to_pci_dev(aux->dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
struct amdgpu_device *adev = drm_dev->dev_private;
struct dc *dc = adev->dm.dc;
bool res;
switch (msg->request) {
case DP_AUX_NATIVE_READ:
res = dc_read_dpcd(
dc,
TO_DM_AUX(aux)->link_index,
msg->address,
msg->buffer,
msg->size);
break;
case DP_AUX_NATIVE_WRITE:
res = dc_write_dpcd(
dc,
TO_DM_AUX(aux)->link_index,
msg->address,
msg->buffer,
msg->size);
break;
default:
return 0;
}
#ifdef TRACE_DPCD
log_dpcd(msg->request,
msg->address,
msg->buffer,
msg->size,
res);
#endif
return msg->size;
}
static enum drm_connector_status
dm_dp_mst_detect(struct drm_connector *connector, bool force)
{
struct amdgpu_connector *aconnector = to_amdgpu_connector(connector);
struct amdgpu_connector *master = aconnector->mst_port;
enum drm_connector_status status =
drm_dp_mst_detect_port(
connector,
&master->mst_mgr,
aconnector->port);
/*
* we do not want to mark this connector as connected until we have
* an EDID for it
*/
if (status == connector_status_connected &&
!aconnector->port->cached_edid)
status = connector_status_disconnected;
return status;
}
static void
dm_dp_mst_connector_destroy(struct drm_connector *connector)
{
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
struct amdgpu_encoder *amdgpu_encoder = amdgpu_connector->mst_encoder;
drm_encoder_cleanup(&amdgpu_encoder->base);
kfree(amdgpu_encoder);
drm_connector_cleanup(connector);
kfree(amdgpu_connector);
}
static const struct drm_connector_funcs dm_dp_mst_connector_funcs = {
.detect = dm_dp_mst_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = dm_dp_mst_connector_destroy,
.reset = amdgpu_dm_connector_funcs_reset,
.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.atomic_set_property = amdgpu_dm_connector_atomic_set_property
};
static int dm_dp_mst_get_modes(struct drm_connector *connector)
{
struct amdgpu_connector *aconnector = to_amdgpu_connector(connector);
int ret = 0;
ret = drm_add_edid_modes(&aconnector->base, aconnector->edid);
drm_edid_to_eld(&aconnector->base, aconnector->edid);
return ret;
}
static struct drm_encoder *dm_mst_best_encoder(struct drm_connector *connector)
{
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
return &amdgpu_connector->mst_encoder->base;
}
static const struct drm_connector_helper_funcs dm_dp_mst_connector_helper_funcs = {
.get_modes = dm_dp_mst_get_modes,
.mode_valid = amdgpu_dm_connector_mode_valid,
.best_encoder = dm_mst_best_encoder,
};
static struct amdgpu_encoder *
dm_dp_create_fake_mst_encoder(struct amdgpu_connector *connector)
{
struct drm_device *dev = connector->base.dev;
struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_encoder *amdgpu_encoder;
struct drm_encoder *encoder;
const struct drm_connector_helper_funcs *connector_funcs =
connector->base.helper_private;
struct drm_encoder *enc_master =
connector_funcs->best_encoder(&connector->base);
DRM_DEBUG_KMS("enc master is %p\n", enc_master);
amdgpu_encoder = kzalloc(sizeof(*amdgpu_encoder), GFP_KERNEL);
if (!amdgpu_encoder)
return NULL;
encoder = &amdgpu_encoder->base;
encoder->possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
drm_encoder_init(
dev,
&amdgpu_encoder->base,
NULL,
DRM_MODE_ENCODER_DPMST,
NULL);
drm_encoder_helper_add(encoder, &amdgpu_dm_encoder_helper_funcs);
return amdgpu_encoder;
}
static struct drm_connector *dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port,
const char *pathprop)
{
struct amdgpu_connector *master = container_of(mgr, struct amdgpu_connector, mst_mgr);
struct drm_device *dev = master->base.dev;
struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_connector *aconnector;
struct drm_connector *connector;
drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
aconnector = to_amdgpu_connector(connector);
if (aconnector->mst_port == master
&& !aconnector->port) {
DRM_INFO("DM_MST: reusing connector: %p [id: %d] [master: %p]\n",
aconnector, connector->base.id, aconnector->mst_port);
aconnector->port = port;
drm_mode_connector_set_path_property(connector, pathprop);
drm_modeset_unlock(&dev->mode_config.connection_mutex);
return &aconnector->base;
}
}
drm_modeset_unlock(&dev->mode_config.connection_mutex);
aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
if (!aconnector)
return NULL;
connector = &aconnector->base;
aconnector->port = port;
aconnector->mst_port = master;
if (drm_connector_init(
dev,
connector,
&dm_dp_mst_connector_funcs,
DRM_MODE_CONNECTOR_DisplayPort)) {
kfree(aconnector);
return NULL;
}
drm_connector_helper_add(connector, &dm_dp_mst_connector_helper_funcs);
amdgpu_dm_connector_init_helper(
&adev->dm,
aconnector,
DRM_MODE_CONNECTOR_DisplayPort,
master->dc_link,
master->connector_id);
aconnector->mst_encoder = dm_dp_create_fake_mst_encoder(master);
/*
* TODO: understand why this one is needed
*/
drm_object_attach_property(
&connector->base,
dev->mode_config.path_property,
0);
drm_object_attach_property(
&connector->base,
dev->mode_config.tile_property,
0);
drm_mode_connector_set_path_property(connector, pathprop);
/*
* Initialize connector state before adding the connector to drm and
* framebuffer lists
*/
amdgpu_dm_connector_funcs_reset(connector);
DRM_INFO("DM_MST: added connector: %p [id: %d] [master: %p]\n",
aconnector, connector->base.id, aconnector->mst_port);
DRM_DEBUG_KMS(":%d\n", connector->base.id);
return connector;
}
static void dm_dp_destroy_mst_connector(
struct drm_dp_mst_topology_mgr *mgr,
struct drm_connector *connector)
{
struct amdgpu_connector *aconnector = to_amdgpu_connector(connector);
DRM_INFO("DM_MST: Disabling connector: %p [id: %d] [master: %p]\n",
aconnector, connector->base.id, aconnector->mst_port);
aconnector->port = NULL;
if (aconnector->dc_sink) {
amdgpu_dm_remove_sink_from_freesync_module(connector);
dc_link_remove_remote_sink(aconnector->dc_link, aconnector->dc_sink);
dc_sink_release(aconnector->dc_sink);
aconnector->dc_sink = NULL;
}
if (aconnector->edid) {
kfree(aconnector->edid);
aconnector->edid = NULL;
}
drm_mode_connector_update_edid_property(
&aconnector->base,
NULL);
}
static void dm_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
{
struct amdgpu_connector *master = container_of(mgr, struct amdgpu_connector, mst_mgr);
struct drm_device *dev = master->base.dev;
struct amdgpu_device *adev = dev->dev_private;
struct drm_connector *connector;
struct amdgpu_connector *aconnector;
struct edid *edid;
drm_modeset_lock_all(dev);
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
aconnector = to_amdgpu_connector(connector);
if (aconnector->port &&
aconnector->port->pdt != DP_PEER_DEVICE_NONE &&
aconnector->port->pdt != DP_PEER_DEVICE_MST_BRANCHING &&
!aconnector->dc_sink) {
/*
* This is the plug-in case, where the port has been created but
* the sink hasn't been created yet
*/
if (!aconnector->edid) {
struct dc_sink_init_data init_params = {
.link = aconnector->dc_link,
.sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST};
edid = drm_dp_mst_get_edid(connector, &aconnector->mst_port->mst_mgr, aconnector->port);
if (!edid) {
drm_mode_connector_update_edid_property(
&aconnector->base,
NULL);
continue;
}
aconnector->edid = edid;
aconnector->dc_sink = dc_link_add_remote_sink(
aconnector->dc_link,
(uint8_t *)edid,
(edid->extensions + 1) * EDID_LENGTH,
&init_params);
if (aconnector->dc_sink)
amdgpu_dm_add_sink_to_freesync_module(
connector,
edid);
dm_restore_drm_connector_state(connector->dev, connector);
} else
edid = aconnector->edid;
DRM_DEBUG_KMS("edid retrieved %p\n", edid);
drm_mode_connector_update_edid_property(
&aconnector->base,
aconnector->edid);
}
}
drm_modeset_unlock_all(dev);
schedule_work(&adev->dm.mst_hotplug_work);
}
static void dm_dp_mst_register_connector(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct amdgpu_device *adev = dev->dev_private;
int i;
drm_modeset_lock_all(dev);
if (adev->mode_info.rfbdev) {
/* Do not add if already registered in the past */
for (i = 0; i < adev->mode_info.rfbdev->helper.connector_count; i++) {
if (adev->mode_info.rfbdev->helper.connector_info[i]->connector
== connector) {
drm_modeset_unlock_all(dev);
return;
}
}
drm_fb_helper_add_one_connector(&adev->mode_info.rfbdev->helper, connector);
}
else
DRM_ERROR("adev->mode_info.rfbdev is NULL\n");
drm_modeset_unlock_all(dev);
drm_connector_register(connector);
}
static const struct drm_dp_mst_topology_cbs dm_mst_cbs = {
.add_connector = dm_dp_add_mst_connector,
.destroy_connector = dm_dp_destroy_mst_connector,
.hotplug = dm_dp_mst_hotplug,
.register_connector = dm_dp_mst_register_connector
};
void amdgpu_dm_initialize_mst_connector(
struct amdgpu_display_manager *dm,
struct amdgpu_connector *aconnector)
{
aconnector->dm_dp_aux.aux.name = "dmdc";
aconnector->dm_dp_aux.aux.dev = dm->adev->dev;
aconnector->dm_dp_aux.aux.transfer = dm_dp_aux_transfer;
aconnector->dm_dp_aux.link_index = aconnector->connector_id;
drm_dp_aux_register(&aconnector->dm_dp_aux.aux);
aconnector->mst_mgr.cbs = &dm_mst_cbs;
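/* assumed drm_dp_mst_topology_mgr_init() argument order: 16-byte max
 * DPCD transactions, up to 4 MST payloads */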
drm_dp_mst_topology_mgr_init(
&aconnector->mst_mgr,
dm->adev->ddev,
&aconnector->dm_dp_aux.aux,
16,
4,
aconnector->connector_id);
}
/*
* Copyright 2012-15 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#ifndef __DAL_AMDGPU_DM_MST_TYPES_H__
#define __DAL_AMDGPU_DM_MST_TYPES_H__
struct amdgpu_display_manager;
struct amdgpu_connector;
void amdgpu_dm_initialize_mst_connector(
struct amdgpu_display_manager *dm,
struct amdgpu_connector *aconnector);
#endif
/*
* Copyright 2015 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include <linux/string.h>
#include <linux/acpi.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/amdgpu_drm.h>
#include "dm_services.h"
#include "amdgpu.h"
#include "amdgpu_dm.h"
#include "amdgpu_dm_irq.h"
#include "amdgpu_dm_types.h"
#include "amdgpu_pm.h"
#define dm_alloc(size) kzalloc(size, GFP_KERNEL)
#define dm_realloc(ptr, size) krealloc(ptr, size, GFP_KERNEL)
#define dm_free(ptr) kfree(ptr)
/******************************************************************************
* IRQ Interfaces.
*****************************************************************************/
void dal_register_timer_interrupt(
struct dc_context *ctx,
struct dc_timer_interrupt_params *int_params,
interrupt_handler ih,
void *args)
{
struct amdgpu_device *adev = ctx->driver_context;
if (!adev || !int_params) {
DRM_ERROR("DM_IRQ: invalid input!\n");
return;
}
if (int_params->int_context != INTERRUPT_LOW_IRQ_CONTEXT) {
/* only low irq ctx is supported. */
DRM_ERROR("DM_IRQ: invalid context: %d!\n",
int_params->int_context);
return;
}
amdgpu_dm_irq_register_timer(adev, int_params, ih, args);
}
void dal_isr_acquire_lock(struct dc_context *ctx)
{
/*TODO*/
}
void dal_isr_release_lock(struct dc_context *ctx)
{
/*TODO*/
}
/******************************************************************************
* End-of-IRQ Interfaces.
*****************************************************************************/
bool dm_get_platform_info(struct dc_context *ctx,
struct platform_info_params *params)
{
/*TODO*/
return false;
}
bool dm_write_persistent_data(struct dc_context *ctx,
const struct dc_sink *sink,
const char *module_name,
const char *key_name,
void *params,
unsigned int size,
struct persistent_data_flag *flag)
{
/*TODO implement*/
return false;
}
bool dm_read_persistent_data(struct dc_context *ctx,
const struct dc_sink *sink,
const char *module_name,
const char *key_name,
void *params,
unsigned int size,
struct persistent_data_flag *flag)
{
/*TODO implement*/
return false;
}
void dm_delay_in_microseconds(struct dc_context *ctx,
unsigned int microSeconds)
{
/*TODO implement*/
return;
}
/**** power component interfaces ****/
bool dm_pp_pre_dce_clock_change(
struct dc_context *ctx,
struct dm_pp_gpu_clock_range *requested_state,
struct dm_pp_gpu_clock_range *actual_state)
{
/*TODO*/
return false;
}
bool dm_pp_apply_safe_state(
const struct dc_context *ctx)
{
struct amdgpu_device *adev = ctx->driver_context;
if (adev->pm.dpm_enabled) {
		/* TODO: does this require sending a PreModeChange event to PPLIB? */
}
return true;
}
bool dm_pp_apply_display_requirements(
const struct dc_context *ctx,
const struct dm_pp_display_configuration *pp_display_cfg)
{
struct amdgpu_device *adev = ctx->driver_context;
if (adev->pm.dpm_enabled) {
memset(&adev->pm.pm_display_cfg, 0,
sizeof(adev->pm.pm_display_cfg));
adev->pm.pm_display_cfg.cpu_cc6_disable =
pp_display_cfg->cpu_cc6_disable;
adev->pm.pm_display_cfg.cpu_pstate_disable =
pp_display_cfg->cpu_pstate_disable;
adev->pm.pm_display_cfg.cpu_pstate_separation_time =
pp_display_cfg->cpu_pstate_separation_time;
adev->pm.pm_display_cfg.nb_pstate_switch_disable =
pp_display_cfg->nb_pstate_switch_disable;
adev->pm.pm_display_cfg.num_display =
pp_display_cfg->display_count;
adev->pm.pm_display_cfg.num_path_including_non_display =
pp_display_cfg->display_count;
adev->pm.pm_display_cfg.min_core_set_clock =
pp_display_cfg->min_engine_clock_khz/10;
adev->pm.pm_display_cfg.min_core_set_clock_in_sr =
pp_display_cfg->min_engine_clock_deep_sleep_khz/10;
adev->pm.pm_display_cfg.min_mem_set_clock =
pp_display_cfg->min_memory_clock_khz/10;
adev->pm.pm_display_cfg.multi_monitor_in_sync =
pp_display_cfg->all_displays_in_sync;
adev->pm.pm_display_cfg.min_vblank_time =
pp_display_cfg->avail_mclk_switch_time_us;
adev->pm.pm_display_cfg.display_clk =
pp_display_cfg->disp_clk_khz/10;
adev->pm.pm_display_cfg.dce_tolerable_mclk_in_active_latency =
pp_display_cfg->avail_mclk_switch_time_in_disp_active_us;
adev->pm.pm_display_cfg.crtc_index = pp_display_cfg->crtc_index;
adev->pm.pm_display_cfg.line_time_in_us =
pp_display_cfg->line_time_in_us;
adev->pm.pm_display_cfg.vrefresh = pp_display_cfg->disp_configs[0].v_refresh;
adev->pm.pm_display_cfg.crossfire_display_index = -1;
adev->pm.pm_display_cfg.min_bus_bandwidth = 0;
		/* TODO: complete implementation of
		 * amd_powerplay_display_configuration_change().
		 * Follow the example of:
		 * PHM_StoreDALConfigurationData - powerplay/hwmgr/hardwaremanager.c
		 * PP_IRI_DisplayConfigurationChange - powerplay/eventmgr/iri.c */
amd_powerplay_display_configuration_change(
adev->powerplay.pp_handle,
&adev->pm.pm_display_cfg);
/* TODO: replace by a separate call to 'apply display cfg'? */
amdgpu_pm_compute_clocks(adev);
}
return true;
}
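/* Unit convention (noted for clarity): DC passes clocks in kHz, while the
 * powerplay display configuration stores them in 10 kHz units; hence the
 * divide-by-ten in dm_pp_apply_display_requirements() above and the matching
 * multiply-by-ten when clock levels are read back in pp_to_dc_clock_levels()
 * below. E.g. a 300000 kHz (300 MHz) min_engine_clock_khz is stored as
 * min_core_set_clock = 30000.
 */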
bool dc_service_get_system_clocks_range(
const struct dc_context *ctx,
struct dm_pp_gpu_clock_range *sys_clks)
{
struct amdgpu_device *adev = ctx->driver_context;
/* Default values, in case PPLib is not compiled-in. */
sys_clks->mclk.max_khz = 800000;
sys_clks->mclk.min_khz = 800000;
sys_clks->sclk.max_khz = 600000;
sys_clks->sclk.min_khz = 300000;
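	/* amdgpu_dpm_get_mclk()/amdgpu_dpm_get_sclk() take a boolean that
	 * selects the lowest (true) or highest (false) DPM level.
	 */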
if (adev->pm.dpm_enabled) {
sys_clks->mclk.max_khz = amdgpu_dpm_get_mclk(adev, false);
sys_clks->mclk.min_khz = amdgpu_dpm_get_mclk(adev, true);
sys_clks->sclk.max_khz = amdgpu_dpm_get_sclk(adev, false);
sys_clks->sclk.min_khz = amdgpu_dpm_get_sclk(adev, true);
}
return true;
}
static void get_default_clock_levels(
enum dm_pp_clock_type clk_type,
struct dm_pp_clock_levels *clks)
{
uint32_t disp_clks_in_khz[6] = {
300000, 400000, 496560, 626090, 685720, 757900 };
uint32_t sclks_in_khz[6] = {
300000, 360000, 423530, 514290, 626090, 720000 };
uint32_t mclks_in_khz[2] = { 333000, 800000 };
switch (clk_type) {
case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
clks->num_levels = 6;
memmove(clks->clocks_in_khz, disp_clks_in_khz,
sizeof(disp_clks_in_khz));
break;
case DM_PP_CLOCK_TYPE_ENGINE_CLK:
clks->num_levels = 6;
memmove(clks->clocks_in_khz, sclks_in_khz,
sizeof(sclks_in_khz));
break;
case DM_PP_CLOCK_TYPE_MEMORY_CLK:
clks->num_levels = 2;
memmove(clks->clocks_in_khz, mclks_in_khz,
sizeof(mclks_in_khz));
break;
default:
clks->num_levels = 0;
break;
}
}
static enum amd_pp_clock_type dc_to_pp_clock_type(
enum dm_pp_clock_type dm_pp_clk_type)
{
enum amd_pp_clock_type amd_pp_clk_type = 0;
switch (dm_pp_clk_type) {
case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
amd_pp_clk_type = amd_pp_disp_clock;
break;
case DM_PP_CLOCK_TYPE_ENGINE_CLK:
amd_pp_clk_type = amd_pp_sys_clock;
break;
case DM_PP_CLOCK_TYPE_MEMORY_CLK:
amd_pp_clk_type = amd_pp_mem_clock;
break;
default:
DRM_ERROR("DM_PPLIB: invalid clock type: %d!\n",
dm_pp_clk_type);
break;
}
return amd_pp_clk_type;
}
static void pp_to_dc_clock_levels(
const struct amd_pp_clocks *pp_clks,
struct dm_pp_clock_levels *dc_clks,
enum dm_pp_clock_type dc_clk_type)
{
uint32_t i;
if (pp_clks->count > DM_PP_MAX_CLOCK_LEVELS) {
DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n",
DC_DECODE_PP_CLOCK_TYPE(dc_clk_type),
pp_clks->count,
DM_PP_MAX_CLOCK_LEVELS);
dc_clks->num_levels = DM_PP_MAX_CLOCK_LEVELS;
} else
dc_clks->num_levels = pp_clks->count;
DRM_INFO("DM_PPLIB: values for %s clock\n",
DC_DECODE_PP_CLOCK_TYPE(dc_clk_type));
for (i = 0; i < dc_clks->num_levels; i++) {
DRM_INFO("DM_PPLIB:\t %d\n", pp_clks->clock[i]);
/* translate 10kHz to kHz */
dc_clks->clocks_in_khz[i] = pp_clks->clock[i] * 10;
}
}
bool dm_pp_get_clock_levels_by_type(
const struct dc_context *ctx,
enum dm_pp_clock_type clk_type,
struct dm_pp_clock_levels *dc_clks)
{
struct amdgpu_device *adev = ctx->driver_context;
void *pp_handle = adev->powerplay.pp_handle;
struct amd_pp_clocks pp_clks = { 0 };
struct amd_pp_simple_clock_info validation_clks = { 0 };
uint32_t i;
if (amd_powerplay_get_clock_by_type(pp_handle,
dc_to_pp_clock_type(clk_type), &pp_clks)) {
/* Error in pplib. Provide default values. */
get_default_clock_levels(clk_type, dc_clks);
return true;
}
pp_to_dc_clock_levels(&pp_clks, dc_clks, clk_type);
if (amd_powerplay_get_display_mode_validation_clocks(pp_handle,
&validation_clks)) {
/* Error in pplib. Provide default values. */
DRM_INFO("DM_PPLIB: Warning: using default validation clocks!\n");
validation_clks.engine_max_clock = 72000;
validation_clks.memory_max_clock = 80000;
validation_clks.level = 0;
}
DRM_INFO("DM_PPLIB: Validation clocks:\n");
DRM_INFO("DM_PPLIB: engine_max_clock: %d\n",
validation_clks.engine_max_clock);
DRM_INFO("DM_PPLIB: memory_max_clock: %d\n",
validation_clks.memory_max_clock);
DRM_INFO("DM_PPLIB: level : %d\n",
validation_clks.level);
/* Translate 10 kHz to kHz. */
validation_clks.engine_max_clock *= 10;
validation_clks.memory_max_clock *= 10;
/* Determine the highest non-boosted level from the Validation Clocks */
if (clk_type == DM_PP_CLOCK_TYPE_ENGINE_CLK) {
for (i = 0; i < dc_clks->num_levels; i++) {
if (dc_clks->clocks_in_khz[i] > validation_clks.engine_max_clock) {
			/* This clock is higher than the validation clock.
			 * That means the previous one is the highest
			 * non-boosted one. */
DRM_INFO("DM_PPLIB: reducing engine clock level from %d to %d\n",
dc_clks->num_levels, i + 1);
dc_clks->num_levels = i;
break;
}
}
} else if (clk_type == DM_PP_CLOCK_TYPE_MEMORY_CLK) {
for (i = 0; i < dc_clks->num_levels; i++) {
if (dc_clks->clocks_in_khz[i] > validation_clks.memory_max_clock) {
DRM_INFO("DM_PPLIB: reducing memory clock level from %d to %d\n",
dc_clks->num_levels, i + 1);
dc_clks->num_levels = i;
break;
}
}
}
return true;
}
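/* Worked example (for clarity): if pplib reports engine levels of
 * {300000, 600000, 720000} kHz and the validated engine_max_clock is
 * 630000 kHz, the engine-clock loop above stops at i == 2 and num_levels
 * is reduced to 2, dropping the boosted-only 720 MHz level.
 */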
bool dm_pp_get_clock_levels_by_type_with_latency(
const struct dc_context *ctx,
enum dm_pp_clock_type clk_type,
struct dm_pp_clock_levels_with_latency *clk_level_info)
{
/* TODO: to be implemented */
return false;
}
bool dm_pp_get_clock_levels_by_type_with_voltage(
const struct dc_context *ctx,
enum dm_pp_clock_type clk_type,
struct dm_pp_clock_levels_with_voltage *clk_level_info)
{
/* TODO: to be implemented */
return false;
}
bool dm_pp_notify_wm_clock_changes(
const struct dc_context *ctx,
struct dm_pp_wm_sets_with_clock_ranges *wm_with_clock_ranges)
{
/* TODO: to be implemented */
return false;
}
bool dm_pp_apply_power_level_change_request(
const struct dc_context *ctx,
struct dm_pp_power_level_change_request *level_change_req)
{
/* TODO: to be implemented */
return false;
}
bool dm_pp_apply_clock_for_voltage_request(
const struct dc_context *ctx,
struct dm_pp_clock_for_voltage_req *clock_for_voltage_req)
{
/* TODO: to be implemented */
return false;
}
bool dm_pp_get_static_clocks(
const struct dc_context *ctx,
struct dm_pp_static_clock_info *static_clk_info)
{
/* TODO: to be implemented */
return false;
}
/**** end of power component interfaces ****/
/* Calls to notification */
void dal_notify_setmode_complete(struct dc_context *ctx,
uint32_t h_total,
uint32_t v_total,
uint32_t h_active,
uint32_t v_active,
uint32_t pix_clk_in_khz)
{
/*TODO*/
}
/* End of calls to notification */
long dm_get_pid(void)
{
return current->pid;
}
long dm_get_tgid(void)
{
return current->tgid;
}
(collapsed diff not shown)
/*
* Copyright 2012-13 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#ifndef __AMDGPU_DM_TYPES_H__
#define __AMDGPU_DM_TYPES_H__
#include <drm/drmP.h>
struct amdgpu_framebuffer;
struct amdgpu_display_manager;
struct dc_validation_set;
struct dc_surface;
/* TODO (Jodan Hersen): use the one in amdgpu_dm */
int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
struct amdgpu_crtc *amdgpu_crtc,
uint32_t link_index);
int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
struct amdgpu_connector *amdgpu_connector,
uint32_t link_index,
struct amdgpu_encoder *amdgpu_encoder);
int amdgpu_dm_encoder_init(
struct drm_device *dev,
struct amdgpu_encoder *aencoder,
uint32_t link_index);
void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc);
void amdgpu_dm_connector_destroy(struct drm_connector *connector);
void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder);
int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
int amdgpu_dm_atomic_commit(
struct drm_device *dev,
struct drm_atomic_state *state,
bool async);
int amdgpu_dm_atomic_check(struct drm_device *dev,
struct drm_atomic_state *state);
int dm_create_validation_set_for_target(
struct drm_connector *connector,
struct drm_display_mode *mode,
struct dc_validation_set *val_set);
void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector);
struct drm_connector_state *amdgpu_dm_connector_atomic_duplicate_state(
struct drm_connector *connector);
int amdgpu_dm_connector_atomic_set_property(
struct drm_connector *connector,
struct drm_connector_state *state,
struct drm_property *property,
uint64_t val);
int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev);
void amdgpu_dm_connector_init_helper(
struct amdgpu_display_manager *dm,
struct amdgpu_connector *aconnector,
int connector_type,
const struct dc_link *link,
int link_index);
int amdgpu_dm_connector_mode_valid(
struct drm_connector *connector,
struct drm_display_mode *mode);
void dm_restore_drm_connector_state(struct drm_device *dev, struct drm_connector *connector);
void amdgpu_dm_add_sink_to_freesync_module(
struct drm_connector *connector,
struct edid *edid);
void amdgpu_dm_remove_sink_from_freesync_module(
struct drm_connector *connector);
extern const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs;
#endif /* __AMDGPU_DM_TYPES_H__ */
#
# Makefile for Display Core (dc) component.
#
DC_LIBS = basics bios calcs dce \
gpio gpu i2caux irq virtual
DC_LIBS += dce112
DC_LIBS += dce110
DC_LIBS += dce100
DC_LIBS += dce80
AMD_DC = $(addsuffix /Makefile, $(addprefix $(FULL_AMD_DISPLAY_PATH)/dc/,$(DC_LIBS)))
include $(AMD_DC)
DISPLAY_CORE = dc.o dc_link.o dc_resource.o dc_hw_sequencer.o dc_target.o dc_sink.o dc_stream.o \
dc_surface.o dc_link_hwss.o dc_link_dp.o dc_link_ddc.o dc_debug.o
AMD_DISPLAY_CORE = $(addprefix $(AMDDALPATH)/dc/core/,$(DISPLAY_CORE))
AMD_DM_REG_UPDATE = $(addprefix $(AMDDALPATH)/dc/,dc_helper.o)
AMD_DISPLAY_FILES += $(AMD_DISPLAY_CORE)
AMD_DISPLAY_FILES += $(AMD_DM_REG_UPDATE)
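# Adding another sub-library follows the same pattern: append its directory
# to DC_LIBS above and give it a Makefile that adds its objects to
# AMD_DISPLAY_FILES, e.g. (hypothetical name):
#   DC_LIBS += dce_next
# with dce_next/Makefile contributing its .o files the same way the existing
# dce* Makefiles do.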
#
# Makefile for the 'basics' sub-component of DAL.
# It provides the basic services required by the other DAL
# subcomponents.
BASICS = conversion.o fixpt31_32.o fixpt32_32.o grph_object_id.o \
logger.o log_helpers.o register_logger.o signal_types.o vector.o
AMD_DAL_BASICS = $(addprefix $(AMDDALPATH)/dc/basics/,$(BASICS))
AMD_DISPLAY_FILES += $(AMD_DAL_BASICS)
(collapsed diff not shown)
/*
* Copyright 2012-15 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#ifndef __DAL_CONVERSION_H__
#define __DAL_CONVERSION_H__
#include "include/fixed31_32.h"
uint16_t fixed_point_to_int_frac(
struct fixed31_32 arg,
uint8_t integer_bits,
uint8_t fractional_bits);
void convert_float_matrix(
uint16_t *matrix,
struct fixed31_32 *flt,
uint32_t buffer_size);
void calculate_adjustments(
const struct fixed31_32 *ideal_matrix,
const struct dc_csc_adjustments *adjustments,
struct fixed31_32 *matrix);
void calculate_adjustments_y_only(
const struct fixed31_32 *ideal_matrix,
const struct dc_csc_adjustments *adjustments,
struct fixed31_32 *matrix);
#endif
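/* Worked example (for clarity; exact rounding/clamping conventions aside):
 * fixed_point_to_int_frac() packs a fixed31_32 value into a hardware
 * integer.fraction field of integer_bits + fractional_bits bits. Packing
 * 1.0 with integer_bits = 2 and fractional_bits = 6 yields 1 << 6 = 0x40,
 * and 0.5 yields 0x20.
 */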
(5 collapsed diffs not shown)
/*
* Copyright 2012-15 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#ifndef __DAL_LOGGER_H__
#define __DAL_LOGGER_H__
/* Structure for keeping track of offsets, buffer, etc */
#define DAL_LOGGER_BUFFER_MAX_SIZE 2048
/* The connectivity log needs to output an EDID, which requires at least
 * 256x3 bytes, so the log line size is raised to 896 to accommodate it.
 */
#define LOG_MAX_LINE_SIZE 896
#include "include/logger_types.h"
struct dal_logger {
	/* How far into the circular buffer DSAT has read.
	 * The read offset should never cross the write offset. Write '\0's
	 * over read data just to be sure?
	 */
uint32_t buffer_read_offset;
/* How far into the circular buffer we have written
* Write offset should never cross read offset
*/
uint32_t buffer_write_offset;
uint32_t write_wrap_count;
uint32_t read_wrap_count;
uint32_t open_count;
char *log_buffer; /* Pointer to malloc'ed buffer */
uint32_t log_buffer_size; /* Size of circular buffer */
uint32_t mask; /*array of masks for major elements*/
union logger_flags flags;
struct dc_context *ctx;
};
#endif /* __DAL_LOGGER_H__ */
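/* Minimal sketch of a wrap-aware writer for the circular buffer above
 * (illustrative only; it ignores the read-offset collision the comments
 * in struct dal_logger warn about, and the real implementation lives
 * elsewhere in this patch):
 */
static void example_logger_append(struct dal_logger *logger,
		const char *data, uint32_t len)
{
	uint32_t i;

	for (i = 0; i < len; i++) {
		logger->log_buffer[logger->buffer_write_offset++] = data[i];
		if (logger->buffer_write_offset >= logger->log_buffer_size) {
			logger->buffer_write_offset = 0;
			logger->write_wrap_count++; /* readers compare wrap counts */
		}
	}
}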
(8 collapsed diffs not shown)
#
# Makefile for the 'calcs' sub-component of DAL.
# It calculates bandwidth and watermark values for HW programming
#
BW_CALCS = bandwidth_calcs.o bw_fixed.o gamma_calcs.o
AMD_DAL_BW_CALCS = $(addprefix $(AMDDALPATH)/dc/calcs/,$(BW_CALCS))
AMD_DISPLAY_FILES += $(AMD_DAL_BW_CALCS)
(164 collapsed diffs not shown)