提交 42238da8 编写于 作者: Rob Clark

drm/msm/mdp5: don't use void * for opaque types

For example, use 'struct mdp5_smp *' everywhere instead of 'void *', but
only declare it as 'struct mdp5_smp;' in common headers, so the struct
body is still private.  This accomplishes the desired modularity while
still letting the compiler provide some type checking for us.
Signed-off-by: Rob Clark <robdclark@gmail.com>
上级 0deed25b
......@@ -121,35 +121,27 @@ static const struct mdp5_cfg_handler cfg_handlers[] = {
static struct mdp5_cfg_platform *mdp5_get_config(struct platform_device *dev);
const struct mdp5_cfg_hw *mdp5_cfg_get_hw_config(void *cfg_hnd)
const struct mdp5_cfg_hw *mdp5_cfg_get_hw_config(struct mdp5_cfg_handler *cfg_handler)
{
struct mdp5_cfg_handler *cfg_handler = cfg_hnd;
return cfg_handler->config.hw;
}
struct mdp5_cfg *mdp5_cfg_get_config(void *cfg_hnd)
struct mdp5_cfg *mdp5_cfg_get_config(struct mdp5_cfg_handler *cfg_handler)
{
struct mdp5_cfg_handler *cfg_handler = cfg_hnd;
return &cfg_handler->config;
}
int mdp5_cfg_get_hw_rev(void *cfg_hnd)
int mdp5_cfg_get_hw_rev(struct mdp5_cfg_handler *cfg_handler)
{
struct mdp5_cfg_handler *cfg_handler = cfg_hnd;
return cfg_handler->revision;
}
void mdp5_cfg_destroy(void *cfg_hnd)
void mdp5_cfg_destroy(struct mdp5_cfg_handler *cfg_handler)
{
struct mdp5_cfg_handler *cfg_handler = cfg_hnd;
kfree(cfg_handler);
}
void *mdp5_cfg_init(struct mdp5_kms *mdp5_kms,
struct mdp5_cfg_handler *mdp5_cfg_init(struct mdp5_kms *mdp5_kms,
uint32_t major, uint32_t minor)
{
struct drm_device *dev = mdp5_kms->dev;
......
......@@ -78,12 +78,14 @@ struct mdp5_cfg {
};
struct mdp5_kms;
struct mdp5_cfg_handler;
const struct mdp5_cfg_hw *mdp5_cfg_get_hw_config(void *cfg_hnd);
struct mdp5_cfg *mdp5_cfg_get_config(void *cfg_hnd);
int mdp5_cfg_get_hw_rev(void *cfg_hnd);
const struct mdp5_cfg_hw *mdp5_cfg_get_hw_config(struct mdp5_cfg_handler *cfg_hnd);
struct mdp5_cfg *mdp5_cfg_get_config(struct mdp5_cfg_handler *cfg_hnd);
int mdp5_cfg_get_hw_rev(struct mdp5_cfg_handler *cfg_hnd);
void *mdp5_cfg_init(struct mdp5_kms *mdp5_kms, uint32_t major, uint32_t minor);
void mdp5_cfg_destroy(void *cfg_hnd);
struct mdp5_cfg_handler *mdp5_cfg_init(struct mdp5_kms *mdp5_kms,
uint32_t major, uint32_t minor);
void mdp5_cfg_destroy(struct mdp5_cfg_handler *cfg_hnd);
#endif /* __MDP5_CFG_H__ */
......@@ -37,7 +37,7 @@ struct mdp5_crtc {
spinlock_t lm_lock; /* protect REG_MDP5_LM_* registers */
/* CTL used for this CRTC: */
void *ctl;
struct mdp5_ctl *ctl;
/* if there is a pending flip, these will be non-null: */
struct drm_pending_vblank_event *event;
......@@ -261,7 +261,7 @@ static void blend_setup(struct drm_crtc *crtc)
unsigned long flags;
#define blender(stage) ((stage) - STAGE_BASE)
hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg_priv);
hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);
......@@ -327,7 +327,7 @@ static int mdp5_crtc_mode_set(struct drm_crtc *crtc,
/* request a free CTL, if none is already allocated for this CRTC */
if (!mdp5_crtc->ctl) {
mdp5_crtc->ctl = mdp5_ctl_request(mdp5_kms->ctl_priv, crtc);
mdp5_crtc->ctl = mdp5_ctlm_request(mdp5_kms->ctlm, crtc);
if (!mdp5_crtc->ctl)
return -EBUSY;
}
......@@ -595,7 +595,7 @@ int mdp5_crtc_attach(struct drm_crtc *crtc, struct drm_plane *plane)
enum mdp_mixer_stage_id stage = STAGE_BASE;
int max_nb_planes;
hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg_priv);
hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
max_nb_planes = hw_cfg->lm.nb_stages;
if (count_planes(crtc) >= max_nb_planes) {
......
......@@ -50,7 +50,8 @@ struct mdp5_ctl {
u32 flush_mask;
bool cursor_on;
void *crtc;
struct drm_crtc *crtc;
};
struct mdp5_ctl_manager {
......@@ -96,9 +97,8 @@ u32 ctl_read(struct mdp5_ctl *ctl, u32 reg)
}
int mdp5_ctl_set_intf(void *c, enum mdp5_intf intf)
int mdp5_ctl_set_intf(struct mdp5_ctl *ctl, enum mdp5_intf intf)
{
struct mdp5_ctl *ctl = c;
unsigned long flags;
static const enum mdp5_intfnum intfnum[] = {
INTF0, INTF1, INTF2, INTF3,
......@@ -113,10 +113,9 @@ int mdp5_ctl_set_intf(void *c, enum mdp5_intf intf)
return 0;
}
int mdp5_ctl_set_cursor(void *c, bool enable)
int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, bool enable)
{
struct mdp5_ctl_manager *ctl_mgr = &mdp5_ctl_mgr;
struct mdp5_ctl *ctl = c;
unsigned long flags;
u32 blend_cfg;
int lm;
......@@ -147,9 +146,8 @@ int mdp5_ctl_set_cursor(void *c, bool enable)
}
int mdp5_ctl_blend(void *c, u32 lm, u32 blend_cfg)
int mdp5_ctl_blend(struct mdp5_ctl *ctl, u32 lm, u32 blend_cfg)
{
struct mdp5_ctl *ctl = c;
unsigned long flags;
if (ctl->cursor_on)
......@@ -164,10 +162,9 @@ int mdp5_ctl_blend(void *c, u32 lm, u32 blend_cfg)
return 0;
}
int mdp5_ctl_commit(void *c, u32 flush_mask)
int mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask)
{
struct mdp5_ctl_manager *ctl_mgr = &mdp5_ctl_mgr;
struct mdp5_ctl *ctl = c;
unsigned long flags;
if (flush_mask & MDP5_CTL_FLUSH_CURSOR_DUMMY) {
......@@ -190,17 +187,14 @@ int mdp5_ctl_commit(void *c, u32 flush_mask)
return 0;
}
u32 mdp5_ctl_get_flush(void *c)
u32 mdp5_ctl_get_flush(struct mdp5_ctl *ctl)
{
struct mdp5_ctl *ctl = c;
return ctl->flush_mask;
}
void mdp5_ctl_release(void *c)
void mdp5_ctl_release(struct mdp5_ctl *ctl)
{
struct mdp5_ctl_manager *ctl_mgr = &mdp5_ctl_mgr;
struct mdp5_ctl *ctl = c;
unsigned long flags;
if (unlikely(WARN_ON(ctl->id >= MAX_CTL) || !ctl->busy)) {
......@@ -223,9 +217,9 @@ void mdp5_ctl_release(void *c)
*
* @return first free CTL
*/
void *mdp5_ctl_request(void *ctlm, void *crtc)
struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctl_mgr,
struct drm_crtc *crtc)
{
struct mdp5_ctl_manager *ctl_mgr = ctlm;
struct mdp5_ctl *ctl = NULL;
unsigned long flags;
int c;
......@@ -252,9 +246,8 @@ void *mdp5_ctl_request(void *ctlm, void *crtc)
return ctl;
}
void mdp5_ctlm_hw_reset(void *ctlm)
void mdp5_ctlm_hw_reset(struct mdp5_ctl_manager *ctl_mgr)
{
struct mdp5_ctl_manager *ctl_mgr = ctlm;
unsigned long flags;
int c;
......@@ -267,15 +260,13 @@ void mdp5_ctlm_hw_reset(void *ctlm)
}
}
void mdp5_ctlm_destroy(void *ctlm)
void mdp5_ctlm_destroy(struct mdp5_ctl_manager *ctl_mgr)
{
struct mdp5_ctl_manager *ctl_mgr = ctlm;
kfree(ctl_mgr);
}
void *mdp5_ctlm_init(struct drm_device *dev, void __iomem *mmio_base,
const struct mdp5_cfg_hw *hw_cfg)
struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
void __iomem *mmio_base, const struct mdp5_cfg_hw *hw_cfg)
{
struct mdp5_ctl_manager *ctl_mgr = &mdp5_ctl_mgr;
const struct mdp5_sub_block *ctl_cfg = &hw_cfg->ctl;
......
......@@ -21,30 +21,31 @@
* mdp5_ctlm_init() returns a ctlm (CTL Manager) handler,
* which is then used to call the other mdp5_ctlm_*(ctlm, ...) functions.
*/
void *mdp5_ctlm_init(struct drm_device *dev, void __iomem *mmio_base,
const struct mdp5_cfg_hw *hw_cfg);
void mdp5_ctlm_hw_reset(void *ctlm);
void mdp5_ctlm_destroy(void *ctlm);
struct mdp5_ctl_manager;
struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
void __iomem *mmio_base, const struct mdp5_cfg_hw *hw_cfg);
void mdp5_ctlm_hw_reset(struct mdp5_ctl_manager *ctlm);
void mdp5_ctlm_destroy(struct mdp5_ctl_manager *ctlm);
/*
* CTL prototypes:
* mdp5_ctl_request(ctlm, ...) returns a ctl (CTL resource) handler,
* which is then used to call the other mdp5_ctl_*(ctl, ...) functions.
*/
void *mdp5_ctl_request(void *ctlm, void *crtc);
struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctlm, struct drm_crtc *crtc);
int mdp5_ctl_set_intf(void *ctl, enum mdp5_intf intf);
int mdp5_ctl_set_intf(struct mdp5_ctl *ctl, enum mdp5_intf intf);
int mdp5_ctl_set_cursor(void *ctl, bool enable);
int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, bool enable);
/* @blend_cfg: see LM blender config definition below */
int mdp5_ctl_blend(void *ctl, u32 lm, u32 blend_cfg);
int mdp5_ctl_blend(struct mdp5_ctl *ctl, u32 lm, u32 blend_cfg);
/* @flush_mask: see CTL flush masks definitions below */
int mdp5_ctl_commit(void *ctl, u32 flush_mask);
u32 mdp5_ctl_get_flush(void *ctl);
int mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask);
u32 mdp5_ctl_get_flush(struct mdp5_ctl *ctl);
void mdp5_ctl_release(void *ctl);
void mdp5_ctl_release(struct mdp5_ctl *ctl);
/*
* blend_cfg (LM blender config):
......
......@@ -61,7 +61,7 @@ static int mdp5_hw_init(struct msm_kms *kms)
mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, 0);
spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags);
mdp5_ctlm_hw_reset(mdp5_kms->ctl_priv);
mdp5_ctlm_hw_reset(mdp5_kms->ctlm);
pm_runtime_put_sync(dev->dev);
......@@ -88,9 +88,6 @@ static void mdp5_destroy(struct msm_kms *kms)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
struct msm_mmu *mmu = mdp5_kms->mmu;
void *smp = mdp5_kms->smp_priv;
void *cfg = mdp5_kms->cfg_priv;
void *ctl = mdp5_kms->ctl_priv;
mdp5_irq_domain_fini(mdp5_kms);
......@@ -98,12 +95,13 @@ static void mdp5_destroy(struct msm_kms *kms)
mmu->funcs->detach(mmu, iommu_ports, ARRAY_SIZE(iommu_ports));
mmu->funcs->destroy(mmu);
}
if (ctl)
mdp5_ctlm_destroy(ctl);
if (smp)
mdp5_smp_destroy(smp);
if (cfg)
mdp5_cfg_destroy(cfg);
if (mdp5_kms->ctlm)
mdp5_ctlm_destroy(mdp5_kms->ctlm);
if (mdp5_kms->smp)
mdp5_smp_destroy(mdp5_kms->smp);
if (mdp5_kms->cfg)
mdp5_cfg_destroy(mdp5_kms->cfg);
kfree(mdp5_kms);
}
......@@ -163,7 +161,7 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
const struct mdp5_cfg_hw *hw_cfg;
int i, ret;
hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg_priv);
hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
/* register our interrupt-controller for hdmi/eDP/dsi/etc
* to use for irqs routed through mdp:
......@@ -282,7 +280,6 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
struct msm_kms *kms = NULL;
struct msm_mmu *mmu;
uint32_t major, minor;
void *priv;
int i, ret;
mdp5_kms = kzalloc(sizeof(*mdp5_kms), GFP_KERNEL);
......@@ -350,30 +347,32 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
clk_set_rate(mdp5_kms->src_clk, 200000000);
read_hw_revision(mdp5_kms, &major, &minor);
priv = mdp5_cfg_init(mdp5_kms, major, minor);
if (IS_ERR(priv)) {
ret = PTR_ERR(priv);
mdp5_kms->cfg = mdp5_cfg_init(mdp5_kms, major, minor);
if (IS_ERR(mdp5_kms->cfg)) {
ret = PTR_ERR(mdp5_kms->cfg);
mdp5_kms->cfg = NULL;
goto fail;
}
mdp5_kms->cfg_priv = priv;
config = mdp5_cfg_get_config(mdp5_kms->cfg_priv);
config = mdp5_cfg_get_config(mdp5_kms->cfg);
/* TODO: compute core clock rate at runtime */
clk_set_rate(mdp5_kms->src_clk, config->hw->max_clk);
priv = mdp5_smp_init(mdp5_kms->dev, &config->hw->smp);
if (IS_ERR(priv)) {
ret = PTR_ERR(priv);
mdp5_kms->smp = mdp5_smp_init(mdp5_kms->dev, &config->hw->smp);
if (IS_ERR(mdp5_kms->smp)) {
ret = PTR_ERR(mdp5_kms->smp);
mdp5_kms->smp = NULL;
goto fail;
}
mdp5_kms->smp_priv = priv;
priv = mdp5_ctlm_init(dev, mdp5_kms->mmio, config->hw);
if (IS_ERR(priv)) {
ret = PTR_ERR(priv);
mdp5_kms->ctlm = mdp5_ctlm_init(dev, mdp5_kms->mmio, config->hw);
if (IS_ERR(mdp5_kms->ctlm)) {
ret = PTR_ERR(mdp5_kms->ctlm);
mdp5_kms->ctlm = NULL;
goto fail;
}
mdp5_kms->ctl_priv = priv;
/* make sure things are off before attaching iommu (bootloader could
* have left things on, in which case we'll start getting faults if
......
......@@ -31,14 +31,14 @@ struct mdp5_kms {
struct drm_device *dev;
void *cfg_priv;
struct mdp5_cfg_handler *cfg;
/* mapper-id used to request GEM buffer mapped for scanout: */
int id;
struct msm_mmu *mmu;
void *smp_priv;
void *ctl_priv;
struct mdp5_smp *smp;
struct mdp5_ctl_manager *ctlm;
/* io/register spaces: */
void __iomem *mmio, *vbif;
......
......@@ -77,7 +77,7 @@ static int mdp5_plane_disable(struct drm_plane *plane)
if (mdp5_kms) {
/* Release the memory we requested earlier from the SMP: */
mdp5_smp_release(mdp5_kms->smp_priv, pipe);
mdp5_smp_release(mdp5_kms->smp, pipe);
}
/* TODO detaching now will cause us not to get the last
......@@ -232,7 +232,7 @@ int mdp5_plane_mode_set(struct drm_plane *plane,
crtc->base.id, crtc_x, crtc_y, crtc_w, crtc_h);
/* Request some memory from the SMP: */
ret = mdp5_smp_request(mdp5_kms->smp_priv,
ret = mdp5_smp_request(mdp5_kms->smp,
mdp5_plane->pipe, fb->pixel_format, src_w);
if (ret)
return ret;
......@@ -243,7 +243,7 @@ int mdp5_plane_mode_set(struct drm_plane *plane,
* would move into atomic->check_plane_state(), while updating the
* hw would remain here:
*/
mdp5_smp_configure(mdp5_kms->smp_priv, pipe);
mdp5_smp_configure(mdp5_kms->smp, pipe);
if (src_w != crtc_w) {
config |= MDP5_PIPE_SCALE_CONFIG_SCALEX_EN;
......@@ -335,7 +335,7 @@ void mdp5_plane_complete_flip(struct drm_plane *plane)
struct mdp5_kms *mdp5_kms = get_kms(plane);
enum mdp5_pipe pipe = to_mdp5_plane(plane)->pipe;
mdp5_smp_commit(mdp5_kms->smp_priv, pipe);
mdp5_smp_commit(mdp5_kms->smp, pipe);
}
enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane)
......
......@@ -114,7 +114,7 @@ static int smp_request_block(struct mdp5_smp *smp,
int reserved;
unsigned long flags;
hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg_priv);
hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
reserved = hw_cfg->smp.reserved[cid];
spin_lock_irqsave(&smp->state_lock, flags);
......@@ -174,12 +174,11 @@ static void set_fifo_thresholds(struct mdp5_smp *smp,
* decimated width. Ie. SMP buffering sits downstream of decimation (which
* presumably happens during the dma from scanout buffer).
*/
int mdp5_smp_request(void *handler, enum mdp5_pipe pipe, u32 fmt, u32 width)
int mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe, u32 fmt, u32 width)
{
struct mdp5_smp *smp = handler;
struct mdp5_kms *mdp5_kms = get_kms(smp);
struct drm_device *dev = mdp5_kms->dev;
int rev = mdp5_cfg_get_hw_rev(mdp5_kms->cfg_priv);
int rev = mdp5_cfg_get_hw_rev(mdp5_kms->cfg);
int i, hsub, nplanes, nlines, nblks, ret;
nplanes = drm_format_num_planes(fmt);
......@@ -217,9 +216,8 @@ int mdp5_smp_request(void *handler, enum mdp5_pipe pipe, u32 fmt, u32 width)
}
/* Release SMP blocks for all clients of the pipe */
void mdp5_smp_release(void *handler, enum mdp5_pipe pipe)
void mdp5_smp_release(struct mdp5_smp *smp, enum mdp5_pipe pipe)
{
struct mdp5_smp *smp = handler;
int i, nblks;
for (i = 0, nblks = 0; i < pipe2nclients(pipe); i++)
......@@ -261,9 +259,8 @@ static void update_smp_state(struct mdp5_smp *smp,
}
/* step #2: configure hw for union(pending, inuse): */
void mdp5_smp_configure(void *handler, enum mdp5_pipe pipe)
void mdp5_smp_configure(struct mdp5_smp *smp, enum mdp5_pipe pipe)
{
struct mdp5_smp *smp = handler;
int cnt = smp->blk_cnt;
mdp5_smp_state_t assigned;
int i;
......@@ -278,9 +275,8 @@ void mdp5_smp_configure(void *handler, enum mdp5_pipe pipe)
}
/* step #3: after vblank, copy pending -> inuse: */
void mdp5_smp_commit(void *handler, enum mdp5_pipe pipe)
void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe)
{
struct mdp5_smp *smp = handler;
int cnt = smp->blk_cnt;
mdp5_smp_state_t released;
int i;
......@@ -309,14 +305,12 @@ void mdp5_smp_commit(void *handler, enum mdp5_pipe pipe)
}
}
void mdp5_smp_destroy(void *handler)
void mdp5_smp_destroy(struct mdp5_smp *smp)
{
struct mdp5_smp *smp = handler;
kfree(smp);
}
void *mdp5_smp_init(struct drm_device *dev, const struct mdp5_smp_block *cfg)
struct mdp5_smp *mdp5_smp_init(struct drm_device *dev, const struct mdp5_smp_block *cfg)
{
struct mdp5_smp *smp = NULL;
int ret;
......
......@@ -27,6 +27,7 @@ struct mdp5_client_smp_state {
};
struct mdp5_kms;
struct mdp5_smp;
/*
* SMP module prototypes:
......@@ -34,12 +35,12 @@ struct mdp5_kms;
* which is then used to call the other mdp5_smp_*(handler, ...) functions.
*/
void *mdp5_smp_init(struct drm_device *dev, const struct mdp5_smp_block *cfg);
void mdp5_smp_destroy(void *handler);
struct mdp5_smp *mdp5_smp_init(struct drm_device *dev, const struct mdp5_smp_block *cfg);
void mdp5_smp_destroy(struct mdp5_smp *smp);
int mdp5_smp_request(void *handler, enum mdp5_pipe pipe, u32 fmt, u32 width);
void mdp5_smp_configure(void *handler, enum mdp5_pipe pipe);
void mdp5_smp_commit(void *handler, enum mdp5_pipe pipe);
void mdp5_smp_release(void *handler, enum mdp5_pipe pipe);
int mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe, u32 fmt, u32 width);
void mdp5_smp_configure(struct mdp5_smp *smp, enum mdp5_pipe pipe);
void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe);
void mdp5_smp_release(struct mdp5_smp *smp, enum mdp5_pipe pipe);
#endif /* __MDP5_SMP_H__ */
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册