Commit 94fb7c9c authored by Greg Kroah-Hartman

Staging: Merge 'tidspbridge-2.6.37-rc1' into staging-linus

This is a big revert of a lot of -rc1 tidspbridge patches in order to
get the driver back into a working state.  It also includes an OMAP patch
that was approved by the OMAP maintainer.
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
@@ -284,12 +284,14 @@ void __init omap_dsp_reserve_sdram_memblock(void)
 	if (!size)
 		return;
-	paddr = __memblock_alloc_base(size, SZ_1M, MEMBLOCK_REAL_LIMIT);
+	paddr = memblock_alloc(size, SZ_1M);
 	if (!paddr) {
 		pr_err("%s: failed to reserve %x bytes\n",
 				__func__, size);
 		return;
 	}
+	memblock_free(paddr, size);
+	memblock_remove(paddr, size);
 	omap_dsp_phys_mempool_base = paddr;
 }
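The change above keeps the 1 MB aligned boot-time allocation but then frees it and removes it from memblock, so the DSP pool ends up outside the kernel's normal memory map and the bridge driver can remap it on its own later. A minimal stand-alone sketch of that carve-out pattern, written against the same 2.6.37-era memblock API as the hunk (the function and variable names below are illustrative, not the ones in arch/arm/mach-omap2/dsp.c):

	#include <linux/memblock.h>

	static phys_addr_t dsp_pool_base;	/* illustrative module-level base */

	static void __init dsp_pool_carve_out(phys_addr_t size)
	{
		phys_addr_t paddr;

		if (!size)
			return;
		/* Grab a 1 MB aligned block, as the hunk above does. */
		paddr = memblock_alloc(size, SZ_1M);
		if (!paddr)
			return;
		/* Give the block back, then drop the range from the memory map
		 * entirely so the kernel never maps or allocates from it. */
		memblock_free(paddr, size);
		memblock_remove(paddr, size);
		dsp_pool_base = paddr;
	}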
......
@@ -6,7 +6,6 @@ menuconfig TIDSPBRIDGE
 	tristate "DSP Bridge driver"
 	depends on ARCH_OMAP3
 	select OMAP_MBOX_FWK
-	select OMAP_IOMMU
 	help
 	  DSP/BIOS Bridge is designed for platforms that contain a GPP and
 	  one or more attached DSPs.  The GPP is considered the master or
......
@@ -2,18 +2,19 @@ obj-$(CONFIG_TIDSPBRIDGE)	+= bridgedriver.o
 libgen = gen/gb.o gen/gs.o gen/gh.o gen/uuidutil.o
 libcore = core/chnl_sm.o core/msg_sm.o core/io_sm.o core/tiomap3430.o \
-		core/tiomap3430_pwr.o core/tiomap_io.o core/dsp-mmu.o \
+		core/tiomap3430_pwr.o core/tiomap_io.o \
 		core/ue_deh.o core/wdt.o core/dsp-clock.o core/sync.o
 libpmgr = pmgr/chnl.o pmgr/io.o pmgr/msg.o pmgr/cod.o pmgr/dev.o pmgr/dspapi.o \
-		pmgr/cmm.o pmgr/dbll.o
+		pmgr/dmm.o pmgr/cmm.o pmgr/dbll.o
 librmgr = rmgr/dbdcd.o rmgr/disp.o rmgr/drv.o rmgr/mgr.o rmgr/node.o \
 		rmgr/proc.o rmgr/pwr.o rmgr/rmm.o rmgr/strm.o rmgr/dspdrv.o \
 		rmgr/nldr.o rmgr/drv_interface.o
 libdload = dynload/cload.o dynload/getsection.o dynload/reloc.o \
 		dynload/tramp.o
+libhw = hw/hw_mmu.o
 bridgedriver-y := $(libgen) $(libservices) $(libcore) $(libpmgr) $(librmgr) \
-			$(libdload)
+			$(libdload) $(libhw)
 #Machine dependent
 ccflags-y += -D_TI_ -D_DB_TIOMAP -DTMS32060 \
......
@@ -27,8 +27,9 @@
 struct deh_mgr {
 	struct bridge_dev_context *hbridge_context;	/* Bridge context. */
 	struct ntfy_object *ntfy_obj;	/* NTFY object */
-};
-int mmu_fault_isr(struct iommu *mmu);
+	/* MMU Fault DPC */
+	struct tasklet_struct dpc_tasklet;
+};
 #endif /* _DEH_ */
@@ -23,8 +23,8 @@
 #include <plat/clockdomain.h>
 #include <mach-omap2/prm-regbits-34xx.h>
 #include <mach-omap2/cm-regbits-34xx.h>
-#include <dspbridge/dsp-mmu.h>
 #include <dspbridge/devdefs.h>
+#include <hw_defs.h>
 #include <dspbridge/dspioctl.h>	/* for bridge_ioctl_extproc defn */
 #include <dspbridge/sync.h>
 #include <dspbridge/clk.h>
@@ -306,18 +306,6 @@ static const struct bpwr_clk_t bpwr_clks[] = {
 #define CLEAR_BIT_INDEX(reg, index)   (reg &= ~(1 << (index)))
-struct shm_segs {
-	u32 seg0_da;
-	u32 seg0_pa;
-	u32 seg0_va;
-	u32 seg0_size;
-	u32 seg1_da;
-	u32 seg1_pa;
-	u32 seg1_va;
-	u32 seg1_size;
-};
 /* This Bridge driver's device context: */
 struct bridge_dev_context {
 	struct dev_object *hdev_obj;	/* Handle to Bridge device object. */
@@ -328,6 +316,7 @@ struct bridge_dev_context {
 	 */
 	u32 dw_dsp_ext_base_addr;	/* See the comment above */
 	u32 dw_api_reg_base;	/* API mem map'd registers */
+	void __iomem *dw_dsp_mmu_base;	/* DSP MMU Mapped registers */
 	u32 dw_api_clk_base;	/* CLK Registers */
 	u32 dw_dsp_clk_m2_base;	/* DSP Clock Module m2 */
 	u32 dw_public_rhea;	/* Pub Rhea */
@@ -339,8 +328,7 @@ struct bridge_dev_context {
 	u32 dw_internal_size;	/* Internal memory size */
 	struct omap_mbox *mbox;		/* Mail box handle */
-	struct iommu *dsp_mmu;	/* iommu for iva2 handler */
-	struct shm_segs sh_s;
 	struct cfg_hostres *resources;	/* Host Resources */
 	/*
@@ -353,6 +341,7 @@ struct bridge_dev_context {
 	/* TC Settings */
 	bool tc_word_swap_on;	/* Traffic Controller Word Swap */
+	struct pg_table_attrs *pt_attrs;
 	u32 dsp_per_clks;
 };
......
/*
* dsp-mmu.c
*
* DSP-BIOS Bridge driver support functions for TI OMAP processors.
*
* DSP iommu.
*
* Copyright (C) 2010 Texas Instruments, Inc.
*
* This package is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
* WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
*/
#include <dspbridge/host_os.h>
#include <plat/dmtimer.h>
#include <dspbridge/dbdefs.h>
#include <dspbridge/dev.h>
#include <dspbridge/io_sm.h>
#include <dspbridge/dspdeh.h>
#include "_tiomap.h"
#include <dspbridge/dsp-mmu.h>
#define MMU_CNTL_TWL_EN (1 << 2)
static struct tasklet_struct mmu_tasklet;
#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
static void mmu_fault_print_stack(struct bridge_dev_context *dev_context)
{
void *dummy_addr;
u32 fa, tmp;
struct iotlb_entry e;
struct iommu *mmu = dev_context->dsp_mmu;
dummy_addr = (void *)__get_free_page(GFP_ATOMIC);
/*
* Before acking the MMU fault, let's make sure MMU can only
* access entry #0. Then add a new entry so that the DSP OS
* can continue in order to dump the stack.
*/
tmp = iommu_read_reg(mmu, MMU_CNTL);
tmp &= ~MMU_CNTL_TWL_EN;
iommu_write_reg(mmu, tmp, MMU_CNTL);
fa = iommu_read_reg(mmu, MMU_FAULT_AD);
e.da = fa & PAGE_MASK;
e.pa = virt_to_phys(dummy_addr);
e.valid = 1;
e.prsvd = 1;
e.pgsz = IOVMF_PGSZ_4K & MMU_CAM_PGSZ_MASK;
e.endian = MMU_RAM_ENDIAN_LITTLE;
e.elsz = MMU_RAM_ELSZ_32;
e.mixed = 0;
load_iotlb_entry(mmu, &e);
dsp_clk_enable(DSP_CLK_GPT8);
dsp_gpt_wait_overflow(DSP_CLK_GPT8, 0xfffffffe);
/* Clear MMU interrupt */
tmp = iommu_read_reg(mmu, MMU_IRQSTATUS);
iommu_write_reg(mmu, tmp, MMU_IRQSTATUS);
dump_dsp_stack(dev_context);
dsp_clk_disable(DSP_CLK_GPT8);
iopgtable_clear_entry(mmu, fa);
free_page((unsigned long)dummy_addr);
}
#endif
static void fault_tasklet(unsigned long data)
{
struct iommu *mmu = (struct iommu *)data;
struct bridge_dev_context *dev_ctx;
struct deh_mgr *dm;
u32 fa;
dev_get_deh_mgr(dev_get_first(), &dm);
dev_get_bridge_context(dev_get_first(), &dev_ctx);
if (!dm || !dev_ctx)
return;
fa = iommu_read_reg(mmu, MMU_FAULT_AD);
#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
print_dsp_trace_buffer(dev_ctx);
dump_dl_modules(dev_ctx);
mmu_fault_print_stack(dev_ctx);
#endif
bridge_deh_notify(dm, DSP_MMUFAULT, fa);
}
/*
* ======== mmu_fault_isr ========
* ISR to be triggered by a DSP MMU fault interrupt.
*/
static int mmu_fault_callback(struct iommu *mmu)
{
if (!mmu)
return -EPERM;
iommu_write_reg(mmu, 0, MMU_IRQENABLE);
tasklet_schedule(&mmu_tasklet);
return 0;
}
/**
* dsp_mmu_init() - initialize dsp_mmu module and returns a handle
*
* This function initialize dsp mmu module and returns a struct iommu
* handle to use it for dsp maps.
*
*/
struct iommu *dsp_mmu_init()
{
struct iommu *mmu;
mmu = iommu_get("iva2");
if (!IS_ERR(mmu)) {
tasklet_init(&mmu_tasklet, fault_tasklet, (unsigned long)mmu);
mmu->isr = mmu_fault_callback;
}
return mmu;
}
/**
* dsp_mmu_exit() - destroy dsp mmu module
* @mmu: Pointer to iommu handle.
*
* This function destroys dsp mmu module.
*
*/
void dsp_mmu_exit(struct iommu *mmu)
{
if (mmu)
iommu_put(mmu);
tasklet_kill(&mmu_tasklet);
}
/**
* user_va2_pa() - get physical address from userspace address.
* @mm: mm_struct Pointer of the process.
* @address: Virtual user space address.
*
*/
static u32 user_va2_pa(struct mm_struct *mm, u32 address)
{
pgd_t *pgd;
pmd_t *pmd;
pte_t *ptep, pte;
pgd = pgd_offset(mm, address);
if (!(pgd_none(*pgd) || pgd_bad(*pgd))) {
pmd = pmd_offset(pgd, address);
if (!(pmd_none(*pmd) || pmd_bad(*pmd))) {
ptep = pte_offset_map(pmd, address);
if (ptep) {
pte = *ptep;
if (pte_present(pte))
return pte & PAGE_MASK;
}
}
}
return 0;
}
/**
* get_io_pages() - pin and get pages of io user's buffer.
* @mm: mm_struct Pointer of the process.
* @uva: Virtual user space address.
* @pages Pages to be pined.
* @usr_pgs struct page array pointer where the user pages will be stored
*
*/
static int get_io_pages(struct mm_struct *mm, u32 uva, unsigned pages,
struct page **usr_pgs)
{
u32 pa;
int i;
struct page *pg;
for (i = 0; i < pages; i++) {
pa = user_va2_pa(mm, uva);
if (!pfn_valid(__phys_to_pfn(pa)))
break;
pg = phys_to_page(pa);
usr_pgs[i] = pg;
get_page(pg);
}
return i;
}
/**
* user_to_dsp_map() - maps user to dsp virtual address
* @mmu: Pointer to iommu handle.
* @uva: Virtual user space address.
* @da DSP address
* @size Buffer size to map.
* @usr_pgs struct page array pointer where the user pages will be stored
*
* This function maps a user space buffer into DSP virtual address.
*
*/
u32 user_to_dsp_map(struct iommu *mmu, u32 uva, u32 da, u32 size,
struct page **usr_pgs)
{
int res, w;
unsigned pages;
int i;
struct vm_area_struct *vma;
struct mm_struct *mm = current->mm;
struct sg_table *sgt;
struct scatterlist *sg;
if (!size || !usr_pgs)
return -EINVAL;
pages = size / PG_SIZE4K;
down_read(&mm->mmap_sem);
vma = find_vma(mm, uva);
while (vma && (uva + size > vma->vm_end))
vma = find_vma(mm, vma->vm_end + 1);
if (!vma) {
pr_err("%s: Failed to get VMA region for 0x%x (%d)\n",
__func__, uva, size);
up_read(&mm->mmap_sem);
return -EINVAL;
}
if (vma->vm_flags & (VM_WRITE | VM_MAYWRITE))
w = 1;
if (vma->vm_flags & VM_IO)
i = get_io_pages(mm, uva, pages, usr_pgs);
else
i = get_user_pages(current, mm, uva, pages, w, 1,
usr_pgs, NULL);
up_read(&mm->mmap_sem);
if (i < 0)
return i;
if (i < pages) {
res = -EFAULT;
goto err_pages;
}
sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
if (!sgt) {
res = -ENOMEM;
goto err_pages;
}
res = sg_alloc_table(sgt, pages, GFP_KERNEL);
if (res < 0)
goto err_sg;
for_each_sg(sgt->sgl, sg, sgt->nents, i)
sg_set_page(sg, usr_pgs[i], PAGE_SIZE, 0);
da = iommu_vmap(mmu, da, sgt, IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_32);
if (!IS_ERR_VALUE(da))
return da;
res = (int)da;
sg_free_table(sgt);
err_sg:
kfree(sgt);
i = pages;
err_pages:
while (i--)
put_page(usr_pgs[i]);
return res;
}
/**
* user_to_dsp_unmap() - unmaps DSP virtual buffer.
* @mmu: Pointer to iommu handle.
* @da DSP address
*
* This function unmaps a user space buffer into DSP virtual address.
*
*/
int user_to_dsp_unmap(struct iommu *mmu, u32 da)
{
unsigned i;
struct sg_table *sgt;
struct scatterlist *sg;
sgt = iommu_vunmap(mmu, da);
if (!sgt)
return -EFAULT;
for_each_sg(sgt->sgl, sg, sgt->nents, i)
put_page(sg_page(sg));
sg_free_table(sgt);
kfree(sgt);
return 0;
}
@@ -39,6 +39,10 @@
 #include <dspbridge/ntfy.h>
 #include <dspbridge/sync.h>
+/* Hardware Abstraction Layer */
+#include <hw_defs.h>
+#include <hw_mmu.h>
 /* Bridge Driver */
 #include <dspbridge/dspdeh.h>
 #include <dspbridge/dspio.h>
@@ -287,7 +291,6 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
 	struct cod_manager *cod_man;
 	struct chnl_mgr *hchnl_mgr;
 	struct msg_mgr *hmsg_mgr;
-	struct shm_segs *sm_sg;
 	u32 ul_shm_base;
 	u32 ul_shm_base_offset;
 	u32 ul_shm_limit;
@@ -310,9 +313,18 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
 	struct bridge_ioctl_extproc ae_proc[BRDIOCTL_NUMOFMMUTLB];
 	struct cfg_hostres *host_res;
 	struct bridge_dev_context *pbridge_context;
+	u32 map_attrs;
 	u32 shm0_end;
 	u32 ul_dyn_ext_base;
 	u32 ul_seg1_size = 0;
+	u32 pa_curr = 0;
+	u32 va_curr = 0;
+	u32 gpp_va_curr = 0;
+	u32 num_bytes = 0;
+	u32 all_bits = 0;
+	u32 page_size[] = { HW_PAGE_SIZE16MB, HW_PAGE_SIZE1MB,
+		HW_PAGE_SIZE64KB, HW_PAGE_SIZE4KB
+	};
 	status = dev_get_bridge_context(hio_mgr->hdev_obj, &pbridge_context);
 	if (!pbridge_context) {
@@ -325,8 +337,6 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
 		status = -EFAULT;
 		goto func_end;
 	}
-	sm_sg = &pbridge_context->sh_s;
 	status = dev_get_cod_mgr(hio_mgr->hdev_obj, &cod_man);
 	if (!cod_man) {
 		status = -EFAULT;
@@ -461,14 +471,129 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
 	if (status)
 		goto func_end;
-	sm_sg->seg1_pa = ul_gpp_pa;
-	sm_sg->seg1_da = ul_dyn_ext_base;
-	sm_sg->seg1_va = ul_gpp_va;
-	sm_sg->seg1_size = ul_seg1_size;
-	sm_sg->seg0_pa = ul_gpp_pa + ul_pad_size + ul_seg1_size;
-	sm_sg->seg0_da = ul_dsp_va;
-	sm_sg->seg0_va = ul_gpp_va + ul_pad_size + ul_seg1_size;
-	sm_sg->seg0_size = ul_seg_size;
+	pa_curr = ul_gpp_pa;
+	va_curr = ul_dyn_ext_base * hio_mgr->word_size;
+	gpp_va_curr = ul_gpp_va;
+	num_bytes = ul_seg1_size;
+	/*
+	 * Try to fit into TLB entries. If not possible, push them to page
+	 * tables. It is quite possible that if sections are not on
+	 * bigger page boundary, we may end up making several small pages.
+	 * So, push them onto page tables, if that is the case.
+	 */
map_attrs = 0x00000000;
map_attrs = DSP_MAPLITTLEENDIAN;
map_attrs |= DSP_MAPPHYSICALADDR;
map_attrs |= DSP_MAPELEMSIZE32;
map_attrs |= DSP_MAPDONOTLOCK;
while (num_bytes) {
/*
* To find the max. page size with which both PA & VA are
* aligned.
*/
all_bits = pa_curr | va_curr;
dev_dbg(bridge, "all_bits %x, pa_curr %x, va_curr %x, "
"num_bytes %x\n", all_bits, pa_curr, va_curr,
num_bytes);
for (i = 0; i < 4; i++) {
if ((num_bytes >= page_size[i]) && ((all_bits &
(page_size[i] -
1)) == 0)) {
status =
hio_mgr->intf_fxns->
pfn_brd_mem_map(hio_mgr->hbridge_context,
pa_curr, va_curr,
page_size[i], map_attrs,
NULL);
if (status)
goto func_end;
pa_curr += page_size[i];
va_curr += page_size[i];
gpp_va_curr += page_size[i];
num_bytes -= page_size[i];
/*
* Don't try smaller sizes. Hopefully we have
* reached an address aligned to a bigger page
* size.
*/
break;
}
}
}
pa_curr += ul_pad_size;
va_curr += ul_pad_size;
gpp_va_curr += ul_pad_size;
/* Configure the TLB entries for the next cacheable segment */
num_bytes = ul_seg_size;
va_curr = ul_dsp_va * hio_mgr->word_size;
while (num_bytes) {
/*
* To find the max. page size with which both PA & VA are
* aligned.
*/
all_bits = pa_curr | va_curr;
dev_dbg(bridge, "all_bits for Seg1 %x, pa_curr %x, "
"va_curr %x, num_bytes %x\n", all_bits, pa_curr,
va_curr, num_bytes);
for (i = 0; i < 4; i++) {
if (!(num_bytes >= page_size[i]) ||
!((all_bits & (page_size[i] - 1)) == 0))
continue;
if (ndx < MAX_LOCK_TLB_ENTRIES) {
/*
* This is the physical address written to
* DSP MMU.
*/
ae_proc[ndx].ul_gpp_pa = pa_curr;
/*
* This is the virtual uncached ioremapped
* address!!!
*/
ae_proc[ndx].ul_gpp_va = gpp_va_curr;
ae_proc[ndx].ul_dsp_va =
va_curr / hio_mgr->word_size;
ae_proc[ndx].ul_size = page_size[i];
ae_proc[ndx].endianism = HW_LITTLE_ENDIAN;
ae_proc[ndx].elem_size = HW_ELEM_SIZE16BIT;
ae_proc[ndx].mixed_mode = HW_MMU_CPUES;
dev_dbg(bridge, "shm MMU TLB entry PA %x"
" VA %x DSP_VA %x Size %x\n",
ae_proc[ndx].ul_gpp_pa,
ae_proc[ndx].ul_gpp_va,
ae_proc[ndx].ul_dsp_va *
hio_mgr->word_size, page_size[i]);
ndx++;
} else {
status =
hio_mgr->intf_fxns->
pfn_brd_mem_map(hio_mgr->hbridge_context,
pa_curr, va_curr,
page_size[i], map_attrs,
NULL);
dev_dbg(bridge,
"shm MMU PTE entry PA %x"
" VA %x DSP_VA %x Size %x\n",
ae_proc[ndx].ul_gpp_pa,
ae_proc[ndx].ul_gpp_va,
ae_proc[ndx].ul_dsp_va *
hio_mgr->word_size, page_size[i]);
if (status)
goto func_end;
}
pa_curr += page_size[i];
va_curr += page_size[i];
gpp_va_curr += page_size[i];
num_bytes -= page_size[i];
/*
* Don't try smaller sizes. Hopefully we have reached
* an address aligned to a bigger page size.
*/
break;
}
}
 	/*
 	 * Copy remaining entries from CDB. All entries are 1 MB and
@@ -509,12 +634,38 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
 				"DSP_VA 0x%x\n", ae_proc[ndx].ul_gpp_pa,
 				ae_proc[ndx].ul_dsp_va);
 			ndx++;
+		} else {
+			status = hio_mgr->intf_fxns->pfn_brd_mem_map
+				(hio_mgr->hbridge_context,
+				 hio_mgr->ext_proc_info.ty_tlb[i].
+				 ul_gpp_phys,
+				 hio_mgr->ext_proc_info.ty_tlb[i].
+				 ul_dsp_virt, 0x100000, map_attrs,
+				 NULL);
 			}
 		}
 		if (status)
 			goto func_end;
 	}
map_attrs = 0x00000000;
map_attrs = DSP_MAPLITTLEENDIAN;
map_attrs |= DSP_MAPPHYSICALADDR;
map_attrs |= DSP_MAPELEMSIZE32;
map_attrs |= DSP_MAPDONOTLOCK;
/* Map the L4 peripherals */
i = 0;
while (l4_peripheral_table[i].phys_addr) {
status = hio_mgr->intf_fxns->pfn_brd_mem_map
(hio_mgr->hbridge_context, l4_peripheral_table[i].phys_addr,
l4_peripheral_table[i].dsp_virt_addr, HW_PAGE_SIZE4KB,
map_attrs, NULL);
if (status)
goto func_end;
i++;
}
 	for (i = ndx; i < BRDIOCTL_NUMOFMMUTLB; i++) {
 		ae_proc[i].ul_dsp_va = 0;
 		ae_proc[i].ul_gpp_pa = 0;
@@ -537,12 +688,12 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
 		status = -EFAULT;
 		goto func_end;
 	} else {
-		if (sm_sg->seg0_da > ul_shm_base) {
+		if (ae_proc[0].ul_dsp_va > ul_shm_base) {
 			status = -EPERM;
 			goto func_end;
 		}
 		/* ul_shm_base may not be at ul_dsp_va address */
-		ul_shm_base_offset = (ul_shm_base - sm_sg->seg0_da) *
+		ul_shm_base_offset = (ul_shm_base - ae_proc[0].ul_dsp_va) *
 		    hio_mgr->word_size;
 		/*
 		 * bridge_dev_ctrl() will set dev context dsp-mmu info. In
@@ -566,7 +717,8 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
 			goto func_end;
 		}
 		/* Register SM */
-		status = register_shm_segs(hio_mgr, cod_man, sm_sg->seg0_pa);
+		status =
+		    register_shm_segs(hio_mgr, cod_man, ae_proc[0].ul_gpp_pa);
 	}
 	hio_mgr->shared_mem = (struct shm *)ul_shm_base;
......
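The two mapping loops added to bridge_io_on_loaded() above share one idea: at each step, use the largest DSP MMU page size for which the current physical and DSP-virtual cursors are both aligned and enough bytes remain, and fall back to smaller pages only when alignment forces it. A stand-alone sketch of just that selection step (a hypothetical helper; the four constants are the values behind HW_PAGE_SIZE16MB down to HW_PAGE_SIZE4KB):

	/* Sketch only: pick the largest MMU page usable at the current cursors. */
	static u32 pick_page_size(u32 pa_curr, u32 va_curr, u32 num_bytes)
	{
		static const u32 page_size[] = {
			0x1000000,	/* 16 MB */
			0x100000,	/* 1 MB */
			0x10000,	/* 64 KB */
			0x1000,		/* 4 KB */
		};
		u32 all_bits = pa_curr | va_curr;	/* both must be aligned */
		int i;

		for (i = 0; i < 4; i++)
			if (num_bytes >= page_size[i] &&
			    !(all_bits & (page_size[i] - 1)))
				return page_size[i];
		return 0;	/* callers guarantee at least 4 KB alignment */
	}

For example, a 1 MB aligned segment of 1 MB + 4 KB maps as one 1 MB entry followed by a single 4 KB entry.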
@@ -23,7 +23,6 @@
 #include <dspbridge/host_os.h>
 #include <linux/mm.h>
 #include <linux/mmzone.h>
-#include <plat/control.h>
 /* ----------------------------------- DSP/BIOS Bridge */
 #include <dspbridge/dbdefs.h>
@@ -35,6 +34,10 @@
 #include <dspbridge/drv.h>
 #include <dspbridge/sync.h>
+/* ------------------------------------ Hardware Abstraction Layer */
+#include <hw_defs.h>
+#include <hw_mmu.h>
 /* ----------------------------------- Link Driver */
 #include <dspbridge/dspdefs.h>
 #include <dspbridge/dspchnl.h>
@@ -47,6 +50,7 @@
 /* ----------------------------------- Platform Manager */
 #include <dspbridge/dev.h>
 #include <dspbridge/dspapi.h>
+#include <dspbridge/dmm.h>
 #include <dspbridge/wdt.h>
 /* ----------------------------------- Local */
@@ -67,6 +71,20 @@
 #define MMU_SMALL_PAGE_MASK      0xFFFFF000
 #define OMAP3_IVA2_BOOTADDR_MASK 0xFFFFFC00
 #define PAGES_II_LVL_TABLE   512
+#define PHYS_TO_PAGE(phys) pfn_to_page((phys) >> PAGE_SHIFT)
+
+/*
+ * This is a totally ugly layer violation, but needed until
+ * omap_ctrl_set_dsp_boot*() are provided.
+ */
+#define OMAP3_IVA2_BOOTMOD_IDLE 1
+#define OMAP2_CONTROL_GENERAL 0x270
+#define OMAP343X_CONTROL_IVA2_BOOTADDR (OMAP2_CONTROL_GENERAL + 0x0190)
+#define OMAP343X_CONTROL_IVA2_BOOTMOD (OMAP2_CONTROL_GENERAL + 0x0194)
+#define OMAP343X_CTRL_REGADDR(reg) \
+	OMAP2_L4_IO_ADDRESS(OMAP343X_CTRL_BASE + (reg))
 /* Forward Declarations: */
 static int bridge_brd_monitor(struct bridge_dev_context *dev_ctxt);
@@ -91,6 +109,12 @@ static int bridge_brd_mem_copy(struct bridge_dev_context *dev_ctxt,
 static int bridge_brd_mem_write(struct bridge_dev_context *dev_ctxt,
 				    u8 *host_buff, u32 dsp_addr,
 				    u32 ul_num_bytes, u32 mem_type);
+static int bridge_brd_mem_map(struct bridge_dev_context *dev_ctxt,
+				  u32 ul_mpu_addr, u32 virt_addr,
+				  u32 ul_num_bytes, u32 ul_map_attr,
+				  struct page **mapped_pages);
+static int bridge_brd_mem_un_map(struct bridge_dev_context *dev_ctxt,
+				     u32 virt_addr, u32 ul_num_bytes);
 static int bridge_dev_create(struct bridge_dev_context
 					**dev_cntxt,
 					struct dev_object *hdev_obj,
@@ -98,8 +122,57 @@ static int bridge_dev_create(struct bridge_dev_context
 static int bridge_dev_ctrl(struct bridge_dev_context *dev_context,
 				  u32 dw_cmd, void *pargs);
 static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt);
+static u32 user_va2_pa(struct mm_struct *mm, u32 address);
+static int pte_update(struct bridge_dev_context *dev_ctxt, u32 pa,
+			     u32 va, u32 size,
+			     struct hw_mmu_map_attrs_t *map_attrs);
+static int pte_set(struct pg_table_attrs *pt, u32 pa, u32 va,
+			  u32 size, struct hw_mmu_map_attrs_t *attrs);
+static int mem_map_vmalloc(struct bridge_dev_context *dev_context,
+				  u32 ul_mpu_addr, u32 virt_addr,
+				  u32 ul_num_bytes,
+				  struct hw_mmu_map_attrs_t *hw_attrs);
 bool wait_for_start(struct bridge_dev_context *dev_context, u32 dw_sync_addr);
/* ----------------------------------- Globals */
/* Attributes of L2 page tables for DSP MMU */
struct page_info {
u32 num_entries; /* Number of valid PTEs in the L2 PT */
};
/* Attributes used to manage the DSP MMU page tables */
struct pg_table_attrs {
spinlock_t pg_lock; /* Critical section object handle */
u32 l1_base_pa; /* Physical address of the L1 PT */
u32 l1_base_va; /* Virtual address of the L1 PT */
u32 l1_size; /* Size of the L1 PT */
u32 l1_tbl_alloc_pa;
/* Physical address of Allocated mem for L1 table. May not be aligned */
u32 l1_tbl_alloc_va;
/* Virtual address of Allocated mem for L1 table. May not be aligned */
u32 l1_tbl_alloc_sz;
/* Size of consistent memory allocated for L1 table.
* May not be aligned */
u32 l2_base_pa; /* Physical address of the L2 PT */
u32 l2_base_va; /* Virtual address of the L2 PT */
u32 l2_size; /* Size of the L2 PT */
u32 l2_tbl_alloc_pa;
/* Physical address of Allocated mem for L2 table. May not be aligned */
u32 l2_tbl_alloc_va;
/* Virtual address of Allocated mem for L2 table. May not be aligned */
u32 l2_tbl_alloc_sz;
/* Size of consistent memory allocated for L2 table.
* May not be aligned */
u32 l2_num_pages; /* Number of allocated L2 PT */
/* Array [l2_num_pages] of L2 PT info structs */
struct page_info *pg_info;
};
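/*
 * Illustrative aside, not part of the patch: pg_table_attrs above describes
 * a two-level, ARM short-descriptor style table, which is the layout the
 * hw_mmu helpers assume.  Under that assumption a DSP virtual address
 * breaks down roughly as sketched by these hypothetical helpers.
 */
static inline u32 example_l1_index(u32 da)
{
	return da >> 20;	/* selects a 1 MB section or a coarse L2 table */
}

static inline u32 example_l2_index(u32 da)
{
	return (da >> 12) & 0xff;	/* 4 KB slot within a 256-entry coarse table */
}

static inline u32 example_page_offset(u32 da)
{
	return da & 0xfff;
}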
/* /*
* This Bridge driver's function interface table. * This Bridge driver's function interface table.
*/ */
...@@ -119,6 +192,8 @@ static struct bridge_drv_interface drv_interface_fxns = { ...@@ -119,6 +192,8 @@ static struct bridge_drv_interface drv_interface_fxns = {
bridge_brd_set_state, bridge_brd_set_state,
bridge_brd_mem_copy, bridge_brd_mem_copy,
bridge_brd_mem_write, bridge_brd_mem_write,
bridge_brd_mem_map,
bridge_brd_mem_un_map,
/* The following CHNL functions are provided by chnl_io.lib: */ /* The following CHNL functions are provided by chnl_io.lib: */
bridge_chnl_create, bridge_chnl_create,
bridge_chnl_destroy, bridge_chnl_destroy,
...@@ -148,6 +223,27 @@ static struct bridge_drv_interface drv_interface_fxns = { ...@@ -148,6 +223,27 @@ static struct bridge_drv_interface drv_interface_fxns = {
bridge_msg_set_queue_id, bridge_msg_set_queue_id,
}; };
static inline void flush_all(struct bridge_dev_context *dev_context)
{
if (dev_context->dw_brd_state == BRD_DSP_HIBERNATION ||
dev_context->dw_brd_state == BRD_HIBERNATION)
wake_dsp(dev_context, NULL);
hw_mmu_tlb_flush_all(dev_context->dw_dsp_mmu_base);
}
static void bad_page_dump(u32 pa, struct page *pg)
{
pr_emerg("DSPBRIDGE: MAP function: COUNT 0 FOR PA 0x%x\n", pa);
pr_emerg("Bad page state in process '%s'\n"
"page:%p flags:0x%0*lx mapping:%p mapcount:%d count:%d\n"
"Backtrace:\n",
current->comm, pg, (int)(2 * sizeof(unsigned long)),
(unsigned long)pg->flags, pg->mapping,
page_mapcount(pg), page_count(pg));
dump_stack();
}
 /*
  * ======== bridge_drv_entry ========
  * purpose:
@@ -203,7 +299,8 @@ static int bridge_brd_monitor(struct bridge_dev_context *dev_ctxt)
 		(*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_DISABLE_AUTO,
 					OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL);
 	}
+	(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK, 0,
+					OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
 	dsp_clk_enable(DSP_CLK_IVA2);
 	/* set the device state to IDLE */
@@ -274,17 +371,14 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
 {
 	int status = 0;
 	struct bridge_dev_context *dev_context = dev_ctxt;
-	struct iommu *mmu = NULL;
-	struct shm_segs *sm_sg;
-	int l4_i = 0, tlb_i = 0;
-	u32 sg0_da = 0, sg1_da = 0;
-	struct bridge_ioctl_extproc *tlb = dev_context->atlb_entry;
 	u32 dw_sync_addr = 0;
 	u32 ul_shm_base;	/* Gpp Phys SM base addr(byte) */
 	u32 ul_shm_base_virt;	/* Dsp Virt SM base addr */
 	u32 ul_tlb_base_virt;	/* Base of MMU TLB entry */
 	/* Offset of shm_base_virt from tlb_base_virt */
 	u32 ul_shm_offset_virt;
+	s32 entry_ndx;
+	s32 itmp_entry_ndx = 0;	/* DSP-MMU TLB entry base address */
 	struct cfg_hostres *resources = NULL;
 	u32 temp;
 	u32 ul_dsp_clk_rate;
@@ -305,12 +399,12 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
 	ul_shm_base_virt *= DSPWORDSIZE;
 	DBC_ASSERT(ul_shm_base_virt != 0);
 	/* DSP Virtual address */
-	ul_tlb_base_virt = dev_context->sh_s.seg0_da;
+	ul_tlb_base_virt = dev_context->atlb_entry[0].ul_dsp_va;
 	DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt);
 	ul_shm_offset_virt =
 	    ul_shm_base_virt - (ul_tlb_base_virt * DSPWORDSIZE);
 	/* Kernel logical address */
-	ul_shm_base = dev_context->sh_s.seg0_va + ul_shm_offset_virt;
+	ul_shm_base = dev_context->atlb_entry[0].ul_gpp_va + ul_shm_offset_virt;
 	DBC_ASSERT(ul_shm_base != 0);
 	/* 2nd wd is used as sync field */
@@ -345,83 +439,78 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
 				OMAP343X_CONTROL_IVA2_BOOTMOD));
 		}
 	}
if (!status) { if (!status) {
/* Reset and Unreset the RST2, so that BOOTADDR is copied to
* IVA2 SYSC register */
(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK,
OMAP3430_RST2_IVA2_MASK, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
udelay(100);
(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK, 0, (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK, 0,
OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL); OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
mmu = dev_context->dsp_mmu; udelay(100);
if (mmu)
dsp_mmu_exit(mmu); /* Disbale the DSP MMU */
mmu = dsp_mmu_init(); hw_mmu_disable(resources->dw_dmmu_base);
if (IS_ERR(mmu)) { /* Disable TWL */
dev_err(bridge, "dsp_mmu_init failed!\n"); hw_mmu_twl_disable(resources->dw_dmmu_base);
dev_context->dsp_mmu = NULL;
status = (int)mmu; /* Only make TLB entry if both addresses are non-zero */
} for (entry_ndx = 0; entry_ndx < BRDIOCTL_NUMOFMMUTLB;
} entry_ndx++) {
if (!status) { struct bridge_ioctl_extproc *e = &dev_context->atlb_entry[entry_ndx];
dev_context->dsp_mmu = mmu; struct hw_mmu_map_attrs_t map_attrs = {
sm_sg = &dev_context->sh_s; .endianism = e->endianism,
sg0_da = iommu_kmap(mmu, sm_sg->seg0_da, sm_sg->seg0_pa, .element_size = e->elem_size,
sm_sg->seg0_size, IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_32); .mixed_size = e->mixed_mode,
if (IS_ERR_VALUE(sg0_da)) { };
status = (int)sg0_da;
sg0_da = 0; if (!e->ul_gpp_pa || !e->ul_dsp_va)
}
}
if (!status) {
sg1_da = iommu_kmap(mmu, sm_sg->seg1_da, sm_sg->seg1_pa,
sm_sg->seg1_size, IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_32);
if (IS_ERR_VALUE(sg1_da)) {
status = (int)sg1_da;
sg1_da = 0;
}
}
if (!status) {
u32 da;
for (tlb_i = 0; tlb_i < BRDIOCTL_NUMOFMMUTLB; tlb_i++) {
if (!tlb[tlb_i].ul_gpp_pa)
continue; continue;
dev_dbg(bridge, "IOMMU %d GppPa: 0x%x DspVa 0x%x Size" dev_dbg(bridge,
" 0x%x\n", tlb_i, tlb[tlb_i].ul_gpp_pa, "MMU %d, pa: 0x%x, va: 0x%x, size: 0x%x",
tlb[tlb_i].ul_dsp_va, tlb[tlb_i].ul_size); itmp_entry_ndx,
e->ul_gpp_pa,
da = iommu_kmap(mmu, tlb[tlb_i].ul_dsp_va, e->ul_dsp_va,
tlb[tlb_i].ul_gpp_pa, PAGE_SIZE, e->ul_size);
IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_32);
if (IS_ERR_VALUE(da)) { hw_mmu_tlb_add(dev_context->dw_dsp_mmu_base,
status = (int)da; e->ul_gpp_pa,
break; e->ul_dsp_va,
} e->ul_size,
} itmp_entry_ndx,
} &map_attrs, 1, 1);
if (!status) {
u32 da; itmp_entry_ndx++;
l4_i = 0;
while (l4_peripheral_table[l4_i].phys_addr) {
da = iommu_kmap(mmu, l4_peripheral_table[l4_i].
dsp_virt_addr, l4_peripheral_table[l4_i].
phys_addr, PAGE_SIZE,
IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_32);
if (IS_ERR_VALUE(da)) {
status = (int)da;
break;
}
l4_i++;
} }
} }
/* Lock the above TLB entries and get the BIOS and load monitor timer /* Lock the above TLB entries and get the BIOS and load monitor timer
* information */ * information */
if (!status) { if (!status) {
hw_mmu_num_locked_set(resources->dw_dmmu_base, itmp_entry_ndx);
hw_mmu_victim_num_set(resources->dw_dmmu_base, itmp_entry_ndx);
hw_mmu_ttb_set(resources->dw_dmmu_base,
dev_context->pt_attrs->l1_base_pa);
hw_mmu_twl_enable(resources->dw_dmmu_base);
/* Enable the SmartIdle and AutoIdle bit for MMU_SYSCONFIG */
temp = __raw_readl((resources->dw_dmmu_base) + 0x10);
temp = (temp & 0xFFFFFFEF) | 0x11;
__raw_writel(temp, (resources->dw_dmmu_base) + 0x10);
/* Let the DSP MMU run */
hw_mmu_enable(resources->dw_dmmu_base);
/* Enable the BIOS clock */ /* Enable the BIOS clock */
(void)dev_get_symbol(dev_context->hdev_obj, (void)dev_get_symbol(dev_context->hdev_obj,
BRIDGEINIT_BIOSGPTIMER, &ul_bios_gp_timer); BRIDGEINIT_BIOSGPTIMER, &ul_bios_gp_timer);
(void)dev_get_symbol(dev_context->hdev_obj, (void)dev_get_symbol(dev_context->hdev_obj,
BRIDGEINIT_LOADMON_GPTIMER, BRIDGEINIT_LOADMON_GPTIMER,
&ul_load_monitor_timer); &ul_load_monitor_timer);
}
if (!status) {
if (ul_load_monitor_timer != 0xFFFF) { if (ul_load_monitor_timer != 0xFFFF) {
clk_cmd = (BPWR_ENABLE_CLOCK << MBX_PM_CLK_CMDSHIFT) | clk_cmd = (BPWR_ENABLE_CLOCK << MBX_PM_CLK_CMDSHIFT) |
ul_load_monitor_timer; ul_load_monitor_timer;
...@@ -430,7 +519,9 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt, ...@@ -430,7 +519,9 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
dev_dbg(bridge, "Not able to get the symbol for Load " dev_dbg(bridge, "Not able to get the symbol for Load "
"Monitor Timer\n"); "Monitor Timer\n");
} }
}
if (!status) {
if (ul_bios_gp_timer != 0xFFFF) { if (ul_bios_gp_timer != 0xFFFF) {
clk_cmd = (BPWR_ENABLE_CLOCK << MBX_PM_CLK_CMDSHIFT) | clk_cmd = (BPWR_ENABLE_CLOCK << MBX_PM_CLK_CMDSHIFT) |
ul_bios_gp_timer; ul_bios_gp_timer;
...@@ -439,7 +530,9 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt, ...@@ -439,7 +530,9 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
dev_dbg(bridge, dev_dbg(bridge,
"Not able to get the symbol for BIOS Timer\n"); "Not able to get the symbol for BIOS Timer\n");
} }
}
if (!status) {
/* Set the DSP clock rate */ /* Set the DSP clock rate */
(void)dev_get_symbol(dev_context->hdev_obj, (void)dev_get_symbol(dev_context->hdev_obj,
"_BRIDGEINIT_DSP_FREQ", &ul_dsp_clk_addr); "_BRIDGEINIT_DSP_FREQ", &ul_dsp_clk_addr);
...@@ -492,6 +585,9 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt, ...@@ -492,6 +585,9 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
/* Let DSP go */ /* Let DSP go */
dev_dbg(bridge, "%s Unreset\n", __func__); dev_dbg(bridge, "%s Unreset\n", __func__);
/* Enable DSP MMU Interrupts */
hw_mmu_event_enable(resources->dw_dmmu_base,
HW_MMU_ALL_INTERRUPTS);
/* release the RST1, DSP starts executing now .. */ /* release the RST1, DSP starts executing now .. */
(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK, 0, (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK, 0,
OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL); OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
...@@ -521,23 +617,11 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt, ...@@ -521,23 +617,11 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
/* update board state */ /* update board state */
dev_context->dw_brd_state = BRD_RUNNING; dev_context->dw_brd_state = BRD_RUNNING;
return 0; /* (void)chnlsm_enable_interrupt(dev_context); */
} else { } else {
dev_context->dw_brd_state = BRD_UNKNOWN; dev_context->dw_brd_state = BRD_UNKNOWN;
} }
} }
while (tlb_i--) {
if (!tlb[tlb_i].ul_gpp_pa)
continue;
iommu_kunmap(mmu, tlb[tlb_i].ul_gpp_va);
}
while (l4_i--)
iommu_kunmap(mmu, l4_peripheral_table[l4_i].dsp_virt_addr);
if (sg0_da)
iommu_kunmap(mmu, sg0_da);
if (sg1_da)
iommu_kunmap(mmu, sg1_da);
return status; return status;
} }
...@@ -553,9 +637,8 @@ static int bridge_brd_stop(struct bridge_dev_context *dev_ctxt) ...@@ -553,9 +637,8 @@ static int bridge_brd_stop(struct bridge_dev_context *dev_ctxt)
{ {
int status = 0; int status = 0;
struct bridge_dev_context *dev_context = dev_ctxt; struct bridge_dev_context *dev_context = dev_ctxt;
struct pg_table_attrs *pt_attrs;
u32 dsp_pwr_state; u32 dsp_pwr_state;
int i;
struct bridge_ioctl_extproc *tlb = dev_context->atlb_entry;
struct omap_dsp_platform_data *pdata = struct omap_dsp_platform_data *pdata =
omap_dspbridge_dev->dev.platform_data; omap_dspbridge_dev->dev.platform_data;
...@@ -591,37 +674,23 @@ static int bridge_brd_stop(struct bridge_dev_context *dev_ctxt) ...@@ -591,37 +674,23 @@ static int bridge_brd_stop(struct bridge_dev_context *dev_ctxt)
dsp_wdt_enable(false); dsp_wdt_enable(false);
/* Reset DSP */ /* This is a good place to clear the MMU page tables as well */
(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK, if (dev_context->pt_attrs) {
OMAP3430_RST1_IVA2_MASK, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL); pt_attrs = dev_context->pt_attrs;
memset((u8 *) pt_attrs->l1_base_va, 0x00, pt_attrs->l1_size);
memset((u8 *) pt_attrs->l2_base_va, 0x00, pt_attrs->l2_size);
memset((u8 *) pt_attrs->pg_info, 0x00,
(pt_attrs->l2_num_pages * sizeof(struct page_info)));
}
/* Disable the mailbox interrupts */ /* Disable the mailbox interrupts */
if (dev_context->mbox) { if (dev_context->mbox) {
omap_mbox_disable_irq(dev_context->mbox, IRQ_RX); omap_mbox_disable_irq(dev_context->mbox, IRQ_RX);
omap_mbox_put(dev_context->mbox); omap_mbox_put(dev_context->mbox);
dev_context->mbox = NULL; dev_context->mbox = NULL;
} }
if (dev_context->dsp_mmu) { /* Reset IVA2 clocks*/
pr_err("Proc stop mmu if statement\n"); (*pdata->dsp_prm_write)(OMAP3430_RST1_IVA2_MASK | OMAP3430_RST2_IVA2_MASK |
for (i = 0; i < BRDIOCTL_NUMOFMMUTLB; i++) { OMAP3430_RST3_IVA2_MASK, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
if (!tlb[i].ul_gpp_pa)
continue;
iommu_kunmap(dev_context->dsp_mmu, tlb[i].ul_gpp_va);
}
i = 0;
while (l4_peripheral_table[i].phys_addr) {
iommu_kunmap(dev_context->dsp_mmu,
l4_peripheral_table[i].dsp_virt_addr);
i++;
}
iommu_kunmap(dev_context->dsp_mmu, dev_context->sh_s.seg0_da);
iommu_kunmap(dev_context->dsp_mmu, dev_context->sh_s.seg1_da);
dsp_mmu_exit(dev_context->dsp_mmu);
dev_context->dsp_mmu = NULL;
}
/* Reset IVA IOMMU*/
(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK,
OMAP3430_RST2_IVA2_MASK, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
dsp_clock_disable_all(dev_context->dsp_per_clks); dsp_clock_disable_all(dev_context->dsp_per_clks);
dsp_clk_disable(DSP_CLK_IVA2); dsp_clk_disable(DSP_CLK_IVA2);
@@ -681,6 +750,10 @@ static int bridge_dev_create(struct bridge_dev_context
 	struct bridge_dev_context *dev_context = NULL;
 	s32 entry_ndx;
 	struct cfg_hostres *resources = config_param;
+	struct pg_table_attrs *pt_attrs;
+	u32 pg_tbl_pa;
+	u32 pg_tbl_va;
+	u32 align_size;
 	struct drv_data *drv_datap = dev_get_drvdata(bridge);
 	/* Allocate and initialize a data structure to contain the bridge driver
@@ -711,8 +784,97 @@ static int bridge_dev_create(struct bridge_dev_context
 	if (!dev_context->dw_dsp_base_addr)
 		status = -EPERM;
pt_attrs = kzalloc(sizeof(struct pg_table_attrs), GFP_KERNEL);
if (pt_attrs != NULL) {
/* Assuming that we use only DSP's memory map
* until 0x4000:0000 , we would need only 1024
* L1 enties i.e L1 size = 4K */
pt_attrs->l1_size = 0x1000;
align_size = pt_attrs->l1_size;
/* Align sizes are expected to be power of 2 */
/* we like to get aligned on L1 table size */
pg_tbl_va = (u32) mem_alloc_phys_mem(pt_attrs->l1_size,
align_size, &pg_tbl_pa);
/* Check if the PA is aligned for us */
if ((pg_tbl_pa) & (align_size - 1)) {
/* PA not aligned to page table size ,
* try with more allocation and align */
mem_free_phys_mem((void *)pg_tbl_va, pg_tbl_pa,
pt_attrs->l1_size);
/* we like to get aligned on L1 table size */
pg_tbl_va =
(u32) mem_alloc_phys_mem((pt_attrs->l1_size) * 2,
align_size, &pg_tbl_pa);
/* We should be able to get aligned table now */
pt_attrs->l1_tbl_alloc_pa = pg_tbl_pa;
pt_attrs->l1_tbl_alloc_va = pg_tbl_va;
pt_attrs->l1_tbl_alloc_sz = pt_attrs->l1_size * 2;
/* Align the PA to the next 'align' boundary */
pt_attrs->l1_base_pa =
((pg_tbl_pa) +
(align_size - 1)) & (~(align_size - 1));
pt_attrs->l1_base_va =
pg_tbl_va + (pt_attrs->l1_base_pa - pg_tbl_pa);
} else {
/* We got aligned PA, cool */
pt_attrs->l1_tbl_alloc_pa = pg_tbl_pa;
pt_attrs->l1_tbl_alloc_va = pg_tbl_va;
pt_attrs->l1_tbl_alloc_sz = pt_attrs->l1_size;
pt_attrs->l1_base_pa = pg_tbl_pa;
pt_attrs->l1_base_va = pg_tbl_va;
}
if (pt_attrs->l1_base_va)
memset((u8 *) pt_attrs->l1_base_va, 0x00,
pt_attrs->l1_size);
/* number of L2 page tables = DMM pool used + SHMMEM +EXTMEM +
* L4 pages */
pt_attrs->l2_num_pages = ((DMMPOOLSIZE >> 20) + 6);
pt_attrs->l2_size = HW_MMU_COARSE_PAGE_SIZE *
pt_attrs->l2_num_pages;
align_size = 4; /* Make it u32 aligned */
/* we like to get aligned on L1 table size */
pg_tbl_va = (u32) mem_alloc_phys_mem(pt_attrs->l2_size,
align_size, &pg_tbl_pa);
pt_attrs->l2_tbl_alloc_pa = pg_tbl_pa;
pt_attrs->l2_tbl_alloc_va = pg_tbl_va;
pt_attrs->l2_tbl_alloc_sz = pt_attrs->l2_size;
pt_attrs->l2_base_pa = pg_tbl_pa;
pt_attrs->l2_base_va = pg_tbl_va;
if (pt_attrs->l2_base_va)
memset((u8 *) pt_attrs->l2_base_va, 0x00,
pt_attrs->l2_size);
pt_attrs->pg_info = kzalloc(pt_attrs->l2_num_pages *
sizeof(struct page_info), GFP_KERNEL);
dev_dbg(bridge,
"L1 pa %x, va %x, size %x\n L2 pa %x, va "
"%x, size %x\n", pt_attrs->l1_base_pa,
pt_attrs->l1_base_va, pt_attrs->l1_size,
pt_attrs->l2_base_pa, pt_attrs->l2_base_va,
pt_attrs->l2_size);
dev_dbg(bridge, "pt_attrs %p L2 NumPages %x pg_info %p\n",
pt_attrs, pt_attrs->l2_num_pages, pt_attrs->pg_info);
}
if ((pt_attrs != NULL) && (pt_attrs->l1_base_va != 0) &&
(pt_attrs->l2_base_va != 0) && (pt_attrs->pg_info != NULL))
dev_context->pt_attrs = pt_attrs;
else
status = -ENOMEM;
 	if (!status) {
+		spin_lock_init(&pt_attrs->pg_lock);
 		dev_context->tc_word_swap_on = drv_datap->tc_wordswapon;
+		/* Set the Clock Divisor for the DSP module */
+		udelay(5);
+		/* MMU address is obtained from the host
+		 * resources struct */
+		dev_context->dw_dsp_mmu_base = resources->dw_dmmu_base;
+	}
+	if (!status) {
 		dev_context->hdev_obj = hdev_obj;
 		/* Store current board state. */
 		dev_context->dw_brd_state = BRD_UNKNOWN;
...@@ -722,6 +884,23 @@ static int bridge_dev_create(struct bridge_dev_context ...@@ -722,6 +884,23 @@ static int bridge_dev_create(struct bridge_dev_context
/* Return ptr to our device state to the DSP API for storage */ /* Return ptr to our device state to the DSP API for storage */
*dev_cntxt = dev_context; *dev_cntxt = dev_context;
} else { } else {
if (pt_attrs != NULL) {
kfree(pt_attrs->pg_info);
if (pt_attrs->l2_tbl_alloc_va) {
mem_free_phys_mem((void *)
pt_attrs->l2_tbl_alloc_va,
pt_attrs->l2_tbl_alloc_pa,
pt_attrs->l2_tbl_alloc_sz);
}
if (pt_attrs->l1_tbl_alloc_va) {
mem_free_phys_mem((void *)
pt_attrs->l1_tbl_alloc_va,
pt_attrs->l1_tbl_alloc_pa,
pt_attrs->l1_tbl_alloc_sz);
}
}
kfree(pt_attrs);
kfree(dev_context); kfree(dev_context);
} }
func_end: func_end:
@@ -789,6 +968,7 @@ static int bridge_dev_ctrl(struct bridge_dev_context *dev_context,
  */
 static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt)
 {
+	struct pg_table_attrs *pt_attrs;
 	int status = 0;
 	struct bridge_dev_context *dev_context = (struct bridge_dev_context *)
 	    dev_ctxt;
@@ -802,6 +982,23 @@ static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt)
 	/* first put the device to stop state */
 	bridge_brd_stop(dev_context);
if (dev_context->pt_attrs) {
pt_attrs = dev_context->pt_attrs;
kfree(pt_attrs->pg_info);
if (pt_attrs->l2_tbl_alloc_va) {
mem_free_phys_mem((void *)pt_attrs->l2_tbl_alloc_va,
pt_attrs->l2_tbl_alloc_pa,
pt_attrs->l2_tbl_alloc_sz);
}
if (pt_attrs->l1_tbl_alloc_va) {
mem_free_phys_mem((void *)pt_attrs->l1_tbl_alloc_va,
pt_attrs->l1_tbl_alloc_pa,
pt_attrs->l1_tbl_alloc_sz);
}
kfree(pt_attrs);
}
 	if (dev_context->resources) {
 		host_res = dev_context->resources;
@@ -832,6 +1029,8 @@ static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt)
 			iounmap((void *)host_res->dw_mem_base[3]);
 		if (host_res->dw_mem_base[4])
 			iounmap((void *)host_res->dw_mem_base[4]);
+		if (host_res->dw_dmmu_base)
+			iounmap(host_res->dw_dmmu_base);
 		if (host_res->dw_per_base)
 			iounmap(host_res->dw_per_base);
 		if (host_res->dw_per_pm_base)
@@ -845,6 +1044,7 @@ static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt)
 		host_res->dw_mem_base[2] = (u32) NULL;
 		host_res->dw_mem_base[3] = (u32) NULL;
 		host_res->dw_mem_base[4] = (u32) NULL;
+		host_res->dw_dmmu_base = NULL;
 		host_res->dw_sys_ctrl_base = NULL;
 		kfree(host_res);
@@ -927,6 +1127,673 @@ static int bridge_brd_mem_write(struct bridge_dev_context *dev_ctxt,
 	return status;
 }
/*
* ======== bridge_brd_mem_map ========
* This function maps MPU buffer to the DSP address space. It performs
* linear to physical address translation if required. It translates each
* page since linear addresses can be physically non-contiguous
* All address & size arguments are assumed to be page aligned (in proc.c)
*
* TODO: Disable MMU while updating the page tables (but that'll stall DSP)
*/
static int bridge_brd_mem_map(struct bridge_dev_context *dev_ctxt,
u32 ul_mpu_addr, u32 virt_addr,
u32 ul_num_bytes, u32 ul_map_attr,
struct page **mapped_pages)
{
u32 attrs;
int status = 0;
struct bridge_dev_context *dev_context = dev_ctxt;
struct hw_mmu_map_attrs_t hw_attrs;
struct vm_area_struct *vma;
struct mm_struct *mm = current->mm;
u32 write = 0;
u32 num_usr_pgs = 0;
struct page *mapped_page, *pg;
s32 pg_num;
u32 va = virt_addr;
struct task_struct *curr_task = current;
u32 pg_i = 0;
u32 mpu_addr, pa;
dev_dbg(bridge,
"%s hDevCtxt %p, pa %x, va %x, size %x, ul_map_attr %x\n",
__func__, dev_ctxt, ul_mpu_addr, virt_addr, ul_num_bytes,
ul_map_attr);
if (ul_num_bytes == 0)
return -EINVAL;
if (ul_map_attr & DSP_MAP_DIR_MASK) {
attrs = ul_map_attr;
} else {
/* Assign default attributes */
attrs = ul_map_attr | (DSP_MAPVIRTUALADDR | DSP_MAPELEMSIZE16);
}
/* Take mapping properties */
if (attrs & DSP_MAPBIGENDIAN)
hw_attrs.endianism = HW_BIG_ENDIAN;
else
hw_attrs.endianism = HW_LITTLE_ENDIAN;
hw_attrs.mixed_size = (enum hw_mmu_mixed_size_t)
((attrs & DSP_MAPMIXEDELEMSIZE) >> 2);
/* Ignore element_size if mixed_size is enabled */
if (hw_attrs.mixed_size == 0) {
if (attrs & DSP_MAPELEMSIZE8) {
/* Size is 8 bit */
hw_attrs.element_size = HW_ELEM_SIZE8BIT;
} else if (attrs & DSP_MAPELEMSIZE16) {
/* Size is 16 bit */
hw_attrs.element_size = HW_ELEM_SIZE16BIT;
} else if (attrs & DSP_MAPELEMSIZE32) {
/* Size is 32 bit */
hw_attrs.element_size = HW_ELEM_SIZE32BIT;
} else if (attrs & DSP_MAPELEMSIZE64) {
/* Size is 64 bit */
hw_attrs.element_size = HW_ELEM_SIZE64BIT;
} else {
/*
* Mixedsize isn't enabled, so size can't be
* zero here
*/
return -EINVAL;
}
}
if (attrs & DSP_MAPDONOTLOCK)
hw_attrs.donotlockmpupage = 1;
else
hw_attrs.donotlockmpupage = 0;
if (attrs & DSP_MAPVMALLOCADDR) {
return mem_map_vmalloc(dev_ctxt, ul_mpu_addr, virt_addr,
ul_num_bytes, &hw_attrs);
}
/*
* Do OS-specific user-va to pa translation.
* Combine physically contiguous regions to reduce TLBs.
* Pass the translated pa to pte_update.
*/
if ((attrs & DSP_MAPPHYSICALADDR)) {
status = pte_update(dev_context, ul_mpu_addr, virt_addr,
ul_num_bytes, &hw_attrs);
goto func_cont;
}
/*
* Important Note: ul_mpu_addr is mapped from user application process
* to current process - it must lie completely within the current
* virtual memory address space in order to be of use to us here!
*/
down_read(&mm->mmap_sem);
vma = find_vma(mm, ul_mpu_addr);
if (vma)
dev_dbg(bridge,
"VMAfor UserBuf: ul_mpu_addr=%x, ul_num_bytes=%x, "
"vm_start=%lx, vm_end=%lx, vm_flags=%lx\n", ul_mpu_addr,
ul_num_bytes, vma->vm_start, vma->vm_end,
vma->vm_flags);
/*
* It is observed that under some circumstances, the user buffer is
* spread across several VMAs. So loop through and check if the entire
* user buffer is covered
*/
while ((vma) && (ul_mpu_addr + ul_num_bytes > vma->vm_end)) {
/* jump to the next VMA region */
vma = find_vma(mm, vma->vm_end + 1);
dev_dbg(bridge,
"VMA for UserBuf ul_mpu_addr=%x ul_num_bytes=%x, "
"vm_start=%lx, vm_end=%lx, vm_flags=%lx\n", ul_mpu_addr,
ul_num_bytes, vma->vm_start, vma->vm_end,
vma->vm_flags);
}
if (!vma) {
pr_err("%s: Failed to get VMA region for 0x%x (%d)\n",
__func__, ul_mpu_addr, ul_num_bytes);
status = -EINVAL;
up_read(&mm->mmap_sem);
goto func_cont;
}
if (vma->vm_flags & VM_IO) {
num_usr_pgs = ul_num_bytes / PG_SIZE4K;
mpu_addr = ul_mpu_addr;
/* Get the physical addresses for user buffer */
for (pg_i = 0; pg_i < num_usr_pgs; pg_i++) {
pa = user_va2_pa(mm, mpu_addr);
if (!pa) {
status = -EPERM;
pr_err("DSPBRIDGE: VM_IO mapping physical"
"address is invalid\n");
break;
}
if (pfn_valid(__phys_to_pfn(pa))) {
pg = PHYS_TO_PAGE(pa);
get_page(pg);
if (page_count(pg) < 1) {
pr_err("Bad page in VM_IO buffer\n");
bad_page_dump(pa, pg);
}
}
status = pte_set(dev_context->pt_attrs, pa,
va, HW_PAGE_SIZE4KB, &hw_attrs);
if (status)
break;
va += HW_PAGE_SIZE4KB;
mpu_addr += HW_PAGE_SIZE4KB;
pa += HW_PAGE_SIZE4KB;
}
} else {
num_usr_pgs = ul_num_bytes / PG_SIZE4K;
if (vma->vm_flags & (VM_WRITE | VM_MAYWRITE))
write = 1;
for (pg_i = 0; pg_i < num_usr_pgs; pg_i++) {
pg_num = get_user_pages(curr_task, mm, ul_mpu_addr, 1,
write, 1, &mapped_page, NULL);
if (pg_num > 0) {
if (page_count(mapped_page) < 1) {
pr_err("Bad page count after doing"
"get_user_pages on"
"user buffer\n");
bad_page_dump(page_to_phys(mapped_page),
mapped_page);
}
status = pte_set(dev_context->pt_attrs,
page_to_phys(mapped_page), va,
HW_PAGE_SIZE4KB, &hw_attrs);
if (status)
break;
if (mapped_pages)
mapped_pages[pg_i] = mapped_page;
va += HW_PAGE_SIZE4KB;
ul_mpu_addr += HW_PAGE_SIZE4KB;
} else {
pr_err("DSPBRIDGE: get_user_pages FAILED,"
"MPU addr = 0x%x,"
"vma->vm_flags = 0x%lx,"
"get_user_pages Err"
"Value = %d, Buffer"
"size=0x%x\n", ul_mpu_addr,
vma->vm_flags, pg_num, ul_num_bytes);
status = -EPERM;
break;
}
}
}
up_read(&mm->mmap_sem);
func_cont:
if (status) {
/*
* Roll out the mapped pages incase it failed in middle of
* mapping
*/
if (pg_i) {
bridge_brd_mem_un_map(dev_context, virt_addr,
(pg_i * PG_SIZE4K));
}
status = -EPERM;
}
/*
* In any case, flush the TLB
* This is called from here instead from pte_update to avoid unnecessary
* repetition while mapping non-contiguous physical regions of a virtual
* region
*/
flush_all(dev_context);
dev_dbg(bridge, "%s status %x\n", __func__, status);
return status;
}
/*
* ======== bridge_brd_mem_un_map ========
* Invalidate the PTEs for the DSP VA block to be unmapped.
*
* PTEs of a mapped memory block are contiguous in any page table
* So, instead of looking up the PTE address for every 4K block,
* we clear consecutive PTEs until we unmap all the bytes
*/
static int bridge_brd_mem_un_map(struct bridge_dev_context *dev_ctxt,
u32 virt_addr, u32 ul_num_bytes)
{
u32 l1_base_va;
u32 l2_base_va;
u32 l2_base_pa;
u32 l2_page_num;
u32 pte_val;
u32 pte_size;
u32 pte_count;
u32 pte_addr_l1;
u32 pte_addr_l2 = 0;
u32 rem_bytes;
u32 rem_bytes_l2;
u32 va_curr;
struct page *pg = NULL;
int status = 0;
struct bridge_dev_context *dev_context = dev_ctxt;
struct pg_table_attrs *pt = dev_context->pt_attrs;
u32 temp;
u32 paddr;
u32 numof4k_pages = 0;
va_curr = virt_addr;
rem_bytes = ul_num_bytes;
rem_bytes_l2 = 0;
l1_base_va = pt->l1_base_va;
pte_addr_l1 = hw_mmu_pte_addr_l1(l1_base_va, va_curr);
dev_dbg(bridge, "%s dev_ctxt %p, va %x, NumBytes %x l1_base_va %x, "
"pte_addr_l1 %x\n", __func__, dev_ctxt, virt_addr,
ul_num_bytes, l1_base_va, pte_addr_l1);
while (rem_bytes && !status) {
u32 va_curr_orig = va_curr;
/* Find whether the L1 PTE points to a valid L2 PT */
pte_addr_l1 = hw_mmu_pte_addr_l1(l1_base_va, va_curr);
pte_val = *(u32 *) pte_addr_l1;
pte_size = hw_mmu_pte_size_l1(pte_val);
if (pte_size != HW_MMU_COARSE_PAGE_SIZE)
goto skip_coarse_page;
/*
* Get the L2 PA from the L1 PTE, and find
* corresponding L2 VA
*/
l2_base_pa = hw_mmu_pte_coarse_l1(pte_val);
l2_base_va = l2_base_pa - pt->l2_base_pa + pt->l2_base_va;
l2_page_num =
(l2_base_pa - pt->l2_base_pa) / HW_MMU_COARSE_PAGE_SIZE;
/*
* Find the L2 PTE address from which we will start
* clearing, the number of PTEs to be cleared on this
* page, and the size of VA space that needs to be
* cleared on this L2 page
*/
pte_addr_l2 = hw_mmu_pte_addr_l2(l2_base_va, va_curr);
pte_count = pte_addr_l2 & (HW_MMU_COARSE_PAGE_SIZE - 1);
pte_count = (HW_MMU_COARSE_PAGE_SIZE - pte_count) / sizeof(u32);
if (rem_bytes < (pte_count * PG_SIZE4K))
pte_count = rem_bytes / PG_SIZE4K;
rem_bytes_l2 = pte_count * PG_SIZE4K;
/*
* Unmap the VA space on this L2 PT. A quicker way
* would be to clear pte_count entries starting from
* pte_addr_l2. However, below code checks that we don't
* clear invalid entries or less than 64KB for a 64KB
* entry. Similar checking is done for L1 PTEs too
* below
*/
while (rem_bytes_l2 && !status) {
pte_val = *(u32 *) pte_addr_l2;
pte_size = hw_mmu_pte_size_l2(pte_val);
/* va_curr aligned to pte_size? */
if (pte_size == 0 || rem_bytes_l2 < pte_size ||
va_curr & (pte_size - 1)) {
status = -EPERM;
break;
}
/* Collect Physical addresses from VA */
paddr = (pte_val & ~(pte_size - 1));
if (pte_size == HW_PAGE_SIZE64KB)
numof4k_pages = 16;
else
numof4k_pages = 1;
temp = 0;
while (temp++ < numof4k_pages) {
if (!pfn_valid(__phys_to_pfn(paddr))) {
paddr += HW_PAGE_SIZE4KB;
continue;
}
pg = PHYS_TO_PAGE(paddr);
if (page_count(pg) < 1) {
pr_info("DSPBRIDGE: UNMAP function: "
"COUNT 0 FOR PA 0x%x, size = "
"0x%x\n", paddr, ul_num_bytes);
bad_page_dump(paddr, pg);
} else {
set_page_dirty(pg);
page_cache_release(pg);
}
paddr += HW_PAGE_SIZE4KB;
}
if (hw_mmu_pte_clear(pte_addr_l2, va_curr, pte_size)) {
status = -EPERM;
goto EXIT_LOOP;
}
status = 0;
rem_bytes_l2 -= pte_size;
va_curr += pte_size;
pte_addr_l2 += (pte_size >> 12) * sizeof(u32);
}
spin_lock(&pt->pg_lock);
if (rem_bytes_l2 == 0) {
pt->pg_info[l2_page_num].num_entries -= pte_count;
if (pt->pg_info[l2_page_num].num_entries == 0) {
/*
* Clear the L1 PTE pointing to the L2 PT
*/
if (!hw_mmu_pte_clear(l1_base_va, va_curr_orig,
HW_MMU_COARSE_PAGE_SIZE))
status = 0;
else {
status = -EPERM;
spin_unlock(&pt->pg_lock);
goto EXIT_LOOP;
}
}
rem_bytes -= pte_count * PG_SIZE4K;
} else
status = -EPERM;
spin_unlock(&pt->pg_lock);
continue;
skip_coarse_page:
/* va_curr aligned to pte_size? */
/* pte_size = 1 MB or 16 MB */
if (pte_size == 0 || rem_bytes < pte_size ||
va_curr & (pte_size - 1)) {
status = -EPERM;
break;
}
if (pte_size == HW_PAGE_SIZE1MB)
numof4k_pages = 256;
else
numof4k_pages = 4096;
temp = 0;
/* Collect Physical addresses from VA */
paddr = (pte_val & ~(pte_size - 1));
while (temp++ < numof4k_pages) {
if (pfn_valid(__phys_to_pfn(paddr))) {
pg = PHYS_TO_PAGE(paddr);
if (page_count(pg) < 1) {
pr_info("DSPBRIDGE: UNMAP function: "
"COUNT 0 FOR PA 0x%x, size = "
"0x%x\n", paddr, ul_num_bytes);
bad_page_dump(paddr, pg);
} else {
set_page_dirty(pg);
page_cache_release(pg);
}
}
paddr += HW_PAGE_SIZE4KB;
}
if (!hw_mmu_pte_clear(l1_base_va, va_curr, pte_size)) {
status = 0;
rem_bytes -= pte_size;
va_curr += pte_size;
} else {
status = -EPERM;
goto EXIT_LOOP;
}
}
/*
* It is better to flush the TLB here, so that any stale entries
* get flushed
*/
EXIT_LOOP:
flush_all(dev_context);
dev_dbg(bridge,
"%s: va_curr %x, pte_addr_l1 %x pte_addr_l2 %x rem_bytes %x,"
" rem_bytes_l2 %x status %x\n", __func__, va_curr, pte_addr_l1,
pte_addr_l2, rem_bytes, rem_bytes_l2, status);
return status;
}
/*
* ======== user_va2_pa ========
* Purpose:
* This function walks through the page tables to convert a userland
* virtual address to physical address
*/
static u32 user_va2_pa(struct mm_struct *mm, u32 address)
{
pgd_t *pgd;
pmd_t *pmd;
pte_t *ptep, pte;
pgd = pgd_offset(mm, address);
if (!(pgd_none(*pgd) || pgd_bad(*pgd))) {
pmd = pmd_offset(pgd, address);
if (!(pmd_none(*pmd) || pmd_bad(*pmd))) {
ptep = pte_offset_map(pmd, address);
if (ptep) {
pte = *ptep;
if (pte_present(pte))
return pte & PAGE_MASK;
}
}
}
return 0;
}
/*
* ======== pte_update ========
* This function calculates the optimum page-aligned addresses and sizes
* Caller must pass page-aligned values
*/
static int pte_update(struct bridge_dev_context *dev_ctxt, u32 pa,
u32 va, u32 size,
struct hw_mmu_map_attrs_t *map_attrs)
{
u32 i;
u32 all_bits;
u32 pa_curr = pa;
u32 va_curr = va;
u32 num_bytes = size;
struct bridge_dev_context *dev_context = dev_ctxt;
int status = 0;
u32 page_size[] = { HW_PAGE_SIZE16MB, HW_PAGE_SIZE1MB,
HW_PAGE_SIZE64KB, HW_PAGE_SIZE4KB
};
while (num_bytes && !status) {
/* To find the max. page size with which both PA & VA are
* aligned */
all_bits = pa_curr | va_curr;
for (i = 0; i < 4; i++) {
if ((num_bytes >= page_size[i]) && ((all_bits &
(page_size[i] -
1)) == 0)) {
status =
pte_set(dev_context->pt_attrs, pa_curr,
va_curr, page_size[i], map_attrs);
pa_curr += page_size[i];
va_curr += page_size[i];
num_bytes -= page_size[i];
/* Don't try smaller sizes. Hopefully we have
* reached an address aligned to a bigger page
* size */
break;
}
}
}
return status;
}
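As a quick illustration of the selection loop above, the following standalone sketch (not part of the driver; it only reuses the HW_PAGE_SIZE* constants from hw_defs.h) shows which page size would be chosen for a given physical/virtual address pair and remaining byte count. For example, pa = 0x80100000, va = 0x11000000 and 0x110000 bytes yields a 1 MB section first and a 64 KB large page for the remainder.
/*
 * Standalone sketch of pte_update's size selection (illustrative only):
 * return the largest page size to which pa, va and the remaining byte
 * count are all aligned, or 0 if the caller passed unaligned values.
 */
static u32 pick_page_size(u32 pa, u32 va, u32 num_bytes)
{
	static const u32 page_size[] = { HW_PAGE_SIZE16MB, HW_PAGE_SIZE1MB,
					 HW_PAGE_SIZE64KB, HW_PAGE_SIZE4KB };
	u32 all_bits = pa | va;
	int i;

	for (i = 0; i < 4; i++) {
		if (num_bytes >= page_size[i] &&
		    !(all_bits & (page_size[i] - 1)))
			return page_size[i];
	}
	return 0;
}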
/*
* ======== pte_set ========
* This function calculates PTE address (MPU virtual) to be updated
* It also manages the L2 page tables
*/
static int pte_set(struct pg_table_attrs *pt, u32 pa, u32 va,
u32 size, struct hw_mmu_map_attrs_t *attrs)
{
u32 i;
u32 pte_val;
u32 pte_addr_l1;
u32 pte_size;
/* Base address of the PT that will be updated */
u32 pg_tbl_va;
u32 l1_base_va;
/* Compiler warns that the next three variables might be used
* uninitialized in this function. That doesn't seem to be the case;
* working around it anyway. */
u32 l2_base_va = 0;
u32 l2_base_pa = 0;
u32 l2_page_num = 0;
int status = 0;
l1_base_va = pt->l1_base_va;
pg_tbl_va = l1_base_va;
if ((size == HW_PAGE_SIZE64KB) || (size == HW_PAGE_SIZE4KB)) {
/* Find whether the L1 PTE points to a valid L2 PT */
pte_addr_l1 = hw_mmu_pte_addr_l1(l1_base_va, va);
if (pte_addr_l1 <= (pt->l1_base_va + pt->l1_size)) {
pte_val = *(u32 *) pte_addr_l1;
pte_size = hw_mmu_pte_size_l1(pte_val);
} else {
return -EPERM;
}
spin_lock(&pt->pg_lock);
if (pte_size == HW_MMU_COARSE_PAGE_SIZE) {
/* Get the L2 PA from the L1 PTE, and find
* corresponding L2 VA */
l2_base_pa = hw_mmu_pte_coarse_l1(pte_val);
l2_base_va =
l2_base_pa - pt->l2_base_pa + pt->l2_base_va;
l2_page_num =
(l2_base_pa -
pt->l2_base_pa) / HW_MMU_COARSE_PAGE_SIZE;
} else if (pte_size == 0) {
/* L1 PTE is invalid. Allocate a L2 PT and
* point the L1 PTE to it */
/* Find a free L2 PT. */
for (i = 0; (i < pt->l2_num_pages) &&
(pt->pg_info[i].num_entries != 0); i++)
;
if (i < pt->l2_num_pages) {
l2_page_num = i;
l2_base_pa = pt->l2_base_pa + (l2_page_num *
HW_MMU_COARSE_PAGE_SIZE);
l2_base_va = pt->l2_base_va + (l2_page_num *
HW_MMU_COARSE_PAGE_SIZE);
/* Endianness attributes are ignored for
* HW_MMU_COARSE_PAGE_SIZE */
status =
hw_mmu_pte_set(l1_base_va, l2_base_pa, va,
HW_MMU_COARSE_PAGE_SIZE,
attrs);
} else {
status = -ENOMEM;
}
} else {
/* Found valid L1 PTE of another size.
* Should not overwrite it. */
status = -EPERM;
}
if (!status) {
pg_tbl_va = l2_base_va;
if (size == HW_PAGE_SIZE64KB)
pt->pg_info[l2_page_num].num_entries += 16;
else
pt->pg_info[l2_page_num].num_entries++;
dev_dbg(bridge, "PTE: L2 BaseVa %x, BasePa %x, PageNum "
"%x, num_entries %x\n", l2_base_va,
l2_base_pa, l2_page_num,
pt->pg_info[l2_page_num].num_entries);
}
spin_unlock(&pt->pg_lock);
}
if (!status) {
dev_dbg(bridge, "PTE: pg_tbl_va %x, pa %x, va %x, size %x\n",
pg_tbl_va, pa, va, size);
dev_dbg(bridge, "PTE: endianism %x, element_size %x, "
"mixed_size %x\n", attrs->endianism,
attrs->element_size, attrs->mixed_size);
status = hw_mmu_pte_set(pg_tbl_va, pa, va, size, attrs);
}
return status;
}
/* Memory map kernel VA -- memory allocated with vmalloc */
static int mem_map_vmalloc(struct bridge_dev_context *dev_context,
u32 ul_mpu_addr, u32 virt_addr,
u32 ul_num_bytes,
struct hw_mmu_map_attrs_t *hw_attrs)
{
int status = 0;
struct page *page[1];
u32 i;
u32 pa_curr;
u32 pa_next;
u32 va_curr;
u32 size_curr;
u32 num_pages;
u32 pa;
u32 num_of4k_pages;
u32 temp = 0;
/*
* Do Kernel va to pa translation.
* Combine physically contiguous regions to reduce TLBs.
* Pass the translated pa to pte_update.
*/
num_pages = ul_num_bytes / PAGE_SIZE; /* PAGE_SIZE = OS page size */
i = 0;
va_curr = ul_mpu_addr;
page[0] = vmalloc_to_page((void *)va_curr);
pa_next = page_to_phys(page[0]);
while (!status && (i < num_pages)) {
/*
* Reuse pa_next from the previous iteration to avoid
* an extra va2pa call
*/
pa_curr = pa_next;
size_curr = PAGE_SIZE;
/*
* If the next page is physically contiguous,
* map it with the current one by increasing
* the size of the region to be mapped
*/
while (++i < num_pages) {
page[0] =
vmalloc_to_page((void *)(va_curr + size_curr));
pa_next = page_to_phys(page[0]);
if (pa_next == (pa_curr + size_curr))
size_curr += PAGE_SIZE;
else
break;
}
if (pa_next == 0) {
status = -ENOMEM;
break;
}
pa = pa_curr;
num_of4k_pages = size_curr / HW_PAGE_SIZE4KB;
while (temp++ < num_of4k_pages) {
get_page(PHYS_TO_PAGE(pa));
pa += HW_PAGE_SIZE4KB;
}
status = pte_update(dev_context, pa_curr, virt_addr +
(va_curr - ul_mpu_addr), size_curr,
hw_attrs);
va_curr += size_curr;
}
/*
* In any case, flush the TLB
* This is called from here instead of from pte_update to avoid unnecessary
* repetition while mapping non-contiguous physical regions of a virtual
* region
*/
flush_all(dev_context);
dev_dbg(bridge, "%s status %x\n", __func__, status);
return status;
}
/* /*
* ======== wait_for_start ======== * ======== wait_for_start ========
* Wait for the signal from DSP that it has started, or time out. * Wait for the signal from DSP that it has started, or time out.
......
...@@ -31,6 +31,10 @@ ...@@ -31,6 +31,10 @@
#include <dspbridge/dev.h> #include <dspbridge/dev.h>
#include <dspbridge/iodefs.h> #include <dspbridge/iodefs.h>
/* ------------------------------------ Hardware Abstraction Layer */
#include <hw_defs.h>
#include <hw_mmu.h>
#include <dspbridge/pwr_sh.h> #include <dspbridge/pwr_sh.h>
/* ----------------------------------- Bridge Driver */ /* ----------------------------------- Bridge Driver */
......
...@@ -134,16 +134,17 @@ int read_ext_dsp_data(struct bridge_dev_context *dev_ctxt, ...@@ -134,16 +134,17 @@ int read_ext_dsp_data(struct bridge_dev_context *dev_ctxt,
if (!status) { if (!status) {
ul_tlb_base_virt = ul_tlb_base_virt =
dev_context->sh_s.seg0_da * DSPWORDSIZE; dev_context->atlb_entry[0].ul_dsp_va * DSPWORDSIZE;
DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt); DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt);
dw_ext_prog_virt_mem = dev_context->sh_s.seg0_va; dw_ext_prog_virt_mem =
dev_context->atlb_entry[0].ul_gpp_va;
if (!trace_read) { if (!trace_read) {
ul_shm_offset_virt = ul_shm_offset_virt =
ul_shm_base_virt - ul_tlb_base_virt; ul_shm_base_virt - ul_tlb_base_virt;
ul_shm_offset_virt += ul_shm_offset_virt +=
PG_ALIGN_HIGH(ul_ext_end - ul_dyn_ext_base + PG_ALIGN_HIGH(ul_ext_end - ul_dyn_ext_base +
1, PAGE_SIZE * 16); 1, HW_PAGE_SIZE64KB);
dw_ext_prog_virt_mem -= ul_shm_offset_virt; dw_ext_prog_virt_mem -= ul_shm_offset_virt;
dw_ext_prog_virt_mem += dw_ext_prog_virt_mem +=
(ul_ext_base - ul_dyn_ext_base); (ul_ext_base - ul_dyn_ext_base);
...@@ -317,9 +318,8 @@ int write_ext_dsp_data(struct bridge_dev_context *dev_context, ...@@ -317,9 +318,8 @@ int write_ext_dsp_data(struct bridge_dev_context *dev_context,
ret = -EPERM; ret = -EPERM;
if (!ret) { if (!ret) {
ul_tlb_base_virt = dev_context->sh_s.seg0_da * ul_tlb_base_virt =
DSPWORDSIZE; dev_context->atlb_entry[0].ul_dsp_va * DSPWORDSIZE;
DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt); DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt);
if (symbols_reloaded) { if (symbols_reloaded) {
...@@ -337,7 +337,7 @@ int write_ext_dsp_data(struct bridge_dev_context *dev_context, ...@@ -337,7 +337,7 @@ int write_ext_dsp_data(struct bridge_dev_context *dev_context,
ul_shm_base_virt - ul_tlb_base_virt; ul_shm_base_virt - ul_tlb_base_virt;
if (trace_load) { if (trace_load) {
dw_ext_prog_virt_mem = dw_ext_prog_virt_mem =
dev_context->sh_s.seg0_va; dev_context->atlb_entry[0].ul_gpp_va;
} else { } else {
dw_ext_prog_virt_mem = host_res->dw_mem_base[1]; dw_ext_prog_virt_mem = host_res->dw_mem_base[1];
dw_ext_prog_virt_mem += dw_ext_prog_virt_mem +=
...@@ -393,6 +393,7 @@ int sm_interrupt_dsp(struct bridge_dev_context *dev_context, u16 mb_val) ...@@ -393,6 +393,7 @@ int sm_interrupt_dsp(struct bridge_dev_context *dev_context, u16 mb_val)
omap_dspbridge_dev->dev.platform_data; omap_dspbridge_dev->dev.platform_data;
struct cfg_hostres *resources = dev_context->resources; struct cfg_hostres *resources = dev_context->resources;
int status = 0; int status = 0;
u32 temp;
if (!dev_context->mbox) if (!dev_context->mbox)
return 0; return 0;
...@@ -436,7 +437,7 @@ int sm_interrupt_dsp(struct bridge_dev_context *dev_context, u16 mb_val) ...@@ -436,7 +437,7 @@ int sm_interrupt_dsp(struct bridge_dev_context *dev_context, u16 mb_val)
omap_mbox_restore_ctx(dev_context->mbox); omap_mbox_restore_ctx(dev_context->mbox);
/* Access MMU SYS CONFIG register to generate a short wakeup */ /* Access MMU SYS CONFIG register to generate a short wakeup */
iommu_read_reg(dev_context->dsp_mmu, MMU_SYSCONFIG); temp = readl(resources->dw_dmmu_base + 0x10);
dev_context->dw_brd_state = BRD_RUNNING; dev_context->dw_brd_state = BRD_RUNNING;
} else if (dev_context->dw_brd_state == BRD_RETENTION) { } else if (dev_context->dw_brd_state == BRD_RETENTION) {
......
...@@ -31,6 +31,57 @@ ...@@ -31,6 +31,57 @@
#include <dspbridge/drv.h> #include <dspbridge/drv.h>
#include <dspbridge/wdt.h> #include <dspbridge/wdt.h>
static u32 fault_addr;
static void mmu_fault_dpc(unsigned long data)
{
struct deh_mgr *deh = (void *)data;
if (!deh)
return;
bridge_deh_notify(deh, DSP_MMUFAULT, 0);
}
static irqreturn_t mmu_fault_isr(int irq, void *data)
{
struct deh_mgr *deh = data;
struct cfg_hostres *resources;
u32 event;
if (!deh)
return IRQ_HANDLED;
resources = deh->hbridge_context->resources;
if (!resources) {
dev_dbg(bridge, "%s: Failed to get Host Resources\n",
__func__);
return IRQ_HANDLED;
}
hw_mmu_event_status(resources->dw_dmmu_base, &event);
if (event == HW_MMU_TRANSLATION_FAULT) {
hw_mmu_fault_addr_read(resources->dw_dmmu_base, &fault_addr);
dev_dbg(bridge, "%s: event=0x%x, fault_addr=0x%x\n", __func__,
event, fault_addr);
/*
* Schedule a DPC directly. In the future, it may be
* necessary to check if DSP MMU fault is intended for
* Bridge.
*/
tasklet_schedule(&deh->dpc_tasklet);
/* Disable the MMU events; otherwise, once the fault is cleared,
* it will start raising interrupts again */
hw_mmu_event_disable(resources->dw_dmmu_base,
HW_MMU_TRANSLATION_FAULT);
} else {
hw_mmu_event_disable(resources->dw_dmmu_base,
HW_MMU_ALL_INTERRUPTS);
}
return IRQ_HANDLED;
}
int bridge_deh_create(struct deh_mgr **ret_deh, int bridge_deh_create(struct deh_mgr **ret_deh,
struct dev_object *hdev_obj) struct dev_object *hdev_obj)
{ {
...@@ -58,9 +109,18 @@ int bridge_deh_create(struct deh_mgr **ret_deh, ...@@ -58,9 +109,18 @@ int bridge_deh_create(struct deh_mgr **ret_deh,
} }
ntfy_init(deh->ntfy_obj); ntfy_init(deh->ntfy_obj);
/* Create a MMUfault DPC */
tasklet_init(&deh->dpc_tasklet, mmu_fault_dpc, (u32) deh);
/* Fill in context structure */ /* Fill in context structure */
deh->hbridge_context = hbridge_context; deh->hbridge_context = hbridge_context;
/* Install ISR function for DSP MMU fault */
status = request_irq(INT_DSP_MMU_IRQ, mmu_fault_isr, 0,
"DspBridge\tiommu fault", deh);
if (status < 0)
goto err;
*ret_deh = deh; *ret_deh = deh;
return 0; return 0;
...@@ -80,6 +140,11 @@ int bridge_deh_destroy(struct deh_mgr *deh) ...@@ -80,6 +140,11 @@ int bridge_deh_destroy(struct deh_mgr *deh)
ntfy_delete(deh->ntfy_obj); ntfy_delete(deh->ntfy_obj);
kfree(deh->ntfy_obj); kfree(deh->ntfy_obj);
} }
/* Disable DSP MMU fault */
free_irq(INT_DSP_MMU_IRQ, deh);
/* Free DPC object */
tasklet_kill(&deh->dpc_tasklet);
/* Deallocate the DEH manager object */ /* Deallocate the DEH manager object */
kfree(deh); kfree(deh);
...@@ -101,6 +166,48 @@ int bridge_deh_register_notify(struct deh_mgr *deh, u32 event_mask, ...@@ -101,6 +166,48 @@ int bridge_deh_register_notify(struct deh_mgr *deh, u32 event_mask,
return ntfy_unregister(deh->ntfy_obj, hnotification); return ntfy_unregister(deh->ntfy_obj, hnotification);
} }
#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
static void mmu_fault_print_stack(struct bridge_dev_context *dev_context)
{
struct cfg_hostres *resources;
struct hw_mmu_map_attrs_t map_attrs = {
.endianism = HW_LITTLE_ENDIAN,
.element_size = HW_ELEM_SIZE16BIT,
.mixed_size = HW_MMU_CPUES,
};
void *dummy_va_addr;
resources = dev_context->resources;
dummy_va_addr = (void*)__get_free_page(GFP_ATOMIC);
/*
* Before acking the MMU fault, let's make sure MMU can only
* access entry #0. Then add a new entry so that the DSP OS
* can continue in order to dump the stack.
*/
hw_mmu_twl_disable(resources->dw_dmmu_base);
hw_mmu_tlb_flush_all(resources->dw_dmmu_base);
hw_mmu_tlb_add(resources->dw_dmmu_base,
virt_to_phys(dummy_va_addr), fault_addr,
HW_PAGE_SIZE4KB, 1,
&map_attrs, HW_SET, HW_SET);
dsp_clk_enable(DSP_CLK_GPT8);
dsp_gpt_wait_overflow(DSP_CLK_GPT8, 0xfffffffe);
/* Clear MMU interrupt */
hw_mmu_event_ack(resources->dw_dmmu_base,
HW_MMU_TRANSLATION_FAULT);
dump_dsp_stack(dev_context);
dsp_clk_disable(DSP_CLK_GPT8);
hw_mmu_disable(resources->dw_dmmu_base);
free_page((unsigned long)dummy_va_addr);
}
#endif
static inline const char *event_to_string(int event) static inline const char *event_to_string(int event)
{ {
switch (event) { switch (event) {
...@@ -133,7 +240,13 @@ void bridge_deh_notify(struct deh_mgr *deh, int event, int info) ...@@ -133,7 +240,13 @@ void bridge_deh_notify(struct deh_mgr *deh, int event, int info)
#endif #endif
break; break;
case DSP_MMUFAULT: case DSP_MMUFAULT:
dev_err(bridge, "%s: %s, addr=0x%x", __func__, str, info); dev_err(bridge, "%s: %s, addr=0x%x", __func__,
str, fault_addr);
#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
print_dsp_trace_buffer(dev_context);
dump_dl_modules(dev_context);
mmu_fault_print_stack(dev_context);
#endif
break; break;
default: default:
dev_err(bridge, "%s: %s", __func__, str); dev_err(bridge, "%s: %s", __func__, str);
......
/*
* EasiGlobal.h
*
* DSP-BIOS Bridge driver support functions for TI OMAP processors.
*
* Copyright (C) 2007 Texas Instruments, Inc.
*
* This package is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
* WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
*/
#ifndef _EASIGLOBAL_H
#define _EASIGLOBAL_H
#include <linux/types.h>
/*
* DEFINE: READ_ONLY, WRITE_ONLY & READ_WRITE
*
* DESCRIPTION: Defines used to describe register types for EASI-checker tests.
*/
#define READ_ONLY 1
#define WRITE_ONLY 2
#define READ_WRITE 3
/*
* MACRO: _DEBUG_LEVEL1_EASI
*
* DESCRIPTION: A MACRO which can be used to indicate that a particular
* register access function was called.
*
* NOTE: We currently don't use this functionality.
*/
#define _DEBUG_LEVEL1_EASI(easi_num) ((void)0)
#endif /* _EASIGLOBAL_H */
/*
* MMUAccInt.h
*
* DSP-BIOS Bridge driver support functions for TI OMAP processors.
*
* Copyright (C) 2007 Texas Instruments, Inc.
*
* This package is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
* WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
*/
#ifndef _MMU_ACC_INT_H
#define _MMU_ACC_INT_H
/* Mappings of level 1 EASI function numbers to function names */
#define EASIL1_MMUMMU_SYSCONFIG_READ_REGISTER32 (MMU_BASE_EASIL1 + 3)
#define EASIL1_MMUMMU_SYSCONFIG_IDLE_MODE_WRITE32 (MMU_BASE_EASIL1 + 17)
#define EASIL1_MMUMMU_SYSCONFIG_AUTO_IDLE_WRITE32 (MMU_BASE_EASIL1 + 39)
#define EASIL1_MMUMMU_IRQSTATUS_WRITE_REGISTER32 (MMU_BASE_EASIL1 + 51)
#define EASIL1_MMUMMU_IRQENABLE_READ_REGISTER32 (MMU_BASE_EASIL1 + 102)
#define EASIL1_MMUMMU_IRQENABLE_WRITE_REGISTER32 (MMU_BASE_EASIL1 + 103)
#define EASIL1_MMUMMU_WALKING_STTWL_RUNNING_READ32 (MMU_BASE_EASIL1 + 156)
#define EASIL1_MMUMMU_CNTLTWL_ENABLE_READ32 (MMU_BASE_EASIL1 + 174)
#define EASIL1_MMUMMU_CNTLTWL_ENABLE_WRITE32 (MMU_BASE_EASIL1 + 180)
#define EASIL1_MMUMMU_CNTLMMU_ENABLE_WRITE32 (MMU_BASE_EASIL1 + 190)
#define EASIL1_MMUMMU_FAULT_AD_READ_REGISTER32 (MMU_BASE_EASIL1 + 194)
#define EASIL1_MMUMMU_TTB_WRITE_REGISTER32 (MMU_BASE_EASIL1 + 198)
#define EASIL1_MMUMMU_LOCK_READ_REGISTER32 (MMU_BASE_EASIL1 + 203)
#define EASIL1_MMUMMU_LOCK_WRITE_REGISTER32 (MMU_BASE_EASIL1 + 204)
#define EASIL1_MMUMMU_LOCK_BASE_VALUE_READ32 (MMU_BASE_EASIL1 + 205)
#define EASIL1_MMUMMU_LOCK_CURRENT_VICTIM_READ32 (MMU_BASE_EASIL1 + 209)
#define EASIL1_MMUMMU_LOCK_CURRENT_VICTIM_WRITE32 (MMU_BASE_EASIL1 + 211)
#define EASIL1_MMUMMU_LOCK_CURRENT_VICTIM_SET32 (MMU_BASE_EASIL1 + 212)
#define EASIL1_MMUMMU_LD_TLB_READ_REGISTER32 (MMU_BASE_EASIL1 + 213)
#define EASIL1_MMUMMU_LD_TLB_WRITE_REGISTER32 (MMU_BASE_EASIL1 + 214)
#define EASIL1_MMUMMU_CAM_WRITE_REGISTER32 (MMU_BASE_EASIL1 + 226)
#define EASIL1_MMUMMU_RAM_WRITE_REGISTER32 (MMU_BASE_EASIL1 + 268)
#define EASIL1_MMUMMU_FLUSH_ENTRY_WRITE_REGISTER32 (MMU_BASE_EASIL1 + 322)
/* Register offset address definitions */
#define MMU_MMU_SYSCONFIG_OFFSET 0x10
#define MMU_MMU_IRQSTATUS_OFFSET 0x18
#define MMU_MMU_IRQENABLE_OFFSET 0x1c
#define MMU_MMU_WALKING_ST_OFFSET 0x40
#define MMU_MMU_CNTL_OFFSET 0x44
#define MMU_MMU_FAULT_AD_OFFSET 0x48
#define MMU_MMU_TTB_OFFSET 0x4c
#define MMU_MMU_LOCK_OFFSET 0x50
#define MMU_MMU_LD_TLB_OFFSET 0x54
#define MMU_MMU_CAM_OFFSET 0x58
#define MMU_MMU_RAM_OFFSET 0x5c
#define MMU_MMU_GFLUSH_OFFSET 0x60
#define MMU_MMU_FLUSH_ENTRY_OFFSET 0x64
/* Bitfield mask and offset declarations */
#define MMU_MMU_SYSCONFIG_IDLE_MODE_MASK 0x18
#define MMU_MMU_SYSCONFIG_IDLE_MODE_OFFSET 3
#define MMU_MMU_SYSCONFIG_AUTO_IDLE_MASK 0x1
#define MMU_MMU_SYSCONFIG_AUTO_IDLE_OFFSET 0
#define MMU_MMU_WALKING_ST_TWL_RUNNING_MASK 0x1
#define MMU_MMU_WALKING_ST_TWL_RUNNING_OFFSET 0
#define MMU_MMU_CNTL_TWL_ENABLE_MASK 0x4
#define MMU_MMU_CNTL_TWL_ENABLE_OFFSET 2
#define MMU_MMU_CNTL_MMU_ENABLE_MASK 0x2
#define MMU_MMU_CNTL_MMU_ENABLE_OFFSET 1
#define MMU_MMU_LOCK_BASE_VALUE_MASK 0xfc00
#define MMU_MMU_LOCK_BASE_VALUE_OFFSET 10
#define MMU_MMU_LOCK_CURRENT_VICTIM_MASK 0x3f0
#define MMU_MMU_LOCK_CURRENT_VICTIM_OFFSET 4
#endif /* _MMU_ACC_INT_H */
/*
* MMURegAcM.h
*
* DSP-BIOS Bridge driver support functions for TI OMAP processors.
*
* Copyright (C) 2007 Texas Instruments, Inc.
*
* This package is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
* WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
*/
#ifndef _MMU_REG_ACM_H
#define _MMU_REG_ACM_H
#include <linux/io.h>
#include <EasiGlobal.h>
#include "MMUAccInt.h"
#if defined(USE_LEVEL_1_MACROS)
#define MMUMMU_SYSCONFIG_READ_REGISTER32(base_address)\
(_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_SYSCONFIG_READ_REGISTER32),\
__raw_readl((base_address)+MMU_MMU_SYSCONFIG_OFFSET))
#define MMUMMU_SYSCONFIG_IDLE_MODE_WRITE32(base_address, value)\
{\
const u32 offset = MMU_MMU_SYSCONFIG_OFFSET;\
register u32 data = __raw_readl((base_address)+offset);\
register u32 new_value = (value);\
_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_SYSCONFIG_IDLE_MODE_WRITE32);\
data &= ~(MMU_MMU_SYSCONFIG_IDLE_MODE_MASK);\
new_value <<= MMU_MMU_SYSCONFIG_IDLE_MODE_OFFSET;\
new_value &= MMU_MMU_SYSCONFIG_IDLE_MODE_MASK;\
new_value |= data;\
__raw_writel(new_value, base_address+offset);\
}
#define MMUMMU_SYSCONFIG_AUTO_IDLE_WRITE32(base_address, value)\
{\
const u32 offset = MMU_MMU_SYSCONFIG_OFFSET;\
register u32 data = __raw_readl((base_address)+offset);\
register u32 new_value = (value);\
_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_SYSCONFIG_AUTO_IDLE_WRITE32);\
data &= ~(MMU_MMU_SYSCONFIG_AUTO_IDLE_MASK);\
new_value <<= MMU_MMU_SYSCONFIG_AUTO_IDLE_OFFSET;\
new_value &= MMU_MMU_SYSCONFIG_AUTO_IDLE_MASK;\
new_value |= data;\
__raw_writel(new_value, base_address+offset);\
}
#define MMUMMU_IRQSTATUS_READ_REGISTER32(base_address)\
(_DEBUG_LEVEL1_EASI(easil1_mmummu_irqstatus_read_register32),\
__raw_readl((base_address)+MMU_MMU_IRQSTATUS_OFFSET))
#define MMUMMU_IRQSTATUS_WRITE_REGISTER32(base_address, value)\
{\
const u32 offset = MMU_MMU_IRQSTATUS_OFFSET;\
register u32 new_value = (value);\
_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_IRQSTATUS_WRITE_REGISTER32);\
__raw_writel(new_value, (base_address)+offset);\
}
#define MMUMMU_IRQENABLE_READ_REGISTER32(base_address)\
(_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_IRQENABLE_READ_REGISTER32),\
__raw_readl((base_address)+MMU_MMU_IRQENABLE_OFFSET))
#define MMUMMU_IRQENABLE_WRITE_REGISTER32(base_address, value)\
{\
const u32 offset = MMU_MMU_IRQENABLE_OFFSET;\
register u32 new_value = (value);\
_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_IRQENABLE_WRITE_REGISTER32);\
__raw_writel(new_value, (base_address)+offset);\
}
#define MMUMMU_WALKING_STTWL_RUNNING_READ32(base_address)\
(_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_WALKING_STTWL_RUNNING_READ32),\
(((__raw_readl(((base_address)+(MMU_MMU_WALKING_ST_OFFSET))))\
& MMU_MMU_WALKING_ST_TWL_RUNNING_MASK) >>\
MMU_MMU_WALKING_ST_TWL_RUNNING_OFFSET))
#define MMUMMU_CNTLTWL_ENABLE_READ32(base_address)\
(_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_CNTLTWL_ENABLE_READ32),\
(((__raw_readl(((base_address)+(MMU_MMU_CNTL_OFFSET)))) &\
MMU_MMU_CNTL_TWL_ENABLE_MASK) >>\
MMU_MMU_CNTL_TWL_ENABLE_OFFSET))
#define MMUMMU_CNTLTWL_ENABLE_WRITE32(base_address, value)\
{\
const u32 offset = MMU_MMU_CNTL_OFFSET;\
register u32 data = __raw_readl((base_address)+offset);\
register u32 new_value = (value);\
_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_CNTLTWL_ENABLE_WRITE32);\
data &= ~(MMU_MMU_CNTL_TWL_ENABLE_MASK);\
new_value <<= MMU_MMU_CNTL_TWL_ENABLE_OFFSET;\
new_value &= MMU_MMU_CNTL_TWL_ENABLE_MASK;\
new_value |= data;\
__raw_writel(new_value, base_address+offset);\
}
#define MMUMMU_CNTLMMU_ENABLE_WRITE32(base_address, value)\
{\
const u32 offset = MMU_MMU_CNTL_OFFSET;\
register u32 data = __raw_readl((base_address)+offset);\
register u32 new_value = (value);\
_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_CNTLMMU_ENABLE_WRITE32);\
data &= ~(MMU_MMU_CNTL_MMU_ENABLE_MASK);\
new_value <<= MMU_MMU_CNTL_MMU_ENABLE_OFFSET;\
new_value &= MMU_MMU_CNTL_MMU_ENABLE_MASK;\
new_value |= data;\
__raw_writel(new_value, base_address+offset);\
}
#define MMUMMU_FAULT_AD_READ_REGISTER32(base_address)\
(_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_FAULT_AD_READ_REGISTER32),\
__raw_readl((base_address)+MMU_MMU_FAULT_AD_OFFSET))
#define MMUMMU_TTB_WRITE_REGISTER32(base_address, value)\
{\
const u32 offset = MMU_MMU_TTB_OFFSET;\
register u32 new_value = (value);\
_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_TTB_WRITE_REGISTER32);\
__raw_writel(new_value, (base_address)+offset);\
}
#define MMUMMU_LOCK_READ_REGISTER32(base_address)\
(_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LOCK_READ_REGISTER32),\
__raw_readl((base_address)+MMU_MMU_LOCK_OFFSET))
#define MMUMMU_LOCK_WRITE_REGISTER32(base_address, value)\
{\
const u32 offset = MMU_MMU_LOCK_OFFSET;\
register u32 new_value = (value);\
_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LOCK_WRITE_REGISTER32);\
__raw_writel(new_value, (base_address)+offset);\
}
#define MMUMMU_LOCK_BASE_VALUE_READ32(base_address)\
(_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LOCK_BASE_VALUE_READ32),\
(((__raw_readl(((base_address)+(MMU_MMU_LOCK_OFFSET)))) &\
MMU_MMU_LOCK_BASE_VALUE_MASK) >>\
MMU_MMU_LOCK_BASE_VALUE_OFFSET))
#define MMUMMU_LOCK_BASE_VALUE_WRITE32(base_address, value)\
{\
const u32 offset = MMU_MMU_LOCK_OFFSET;\
register u32 data = __raw_readl((base_address)+offset);\
register u32 new_value = (value);\
_DEBUG_LEVEL1_EASI(easil1_mmummu_lock_base_value_write32);\
data &= ~(MMU_MMU_LOCK_BASE_VALUE_MASK);\
new_value <<= MMU_MMU_LOCK_BASE_VALUE_OFFSET;\
new_value &= MMU_MMU_LOCK_BASE_VALUE_MASK;\
new_value |= data;\
__raw_writel(new_value, base_address+offset);\
}
#define MMUMMU_LOCK_CURRENT_VICTIM_READ32(base_address)\
(_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LOCK_CURRENT_VICTIM_READ32),\
(((__raw_readl(((base_address)+(MMU_MMU_LOCK_OFFSET)))) &\
MMU_MMU_LOCK_CURRENT_VICTIM_MASK) >>\
MMU_MMU_LOCK_CURRENT_VICTIM_OFFSET))
#define MMUMMU_LOCK_CURRENT_VICTIM_WRITE32(base_address, value)\
{\
const u32 offset = MMU_MMU_LOCK_OFFSET;\
register u32 data = __raw_readl((base_address)+offset);\
register u32 new_value = (value);\
_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LOCK_CURRENT_VICTIM_WRITE32);\
data &= ~(MMU_MMU_LOCK_CURRENT_VICTIM_MASK);\
new_value <<= MMU_MMU_LOCK_CURRENT_VICTIM_OFFSET;\
new_value &= MMU_MMU_LOCK_CURRENT_VICTIM_MASK;\
new_value |= data;\
__raw_writel(new_value, base_address+offset);\
}
#define MMUMMU_LOCK_CURRENT_VICTIM_SET32(var, value)\
(_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LOCK_CURRENT_VICTIM_SET32),\
(((var) & ~(MMU_MMU_LOCK_CURRENT_VICTIM_MASK)) |\
(((value) << MMU_MMU_LOCK_CURRENT_VICTIM_OFFSET) &\
MMU_MMU_LOCK_CURRENT_VICTIM_MASK)))
#define MMUMMU_LD_TLB_READ_REGISTER32(base_address)\
(_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LD_TLB_READ_REGISTER32),\
__raw_readl((base_address)+MMU_MMU_LD_TLB_OFFSET))
#define MMUMMU_LD_TLB_WRITE_REGISTER32(base_address, value)\
{\
const u32 offset = MMU_MMU_LD_TLB_OFFSET;\
register u32 new_value = (value);\
_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LD_TLB_WRITE_REGISTER32);\
__raw_writel(new_value, (base_address)+offset);\
}
#define MMUMMU_CAM_WRITE_REGISTER32(base_address, value)\
{\
const u32 offset = MMU_MMU_CAM_OFFSET;\
register u32 new_value = (value);\
_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_CAM_WRITE_REGISTER32);\
__raw_writel(new_value, (base_address)+offset);\
}
#define MMUMMU_RAM_WRITE_REGISTER32(base_address, value)\
{\
const u32 offset = MMU_MMU_RAM_OFFSET;\
register u32 new_value = (value);\
_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_RAM_WRITE_REGISTER32);\
__raw_writel(new_value, (base_address)+offset);\
}
#define MMUMMU_FLUSH_ENTRY_WRITE_REGISTER32(base_address, value)\
{\
const u32 offset = MMU_MMU_FLUSH_ENTRY_OFFSET;\
register u32 new_value = (value);\
_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_FLUSH_ENTRY_WRITE_REGISTER32);\
__raw_writel(new_value, (base_address)+offset);\
}
#endif /* USE_LEVEL_1_MACROS */
#endif /* _MMU_REG_ACM_H */
/*
* hw_defs.h
*
* DSP-BIOS Bridge driver support functions for TI OMAP processors.
*
* Global HW definitions
*
* Copyright (C) 2007 Texas Instruments, Inc.
*
* This package is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
* WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
*/
#ifndef _HW_DEFS_H
#define _HW_DEFS_H
/* Page size */
#define HW_PAGE_SIZE4KB 0x1000
#define HW_PAGE_SIZE64KB 0x10000
#define HW_PAGE_SIZE1MB 0x100000
#define HW_PAGE_SIZE16MB 0x1000000
/* hw_status: return type for HW API */
typedef long hw_status;
/* Macro used to set and clear any bit */
#define HW_CLEAR 0
#define HW_SET 1
/* hw_endianism_t: Enumerated Type used to specify the endianism
* Do NOT change these values. They are used as bit fields. */
enum hw_endianism_t {
HW_LITTLE_ENDIAN,
HW_BIG_ENDIAN
};
/* hw_element_size_t: Enumerated Type used to specify the element size
* Do NOT change these values. They are used as bit fields. */
enum hw_element_size_t {
HW_ELEM_SIZE8BIT,
HW_ELEM_SIZE16BIT,
HW_ELEM_SIZE32BIT,
HW_ELEM_SIZE64BIT
};
/* hw_idle_mode_t: Enumerated Type used to specify Idle modes */
enum hw_idle_mode_t {
HW_FORCE_IDLE,
HW_NO_IDLE,
HW_SMART_IDLE
};
#endif /* _HW_DEFS_H */
/*
* hw_mmu.c
*
* DSP-BIOS Bridge driver support functions for TI OMAP processors.
*
* API definitions to setup MMU TLB and PTE
*
* Copyright (C) 2007 Texas Instruments, Inc.
*
* This package is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
* WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
*/
#include <linux/io.h>
#include "MMURegAcM.h"
#include <hw_defs.h>
#include <hw_mmu.h>
#include <linux/types.h>
#include <linux/err.h>
#define MMU_BASE_VAL_MASK 0xFC00
#define MMU_PAGE_MAX 3
#define MMU_ELEMENTSIZE_MAX 3
#define MMU_ADDR_MASK 0xFFFFF000
#define MMU_TTB_MASK 0xFFFFC000
#define MMU_SECTION_ADDR_MASK 0xFFF00000
#define MMU_SSECTION_ADDR_MASK 0xFF000000
#define MMU_PAGE_TABLE_MASK 0xFFFFFC00
#define MMU_LARGE_PAGE_MASK 0xFFFF0000
#define MMU_SMALL_PAGE_MASK 0xFFFFF000
#define MMU_LOAD_TLB 0x00000001
#define MMU_GFLUSH 0x60
/*
* hw_mmu_page_size_t: Enumerated Type used to specify the MMU Page Size(SLSS)
*/
enum hw_mmu_page_size_t {
HW_MMU_SECTION,
HW_MMU_LARGE_PAGE,
HW_MMU_SMALL_PAGE,
HW_MMU_SUPERSECTION
};
/*
* FUNCTION : mmu_flush_entry
*
* INPUTS:
*
* Identifier : base_address
* Type : const u32
* Description : Base Address of instance of MMU module
*
* RETURNS:
*
* Type : hw_status
* Description : 0 -- No errors occurred
* RET_BAD_NULL_PARAM -- A Pointer
* Parameter was set to NULL
*
* PURPOSE: : Flush the TLB entry pointed to by the
* lock counter register
* even if this entry is set protected
*
* METHOD: : Check the Input parameter and Flush a
* single entry in the TLB.
*/
static hw_status mmu_flush_entry(const void __iomem *base_address);
/*
* FUNCTION : mmu_set_cam_entry
*
* INPUTS:
*
* Identifier : base_address
* Type : const u32
* Description : Base Address of instance of MMU module
*
* Identifier : page_sz
* Type : const u32
* Description : It indicates the page size
*
* Identifier : preserved_bit
* Type : const u32
* Description : It indicates whether the TLB entry is a preserved entry
* or not
*
* Identifier : valid_bit
* Type : const u32
* Description : It indicates whether the TLB entry is a valid entry or not
*
*
* Identifier : virtual_addr_tag
* Type : const u32
* Description : virtual Address
*
* RETURNS:
*
* Type : hw_status
* Description : 0 -- No errors occurred
* RET_BAD_NULL_PARAM -- A Pointer Parameter
* was set to NULL
* RET_PARAM_OUT_OF_RANGE -- Input Parameter out
* of Range
*
* PURPOSE: : Set MMU_CAM reg
*
* METHOD: : Check the Input parameters and set the CAM entry.
*/
static hw_status mmu_set_cam_entry(const void __iomem *base_address,
const u32 page_sz,
const u32 preserved_bit,
const u32 valid_bit,
const u32 virtual_addr_tag);
/*
* FUNCTION : mmu_set_ram_entry
*
* INPUTS:
*
* Identifier : base_address
* Type : const u32
* Description : Base Address of instance of MMU module
*
* Identifier : physical_addr
* Type : const u32
* Description : Physical Address to which the corresponding
* virtual address should point
*
* Identifier : endianism
* Type : hw_endianism_t
* Description : endianism for the given page
*
* Identifier : element_size
* Type : hw_element_size_t
* Description : The element size ( 8,16, 32 or 64 bit)
*
* Identifier : mixed_size
* Type : hw_mmu_mixed_size_t
* Description : Element Size to follow CPU or TLB
*
* RETURNS:
*
* Type : hw_status
* Description : 0 -- No errors occurred
* RET_BAD_NULL_PARAM -- A Pointer Parameter
* was set to NULL
* RET_PARAM_OUT_OF_RANGE -- Input Parameter
* out of Range
*
* PURPOSE: : Set MMU_CAM reg
*
* METHOD: : Check the Input parameters and set the RAM entry.
*/
static hw_status mmu_set_ram_entry(const void __iomem *base_address,
const u32 physical_addr,
enum hw_endianism_t endianism,
enum hw_element_size_t element_size,
enum hw_mmu_mixed_size_t mixed_size);
/* HW FUNCTIONS */
hw_status hw_mmu_enable(const void __iomem *base_address)
{
hw_status status = 0;
MMUMMU_CNTLMMU_ENABLE_WRITE32(base_address, HW_SET);
return status;
}
hw_status hw_mmu_disable(const void __iomem *base_address)
{
hw_status status = 0;
MMUMMU_CNTLMMU_ENABLE_WRITE32(base_address, HW_CLEAR);
return status;
}
hw_status hw_mmu_num_locked_set(const void __iomem *base_address,
u32 num_locked_entries)
{
hw_status status = 0;
MMUMMU_LOCK_BASE_VALUE_WRITE32(base_address, num_locked_entries);
return status;
}
hw_status hw_mmu_victim_num_set(const void __iomem *base_address,
u32 victim_entry_num)
{
hw_status status = 0;
MMUMMU_LOCK_CURRENT_VICTIM_WRITE32(base_address, victim_entry_num);
return status;
}
hw_status hw_mmu_event_ack(const void __iomem *base_address, u32 irq_mask)
{
hw_status status = 0;
MMUMMU_IRQSTATUS_WRITE_REGISTER32(base_address, irq_mask);
return status;
}
hw_status hw_mmu_event_disable(const void __iomem *base_address, u32 irq_mask)
{
hw_status status = 0;
u32 irq_reg;
irq_reg = MMUMMU_IRQENABLE_READ_REGISTER32(base_address);
MMUMMU_IRQENABLE_WRITE_REGISTER32(base_address, irq_reg & ~irq_mask);
return status;
}
hw_status hw_mmu_event_enable(const void __iomem *base_address, u32 irq_mask)
{
hw_status status = 0;
u32 irq_reg;
irq_reg = MMUMMU_IRQENABLE_READ_REGISTER32(base_address);
MMUMMU_IRQENABLE_WRITE_REGISTER32(base_address, irq_reg | irq_mask);
return status;
}
hw_status hw_mmu_event_status(const void __iomem *base_address, u32 *irq_mask)
{
hw_status status = 0;
*irq_mask = MMUMMU_IRQSTATUS_READ_REGISTER32(base_address);
return status;
}
hw_status hw_mmu_fault_addr_read(const void __iomem *base_address, u32 *addr)
{
hw_status status = 0;
/* read values from register */
*addr = MMUMMU_FAULT_AD_READ_REGISTER32(base_address);
return status;
}
hw_status hw_mmu_ttb_set(const void __iomem *base_address, u32 ttb_phys_addr)
{
hw_status status = 0;
u32 load_ttb;
load_ttb = ttb_phys_addr & ~0x7FUL;
/* write values to register */
MMUMMU_TTB_WRITE_REGISTER32(base_address, load_ttb);
return status;
}
hw_status hw_mmu_twl_enable(const void __iomem *base_address)
{
hw_status status = 0;
MMUMMU_CNTLTWL_ENABLE_WRITE32(base_address, HW_SET);
return status;
}
hw_status hw_mmu_twl_disable(const void __iomem *base_address)
{
hw_status status = 0;
MMUMMU_CNTLTWL_ENABLE_WRITE32(base_address, HW_CLEAR);
return status;
}
hw_status hw_mmu_tlb_flush(const void __iomem *base_address, u32 virtual_addr,
u32 page_sz)
{
hw_status status = 0;
u32 virtual_addr_tag;
enum hw_mmu_page_size_t pg_size_bits;
switch (page_sz) {
case HW_PAGE_SIZE4KB:
pg_size_bits = HW_MMU_SMALL_PAGE;
break;
case HW_PAGE_SIZE64KB:
pg_size_bits = HW_MMU_LARGE_PAGE;
break;
case HW_PAGE_SIZE1MB:
pg_size_bits = HW_MMU_SECTION;
break;
case HW_PAGE_SIZE16MB:
pg_size_bits = HW_MMU_SUPERSECTION;
break;
default:
return -EINVAL;
}
/* Generate the 20-bit tag from virtual address */
virtual_addr_tag = ((virtual_addr & MMU_ADDR_MASK) >> 12);
mmu_set_cam_entry(base_address, pg_size_bits, 0, 0, virtual_addr_tag);
mmu_flush_entry(base_address);
return status;
}
hw_status hw_mmu_tlb_add(const void __iomem *base_address,
u32 physical_addr,
u32 virtual_addr,
u32 page_sz,
u32 entry_num,
struct hw_mmu_map_attrs_t *map_attrs,
s8 preserved_bit, s8 valid_bit)
{
hw_status status = 0;
u32 lock_reg;
u32 virtual_addr_tag;
enum hw_mmu_page_size_t mmu_pg_size;
/*Check the input Parameters */
switch (page_sz) {
case HW_PAGE_SIZE4KB:
mmu_pg_size = HW_MMU_SMALL_PAGE;
break;
case HW_PAGE_SIZE64KB:
mmu_pg_size = HW_MMU_LARGE_PAGE;
break;
case HW_PAGE_SIZE1MB:
mmu_pg_size = HW_MMU_SECTION;
break;
case HW_PAGE_SIZE16MB:
mmu_pg_size = HW_MMU_SUPERSECTION;
break;
default:
return -EINVAL;
}
lock_reg = MMUMMU_LOCK_READ_REGISTER32(base_address);
/* Generate the 20-bit tag from virtual address */
virtual_addr_tag = ((virtual_addr & MMU_ADDR_MASK) >> 12);
/* Write the fields in the CAM Entry Register */
mmu_set_cam_entry(base_address, mmu_pg_size, preserved_bit, valid_bit,
virtual_addr_tag);
/* Write the different fields of the RAM Entry Register */
/* endianism of the page, element size of the page (8, 16, 32, 64 bit) */
mmu_set_ram_entry(base_address, physical_addr, map_attrs->endianism,
map_attrs->element_size, map_attrs->mixed_size);
/* Update the MMU Lock Register */
/* currentVictim between lockedBaseValue and (MMU_Entries_Number - 1) */
MMUMMU_LOCK_CURRENT_VICTIM_WRITE32(base_address, entry_num);
/* Enable loading of an entry in TLB by writing 1
into LD_TLB_REG register */
MMUMMU_LD_TLB_WRITE_REGISTER32(base_address, MMU_LOAD_TLB);
MMUMMU_LOCK_WRITE_REGISTER32(base_address, lock_reg);
return status;
}
hw_status hw_mmu_pte_set(const u32 pg_tbl_va,
u32 physical_addr,
u32 virtual_addr,
u32 page_sz, struct hw_mmu_map_attrs_t *map_attrs)
{
hw_status status = 0;
u32 pte_addr, pte_val;
s32 num_entries = 1;
switch (page_sz) {
case HW_PAGE_SIZE4KB:
pte_addr = hw_mmu_pte_addr_l2(pg_tbl_va,
virtual_addr &
MMU_SMALL_PAGE_MASK);
pte_val =
((physical_addr & MMU_SMALL_PAGE_MASK) |
(map_attrs->endianism << 9) | (map_attrs->
element_size << 4) |
(map_attrs->mixed_size << 11) | 2);
break;
case HW_PAGE_SIZE64KB:
num_entries = 16;
pte_addr = hw_mmu_pte_addr_l2(pg_tbl_va,
virtual_addr &
MMU_LARGE_PAGE_MASK);
pte_val =
((physical_addr & MMU_LARGE_PAGE_MASK) |
(map_attrs->endianism << 9) | (map_attrs->
element_size << 4) |
(map_attrs->mixed_size << 11) | 1);
break;
case HW_PAGE_SIZE1MB:
pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va,
virtual_addr &
MMU_SECTION_ADDR_MASK);
pte_val =
((((physical_addr & MMU_SECTION_ADDR_MASK) |
(map_attrs->endianism << 15) | (map_attrs->
element_size << 10) |
(map_attrs->mixed_size << 17)) & ~0x40000) | 0x2);
break;
case HW_PAGE_SIZE16MB:
num_entries = 16;
pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va,
virtual_addr &
MMU_SSECTION_ADDR_MASK);
pte_val =
(((physical_addr & MMU_SSECTION_ADDR_MASK) |
(map_attrs->endianism << 15) | (map_attrs->
element_size << 10) |
(map_attrs->mixed_size << 17)
) | 0x40000 | 0x2);
break;
case HW_MMU_COARSE_PAGE_SIZE:
pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va,
virtual_addr &
MMU_SECTION_ADDR_MASK);
pte_val = (physical_addr & MMU_PAGE_TABLE_MASK) | 1;
break;
default:
return -EINVAL;
}
while (--num_entries >= 0)
((u32 *) pte_addr)[num_entries] = pte_val;
return status;
}
hw_status hw_mmu_pte_clear(const u32 pg_tbl_va, u32 virtual_addr, u32 page_size)
{
hw_status status = 0;
u32 pte_addr;
s32 num_entries = 1;
switch (page_size) {
case HW_PAGE_SIZE4KB:
pte_addr = hw_mmu_pte_addr_l2(pg_tbl_va,
virtual_addr &
MMU_SMALL_PAGE_MASK);
break;
case HW_PAGE_SIZE64KB:
num_entries = 16;
pte_addr = hw_mmu_pte_addr_l2(pg_tbl_va,
virtual_addr &
MMU_LARGE_PAGE_MASK);
break;
case HW_PAGE_SIZE1MB:
case HW_MMU_COARSE_PAGE_SIZE:
pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va,
virtual_addr &
MMU_SECTION_ADDR_MASK);
break;
case HW_PAGE_SIZE16MB:
num_entries = 16;
pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va,
virtual_addr &
MMU_SSECTION_ADDR_MASK);
break;
default:
return -EINVAL;
}
while (--num_entries >= 0)
((u32 *) pte_addr)[num_entries] = 0;
return status;
}
/* mmu_flush_entry */
static hw_status mmu_flush_entry(const void __iomem *base_address)
{
hw_status status = 0;
u32 flush_entry_data = 0x1;
/* write values to register */
MMUMMU_FLUSH_ENTRY_WRITE_REGISTER32(base_address, flush_entry_data);
return status;
}
/* mmu_set_cam_entry */
static hw_status mmu_set_cam_entry(const void __iomem *base_address,
const u32 page_sz,
const u32 preserved_bit,
const u32 valid_bit,
const u32 virtual_addr_tag)
{
hw_status status = 0;
u32 mmu_cam_reg;
mmu_cam_reg = (virtual_addr_tag << 12);
mmu_cam_reg = (mmu_cam_reg) | (page_sz) | (valid_bit << 2) |
(preserved_bit << 3);
/* write values to register */
MMUMMU_CAM_WRITE_REGISTER32(base_address, mmu_cam_reg);
return status;
}
/* mmu_set_ram_entry */
static hw_status mmu_set_ram_entry(const void __iomem *base_address,
const u32 physical_addr,
enum hw_endianism_t endianism,
enum hw_element_size_t element_size,
enum hw_mmu_mixed_size_t mixed_size)
{
hw_status status = 0;
u32 mmu_ram_reg;
mmu_ram_reg = (physical_addr & MMU_ADDR_MASK);
mmu_ram_reg = (mmu_ram_reg) | ((endianism << 9) | (element_size << 7) |
(mixed_size << 6));
/* write values to register */
MMUMMU_RAM_WRITE_REGISTER32(base_address, mmu_ram_reg);
return status;
}
void hw_mmu_tlb_flush_all(const void __iomem *base)
{
__raw_writeb(1, base + MMU_GFLUSH);
}
/*
* hw_mmu.h
*
* DSP-BIOS Bridge driver support functions for TI OMAP processors.
*
* MMU types and API declarations
*
* Copyright (C) 2007 Texas Instruments, Inc.
*
* This package is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
* WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
*/
#ifndef _HW_MMU_H
#define _HW_MMU_H
#include <linux/types.h>
/* Bitmasks for interrupt sources */
#define HW_MMU_TRANSLATION_FAULT 0x2
#define HW_MMU_ALL_INTERRUPTS 0x1F
#define HW_MMU_COARSE_PAGE_SIZE 0x400
/* hw_mmu_mixed_size_t: Enumerated Type used to specify whether to follow
CPU/TLB Element size */
enum hw_mmu_mixed_size_t {
HW_MMU_TLBES,
HW_MMU_CPUES
};
/* hw_mmu_map_attrs_t: Struct containing MMU mapping attributes */
struct hw_mmu_map_attrs_t {
enum hw_endianism_t endianism;
enum hw_element_size_t element_size;
enum hw_mmu_mixed_size_t mixed_size;
bool donotlockmpupage;
};
extern hw_status hw_mmu_enable(const void __iomem *base_address);
extern hw_status hw_mmu_disable(const void __iomem *base_address);
extern hw_status hw_mmu_num_locked_set(const void __iomem *base_address,
u32 num_locked_entries);
extern hw_status hw_mmu_victim_num_set(const void __iomem *base_address,
u32 victim_entry_num);
/* For MMU faults */
extern hw_status hw_mmu_event_ack(const void __iomem *base_address,
u32 irq_mask);
extern hw_status hw_mmu_event_disable(const void __iomem *base_address,
u32 irq_mask);
extern hw_status hw_mmu_event_enable(const void __iomem *base_address,
u32 irq_mask);
extern hw_status hw_mmu_event_status(const void __iomem *base_address,
u32 *irq_mask);
extern hw_status hw_mmu_fault_addr_read(const void __iomem *base_address,
u32 *addr);
/* Set the TT base address */
extern hw_status hw_mmu_ttb_set(const void __iomem *base_address,
u32 ttb_phys_addr);
extern hw_status hw_mmu_twl_enable(const void __iomem *base_address);
extern hw_status hw_mmu_twl_disable(const void __iomem *base_address);
extern hw_status hw_mmu_tlb_flush(const void __iomem *base_address,
u32 virtual_addr, u32 page_sz);
extern hw_status hw_mmu_tlb_add(const void __iomem *base_address,
u32 physical_addr,
u32 virtual_addr,
u32 page_sz,
u32 entry_num,
struct hw_mmu_map_attrs_t *map_attrs,
s8 preserved_bit, s8 valid_bit);
/* For PTEs */
extern hw_status hw_mmu_pte_set(const u32 pg_tbl_va,
u32 physical_addr,
u32 virtual_addr,
u32 page_sz,
struct hw_mmu_map_attrs_t *map_attrs);
extern hw_status hw_mmu_pte_clear(const u32 pg_tbl_va,
u32 virtual_addr, u32 page_size);
void hw_mmu_tlb_flush_all(const void __iomem *base);
static inline u32 hw_mmu_pte_addr_l1(u32 l1_base, u32 va)
{
u32 pte_addr;
u32 va31_to20;
va31_to20 = va >> (20 - 2); /* (va >> 20) << 2, folded into one shift */
va31_to20 &= 0xFFFFFFFCUL;
pte_addr = l1_base + va31_to20;
return pte_addr;
}
static inline u32 hw_mmu_pte_addr_l2(u32 l2_base, u32 va)
{
u32 pte_addr;
pte_addr = (l2_base & 0xFFFFFC00) | ((va >> 10) & 0x3FC);
return pte_addr;
}
static inline u32 hw_mmu_pte_coarse_l1(u32 pte_val)
{
u32 pte_coarse;
pte_coarse = pte_val & 0xFFFFFC00;
return pte_coarse;
}
static inline u32 hw_mmu_pte_size_l1(u32 pte_val)
{
u32 pte_size = 0;
if ((pte_val & 0x3) == 0x1) {
/* Points to L2 PT */
pte_size = HW_MMU_COARSE_PAGE_SIZE;
}
if ((pte_val & 0x3) == 0x2) {
if (pte_val & (1 << 18))
pte_size = HW_PAGE_SIZE16MB;
else
pte_size = HW_PAGE_SIZE1MB;
}
return pte_size;
}
static inline u32 hw_mmu_pte_size_l2(u32 pte_val)
{
u32 pte_size = 0;
if (pte_val & 0x2)
pte_size = HW_PAGE_SIZE4KB;
else if (pte_val & 0x1)
pte_size = HW_PAGE_SIZE64KB;
return pte_size;
}
#endif /* _HW_MMU_H */
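As a sketch of how the inline accessors above compose (illustrative only, not part of the driver): given the L1 table base and the L2 table VA/PA pair that the bridge keeps in its pg_table_attrs, the two-level walk performed by bridge_brd_mem_un_map boils down to the following hypothetical helper.
/*
 * Hypothetical helper: resolve the L2 PTE that maps dsp_va, or return 0
 * when the L1 entry is a (super)section and no L2 table exists.
 */
static inline u32 lookup_l2_pte(u32 l1_base_va, u32 l2_base_va,
				u32 l2_base_pa, u32 dsp_va)
{
	u32 l1_pte = *(u32 *)hw_mmu_pte_addr_l1(l1_base_va, dsp_va);

	if (hw_mmu_pte_size_l1(l1_pte) != HW_MMU_COARSE_PAGE_SIZE)
		return 0;

	/* Convert the coarse-table PA stored in the L1 PTE to a kernel VA */
	l2_base_va += hw_mmu_pte_coarse_l1(l1_pte) - l2_base_pa;
	return *(u32 *)hw_mmu_pte_addr_l2(l2_base_va, dsp_va);
}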
...@@ -68,6 +68,7 @@ struct cfg_hostres { ...@@ -68,6 +68,7 @@ struct cfg_hostres {
void __iomem *dw_per_base; void __iomem *dw_per_base;
u32 dw_per_pm_base; u32 dw_per_pm_base;
u32 dw_core_pm_base; u32 dw_core_pm_base;
void __iomem *dw_dmmu_base;
void __iomem *dw_sys_ctrl_base; void __iomem *dw_sys_ctrl_base;
}; };
......
...@@ -27,6 +27,7 @@ ...@@ -27,6 +27,7 @@
#include <dspbridge/nodedefs.h> #include <dspbridge/nodedefs.h>
#include <dspbridge/dispdefs.h> #include <dspbridge/dispdefs.h>
#include <dspbridge/dspdefs.h> #include <dspbridge/dspdefs.h>
#include <dspbridge/dmm.h>
#include <dspbridge/host_os.h> #include <dspbridge/host_os.h>
/* ----------------------------------- This */ /* ----------------------------------- This */
...@@ -232,6 +233,29 @@ extern int dev_get_chnl_mgr(struct dev_object *hdev_obj, ...@@ -232,6 +233,29 @@ extern int dev_get_chnl_mgr(struct dev_object *hdev_obj,
extern int dev_get_cmm_mgr(struct dev_object *hdev_obj, extern int dev_get_cmm_mgr(struct dev_object *hdev_obj,
struct cmm_object **mgr); struct cmm_object **mgr);
/*
* ======== dev_get_dmm_mgr ========
* Purpose:
* Retrieve the handle to the dynamic memory manager created for this
* device.
* Parameters:
* hdev_obj: Handle to device object created with
* dev_create_device().
* *mgr: Ptr to location to store handle.
* Returns:
* 0: Success.
* -EFAULT: Invalid hdev_obj.
* Requires:
* mgr != NULL.
* DEV Initialized.
* Ensures:
* 0: *mgr contains a handle to a DMM manager object,
* or NULL.
* else: *mgr is NULL.
*/
extern int dev_get_dmm_mgr(struct dev_object *hdev_obj,
struct dmm_object **mgr);
/* /*
* ======== dev_get_cod_mgr ======== * ======== dev_get_cod_mgr ========
* Purpose: * Purpose:
......
/*
* dmm.h
*
* DSP-BIOS Bridge driver support functions for TI OMAP processors.
*
* The Dynamic Memory Mapping (DMM) module manages the DSP virtual address
* space that can be directly mapped to any MPU buffer or memory region.
*
* Copyright (C) 2005-2006 Texas Instruments, Inc.
*
* This package is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
* WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
*/
#ifndef DMM_
#define DMM_
#include <dspbridge/dbdefs.h>
struct dmm_object;
/* DMM attributes used in dmm_create() */
struct dmm_mgrattrs {
u32 reserved;
};
#define DMMPOOLSIZE 0x4000000
/*
* ======== dmm_get_handle ========
* Purpose:
* Return the dynamic memory manager object for this device.
* This is typically called from the client process.
*/
extern int dmm_get_handle(void *hprocessor,
struct dmm_object **dmm_manager);
extern int dmm_reserve_memory(struct dmm_object *dmm_mgr,
u32 size, u32 *prsv_addr);
extern int dmm_un_reserve_memory(struct dmm_object *dmm_mgr,
u32 rsv_addr);
extern int dmm_map_memory(struct dmm_object *dmm_mgr, u32 addr,
u32 size);
extern int dmm_un_map_memory(struct dmm_object *dmm_mgr,
u32 addr, u32 *psize);
extern int dmm_destroy(struct dmm_object *dmm_mgr);
extern int dmm_delete_tables(struct dmm_object *dmm_mgr);
extern int dmm_create(struct dmm_object **dmm_manager,
struct dev_object *hdev_obj,
const struct dmm_mgrattrs *mgr_attrts);
extern bool dmm_init(void);
extern void dmm_exit(void);
extern int dmm_create_tables(struct dmm_object *dmm_mgr,
u32 addr, u32 size);
#ifdef DSP_DMM_DEBUG
u32 dmm_mem_map_dump(struct dmm_object *dmm_mgr);
#endif
#endif /* DMM_ */
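A minimal usage sketch of the declarations above (illustrative only; dsp_va_base and the 1 MB size are made-up values, and most error handling is omitted):
static int dmm_lifecycle_sketch(struct dev_object *hdev_obj, u32 dsp_va_base)
{
	struct dmm_object *dmm_mgr;
	u32 rsv_addr, unmap_size;

	if (dmm_create(&dmm_mgr, hdev_obj, NULL))
		return -EPERM;
	dmm_create_tables(dmm_mgr, dsp_va_base, DMMPOOLSIZE);

	dmm_reserve_memory(dmm_mgr, 0x100000, &rsv_addr);	/* 1 MB of DSP VA */
	dmm_map_memory(dmm_mgr, rsv_addr, 0x100000);		/* account it as mapped */

	dmm_un_map_memory(dmm_mgr, rsv_addr, &unmap_size);
	dmm_un_reserve_memory(dmm_mgr, rsv_addr);

	dmm_delete_tables(dmm_mgr);
	return dmm_destroy(dmm_mgr);
}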
...@@ -108,6 +108,12 @@ struct dmm_map_object { ...@@ -108,6 +108,12 @@ struct dmm_map_object {
struct bridge_dma_map_info dma_info; struct bridge_dma_map_info dma_info;
}; };
/* Used for DMM reserved memory accounting */
struct dmm_rsv_object {
struct list_head link;
u32 dsp_reserved_addr;
};
/* New structure (member of process context) abstracts DMM resource info */ /* New structure (member of process context) abstracts DMM resource info */
struct dspheap_res_object { struct dspheap_res_object {
s32 heap_allocated; /* DMM status */ s32 heap_allocated; /* DMM status */
...@@ -159,6 +165,10 @@ struct process_context { ...@@ -159,6 +165,10 @@ struct process_context {
struct list_head dmm_map_list; struct list_head dmm_map_list;
spinlock_t dmm_map_lock; spinlock_t dmm_map_lock;
/* DMM reserved memory resources */
struct list_head dmm_rsv_list;
spinlock_t dmm_rsv_lock;
/* DSP Heap resources */ /* DSP Heap resources */
struct dspheap_res_object *pdspheap_list; struct dspheap_res_object *pdspheap_list;
......
/*
* dsp-mmu.h
*
* DSP-BIOS Bridge driver support functions for TI OMAP processors.
*
* DSP iommu.
*
* Copyright (C) 2005-2010 Texas Instruments, Inc.
*
* This package is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
* WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
*/
#ifndef _DSP_MMU_
#define _DSP_MMU_
#include <plat/iommu.h>
#include <plat/iovmm.h>
/**
* dsp_mmu_init() - initialize dsp_mmu module and returns a handle
*
* This function initializes the dsp mmu module and returns a struct iommu
* handle to be used for dsp maps.
*
*/
struct iommu *dsp_mmu_init(void);
/**
* dsp_mmu_exit() - destroy dsp mmu module
* @mmu: Pointer to iommu handle.
*
* This function destroys the dsp mmu module.
*
*/
void dsp_mmu_exit(struct iommu *mmu);
/**
* user_to_dsp_map() - maps user to dsp virtual address
* @mmu: Pointer to iommu handle.
* @uva: Virtual user space address.
* @da: DSP address
* @size: Buffer size to map.
* @usr_pgs: struct page array pointer where the user pages will be stored
*
* This function maps a user space buffer into DSP virtual address.
*
*/
u32 user_to_dsp_map(struct iommu *mmu, u32 uva, u32 da, u32 size,
struct page **usr_pgs);
/**
* user_to_dsp_unmap() - unmaps DSP virtual buffer.
* @mmu: Pointer to iommu handle.
* @da: DSP address
*
* This function unmaps a previously mapped user space buffer from the
* DSP virtual address space.
*
*/
int user_to_dsp_unmap(struct iommu *mmu, u32 da);
#endif
...@@ -161,6 +161,48 @@ typedef int(*fxn_brd_memwrite) (struct bridge_dev_context ...@@ -161,6 +161,48 @@ typedef int(*fxn_brd_memwrite) (struct bridge_dev_context
u32 dsp_addr, u32 ul_num_bytes, u32 dsp_addr, u32 ul_num_bytes,
u32 mem_type); u32 mem_type);
/*
* ======== bridge_brd_mem_map ========
* Purpose:
* Map a MPU memory region to a DSP/IVA memory space
* Parameters:
* dev_ctxt: Handle to Bridge driver defined device info.
* ul_mpu_addr: MPU memory region start address.
* virt_addr: DSP/IVA memory region u8 address.
* ul_num_bytes: Number of bytes to map.
* map_attrs: Mapping attributes (e.g. endianness).
* Returns:
* 0: Success.
* -EPERM: Other, unspecified error.
* Requires:
* dev_ctxt != NULL;
* Ensures:
*/
typedef int(*fxn_brd_memmap) (struct bridge_dev_context
* dev_ctxt, u32 ul_mpu_addr,
u32 virt_addr, u32 ul_num_bytes,
u32 map_attr,
struct page **mapped_pages);
/*
* ======== bridge_brd_mem_un_map ========
* Purpose:
* Unmap an MPU memory region from DSP/IVA memory space
* Parameters:
* dev_ctxt: Handle to Bridge driver defined device info.
* virt_addr: DSP/IVA memory region u8 address.
* ul_num_bytes: Number of bytes to unmap.
* Returns:
* 0: Success.
* -EPERM: Other, unspecified error.
* Requires:
* dev_ctxt != NULL;
* Ensures:
*/
typedef int(*fxn_brd_memunmap) (struct bridge_dev_context
* dev_ctxt,
u32 virt_addr, u32 ul_num_bytes);
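For context, a hedged sketch of how a Bridge driver would expose these two new entry points through its bridge_drv_interface (field names taken from the hunk below; the implementation names match the tiomap3430.c functions earlier in this diff, and the surrounding fields are elided):
static struct bridge_drv_interface drv_interface_fxns = {
	/* ... other entry points elided ... */
	.pfn_brd_mem_map = bridge_brd_mem_map,
	.pfn_brd_mem_un_map = bridge_brd_mem_un_map,
	/* ... */
};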
/* /*
* ======== bridge_brd_stop ======== * ======== bridge_brd_stop ========
* Purpose: * Purpose:
...@@ -951,6 +993,8 @@ struct bridge_drv_interface { ...@@ -951,6 +993,8 @@ struct bridge_drv_interface {
fxn_brd_setstate pfn_brd_set_state; /* Sets the Board State */ fxn_brd_setstate pfn_brd_set_state; /* Sets the Board State */
fxn_brd_memcopy pfn_brd_mem_copy; /* Copies DSP Memory */ fxn_brd_memcopy pfn_brd_mem_copy; /* Copies DSP Memory */
fxn_brd_memwrite pfn_brd_mem_write; /* Write DSP Memory w/o halt */ fxn_brd_memwrite pfn_brd_mem_write; /* Write DSP Memory w/o halt */
fxn_brd_memmap pfn_brd_mem_map; /* Maps MPU mem to DSP mem */
fxn_brd_memunmap pfn_brd_mem_un_map; /* Unmaps MPU mem to DSP mem */
fxn_chnl_create pfn_chnl_create; /* Create channel manager. */ fxn_chnl_create pfn_chnl_create; /* Create channel manager. */
fxn_chnl_destroy pfn_chnl_destroy; /* Destroy channel manager. */ fxn_chnl_destroy pfn_chnl_destroy; /* Destroy channel manager. */
fxn_chnl_open pfn_chnl_open; /* Create a new channel. */ fxn_chnl_open pfn_chnl_open; /* Create a new channel. */
......
...@@ -19,6 +19,10 @@ ...@@ -19,6 +19,10 @@
#ifndef DSPIOCTL_ #ifndef DSPIOCTL_
#define DSPIOCTL_ #define DSPIOCTL_
/* ------------------------------------ Hardware Abstraction Layer */
#include <hw_defs.h>
#include <hw_mmu.h>
/* /*
* Any IOCTLS at or above this value are reserved for standard Bridge driver * Any IOCTLS at or above this value are reserved for standard Bridge driver
* interfaces. * interfaces.
...@@ -61,6 +65,9 @@ struct bridge_ioctl_extproc { ...@@ -61,6 +65,9 @@ struct bridge_ioctl_extproc {
/* GPP virtual address. __va does not work for ioremapped addresses */ /* GPP virtual address. __va does not work for ioremapped addresses */
u32 ul_gpp_va; u32 ul_gpp_va;
u32 ul_size; /* Size of the mapped memory in bytes */ u32 ul_size; /* Size of the mapped memory in bytes */
enum hw_endianism_t endianism;
enum hw_mmu_mixed_size_t mixed_mode;
enum hw_element_size_t elem_size;
}; };
#endif /* DSPIOCTL_ */ #endif /* DSPIOCTL_ */
...@@ -550,6 +550,29 @@ extern int proc_map(void *hprocessor, ...@@ -550,6 +550,29 @@ extern int proc_map(void *hprocessor,
void **pp_map_addr, u32 ul_map_attr, void **pp_map_addr, u32 ul_map_attr,
struct process_context *pr_ctxt); struct process_context *pr_ctxt);
/*
* ======== proc_reserve_memory ========
* Purpose:
* Reserve a virtually contiguous region of DSP address space.
* Parameters:
* hprocessor : The processor handle.
* ul_size : Size of the address space to reserve.
* pp_rsv_addr : Ptr to DSP side reserved u8 address.
* Returns:
* 0 : Success.
* -EFAULT : Invalid processor handle.
* -EPERM : General failure.
* -ENOMEM : Cannot reserve chunk of this size.
* Requires:
* pp_rsv_addr is not NULL
* PROC Initialized.
* Ensures:
* Details:
*/
extern int proc_reserve_memory(void *hprocessor,
u32 ul_size, void **pp_rsv_addr,
struct process_context *pr_ctxt);
/* /*
* ======== proc_un_map ======== * ======== proc_un_map ========
* Purpose: * Purpose:
...@@ -572,4 +595,27 @@ extern int proc_map(void *hprocessor, ...@@ -572,4 +595,27 @@ extern int proc_map(void *hprocessor,
extern int proc_un_map(void *hprocessor, void *map_addr, extern int proc_un_map(void *hprocessor, void *map_addr,
struct process_context *pr_ctxt); struct process_context *pr_ctxt);
/*
* ======== proc_un_reserve_memory ========
* Purpose:
* Frees a previously reserved region of DSP address space.
* Parameters:
* hprocessor : The processor handle.
 * prsv_addr : Ptr to DSP side reserved u8 address.
* Returns:
* 0 : Success.
* -EFAULT : Invalid processor handle.
* -EPERM : General failure.
* -ENOENT : Cannot find a reserved region starting with this
* : address.
* Requires:
* prsv_addr is not NULL
* PROC Initialized.
* Ensures:
* Details:
*/
extern int proc_un_reserve_memory(void *hprocessor,
void *prsv_addr,
struct process_context *pr_ctxt);
#endif /* PROC_ */ #endif /* PROC_ */
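For orientation only, a minimal caller sketch of the two declarations above, assuming hprocessor, pr_ctxt and ul_size come from the surrounding driver code and that ul_size is a multiple of PG_SIZE4K:

	void *rsv_addr;
	int status;

	status = proc_reserve_memory(hprocessor, ul_size, &rsv_addr, pr_ctxt);
	if (status)
		return status;	/* -EFAULT, -ENOMEM or other failure per the list above */

	/* ... proc_map()/proc_un_map() calls against the reserved window ... */

	/* Release the reservation when it is no longer needed. */
	status = proc_un_reserve_memory(hprocessor, rsv_addr, pr_ctxt);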
...@@ -34,6 +34,7 @@ ...@@ -34,6 +34,7 @@
#include <dspbridge/cod.h> #include <dspbridge/cod.h>
#include <dspbridge/drv.h> #include <dspbridge/drv.h>
#include <dspbridge/proc.h> #include <dspbridge/proc.h>
#include <dspbridge/dmm.h>
/* ----------------------------------- Resource Manager */ /* ----------------------------------- Resource Manager */
#include <dspbridge/mgr.h> #include <dspbridge/mgr.h>
...@@ -74,6 +75,7 @@ struct dev_object { ...@@ -74,6 +75,7 @@ struct dev_object {
struct msg_mgr *hmsg_mgr; /* Message manager. */ struct msg_mgr *hmsg_mgr; /* Message manager. */
struct io_mgr *hio_mgr; /* IO manager (CHNL, msg_ctrl) */ struct io_mgr *hio_mgr; /* IO manager (CHNL, msg_ctrl) */
struct cmm_object *hcmm_mgr; /* SM memory manager. */ struct cmm_object *hcmm_mgr; /* SM memory manager. */
struct dmm_object *dmm_mgr; /* Dynamic memory manager. */
struct ldr_module *module_obj; /* Bridge Module handle. */ struct ldr_module *module_obj; /* Bridge Module handle. */
u32 word_size; /* DSP word size: quick access. */ u32 word_size; /* DSP word size: quick access. */
struct drv_object *hdrv_obj; /* Driver Object */ struct drv_object *hdrv_obj; /* Driver Object */
...@@ -248,6 +250,9 @@ int dev_create_device(struct dev_object **device_obj, ...@@ -248,6 +250,9 @@ int dev_create_device(struct dev_object **device_obj,
/* Instantiate the DEH module */ /* Instantiate the DEH module */
status = bridge_deh_create(&dev_obj->hdeh_mgr, dev_obj); status = bridge_deh_create(&dev_obj->hdeh_mgr, dev_obj);
} }
/* Create DMM mgr. */
status = dmm_create(&dev_obj->dmm_mgr,
(struct dev_object *)dev_obj, NULL);
} }
/* Add the new DEV_Object to the global list: */ /* Add the new DEV_Object to the global list: */
if (!status) { if (!status) {
...@@ -273,6 +278,8 @@ int dev_create_device(struct dev_object **device_obj, ...@@ -273,6 +278,8 @@ int dev_create_device(struct dev_object **device_obj,
kfree(dev_obj->proc_list); kfree(dev_obj->proc_list);
if (dev_obj->cod_mgr) if (dev_obj->cod_mgr)
cod_delete(dev_obj->cod_mgr); cod_delete(dev_obj->cod_mgr);
if (dev_obj->dmm_mgr)
dmm_destroy(dev_obj->dmm_mgr);
kfree(dev_obj); kfree(dev_obj);
} }
...@@ -382,6 +389,11 @@ int dev_destroy_device(struct dev_object *hdev_obj) ...@@ -382,6 +389,11 @@ int dev_destroy_device(struct dev_object *hdev_obj)
dev_obj->hcmm_mgr = NULL; dev_obj->hcmm_mgr = NULL;
} }
if (dev_obj->dmm_mgr) {
dmm_destroy(dev_obj->dmm_mgr);
dev_obj->dmm_mgr = NULL;
}
/* Call the driver's bridge_dev_destroy() function: */ /* Call the driver's bridge_dev_destroy() function: */
/* Require of DevDestroy */ /* Require of DevDestroy */
if (dev_obj->hbridge_context) { if (dev_obj->hbridge_context) {
...@@ -461,6 +473,32 @@ int dev_get_cmm_mgr(struct dev_object *hdev_obj, ...@@ -461,6 +473,32 @@ int dev_get_cmm_mgr(struct dev_object *hdev_obj,
return status; return status;
} }
/*
* ======== dev_get_dmm_mgr ========
* Purpose:
* Retrieve the handle to the dynamic memory manager created for this
* device.
*/
int dev_get_dmm_mgr(struct dev_object *hdev_obj,
struct dmm_object **mgr)
{
int status = 0;
struct dev_object *dev_obj = hdev_obj;
DBC_REQUIRE(refs > 0);
DBC_REQUIRE(mgr != NULL);
if (hdev_obj) {
*mgr = dev_obj->dmm_mgr;
} else {
*mgr = NULL;
status = -EFAULT;
}
DBC_ENSURE(!status || (mgr != NULL && *mgr == NULL));
return status;
}
/* /*
* ======== dev_get_cod_mgr ======== * ======== dev_get_cod_mgr ========
* Purpose: * Purpose:
...@@ -713,8 +751,10 @@ void dev_exit(void) ...@@ -713,8 +751,10 @@ void dev_exit(void)
refs--; refs--;
if (refs == 0) if (refs == 0) {
cmm_exit(); cmm_exit();
dmm_exit();
}
DBC_ENSURE(refs >= 0); DBC_ENSURE(refs >= 0);
} }
...@@ -726,12 +766,25 @@ void dev_exit(void) ...@@ -726,12 +766,25 @@ void dev_exit(void)
*/ */
bool dev_init(void) bool dev_init(void)
{ {
bool ret = true; bool cmm_ret, dmm_ret, ret = true;
DBC_REQUIRE(refs >= 0); DBC_REQUIRE(refs >= 0);
if (refs == 0) if (refs == 0) {
ret = cmm_init(); cmm_ret = cmm_init();
dmm_ret = dmm_init();
ret = cmm_ret && dmm_ret;
if (!ret) {
if (cmm_ret)
cmm_exit();
if (dmm_ret)
dmm_exit();
}
}
if (ret) if (ret)
refs++; refs++;
...@@ -1065,6 +1118,8 @@ static void store_interface_fxns(struct bridge_drv_interface *drv_fxns, ...@@ -1065,6 +1118,8 @@ static void store_interface_fxns(struct bridge_drv_interface *drv_fxns,
STORE_FXN(fxn_brd_setstate, pfn_brd_set_state); STORE_FXN(fxn_brd_setstate, pfn_brd_set_state);
STORE_FXN(fxn_brd_memcopy, pfn_brd_mem_copy); STORE_FXN(fxn_brd_memcopy, pfn_brd_mem_copy);
STORE_FXN(fxn_brd_memwrite, pfn_brd_mem_write); STORE_FXN(fxn_brd_memwrite, pfn_brd_mem_write);
STORE_FXN(fxn_brd_memmap, pfn_brd_mem_map);
STORE_FXN(fxn_brd_memunmap, pfn_brd_mem_un_map);
STORE_FXN(fxn_chnl_create, pfn_chnl_create); STORE_FXN(fxn_chnl_create, pfn_chnl_create);
STORE_FXN(fxn_chnl_destroy, pfn_chnl_destroy); STORE_FXN(fxn_chnl_destroy, pfn_chnl_destroy);
STORE_FXN(fxn_chnl_open, pfn_chnl_open); STORE_FXN(fxn_chnl_open, pfn_chnl_open);
......
/*
* dmm.c
*
* DSP-BIOS Bridge driver support functions for TI OMAP processors.
*
* The Dynamic Memory Manager (DMM) module manages the DSP Virtual address
* space that can be directly mapped to any MPU buffer or memory region
*
* Notes:
 * Region: Generic memory entity having a start address and a size
* Chunk: Reserved region
*
* Copyright (C) 2005-2006 Texas Instruments, Inc.
*
* This package is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
* WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
*/
#include <linux/types.h>
/* ----------------------------------- Host OS */
#include <dspbridge/host_os.h>
/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>
/* ----------------------------------- Trace & Debug */
#include <dspbridge/dbc.h>
/* ----------------------------------- OS Adaptation Layer */
#include <dspbridge/sync.h>
/* ----------------------------------- Platform Manager */
#include <dspbridge/dev.h>
#include <dspbridge/proc.h>
/* ----------------------------------- This */
#include <dspbridge/dmm.h>
/* ----------------------------------- Defines, Data Structures, Typedefs */
#define DMM_ADDR_VIRTUAL(a) \
(((struct map_page *)(a) - virtual_mapping_table) * PG_SIZE4K +\
dyn_mem_map_beg)
#define DMM_ADDR_TO_INDEX(a) (((a) - dyn_mem_map_beg) / PG_SIZE4K)
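As a worked example of the two macros above (the numbers are assumptions, not values from the driver): with dyn_mem_map_beg = 0x20000000 and PG_SIZE4K = 0x1000, the entry &virtual_mapping_table[5] translates to DSP virtual address 0x20005000 via DMM_ADDR_VIRTUAL, and DMM_ADDR_TO_INDEX(0x20005000) = (0x20005000 - 0x20000000) / 0x1000 recovers index 5.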
/* DMM Mgr */
struct dmm_object {
	/* The DMM lock serializes access to the memory manager from
	 * multiple threads. */
spinlock_t dmm_lock; /* Lock to access dmm mgr */
};
/* ----------------------------------- Globals */
static u32 refs; /* module reference count */
struct map_page {
u32 region_size:15;
u32 mapped_size:15;
u32 reserved:1;
u32 mapped:1;
};
/* Create the free list */
static struct map_page *virtual_mapping_table;
static u32 free_region; /* The index of free region */
static u32 free_size;
static u32 dyn_mem_map_beg; /* The Beginning of dynamic memory mapping */
static u32 table_size; /* The size of virt and phys pages tables */
/* ----------------------------------- Function Prototypes */
static struct map_page *get_region(u32 addr);
static struct map_page *get_free_region(u32 len);
static struct map_page *get_mapped_region(u32 addrs);
/* ======== dmm_create_tables ========
* Purpose:
 * Create the table that holds information about the physical addresses of
 * the buffer pages passed by the user, and the table that tracks the
 * virtual memory reserved for the DSP.
*/
int dmm_create_tables(struct dmm_object *dmm_mgr, u32 addr, u32 size)
{
struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
int status = 0;
status = dmm_delete_tables(dmm_obj);
if (!status) {
dyn_mem_map_beg = addr;
table_size = PG_ALIGN_HIGH(size, PG_SIZE4K) / PG_SIZE4K;
/* Create the free list */
virtual_mapping_table = __vmalloc(table_size *
sizeof(struct map_page), GFP_KERNEL |
__GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
if (virtual_mapping_table == NULL)
status = -ENOMEM;
else {
/* On successful allocation,
* all entries are zero ('free') */
free_region = 0;
free_size = table_size * PG_SIZE4K;
virtual_mapping_table[0].region_size = table_size;
}
}
if (status)
pr_err("%s: failure, status 0x%x\n", __func__, status);
return status;
}
/*
* ======== dmm_create ========
* Purpose:
* Create a dynamic memory manager object.
*/
int dmm_create(struct dmm_object **dmm_manager,
struct dev_object *hdev_obj,
const struct dmm_mgrattrs *mgr_attrts)
{
struct dmm_object *dmm_obj = NULL;
int status = 0;
DBC_REQUIRE(refs > 0);
DBC_REQUIRE(dmm_manager != NULL);
*dmm_manager = NULL;
	/* create, zero, and tag a dmm mgr object */
dmm_obj = kzalloc(sizeof(struct dmm_object), GFP_KERNEL);
if (dmm_obj != NULL) {
spin_lock_init(&dmm_obj->dmm_lock);
*dmm_manager = dmm_obj;
} else {
status = -ENOMEM;
}
return status;
}
/*
* ======== dmm_destroy ========
* Purpose:
 * Release the dynamic memory manager resources.
*/
int dmm_destroy(struct dmm_object *dmm_mgr)
{
struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
int status = 0;
DBC_REQUIRE(refs > 0);
if (dmm_mgr) {
status = dmm_delete_tables(dmm_obj);
if (!status)
kfree(dmm_obj);
} else
status = -EFAULT;
return status;
}
/*
* ======== dmm_delete_tables ========
* Purpose:
* Delete DMM Tables.
*/
int dmm_delete_tables(struct dmm_object *dmm_mgr)
{
int status = 0;
DBC_REQUIRE(refs > 0);
/* Delete all DMM tables */
if (dmm_mgr)
vfree(virtual_mapping_table);
else
status = -EFAULT;
return status;
}
/*
* ======== dmm_exit ========
* Purpose:
* Discontinue usage of module; free resources when reference count
* reaches 0.
*/
void dmm_exit(void)
{
DBC_REQUIRE(refs > 0);
refs--;
}
/*
* ======== dmm_get_handle ========
* Purpose:
* Return the dynamic memory manager object for this device.
* This is typically called from the client process.
*/
int dmm_get_handle(void *hprocessor, struct dmm_object **dmm_manager)
{
int status = 0;
struct dev_object *hdev_obj;
DBC_REQUIRE(refs > 0);
DBC_REQUIRE(dmm_manager != NULL);
if (hprocessor != NULL)
status = proc_get_dev_object(hprocessor, &hdev_obj);
else
hdev_obj = dev_get_first(); /* default */
if (!status)
status = dev_get_dmm_mgr(hdev_obj, dmm_manager);
return status;
}
/*
* ======== dmm_init ========
* Purpose:
* Initializes private state of DMM module.
*/
bool dmm_init(void)
{
bool ret = true;
DBC_REQUIRE(refs >= 0);
if (ret)
refs++;
DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0)));
virtual_mapping_table = NULL;
table_size = 0;
return ret;
}
/*
* ======== dmm_map_memory ========
* Purpose:
* Add a mapping block to the reserved chunk. DMM assumes that this block
* will be mapped in the DSP/IVA's address space. DMM returns an error if a
* mapping overlaps another one. This function stores the info that will be
* required later while unmapping the block.
*/
int dmm_map_memory(struct dmm_object *dmm_mgr, u32 addr, u32 size)
{
struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
struct map_page *chunk;
int status = 0;
spin_lock(&dmm_obj->dmm_lock);
/* Find the Reserved memory chunk containing the DSP block to
* be mapped */
chunk = (struct map_page *)get_region(addr);
if (chunk != NULL) {
/* Mark the region 'mapped', leave the 'reserved' info as-is */
chunk->mapped = true;
chunk->mapped_size = (size / PG_SIZE4K);
} else
status = -ENOENT;
spin_unlock(&dmm_obj->dmm_lock);
dev_dbg(bridge, "%s dmm_mgr %p, addr %x, size %x\n\tstatus %x, "
"chunk %p", __func__, dmm_mgr, addr, size, status, chunk);
return status;
}
/*
* ======== dmm_reserve_memory ========
* Purpose:
* Reserve a chunk of virtually contiguous DSP/IVA address space.
*/
int dmm_reserve_memory(struct dmm_object *dmm_mgr, u32 size,
u32 *prsv_addr)
{
int status = 0;
struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
struct map_page *node;
u32 rsv_addr = 0;
u32 rsv_size = 0;
spin_lock(&dmm_obj->dmm_lock);
/* Try to get a DSP chunk from the free list */
node = get_free_region(size);
if (node != NULL) {
/* DSP chunk of given size is available. */
rsv_addr = DMM_ADDR_VIRTUAL(node);
		/* Calculate the number of entries to use */
rsv_size = size / PG_SIZE4K;
if (rsv_size < node->region_size) {
/* Mark remainder of free region */
node[rsv_size].mapped = false;
node[rsv_size].reserved = false;
node[rsv_size].region_size =
node->region_size - rsv_size;
node[rsv_size].mapped_size = 0;
}
		/* get_free_region returns a first-fit chunk, but we only
		 * use what was requested. */
node->mapped = false;
node->reserved = true;
node->region_size = rsv_size;
node->mapped_size = 0;
/* Return the chunk's starting address */
*prsv_addr = rsv_addr;
} else
		/* DSP chunk of given size is not available */
status = -ENOMEM;
spin_unlock(&dmm_obj->dmm_lock);
dev_dbg(bridge, "%s dmm_mgr %p, size %x, prsv_addr %p\n\tstatus %x, "
"rsv_addr %x, rsv_size %x\n", __func__, dmm_mgr, size,
prsv_addr, status, rsv_addr, rsv_size);
return status;
}
/*
* ======== dmm_un_map_memory ========
* Purpose:
* Remove the mapped block from the reserved chunk.
*/
int dmm_un_map_memory(struct dmm_object *dmm_mgr, u32 addr, u32 *psize)
{
struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
struct map_page *chunk;
int status = 0;
spin_lock(&dmm_obj->dmm_lock);
chunk = get_mapped_region(addr);
if (chunk == NULL)
status = -ENOENT;
if (!status) {
/* Unmap the region */
*psize = chunk->mapped_size * PG_SIZE4K;
chunk->mapped = false;
chunk->mapped_size = 0;
}
spin_unlock(&dmm_obj->dmm_lock);
dev_dbg(bridge, "%s: dmm_mgr %p, addr %x, psize %p\n\tstatus %x, "
"chunk %p\n", __func__, dmm_mgr, addr, psize, status, chunk);
return status;
}
/*
* ======== dmm_un_reserve_memory ========
* Purpose:
* Free a chunk of reserved DSP/IVA address space.
*/
int dmm_un_reserve_memory(struct dmm_object *dmm_mgr, u32 rsv_addr)
{
struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
struct map_page *chunk;
u32 i;
int status = 0;
u32 chunk_size;
spin_lock(&dmm_obj->dmm_lock);
/* Find the chunk containing the reserved address */
chunk = get_mapped_region(rsv_addr);
if (chunk == NULL)
status = -ENOENT;
if (!status) {
/* Free all the mapped pages for this reserved region */
i = 0;
while (i < chunk->region_size) {
if (chunk[i].mapped) {
/* Remove mapping from the page tables. */
chunk_size = chunk[i].mapped_size;
/* Clear the mapping flags */
chunk[i].mapped = false;
chunk[i].mapped_size = 0;
i += chunk_size;
} else
i++;
}
/* Clear the flags (mark the region 'free') */
chunk->reserved = false;
		/* NOTE: We do NOT coalesce free regions here.
		 * Free regions are coalesced in get_free_region(), as it
		 * traverses the whole mapping table.
		 */
}
spin_unlock(&dmm_obj->dmm_lock);
dev_dbg(bridge, "%s: dmm_mgr %p, rsv_addr %x\n\tstatus %x chunk %p",
__func__, dmm_mgr, rsv_addr, status, chunk);
return status;
}
/*
* ======== get_region ========
* Purpose:
* Returns a region containing the specified memory region
*/
static struct map_page *get_region(u32 addr)
{
struct map_page *curr_region = NULL;
u32 i = 0;
if (virtual_mapping_table != NULL) {
/* find page mapped by this address */
i = DMM_ADDR_TO_INDEX(addr);
if (i < table_size)
curr_region = virtual_mapping_table + i;
}
dev_dbg(bridge, "%s: curr_region %p, free_region %d, free_size %d\n",
__func__, curr_region, free_region, free_size);
return curr_region;
}
/*
* ======== get_free_region ========
* Purpose:
* Returns the requested free region
*/
static struct map_page *get_free_region(u32 len)
{
struct map_page *curr_region = NULL;
u32 i = 0;
u32 region_size = 0;
u32 next_i = 0;
if (virtual_mapping_table == NULL)
return curr_region;
if (len > free_size) {
/* Find the largest free region
* (coalesce during the traversal) */
while (i < table_size) {
region_size = virtual_mapping_table[i].region_size;
next_i = i + region_size;
if (virtual_mapping_table[i].reserved == false) {
/* Coalesce, if possible */
if (next_i < table_size &&
virtual_mapping_table[next_i].reserved
== false) {
virtual_mapping_table[i].region_size +=
virtual_mapping_table
[next_i].region_size;
continue;
}
region_size *= PG_SIZE4K;
if (region_size > free_size) {
free_region = i;
free_size = region_size;
}
}
i = next_i;
}
}
if (len <= free_size) {
curr_region = virtual_mapping_table + free_region;
free_region += (len / PG_SIZE4K);
free_size -= len;
}
return curr_region;
}
/*
* ======== get_mapped_region ========
* Purpose:
 * Returns the requested mapped region
*/
static struct map_page *get_mapped_region(u32 addrs)
{
u32 i = 0;
struct map_page *curr_region = NULL;
if (virtual_mapping_table == NULL)
return curr_region;
i = DMM_ADDR_TO_INDEX(addrs);
if (i < table_size && (virtual_mapping_table[i].mapped ||
virtual_mapping_table[i].reserved))
curr_region = virtual_mapping_table + i;
return curr_region;
}
#ifdef DSP_DMM_DEBUG
u32 dmm_mem_map_dump(struct dmm_object *dmm_mgr)
{
struct map_page *curr_node = NULL;
u32 i;
u32 freemem = 0;
u32 bigsize = 0;
spin_lock(&dmm_mgr->dmm_lock);
if (virtual_mapping_table != NULL) {
for (i = 0; i < table_size; i +=
virtual_mapping_table[i].region_size) {
curr_node = virtual_mapping_table + i;
if (curr_node->reserved) {
/*printk("RESERVED size = 0x%x, "
"Map size = 0x%x\n",
(curr_node->region_size * PG_SIZE4K),
(curr_node->mapped == false) ? 0 :
(curr_node->mapped_size * PG_SIZE4K));
*/
} else {
/* printk("UNRESERVED size = 0x%x\n",
(curr_node->region_size * PG_SIZE4K));
*/
freemem += (curr_node->region_size * PG_SIZE4K);
if (curr_node->region_size > bigsize)
bigsize = curr_node->region_size;
}
}
}
spin_unlock(&dmm_mgr->dmm_lock);
printk(KERN_INFO "Total DSP VA FREE memory = %d Mbytes\n",
freemem / (1024 * 1024));
	printk(KERN_INFO "Total DSP VA USED memory = %d Mbytes\n",
	       (((table_size * PG_SIZE4K) - freemem)) / (1024 * 1024));
	printk(KERN_INFO "DSP VA - Biggest FREE block = %d Mbytes\n\n",
(bigsize * PG_SIZE4K / (1024 * 1024)));
return 0;
}
#endif
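A minimal lifecycle sketch for the DMM module above (illustrative only: hdev_obj is assumed to be a valid device object, the pool base/size and chunk size are placeholder values, and error handling is omitted):

	struct dmm_object *dmm;
	u32 rsv_addr, unmap_size;

	dmm_init();
	dmm_create(&dmm, hdev_obj, NULL);		/* per-device manager */
	dmm_create_tables(dmm, 0x20000000, 0x600000);	/* DSP VA pool: base, size */

	dmm_reserve_memory(dmm, 0x10000, &rsv_addr);	/* carve out a 64 KiB chunk */
	dmm_map_memory(dmm, rsv_addr, 0x10000);		/* mark the chunk mapped */

	dmm_un_map_memory(dmm, rsv_addr, &unmap_size);	/* returns the mapped size */
	dmm_un_reserve_memory(dmm, rsv_addr);		/* hand the chunk back */

	dmm_destroy(dmm);	/* dmm_destroy() also deletes the mapping tables */
	dmm_exit();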
...@@ -993,10 +993,27 @@ u32 procwrap_register_notify(union trapped_args *args, void *pr_ctxt) ...@@ -993,10 +993,27 @@ u32 procwrap_register_notify(union trapped_args *args, void *pr_ctxt)
/* /*
* ======== procwrap_reserve_memory ======== * ======== procwrap_reserve_memory ========
*/ */
u32 __deprecated procwrap_reserve_memory(union trapped_args *args, u32 procwrap_reserve_memory(union trapped_args *args, void *pr_ctxt)
void *pr_ctxt)
{ {
return 0; int status;
void *prsv_addr;
void *hprocessor = ((struct process_context *)pr_ctxt)->hprocessor;
if ((args->args_proc_rsvmem.ul_size <= 0) ||
(args->args_proc_rsvmem.ul_size & (PG_SIZE4K - 1)) != 0)
return -EINVAL;
status = proc_reserve_memory(hprocessor,
args->args_proc_rsvmem.ul_size, &prsv_addr,
pr_ctxt);
if (!status) {
if (put_user(prsv_addr, args->args_proc_rsvmem.pp_rsv_addr)) {
status = -EINVAL;
proc_un_reserve_memory(args->args_proc_rsvmem.
hprocessor, prsv_addr, pr_ctxt);
}
}
return status;
} }
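As a worked example of the size check above (values assumed): a request of 0x3000 bytes (three 4 KiB pages) passes, since 0x3000 & (PG_SIZE4K - 1) == 0, while 0x2800 is rejected with -EINVAL because it is not a whole multiple of the 4 KiB page size.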
/* /*
...@@ -1025,10 +1042,15 @@ u32 procwrap_un_map(union trapped_args *args, void *pr_ctxt) ...@@ -1025,10 +1042,15 @@ u32 procwrap_un_map(union trapped_args *args, void *pr_ctxt)
/* /*
* ======== procwrap_un_reserve_memory ======== * ======== procwrap_un_reserve_memory ========
*/ */
u32 __deprecated procwrap_un_reserve_memory(union trapped_args *args, u32 procwrap_un_reserve_memory(union trapped_args *args, void *pr_ctxt)
void *pr_ctxt)
{ {
return 0; int status;
void *hprocessor = ((struct process_context *)pr_ctxt)->hprocessor;
status = proc_un_reserve_memory(hprocessor,
args->args_proc_unrsvmem.prsv_addr,
pr_ctxt);
return status;
} }
/* /*
......
...@@ -146,6 +146,7 @@ int drv_remove_all_dmm_res_elements(void *process_ctxt) ...@@ -146,6 +146,7 @@ int drv_remove_all_dmm_res_elements(void *process_ctxt)
struct process_context *ctxt = (struct process_context *)process_ctxt; struct process_context *ctxt = (struct process_context *)process_ctxt;
int status = 0; int status = 0;
struct dmm_map_object *temp_map, *map_obj; struct dmm_map_object *temp_map, *map_obj;
struct dmm_rsv_object *temp_rsv, *rsv_obj;
/* Free DMM mapped memory resources */ /* Free DMM mapped memory resources */
list_for_each_entry_safe(map_obj, temp_map, &ctxt->dmm_map_list, link) { list_for_each_entry_safe(map_obj, temp_map, &ctxt->dmm_map_list, link) {
...@@ -155,6 +156,16 @@ int drv_remove_all_dmm_res_elements(void *process_ctxt) ...@@ -155,6 +156,16 @@ int drv_remove_all_dmm_res_elements(void *process_ctxt)
pr_err("%s: proc_un_map failed!" pr_err("%s: proc_un_map failed!"
" status = 0x%xn", __func__, status); " status = 0x%xn", __func__, status);
} }
/* Free DMM reserved memory resources */
list_for_each_entry_safe(rsv_obj, temp_rsv, &ctxt->dmm_rsv_list, link) {
status = proc_un_reserve_memory(ctxt->hprocessor, (void *)
rsv_obj->dsp_reserved_addr,
ctxt);
if (status)
pr_err("%s: proc_un_reserve_memory failed!"
" status = 0x%xn", __func__, status);
}
return status; return status;
} }
...@@ -732,6 +743,7 @@ static int request_bridge_resources(struct cfg_hostres *res) ...@@ -732,6 +743,7 @@ static int request_bridge_resources(struct cfg_hostres *res)
host_res->dw_sys_ctrl_base = ioremap(OMAP_SYSC_BASE, OMAP_SYSC_SIZE); host_res->dw_sys_ctrl_base = ioremap(OMAP_SYSC_BASE, OMAP_SYSC_SIZE);
dev_dbg(bridge, "dw_mem_base[0] 0x%x\n", host_res->dw_mem_base[0]); dev_dbg(bridge, "dw_mem_base[0] 0x%x\n", host_res->dw_mem_base[0]);
dev_dbg(bridge, "dw_mem_base[3] 0x%x\n", host_res->dw_mem_base[3]); dev_dbg(bridge, "dw_mem_base[3] 0x%x\n", host_res->dw_mem_base[3]);
dev_dbg(bridge, "dw_dmmu_base %p\n", host_res->dw_dmmu_base);
/* for 24xx base port is not mapping the mamory for DSP /* for 24xx base port is not mapping the mamory for DSP
* internal memory TODO Do a ioremap here */ * internal memory TODO Do a ioremap here */
...@@ -785,6 +797,8 @@ int drv_request_bridge_res_dsp(void **phost_resources) ...@@ -785,6 +797,8 @@ int drv_request_bridge_res_dsp(void **phost_resources)
OMAP_PER_PRM_SIZE); OMAP_PER_PRM_SIZE);
host_res->dw_core_pm_base = (u32) ioremap(OMAP_CORE_PRM_BASE, host_res->dw_core_pm_base = (u32) ioremap(OMAP_CORE_PRM_BASE,
OMAP_CORE_PRM_SIZE); OMAP_CORE_PRM_SIZE);
host_res->dw_dmmu_base = ioremap(OMAP_DMMU_BASE,
OMAP_DMMU_SIZE);
dev_dbg(bridge, "dw_mem_base[0] 0x%x\n", dev_dbg(bridge, "dw_mem_base[0] 0x%x\n",
host_res->dw_mem_base[0]); host_res->dw_mem_base[0]);
...@@ -796,6 +810,7 @@ int drv_request_bridge_res_dsp(void **phost_resources) ...@@ -796,6 +810,7 @@ int drv_request_bridge_res_dsp(void **phost_resources)
host_res->dw_mem_base[3]); host_res->dw_mem_base[3]);
dev_dbg(bridge, "dw_mem_base[4] 0x%x\n", dev_dbg(bridge, "dw_mem_base[4] 0x%x\n",
host_res->dw_mem_base[4]); host_res->dw_mem_base[4]);
dev_dbg(bridge, "dw_dmmu_base %p\n", host_res->dw_dmmu_base);
shm_size = drv_datap->shm_size; shm_size = drv_datap->shm_size;
if (shm_size >= 0x10000) { if (shm_size >= 0x10000) {
......
...@@ -509,6 +509,8 @@ static int bridge_open(struct inode *ip, struct file *filp) ...@@ -509,6 +509,8 @@ static int bridge_open(struct inode *ip, struct file *filp)
pr_ctxt->res_state = PROC_RES_ALLOCATED; pr_ctxt->res_state = PROC_RES_ALLOCATED;
spin_lock_init(&pr_ctxt->dmm_map_lock); spin_lock_init(&pr_ctxt->dmm_map_lock);
INIT_LIST_HEAD(&pr_ctxt->dmm_map_list); INIT_LIST_HEAD(&pr_ctxt->dmm_map_list);
spin_lock_init(&pr_ctxt->dmm_rsv_lock);
INIT_LIST_HEAD(&pr_ctxt->dmm_rsv_list);
pr_ctxt->node_id = kzalloc(sizeof(struct idr), GFP_KERNEL); pr_ctxt->node_id = kzalloc(sizeof(struct idr), GFP_KERNEL);
if (pr_ctxt->node_id) { if (pr_ctxt->node_id) {
......
...@@ -56,6 +56,7 @@ ...@@ -56,6 +56,7 @@
/* ----------------------------------- This */ /* ----------------------------------- This */
#include <dspbridge/nodepriv.h> #include <dspbridge/nodepriv.h>
#include <dspbridge/node.h> #include <dspbridge/node.h>
#include <dspbridge/dmm.h>
/* Static/Dynamic Loader includes */ /* Static/Dynamic Loader includes */
#include <dspbridge/dbll.h> #include <dspbridge/dbll.h>
...@@ -316,6 +317,10 @@ int node_allocate(struct proc_object *hprocessor, ...@@ -316,6 +317,10 @@ int node_allocate(struct proc_object *hprocessor,
u32 mapped_addr = 0; u32 mapped_addr = 0;
u32 map_attrs = 0x0; u32 map_attrs = 0x0;
struct dsp_processorstate proc_state; struct dsp_processorstate proc_state;
#ifdef DSP_DMM_DEBUG
struct dmm_object *dmm_mgr;
struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
#endif
void *node_res; void *node_res;
...@@ -425,12 +430,34 @@ int node_allocate(struct proc_object *hprocessor, ...@@ -425,12 +430,34 @@ int node_allocate(struct proc_object *hprocessor,
if (status) if (status)
goto func_cont; goto func_cont;
status = proc_reserve_memory(hprocessor,
pnode->create_args.asa.task_arg_obj.
heap_size + PAGE_SIZE,
(void **)&(pnode->create_args.asa.
task_arg_obj.udsp_heap_res_addr),
pr_ctxt);
if (status) {
pr_err("%s: Failed to reserve memory for heap: 0x%x\n",
__func__, status);
goto func_cont;
}
#ifdef DSP_DMM_DEBUG
status = dmm_get_handle(p_proc_object, &dmm_mgr);
if (!dmm_mgr) {
status = DSP_EHANDLE;
goto func_cont;
}
dmm_mem_map_dump(dmm_mgr);
#endif
map_attrs |= DSP_MAPLITTLEENDIAN; map_attrs |= DSP_MAPLITTLEENDIAN;
map_attrs |= DSP_MAPELEMSIZE32; map_attrs |= DSP_MAPELEMSIZE32;
map_attrs |= DSP_MAPVIRTUALADDR; map_attrs |= DSP_MAPVIRTUALADDR;
status = proc_map(hprocessor, (void *)attr_in->pgpp_virt_addr, status = proc_map(hprocessor, (void *)attr_in->pgpp_virt_addr,
pnode->create_args.asa.task_arg_obj.heap_size, pnode->create_args.asa.task_arg_obj.heap_size,
NULL, (void **)&mapped_addr, map_attrs, (void *)pnode->create_args.asa.task_arg_obj.
udsp_heap_res_addr, (void **)&mapped_addr, map_attrs,
pr_ctxt); pr_ctxt);
if (status) if (status)
pr_err("%s: Failed to map memory for Heap: 0x%x\n", pr_err("%s: Failed to map memory for Heap: 0x%x\n",
...@@ -2484,7 +2511,11 @@ static void delete_node(struct node_object *hnode, ...@@ -2484,7 +2511,11 @@ static void delete_node(struct node_object *hnode,
struct stream_chnl stream; struct stream_chnl stream;
struct node_msgargs node_msg_args; struct node_msgargs node_msg_args;
struct node_taskargs task_arg_obj; struct node_taskargs task_arg_obj;
#ifdef DSP_DMM_DEBUG
struct dmm_object *dmm_mgr;
struct proc_object *p_proc_object =
(struct proc_object *)hnode->hprocessor;
#endif
int status; int status;
if (!hnode) if (!hnode)
goto func_end; goto func_end;
...@@ -2545,6 +2576,19 @@ static void delete_node(struct node_object *hnode, ...@@ -2545,6 +2576,19 @@ static void delete_node(struct node_object *hnode,
status = proc_un_map(hnode->hprocessor, (void *) status = proc_un_map(hnode->hprocessor, (void *)
task_arg_obj.udsp_heap_addr, task_arg_obj.udsp_heap_addr,
pr_ctxt); pr_ctxt);
status = proc_un_reserve_memory(hnode->hprocessor,
(void *)
task_arg_obj.
udsp_heap_res_addr,
pr_ctxt);
#ifdef DSP_DMM_DEBUG
status = dmm_get_handle(p_proc_object, &dmm_mgr);
if (dmm_mgr)
dmm_mem_map_dump(dmm_mgr);
else
status = DSP_EHANDLE;
#endif
} }
} }
if (node_type != NODE_MESSAGE) { if (node_type != NODE_MESSAGE) {
......
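Condensing the node.c hunk above, the heap setup and teardown now pair up as follows (a sketch with abbreviated names; heap_size, gpp_va, res_addr, heap_addr and mapped_addr stand in for the task_arg_obj fields and locals used in the patch):

	/* node_allocate(): reserve DSP VA first, then map the GPP heap into it. */
	proc_reserve_memory(hprocessor, heap_size + PAGE_SIZE,
			    (void **)&res_addr, pr_ctxt);
	proc_map(hprocessor, (void *)gpp_va, heap_size, (void *)res_addr,
		 (void **)&mapped_addr,
		 DSP_MAPLITTLEENDIAN | DSP_MAPELEMSIZE32 | DSP_MAPVIRTUALADDR,
		 pr_ctxt);

	/* delete_node(): undo in the reverse order. */
	proc_un_map(hprocessor, (void *)heap_addr, pr_ctxt);
	proc_un_reserve_memory(hprocessor, (void *)res_addr, pr_ctxt);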
...@@ -39,6 +39,7 @@ ...@@ -39,6 +39,7 @@
#include <dspbridge/cod.h> #include <dspbridge/cod.h>
#include <dspbridge/dev.h> #include <dspbridge/dev.h>
#include <dspbridge/procpriv.h> #include <dspbridge/procpriv.h>
#include <dspbridge/dmm.h>
/* ----------------------------------- Resource Manager */ /* ----------------------------------- Resource Manager */
#include <dspbridge/mgr.h> #include <dspbridge/mgr.h>
...@@ -51,7 +52,6 @@ ...@@ -51,7 +52,6 @@
#include <dspbridge/msg.h> #include <dspbridge/msg.h>
#include <dspbridge/dspioctl.h> #include <dspbridge/dspioctl.h>
#include <dspbridge/drv.h> #include <dspbridge/drv.h>
#include <_tiomap.h>
/* ----------------------------------- This */ /* ----------------------------------- This */
#include <dspbridge/proc.h> #include <dspbridge/proc.h>
...@@ -151,21 +151,34 @@ static struct dmm_map_object *add_mapping_info(struct process_context *pr_ctxt, ...@@ -151,21 +151,34 @@ static struct dmm_map_object *add_mapping_info(struct process_context *pr_ctxt,
return map_obj; return map_obj;
} }
static int match_exact_map_obj(struct dmm_map_object *map_obj,
u32 dsp_addr, u32 size)
{
if (map_obj->dsp_addr == dsp_addr && map_obj->size != size)
pr_err("%s: addr match (0x%x), size don't (0x%x != 0x%x)\n",
__func__, dsp_addr, map_obj->size, size);
return map_obj->dsp_addr == dsp_addr &&
map_obj->size == size;
}
static void remove_mapping_information(struct process_context *pr_ctxt, static void remove_mapping_information(struct process_context *pr_ctxt,
u32 dsp_addr) u32 dsp_addr, u32 size)
{ {
struct dmm_map_object *map_obj; struct dmm_map_object *map_obj;
pr_debug("%s: looking for virt 0x%x\n", __func__, dsp_addr); pr_debug("%s: looking for virt 0x%x size 0x%x\n", __func__,
dsp_addr, size);
spin_lock(&pr_ctxt->dmm_map_lock); spin_lock(&pr_ctxt->dmm_map_lock);
list_for_each_entry(map_obj, &pr_ctxt->dmm_map_list, link) { list_for_each_entry(map_obj, &pr_ctxt->dmm_map_list, link) {
pr_debug("%s: candidate: mpu_addr 0x%x virt 0x%x\n", pr_debug("%s: candidate: mpu_addr 0x%x virt 0x%x size 0x%x\n",
__func__, __func__,
map_obj->mpu_addr, map_obj->mpu_addr,
map_obj->dsp_addr); map_obj->dsp_addr,
map_obj->size);
if (map_obj->dsp_addr == dsp_addr) { if (match_exact_map_obj(map_obj, dsp_addr, size)) {
pr_debug("%s: match, deleting map info\n", __func__); pr_debug("%s: match, deleting map info\n", __func__);
list_del(&map_obj->link); list_del(&map_obj->link);
kfree(map_obj->dma_info.sg); kfree(map_obj->dma_info.sg);
...@@ -1077,6 +1090,7 @@ int proc_load(void *hprocessor, const s32 argc_index, ...@@ -1077,6 +1090,7 @@ int proc_load(void *hprocessor, const s32 argc_index,
s32 cnew_envp; /* " " in new_envp[] */ s32 cnew_envp; /* " " in new_envp[] */
s32 nproc_id = 0; /* Anticipate MP version. */ s32 nproc_id = 0; /* Anticipate MP version. */
struct dcd_manager *hdcd_handle; struct dcd_manager *hdcd_handle;
struct dmm_object *dmm_mgr;
u32 dw_ext_end; u32 dw_ext_end;
u32 proc_id; u32 proc_id;
int brd_state; int brd_state;
...@@ -1267,6 +1281,25 @@ int proc_load(void *hprocessor, const s32 argc_index, ...@@ -1267,6 +1281,25 @@ int proc_load(void *hprocessor, const s32 argc_index,
if (!status) if (!status)
status = cod_get_sym_value(cod_mgr, EXTEND, status = cod_get_sym_value(cod_mgr, EXTEND,
&dw_ext_end); &dw_ext_end);
/* Reset DMM structs and add an initial free chunk */
if (!status) {
status =
dev_get_dmm_mgr(p_proc_object->hdev_obj,
&dmm_mgr);
if (dmm_mgr) {
/* Set dw_ext_end to DMM START u8
* address */
dw_ext_end =
(dw_ext_end + 1) * DSPWORDSIZE;
/* DMM memory is from EXT_END */
status = dmm_create_tables(dmm_mgr,
dw_ext_end,
DMMPOOLSIZE);
} else {
status = -EFAULT;
}
}
} }
} }
/* Restore the original argv[0] */ /* Restore the original argv[0] */
...@@ -1319,10 +1352,12 @@ int proc_map(void *hprocessor, void *pmpu_addr, u32 ul_size, ...@@ -1319,10 +1352,12 @@ int proc_map(void *hprocessor, void *pmpu_addr, u32 ul_size,
{ {
u32 va_align; u32 va_align;
u32 pa_align; u32 pa_align;
struct dmm_object *dmm_mgr;
u32 size_align; u32 size_align;
int status = 0; int status = 0;
struct proc_object *p_proc_object = (struct proc_object *)hprocessor; struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
struct dmm_map_object *map_obj; struct dmm_map_object *map_obj;
u32 tmp_addr = 0;
#ifdef CONFIG_TIDSPBRIDGE_CACHE_LINE_CHECK #ifdef CONFIG_TIDSPBRIDGE_CACHE_LINE_CHECK
if ((ul_map_attr & BUFMODE_MASK) != RBUF) { if ((ul_map_attr & BUFMODE_MASK) != RBUF) {
...@@ -1347,30 +1382,33 @@ int proc_map(void *hprocessor, void *pmpu_addr, u32 ul_size, ...@@ -1347,30 +1382,33 @@ int proc_map(void *hprocessor, void *pmpu_addr, u32 ul_size,
} }
/* Critical section */ /* Critical section */
mutex_lock(&proc_lock); mutex_lock(&proc_lock);
dmm_get_handle(p_proc_object, &dmm_mgr);
if (dmm_mgr)
status = dmm_map_memory(dmm_mgr, va_align, size_align);
else
status = -EFAULT;
/* Add mapping to the page tables. */ /* Add mapping to the page tables. */
if (!status) { if (!status) {
/* Mapped address = MSB of VA | LSB of PA */
tmp_addr = (va_align | ((u32) pmpu_addr & (PG_SIZE4K - 1)));
/* mapped memory resource tracking */ /* mapped memory resource tracking */
map_obj = add_mapping_info(pr_ctxt, pa_align, va_align, map_obj = add_mapping_info(pr_ctxt, pa_align, tmp_addr,
size_align); size_align);
if (!map_obj) { if (!map_obj)
status = -ENOMEM; status = -ENOMEM;
} else { else
va_align = user_to_dsp_map( status = (*p_proc_object->intf_fxns->pfn_brd_mem_map)
p_proc_object->hbridge_context->dsp_mmu, (p_proc_object->hbridge_context, pa_align, va_align,
pa_align, va_align, size_align, size_align, ul_map_attr, map_obj->pages);
map_obj->pages);
if (IS_ERR_VALUE(va_align))
status = (int)va_align;
}
} }
if (!status) { if (!status) {
/* Mapped address = MSB of VA | LSB of PA */ /* Mapped address = MSB of VA | LSB of PA */
map_obj->dsp_addr = (va_align | *pp_map_addr = (void *) tmp_addr;
((u32)pmpu_addr & (PG_SIZE4K - 1)));
*pp_map_addr = (void *)map_obj->dsp_addr;
} else { } else {
remove_mapping_information(pr_ctxt, va_align); remove_mapping_information(pr_ctxt, tmp_addr, size_align);
dmm_un_map_memory(dmm_mgr, va_align, &size_align);
} }
mutex_unlock(&proc_lock); mutex_unlock(&proc_lock);
...@@ -1462,6 +1500,55 @@ int proc_register_notify(void *hprocessor, u32 event_mask, ...@@ -1462,6 +1500,55 @@ int proc_register_notify(void *hprocessor, u32 event_mask,
return status; return status;
} }
/*
* ======== proc_reserve_memory ========
* Purpose:
* Reserve a virtually contiguous region of DSP address space.
*/
int proc_reserve_memory(void *hprocessor, u32 ul_size,
void **pp_rsv_addr,
struct process_context *pr_ctxt)
{
struct dmm_object *dmm_mgr;
int status = 0;
struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
struct dmm_rsv_object *rsv_obj;
if (!p_proc_object) {
status = -EFAULT;
goto func_end;
}
status = dmm_get_handle(p_proc_object, &dmm_mgr);
if (!dmm_mgr) {
status = -EFAULT;
goto func_end;
}
status = dmm_reserve_memory(dmm_mgr, ul_size, (u32 *) pp_rsv_addr);
if (status != 0)
goto func_end;
/*
* A successful reserve should be followed by insertion of rsv_obj
* into dmm_rsv_list, so that reserved memory resource tracking
* remains uptodate
*/
rsv_obj = kmalloc(sizeof(struct dmm_rsv_object), GFP_KERNEL);
if (rsv_obj) {
rsv_obj->dsp_reserved_addr = (u32) *pp_rsv_addr;
spin_lock(&pr_ctxt->dmm_rsv_lock);
list_add(&rsv_obj->link, &pr_ctxt->dmm_rsv_list);
spin_unlock(&pr_ctxt->dmm_rsv_lock);
}
func_end:
dev_dbg(bridge, "%s: hprocessor: 0x%p ul_size: 0x%x pp_rsv_addr: 0x%p "
"status 0x%x\n", __func__, hprocessor,
ul_size, pp_rsv_addr, status);
return status;
}
/* /*
* ======== proc_start ======== * ======== proc_start ========
* Purpose: * Purpose:
...@@ -1610,7 +1697,9 @@ int proc_un_map(void *hprocessor, void *map_addr, ...@@ -1610,7 +1697,9 @@ int proc_un_map(void *hprocessor, void *map_addr,
{ {
int status = 0; int status = 0;
struct proc_object *p_proc_object = (struct proc_object *)hprocessor; struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
struct dmm_object *dmm_mgr;
u32 va_align; u32 va_align;
u32 size_align;
va_align = PG_ALIGN_LOW((u32) map_addr, PG_SIZE4K); va_align = PG_ALIGN_LOW((u32) map_addr, PG_SIZE4K);
if (!p_proc_object) { if (!p_proc_object) {
...@@ -1618,11 +1707,24 @@ int proc_un_map(void *hprocessor, void *map_addr, ...@@ -1618,11 +1707,24 @@ int proc_un_map(void *hprocessor, void *map_addr,
goto func_end; goto func_end;
} }
status = dmm_get_handle(hprocessor, &dmm_mgr);
if (!dmm_mgr) {
status = -EFAULT;
goto func_end;
}
/* Critical section */ /* Critical section */
mutex_lock(&proc_lock); mutex_lock(&proc_lock);
/*
* Update DMM structures. Get the size to unmap.
* This function returns error if the VA is not mapped
*/
status = dmm_un_map_memory(dmm_mgr, (u32) va_align, &size_align);
/* Remove mapping from the page tables. */ /* Remove mapping from the page tables. */
status = user_to_dsp_unmap(p_proc_object->hbridge_context->dsp_mmu, if (!status) {
va_align); status = (*p_proc_object->intf_fxns->pfn_brd_mem_un_map)
(p_proc_object->hbridge_context, va_align, size_align);
}
mutex_unlock(&proc_lock); mutex_unlock(&proc_lock);
if (status) if (status)
...@@ -1633,7 +1735,7 @@ int proc_un_map(void *hprocessor, void *map_addr, ...@@ -1633,7 +1735,7 @@ int proc_un_map(void *hprocessor, void *map_addr,
* from dmm_map_list, so that mapped memory resource tracking * from dmm_map_list, so that mapped memory resource tracking
* remains uptodate * remains uptodate
*/ */
remove_mapping_information(pr_ctxt, (u32) map_addr); remove_mapping_information(pr_ctxt, (u32) map_addr, size_align);
func_end: func_end:
dev_dbg(bridge, "%s: hprocessor: 0x%p map_addr: 0x%p status: 0x%x\n", dev_dbg(bridge, "%s: hprocessor: 0x%p map_addr: 0x%p status: 0x%x\n",
...@@ -1641,6 +1743,55 @@ int proc_un_map(void *hprocessor, void *map_addr, ...@@ -1641,6 +1743,55 @@ int proc_un_map(void *hprocessor, void *map_addr,
return status; return status;
} }
/*
* ======== proc_un_reserve_memory ========
* Purpose:
* Frees a previously reserved region of DSP address space.
*/
int proc_un_reserve_memory(void *hprocessor, void *prsv_addr,
struct process_context *pr_ctxt)
{
struct dmm_object *dmm_mgr;
int status = 0;
struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
struct dmm_rsv_object *rsv_obj;
if (!p_proc_object) {
status = -EFAULT;
goto func_end;
}
status = dmm_get_handle(p_proc_object, &dmm_mgr);
if (!dmm_mgr) {
status = -EFAULT;
goto func_end;
}
status = dmm_un_reserve_memory(dmm_mgr, (u32) prsv_addr);
if (status != 0)
goto func_end;
/*
* A successful unreserve should be followed by removal of rsv_obj
* from dmm_rsv_list, so that reserved memory resource tracking
	 * remains up to date
*/
spin_lock(&pr_ctxt->dmm_rsv_lock);
list_for_each_entry(rsv_obj, &pr_ctxt->dmm_rsv_list, link) {
if (rsv_obj->dsp_reserved_addr == (u32) prsv_addr) {
list_del(&rsv_obj->link);
kfree(rsv_obj);
break;
}
}
spin_unlock(&pr_ctxt->dmm_rsv_lock);
func_end:
dev_dbg(bridge, "%s: hprocessor: 0x%p prsv_addr: 0x%p status: 0x%x\n",
__func__, hprocessor, prsv_addr, status);
return status;
}
/* /*
* ======== = proc_monitor ======== == * ======== = proc_monitor ======== ==
* Purpose: * Purpose:
......