Commit f614c817 authored by Linus Torvalds

Merge branch 'parisc-4.1-1' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux

Pull parisc fixes from Helge Deller:
 "The patch by Guenter Roeck fixes the build on parisc which got broken
  because of commit f24ffde4 ("parisc: expose number of page table
  levels on Kconfig level") and the patch from Matthew Wilcox converts
  our code to use the generic scatterlist.h header file"

* 'parisc-4.1-1' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux:
  parisc: Replace PT_NLEVELS with CONFIG_PGTABLE_LEVELS
  parisc: Eliminate sg_virt_addr() and private scatterlist.h
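For reference, sg_virt() is the helper from the generic <linux/scatterlist.h> that the converted code now calls directly; its upstream definition is roughly the following (paraphrased for context, not part of this diff):

static inline void *sg_virt(struct scatterlist *sg)
{
	return page_address(sg_page(sg)) + sg->offset;
}

It returns the kernel virtual address of an entry, i.e. the page's linear mapping plus the segment offset, which is exactly what the old parisc-private sg_virt_addr() macro cast to unsigned long.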
@@ -20,6 +20,7 @@ generic-y += param.h
generic-y += percpu.h
generic-y += poll.h
generic-y += preempt.h
+generic-y += scatterlist.h
generic-y += seccomp.h
generic-y += segment.h
generic-y += topology.h
......
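The generic-y entry added above tells Kbuild to generate a wrapper header for the architecture instead of keeping a private copy; on a 4.1-era tree the generated include/generated/asm/scatterlist.h is essentially just the line below (illustrative; the exact wrapper text is emitted by scripts/Makefile.asm-generic):

#include <asm-generic/scatterlist.h>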
@@ -26,7 +26,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
if (likely(pgd != NULL)) {
memset(pgd, 0, PAGE_SIZE<<PGD_ALLOC_ORDER);
-#if PT_NLEVELS == 3
+#if CONFIG_PGTABLE_LEVELS == 3
actual_pgd += PTRS_PER_PGD;
/* Populate first pmd with allocated memory. We mark it
* with PxD_FLAG_ATTACHED as a signal to the system that this
@@ -45,7 +45,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
-#if PT_NLEVELS == 3
+#if CONFIG_PGTABLE_LEVELS == 3
pgd -= PTRS_PER_PGD;
#endif
free_pages((unsigned long)pgd, PGD_ALLOC_ORDER);
@@ -102,7 +102,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
{
-#if PT_NLEVELS == 3
+#if CONFIG_PGTABLE_LEVELS == 3
/* preserve the gateway marker if this is the beginning of
* the permanent pmd */
if(pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
......
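CONFIG_PGTABLE_LEVELS, used in the pgalloc.h hunks above, is the Kconfig symbol introduced by commit f24ffde4 referenced in the pull message; on parisc it is defined along these lines (quoted from memory of that commit, so treat it as a sketch rather than the exact text):

config PGTABLE_LEVELS
	int
	default 3 if 64BIT && PARISC_PAGE_SIZE_4KB
	default 2

So the #if CONFIG_PGTABLE_LEVELS == 3 branches apply only to 64-bit kernels built with 4 kB pages, which previously used the PT_NLEVELS == 3 guard.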
-#ifndef _ASM_PARISC_SCATTERLIST_H
-#define _ASM_PARISC_SCATTERLIST_H
-#include <asm/page.h>
-#include <asm/types.h>
-#include <asm-generic/scatterlist.h>
-#define sg_virt_addr(sg) ((unsigned long)sg_virt(sg))
-#endif /* _ASM_PARISC_SCATTERLIST_H */
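The deleted header above shows that sg_virt_addr() was only a cast around the generic sg_virt(), so the conversions in the rest of this series are mechanical. A minimal sketch of the substitution (hypothetical helper name, not from the patch):

#include <linux/scatterlist.h>

/* Hypothetical helper: what every former sg_virt_addr() caller now does,
 * adding the cast only where an unsigned long is still wanted. */
static inline unsigned long sg_kernel_address(struct scatterlist *sg)
{
	return (unsigned long)sg_virt(sg);	/* formerly sg_virt_addr(sg) */
}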
@@ -482,7 +482,7 @@ static int pa11_dma_map_sg(struct device *dev, struct scatterlist *sglist, int n
BUG_ON(direction == DMA_NONE);
for (i = 0; i < nents; i++, sglist++ ) {
-unsigned long vaddr = sg_virt_addr(sglist);
+unsigned long vaddr = (unsigned long)sg_virt(sglist);
sg_dma_address(sglist) = (dma_addr_t) virt_to_phys(vaddr);
sg_dma_len(sglist) = sglist->length;
flush_kernel_dcache_range(vaddr, sglist->length);
@@ -502,7 +502,7 @@ static void pa11_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, in
/* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */
for (i = 0; i < nents; i++, sglist++ )
-flush_kernel_dcache_range(sg_virt_addr(sglist), sglist->length);
+flush_kernel_vmap_range(sg_virt(sglist), sglist->length);
return;
}
@@ -527,7 +527,7 @@ static void pa11_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl
/* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */
for (i = 0; i < nents; i++, sglist++ )
-flush_kernel_dcache_range(sg_virt_addr(sglist), sglist->length);
+flush_kernel_vmap_range(sg_virt(sglist), sglist->length);
}
static void pa11_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction)
@@ -537,7 +537,7 @@ static void pa11_dma_sync_sg_for_device(struct device *dev, struct scatterlist *
/* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */
for (i = 0; i < nents; i++, sglist++ )
-flush_kernel_dcache_range(sg_virt_addr(sglist), sglist->length);
+flush_kernel_vmap_range(sg_virt(sglist), sglist->length);
}
struct hppa_dma_ops pcxl_dma_ops = {
......
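The pa11 sync/unmap loops above walk the list with sglist++; for context, the scatterlist API also offers for_each_sg(), which additionally copes with chained lists. A minimal sketch of the same cache-maintenance walk written that way (illustrative only, not part of this patch):

#include <linux/highmem.h>	/* flush_kernel_vmap_range() */
#include <linux/scatterlist.h>

/* Hypothetical rewrite of the sync walk; the real patch keeps the
 * open-coded array iteration. */
static void sync_sg_sketch(struct scatterlist *sglist, int nents)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sglist, sg, nents, i)
		flush_kernel_vmap_range(sg_virt(sg), sg->length);
}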
@@ -916,7 +916,7 @@ ccio_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
/* Fast path single entry scatterlists. */
if (nents == 1) {
sg_dma_address(sglist) = ccio_map_single(dev,
-(void *)sg_virt_addr(sglist), sglist->length,
+sg_virt(sglist), sglist->length,
direction);
sg_dma_len(sglist) = sglist->length;
return 1;
@@ -983,8 +983,8 @@ ccio_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
BUG_ON(!dev);
ioc = GET_IOC(dev);
DBG_RUN_SG("%s() START %d entries, %08lx,%x\n",
__func__, nents, sg_virt_addr(sglist), sglist->length);
DBG_RUN_SG("%s() START %d entries, %p,%x\n",
__func__, nents, sg_virt(sglist), sglist->length);
#ifdef CCIO_COLLECT_STATS
ioc->usg_calls++;
......
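For context on how the ccio_map_sg()/ccio_unmap_sg() paths above are reached: drivers go through the generic DMA API and then read back the bus address and length the IOMMU code filled in for each segment. A rough sketch of a typical caller (hypothetical driver code, not from this patch):

#include <linux/dma-mapping.h>
#include <linux/printk.h>
#include <linux/scatterlist.h>

/* Hypothetical caller: map, inspect each mapped segment, unmap. */
static int map_and_walk(struct device *dev, struct scatterlist *sgl, int nents)
{
	struct scatterlist *sg;
	int i, count;

	count = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
	if (!count)
		return -ENOMEM;

	for_each_sg(sgl, sg, count, i)
		pr_debug("seg %d: %pad + %u\n", i,
			 &sg_dma_address(sg), sg_dma_len(sg));

	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
	return 0;
}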
@@ -30,9 +30,9 @@ iommu_fill_pdir(struct ioc *ioc, struct scatterlist *startsg, int nents,
unsigned long vaddr;
long size;
DBG_RUN_SG(" %d : %08lx/%05x %08lx/%05x\n", nents,
DBG_RUN_SG(" %d : %08lx/%05x %p/%05x\n", nents,
(unsigned long)sg_dma_address(startsg), cnt,
-sg_virt_addr(startsg), startsg->length
+sg_virt(startsg), startsg->length
);
@@ -66,7 +66,7 @@ iommu_fill_pdir(struct ioc *ioc, struct scatterlist *startsg, int nents,
BUG_ON(pdirp == NULL);
-vaddr = sg_virt_addr(startsg);
+vaddr = (unsigned long)sg_virt(startsg);
sg_dma_len(dma_sg) += startsg->length;
size = startsg->length + dma_offset;
dma_offset = 0;
@@ -113,7 +113,7 @@ iommu_coalesce_chunks(struct ioc *ioc, struct device *dev,
*/
contig_sg = startsg;
dma_len = startsg->length;
-dma_offset = sg_virt_addr(startsg) & ~IOVP_MASK;
+dma_offset = startsg->offset;
/* PARANOID: clear entries */
sg_dma_address(startsg) = 0;
@@ -124,14 +124,13 @@ iommu_coalesce_chunks(struct ioc *ioc, struct device *dev,
** it's always looking one "ahead".
*/
while(--nents > 0) {
-unsigned long prevstartsg_end, startsg_end;
+unsigned long prev_end, sg_start;
-prevstartsg_end = sg_virt_addr(startsg) +
-startsg->length;
+prev_end = (unsigned long)sg_virt(startsg) +
+startsg->length;
startsg++;
-startsg_end = sg_virt_addr(startsg) +
-startsg->length;
+sg_start = (unsigned long)sg_virt(startsg);
/* PARANOID: clear entries */
sg_dma_address(startsg) = 0;
@@ -150,10 +149,13 @@ iommu_coalesce_chunks(struct ioc *ioc, struct device *dev,
break;
/*
-** Next see if we can append the next chunk (i.e.
-** it must end on one page and begin on another
+* Next see if we can append the next chunk (i.e.
+* it must end on one page and begin on another, or
+* it must start on the same address as the previous
+* entry ended.
*/
-if (unlikely(((prevstartsg_end | sg_virt_addr(startsg)) & ~PAGE_MASK) != 0))
+if (unlikely((prev_end != sg_start) ||
+((prev_end | sg_start) & ~PAGE_MASK)))
break;
dma_len += startsg->length;
......
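The new test in iommu_coalesce_chunks() above only merges a chunk into the current DMA segment when it starts exactly where the previous chunk ended and that junction sits on a page boundary. A small standalone illustration of the same check (hypothetical helper, not from the patch):

#include <asm/page.h>	/* PAGE_MASK */

/* Hypothetical helper mirroring the merge test above. */
static int can_append_chunk(unsigned long prev_end, unsigned long sg_start)
{
	if (prev_end != sg_start)
		return 0;	/* not virtually contiguous */
	if ((prev_end | sg_start) & ~PAGE_MASK)
		return 0;	/* junction is not page aligned */
	return 1;
}

With 4 kB pages, for example, a chunk ending at 0x11000 can absorb one starting at 0x11000, but a chunk ending at 0x11800 cannot be extended even if the next one starts exactly there.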
@@ -278,7 +278,7 @@ sba_dump_sg( struct ioc *ioc, struct scatterlist *startsg, int nents)
nents,
(unsigned long) sg_dma_address(startsg),
sg_dma_len(startsg),
-sg_virt_addr(startsg), startsg->length);
+sg_virt(startsg), startsg->length);
startsg++;
}
}
@@ -945,8 +945,7 @@ sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
/* Fast path single entry scatterlists. */
if (nents == 1) {
-sg_dma_address(sglist) = sba_map_single(dev,
-(void *)sg_virt_addr(sglist),
+sg_dma_address(sglist) = sba_map_single(dev, sg_virt(sglist),
sglist->length, direction);
sg_dma_len(sglist) = sglist->length;
return 1;
@@ -1025,7 +1024,7 @@ sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
#endif
DBG_RUN_SG("%s() START %d entries, %p,%x\n",
-__func__, nents, sg_virt_addr(sglist), sglist->length);
+__func__, nents, sg_virt(sglist), sglist->length);
ioc = GET_IOC(dev);
......