Commit fde6a3c8 authored by FUJITA Tomonori, committed by Linus Torvalds

iommu sg merging: sparc64: make iommu respect the segment size limits

This patch makes iommu respect segment size limits when merging sg
lists.
Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Cc: Jeff Garzik <jeff@garzik.org>
Cc: James Bottomley <James.Bottomley@steeleye.com>
Acked-by: Jens Axboe <jens.axboe@oracle.com>
Cc: "David S. Miller" <davem@davemloft.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent 7c53664d
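For background: dma_get_max_seg_size(), which the patched prepare_sg() consults below, returns the per-device limit a driver registers via dma_set_max_seg_size(), falling back to 64K when no limit has been set. A minimal sketch of a driver advertising such a limit; the function name, the parms argument, and the 64K value are illustrative, not part of this patch:

#include <linux/dma-mapping.h>

/* Hypothetical probe-time setup: cap merged sg segments at 64K.
 * dev->dma_parms must point at storage for the limit, otherwise
 * dma_set_max_seg_size() fails. */
static int example_dma_setup(struct device *dev,
                             struct device_dma_parameters *parms)
{
        dev->dma_parms = parms;
        if (dma_set_max_seg_size(dev, 0x10000))
                return -EIO;
        return 0;
}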
@@ -580,7 +580,7 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
 
 	/* Step 1: Prepare scatter list. */
-	npages = prepare_sg(sglist, nelems);
+	npages = prepare_sg(dev, sglist, nelems);
 
 	/* Step 2: Allocate a cluster and context, if necessary. */
...
@@ -4,6 +4,7 @@
  * Copyright (C) 1999 David S. Miller (davem@redhat.com)
  */
 
+#include <linux/dma-mapping.h>
 #include "iommu_common.h"
 
 /* You are _strongly_ advised to enable the following debugging code
@@ -201,21 +202,24 @@ void verify_sglist(struct scatterlist *sglist, int nents, iopte_t *iopte, int np
 }
 #endif
 
-unsigned long prepare_sg(struct scatterlist *sg, int nents)
+unsigned long prepare_sg(struct device *dev, struct scatterlist *sg, int nents)
 {
 	struct scatterlist *dma_sg = sg;
 	unsigned long prev;
 	u32 dent_addr, dent_len;
+	unsigned int max_seg_size;
 
 	prev = (unsigned long) sg_virt(sg);
 	prev += (unsigned long) (dent_len = sg->length);
 	dent_addr = (u32) ((unsigned long)(sg_virt(sg)) & (IO_PAGE_SIZE - 1UL));
+	max_seg_size = dma_get_max_seg_size(dev);
 	while (--nents) {
 		unsigned long addr;
 
 		sg = sg_next(sg);
 		addr = (unsigned long) sg_virt(sg);
-		if (! VCONTIG(prev, addr)) {
+		if (! VCONTIG(prev, addr) ||
+		    dent_len + sg->length > max_seg_size) {
 			dma_sg->dma_address = dent_addr;
 			dma_sg->dma_length = dent_len;
 			dma_sg = sg_next(dma_sg);
...
@@ -9,6 +9,7 @@
 #include <linux/sched.h>
 #include <linux/mm.h>
 #include <linux/scatterlist.h>
+#include <linux/device.h>
 
 #include <asm/iommu.h>
 #include <asm/scatterlist.h>
@@ -46,4 +47,4 @@ extern void verify_sglist(struct scatterlist *sg, int nents, iopte_t *iopte, int
 #define VCONTIG(__X, __Y)	(((__X) == (__Y)) || \
 				 (((__X) | (__Y)) << (64UL - PAGE_SHIFT)) == 0UL)
 
-extern unsigned long prepare_sg(struct scatterlist *sg, int nents);
+extern unsigned long prepare_sg(struct device *dev, struct scatterlist *sg, int nents);
@@ -490,7 +490,7 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
 		goto bad;
 
 	/* Step 1: Prepare scatter list. */
-	npages = prepare_sg(sglist, nelems);
+	npages = prepare_sg(dev, sglist, nelems);
 
 	/* Step 2: Allocate a cluster and context, if necessary. */
 	spin_lock_irqsave(&iommu->lock, flags);
...
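Taken together, the new test in prepare_sg() means a neighbouring sg entry is folded into the current DMA segment only when VCONTIG() accepts the address pair and the combined length still fits the device's limit. A standalone sketch of that predicate (plain C for illustration, not kernel code; sparc64's 8K pages, i.e. PAGE_SHIFT of 13, assumed):

#include <stdbool.h>
#include <stdint.h>

#define PAGE_SHIFT 13  /* sparc64 base pages are 8K */

/* VCONTIG from iommu_common.h: the previous segment ends exactly where
 * the next buffer starts, or both addresses sit on page boundaries so
 * the IOMMU can still map them contiguously in DMA space. */
static bool vcontig(uint64_t prev_end, uint64_t next_start)
{
        return prev_end == next_start ||
               ((prev_end | next_start) << (64 - PAGE_SHIFT)) == 0;
}

/* The rule after this patch: contiguity alone is no longer enough,
 * the merged segment must also respect max_seg_size. */
static bool can_merge(uint64_t prev_end, uint64_t next_start,
                      uint32_t seg_len, uint32_t next_len,
                      uint32_t max_seg_size)
{
        return vcontig(prev_end, next_start) &&
               seg_len + next_len <= max_seg_size;
}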