Commit f1970baf
Authored Jun 20, 2005 by James Bottomley
Committed by Jens Axboe on Jun 20, 2005
[PATCH] Add scatter-gather support for the block layer SG_IO
Signed-off-by: Jens Axboe <axboe@suse.de>
Parent: dd1cab95
Showing 5 changed files with 191 additions and 62 deletions (+191 −62)
drivers/block/ll_rw_blk.c   +58 −6
drivers/block/scsi_ioctl.c  +23 −11
fs/bio.c                    +105 −45
include/linux/bio.h         +4 −0
include/linux/blkdev.h      +1 −0
drivers/block/ll_rw_blk.c
@@ -2148,6 +2148,50 @@ int blk_rq_map_user(request_queue_t *q, struct request *rq, void __user *ubuf,
 
 EXPORT_SYMBOL(blk_rq_map_user);
 
+/**
+ * blk_rq_map_user_iov - map user data to a request, for REQ_BLOCK_PC usage
+ * @q:		request queue where request should be inserted
+ * @rq:		request to map data to
+ * @iov:	pointer to the iovec
+ * @iov_count:	number of elements in the iovec
+ *
+ * Description:
+ *    Data will be mapped directly for zero copy io, if possible. Otherwise
+ *    a kernel bounce buffer is used.
+ *
+ *    A matching blk_rq_unmap_user() must be issued at the end of io, while
+ *    still in process context.
+ *
+ *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
+ *    before being submitted to the device, as pages mapped may be out of
+ *    reach. It's the callers responsibility to make sure this happens. The
+ *    original bio must be passed back in to blk_rq_unmap_user() for proper
+ *    unmapping.
+ */
+int blk_rq_map_user_iov(request_queue_t *q, struct request *rq,
+			struct sg_iovec *iov, int iov_count)
+{
+	struct bio *bio;
+
+	if (!iov || iov_count <= 0)
+		return -EINVAL;
+
+	/* we don't allow misaligned data like bio_map_user() does. If the
+	 * user is using sg, they're expected to know the alignment constraints
+	 * and respect them accordingly */
+	bio = bio_map_user_iov(q, NULL, iov, iov_count, rq_data_dir(rq) == READ);
+	if (IS_ERR(bio))
+		return PTR_ERR(bio);
+
+	rq->bio = rq->biotail = bio;
+	blk_rq_bio_prep(q, rq, bio);
+	rq->buffer = rq->data = NULL;
+	rq->data_len = bio->bi_size;
+	return 0;
+}
+
+EXPORT_SYMBOL(blk_rq_map_user_iov);
+
 /**
  * blk_rq_unmap_user - unmap a request with user data
  * @rq:		request to be unmapped
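The kernel-doc above defines the calling contract: map the iovec, submit the request, then issue a matching blk_rq_unmap_user() while still in process context. For orientation, here is a minimal caller sketch, not part of this patch; the helper name is illustrative and the command, timeout, and sense-buffer setup a real REQ_BLOCK_PC caller needs are omitted, assuming the 2.6.12-era block API:

	/* Hypothetical sketch of an in-kernel REQ_BLOCK_PC caller. */
	static int submit_user_iov(request_queue_t *q, struct gendisk *disk,
				   struct sg_iovec *uiov, int count, int to_dev)
	{
		struct request *rq;
		struct bio *bio;
		unsigned int len;
		int ret;

		rq = blk_get_request(q, to_dev ? WRITE : READ, __GFP_WAIT);
		if (!rq)
			return -ENOMEM;
		rq->flags |= REQ_BLOCK_PC;
		/* a real caller also fills rq->cmd, rq->timeout and a sense buffer */

		/* map the user iovec; a bounce buffer is used if zero-copy fails */
		ret = blk_rq_map_user_iov(q, rq, uiov, count);
		if (ret)
			goto out;

		bio = rq->bio;			/* keep for blk_rq_unmap_user() */
		len = rq->data_len;
		ret = blk_execute_rq(q, disk, rq);

		/* unmap while still in process context, as required above */
		blk_rq_unmap_user(bio, len);
	out:
		blk_put_request(rq);
		return ret;
	}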
@@ -2207,6 +2251,19 @@ int blk_rq_map_kern(request_queue_t *q, struct request *rq, void *kbuf,
 
 EXPORT_SYMBOL(blk_rq_map_kern);
 
+void blk_execute_rq_nowait(request_queue_t *q, struct gendisk *bd_disk,
+			   struct request *rq, int at_head,
+			   void (*done)(struct request *))
+{
+	int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
+
+	rq->rq_disk = bd_disk;
+	rq->flags |= REQ_NOMERGE;
+	rq->end_io = done;
+	elv_add_request(q, rq, where, 1);
+	generic_unplug_device(q);
+}
+
 /**
  * blk_execute_rq - insert a request into queue for execution
  * @q:		queue to insert the request in
@@ -2224,8 +2281,6 @@ int blk_execute_rq(request_queue_t *q, struct gendisk *bd_disk,
 	char sense[SCSI_SENSE_BUFFERSIZE];
 	int err = 0;
 
-	rq->rq_disk = bd_disk;
-
 	/*
 	 * we need an extra reference to the request, so we can look at
 	 * it after io completion
@@ -2238,11 +2293,8 @@ int blk_execute_rq(request_queue_t *q, struct gendisk *bd_disk,
 		rq->sense_len = 0;
 	}
 
-	rq->flags |= REQ_NOMERGE;
 	rq->waiting = &wait;
-	rq->end_io = blk_end_sync_rq;
-	elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 1);
-	generic_unplug_device(q);
+	blk_execute_rq_nowait(q, bd_disk, rq, 0, blk_end_sync_rq);
 	wait_for_completion(&wait);
 	rq->waiting = NULL;
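The two hunks above rebuild the synchronous blk_execute_rq() on top of the new blk_execute_rq_nowait(), with blk_end_sync_rq as the completion callback. A caller that wants to stay asynchronous can pass its own callback instead; a hedged sketch under the same 2.6.12-era assumptions (helper names are illustrative, not from this patch):

	/* Hypothetical asynchronous use of blk_execute_rq_nowait(). */
	static void my_rq_done(struct request *rq)
	{
		/* called by the block layer once the request completes */
		complete(rq->waiting);
	}

	static void start_rq_async(request_queue_t *q, struct gendisk *disk,
				   struct request *rq, struct completion *done)
	{
		rq->waiting = done;
		/* insert at the queue tail and return immediately */
		blk_execute_rq_nowait(q, disk, rq, 0, my_rq_done);
	}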
drivers/block/scsi_ioctl.c
@@ -231,17 +231,11 @@ static int sg_io(struct file *file, request_queue_t *q,
 	if (verify_command(file, cmd))
 		return -EPERM;
 
-	/*
-	 * we'll do that later
-	 */
-	if (hdr->iovec_count)
-		return -EOPNOTSUPP;
-
 	if (hdr->dxfer_len > (q->max_sectors << 9))
 		return -EIO;
 
 	reading = writing = 0;
-	if (hdr->dxfer_len) {
+	if (hdr->dxfer_len)
 		switch (hdr->dxfer_direction) {
 		default:
 			return -EINVAL;
@@ -261,11 +255,29 @@ static int sg_io(struct file *file, request_queue_t *q,
 	if (!rq)
 		return -ENOMEM;
 
-	if (reading || writing) {
-		ret = blk_rq_map_user(q, rq, hdr->dxferp, hdr->dxfer_len);
-		if (ret)
+	if (hdr->iovec_count) {
+		const int size = sizeof(struct sg_iovec) * hdr->iovec_count;
+		struct sg_iovec *iov;
+
+		iov = kmalloc(size, GFP_KERNEL);
+		if (!iov) {
+			ret = -ENOMEM;
 			goto out;
-	}
+		}
+
+		if (copy_from_user(iov, hdr->dxferp, size)) {
+			kfree(iov);
+			ret = -EFAULT;
+			goto out;
+		}
+
+		ret = blk_rq_map_user_iov(q, rq, iov, hdr->iovec_count);
+		kfree(iov);
+	} else if (hdr->dxfer_len)
+		ret = blk_rq_map_user(q, rq, hdr->dxferp, hdr->dxfer_len);
+
+	if (ret)
+		goto out;
 
 	/*
 	 * fill in request structure
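The net effect of these two hunks is that SG_IO no longer rejects hdr->iovec_count with EOPNOTSUPP: when it is non-zero, dxferp is treated as an array of struct sg_iovec, copied in, and handed to blk_rq_map_user_iov(). A minimal userspace sketch of what this enables (illustrative only; the INQUIRY command and buffer sizes are arbitrary):

	/* Illustrative userspace sketch: SG_IO with a two-element iovec. */
	#include <string.h>
	#include <sys/ioctl.h>
	#include <scsi/sg.h>

	int inquiry_iov(int fd)
	{
		unsigned char cdb[6] = { 0x12, 0, 0, 0, 96, 0 };	/* INQUIRY, 96 bytes */
		unsigned char sense[32], buf1[64], buf2[32];
		struct sg_iovec iov[2] = {
			{ .iov_base = buf1, .iov_len = sizeof(buf1) },
			{ .iov_base = buf2, .iov_len = sizeof(buf2) },
		};
		struct sg_io_hdr hdr;

		memset(&hdr, 0, sizeof(hdr));
		hdr.interface_id = 'S';
		hdr.cmdp = cdb;
		hdr.cmd_len = sizeof(cdb);
		hdr.dxfer_direction = SG_DXFER_FROM_DEV;
		hdr.dxferp = iov;		/* points at the iovec array */
		hdr.iovec_count = 2;		/* previously rejected with EOPNOTSUPP */
		hdr.dxfer_len = sizeof(buf1) + sizeof(buf2);
		hdr.sbp = sense;
		hdr.mx_sb_len = sizeof(sense);
		hdr.timeout = 5000;		/* milliseconds */

		return ioctl(fd, SG_IO, &hdr);
	}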
fs/bio.c
@@ -25,6 +25,7 @@
 #include <linux/module.h>
 #include <linux/mempool.h>
 #include <linux/workqueue.h>
+#include <scsi/sg.h>		/* for struct sg_iovec */
 
 #define BIO_POOL_SIZE 256
@@ -549,22 +550,34 @@ struct bio *bio_copy_user(request_queue_t *q, unsigned long uaddr,
 	return ERR_PTR(ret);
 }
 
-static struct bio *__bio_map_user(request_queue_t *q, struct block_device *bdev,
-				  unsigned long uaddr, unsigned int len,
-				  int write_to_vm)
+static struct bio *__bio_map_user_iov(request_queue_t *q,
+				      struct block_device *bdev,
+				      struct sg_iovec *iov, int iov_count,
+				      int write_to_vm)
 {
-	unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
-	unsigned long start = uaddr >> PAGE_SHIFT;
-	const int nr_pages = end - start;
-	int ret, offset, i;
+	int i, j;
+	int nr_pages = 0;
 	struct page **pages;
 	struct bio *bio;
+	int cur_page = 0;
+	int ret, offset;
 
-	/*
-	 * transfer and buffer must be aligned to at least hardsector
-	 * size for now, in the future we can relax this restriction
-	 */
-	if ((uaddr & queue_dma_alignment(q)) || (len & queue_dma_alignment(q)))
+	for (i = 0; i < iov_count; i++) {
+		unsigned long uaddr = (unsigned long) iov[i].iov_base;
+		unsigned long len = iov[i].iov_len;
+		unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+		unsigned long start = uaddr >> PAGE_SHIFT;
+
+		nr_pages += end - start;
+		/*
+		 * transfer and buffer must be aligned to at least hardsector
+		 * size for now, in the future we can relax this restriction
+		 */
+		if ((uaddr & queue_dma_alignment(q)) || (len & queue_dma_alignment(q)))
+			return ERR_PTR(-EINVAL);
+	}
+
+	if (!nr_pages)
 		return ERR_PTR(-EINVAL);
 
 	bio = bio_alloc(GFP_KERNEL, nr_pages);
@@ -576,42 +589,54 @@ static struct bio *__bio_map_user(request_queue_t *q, struct block_device *bdev,
 	if (!pages)
 		goto out;
 
-	down_read(&current->mm->mmap_sem);
-	ret = get_user_pages(current, current->mm, uaddr, nr_pages,
-						write_to_vm, 0, pages, NULL);
-	up_read(&current->mm->mmap_sem);
-
-	if (ret < nr_pages)
-		goto out;
-
-	bio->bi_bdev = bdev;
-
-	offset = uaddr & ~PAGE_MASK;
-	for (i = 0; i < nr_pages; i++) {
-		unsigned int bytes = PAGE_SIZE - offset;
-
-		if (len <= 0)
-			break;
-
-		if (bytes > len)
-			bytes = len;
+	memset(pages, 0, nr_pages * sizeof(struct page *));
+
+	for (i = 0; i < iov_count; i++) {
+		unsigned long uaddr = (unsigned long) iov[i].iov_base;
+		unsigned long len = iov[i].iov_len;
+		unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+		unsigned long start = uaddr >> PAGE_SHIFT;
+		const int local_nr_pages = end - start;
+		const int page_limit = cur_page + local_nr_pages;
+
+		down_read(&current->mm->mmap_sem);
+		ret = get_user_pages(current, current->mm, uaddr,
+				     local_nr_pages,
+				     write_to_vm, 0, &pages[cur_page], NULL);
+		up_read(&current->mm->mmap_sem);
+
+		if (ret < local_nr_pages)
+			goto out_unmap;
+
+		offset = uaddr & ~PAGE_MASK;
+		for (j = cur_page; j < page_limit; j++) {
+			unsigned int bytes = PAGE_SIZE - offset;
+
+			if (len <= 0)
+				break;
+
+			if (bytes > len)
+				bytes = len;
 
-		/*
-		 * sorry...
-		 */
-		if (__bio_add_page(q, bio, pages[i], bytes, offset) < bytes)
-			break;
+			/*
+			 * sorry...
+			 */
+			if (__bio_add_page(q, bio, pages[j], bytes, offset) < bytes)
+				break;
 
-		len -= bytes;
-		offset = 0;
-	}
+			len -= bytes;
+			offset = 0;
+		}
 
-	/*
-	 * release the pages we didn't map into the bio, if any
-	 */
-	while (i < nr_pages)
-		page_cache_release(pages[i++]);
+		cur_page = j;
+		/*
+		 * release the pages we didn't map into the bio, if any
+		 */
+		while (j < page_limit)
+			page_cache_release(pages[j++]);
+	}
 
 	kfree(pages);
 
 	/*
@@ -620,9 +645,17 @@ static struct bio *__bio_map_user(request_queue_t *q, struct block_device *bdev,
 	if (!write_to_vm)
 		bio->bi_rw |= (1 << BIO_RW);
 
+	bio->bi_bdev = bdev;
 	bio->bi_flags |= (1 << BIO_USER_MAPPED);
 	return bio;
-out:
+
+out_unmap:
+	for (i = 0; i < nr_pages; i++) {
+		if (!pages[i])
+			break;
+		page_cache_release(pages[i]);
+	}
+out:
 	kfree(pages);
 	bio_put(bio);
 	return ERR_PTR(ret);
@@ -641,10 +674,34 @@ static struct bio *__bio_map_user(request_queue_t *q, struct block_device *bdev,
  */
 struct bio *bio_map_user(request_queue_t *q, struct block_device *bdev,
 			 unsigned long uaddr, unsigned int len, int write_to_vm)
+{
+	struct sg_iovec iov;
+
+	iov.iov_base = (__user void *) uaddr;
+	iov.iov_len = len;
+
+	return bio_map_user_iov(q, bdev, &iov, 1, write_to_vm);
+}
+
+/**
+ * bio_map_user_iov - map user sg_iovec table into bio
+ * @q: the request_queue_t for the bio
+ * @bdev: destination block device
+ * @iov: the iovec.
+ * @iov_count: number of elements in the iovec
+ * @write_to_vm: bool indicating writing to pages or not
+ *
+ * Map the user space address into a bio suitable for io to a block
+ * device. Returns an error pointer in case of error.
+ */
+struct bio *bio_map_user_iov(request_queue_t *q, struct block_device *bdev,
+			     struct sg_iovec *iov, int iov_count,
+			     int write_to_vm)
 {
 	struct bio *bio;
+	int len = 0, i;
 
-	bio = __bio_map_user(q, bdev, uaddr, len, write_to_vm);
+	bio = __bio_map_user_iov(q, bdev, iov, iov_count, write_to_vm);
 	if (IS_ERR(bio))
 		return bio;
@@ -657,6 +714,9 @@ struct bio *bio_map_user(request_queue_t *q, struct block_device *bdev,
 	 */
 	bio_get(bio);
 
+	for (i = 0; i < iov_count; i++)
+		len += iov[i].iov_len;
+
 	if (bio->bi_size == len)
 		return bio;
include/linux/bio.h
@@ -281,6 +281,10 @@ extern int bio_add_page(struct bio *, struct page *, unsigned int,unsigned int);
 extern int bio_get_nr_vecs(struct block_device *);
 extern struct bio *bio_map_user(struct request_queue *, struct block_device *,
 				unsigned long, unsigned int, int);
+struct sg_iovec;
+extern struct bio *bio_map_user_iov(struct request_queue *,
+				    struct block_device *,
+				    struct sg_iovec *, int, int);
 extern void bio_unmap_user(struct bio *);
 extern struct bio *bio_map_kern(struct request_queue *, void *, unsigned int,
 				unsigned int);
include/linux/blkdev.h
@@ -561,6 +561,7 @@ extern void blk_queue_activity_fn(request_queue_t *, activity_fn *, void *);
 extern int blk_rq_map_user(request_queue_t *, struct request *, void __user *, unsigned int);
 extern int blk_rq_unmap_user(struct bio *, unsigned int);
 extern int blk_rq_map_kern(request_queue_t *, struct request *, void *, unsigned int, unsigned int);
+extern int blk_rq_map_user_iov(request_queue_t *, struct request *, struct sg_iovec *, int);
 extern int blk_execute_rq(request_queue_t *, struct gendisk *, struct request *);
 
 static inline request_queue_t *bdev_get_queue(struct block_device *bdev)