openeuler / raspberrypi-kernel
Commit 6728cb0e
Authored Jan 31, 2008 by Jens Axboe

block: make core bits checkpatch compliant

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Parent 22b13210
Showing 8 changed files with 117 additions and 142 deletions
block/blk-barrier.c   +2  -3
block/blk-core.c      +72 -81
block/blk-exec.c      +0  -1
block/blk-map.c       +4  -6
block/blk-merge.c     +6  -6
block/blk-settings.c  +27 -34
block/blk-sysfs.c     +3  -2
block/blk-tag.c       +3  -9
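The hunks that follow are mechanical cleanups of the kind reported by the kernel's scripts/checkpatch.pl: printk() calls gain an explicit KERN_* level and use __FUNCTION__ instead of a hard-coded function name, assignments are moved out of if () conditions, EXPORT_SYMBOL() lines sit directly under their functions, over-long lines are wrapped, and pointer and comma spacing is normalized. As a rough, hypothetical illustration of the logging pattern only (plain userspace C with fprintf() and a made-up LOG_ERR macro standing in for printk()/KERN_ERR; setup_queue() is an invented name and none of this is code from the commit), the before/after shape looks like this:

/*
 * Hypothetical sketch, not part of the commit: mirrors the checkpatch-driven
 * style of keeping an explicit severity prefix, using __func__ (the standard
 * C spelling of the kernel's __FUNCTION__) instead of repeating the function
 * name in the format string, and keeping assignments out of if () conditions.
 */
#include <stdio.h>
#include <stdlib.h>

#define LOG_ERR "<3>"	/* assumption: stand-in for the kernel's KERN_ERR prefix */

static int setup_queue(void (*prepare_flush_fn)(void))
{
	if (prepare_flush_fn == NULL) {
		/* old style: fprintf(stderr, "setup_queue: prepare_flush_fn required\n"); */
		fprintf(stderr, LOG_ERR "%s: prepare_flush_fn required\n", __func__);
		return -1;
	}
	return 0;
}

int main(void)
{
	/* old style folded the call into the condition: if ((rc = setup_queue(NULL))) */
	int rc = setup_queue(NULL);

	if (rc)
		return EXIT_FAILURE;
	return EXIT_SUCCESS;
}

The per-file diffs below apply the same kind of transformation call site by call site.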
block/blk-barrier.c
@@ -26,7 +26,8 @@ int blk_queue_ordered(struct request_queue *q, unsigned ordered,
 {
 	if (ordered & (QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH) &&
 	    prepare_flush_fn == NULL) {
-		printk(KERN_ERR "blk_queue_ordered: prepare_flush_fn required\n");
+		printk(KERN_ERR "%s: prepare_flush_fn required\n",
+			__FUNCTION__);
 		return -EINVAL;
 	}
@@ -47,7 +48,6 @@ int blk_queue_ordered(struct request_queue *q, unsigned ordered,
 	return 0;
 }
-
 EXPORT_SYMBOL(blk_queue_ordered);
 
 /*
@@ -315,5 +315,4 @@ int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
 	bio_put(bio);
 	return ret;
 }
-
 EXPORT_SYMBOL(blkdev_issue_flush);
block/blk-core.c
@@ -3,7 +3,8 @@
  * Copyright (C) 1994, Karl Keyte: Added support for disk statistics
  * Elevator latency, (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
  * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
- * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au> - July2000
+ * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
+ *	- July2000
  * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - may 2001
  */
@@ -42,7 +43,7 @@ struct kmem_cache *request_cachep;
 /*
  * For queue allocation
  */
-struct kmem_cache *blk_requestq_cachep = NULL;
+struct kmem_cache *blk_requestq_cachep;
 
 /*
  * Controlling structure to kblockd
@@ -137,7 +138,7 @@ static void req_bio_endio(struct request *rq, struct bio *bio,
 		error = -EIO;
 
 	if (unlikely(nbytes > bio->bi_size)) {
-		printk("%s: want %u bytes done, only %u left\n",
+		printk(KERN_ERR "%s: want %u bytes done, %u left\n",
 		       __FUNCTION__, nbytes, bio->bi_size);
 		nbytes = bio->bi_size;
 	}
@@ -161,23 +162,26 @@ void blk_dump_rq_flags(struct request *rq, char *msg)
 {
 	int bit;
 
-	printk("%s: dev %s: type=%x, flags=%x\n", msg,
+	printk(KERN_INFO "%s: dev %s: type=%x, flags=%x\n", msg,
 		rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type,
 		rq->cmd_flags);
 
-	printk("\nsector %llu, nr/cnr %lu/%u\n", (unsigned long long)rq->sector,
-		rq->nr_sectors,
-		rq->current_nr_sectors);
-	printk("bio %p, biotail %p, buffer %p, data %p, len %u\n", rq->bio,
-		rq->biotail, rq->buffer, rq->data, rq->data_len);
+	printk(KERN_INFO "  sector %llu, nr/cnr %lu/%u\n",
+						(unsigned long long)rq->sector,
+						rq->nr_sectors,
+						rq->current_nr_sectors);
+	printk(KERN_INFO "  bio %p, biotail %p, buffer %p, data %p, len %u\n",
+						rq->bio, rq->biotail,
+						rq->buffer, rq->data,
+						rq->data_len);
 
 	if (blk_pc_request(rq)) {
-		printk("cdb: ");
+		printk(KERN_INFO "  cdb: ");
 		for (bit = 0; bit < sizeof(rq->cmd); bit++)
 			printk("%02x ", rq->cmd[bit]);
 		printk("\n");
 	}
 }
 EXPORT_SYMBOL(blk_dump_rq_flags);
 
 /*
@@ -204,7 +208,6 @@ void blk_plug_device(struct request_queue *q)
 		blk_add_trace_generic(q, NULL, 0, BLK_TA_PLUG);
 	}
 }
-
 EXPORT_SYMBOL(blk_plug_device);
 
 /*
@@ -221,7 +224,6 @@ int blk_remove_plug(struct request_queue *q)
 	del_timer(&q->unplug_timer);
 	return 1;
 }
-
 EXPORT_SYMBOL(blk_remove_plug);
 
 /*
@@ -328,7 +330,6 @@ void blk_start_queue(struct request_queue *q)
 		kblockd_schedule_work(&q->unplug_work);
 	}
 }
-
 EXPORT_SYMBOL(blk_start_queue);
 
 /**
@@ -408,7 +409,7 @@ void blk_put_queue(struct request_queue *q)
 }
 EXPORT_SYMBOL(blk_put_queue);
 
-void blk_cleanup_queue(struct request_queue * q)
+void blk_cleanup_queue(struct request_queue *q)
 {
 	mutex_lock(&q->sysfs_lock);
 	set_bit(QUEUE_FLAG_DEAD, &q->queue_flags);
@@ -419,7 +420,6 @@ void blk_cleanup_queue(struct request_queue * q)
 	blk_put_queue(q);
 }
-
 EXPORT_SYMBOL(blk_cleanup_queue);
 
 static int blk_init_free_list(struct request_queue *q)
@@ -575,7 +575,6 @@ int blk_get_queue(struct request_queue *q)
 	return 1;
 }
-
 EXPORT_SYMBOL(blk_get_queue);
 
 static inline void blk_free_request(struct request_queue *q, struct request *rq)
@@ -774,7 +773,7 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
 	 */
 	if (ioc_batching(q, ioc))
 		ioc->nr_batch_requests--;
-
+
 	rq_init(q, rq);
 
 	blk_add_trace_generic(q, bio, rw, BLK_TA_GETRQ);
@@ -888,7 +887,6 @@ void blk_requeue_request(struct request_queue *q, struct request *rq)
 	elv_requeue_request(q, rq);
 }
-
 EXPORT_SYMBOL(blk_requeue_request);
 
 /**
@@ -939,7 +937,6 @@ void blk_insert_request(struct request_queue *q, struct request *rq,
 		blk_start_queueing(q);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
-
 EXPORT_SYMBOL(blk_insert_request);
 
 /*
@@ -947,7 +944,7 @@ EXPORT_SYMBOL(blk_insert_request);
  * queue lock is held and interrupts disabled, as we muck with the
  * request queue list.
  */
-static inline void add_request(struct request_queue * q, struct request * req)
+static inline void add_request(struct request_queue *q, struct request *req)
 {
 	drive_stat_acct(req, 1);
@@ -957,7 +954,7 @@ static inline void add_request(struct request_queue * q, struct request * req)
 	 */
 	__elv_add_request(q, req, ELEVATOR_INSERT_SORT, 0);
 }
-
+
 /*
  * disk_round_stats()	- Round off the performance stats on a struct
  *			  disk_stats.
@@ -987,7 +984,6 @@ void disk_round_stats(struct gendisk *disk)
 	}
 	disk->stamp = now;
 }
-
 EXPORT_SYMBOL_GPL(disk_round_stats);
 
 /*
@@ -1017,7 +1013,6 @@ void __blk_put_request(struct request_queue *q, struct request *req)
 		freed_request(q, rw, priv);
 	}
 }
-
 EXPORT_SYMBOL_GPL(__blk_put_request);
 
 void blk_put_request(struct request *req)
@@ -1035,7 +1030,6 @@ void blk_put_request(struct request *req)
 		spin_unlock_irqrestore(q->queue_lock, flags);
 	}
 }
-
 EXPORT_SYMBOL(blk_put_request);
 
 void init_request_from_bio(struct request *req, struct bio *bio)
@@ -1096,53 +1090,53 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 	el_ret = elv_merge(q, &req, bio);
 	switch (el_ret) {
-		case ELEVATOR_BACK_MERGE:
-			BUG_ON(!rq_mergeable(req));
+	case ELEVATOR_BACK_MERGE:
+		BUG_ON(!rq_mergeable(req));
 
-			if (!ll_back_merge_fn(q, req, bio))
-				break;
+		if (!ll_back_merge_fn(q, req, bio))
+			break;
 
-			blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);
+		blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);
 
-			req->biotail->bi_next = bio;
-			req->biotail = bio;
-			req->nr_sectors = req->hard_nr_sectors += nr_sectors;
-			req->ioprio = ioprio_best(req->ioprio, prio);
-			drive_stat_acct(req, 0);
-			if (!attempt_back_merge(q, req))
-				elv_merged_request(q, req, el_ret);
-			goto out;
+		req->biotail->bi_next = bio;
+		req->biotail = bio;
+		req->nr_sectors = req->hard_nr_sectors += nr_sectors;
+		req->ioprio = ioprio_best(req->ioprio, prio);
+		drive_stat_acct(req, 0);
+		if (!attempt_back_merge(q, req))
+			elv_merged_request(q, req, el_ret);
+		goto out;
 
-		case ELEVATOR_FRONT_MERGE:
-			BUG_ON(!rq_mergeable(req));
+	case ELEVATOR_FRONT_MERGE:
+		BUG_ON(!rq_mergeable(req));
 
-			if (!ll_front_merge_fn(q, req, bio))
-				break;
+		if (!ll_front_merge_fn(q, req, bio))
+			break;
 
-			blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);
+		blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);
 
-			bio->bi_next = req->bio;
-			req->bio = bio;
+		bio->bi_next = req->bio;
+		req->bio = bio;
 
-			/*
-			 * may not be valid. if the low level driver said
-			 * it didn't need a bounce buffer then it better
-			 * not touch req->buffer either...
-			 */
-			req->buffer = bio_data(bio);
-			req->current_nr_sectors = bio_cur_sectors(bio);
-			req->hard_cur_sectors = req->current_nr_sectors;
-			req->sector = req->hard_sector = bio->bi_sector;
-			req->nr_sectors = req->hard_nr_sectors += nr_sectors;
-			req->ioprio = ioprio_best(req->ioprio, prio);
-			drive_stat_acct(req, 0);
-			if (!attempt_front_merge(q, req))
-				elv_merged_request(q, req, el_ret);
-			goto out;
-
-		/* ELV_NO_MERGE: elevator says don't/can't merge. */
-		default:
-			;
+		/*
+		 * may not be valid. if the low level driver said
+		 * it didn't need a bounce buffer then it better
+		 * not touch req->buffer either...
+		 */
+		req->buffer = bio_data(bio);
+		req->current_nr_sectors = bio_cur_sectors(bio);
+		req->hard_cur_sectors = req->current_nr_sectors;
+		req->sector = req->hard_sector = bio->bi_sector;
+		req->nr_sectors = req->hard_nr_sectors += nr_sectors;
+		req->ioprio = ioprio_best(req->ioprio, prio);
+		drive_stat_acct(req, 0);
+		if (!attempt_front_merge(q, req))
+			elv_merged_request(q, req, el_ret);
+		goto out;
+
+	/* ELV_NO_MERGE: elevator says don't/can't merge. */
+	default:
+		;
 	}
 
 get_rq:
@@ -1350,7 +1344,7 @@ static inline void __generic_make_request(struct bio *bio)
 	}
 
 	if (unlikely(nr_sectors > q->max_hw_sectors)) {
-		printk("bio too big device %s (%u > %u)\n",
+		printk(KERN_ERR "bio too big device %s (%u > %u)\n",
 			bdevname(bio->bi_bdev, b),
 			bio_sectors(bio),
 			q->max_hw_sectors);
@@ -1439,7 +1433,6 @@ void generic_make_request(struct bio *bio)
 	} while (bio);
 	current->bio_tail = NULL; /* deactivate */
 }
-
 EXPORT_SYMBOL(generic_make_request);
 
 /**
@@ -1480,13 +1473,12 @@ void submit_bio(int rw, struct bio *bio)
 			current->comm, task_pid_nr(current),
 				(rw & WRITE) ? "WRITE" : "READ",
 				(unsigned long long)bio->bi_sector,
-				bdevname(bio->bi_bdev,b));
+				bdevname(bio->bi_bdev, b));
 		}
 	}
 
 	generic_make_request(bio);
 }
-
 EXPORT_SYMBOL(submit_bio);
 
 /**
@@ -1518,9 +1510,8 @@ static int __end_that_request_first(struct request *req, int error,
 	if (!blk_pc_request(req))
 		req->errors = 0;
 
-	if (error) {
-		if (blk_fs_request(req) && !(req->cmd_flags & REQ_QUIET))
-			printk("end_request: I/O error, dev %s, sector %llu\n",
+	if (error && (blk_fs_request(req) && !(req->cmd_flags & REQ_QUIET))) {
+		printk(KERN_ERR "end_request: I/O error, dev %s, sector %llu\n",
 				req->rq_disk ? req->rq_disk->disk_name : "?",
 				(unsigned long long)req->sector);
 	}
@@ -1554,9 +1545,9 @@ static int __end_that_request_first(struct request *req, int error,
 		if (unlikely(bio->bi_idx >= bio->bi_vcnt)) {
 			blk_dump_rq_flags(req, "__end_that");
-			printk("%s: bio idx %d >= vcnt %d\n",
-				__FUNCTION__,
-				bio->bi_idx, bio->bi_vcnt);
+			printk(KERN_ERR "%s: bio idx %d >= vcnt %d\n",
+					__FUNCTION__, bio->bi_idx,
+					bio->bi_vcnt);
 			break;
 		}
@@ -1582,7 +1573,8 @@ static int __end_that_request_first(struct request *req, int error,
 		total_bytes += nbytes;
 		nr_bytes -= nbytes;
 
-		if ((bio = req->bio)) {
+		bio = req->bio;
+		if (bio) {
 			/*
 			 * end more in this run, or just return 'not-done'
 			 */
@@ -1626,15 +1618,16 @@ static void blk_done_softirq(struct softirq_action *h)
 	local_irq_enable();
 
 	while (!list_empty(&local_list)) {
-		struct request *rq = list_entry(local_list.next, struct request, donelist);
+		struct request *rq;
 
+		rq = list_entry(local_list.next, struct request, donelist);
 		list_del_init(&rq->donelist);
 		rq->q->softirq_done_fn(rq);
 	}
 }
 
-static int __cpuinit blk_cpu_notify(struct notifier_block *self, unsigned long action,
-			  void *hcpu)
+static int __cpuinit blk_cpu_notify(struct notifier_block *self,
+				    unsigned long action, void *hcpu)
 {
 	/*
 	 * If a CPU goes away, splice its entries to the current CPU
@@ -1676,7 +1669,7 @@ void blk_complete_request(struct request *req)
 	unsigned long flags;
 
 	BUG_ON(!req->q->softirq_done_fn);
-
+
 	local_irq_save(flags);
 
 	cpu_list = &__get_cpu_var(blk_cpu_done);
@@ -1685,9 +1678,8 @@ void blk_complete_request(struct request *req)
 	local_irq_restore(flags);
 }
-
 EXPORT_SYMBOL(blk_complete_request);
 
 /*
  * queue lock must be held
  */
@@ -2002,7 +1994,6 @@ int kblockd_schedule_work(struct work_struct *work)
 {
 	return queue_work(kblockd_workqueue, work);
 }
-
 EXPORT_SYMBOL(kblockd_schedule_work);
 
 void kblockd_flush_work(struct work_struct *work)
block/blk-exec.c
@@ -101,5 +101,4 @@ int blk_execute_rq(struct request_queue *q, struct gendisk *bd_disk,
 	return err;
 }
-
 EXPORT_SYMBOL(blk_execute_rq);
block/blk-map.c
@@ -53,7 +53,8 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
 	 * direct dma. else, set up kernel bounce buffers
 	 */
 	uaddr = (unsigned long) ubuf;
-	if (!(uaddr & queue_dma_alignment(q)) && !(len & queue_dma_alignment(q)))
+	if (!(uaddr & queue_dma_alignment(q)) &&
+	    !(len & queue_dma_alignment(q)))
 		bio = bio_map_user(q, NULL, uaddr, len, reading);
 	else
 		bio = bio_copy_user(q, uaddr, len, reading);
@@ -144,7 +145,6 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq,
 		blk_rq_unmap_user(bio);
 	return ret;
 }
-
 EXPORT_SYMBOL(blk_rq_map_user);
 
 /**
@@ -179,7 +179,8 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
 	/* we don't allow misaligned data like bio_map_user() does. If the
 	 * user is using sg, they're expected to know the alignment constraints
 	 * and respect them accordingly */
-	bio = bio_map_user_iov(q, NULL, iov, iov_count, rq_data_dir(rq) == READ);
+	bio = bio_map_user_iov(q, NULL, iov, iov_count,
+				rq_data_dir(rq) == READ);
 	if (IS_ERR(bio))
 		return PTR_ERR(bio);
@@ -194,7 +195,6 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
 	rq->buffer = rq->data = NULL;
 	return 0;
 }
-
 EXPORT_SYMBOL(blk_rq_map_user_iov);
 
 /**
@@ -227,7 +227,6 @@ int blk_rq_unmap_user(struct bio *bio)
 	return ret;
 }
-
 EXPORT_SYMBOL(blk_rq_unmap_user);
 
 /**
@@ -260,5 +259,4 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
 	rq->buffer = rq->data = NULL;
 	return 0;
 }
-
 EXPORT_SYMBOL(blk_rq_map_kern);
block/blk-merge.c
@@ -32,7 +32,7 @@ void blk_recalc_rq_sectors(struct request *rq, int nsect)
 	 * size, something has gone terribly wrong
 	 */
 	if (rq->nr_sectors < rq->current_nr_sectors) {
-		printk("blk: request botched\n");
+		printk(KERN_ERR "blk: request botched\n");
 		rq->nr_sectors = rq->current_nr_sectors;
 	}
 }
@@ -235,7 +235,6 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
 	return nsegs;
 }
-
 EXPORT_SYMBOL(blk_rq_map_sg);
 
 static inline int ll_new_mergeable(struct request_queue *q,
@@ -305,8 +304,8 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req,
 	if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
 		blk_recount_segments(q, bio);
 	len = req->biotail->bi_hw_back_size + bio->bi_hw_front_size;
-	if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(req->biotail), __BVEC_START(bio)) &&
-	    !BIOVEC_VIRT_OVERSIZE(len)) {
+	if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(req->biotail), __BVEC_START(bio))
+	    && !BIOVEC_VIRT_OVERSIZE(len)) {
 		int mergeable = ll_new_mergeable(q, req, bio);
 
 		if (mergeable) {
@@ -321,7 +320,7 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req,
 	return ll_new_hw_segment(q, req, bio);
 }
 
-int ll_front_merge_fn(struct request_queue *q, struct request *req, 
+int ll_front_merge_fn(struct request_queue *q, struct request *req,
 		      struct bio *bio)
 {
 	unsigned short max_sectors;
@@ -388,7 +387,8 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
 	total_hw_segments = req->nr_hw_segments + next->nr_hw_segments;
 	if (blk_hw_contig_segment(q, req->biotail, next->bio)) {
-		int len = req->biotail->bi_hw_back_size + next->bio->bi_hw_front_size;
+		int len = req->biotail->bi_hw_back_size +
+				next->bio->bi_hw_front_size;
 		/*
 		 * propagate the combined length to the end of the requests
 		 */
block/blk-settings.c
@@ -10,8 +10,10 @@
 #include "blk.h"
 
-unsigned long blk_max_low_pfn, blk_max_pfn;
+unsigned long blk_max_low_pfn;
 EXPORT_SYMBOL(blk_max_low_pfn);
+
+unsigned long blk_max_pfn;
 EXPORT_SYMBOL(blk_max_pfn);
 
 /**
@@ -29,7 +31,6 @@ void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn)
 {
 	q->prep_rq_fn = pfn;
 }
-
 EXPORT_SYMBOL(blk_queue_prep_rq);
 
 /**
@@ -52,14 +53,12 @@ void blk_queue_merge_bvec(struct request_queue *q, merge_bvec_fn *mbfn)
 {
 	q->merge_bvec_fn = mbfn;
 }
-
 EXPORT_SYMBOL(blk_queue_merge_bvec);
 
 void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn)
 {
 	q->softirq_done_fn = fn;
 }
-
 EXPORT_SYMBOL(blk_queue_softirq_done);
 
 /**
@@ -84,7 +83,7 @@ EXPORT_SYMBOL(blk_queue_softirq_done);
  * __bio_kmap_atomic() to get a temporary kernel mapping, or by calling
 * blk_queue_bounce() to create a buffer in normal memory.
 **/
-void blk_queue_make_request(struct request_queue * q, make_request_fn * mfn)
+void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
 {
 	/*
 	 * set defaults
@@ -93,7 +92,8 @@ void blk_queue_make_request(struct request_queue * q, make_request_fn * mfn)
 	blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);
 	blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS);
 	q->make_request_fn = mfn;
-	q->backing_dev_info.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
+	q->backing_dev_info.ra_pages =
+			(VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
 	q->backing_dev_info.state = 0;
 	q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
 	blk_queue_max_sectors(q, SAFE_MAX_SECTORS);
@@ -117,7 +117,6 @@ void blk_queue_make_request(struct request_queue * q, make_request_fn * mfn)
 	 */
 	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
 }
-
 EXPORT_SYMBOL(blk_queue_make_request);
 
 /**
@@ -133,7 +132,7 @@ EXPORT_SYMBOL(blk_queue_make_request);
 **/
 void blk_queue_bounce_limit(struct request_queue *q, u64 dma_addr)
 {
-	unsigned long bounce_pfn = dma_addr >> PAGE_SHIFT;
+	unsigned long b_pfn = dma_addr >> PAGE_SHIFT;
 	int dma = 0;
 
 	q->bounce_gfp = GFP_NOIO;
@@ -141,21 +140,20 @@ void blk_queue_bounce_limit(struct request_queue *q, u64 dma_addr)
 	/* Assume anything <= 4GB can be handled by IOMMU.
 	   Actually some IOMMUs can handle everything, but I don't
 	   know of a way to test this here. */
-	if (bounce_pfn < (min_t(u64,0xffffffff,BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
+	if (b_pfn < (min_t(u64, 0xffffffff, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
 		dma = 1;
 	q->bounce_pfn = max_low_pfn;
 #else
-	if (bounce_pfn < blk_max_low_pfn)
+	if (b_pfn < blk_max_low_pfn)
 		dma = 1;
-	q->bounce_pfn = bounce_pfn;
+	q->bounce_pfn = b_pfn;
 #endif
 	if (dma) {
 		init_emergency_isa_pool();
 		q->bounce_gfp = GFP_NOIO | GFP_DMA;
-		q->bounce_pfn = bounce_pfn;
+		q->bounce_pfn = b_pfn;
 	}
 }
-
 EXPORT_SYMBOL(blk_queue_bounce_limit);
 
 /**
@@ -171,7 +169,8 @@ void blk_queue_max_sectors(struct request_queue *q, unsigned int max_sectors)
 {
 	if ((max_sectors << 9) < PAGE_CACHE_SIZE) {
 		max_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
-		printk("%s: set to minimum %d\n", __FUNCTION__, max_sectors);
+		printk(KERN_INFO "%s: set to minimum %d\n",
+			__FUNCTION__, max_sectors);
 	}
 
 	if (BLK_DEF_MAX_SECTORS > max_sectors)
@@ -181,7 +180,6 @@ void blk_queue_max_sectors(struct request_queue *q, unsigned int max_sectors)
 		q->max_hw_sectors = max_sectors;
 	}
 }
-
 EXPORT_SYMBOL(blk_queue_max_sectors);
 
 /**
@@ -199,12 +197,12 @@ void blk_queue_max_phys_segments(struct request_queue *q,
 {
 	if (!max_segments) {
 		max_segments = 1;
-		printk("%s: set to minimum %d\n", __FUNCTION__, max_segments);
+		printk(KERN_INFO "%s: set to minimum %d\n",
+			__FUNCTION__, max_segments);
 	}
 
 	q->max_phys_segments = max_segments;
 }
-
 EXPORT_SYMBOL(blk_queue_max_phys_segments);
 
 /**
@@ -223,12 +221,12 @@ void blk_queue_max_hw_segments(struct request_queue *q,
 {
 	if (!max_segments) {
 		max_segments = 1;
-		printk("%s: set to minimum %d\n", __FUNCTION__, max_segments);
+		printk(KERN_INFO "%s: set to minimum %d\n",
+			__FUNCTION__, max_segments);
 	}
 
 	q->max_hw_segments = max_segments;
 }
-
 EXPORT_SYMBOL(blk_queue_max_hw_segments);
 
 /**
@@ -244,12 +242,12 @@ void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
 {
 	if (max_size < PAGE_CACHE_SIZE) {
 		max_size = PAGE_CACHE_SIZE;
-		printk("%s: set to minimum %d\n", __FUNCTION__, max_size);
+		printk(KERN_INFO "%s: set to minimum %d\n",
+			__FUNCTION__, max_size);
 	}
 
 	q->max_segment_size = max_size;
 }
-
 EXPORT_SYMBOL(blk_queue_max_segment_size);
 
 /**
@@ -267,7 +265,6 @@ void blk_queue_hardsect_size(struct request_queue *q, unsigned short size)
 {
 	q->hardsect_size = size;
 }
-
 EXPORT_SYMBOL(blk_queue_hardsect_size);
 
 /*
@@ -283,17 +280,16 @@ EXPORT_SYMBOL(blk_queue_hardsect_size);
 void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
 {
 	/* zero is "infinity" */
-	t->max_sectors = min_not_zero(t->max_sectors,b->max_sectors);
-	t->max_hw_sectors = min_not_zero(t->max_hw_sectors,b->max_hw_sectors);
+	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
+	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
 
-	t->max_phys_segments = min(t->max_phys_segments,b->max_phys_segments);
-	t->max_hw_segments = min(t->max_hw_segments,b->max_hw_segments);
-	t->max_segment_size = min(t->max_segment_size,b->max_segment_size);
-	t->hardsect_size = max(t->hardsect_size,b->hardsect_size);
+	t->max_phys_segments = min(t->max_phys_segments, b->max_phys_segments);
+	t->max_hw_segments = min(t->max_hw_segments, b->max_hw_segments);
+	t->max_segment_size = min(t->max_segment_size, b->max_segment_size);
+	t->hardsect_size = max(t->hardsect_size, b->hardsect_size);
 	if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags))
 		clear_bit(QUEUE_FLAG_CLUSTER, &t->queue_flags);
 }
-
 EXPORT_SYMBOL(blk_queue_stack_limits);
 
 /**
@@ -332,7 +328,6 @@ int blk_queue_dma_drain(struct request_queue *q, void *buf,
 	return 0;
 }
-
 EXPORT_SYMBOL_GPL(blk_queue_dma_drain);
 
 /**
@@ -344,12 +339,12 @@ void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
 {
 	if (mask < PAGE_CACHE_SIZE - 1) {
 		mask = PAGE_CACHE_SIZE - 1;
-		printk("%s: set to minimum %lx\n", __FUNCTION__, mask);
+		printk(KERN_INFO "%s: set to minimum %lx\n",
+			__FUNCTION__, mask);
 	}
 
 	q->seg_boundary_mask = mask;
 }
-
 EXPORT_SYMBOL(blk_queue_segment_boundary);
 
 /**
@@ -366,7 +361,6 @@ void blk_queue_dma_alignment(struct request_queue *q, int mask)
 {
 	q->dma_alignment = mask;
 }
-
 EXPORT_SYMBOL(blk_queue_dma_alignment);
 
 /**
@@ -390,7 +384,6 @@ void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
 	if (mask > q->dma_alignment)
 		q->dma_alignment = mask;
 }
-
 EXPORT_SYMBOL(blk_queue_update_dma_alignment);
 
 int __init blk_settings_init(void)
block/blk-sysfs.c
@@ -207,12 +207,13 @@ queue_attr_store(struct kobject *kobj, struct attribute *attr,
 		   const char *page, size_t length)
 {
 	struct queue_sysfs_entry *entry = to_queue(attr);
-	struct request_queue *q = container_of(kobj, struct request_queue, kobj);
+	struct request_queue *q;
 	ssize_t res;
 
 	if (!entry->store)
 		return -EIO;
 
+	q = container_of(kobj, struct request_queue, kobj);
 	mutex_lock(&q->sysfs_lock);
 	if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) {
 		mutex_unlock(&q->sysfs_lock);
block/blk-tag.c
@@ -21,7 +21,6 @@ struct request *blk_queue_find_tag(struct request_queue *q, int tag)
 {
 	return blk_map_queue_find_tag(q->queue_tags, tag);
 }
-
 EXPORT_SYMBOL(blk_queue_find_tag);
 
 /**
@@ -99,7 +98,6 @@ void blk_queue_free_tags(struct request_queue *q)
 {
 	clear_bit(QUEUE_FLAG_QUEUED, &q->queue_flags);
 }
-
 EXPORT_SYMBOL(blk_queue_free_tags);
 
 static int
@@ -185,7 +183,8 @@ int blk_queue_init_tags(struct request_queue *q, int depth,
 		if (!tags)
 			goto fail;
 	} else if (q->queue_tags) {
-		if ((rc = blk_queue_resize_tags(q, depth)))
+		rc = blk_queue_resize_tags(q, depth);
+		if (rc)
 			return rc;
 		set_bit(QUEUE_FLAG_QUEUED, &q->queue_flags);
 		return 0;
@@ -203,7 +202,6 @@ int blk_queue_init_tags(struct request_queue *q, int depth,
 	kfree(tags);
 	return -ENOMEM;
 }
-
 EXPORT_SYMBOL(blk_queue_init_tags);
 
 /**
@@ -260,7 +258,6 @@ int blk_queue_resize_tags(struct request_queue *q, int new_depth)
 	kfree(tag_map);
 	return 0;
 }
-
 EXPORT_SYMBOL(blk_queue_resize_tags);
 
 /**
@@ -313,7 +310,6 @@ void blk_queue_end_tag(struct request_queue *q, struct request *rq)
 	clear_bit_unlock(tag, bqt->tag_map);
 	bqt->busy--;
 }
-
 EXPORT_SYMBOL(blk_queue_end_tag);
 
 /**
@@ -340,7 +336,7 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
 	int tag;
 
 	if (unlikely((rq->cmd_flags & REQ_QUEUED))) {
-		printk(KERN_ERR 
+		printk(KERN_ERR
 		       "%s: request %p for device [%s] already tagged %d",
 		       __FUNCTION__, rq,
 		       rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->tag);
@@ -370,7 +366,6 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
 	bqt->busy++;
 	return 0;
 }
-
 EXPORT_SYMBOL(blk_queue_start_tag);
 
 /**
@@ -392,5 +387,4 @@ void blk_queue_invalidate_tags(struct request_queue *q)
 	list_for_each_safe(tmp, n, &q->tag_busy_list)
 		blk_requeue_request(q, list_entry_rq(tmp));
 }
-
 EXPORT_SYMBOL(blk_queue_invalidate_tags);