Commit dfec8a14 authored by Mike Christie, committed by Jens Axboe

fs: have ll_rw_block users pass in op and flags separately

This has ll_rw_block users pass in the operation and flags separately,
so ll_rw_block can setup the bio op and bi_rw flags on the bio that
is submitted.
Signed-off-by: Mike Christie <mchristi@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
Parent 2a222ca9
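For readers skimming the diff: a minimal before/after sketch of the calling convention this change introduces, using only identifiers that appear in the hunks below (the surrounding buffer_head setup is omitted and assumed):

    /* Before: operation and flags were OR'd into a single 'rw' argument. */
    ll_rw_block(READ | REQ_META | REQ_PRIO, 1, &bh);

    /* After: the op (REQ_OP_READ/REQ_OP_WRITE) and the rq_flag_bits
     * (e.g. REQ_META, REQ_PRIO, READ_SYNC, READA) are passed separately,
     * so ll_rw_block() can set the bio op and bi_rw flags itself on the
     * bio it submits. Callers with no flags pass 0.
     */
    ll_rw_block(REQ_OP_READ, REQ_META | REQ_PRIO, 1, &bh);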
@@ -588,7 +588,7 @@ void write_boundary_block(struct block_device *bdev,
     struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
     if (bh) {
         if (buffer_dirty(bh))
-            ll_rw_block(WRITE, 1, &bh);
+            ll_rw_block(REQ_OP_WRITE, 0, 1, &bh);
         put_bh(bh);
     }
 }

@@ -1395,7 +1395,7 @@ void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
 {
     struct buffer_head *bh = __getblk(bdev, block, size);
     if (likely(bh)) {
-        ll_rw_block(READA, 1, &bh);
+        ll_rw_block(REQ_OP_READ, READA, 1, &bh);
         brelse(bh);
     }
 }

@@ -1955,7 +1955,7 @@ int __block_write_begin(struct page *page, loff_t pos, unsigned len,
         if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
             !buffer_unwritten(bh) &&
             (block_start < from || block_end > to)) {
-            ll_rw_block(READ, 1, &bh);
+            ll_rw_block(REQ_OP_READ, 0, 1, &bh);
             *wait_bh++=bh;
         }
     }

@@ -2852,7 +2852,7 @@ int block_truncate_page(struct address_space *mapping,
     if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
         err = -EIO;
-        ll_rw_block(READ, 1, &bh);
+        ll_rw_block(REQ_OP_READ, 0, 1, &bh);
         wait_on_buffer(bh);
         /* Uhhuh. Read error. Complain and punt. */
         if (!buffer_uptodate(bh))

@@ -3051,7 +3051,8 @@ EXPORT_SYMBOL(submit_bh);
 /**
  * ll_rw_block: low-level access to block devices (DEPRECATED)
- * @rw: whether to %READ or %WRITE or maybe %READA (readahead)
+ * @op: whether to %READ or %WRITE
+ * @op_flags: rq_flag_bits or %READA (readahead)
  * @nr: number of &struct buffer_heads in the array
  * @bhs: array of pointers to &struct buffer_head
  *

@@ -3074,7 +3075,7 @@ EXPORT_SYMBOL(submit_bh);
  * All of the buffers must be for the same device, and must also be a
  * multiple of the current approved size for the device.
  */
-void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
+void ll_rw_block(int op, int op_flags, int nr, struct buffer_head *bhs[])
 {
     int i;

@@ -3083,18 +3084,18 @@ void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
         if (!trylock_buffer(bh))
             continue;
-        if (rw == WRITE) {
+        if (op == WRITE) {
             if (test_clear_buffer_dirty(bh)) {
                 bh->b_end_io = end_buffer_write_sync;
                 get_bh(bh);
-                submit_bh(rw, 0, bh);
+                submit_bh(op, op_flags, bh);
                 continue;
             }
         } else {
             if (!buffer_uptodate(bh)) {
                 bh->b_end_io = end_buffer_read_sync;
                 get_bh(bh);
-                submit_bh(rw, 0, bh);
+                submit_bh(op, op_flags, bh);
                 continue;
             }
         }
...

@@ -981,7 +981,7 @@ struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
         return bh;
     if (!bh || buffer_uptodate(bh))
         return bh;
-    ll_rw_block(READ | REQ_META | REQ_PRIO, 1, &bh);
+    ll_rw_block(REQ_OP_READ, REQ_META | REQ_PRIO, 1, &bh);
     wait_on_buffer(bh);
     if (buffer_uptodate(bh))
         return bh;

@@ -1135,7 +1135,7 @@ static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len,
         if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
             !buffer_unwritten(bh) &&
             (block_start < from || block_end > to)) {
-            ll_rw_block(READ, 1, &bh);
+            ll_rw_block(REQ_OP_READ, 0, 1, &bh);
             *wait_bh++ = bh;
             decrypt = ext4_encrypted_inode(inode) &&
                 S_ISREG(inode->i_mode);

@@ -3698,7 +3698,7 @@ static int __ext4_block_zero_page_range(handle_t *handle,
     if (!buffer_uptodate(bh)) {
         err = -EIO;
-        ll_rw_block(READ, 1, &bh);
+        ll_rw_block(REQ_OP_READ, 0, 1, &bh);
         wait_on_buffer(bh);
         /* Uhhuh. Read error. Complain and punt. */
         if (!buffer_uptodate(bh))
...

@@ -1443,7 +1443,8 @@ static struct buffer_head * ext4_find_entry (struct inode *dir,
             }
             bh_use[ra_max] = bh;
             if (bh)
-                ll_rw_block(READ | REQ_META | REQ_PRIO,
+                ll_rw_block(REQ_OP_READ,
+                            REQ_META | REQ_PRIO,
                             1, &bh);
         }
     }
...

@@ -4204,7 +4204,7 @@ static journal_t *ext4_get_dev_journal(struct super_block *sb,
         goto out_bdev;
     }
     journal->j_private = sb;
-    ll_rw_block(READ | REQ_META | REQ_PRIO, 1, &journal->j_sb_buffer);
+    ll_rw_block(REQ_OP_READ, REQ_META | REQ_PRIO, 1, &journal->j_sb_buffer);
     wait_on_buffer(journal->j_sb_buffer);
     if (!buffer_uptodate(journal->j_sb_buffer)) {
         ext4_msg(sb, KERN_ERR, "I/O error on journal device");
...

@@ -974,7 +974,7 @@ static int gfs2_block_truncate_page(struct address_space *mapping, loff_t from)
     if (!buffer_uptodate(bh)) {
         err = -EIO;
-        ll_rw_block(READ, 1, &bh);
+        ll_rw_block(REQ_OP_READ, 0, 1, &bh);
         wait_on_buffer(bh);
         /* Uhhuh. Read error. Complain and punt. */
         if (!buffer_uptodate(bh))
...

@@ -449,7 +449,7 @@ struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen)
     if (buffer_uptodate(first_bh))
         goto out;
     if (!buffer_locked(first_bh))
-        ll_rw_block(READ_SYNC | REQ_META, 1, &first_bh);
+        ll_rw_block(REQ_OP_READ, READ_SYNC | REQ_META, 1, &first_bh);
     dblock++;
     extlen--;

@@ -458,7 +458,7 @@ struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen)
         bh = gfs2_getbuf(gl, dblock, CREATE);
         if (!buffer_uptodate(bh) && !buffer_locked(bh))
-            ll_rw_block(READA | REQ_META, 1, &bh);
+            ll_rw_block(REQ_OP_READ, READA | REQ_META, 1, &bh);
         brelse(bh);
         dblock++;
         extlen--;
...

@@ -730,7 +730,7 @@ static int gfs2_write_buf_to_page(struct gfs2_inode *ip, unsigned long index,
         if (PageUptodate(page))
             set_buffer_uptodate(bh);
         if (!buffer_uptodate(bh)) {
-            ll_rw_block(READ | REQ_META, 1, &bh);
+            ll_rw_block(REQ_OP_READ, REQ_META, 1, &bh);
             wait_on_buffer(bh);
             if (!buffer_uptodate(bh))
                 goto unlock_out;
...

@@ -81,7 +81,7 @@ static loff_t zisofs_uncompress_block(struct inode *inode, loff_t block_start,
     blocknum = block_start >> bufshift;
     memset(bhs, 0, (needblocks + 1) * sizeof(struct buffer_head *));
     haveblocks = isofs_get_blocks(inode, blocknum, bhs, needblocks);
-    ll_rw_block(READ, haveblocks, bhs);
+    ll_rw_block(REQ_OP_READ, 0, haveblocks, bhs);
     curbh = 0;
     curpage = 0;
...

@@ -1498,7 +1498,7 @@ static int journal_get_superblock(journal_t *journal)
     J_ASSERT(bh != NULL);
     if (!buffer_uptodate(bh)) {
-        ll_rw_block(READ, 1, &bh);
+        ll_rw_block(REQ_OP_READ, 0, 1, &bh);
         wait_on_buffer(bh);
         if (!buffer_uptodate(bh)) {
             printk(KERN_ERR
...

@@ -104,7 +104,7 @@ static int do_readahead(journal_t *journal, unsigned int start)
         if (!buffer_uptodate(bh) && !buffer_locked(bh)) {
             bufs[nbufs++] = bh;
             if (nbufs == MAXBUF) {
-                ll_rw_block(READ, nbufs, bufs);
+                ll_rw_block(REQ_OP_READ, 0, nbufs, bufs);
                 journal_brelse_array(bufs, nbufs);
                 nbufs = 0;
             }

@@ -113,7 +113,7 @@ static int do_readahead(journal_t *journal, unsigned int start)
     }
     if (nbufs)
-        ll_rw_block(READ, nbufs, bufs);
+        ll_rw_block(REQ_OP_READ, 0, nbufs, bufs);
     err = 0;
 failed:
...

@@ -640,7 +640,7 @@ int ocfs2_map_page_blocks(struct page *page, u64 *p_blkno,
                !buffer_new(bh) &&
                ocfs2_should_read_blk(inode, page, block_start) &&
                (block_start < from || block_end > to)) {
-            ll_rw_block(READ, 1, &bh);
+            ll_rw_block(REQ_OP_READ, 0, 1, &bh);
             *wait_bh++=bh;
         }
...

@@ -1819,7 +1819,7 @@ static int ocfs2_get_sector(struct super_block *sb,
     if (!buffer_dirty(*bh))
         clear_buffer_uptodate(*bh);
     unlock_buffer(*bh);
-    ll_rw_block(READ, 1, bh);
+    ll_rw_block(REQ_OP_READ, 0, 1, bh);
     wait_on_buffer(*bh);
     if (!buffer_uptodate(*bh)) {
         mlog_errno(-EIO);
...

@@ -870,7 +870,7 @@ static int write_ordered_buffers(spinlock_t * lock,
          */
         if (buffer_dirty(bh) && unlikely(bh->b_page->mapping == NULL)) {
             spin_unlock(lock);
-            ll_rw_block(WRITE, 1, &bh);
+            ll_rw_block(REQ_OP_WRITE, 0, 1, &bh);
             spin_lock(lock);
         }
         put_bh(bh);

@@ -1057,7 +1057,7 @@ static int flush_commit_list(struct super_block *s,
         if (tbh) {
             if (buffer_dirty(tbh)) {
                 depth = reiserfs_write_unlock_nested(s);
-                ll_rw_block(WRITE, 1, &tbh);
+                ll_rw_block(REQ_OP_WRITE, 0, 1, &tbh);
                 reiserfs_write_lock_nested(s, depth);
             }
             put_bh(tbh) ;

@@ -2244,7 +2244,7 @@ static int journal_read_transaction(struct super_block *sb,
         }
     }
     /* read in the log blocks, memcpy to the corresponding real block */
-    ll_rw_block(READ, get_desc_trans_len(desc), log_blocks);
+    ll_rw_block(REQ_OP_READ, 0, get_desc_trans_len(desc), log_blocks);
     for (i = 0; i < get_desc_trans_len(desc); i++) {
         wait_on_buffer(log_blocks[i]);

@@ -2346,7 +2346,7 @@ static struct buffer_head *reiserfs_breada(struct block_device *dev,
         } else
             bhlist[j++] = bh;
     }
-    ll_rw_block(READ, j, bhlist);
+    ll_rw_block(REQ_OP_READ, 0, j, bhlist);
     for (i = 1; i < j; i++)
         brelse(bhlist[i]);
     bh = bhlist[0];
...

@@ -551,7 +551,7 @@ static int search_by_key_reada(struct super_block *s,
         if (!buffer_uptodate(bh[j])) {
             if (depth == -1)
                 depth = reiserfs_write_unlock_nested(s);
-            ll_rw_block(READA, 1, bh + j);
+            ll_rw_block(REQ_OP_READ, READA, 1, bh + j);
         }
         brelse(bh[j]);
     }

@@ -660,7 +660,7 @@ int search_by_key(struct super_block *sb, const struct cpu_key *key,
         if (!buffer_uptodate(bh) && depth == -1)
             depth = reiserfs_write_unlock_nested(sb);
-        ll_rw_block(READ, 1, &bh);
+        ll_rw_block(REQ_OP_READ, 0, 1, &bh);
         wait_on_buffer(bh);
         if (depth != -1)
...

@@ -1661,7 +1661,7 @@ static int read_super_block(struct super_block *s, int offset)
 /* after journal replay, reread all bitmap and super blocks */
 static int reread_meta_blocks(struct super_block *s)
 {
-    ll_rw_block(READ, 1, &SB_BUFFER_WITH_SB(s));
+    ll_rw_block(REQ_OP_READ, 0, 1, &SB_BUFFER_WITH_SB(s));
     wait_on_buffer(SB_BUFFER_WITH_SB(s));
     if (!buffer_uptodate(SB_BUFFER_WITH_SB(s))) {
         reiserfs_warning(s, "reiserfs-2504", "error reading the super");
...

@@ -124,7 +124,7 @@ int squashfs_read_data(struct super_block *sb, u64 index, int length,
                 goto block_release;
             bytes += msblk->devblksize;
         }
-        ll_rw_block(READ, b, bh);
+        ll_rw_block(REQ_OP_READ, 0, b, bh);
     } else {
         /*
          * Metadata block.

@@ -156,7 +156,7 @@ int squashfs_read_data(struct super_block *sb, u64 index, int length,
                 goto block_release;
             bytes += msblk->devblksize;
         }
-        ll_rw_block(READ, b - 1, bh + 1);
+        ll_rw_block(REQ_OP_READ, 0, b - 1, bh + 1);
     }
     for (i = 0; i < b; i++) {
...

@@ -113,7 +113,7 @@ static int udf_readdir(struct file *file, struct dir_context *ctx)
                 brelse(tmp);
         }
         if (num) {
-            ll_rw_block(READA, num, bha);
+            ll_rw_block(REQ_OP_READ, READA, num, bha);
             for (i = 0; i < num; i++)
                 brelse(bha[i]);
         }
...

@@ -87,7 +87,7 @@ struct fileIdentDesc *udf_fileident_read(struct inode *dir, loff_t *nf_pos,
                 brelse(tmp);
         }
         if (num) {
-            ll_rw_block(READA, num, bha);
+            ll_rw_block(REQ_OP_READ, READA, num, bha);
             for (i = 0; i < num; i++)
                 brelse(bha[i]);
         }
...

@@ -1199,7 +1199,7 @@ struct buffer_head *udf_bread(struct inode *inode, int block,
     if (buffer_uptodate(bh))
         return bh;
-    ll_rw_block(READ, 1, &bh);
+    ll_rw_block(REQ_OP_READ, 0, 1, &bh);
     wait_on_buffer(bh);
     if (buffer_uptodate(bh))
...

@@ -292,7 +292,7 @@ static void ufs_change_blocknr(struct inode *inode, sector_t beg,
         if (!buffer_mapped(bh))
             map_bh(bh, inode->i_sb, oldb + pos);
         if (!buffer_uptodate(bh)) {
-            ll_rw_block(READ, 1, &bh);
+            ll_rw_block(REQ_OP_READ, 0, 1, &bh);
             wait_on_buffer(bh);
             if (!buffer_uptodate(bh)) {
                 ufs_error(inode->i_sb, __func__,
...

@@ -187,7 +187,7 @@ struct buffer_head *alloc_buffer_head(gfp_t gfp_flags);
 void free_buffer_head(struct buffer_head * bh);
 void unlock_buffer(struct buffer_head *bh);
 void __lock_buffer(struct buffer_head *bh);
-void ll_rw_block(int, int, struct buffer_head * bh[]);
+void ll_rw_block(int, int, int, struct buffer_head * bh[]);
 int sync_dirty_buffer(struct buffer_head *bh);
 int __sync_dirty_buffer(struct buffer_head *bh, int op_flags);
 void write_dirty_buffer(struct buffer_head *bh, int op_flags);
...