Commit 29309a4e authored by Linus Torvalds

Merge tag 'gfs2-4.15.fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/gfs2/linux-gfs2

Pull gfs2 updates from Bob Peterson:
 "We've got a total of 17 GFS2 patches for this merge window. The
  patches are basically in three categories: (1) patches related to
   broken xfstest cases, (2) patches related to improving iomap and
   starting to use it in GFS2, and (3) general typos and clarifications.

  Please note that one of the iomap patches extends beyond GFS2 and
   affects other file systems, but it was publicly reviewed by a
  variety of file system people in the community.

  From Andreas Gruenbacher:

   - rename variable 'bsize' to 'factor' to clarify the logic related to
     gfs2_block_map.

   - correctly set ctime in the setflags ioctl, which fixes broken
     xfstests test 277.

   - fix broken xfstest 258, due to an atime initialization problem.

   - fix broken xfstest 307, in which GFS2 was not setting ctime when
     setting acls.

   - switch general iomap code from blkno to disk offset for a variety
     of file systems.

   - add a new IOMAP_F_DATA_INLINE flag for iomap to indicate blocks
     that have data mixed with metadata.

   - implement SEEK_HOLE and SEEK_DATA via iomap in GFS2 (a small
     userspace lseek check follows this message).

   - fix failing xfstest case 066, which was due to not properly syncing
     dirty inodes when changing extended attributes.

   - fix a minor typo in a comment.

   - partially fix xfstest 424, which involved GET_FLAGS and SET_FLAGS
     ioctl. This is also a cleanup and simplification of the translation
     of flags from fs flags to gfs2 flags.

   - add support for STATX_ATTR_ inode flags (such as STATX_ATTR_APPEND
     and STATX_ATTR_IMMUTABLE) in statx, which fixed broken xfstest 424
     (a statx/ioctl check follows the shortlog below).

   - fix failing xfstest 093 by resolving a recursive glock problem in
     gfs2_xattr_get and _set.

  From me:

   - make inode height info part of the 'metapath' data structure to
     facilitate using iomap in GFS2.

   - start using iomap inside GFS2 and switch GFS2's block_map functions
     to use iomap under the covers.

   - switch GFS2's fiemap implementation from using block_map to using
     iomap under the covers.

   - fix journaled data pages not being properly synced to media when
     writing inodes. This was caught with xfstests.

   - fix another failing xfstest case in which switching a file from
     ordered_write to journaled data via set_flags caused a deadlock"
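
A user-visible effect of the iomap work above is that lseek(2) with SEEK_HOLE
and SEEK_DATA now reports real holes and data extents on GFS2 (served by
iomap_seek_hole()/iomap_seek_data() under the inode glock, as the fs/gfs2/file.c
and fs/gfs2/inode.c hunks below show). The following is a minimal userspace
sketch for trying that out; the file name, the 1 MiB hole and the offsets are
arbitrary choices for the demo, not anything taken from the patches.

/*
 * Minimal userspace sketch (not part of this patch set): exercise the
 * SEEK_HOLE/SEEK_DATA support that GFS2 now implements via iomap.
 * The file path and the 1 MiB hole are arbitrary demo choices.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	const char *path = argc > 1 ? argv[1] : "testfile";
	int fd = open(path, O_RDWR | O_CREAT | O_TRUNC, 0644);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Leave a 1 MiB hole, then write a few bytes of data after it. */
	if (pwrite(fd, "data", 4, 1024 * 1024) != 4) {
		perror("pwrite");
		return 1;
	}

	/* SEEK_DATA from offset 0 should skip the hole to roughly 1 MiB. */
	off_t data = lseek(fd, 0, SEEK_DATA);
	if (data < 0)
		perror("lseek(SEEK_DATA)");
	else
		printf("first data at %lld\n", (long long)data);

	/* SEEK_HOLE from offset 0 should report the hole at offset 0. */
	off_t hole = lseek(fd, 0, SEEK_HOLE);
	if (hole < 0)
		perror("lseek(SEEK_HOLE)");
	else
		printf("first hole at %lld\n", (long long)hole);

	close(fd);
	return 0;
}

On a kernel without real SEEK_HOLE support the same program still runs, but the
generic fallback simply reports the whole file as data with one hole at EOF.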

* tag 'gfs2-4.15.fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/gfs2/linux-gfs2:
  gfs2: Allow gfs2_xattr_set to be called with the glock held
  gfs2: Add support for statx inode flags
  gfs2: Fix and clean up {GET,SET}FLAGS ioctl
  gfs2: Fix a harmless typo
  gfs2: Fix xattr fsync
  GFS2: Take inode off order_write list when setting jdata flag
  GFS2: flush the log and all pages for jdata as we do for WB_SYNC_ALL
  gfs2: Implement SEEK_HOLE / SEEK_DATA via iomap
  GFS2: Switch fiemap implementation to use iomap
  GFS2: Implement iomap for block_map
  GFS2: Make height info part of metapath
  gfs2: Always update inode ctime in set_acl
  gfs2: Support negative atimes
  gfs2: Update ctime in setflags ioctl
  gfs2: Clarify gfs2_block_map
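
The {GET,SET}FLAGS ioctl cleanup and the new statx inode flags from the
shortlog above are also easiest to check from userspace. Below is a minimal
sketch that reads the same per-inode state both ways; it assumes a glibc new
enough to provide the statx() wrapper, and the path argument is an arbitrary
demo choice. On a kernel with this series, the append-only and immutable bits
of a GFS2 inode show up both in FS_IOC_GETFLAGS and in stx_attributes.

/*
 * Minimal userspace sketch (not part of this patch set): print an inode's
 * flags via FS_IOC_GETFLAGS and via statx() attributes.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/stat.h>
#include <linux/fs.h>

int main(int argc, char **argv)
{
	const char *path = argc > 1 ? argv[1] : ".";
	int fd = open(path, O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* The ioctl returns the fs-flag view (FS_*_FL bits). */
	int fsflags = 0;
	if (ioctl(fd, FS_IOC_GETFLAGS, &fsflags) == 0)
		printf("FS_IOC_GETFLAGS: 0x%x (append=%d immutable=%d jdata=%d)\n",
		       fsflags,
		       !!(fsflags & FS_APPEND_FL),
		       !!(fsflags & FS_IMMUTABLE_FL),
		       !!(fsflags & FS_JOURNAL_DATA_FL));
	else
		perror("ioctl(FS_IOC_GETFLAGS)");

	/* statx reports a subset of the same state as STATX_ATTR_* bits. */
	struct statx stx;
	if (statx(fd, "", AT_EMPTY_PATH, STATX_BASIC_STATS, &stx) == 0)
		printf("statx: append=%d immutable=%d (mask 0x%llx)\n",
		       !!(stx.stx_attributes & STATX_ATTR_APPEND),
		       !!(stx.stx_attributes & STATX_ATTR_IMMUTABLE),
		       (unsigned long long)stx.stx_attributes_mask);
	else
		perror("statx");

	close(fd);
	return 0;
}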
diff --git a/fs/gfs2/Kconfig b/fs/gfs2/Kconfig
@@ -4,6 +4,7 @@ config GFS2_FS
 	select FS_POSIX_ACL
 	select CRC32
 	select QUOTACTL
+	select FS_IOMAP
 	help
 	  A cluster filesystem.
......
diff --git a/fs/gfs2/acl.c b/fs/gfs2/acl.c
@@ -141,6 +141,7 @@ int gfs2_set_acl(struct inode *inode, struct posix_acl *acl, int type)
 	ret = __gfs2_set_acl(inode, acl, type);
 	if (!ret && mode != inode->i_mode) {
+		inode->i_ctime = current_time(inode);
 		inode->i_mode = mode;
 		mark_inode_dirty(inode);
 	}
......
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
@@ -13,6 +13,7 @@
 #include <linux/blkdev.h>
 #include <linux/gfs2_ondisk.h>
 #include <linux/crc32.h>
+#include <linux/iomap.h>
 
 #include "gfs2.h"
 #include "incore.h"
@@ -36,6 +37,8 @@
 struct metapath {
 	struct buffer_head *mp_bh[GFS2_MAX_META_HEIGHT];
 	__u16 mp_list[GFS2_MAX_META_HEIGHT];
+	int mp_fheight; /* find_metapath height */
+	int mp_aheight; /* actual height (lookup height) */
 };
 
 /**
@@ -235,9 +238,9 @@ static void find_metapath(const struct gfs2_sbd *sdp, u64 block,
 {
 	unsigned int i;
 
+	mp->mp_fheight = height;
 	for (i = height; i--;)
 		mp->mp_list[i] = do_div(block, sdp->sd_inptrs);
-
 }
 
 static inline unsigned int metapath_branch_start(const struct metapath *mp)
@@ -248,7 +251,7 @@ static inline unsigned int metapath_branch_start(const struct metapath *mp)
 }
 
 /**
- * metaptr1 - Return the first possible metadata pointer in a metaath buffer
+ * metaptr1 - Return the first possible metadata pointer in a metapath buffer
  * @height: The metadata height (0 = dinode)
  * @mp: The metapath
  */
@@ -345,10 +348,13 @@ static int lookup_metapath(struct gfs2_inode *ip, struct metapath *mp)
 	for (x = 0; x < end_of_metadata; x++) {
 		ret = lookup_mp_height(ip, mp, x);
 		if (ret)
-			return ret;
+			goto out;
 	}
 
-	return ip->i_height;
+	ret = ip->i_height;
+out:
+	mp->mp_aheight = ret;
+	return ret;
 }
 
 /**
@@ -480,10 +486,11 @@ static inline unsigned int hptrs(struct gfs2_sbd *sdp, const unsigned int hgt)
  * @inode: The GFS2 inode
  * @lblock: The logical starting block of the extent
  * @bh_map: This is used to return the mapping details
- * @mp: The metapath
- * @sheight: The starting height (i.e. whats already mapped)
- * @height: The height to build to
+ * @zero_new: True if newly allocated blocks should be zeroed
+ * @mp: The metapath, with proper height information calculated
  * @maxlen: The max number of data blocks to alloc
+ * @dblock: Pointer to return the resulting new block
+ * @dblks: Pointer to return the number of blocks allocated
  *
  * In this routine we may have to alloc:
  *   i) Indirect blocks to grow the metadata tree height
@@ -499,63 +506,63 @@ static inline unsigned int hptrs(struct gfs2_sbd *sdp, const unsigned int hgt)
  * Returns: errno on error
  */
 
-static int gfs2_bmap_alloc(struct inode *inode, const sector_t lblock,
-			   struct buffer_head *bh_map, struct metapath *mp,
-			   const unsigned int sheight,
-			   const unsigned int height,
-			   const size_t maxlen)
+static int gfs2_iomap_alloc(struct inode *inode, struct iomap *iomap,
+			    unsigned flags, struct metapath *mp)
 {
 	struct gfs2_inode *ip = GFS2_I(inode);
 	struct gfs2_sbd *sdp = GFS2_SB(inode);
 	struct super_block *sb = sdp->sd_vfs;
 	struct buffer_head *dibh = mp->mp_bh[0];
-	u64 bn, dblock = 0;
+	u64 bn;
 	unsigned n, i, blks, alloced = 0, iblks = 0, branch_start = 0;
 	unsigned dblks = 0;
 	unsigned ptrs_per_blk;
-	const unsigned end_of_metadata = height - 1;
+	const unsigned end_of_metadata = mp->mp_fheight - 1;
 	int ret;
-	int eob = 0;
 	enum alloc_state state;
 	__be64 *ptr;
 	__be64 zero_bn = 0;
+	size_t maxlen = iomap->length >> inode->i_blkbits;
 
-	BUG_ON(sheight < 1);
+	BUG_ON(mp->mp_aheight < 1);
 	BUG_ON(dibh == NULL);
 
 	gfs2_trans_add_meta(ip->i_gl, dibh);
 
-	if (height == sheight) {
+	if (mp->mp_fheight == mp->mp_aheight) {
 		struct buffer_head *bh;
+		int eob;
+
 		/* Bottom indirect block exists, find unalloced extent size */
 		ptr = metapointer(end_of_metadata, mp);
 		bh = mp->mp_bh[end_of_metadata];
-		dblks = gfs2_extent_length(bh->b_data, bh->b_size, ptr, maxlen,
-					   &eob);
+		dblks = gfs2_extent_length(bh->b_data, bh->b_size, ptr,
					   maxlen, &eob);
 		BUG_ON(dblks < 1);
 		state = ALLOC_DATA;
 	} else {
 		/* Need to allocate indirect blocks */
-		ptrs_per_blk = height > 1 ? sdp->sd_inptrs : sdp->sd_diptrs;
+		ptrs_per_blk = mp->mp_fheight > 1 ? sdp->sd_inptrs :
+			sdp->sd_diptrs;
 		dblks = min(maxlen, (size_t)(ptrs_per_blk -
					     mp->mp_list[end_of_metadata]));
-		if (height == ip->i_height) {
+		if (mp->mp_fheight == ip->i_height) {
 			/* Writing into existing tree, extend tree down */
-			iblks = height - sheight;
+			iblks = mp->mp_fheight - mp->mp_aheight;
 			state = ALLOC_GROW_DEPTH;
 		} else {
 			/* Building up tree height */
 			state = ALLOC_GROW_HEIGHT;
-			iblks = height - ip->i_height;
+			iblks = mp->mp_fheight - ip->i_height;
 			branch_start = metapath_branch_start(mp);
-			iblks += (height - branch_start);
+			iblks += (mp->mp_fheight - branch_start);
 		}
 	}
 
 	/* start of the second part of the function (state machine) */
 
 	blks = dblks + iblks;
-	i = sheight;
+	i = mp->mp_aheight;
 	do {
 		int error;
 		n = blks - alloced;
@@ -573,9 +580,10 @@ static int gfs2_bmap_alloc(struct inode *inode, const sector_t lblock,
					       sizeof(struct gfs2_dinode));
				zero_bn = *ptr;
			}
-			for (; i - 1 < height - ip->i_height && n > 0; i++, n--)
+			for (; i - 1 < mp->mp_fheight - ip->i_height && n > 0;
+			     i++, n--)
				gfs2_indirect_init(mp, ip->i_gl, i, 0, bn++);
-			if (i - 1 == height - ip->i_height) {
+			if (i - 1 == mp->mp_fheight - ip->i_height) {
				i--;
				gfs2_buffer_copy_tail(mp->mp_bh[i],
						sizeof(struct gfs2_meta_header),
@@ -587,7 +595,7 @@ static int gfs2_bmap_alloc(struct inode *inode, const sector_t lblock,
						sizeof(struct gfs2_meta_header));
				*ptr = zero_bn;
				state = ALLOC_GROW_DEPTH;
-				for(i = branch_start; i < height; i++) {
+				for(i = branch_start; i < mp->mp_fheight; i++) {
					if (mp->mp_bh[i] == NULL)
						break;
					brelse(mp->mp_bh[i]);
@@ -599,12 +607,12 @@ static int gfs2_bmap_alloc(struct inode *inode, const sector_t lblock,
				break;
		/* Branching from existing tree */
		case ALLOC_GROW_DEPTH:
-			if (i > 1 && i < height)
+			if (i > 1 && i < mp->mp_fheight)
				gfs2_trans_add_meta(ip->i_gl, mp->mp_bh[i-1]);
-			for (; i < height && n > 0; i++, n--)
+			for (; i < mp->mp_fheight && n > 0; i++, n--)
				gfs2_indirect_init(mp, ip->i_gl, i,
						   mp->mp_list[i-1], bn++);
-			if (i == height)
+			if (i == mp->mp_fheight)
				state = ALLOC_DATA;
			if (n == 0)
				break;
...@@ -615,119 +623,269 @@ static int gfs2_bmap_alloc(struct inode *inode, const sector_t lblock, ...@@ -615,119 +623,269 @@ static int gfs2_bmap_alloc(struct inode *inode, const sector_t lblock,
gfs2_trans_add_meta(ip->i_gl, mp->mp_bh[end_of_metadata]); gfs2_trans_add_meta(ip->i_gl, mp->mp_bh[end_of_metadata]);
dblks = n; dblks = n;
ptr = metapointer(end_of_metadata, mp); ptr = metapointer(end_of_metadata, mp);
dblock = bn; iomap->addr = bn << inode->i_blkbits;
iomap->flags |= IOMAP_F_NEW;
while (n-- > 0) while (n-- > 0)
*ptr++ = cpu_to_be64(bn++); *ptr++ = cpu_to_be64(bn++);
if (buffer_zeronew(bh_map)) { if (flags & IOMAP_ZERO) {
ret = sb_issue_zeroout(sb, dblock, dblks, ret = sb_issue_zeroout(sb, iomap->addr >> inode->i_blkbits,
GFP_NOFS); dblks, GFP_NOFS);
if (ret) { if (ret) {
fs_err(sdp, fs_err(sdp,
"Failed to zero data buffers\n"); "Failed to zero data buffers\n");
clear_buffer_zeronew(bh_map); flags &= ~IOMAP_ZERO;
} }
} }
break; break;
} }
} while ((state != ALLOC_DATA) || !dblock); } while (iomap->addr == IOMAP_NULL_ADDR);
ip->i_height = height; iomap->length = (u64)dblks << inode->i_blkbits;
ip->i_height = mp->mp_fheight;
gfs2_add_inode_blocks(&ip->i_inode, alloced); gfs2_add_inode_blocks(&ip->i_inode, alloced);
gfs2_dinode_out(ip, mp->mp_bh[0]->b_data); gfs2_dinode_out(ip, mp->mp_bh[0]->b_data);
map_bh(bh_map, inode->i_sb, dblock);
bh_map->b_size = dblks << inode->i_blkbits;
set_buffer_new(bh_map);
return 0; return 0;
} }
/** /**
* gfs2_block_map - Map a block from an inode to a disk block * hole_size - figure out the size of a hole
* @inode: The inode * @inode: The inode
* @lblock: The logical block number * @lblock: The logical starting block number
* @bh_map: The bh to be mapped * @mp: The metapath
* @create: True if its ok to alloc blocks to satify the request
* *
* Sets buffer_mapped() if successful, sets buffer_boundary() if a * Returns: The hole size in bytes
* read of metadata will be required before the next block can be
* mapped. Sets buffer_new() if new blocks were allocated.
* *
* Returns: errno
*/ */
static u64 hole_size(struct inode *inode, sector_t lblock, struct metapath *mp)
{
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_sbd *sdp = GFS2_SB(inode);
struct metapath mp_eof;
u64 factor = 1;
int hgt;
u64 holesz = 0;
const __be64 *first, *end, *ptr;
const struct buffer_head *bh;
u64 lblock_stop = (i_size_read(inode) - 1) >> inode->i_blkbits;
int zeroptrs;
bool done = false;
/* Get another metapath, to the very last byte */
find_metapath(sdp, lblock_stop, &mp_eof, ip->i_height);
for (hgt = ip->i_height - 1; hgt >= 0 && !done; hgt--) {
bh = mp->mp_bh[hgt];
if (bh) {
zeroptrs = 0;
first = metapointer(hgt, mp);
end = (const __be64 *)(bh->b_data + bh->b_size);
for (ptr = first; ptr < end; ptr++) {
if (*ptr) {
done = true;
break;
} else {
zeroptrs++;
}
}
} else {
zeroptrs = sdp->sd_inptrs;
}
if (factor * zeroptrs >= lblock_stop - lblock + 1) {
holesz = lblock_stop - lblock + 1;
break;
}
holesz += factor * zeroptrs;
int gfs2_block_map(struct inode *inode, sector_t lblock, factor *= sdp->sd_inptrs;
struct buffer_head *bh_map, int create) if (hgt && (mp->mp_list[hgt - 1] < mp_eof.mp_list[hgt - 1]))
(mp->mp_list[hgt - 1])++;
}
return holesz << inode->i_blkbits;
}
static void gfs2_stuffed_iomap(struct inode *inode, struct iomap *iomap)
{
struct gfs2_inode *ip = GFS2_I(inode);
iomap->addr = (ip->i_no_addr << inode->i_blkbits) +
sizeof(struct gfs2_dinode);
iomap->offset = 0;
iomap->length = i_size_read(inode);
iomap->type = IOMAP_MAPPED;
iomap->flags = IOMAP_F_DATA_INLINE;
}
/**
* gfs2_iomap_begin - Map blocks from an inode to disk blocks
* @inode: The inode
* @pos: Starting position in bytes
* @length: Length to map, in bytes
* @flags: iomap flags
* @iomap: The iomap structure
*
* Returns: errno
*/
int gfs2_iomap_begin(struct inode *inode, loff_t pos, loff_t length,
unsigned flags, struct iomap *iomap)
{ {
struct gfs2_inode *ip = GFS2_I(inode); struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_sbd *sdp = GFS2_SB(inode); struct gfs2_sbd *sdp = GFS2_SB(inode);
unsigned int bsize = sdp->sd_sb.sb_bsize; struct metapath mp = { .mp_aheight = 1, };
const size_t maxlen = bh_map->b_size >> inode->i_blkbits; unsigned int factor = sdp->sd_sb.sb_bsize;
const u64 *arr = sdp->sd_heightsize; const u64 *arr = sdp->sd_heightsize;
__be64 *ptr; __be64 *ptr;
u64 size; sector_t lblock;
struct metapath mp; sector_t lend;
int ret; int ret;
int eob; int eob;
unsigned int len; unsigned int len;
struct buffer_head *bh; struct buffer_head *bh;
u8 height; u8 height;
BUG_ON(maxlen == 0); trace_gfs2_iomap_start(ip, pos, length, flags);
if (!length) {
ret = -EINVAL;
goto out;
}
memset(&mp, 0, sizeof(mp)); if ((flags & IOMAP_REPORT) && gfs2_is_stuffed(ip)) {
bmap_lock(ip, create); gfs2_stuffed_iomap(inode, iomap);
clear_buffer_mapped(bh_map); if (pos >= iomap->length)
clear_buffer_new(bh_map); return -ENOENT;
clear_buffer_boundary(bh_map); ret = 0;
trace_gfs2_bmap(ip, bh_map, lblock, create, 1); goto out;
}
lblock = pos >> inode->i_blkbits;
lend = (pos + length + sdp->sd_sb.sb_bsize - 1) >> inode->i_blkbits;
iomap->offset = lblock << inode->i_blkbits;
iomap->addr = IOMAP_NULL_ADDR;
iomap->type = IOMAP_HOLE;
iomap->length = (u64)(lend - lblock) << inode->i_blkbits;
iomap->flags = IOMAP_F_MERGED;
bmap_lock(ip, 0);
/*
* Directory data blocks have a struct gfs2_meta_header header, so the
* remaining size is smaller than the filesystem block size. Logical
* block numbers for directories are in units of this remaining size!
*/
if (gfs2_is_dir(ip)) { if (gfs2_is_dir(ip)) {
bsize = sdp->sd_jbsize; factor = sdp->sd_jbsize;
arr = sdp->sd_jheightsize; arr = sdp->sd_jheightsize;
} }
ret = gfs2_meta_inode_buffer(ip, &mp.mp_bh[0]); ret = gfs2_meta_inode_buffer(ip, &mp.mp_bh[0]);
if (ret) if (ret)
goto out; goto out_release;
height = ip->i_height; height = ip->i_height;
size = (lblock + 1) * bsize; while ((lblock + 1) * factor > arr[height])
while (size > arr[height])
height++; height++;
find_metapath(sdp, lblock, &mp, height); find_metapath(sdp, lblock, &mp, height);
ret = 1;
if (height > ip->i_height || gfs2_is_stuffed(ip)) if (height > ip->i_height || gfs2_is_stuffed(ip))
goto do_alloc; goto do_alloc;
ret = lookup_metapath(ip, &mp); ret = lookup_metapath(ip, &mp);
if (ret < 0) if (ret < 0)
goto out; goto out_release;
if (ret != ip->i_height)
if (mp.mp_aheight != ip->i_height)
goto do_alloc; goto do_alloc;
ptr = metapointer(ip->i_height - 1, &mp); ptr = metapointer(ip->i_height - 1, &mp);
if (*ptr == 0) if (*ptr == 0)
goto do_alloc; goto do_alloc;
map_bh(bh_map, inode->i_sb, be64_to_cpu(*ptr));
iomap->type = IOMAP_MAPPED;
iomap->addr = be64_to_cpu(*ptr) << inode->i_blkbits;
bh = mp.mp_bh[ip->i_height - 1]; bh = mp.mp_bh[ip->i_height - 1];
len = gfs2_extent_length(bh->b_data, bh->b_size, ptr, maxlen, &eob); len = gfs2_extent_length(bh->b_data, bh->b_size, ptr, lend - lblock, &eob);
bh_map->b_size = (len << inode->i_blkbits);
if (eob) if (eob)
set_buffer_boundary(bh_map); iomap->flags |= IOMAP_F_BOUNDARY;
iomap->length = (u64)len << inode->i_blkbits;
ret = 0; ret = 0;
out:
out_release:
release_metapath(&mp); release_metapath(&mp);
trace_gfs2_bmap(ip, bh_map, lblock, create, ret); bmap_unlock(ip, 0);
bmap_unlock(ip, create); out:
trace_gfs2_iomap_end(ip, iomap, ret);
return ret; return ret;
do_alloc: do_alloc:
/* All allocations are done here, firstly check create flag */ if (!(flags & IOMAP_WRITE)) {
if (!create) { if (pos >= i_size_read(inode)) {
BUG_ON(gfs2_is_stuffed(ip)); ret = -ENOENT;
goto out_release;
}
ret = 0; ret = 0;
goto out; iomap->length = hole_size(inode, lblock, &mp);
goto out_release;
} }
/* At this point ret is the tree depth of already allocated blocks */ ret = gfs2_iomap_alloc(inode, iomap, flags, &mp);
ret = gfs2_bmap_alloc(inode, lblock, bh_map, &mp, ret, height, maxlen); goto out_release;
}
/**
* gfs2_block_map - Map a block from an inode to a disk block
* @inode: The inode
* @lblock: The logical block number
* @bh_map: The bh to be mapped
* @create: True if its ok to alloc blocks to satify the request
*
* Sets buffer_mapped() if successful, sets buffer_boundary() if a
* read of metadata will be required before the next block can be
* mapped. Sets buffer_new() if new blocks were allocated.
*
* Returns: errno
*/
int gfs2_block_map(struct inode *inode, sector_t lblock,
struct buffer_head *bh_map, int create)
{
struct gfs2_inode *ip = GFS2_I(inode);
struct iomap iomap;
int ret, flags = 0;
clear_buffer_mapped(bh_map);
clear_buffer_new(bh_map);
clear_buffer_boundary(bh_map);
trace_gfs2_bmap(ip, bh_map, lblock, create, 1);
if (create)
flags |= IOMAP_WRITE;
if (buffer_zeronew(bh_map))
flags |= IOMAP_ZERO;
ret = gfs2_iomap_begin(inode, (loff_t)lblock << inode->i_blkbits,
bh_map->b_size, flags, &iomap);
if (ret) {
if (!create && ret == -ENOENT) {
/* Return unmapped buffer beyond the end of file. */
ret = 0;
}
goto out; goto out;
}
if (iomap.length > bh_map->b_size) {
iomap.length = bh_map->b_size;
iomap.flags &= ~IOMAP_F_BOUNDARY;
}
if (iomap.addr != IOMAP_NULL_ADDR)
map_bh(bh_map, inode->i_sb, iomap.addr >> inode->i_blkbits);
bh_map->b_size = iomap.length;
if (iomap.flags & IOMAP_F_BOUNDARY)
set_buffer_boundary(bh_map);
if (iomap.flags & IOMAP_F_NEW)
set_buffer_new(bh_map);
out:
trace_gfs2_bmap(ip, bh_map, lblock, create, ret);
return ret;
} }
/* /*
......
diff --git a/fs/gfs2/bmap.h b/fs/gfs2/bmap.h
@@ -10,6 +10,8 @@
 #ifndef __BMAP_DOT_H__
 #define __BMAP_DOT_H__
 
+#include <linux/iomap.h>
+
 #include "inode.h"
 
 struct inode;
@@ -47,6 +49,8 @@ static inline void gfs2_write_calc_reserv(const struct gfs2_inode *ip,
 extern int gfs2_unstuff_dinode(struct gfs2_inode *ip, struct page *page);
 extern int gfs2_block_map(struct inode *inode, sector_t lblock,
			  struct buffer_head *bh, int create);
+extern int gfs2_iomap_begin(struct inode *inode, loff_t pos, loff_t length,
+			    unsigned flags, struct iomap *iomap);
 extern int gfs2_extent_map(struct inode *inode, u64 lblock, int *new,
			   u64 *dblock, unsigned *extlen);
 extern int gfs2_setattr_size(struct inode *inode, u64 size);
......
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
@@ -60,9 +60,7 @@ static loff_t gfs2_llseek(struct file *file, loff_t offset, int whence)
 	loff_t error;
 
 	switch (whence) {
-	case SEEK_END: /* These reference inode->i_size */
-	case SEEK_DATA:
-	case SEEK_HOLE:
+	case SEEK_END:
 		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
					   &i_gh);
 		if (!error) {
@@ -70,8 +68,21 @@ static loff_t gfs2_llseek(struct file *file, loff_t offset, int whence)
 			gfs2_glock_dq_uninit(&i_gh);
 		}
 		break;
+
+	case SEEK_DATA:
+		error = gfs2_seek_data(file, offset);
+		break;
+
+	case SEEK_HOLE:
+		error = gfs2_seek_hole(file, offset);
+		break;
+
 	case SEEK_CUR:
 	case SEEK_SET:
+		/*
+		 * These don't reference inode->i_size and don't depend on the
+		 * block mapping, so we don't need the glock.
+		 */
 		error = generic_file_llseek(file, offset, whence);
 		break;
 	default:
...@@ -108,45 +119,22 @@ static int gfs2_readdir(struct file *file, struct dir_context *ctx) ...@@ -108,45 +119,22 @@ static int gfs2_readdir(struct file *file, struct dir_context *ctx)
} }
/** /**
* fsflags_cvt * fsflag_gfs2flag
* @table: A table of 32 u32 flags
* @val: a 32 bit value to convert
*
* This function can be used to convert between fsflags values and
* GFS2's own flags values.
* *
* Returns: the converted flags * The FS_JOURNAL_DATA_FL flag maps to GFS2_DIF_INHERIT_JDATA for directories,
* and to GFS2_DIF_JDATA for non-directories.
*/ */
static u32 fsflags_cvt(const u32 *table, u32 val) static struct {
{ u32 fsflag;
u32 res = 0; u32 gfsflag;
while(val) { } fsflag_gfs2flag[] = {
if (val & 1) {FS_SYNC_FL, GFS2_DIF_SYNC},
res |= *table; {FS_IMMUTABLE_FL, GFS2_DIF_IMMUTABLE},
table++; {FS_APPEND_FL, GFS2_DIF_APPENDONLY},
val >>= 1; {FS_NOATIME_FL, GFS2_DIF_NOATIME},
} {FS_INDEX_FL, GFS2_DIF_EXHASH},
return res; {FS_TOPDIR_FL, GFS2_DIF_TOPDIR},
} {FS_JOURNAL_DATA_FL, GFS2_DIF_JDATA | GFS2_DIF_INHERIT_JDATA},
static const u32 fsflags_to_gfs2[32] = {
[3] = GFS2_DIF_SYNC,
[4] = GFS2_DIF_IMMUTABLE,
[5] = GFS2_DIF_APPENDONLY,
[7] = GFS2_DIF_NOATIME,
[12] = GFS2_DIF_EXHASH,
[14] = GFS2_DIF_INHERIT_JDATA,
[17] = GFS2_DIF_TOPDIR,
};
static const u32 gfs2_to_fsflags[32] = {
[gfs2fl_Sync] = FS_SYNC_FL,
[gfs2fl_Immutable] = FS_IMMUTABLE_FL,
[gfs2fl_AppendOnly] = FS_APPEND_FL,
[gfs2fl_NoAtime] = FS_NOATIME_FL,
[gfs2fl_ExHash] = FS_INDEX_FL,
[gfs2fl_TopLevel] = FS_TOPDIR_FL,
[gfs2fl_InheritJdata] = FS_JOURNAL_DATA_FL,
}; };
static int gfs2_get_flags(struct file *filp, u32 __user *ptr) static int gfs2_get_flags(struct file *filp, u32 __user *ptr)
...@@ -154,17 +142,23 @@ static int gfs2_get_flags(struct file *filp, u32 __user *ptr) ...@@ -154,17 +142,23 @@ static int gfs2_get_flags(struct file *filp, u32 __user *ptr)
struct inode *inode = file_inode(filp); struct inode *inode = file_inode(filp);
struct gfs2_inode *ip = GFS2_I(inode); struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_holder gh; struct gfs2_holder gh;
int error; int i, error;
u32 fsflags; u32 gfsflags, fsflags = 0;
gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh); gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
error = gfs2_glock_nq(&gh); error = gfs2_glock_nq(&gh);
if (error) if (error)
goto out_uninit; goto out_uninit;
fsflags = fsflags_cvt(gfs2_to_fsflags, ip->i_diskflags); gfsflags = ip->i_diskflags;
if (!S_ISDIR(inode->i_mode) && ip->i_diskflags & GFS2_DIF_JDATA) if (S_ISDIR(inode->i_mode))
fsflags |= FS_JOURNAL_DATA_FL; gfsflags &= ~GFS2_DIF_JDATA;
else
gfsflags &= ~GFS2_DIF_INHERIT_JDATA;
for (i = 0; i < ARRAY_SIZE(fsflag_gfs2flag); i++)
if (gfsflags & fsflag_gfs2flag[i].gfsflag)
fsflags |= fsflag_gfs2flag[i].fsflag;
if (put_user(fsflags, ptr)) if (put_user(fsflags, ptr))
error = -EFAULT; error = -EFAULT;
...@@ -199,7 +193,6 @@ void gfs2_set_inode_flags(struct inode *inode) ...@@ -199,7 +193,6 @@ void gfs2_set_inode_flags(struct inode *inode)
GFS2_DIF_APPENDONLY| \ GFS2_DIF_APPENDONLY| \
GFS2_DIF_NOATIME| \ GFS2_DIF_NOATIME| \
GFS2_DIF_SYNC| \ GFS2_DIF_SYNC| \
GFS2_DIF_SYSTEM| \
GFS2_DIF_TOPDIR| \ GFS2_DIF_TOPDIR| \
GFS2_DIF_INHERIT_JDATA) GFS2_DIF_INHERIT_JDATA)
...@@ -238,10 +231,6 @@ static int do_gfs2_set_flags(struct file *filp, u32 reqflags, u32 mask) ...@@ -238,10 +231,6 @@ static int do_gfs2_set_flags(struct file *filp, u32 reqflags, u32 mask)
if ((new_flags ^ flags) == 0) if ((new_flags ^ flags) == 0)
goto out; goto out;
error = -EINVAL;
if ((new_flags ^ flags) & ~GFS2_FLAGS_USER_SET)
goto out;
error = -EPERM; error = -EPERM;
if (IS_IMMUTABLE(inode) && (new_flags & GFS2_DIF_IMMUTABLE)) if (IS_IMMUTABLE(inode) && (new_flags & GFS2_DIF_IMMUTABLE))
goto out; goto out;
...@@ -256,7 +245,7 @@ static int do_gfs2_set_flags(struct file *filp, u32 reqflags, u32 mask) ...@@ -256,7 +245,7 @@ static int do_gfs2_set_flags(struct file *filp, u32 reqflags, u32 mask)
goto out; goto out;
} }
if ((flags ^ new_flags) & GFS2_DIF_JDATA) { if ((flags ^ new_flags) & GFS2_DIF_JDATA) {
if (flags & GFS2_DIF_JDATA) if (new_flags & GFS2_DIF_JDATA)
gfs2_log_flush(sdp, ip->i_gl, NORMAL_FLUSH); gfs2_log_flush(sdp, ip->i_gl, NORMAL_FLUSH);
error = filemap_fdatawrite(inode->i_mapping); error = filemap_fdatawrite(inode->i_mapping);
if (error) if (error)
...@@ -264,6 +253,8 @@ static int do_gfs2_set_flags(struct file *filp, u32 reqflags, u32 mask) ...@@ -264,6 +253,8 @@ static int do_gfs2_set_flags(struct file *filp, u32 reqflags, u32 mask)
error = filemap_fdatawait(inode->i_mapping); error = filemap_fdatawait(inode->i_mapping);
if (error) if (error)
goto out; goto out;
if (new_flags & GFS2_DIF_JDATA)
gfs2_ordered_del_inode(ip);
} }
error = gfs2_trans_begin(sdp, RES_DINODE, 0); error = gfs2_trans_begin(sdp, RES_DINODE, 0);
if (error) if (error)
...@@ -271,6 +262,7 @@ static int do_gfs2_set_flags(struct file *filp, u32 reqflags, u32 mask) ...@@ -271,6 +262,7 @@ static int do_gfs2_set_flags(struct file *filp, u32 reqflags, u32 mask)
error = gfs2_meta_inode_buffer(ip, &bh); error = gfs2_meta_inode_buffer(ip, &bh);
if (error) if (error)
goto out_trans_end; goto out_trans_end;
inode->i_ctime = current_time(inode);
gfs2_trans_add_meta(ip->i_gl, bh); gfs2_trans_add_meta(ip->i_gl, bh);
ip->i_diskflags = new_flags; ip->i_diskflags = new_flags;
gfs2_dinode_out(ip, bh->b_data); gfs2_dinode_out(ip, bh->b_data);
...@@ -289,19 +281,33 @@ static int do_gfs2_set_flags(struct file *filp, u32 reqflags, u32 mask) ...@@ -289,19 +281,33 @@ static int do_gfs2_set_flags(struct file *filp, u32 reqflags, u32 mask)
static int gfs2_set_flags(struct file *filp, u32 __user *ptr) static int gfs2_set_flags(struct file *filp, u32 __user *ptr)
{ {
struct inode *inode = file_inode(filp); struct inode *inode = file_inode(filp);
u32 fsflags, gfsflags; u32 fsflags, gfsflags = 0;
u32 mask;
int i;
if (get_user(fsflags, ptr)) if (get_user(fsflags, ptr))
return -EFAULT; return -EFAULT;
gfsflags = fsflags_cvt(fsflags_to_gfs2, fsflags); for (i = 0; i < ARRAY_SIZE(fsflag_gfs2flag); i++) {
if (!S_ISDIR(inode->i_mode)) { if (fsflags & fsflag_gfs2flag[i].fsflag) {
gfsflags &= ~GFS2_DIF_TOPDIR; fsflags &= ~fsflag_gfs2flag[i].fsflag;
if (gfsflags & GFS2_DIF_INHERIT_JDATA) gfsflags |= fsflag_gfs2flag[i].gfsflag;
gfsflags ^= (GFS2_DIF_JDATA | GFS2_DIF_INHERIT_JDATA);
return do_gfs2_set_flags(filp, gfsflags, ~GFS2_DIF_SYSTEM);
} }
return do_gfs2_set_flags(filp, gfsflags, ~(GFS2_DIF_SYSTEM | GFS2_DIF_JDATA)); }
if (fsflags || gfsflags & ~GFS2_FLAGS_USER_SET)
return -EINVAL;
mask = GFS2_FLAGS_USER_SET;
if (S_ISDIR(inode->i_mode)) {
mask &= ~GFS2_DIF_JDATA;
} else {
/* The GFS2_DIF_TOPDIR flag is only valid for directories. */
if (gfsflags & GFS2_DIF_TOPDIR)
return -EINVAL;
mask &= ~(GFS2_DIF_TOPDIR | GFS2_DIF_INHERIT_JDATA);
}
return do_gfs2_set_flags(filp, gfsflags, mask);
} }
static long gfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) static long gfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
......
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
@@ -18,7 +18,7 @@
 #include <linux/posix_acl.h>
 #include <linux/gfs2_ondisk.h>
 #include <linux/crc32.h>
-#include <linux/fiemap.h>
+#include <linux/iomap.h>
 #include <linux/security.h>
 #include <linux/uaccess.h>
@@ -189,7 +189,8 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
 		gfs2_set_iop(inode);
 
-		inode->i_atime.tv_sec = 0;
+		/* Lowest possible timestamp; will be overwritten in gfs2_dinode_in. */
+		inode->i_atime.tv_sec = 1LL << (8 * sizeof(inode->i_atime.tv_sec) - 1);
 		inode->i_atime.tv_nsec = 0;
 
 		unlock_new_inode(inode);
@@ -1986,6 +1987,7 @@ static int gfs2_getattr(const struct path *path, struct kstat *stat,
 	struct inode *inode = d_inode(path->dentry);
 	struct gfs2_inode *ip = GFS2_I(inode);
 	struct gfs2_holder gh;
+	u32 gfsflags;
 	int error;
 
 	gfs2_holder_mark_uninitialized(&gh);
@@ -1995,13 +1997,30 @@ static int gfs2_getattr(const struct path *path, struct kstat *stat,
 			return error;
 	}
 
+	gfsflags = ip->i_diskflags;
+	if (gfsflags & GFS2_DIF_APPENDONLY)
+		stat->attributes |= STATX_ATTR_APPEND;
+	if (gfsflags & GFS2_DIF_IMMUTABLE)
+		stat->attributes |= STATX_ATTR_IMMUTABLE;
+
+	stat->attributes_mask |= (STATX_ATTR_APPEND |
+				  STATX_ATTR_COMPRESSED |
+				  STATX_ATTR_ENCRYPTED |
+				  STATX_ATTR_IMMUTABLE |
+				  STATX_ATTR_NODUMP);
+
 	generic_fillattr(inode, stat);
+
 	if (gfs2_holder_initialized(&gh))
 		gfs2_glock_dq_uninit(&gh);
 
 	return 0;
 }
 
+const struct iomap_ops gfs2_iomap_ops = {
+	.iomap_begin = gfs2_iomap_begin,
+};
+
 static int gfs2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		       u64 start, u64 len)
 {
@@ -2009,39 +2028,57 @@ static int gfs2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 	struct gfs2_holder gh;
 	int ret;
 
-	ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
-	if (ret)
-		return ret;
-
-	inode_lock(inode);
+	inode_lock_shared(inode);
 
 	ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
 	if (ret)
 		goto out;
 
-	if (gfs2_is_stuffed(ip)) {
-		u64 phys = ip->i_no_addr << inode->i_blkbits;
-		u64 size = i_size_read(inode);
-		u32 flags = FIEMAP_EXTENT_LAST|FIEMAP_EXTENT_NOT_ALIGNED|
-			    FIEMAP_EXTENT_DATA_INLINE;
-		phys += sizeof(struct gfs2_dinode);
-		phys += start;
-		if (start + len > size)
-			len = size - start;
-		if (start < size)
-			ret = fiemap_fill_next_extent(fieinfo, start, phys,
-						      len, flags);
-		if (ret == 1)
-			ret = 0;
-	} else {
-		ret = __generic_block_fiemap(inode, fieinfo, start, len,
-					     gfs2_block_map);
-	}
+	ret = iomap_fiemap(inode, fieinfo, start, len, &gfs2_iomap_ops);
 
 	gfs2_glock_dq_uninit(&gh);
 out:
-	inode_unlock(inode);
+	inode_unlock_shared(inode);
 	return ret;
 }
 
+loff_t gfs2_seek_data(struct file *file, loff_t offset)
+{
+	struct inode *inode = file->f_mapping->host;
+	struct gfs2_inode *ip = GFS2_I(inode);
+	struct gfs2_holder gh;
+	loff_t ret;
+
+	inode_lock_shared(inode);
+	ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
+	if (!ret)
+		ret = iomap_seek_data(inode, offset, &gfs2_iomap_ops);
+	gfs2_glock_dq_uninit(&gh);
+	inode_unlock_shared(inode);
+
+	if (ret < 0)
+		return ret;
+	return vfs_setpos(file, ret, inode->i_sb->s_maxbytes);
+}
+
+loff_t gfs2_seek_hole(struct file *file, loff_t offset)
+{
+	struct inode *inode = file->f_mapping->host;
+	struct gfs2_inode *ip = GFS2_I(inode);
+	struct gfs2_holder gh;
+	loff_t ret;
+
+	inode_lock_shared(inode);
+	ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
+	if (!ret)
+		ret = iomap_seek_hole(inode, offset, &gfs2_iomap_ops);
+	gfs2_glock_dq_uninit(&gh);
+	inode_unlock_shared(inode);
+
+	if (ret < 0)
+		return ret;
+	return vfs_setpos(file, ret, inode->i_sb->s_maxbytes);
+}
+
 const struct inode_operations gfs2_file_iops = {
......
diff --git a/fs/gfs2/inode.h b/fs/gfs2/inode.h
@@ -109,6 +109,8 @@ extern int gfs2_setattr_simple(struct inode *inode, struct iattr *attr);
 extern struct inode *gfs2_lookup_simple(struct inode *dip, const char *name);
 extern void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf);
 extern int gfs2_open_common(struct inode *inode, struct file *file);
+extern loff_t gfs2_seek_data(struct file *file, loff_t offset);
+extern loff_t gfs2_seek_hole(struct file *file, loff_t offset);
 
 extern const struct inode_operations gfs2_file_iops;
 extern const struct inode_operations gfs2_dir_iops;
......
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
@@ -754,14 +754,15 @@ static int gfs2_write_inode(struct inode *inode, struct writeback_control *wbc)
 	struct address_space *metamapping = gfs2_glock2aspace(ip->i_gl);
 	struct backing_dev_info *bdi = inode_to_bdi(metamapping->host);
 	int ret = 0;
+	bool flush_all = (wbc->sync_mode == WB_SYNC_ALL || gfs2_is_jdata(ip));
 
-	if (wbc->sync_mode == WB_SYNC_ALL)
+	if (flush_all)
 		gfs2_log_flush(GFS2_SB(inode), ip->i_gl, NORMAL_FLUSH);
 	if (bdi->wb.dirty_exceeded)
 		gfs2_ail1_flush(sdp, wbc);
 	else
 		filemap_fdatawrite(metamapping);
-	if (wbc->sync_mode == WB_SYNC_ALL)
+	if (flush_all)
 		ret = filemap_fdatawait(metamapping);
 	if (ret)
 		mark_inode_dirty_sync(inode);
......
diff --git a/fs/gfs2/trace_gfs2.h b/fs/gfs2/trace_gfs2.h
@@ -13,6 +13,7 @@
 #include <linux/gfs2_ondisk.h>
 #include <linux/writeback.h>
 #include <linux/ktime.h>
+#include <linux/iomap.h>
 #include "incore.h"
 #include "glock.h"
 #include "rgrp.h"
@@ -470,6 +471,70 @@ TRACE_EVENT(gfs2_bmap,
		  __entry->errno)
);
TRACE_EVENT(gfs2_iomap_start,
TP_PROTO(const struct gfs2_inode *ip, loff_t pos, ssize_t length,
u16 flags),
TP_ARGS(ip, pos, length, flags),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( u64, inum )
__field( loff_t, pos )
__field( ssize_t, length )
__field( u16, flags )
),
TP_fast_assign(
__entry->dev = ip->i_gl->gl_name.ln_sbd->sd_vfs->s_dev;
__entry->inum = ip->i_no_addr;
__entry->pos = pos;
__entry->length = length;
__entry->flags = flags;
),
TP_printk("%u,%u bmap %llu iomap start %llu/%lu flags:%08x",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long long)__entry->inum,
(unsigned long long)__entry->pos,
(unsigned long)__entry->length, (u16)__entry->flags)
);
TRACE_EVENT(gfs2_iomap_end,
TP_PROTO(const struct gfs2_inode *ip, struct iomap *iomap, int ret),
TP_ARGS(ip, iomap, ret),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( u64, inum )
__field( loff_t, offset )
__field( ssize_t, length )
__field( u16, flags )
__field( u16, type )
__field( int, ret )
),
TP_fast_assign(
__entry->dev = ip->i_gl->gl_name.ln_sbd->sd_vfs->s_dev;
__entry->inum = ip->i_no_addr;
__entry->offset = iomap->offset;
__entry->length = iomap->length;
__entry->flags = iomap->flags;
__entry->type = iomap->type;
__entry->ret = ret;
),
TP_printk("%u,%u bmap %llu iomap end %llu/%lu ty:%d flags:%08x rc:%d",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long long)__entry->inum,
(unsigned long long)__entry->offset,
(unsigned long)__entry->length, (u16)__entry->type,
(u16)__entry->flags, __entry->ret)
);
/* Keep track of blocks as they are allocated/freed */
TRACE_EVENT(gfs2_block_alloc,
......
diff --git a/fs/gfs2/trans.c b/fs/gfs2/trans.c
@@ -145,7 +145,7 @@ static struct gfs2_bufdata *gfs2_alloc_bufdata(struct gfs2_glock *gl,
  *
  * This is used in two distinct cases:
  * i) In ordered write mode
- *    We put the data buffer on a list so that we can ensure that its
+ *    We put the data buffer on a list so that we can ensure that it's
  *    synced to disk at the right time
  * ii) In journaled data mode
  *    We need to journal the data block in the same way as metadata in
......
...@@ -231,7 +231,6 @@ static int ea_dealloc_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh, ...@@ -231,7 +231,6 @@ static int ea_dealloc_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh,
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
struct gfs2_rgrpd *rgd; struct gfs2_rgrpd *rgd;
struct gfs2_holder rg_gh; struct gfs2_holder rg_gh;
struct buffer_head *dibh;
__be64 *dataptrs; __be64 *dataptrs;
u64 bn = 0; u64 bn = 0;
u64 bstart = 0; u64 bstart = 0;
...@@ -308,13 +307,8 @@ static int ea_dealloc_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh, ...@@ -308,13 +307,8 @@ static int ea_dealloc_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh,
ea->ea_num_ptrs = 0; ea->ea_num_ptrs = 0;
} }
error = gfs2_meta_inode_buffer(ip, &dibh);
if (!error) {
ip->i_inode.i_ctime = current_time(&ip->i_inode); ip->i_inode.i_ctime = current_time(&ip->i_inode);
gfs2_trans_add_meta(ip->i_gl, dibh); __mark_inode_dirty(&ip->i_inode, I_DIRTY_SYNC | I_DIRTY_DATASYNC);
gfs2_dinode_out(ip, dibh->b_data);
brelse(dibh);
}
gfs2_trans_end(sdp); gfs2_trans_end(sdp);
...@@ -616,7 +610,6 @@ static int gfs2_xattr_get(const struct xattr_handler *handler, ...@@ -616,7 +610,6 @@ static int gfs2_xattr_get(const struct xattr_handler *handler,
{ {
struct gfs2_inode *ip = GFS2_I(inode); struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_holder gh; struct gfs2_holder gh;
bool need_unlock = false;
int ret; int ret;
/* During lookup, SELinux calls this function with the glock locked. */ /* During lookup, SELinux calls this function with the glock locked. */
...@@ -625,10 +618,11 @@ static int gfs2_xattr_get(const struct xattr_handler *handler, ...@@ -625,10 +618,11 @@ static int gfs2_xattr_get(const struct xattr_handler *handler,
ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &gh); ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &gh);
if (ret) if (ret)
return ret; return ret;
need_unlock = true; } else {
gfs2_holder_mark_uninitialized(&gh);
} }
ret = __gfs2_xattr_get(inode, name, buffer, size, handler->flags); ret = __gfs2_xattr_get(inode, name, buffer, size, handler->flags);
if (need_unlock) if (gfs2_holder_initialized(&gh))
gfs2_glock_dq_uninit(&gh); gfs2_glock_dq_uninit(&gh);
return ret; return ret;
} }
...@@ -749,7 +743,6 @@ static int ea_alloc_skeleton(struct gfs2_inode *ip, struct gfs2_ea_request *er, ...@@ -749,7 +743,6 @@ static int ea_alloc_skeleton(struct gfs2_inode *ip, struct gfs2_ea_request *er,
ea_skeleton_call_t skeleton_call, void *private) ea_skeleton_call_t skeleton_call, void *private)
{ {
struct gfs2_alloc_parms ap = { .target = blks }; struct gfs2_alloc_parms ap = { .target = blks };
struct buffer_head *dibh;
int error; int error;
error = gfs2_rindex_update(GFS2_SB(&ip->i_inode)); error = gfs2_rindex_update(GFS2_SB(&ip->i_inode));
...@@ -774,13 +767,8 @@ static int ea_alloc_skeleton(struct gfs2_inode *ip, struct gfs2_ea_request *er, ...@@ -774,13 +767,8 @@ static int ea_alloc_skeleton(struct gfs2_inode *ip, struct gfs2_ea_request *er,
if (error) if (error)
goto out_end_trans; goto out_end_trans;
error = gfs2_meta_inode_buffer(ip, &dibh);
if (!error) {
ip->i_inode.i_ctime = current_time(&ip->i_inode); ip->i_inode.i_ctime = current_time(&ip->i_inode);
gfs2_trans_add_meta(ip->i_gl, dibh); __mark_inode_dirty(&ip->i_inode, I_DIRTY_SYNC | I_DIRTY_DATASYNC);
gfs2_dinode_out(ip, dibh->b_data);
brelse(dibh);
}
out_end_trans: out_end_trans:
gfs2_trans_end(GFS2_SB(&ip->i_inode)); gfs2_trans_end(GFS2_SB(&ip->i_inode));
...@@ -891,7 +879,6 @@ static int ea_set_simple_noalloc(struct gfs2_inode *ip, struct buffer_head *bh, ...@@ -891,7 +879,6 @@ static int ea_set_simple_noalloc(struct gfs2_inode *ip, struct buffer_head *bh,
struct gfs2_ea_header *ea, struct ea_set *es) struct gfs2_ea_header *ea, struct ea_set *es)
{ {
struct gfs2_ea_request *er = es->es_er; struct gfs2_ea_request *er = es->es_er;
struct buffer_head *dibh;
int error; int error;
error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), RES_DINODE + 2 * RES_EATTR, 0); error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), RES_DINODE + 2 * RES_EATTR, 0);
...@@ -908,14 +895,9 @@ static int ea_set_simple_noalloc(struct gfs2_inode *ip, struct buffer_head *bh, ...@@ -908,14 +895,9 @@ static int ea_set_simple_noalloc(struct gfs2_inode *ip, struct buffer_head *bh,
if (es->es_el) if (es->es_el)
ea_set_remove_stuffed(ip, es->es_el); ea_set_remove_stuffed(ip, es->es_el);
error = gfs2_meta_inode_buffer(ip, &dibh);
if (error)
goto out;
ip->i_inode.i_ctime = current_time(&ip->i_inode); ip->i_inode.i_ctime = current_time(&ip->i_inode);
gfs2_trans_add_meta(ip->i_gl, dibh); __mark_inode_dirty(&ip->i_inode, I_DIRTY_SYNC | I_DIRTY_DATASYNC);
gfs2_dinode_out(ip, dibh->b_data);
brelse(dibh);
out:
gfs2_trans_end(GFS2_SB(&ip->i_inode)); gfs2_trans_end(GFS2_SB(&ip->i_inode));
return error; return error;
} }
...@@ -1111,7 +1093,6 @@ static int ea_remove_stuffed(struct gfs2_inode *ip, struct gfs2_ea_location *el) ...@@ -1111,7 +1093,6 @@ static int ea_remove_stuffed(struct gfs2_inode *ip, struct gfs2_ea_location *el)
{ {
struct gfs2_ea_header *ea = el->el_ea; struct gfs2_ea_header *ea = el->el_ea;
struct gfs2_ea_header *prev = el->el_prev; struct gfs2_ea_header *prev = el->el_prev;
struct buffer_head *dibh;
int error; int error;
error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), RES_DINODE + RES_EATTR, 0); error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), RES_DINODE + RES_EATTR, 0);
...@@ -1132,13 +1113,8 @@ static int ea_remove_stuffed(struct gfs2_inode *ip, struct gfs2_ea_location *el) ...@@ -1132,13 +1113,8 @@ static int ea_remove_stuffed(struct gfs2_inode *ip, struct gfs2_ea_location *el)
ea->ea_type = GFS2_EATYPE_UNUSED; ea->ea_type = GFS2_EATYPE_UNUSED;
} }
error = gfs2_meta_inode_buffer(ip, &dibh);
if (!error) {
ip->i_inode.i_ctime = current_time(&ip->i_inode); ip->i_inode.i_ctime = current_time(&ip->i_inode);
gfs2_trans_add_meta(ip->i_gl, dibh); __mark_inode_dirty(&ip->i_inode, I_DIRTY_SYNC | I_DIRTY_DATASYNC);
gfs2_dinode_out(ip, dibh->b_data);
brelse(dibh);
}
gfs2_trans_end(GFS2_SB(&ip->i_inode)); gfs2_trans_end(GFS2_SB(&ip->i_inode));
...@@ -1268,10 +1244,19 @@ static int gfs2_xattr_set(const struct xattr_handler *handler, ...@@ -1268,10 +1244,19 @@ static int gfs2_xattr_set(const struct xattr_handler *handler,
if (ret) if (ret)
return ret; return ret;
/* May be called from gfs_setattr with the glock locked. */
if (!gfs2_glock_is_locked_by_me(ip->i_gl)) {
ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh); ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
if (ret) if (ret)
return ret; return ret;
} else {
if (WARN_ON_ONCE(ip->i_gl->gl_state != LM_ST_EXCLUSIVE))
return -EIO;
gfs2_holder_mark_uninitialized(&gh);
}
ret = __gfs2_xattr_set(inode, name, value, size, flags, handler->flags); ret = __gfs2_xattr_set(inode, name, value, size, flags, handler->flags);
if (gfs2_holder_initialized(&gh))
gfs2_glock_dq_uninit(&gh); gfs2_glock_dq_uninit(&gh);
return ret; return ret;
} }
......
diff --git a/include/linux/iomap.h b/include/linux/iomap.h
@@ -23,6 +23,7 @@ struct vm_fault;
  * Flags for all iomap mappings:
  */
 #define IOMAP_F_NEW		0x01	/* blocks have been newly allocated */
+#define IOMAP_F_BOUNDARY	0x02	/* mapping ends at metadata boundary */
 
 /*
  * Flags that only need to be reported for IOMAP_REPORT requests:
......