Commit af8cb8aa authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ryusuke/nilfs2

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ryusuke/nilfs2: (21 commits)
  fs/Kconfig: move nilfs2 outside misc filesystems
  nilfs2: convert nilfs_bmap_lookup to an inline function
  nilfs2: allow btree code to directly call dat operations
  nilfs2: add update functions of virtual block address to dat
  nilfs2: remove individual gfp constants for each metadata file
  nilfs2: stop zero-fill of btree path just before free it
  nilfs2: remove unused btree argument from btree functions
  nilfs2: remove nilfs_dat_abort_start and nilfs_dat_abort_free
  nilfs2: shorten freeze period due to GC in write operation v3
  nilfs2: add more check routines in mount process
  nilfs2: An unassigned variable is assigned to a never used structure member
  nilfs2: use GFP_NOIO for bio_alloc instead of GFP_NOWAIT
  nilfs2: stop using periodic write_super callback
  nilfs2: clean up nilfs_write_super
  nilfs2: fix disorder of nilfs_write_super in nilfs_sync_fs
  nilfs2: remove redundant super block commit
  nilfs2: implement nilfs_show_options to display mount options in /proc/mounts
  nilfs2: always lookup disk block address before reading metadata block
  nilfs2: use semaphore to protect pointer to a writable FS-instance
  nilfs2: fix format string compile warning (ino_t)
  ...
......@@ -43,6 +43,7 @@ source "fs/xfs/Kconfig"
source "fs/gfs2/Kconfig"
source "fs/ocfs2/Kconfig"
source "fs/btrfs/Kconfig"
source "fs/nilfs2/Kconfig"
endif # BLOCK
......@@ -186,7 +187,6 @@ source "fs/romfs/Kconfig"
source "fs/sysv/Kconfig"
source "fs/ufs/Kconfig"
source "fs/exofs/Kconfig"
source "fs/nilfs2/Kconfig"
endif # MISC_FILESYSTEMS
......
config NILFS2_FS
tristate "NILFS2 file system support (EXPERIMENTAL)"
depends on BLOCK && EXPERIMENTAL
depends on EXPERIMENTAL
select CRC32
help
NILFS2 is a log-structured file system (LFS) supporting continuous
......
......@@ -36,6 +36,26 @@ struct inode *nilfs_bmap_get_dat(const struct nilfs_bmap *bmap)
return nilfs_dat_inode(NILFS_I_NILFS(bmap->b_inode));
}
/**
* nilfs_bmap_lookup_at_level - find a data block or node block
* @bmap: bmap
* @key: key
* @level: level
* @ptrp: place to store the value associated to @key
*
* Description: nilfs_bmap_lookup_at_level() finds a record whose key
* matches @key in the block at @level of the bmap.
*
* Return Value: On success, 0 is returned and the record associated with @key
* is stored in the place pointed by @ptrp. On error, one of the following
* negative error codes is returned.
*
* %-EIO - I/O error.
*
* %-ENOMEM - Insufficient amount of memory available.
*
* %-ENOENT - A record associated with @key does not exist.
*/
int nilfs_bmap_lookup_at_level(struct nilfs_bmap *bmap, __u64 key, int level,
__u64 *ptrp)
{
......@@ -69,39 +89,6 @@ int nilfs_bmap_lookup_contig(struct nilfs_bmap *bmap, __u64 key, __u64 *ptrp,
return ret;
}
/**
* nilfs_bmap_lookup - find a record
* @bmap: bmap
* @key: key
* @recp: pointer to record
*
* Description: nilfs_bmap_lookup() finds a record whose key matches @key in
* @bmap.
*
* Return Value: On success, 0 is returned and the record associated with @key
* is stored in the place pointed by @recp. On error, one of the following
* negative error codes is returned.
*
* %-EIO - I/O error.
*
* %-ENOMEM - Insufficient amount of memory available.
*
* %-ENOENT - A record associated with @key does not exist.
*/
int nilfs_bmap_lookup(struct nilfs_bmap *bmap,
unsigned long key,
unsigned long *recp)
{
__u64 ptr;
int ret;
/* XXX: use macro for level 1 */
ret = nilfs_bmap_lookup_at_level(bmap, key, 1, &ptr);
if (recp != NULL)
*recp = ptr;
return ret;
}
static int nilfs_bmap_do_insert(struct nilfs_bmap *bmap, __u64 key, __u64 ptr)
{
__u64 keys[NILFS_BMAP_SMALL_HIGH + 1];
......@@ -469,104 +456,6 @@ __u64 nilfs_bmap_find_target_in_group(const struct nilfs_bmap *bmap)
(entries_per_group / NILFS_BMAP_GROUP_DIV);
}
int nilfs_bmap_prepare_alloc_v(struct nilfs_bmap *bmap,
union nilfs_bmap_ptr_req *req)
{
return nilfs_dat_prepare_alloc(nilfs_bmap_get_dat(bmap), &req->bpr_req);
}
void nilfs_bmap_commit_alloc_v(struct nilfs_bmap *bmap,
union nilfs_bmap_ptr_req *req)
{
nilfs_dat_commit_alloc(nilfs_bmap_get_dat(bmap), &req->bpr_req);
}
void nilfs_bmap_abort_alloc_v(struct nilfs_bmap *bmap,
union nilfs_bmap_ptr_req *req)
{
nilfs_dat_abort_alloc(nilfs_bmap_get_dat(bmap), &req->bpr_req);
}
int nilfs_bmap_start_v(struct nilfs_bmap *bmap, union nilfs_bmap_ptr_req *req,
sector_t blocknr)
{
struct inode *dat = nilfs_bmap_get_dat(bmap);
int ret;
ret = nilfs_dat_prepare_start(dat, &req->bpr_req);
if (likely(!ret))
nilfs_dat_commit_start(dat, &req->bpr_req, blocknr);
return ret;
}
int nilfs_bmap_prepare_end_v(struct nilfs_bmap *bmap,
union nilfs_bmap_ptr_req *req)
{
return nilfs_dat_prepare_end(nilfs_bmap_get_dat(bmap), &req->bpr_req);
}
void nilfs_bmap_commit_end_v(struct nilfs_bmap *bmap,
union nilfs_bmap_ptr_req *req)
{
nilfs_dat_commit_end(nilfs_bmap_get_dat(bmap), &req->bpr_req,
bmap->b_ptr_type == NILFS_BMAP_PTR_VS);
}
void nilfs_bmap_abort_end_v(struct nilfs_bmap *bmap,
union nilfs_bmap_ptr_req *req)
{
nilfs_dat_abort_end(nilfs_bmap_get_dat(bmap), &req->bpr_req);
}
int nilfs_bmap_move_v(const struct nilfs_bmap *bmap, __u64 vblocknr,
sector_t blocknr)
{
return nilfs_dat_move(nilfs_bmap_get_dat(bmap), vblocknr, blocknr);
}
int nilfs_bmap_mark_dirty(const struct nilfs_bmap *bmap, __u64 vblocknr)
{
return nilfs_dat_mark_dirty(nilfs_bmap_get_dat(bmap), vblocknr);
}
int nilfs_bmap_prepare_update_v(struct nilfs_bmap *bmap,
union nilfs_bmap_ptr_req *oldreq,
union nilfs_bmap_ptr_req *newreq)
{
struct inode *dat = nilfs_bmap_get_dat(bmap);
int ret;
ret = nilfs_dat_prepare_end(dat, &oldreq->bpr_req);
if (ret < 0)
return ret;
ret = nilfs_dat_prepare_alloc(dat, &newreq->bpr_req);
if (ret < 0)
nilfs_dat_abort_end(dat, &oldreq->bpr_req);
return ret;
}
void nilfs_bmap_commit_update_v(struct nilfs_bmap *bmap,
union nilfs_bmap_ptr_req *oldreq,
union nilfs_bmap_ptr_req *newreq)
{
struct inode *dat = nilfs_bmap_get_dat(bmap);
nilfs_dat_commit_end(dat, &oldreq->bpr_req,
bmap->b_ptr_type == NILFS_BMAP_PTR_VS);
nilfs_dat_commit_alloc(dat, &newreq->bpr_req);
}
void nilfs_bmap_abort_update_v(struct nilfs_bmap *bmap,
union nilfs_bmap_ptr_req *oldreq,
union nilfs_bmap_ptr_req *newreq)
{
struct inode *dat = nilfs_bmap_get_dat(bmap);
nilfs_dat_abort_end(dat, &oldreq->bpr_req);
nilfs_dat_abort_alloc(dat, &newreq->bpr_req);
}
static struct lock_class_key nilfs_bmap_dat_lock_key;
static struct lock_class_key nilfs_bmap_mdt_lock_key;
......
......@@ -28,6 +28,7 @@
#include <linux/buffer_head.h>
#include <linux/nilfs2_fs.h>
#include "alloc.h"
#include "dat.h"
#define NILFS_BMAP_INVALID_PTR 0
......@@ -141,7 +142,6 @@ struct nilfs_bmap {
int nilfs_bmap_test_and_clear_dirty(struct nilfs_bmap *);
int nilfs_bmap_read(struct nilfs_bmap *, struct nilfs_inode *);
void nilfs_bmap_write(struct nilfs_bmap *, struct nilfs_inode *);
int nilfs_bmap_lookup(struct nilfs_bmap *, unsigned long, unsigned long *);
int nilfs_bmap_lookup_contig(struct nilfs_bmap *, __u64, __u64 *, unsigned);
int nilfs_bmap_insert(struct nilfs_bmap *, unsigned long, unsigned long);
int nilfs_bmap_delete(struct nilfs_bmap *, unsigned long);
......@@ -160,90 +160,76 @@ void nilfs_bmap_init_gcdat(struct nilfs_bmap *, struct nilfs_bmap *);
void nilfs_bmap_commit_gcdat(struct nilfs_bmap *, struct nilfs_bmap *);
static inline int nilfs_bmap_lookup(struct nilfs_bmap *bmap, __u64 key,
__u64 *ptr)
{
return nilfs_bmap_lookup_at_level(bmap, key, 1, ptr);
}
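The out-of-line nilfs_bmap_lookup() deleted from bmap.c above survives as this inline wrapper around nilfs_bmap_lookup_at_level() at level 1, with its key and pointer arguments widened from unsigned long to __u64. A minimal caller sketch follows; the function name and error handling are illustrative and not part of the patch:
/*
 * Editor's sketch (hypothetical caller): translate a file block offset
 * into a bmap pointer.  Note the __u64 argument types; callers such as
 * nilfs_mdt_submit_block() in the mdt.c hunk below widen their local
 * variables accordingly.
 */
static int example_lookup_block(struct nilfs_bmap *bmap, __u64 blkoff,
				sector_t *blocknrp)
{
	__u64 ptr;
	int ret;

	ret = nilfs_bmap_lookup(bmap, blkoff, &ptr); /* level-1 lookup */
	if (ret)
		return ret;		/* -ENOENT, -EIO or -ENOMEM */
	*blocknrp = (sector_t)ptr;
	return 0;
}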
/*
* Internal use only
*/
struct inode *nilfs_bmap_get_dat(const struct nilfs_bmap *);
int nilfs_bmap_prepare_alloc_v(struct nilfs_bmap *,
union nilfs_bmap_ptr_req *);
void nilfs_bmap_commit_alloc_v(struct nilfs_bmap *,
union nilfs_bmap_ptr_req *);
void nilfs_bmap_abort_alloc_v(struct nilfs_bmap *,
union nilfs_bmap_ptr_req *);
static inline int nilfs_bmap_prepare_alloc_ptr(struct nilfs_bmap *bmap,
union nilfs_bmap_ptr_req *req)
union nilfs_bmap_ptr_req *req,
struct inode *dat)
{
if (NILFS_BMAP_USE_VBN(bmap))
return nilfs_bmap_prepare_alloc_v(bmap, req);
if (dat)
return nilfs_dat_prepare_alloc(dat, &req->bpr_req);
/* ignore target ptr */
req->bpr_ptr = bmap->b_last_allocated_ptr++;
return 0;
}
static inline void nilfs_bmap_commit_alloc_ptr(struct nilfs_bmap *bmap,
union nilfs_bmap_ptr_req *req)
union nilfs_bmap_ptr_req *req,
struct inode *dat)
{
if (NILFS_BMAP_USE_VBN(bmap))
nilfs_bmap_commit_alloc_v(bmap, req);
if (dat)
nilfs_dat_commit_alloc(dat, &req->bpr_req);
}
static inline void nilfs_bmap_abort_alloc_ptr(struct nilfs_bmap *bmap,
union nilfs_bmap_ptr_req *req)
union nilfs_bmap_ptr_req *req,
struct inode *dat)
{
if (NILFS_BMAP_USE_VBN(bmap))
nilfs_bmap_abort_alloc_v(bmap, req);
if (dat)
nilfs_dat_abort_alloc(dat, &req->bpr_req);
else
bmap->b_last_allocated_ptr--;
}
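These *_alloc_ptr helpers now receive the DAT inode explicitly instead of testing NILFS_BMAP_USE_VBN() inside the removed nilfs_bmap_*_alloc_v() wrappers; a NULL dat selects the plain b_last_allocated_ptr path. A hedged sketch of the prepare/commit/abort discipline they expect (the function name is illustrative; the real callers are nilfs_direct_insert() below and the btree code):
/* Editor's sketch, not part of the patch: two-phase pointer allocation. */
static int example_alloc_ptr(struct nilfs_bmap *bmap,
			     union nilfs_bmap_ptr_req *req,
			     struct inode *dat)
{
	int ret;

	ret = nilfs_bmap_prepare_alloc_ptr(bmap, req, dat);
	if (ret < 0)
		return ret;
	/*
	 * ... build the new block here; on failure call
	 * nilfs_bmap_abort_alloc_ptr(bmap, req, dat) and bail out ...
	 */
	nilfs_bmap_commit_alloc_ptr(bmap, req, dat);
	return 0;
}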
int nilfs_bmap_prepare_end_v(struct nilfs_bmap *, union nilfs_bmap_ptr_req *);
void nilfs_bmap_commit_end_v(struct nilfs_bmap *, union nilfs_bmap_ptr_req *);
void nilfs_bmap_abort_end_v(struct nilfs_bmap *, union nilfs_bmap_ptr_req *);
static inline int nilfs_bmap_prepare_end_ptr(struct nilfs_bmap *bmap,
union nilfs_bmap_ptr_req *req)
union nilfs_bmap_ptr_req *req,
struct inode *dat)
{
return NILFS_BMAP_USE_VBN(bmap) ?
nilfs_bmap_prepare_end_v(bmap, req) : 0;
return dat ? nilfs_dat_prepare_end(dat, &req->bpr_req) : 0;
}
static inline void nilfs_bmap_commit_end_ptr(struct nilfs_bmap *bmap,
union nilfs_bmap_ptr_req *req)
union nilfs_bmap_ptr_req *req,
struct inode *dat)
{
if (NILFS_BMAP_USE_VBN(bmap))
nilfs_bmap_commit_end_v(bmap, req);
if (dat)
nilfs_dat_commit_end(dat, &req->bpr_req,
bmap->b_ptr_type == NILFS_BMAP_PTR_VS);
}
static inline void nilfs_bmap_abort_end_ptr(struct nilfs_bmap *bmap,
union nilfs_bmap_ptr_req *req)
union nilfs_bmap_ptr_req *req,
struct inode *dat)
{
if (NILFS_BMAP_USE_VBN(bmap))
nilfs_bmap_abort_end_v(bmap, req);
if (dat)
nilfs_dat_abort_end(dat, &req->bpr_req);
}
int nilfs_bmap_start_v(struct nilfs_bmap *, union nilfs_bmap_ptr_req *,
sector_t);
int nilfs_bmap_move_v(const struct nilfs_bmap *, __u64, sector_t);
int nilfs_bmap_mark_dirty(const struct nilfs_bmap *, __u64);
__u64 nilfs_bmap_data_get_key(const struct nilfs_bmap *,
const struct buffer_head *);
__u64 nilfs_bmap_find_target_seq(const struct nilfs_bmap *, __u64);
__u64 nilfs_bmap_find_target_in_group(const struct nilfs_bmap *);
int nilfs_bmap_prepare_update_v(struct nilfs_bmap *,
union nilfs_bmap_ptr_req *,
union nilfs_bmap_ptr_req *);
void nilfs_bmap_commit_update_v(struct nilfs_bmap *,
union nilfs_bmap_ptr_req *,
union nilfs_bmap_ptr_req *);
void nilfs_bmap_abort_update_v(struct nilfs_bmap *,
union nilfs_bmap_ptr_req *,
union nilfs_bmap_ptr_req *);
void nilfs_bmap_add_blocks(const struct nilfs_bmap *, int);
void nilfs_bmap_sub_blocks(const struct nilfs_bmap *, int);
......
This diff is collapsed.
......@@ -815,8 +815,10 @@ int nilfs_cpfile_is_snapshot(struct inode *cpfile, __u64 cno)
void *kaddr;
int ret;
if (cno == 0)
return -ENOENT; /* checkpoint number 0 is invalid */
/* CP number is invalid if it's zero or larger than the
   largest existing one. */
if (cno == 0 || cno >= nilfs_mdt_cno(cpfile))
return -ENOENT;
down_read(&NILFS_MDT(cpfile)->mi_sem);
ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &bh);
......@@ -824,7 +826,10 @@ int nilfs_cpfile_is_snapshot(struct inode *cpfile, __u64 cno)
goto out;
kaddr = kmap_atomic(bh->b_page, KM_USER0);
cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, bh, kaddr);
ret = nilfs_checkpoint_snapshot(cp);
if (nilfs_checkpoint_invalid(cp))
ret = -ENOENT;
else
ret = nilfs_checkpoint_snapshot(cp);
kunmap_atomic(kaddr, KM_USER0);
brelse(bh);
......
......@@ -27,8 +27,6 @@
#include <linux/buffer_head.h>
#include <linux/nilfs2_fs.h>
#define NILFS_CPFILE_GFP NILFS_MDT_GFP
int nilfs_cpfile_get_checkpoint(struct inode *, __u64, int,
struct nilfs_checkpoint **,
......
......@@ -109,12 +109,6 @@ void nilfs_dat_commit_free(struct inode *dat, struct nilfs_palloc_req *req)
nilfs_palloc_commit_free_entry(dat, req);
}
void nilfs_dat_abort_free(struct inode *dat, struct nilfs_palloc_req *req)
{
nilfs_dat_abort_entry(dat, req);
nilfs_palloc_abort_free_entry(dat, req);
}
int nilfs_dat_prepare_start(struct inode *dat, struct nilfs_palloc_req *req)
{
int ret;
......@@ -140,11 +134,6 @@ void nilfs_dat_commit_start(struct inode *dat, struct nilfs_palloc_req *req,
nilfs_dat_commit_entry(dat, req);
}
void nilfs_dat_abort_start(struct inode *dat, struct nilfs_palloc_req *req)
{
nilfs_dat_abort_entry(dat, req);
}
int nilfs_dat_prepare_end(struct inode *dat, struct nilfs_palloc_req *req)
{
struct nilfs_dat_entry *entry;
......@@ -222,6 +211,37 @@ void nilfs_dat_abort_end(struct inode *dat, struct nilfs_palloc_req *req)
nilfs_dat_abort_entry(dat, req);
}
int nilfs_dat_prepare_update(struct inode *dat,
struct nilfs_palloc_req *oldreq,
struct nilfs_palloc_req *newreq)
{
int ret;
ret = nilfs_dat_prepare_end(dat, oldreq);
if (!ret) {
ret = nilfs_dat_prepare_alloc(dat, newreq);
if (ret < 0)
nilfs_dat_abort_end(dat, oldreq);
}
return ret;
}
void nilfs_dat_commit_update(struct inode *dat,
struct nilfs_palloc_req *oldreq,
struct nilfs_palloc_req *newreq, int dead)
{
nilfs_dat_commit_end(dat, oldreq, dead);
nilfs_dat_commit_alloc(dat, newreq);
}
void nilfs_dat_abort_update(struct inode *dat,
struct nilfs_palloc_req *oldreq,
struct nilfs_palloc_req *newreq)
{
nilfs_dat_abort_end(dat, oldreq);
nilfs_dat_abort_alloc(dat, newreq);
}
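These three helpers bundle the end-of-old plus alloc-of-new virtual block address transition that the bmap layer previously open-coded through nilfs_bmap_prepare_update_v() and friends. A hedged caller sketch, mirroring how nilfs_direct_propagate() drives them later in this diff (the function name is illustrative; the dead flag says whether the old virtual address dies immediately):
/* Editor's sketch, not part of the patch: move a block to a fresh
 * virtual address in the DAT.
 */
static int example_update_vblocknr(struct inode *dat, __u64 vblocknr,
				   int dead, __u64 *new_vblocknrp)
{
	struct nilfs_palloc_req oldreq, newreq;
	int ret;

	oldreq.pr_entry_nr = vblocknr;
	newreq.pr_entry_nr = vblocknr;	/* allocation hint */

	ret = nilfs_dat_prepare_update(dat, &oldreq, &newreq);
	if (ret < 0)
		return ret;	/* prepare already rolled itself back */

	/*
	 * ... if a later step fails, call
	 * nilfs_dat_abort_update(dat, &oldreq, &newreq) instead ...
	 */
	nilfs_dat_commit_update(dat, &oldreq, &newreq, dead);
	*new_vblocknrp = newreq.pr_entry_nr;
	return 0;
}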
/**
* nilfs_dat_mark_dirty -
* @dat: DAT file inode
......
......@@ -27,7 +27,6 @@
#include <linux/buffer_head.h>
#include <linux/fs.h>
#define NILFS_DAT_GFP NILFS_MDT_GFP
struct nilfs_palloc_req;
......@@ -39,10 +38,15 @@ void nilfs_dat_abort_alloc(struct inode *, struct nilfs_palloc_req *);
int nilfs_dat_prepare_start(struct inode *, struct nilfs_palloc_req *);
void nilfs_dat_commit_start(struct inode *, struct nilfs_palloc_req *,
sector_t);
void nilfs_dat_abort_start(struct inode *, struct nilfs_palloc_req *);
int nilfs_dat_prepare_end(struct inode *, struct nilfs_palloc_req *);
void nilfs_dat_commit_end(struct inode *, struct nilfs_palloc_req *, int);
void nilfs_dat_abort_end(struct inode *, struct nilfs_palloc_req *);
int nilfs_dat_prepare_update(struct inode *, struct nilfs_palloc_req *,
struct nilfs_palloc_req *);
void nilfs_dat_commit_update(struct inode *, struct nilfs_palloc_req *,
struct nilfs_palloc_req *, int);
void nilfs_dat_abort_update(struct inode *, struct nilfs_palloc_req *,
struct nilfs_palloc_req *);
int nilfs_dat_mark_dirty(struct inode *, __u64);
int nilfs_dat_freev(struct inode *, __u64 *, size_t);
......
......@@ -125,106 +125,64 @@ static void nilfs_direct_set_target_v(struct nilfs_direct *direct,
direct->d_bmap.b_last_allocated_ptr = ptr;
}
static int nilfs_direct_prepare_insert(struct nilfs_direct *direct,
__u64 key,
union nilfs_bmap_ptr_req *req,
struct nilfs_bmap_stats *stats)
{
int ret;
if (NILFS_BMAP_USE_VBN(&direct->d_bmap))
req->bpr_ptr = nilfs_direct_find_target_v(direct, key);
ret = nilfs_bmap_prepare_alloc_ptr(&direct->d_bmap, req);
if (ret < 0)
return ret;
stats->bs_nblocks = 1;
return 0;
}
static void nilfs_direct_commit_insert(struct nilfs_direct *direct,
union nilfs_bmap_ptr_req *req,
__u64 key, __u64 ptr)
{
struct buffer_head *bh;
/* ptr must be a pointer to a buffer head. */
bh = (struct buffer_head *)((unsigned long)ptr);
set_buffer_nilfs_volatile(bh);
nilfs_bmap_commit_alloc_ptr(&direct->d_bmap, req);
nilfs_direct_set_ptr(direct, key, req->bpr_ptr);
if (!nilfs_bmap_dirty(&direct->d_bmap))
nilfs_bmap_set_dirty(&direct->d_bmap);
if (NILFS_BMAP_USE_VBN(&direct->d_bmap))
nilfs_direct_set_target_v(direct, key, req->bpr_ptr);
}
static int nilfs_direct_insert(struct nilfs_bmap *bmap, __u64 key, __u64 ptr)
{
struct nilfs_direct *direct;
struct nilfs_direct *direct = (struct nilfs_direct *)bmap;
union nilfs_bmap_ptr_req req;
struct nilfs_bmap_stats stats;
struct inode *dat = NULL;
struct buffer_head *bh;
int ret;
direct = (struct nilfs_direct *)bmap;
if (key > NILFS_DIRECT_KEY_MAX)
return -ENOENT;
if (nilfs_direct_get_ptr(direct, key) != NILFS_BMAP_INVALID_PTR)
return -EEXIST;
ret = nilfs_direct_prepare_insert(direct, key, &req, &stats);
if (ret < 0)
return ret;
nilfs_direct_commit_insert(direct, &req, key, ptr);
nilfs_bmap_add_blocks(bmap, stats.bs_nblocks);
if (NILFS_BMAP_USE_VBN(bmap)) {
req.bpr_ptr = nilfs_direct_find_target_v(direct, key);
dat = nilfs_bmap_get_dat(bmap);
}
ret = nilfs_bmap_prepare_alloc_ptr(bmap, &req, dat);
if (!ret) {
/* ptr must be a pointer to a buffer head. */
bh = (struct buffer_head *)((unsigned long)ptr);
set_buffer_nilfs_volatile(bh);
return 0;
}
nilfs_bmap_commit_alloc_ptr(bmap, &req, dat);
nilfs_direct_set_ptr(direct, key, req.bpr_ptr);
static int nilfs_direct_prepare_delete(struct nilfs_direct *direct,
union nilfs_bmap_ptr_req *req,
__u64 key,
struct nilfs_bmap_stats *stats)
{
int ret;
if (!nilfs_bmap_dirty(bmap))
nilfs_bmap_set_dirty(bmap);
req->bpr_ptr = nilfs_direct_get_ptr(direct, key);
ret = nilfs_bmap_prepare_end_ptr(&direct->d_bmap, req);
if (!ret)
stats->bs_nblocks = 1;
return ret;
}
if (NILFS_BMAP_USE_VBN(bmap))
nilfs_direct_set_target_v(direct, key, req.bpr_ptr);
static void nilfs_direct_commit_delete(struct nilfs_direct *direct,
union nilfs_bmap_ptr_req *req,
__u64 key)
{
nilfs_bmap_commit_end_ptr(&direct->d_bmap, req);
nilfs_direct_set_ptr(direct, key, NILFS_BMAP_INVALID_PTR);
nilfs_bmap_add_blocks(bmap, 1);
}
return ret;
}
static int nilfs_direct_delete(struct nilfs_bmap *bmap, __u64 key)
{
struct nilfs_direct *direct;
struct nilfs_direct *direct = (struct nilfs_direct *)bmap;
union nilfs_bmap_ptr_req req;
struct nilfs_bmap_stats stats;
struct inode *dat;
int ret;
direct = (struct nilfs_direct *)bmap;
if ((key > NILFS_DIRECT_KEY_MAX) ||
if (key > NILFS_DIRECT_KEY_MAX ||
nilfs_direct_get_ptr(direct, key) == NILFS_BMAP_INVALID_PTR)
return -ENOENT;
ret = nilfs_direct_prepare_delete(direct, &req, key, &stats);
if (ret < 0)
return ret;
nilfs_direct_commit_delete(direct, &req, key);
nilfs_bmap_sub_blocks(bmap, stats.bs_nblocks);
dat = NILFS_BMAP_USE_VBN(bmap) ? nilfs_bmap_get_dat(bmap) : NULL;
req.bpr_ptr = nilfs_direct_get_ptr(direct, key);
return 0;
ret = nilfs_bmap_prepare_end_ptr(bmap, &req, dat);
if (!ret) {
nilfs_bmap_commit_end_ptr(bmap, &req, dat);
nilfs_direct_set_ptr(direct, key, NILFS_BMAP_INVALID_PTR);
nilfs_bmap_sub_blocks(bmap, 1);
}
return ret;
}
static int nilfs_direct_last_key(const struct nilfs_bmap *bmap, __u64 *keyp)
......@@ -310,59 +268,56 @@ int nilfs_direct_delete_and_convert(struct nilfs_bmap *bmap,
return 0;
}
static int nilfs_direct_propagate_v(struct nilfs_direct *direct,
struct buffer_head *bh)
static int nilfs_direct_propagate(const struct nilfs_bmap *bmap,
struct buffer_head *bh)
{
union nilfs_bmap_ptr_req oldreq, newreq;
struct nilfs_direct *direct = (struct nilfs_direct *)bmap;
struct nilfs_palloc_req oldreq, newreq;
struct inode *dat;
__u64 key;
__u64 ptr;
int ret;
key = nilfs_bmap_data_get_key(&direct->d_bmap, bh);
if (!NILFS_BMAP_USE_VBN(bmap))
return 0;
dat = nilfs_bmap_get_dat(bmap);
key = nilfs_bmap_data_get_key(bmap, bh);
ptr = nilfs_direct_get_ptr(direct, key);
if (!buffer_nilfs_volatile(bh)) {
oldreq.bpr_ptr = ptr;
newreq.bpr_ptr = ptr;
ret = nilfs_bmap_prepare_update_v(&direct->d_bmap, &oldreq,
&newreq);
oldreq.pr_entry_nr = ptr;
newreq.pr_entry_nr = ptr;
ret = nilfs_dat_prepare_update(dat, &oldreq, &newreq);
if (ret < 0)
return ret;
nilfs_bmap_commit_update_v(&direct->d_bmap, &oldreq, &newreq);
nilfs_dat_commit_update(dat, &oldreq, &newreq,
bmap->b_ptr_type == NILFS_BMAP_PTR_VS);
set_buffer_nilfs_volatile(bh);
nilfs_direct_set_ptr(direct, key, newreq.bpr_ptr);
nilfs_direct_set_ptr(direct, key, newreq.pr_entry_nr);
} else
ret = nilfs_bmap_mark_dirty(&direct->d_bmap, ptr);
ret = nilfs_dat_mark_dirty(dat, ptr);
return ret;
}
static int nilfs_direct_propagate(const struct nilfs_bmap *bmap,
struct buffer_head *bh)
{
struct nilfs_direct *direct = (struct nilfs_direct *)bmap;
return NILFS_BMAP_USE_VBN(bmap) ?
nilfs_direct_propagate_v(direct, bh) : 0;
}
static int nilfs_direct_assign_v(struct nilfs_direct *direct,
__u64 key, __u64 ptr,
struct buffer_head **bh,
sector_t blocknr,
union nilfs_binfo *binfo)
{
struct inode *dat = nilfs_bmap_get_dat(&direct->d_bmap);
union nilfs_bmap_ptr_req req;
int ret;
req.bpr_ptr = ptr;
ret = nilfs_bmap_start_v(&direct->d_bmap, &req, blocknr);
if (unlikely(ret < 0))
return ret;
binfo->bi_v.bi_vblocknr = nilfs_bmap_ptr_to_dptr(ptr);
binfo->bi_v.bi_blkoff = nilfs_bmap_key_to_dkey(key);
return 0;
ret = nilfs_dat_prepare_start(dat, &req.bpr_req);
if (!ret) {
nilfs_dat_commit_start(dat, &req.bpr_req, blocknr);
binfo->bi_v.bi_vblocknr = nilfs_bmap_ptr_to_dptr(ptr);
binfo->bi_v.bi_blkoff = nilfs_bmap_key_to_dkey(key);
}
return ret;
}
static int nilfs_direct_assign_p(struct nilfs_direct *direct,
......
......@@ -31,7 +31,6 @@
#include "mdt.h"
#include "alloc.h"
#define NILFS_IFILE_GFP NILFS_MDT_GFP
static inline struct nilfs_inode *
nilfs_ifile_map_inode(struct inode *ifile, ino_t ino, struct buffer_head *ibh)
......
......@@ -430,7 +430,8 @@ static int __nilfs_read_inode(struct super_block *sb, unsigned long ino,
raw_inode = nilfs_ifile_map_inode(sbi->s_ifile, ino, bh);
if (nilfs_read_inode_common(inode, raw_inode))
err = nilfs_read_inode_common(inode, raw_inode);
if (err)
goto failed_unmap;
if (S_ISREG(inode->i_mode)) {
......
......@@ -442,12 +442,6 @@ int nilfs_ioctl_prepare_clean_segments(struct the_nilfs *nilfs,
const char *msg;
int ret;
ret = nilfs_ioctl_move_blocks(nilfs, &argv[0], kbufs[0]);
if (ret < 0) {
msg = "cannot read source blocks";
goto failed;
}
ret = nilfs_ioctl_delete_checkpoints(nilfs, &argv[1], kbufs[1]);
if (ret < 0) {
/*
......@@ -548,7 +542,25 @@ static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp,
}
}
ret = nilfs_clean_segments(inode->i_sb, argv, kbufs);
/*
* nilfs_ioctl_move_blocks() will call nilfs_gc_iget(),
* which will operate on an inode list without blocking.
* To protect the list from concurrent operations,
* nilfs_ioctl_move_blocks should be an atomic operation.
*/
if (test_and_set_bit(THE_NILFS_GC_RUNNING, &nilfs->ns_flags)) {
ret = -EBUSY;
goto out_free;
}
ret = nilfs_ioctl_move_blocks(nilfs, &argv[0], kbufs[0]);
if (ret < 0)
printk(KERN_ERR "NILFS: GC failed during preparation: "
"cannot read source blocks: err=%d\n", ret);
else
ret = nilfs_clean_segments(inode->i_sb, argv, kbufs);
clear_nilfs_gc_running(nilfs);
out_free:
while (--n >= 0)
......
......@@ -103,15 +103,12 @@ static int nilfs_mdt_create_block(struct inode *inode, unsigned long block,
goto failed_unlock;
err = -EEXIST;
if (buffer_uptodate(bh) || buffer_mapped(bh))
if (buffer_uptodate(bh))
goto failed_bh;
#if 0
/* The uptodate flag is not protected by the page lock, but
the mapped flag is. Thus, we don't have to wait for the buffer. */
wait_on_buffer(bh);
if (buffer_uptodate(bh))
goto failed_bh;
#endif
bh->b_bdev = nilfs->ns_bdev;
err = nilfs_mdt_insert_new_block(inode, block, bh, init_block);
......@@ -139,7 +136,7 @@ nilfs_mdt_submit_block(struct inode *inode, unsigned long blkoff,
int mode, struct buffer_head **out_bh)
{
struct buffer_head *bh;
unsigned long blknum = 0;
__u64 blknum = 0;
int ret = -ENOMEM;
bh = nilfs_grab_buffer(inode, inode->i_mapping, blkoff, 0);
......@@ -162,17 +159,15 @@ nilfs_mdt_submit_block(struct inode *inode, unsigned long blkoff,
unlock_buffer(bh);
goto out;
}
if (!buffer_mapped(bh)) { /* unused buffer */
ret = nilfs_bmap_lookup(NILFS_I(inode)->i_bmap, blkoff,
&blknum);
if (unlikely(ret)) {
unlock_buffer(bh);
goto failed_bh;
}
bh->b_bdev = NILFS_MDT(inode)->mi_nilfs->ns_bdev;
bh->b_blocknr = blknum;
set_buffer_mapped(bh);
ret = nilfs_bmap_lookup(NILFS_I(inode)->i_bmap, blkoff, &blknum);
if (unlikely(ret)) {
unlock_buffer(bh);
goto failed_bh;
}
bh->b_bdev = NILFS_MDT(inode)->mi_nilfs->ns_bdev;
bh->b_blocknr = (sector_t)blknum;
set_buffer_mapped(bh);
bh->b_end_io = end_buffer_read_sync;
get_bh(bh);
......@@ -402,6 +397,7 @@ nilfs_mdt_write_page(struct page *page, struct writeback_control *wbc)
struct inode *inode = container_of(page->mapping,
struct inode, i_data);
struct super_block *sb = inode->i_sb;
struct the_nilfs *nilfs = NILFS_MDT(inode)->mi_nilfs;
struct nilfs_sb_info *writer = NULL;
int err = 0;
......@@ -411,9 +407,10 @@ nilfs_mdt_write_page(struct page *page, struct writeback_control *wbc)
if (page->mapping->assoc_mapping)
return 0; /* Do not request flush for shadow page cache */
if (!sb) {
writer = nilfs_get_writer(NILFS_MDT(inode)->mi_nilfs);
down_read(&nilfs->ns_writer_sem);
writer = nilfs->ns_writer;
if (!writer) {
nilfs_put_writer(NILFS_MDT(inode)->mi_nilfs);
up_read(&nilfs->ns_writer_sem);
return -EROFS;
}
sb = writer->s_super;
......@@ -425,7 +422,7 @@ nilfs_mdt_write_page(struct page *page, struct writeback_control *wbc)
nilfs_flush_segment(sb, inode->i_ino);
if (writer)
nilfs_put_writer(NILFS_MDT(inode)->mi_nilfs);
up_read(&nilfs->ns_writer_sem);
return err;
}
......@@ -516,9 +513,10 @@ nilfs_mdt_new_common(struct the_nilfs *nilfs, struct super_block *sb,
}
struct inode *nilfs_mdt_new(struct the_nilfs *nilfs, struct super_block *sb,
ino_t ino, gfp_t gfp_mask)
ino_t ino)
{
struct inode *inode = nilfs_mdt_new_common(nilfs, sb, ino, gfp_mask);
struct inode *inode = nilfs_mdt_new_common(nilfs, sb, ino,
NILFS_MDT_GFP);
if (!inode)
return NULL;
......
......@@ -74,8 +74,7 @@ int nilfs_mdt_forget_block(struct inode *, unsigned long);
int nilfs_mdt_mark_block_dirty(struct inode *, unsigned long);
int nilfs_mdt_fetch_dirty(struct inode *);
struct inode *nilfs_mdt_new(struct the_nilfs *, struct super_block *, ino_t,
gfp_t);
struct inode *nilfs_mdt_new(struct the_nilfs *, struct super_block *, ino_t);
struct inode *nilfs_mdt_new_common(struct the_nilfs *, struct super_block *,
ino_t, gfp_t);
void nilfs_mdt_destroy(struct inode *);
......
......@@ -552,7 +552,8 @@ static int recover_dsync_blocks(struct nilfs_sb_info *sbi,
printk(KERN_WARNING
"NILFS warning: error recovering data block "
"(err=%d, ino=%lu, block-offset=%llu)\n",
err, rb->ino, (unsigned long long)rb->blkoff);
err, (unsigned long)rb->ino,
(unsigned long long)rb->blkoff);
if (!err2)
err2 = err;
next:
......
......@@ -316,10 +316,10 @@ static struct bio *nilfs_alloc_seg_bio(struct super_block *sb, sector_t start,
{
struct bio *bio;
bio = bio_alloc(GFP_NOWAIT, nr_vecs);
bio = bio_alloc(GFP_NOIO, nr_vecs);
if (bio == NULL) {
while (!bio && (nr_vecs >>= 1))
bio = bio_alloc(GFP_NOWAIT, nr_vecs);
bio = bio_alloc(GFP_NOIO, nr_vecs);
}
if (likely(bio)) {
bio->bi_bdev = sb->s_bdev;
......
......@@ -2501,7 +2501,8 @@ static int nilfs_segctor_construct(struct nilfs_sc_info *sci,
if (test_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags) &&
nilfs_discontinued(nilfs)) {
down_write(&nilfs->ns_sem);
req->sb_err = nilfs_commit_super(sbi, 0);
req->sb_err = nilfs_commit_super(sbi,
nilfs_altsb_need_update(nilfs));
up_write(&nilfs->ns_sem);
}
}
......@@ -2689,6 +2690,7 @@ static int nilfs_segctor_thread(void *arg)
} else {
DEFINE_WAIT(wait);
int should_sleep = 1;
struct the_nilfs *nilfs;
prepare_to_wait(&sci->sc_wait_daemon, &wait,
TASK_INTERRUPTIBLE);
......@@ -2709,6 +2711,9 @@ static int nilfs_segctor_thread(void *arg)
finish_wait(&sci->sc_wait_daemon, &wait);
timeout = ((sci->sc_state & NILFS_SEGCTOR_COMMIT) &&
time_after_eq(jiffies, sci->sc_timer->expires));
nilfs = sci->sc_sbi->s_nilfs;
if (sci->sc_super->s_dirt && nilfs_sb_need_update(nilfs))
set_nilfs_discontinued(nilfs);
}
goto loop;
......
......@@ -28,7 +28,6 @@
#include <linux/nilfs2_fs.h>
#include "mdt.h"
#define NILFS_SUFILE_GFP NILFS_MDT_GFP
static inline unsigned long nilfs_sufile_get_nsegments(struct inode *sufile)
{
......
......@@ -50,6 +50,8 @@
#include <linux/writeback.h>
#include <linux/kobject.h>
#include <linux/exportfs.h>
#include <linux/seq_file.h>
#include <linux/mount.h>
#include "nilfs.h"
#include "mdt.h"
#include "alloc.h"
......@@ -65,7 +67,6 @@ MODULE_DESCRIPTION("A New Implementation of the Log-structured Filesystem "
"(NILFS)");
MODULE_LICENSE("GPL");
static void nilfs_write_super(struct super_block *sb);
static int nilfs_remount(struct super_block *sb, int *flags, char *data);
/**
......@@ -311,9 +312,6 @@ static void nilfs_put_super(struct super_block *sb)
lock_kernel();
if (sb->s_dirt)
nilfs_write_super(sb);
nilfs_detach_segment_constructor(sbi);
if (!(sb->s_flags & MS_RDONLY)) {
......@@ -336,63 +334,21 @@ static void nilfs_put_super(struct super_block *sb)
unlock_kernel();
}
/**
* nilfs_write_super - write super block(s) of NILFS
* @sb: super_block
*
* nilfs_write_super() gets a fs-dependent lock, writes super block(s), and
* clears s_dirt. This function is called in the section protected by
* lock_super().
*
* The s_dirt flag is managed by each filesystem and we protect it by ns_sem
* of the struct the_nilfs. Lock order must be as follows:
*
* 1. lock_super()
* 2. down_write(&nilfs->ns_sem)
*
* Inside NILFS, locking ns_sem is enough to protect s_dirt and the buffer
* of the super block (nilfs->ns_sbp[]).
*
* In most cases, VFS functions call lock_super() before calling these
* methods. So we must be careful not to bring on deadlocks when using
* lock_super(); see generic_shutdown_super(), write_super(), and so on.
*
* Note that order of lock_kernel() and lock_super() depends on contexts
* of VFS. We should also note that lock_kernel() can be used in its
* protective section and only the outermost one has an effect.
*/
static void nilfs_write_super(struct super_block *sb)
static int nilfs_sync_fs(struct super_block *sb, int wait)
{
struct nilfs_sb_info *sbi = NILFS_SB(sb);
struct the_nilfs *nilfs = sbi->s_nilfs;
down_write(&nilfs->ns_sem);
if (!(sb->s_flags & MS_RDONLY)) {
struct nilfs_super_block **sbp = nilfs->ns_sbp;
u64 t = get_seconds();
int dupsb;
if (!nilfs_discontinued(nilfs) && t >= nilfs->ns_sbwtime[0] &&
t < nilfs->ns_sbwtime[0] + NILFS_SB_FREQ) {
up_write(&nilfs->ns_sem);
return;
}
dupsb = sbp[1] && t > nilfs->ns_sbwtime[1] + NILFS_ALTSB_FREQ;
nilfs_commit_super(sbi, dupsb);
}
sb->s_dirt = 0;
up_write(&nilfs->ns_sem);
}
static int nilfs_sync_fs(struct super_block *sb, int wait)
{
int err = 0;
nilfs_write_super(sb);
/* This function is called when the super block should be written back */
if (wait)
err = nilfs_construct_segment(sb);
down_write(&nilfs->ns_sem);
if (sb->s_dirt)
nilfs_commit_super(sbi, 1);
up_write(&nilfs->ns_sem);
return err;
}
......@@ -407,8 +363,7 @@ int nilfs_attach_checkpoint(struct nilfs_sb_info *sbi, __u64 cno)
list_add(&sbi->s_list, &nilfs->ns_supers);
up_write(&nilfs->ns_super_sem);
sbi->s_ifile = nilfs_mdt_new(
nilfs, sbi->s_super, NILFS_IFILE_INO, NILFS_IFILE_GFP);
sbi->s_ifile = nilfs_mdt_new(nilfs, sbi->s_super, NILFS_IFILE_INO);
if (!sbi->s_ifile)
return -ENOMEM;
......@@ -529,6 +484,26 @@ static int nilfs_statfs(struct dentry *dentry, struct kstatfs *buf)
return 0;
}
static int nilfs_show_options(struct seq_file *seq, struct vfsmount *vfs)
{
struct super_block *sb = vfs->mnt_sb;
struct nilfs_sb_info *sbi = NILFS_SB(sb);
if (!nilfs_test_opt(sbi, BARRIER))
seq_printf(seq, ",barrier=off");
if (nilfs_test_opt(sbi, SNAPSHOT))
seq_printf(seq, ",cp=%llu",
(unsigned long long int)sbi->s_snapshot_cno);
if (nilfs_test_opt(sbi, ERRORS_RO))
seq_printf(seq, ",errors=remount-ro");
if (nilfs_test_opt(sbi, ERRORS_PANIC))
seq_printf(seq, ",errors=panic");
if (nilfs_test_opt(sbi, STRICT_ORDER))
seq_printf(seq, ",order=strict");
return 0;
}
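With this handler wired into nilfs_sops below, non-default options become visible in /proc/mounts. A hypothetical example line, with made-up device, mount point and checkpoint number (the leading fields come from the VFS, the trailing options from this function):
/dev/sdb1 /backup nilfs2 ro,barrier=off,cp=5 0 0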
static struct super_operations nilfs_sops = {
.alloc_inode = nilfs_alloc_inode,
.destroy_inode = nilfs_destroy_inode,
......@@ -538,7 +513,7 @@ static struct super_operations nilfs_sops = {
/* .drop_inode = nilfs_drop_inode, */
.delete_inode = nilfs_delete_inode,
.put_super = nilfs_put_super,
.write_super = nilfs_write_super,
/* .write_super = nilfs_write_super, */
.sync_fs = nilfs_sync_fs,
/* .write_super_lockfs */
/* .unlockfs */
......@@ -546,7 +521,7 @@ static struct super_operations nilfs_sops = {
.remount_fs = nilfs_remount,
.clear_inode = nilfs_clear_inode,
/* .umount_begin */
/* .show_options */
.show_options = nilfs_show_options
};
static struct inode *
......@@ -816,10 +791,15 @@ nilfs_fill_super(struct super_block *sb, void *data, int silent,
if (sb->s_flags & MS_RDONLY) {
if (nilfs_test_opt(sbi, SNAPSHOT)) {
down_read(&nilfs->ns_segctor_sem);
err = nilfs_cpfile_is_snapshot(nilfs->ns_cpfile,
sbi->s_snapshot_cno);
if (err < 0)
up_read(&nilfs->ns_segctor_sem);
if (err < 0) {
if (err == -ENOENT)
err = -EINVAL;
goto failed_sbi;
}
if (!err) {
printk(KERN_ERR
"NILFS: The specified checkpoint is "
......@@ -1127,10 +1107,6 @@ nilfs_get_sb(struct file_system_type *fs_type, int flags,
*/
sd.sbi = nilfs_find_sbinfo(nilfs, !(flags & MS_RDONLY), sd.cno);
if (!sd.cno)
/* trying to get the latest checkpoint. */
sd.cno = nilfs_last_cno(nilfs);
/*
* Get super block instance holding the nilfs_sb_info struct.
* A new instance is allocated if no existing mount is present or
......
......@@ -68,12 +68,11 @@ static struct the_nilfs *alloc_nilfs(struct block_device *bdev)
nilfs->ns_bdev = bdev;
atomic_set(&nilfs->ns_count, 1);
atomic_set(&nilfs->ns_writer_refcount, -1);
atomic_set(&nilfs->ns_ndirtyblks, 0);
init_rwsem(&nilfs->ns_sem);
init_rwsem(&nilfs->ns_super_sem);
mutex_init(&nilfs->ns_mount_mutex);
mutex_init(&nilfs->ns_writer_mutex);
init_rwsem(&nilfs->ns_writer_sem);
INIT_LIST_HEAD(&nilfs->ns_list);
INIT_LIST_HEAD(&nilfs->ns_supers);
spin_lock_init(&nilfs->ns_last_segment_lock);
......@@ -188,23 +187,19 @@ static int nilfs_load_super_root(struct the_nilfs *nilfs,
inode_size = nilfs->ns_inode_size;
err = -ENOMEM;
nilfs->ns_dat = nilfs_mdt_new(
nilfs, NULL, NILFS_DAT_INO, NILFS_DAT_GFP);
nilfs->ns_dat = nilfs_mdt_new(nilfs, NULL, NILFS_DAT_INO);
if (unlikely(!nilfs->ns_dat))
goto failed;
nilfs->ns_gc_dat = nilfs_mdt_new(
nilfs, NULL, NILFS_DAT_INO, NILFS_DAT_GFP);
nilfs->ns_gc_dat = nilfs_mdt_new(nilfs, NULL, NILFS_DAT_INO);
if (unlikely(!nilfs->ns_gc_dat))
goto failed_dat;
nilfs->ns_cpfile = nilfs_mdt_new(
nilfs, NULL, NILFS_CPFILE_INO, NILFS_CPFILE_GFP);
nilfs->ns_cpfile = nilfs_mdt_new(nilfs, NULL, NILFS_CPFILE_INO);
if (unlikely(!nilfs->ns_cpfile))
goto failed_gc_dat;
nilfs->ns_sufile = nilfs_mdt_new(
nilfs, NULL, NILFS_SUFILE_INO, NILFS_SUFILE_GFP);
nilfs->ns_sufile = nilfs_mdt_new(nilfs, NULL, NILFS_SUFILE_INO);
if (unlikely(!nilfs->ns_sufile))
goto failed_cpfile;
......
......@@ -37,6 +37,7 @@ enum {
THE_NILFS_LOADED, /* Roll-back/roll-forward has been done and
the latest checkpoint was loaded */
THE_NILFS_DISCONTINUED, /* 'next' pointer chain has broken */
THE_NILFS_GC_RUNNING, /* gc process is running */
};
/**
......@@ -50,8 +51,7 @@ enum {
* @ns_sem: semaphore for shared states
* @ns_super_sem: semaphore for global operations across super block instances
* @ns_mount_mutex: mutex protecting mount process of nilfs
* @ns_writer_mutex: mutex protecting ns_writer attach/detach
* @ns_writer_refcount: number of referrers on ns_writer
* @ns_writer_sem: semaphore protecting ns_writer attach/detach
* @ns_current: back pointer to current mount
* @ns_sbh: buffer heads of on-disk super blocks
* @ns_sbp: pointers to super block data
......@@ -100,8 +100,7 @@ struct the_nilfs {
struct rw_semaphore ns_sem;
struct rw_semaphore ns_super_sem;
struct mutex ns_mount_mutex;
struct mutex ns_writer_mutex;
atomic_t ns_writer_refcount;
struct rw_semaphore ns_writer_sem;
/*
* components protected by ns_super_sem
......@@ -197,11 +196,26 @@ static inline int nilfs_##name(struct the_nilfs *nilfs) \
THE_NILFS_FNS(INIT, init)
THE_NILFS_FNS(LOADED, loaded)
THE_NILFS_FNS(DISCONTINUED, discontinued)
THE_NILFS_FNS(GC_RUNNING, gc_running)
/* Minimum interval of periodical update of superblocks (in seconds) */
#define NILFS_SB_FREQ 10
#define NILFS_ALTSB_FREQ 60 /* spare superblock */
static inline int nilfs_sb_need_update(struct the_nilfs *nilfs)
{
u64 t = get_seconds();
return t < nilfs->ns_sbwtime[0] ||
t > nilfs->ns_sbwtime[0] + NILFS_SB_FREQ;
}
static inline int nilfs_altsb_need_update(struct the_nilfs *nilfs)
{
u64 t = get_seconds();
struct nilfs_super_block **sbp = nilfs->ns_sbp;
return sbp[1] && t > nilfs->ns_sbwtime[1] + NILFS_ALTSB_FREQ;
}
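As a worked example: with NILFS_SB_FREQ = 10, a super block last committed at t = 1000 becomes due once get_seconds() exceeds 1010, or immediately if the clock has gone backwards below 1000, while the alternate super block is refreshed at most once every NILFS_ALTSB_FREQ = 60 seconds. nilfs_segctor_construct() and the segment constructor thread in the segment.c hunks above consult these helpers now that the periodic nilfs_write_super() callback is gone.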
void nilfs_set_last_segment(struct the_nilfs *, sector_t, u64, __u64);
struct the_nilfs *find_or_create_nilfs(struct block_device *);
void put_nilfs(struct the_nilfs *);
......@@ -221,34 +235,21 @@ static inline void get_nilfs(struct the_nilfs *nilfs)
atomic_inc(&nilfs->ns_count);
}
static inline struct nilfs_sb_info *nilfs_get_writer(struct the_nilfs *nilfs)
{
if (atomic_inc_and_test(&nilfs->ns_writer_refcount))
mutex_lock(&nilfs->ns_writer_mutex);
return nilfs->ns_writer;
}
static inline void nilfs_put_writer(struct the_nilfs *nilfs)
{
if (atomic_add_negative(-1, &nilfs->ns_writer_refcount))
mutex_unlock(&nilfs->ns_writer_mutex);
}
static inline void
nilfs_attach_writer(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi)
{
mutex_lock(&nilfs->ns_writer_mutex);
down_write(&nilfs->ns_writer_sem);
nilfs->ns_writer = sbi;
mutex_unlock(&nilfs->ns_writer_mutex);
up_write(&nilfs->ns_writer_sem);
}
static inline void
nilfs_detach_writer(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi)
{
mutex_lock(&nilfs->ns_writer_mutex);
down_write(&nilfs->ns_writer_sem);
if (sbi == nilfs->ns_writer)
nilfs->ns_writer = NULL;
mutex_unlock(&nilfs->ns_writer_mutex);
up_write(&nilfs->ns_writer_sem);
}
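The mutex/refcount pair (ns_writer_mutex, ns_writer_refcount) is replaced by ns_writer_sem, and readers now take the semaphore directly, as nilfs_mdt_write_page() does in the mdt.c hunk above. A minimal reader-side sketch; the function name is hypothetical and only illustrates the locking pattern:
/* Editor's sketch, not part of the patch. */
static int example_with_writer(struct the_nilfs *nilfs)
{
	struct nilfs_sb_info *writer;
	int err = 0;

	down_read(&nilfs->ns_writer_sem);
	writer = nilfs->ns_writer;
	if (!writer)
		err = -EROFS;	/* no writable mount attached */
	/* ... otherwise use writer->s_super while the semaphore is held ... */
	up_read(&nilfs->ns_writer_sem);
	return err;
}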
static inline void nilfs_put_sbinfo(struct nilfs_sb_info *sbi)
......