openeuler / Kernel

Commit 10183a69
Authored Apr 27, 2015 by Yan, Zheng
Committed by Ilya Dryomov, Jun 25, 2015
ceph: check OSD caps before read/write
Signed-off-by: Yan, Zheng <zyan@redhat.com>

Parent: 144cba14
Showing 7 changed files with 249 additions and 6 deletions (+249 -6)
fs/ceph/addr.c        +203  -0
fs/ceph/caps.c          +4  -0
fs/ceph/inode.c         +3  -0
fs/ceph/mds_client.c    +4  -0
fs/ceph/mds_client.h    +9  -0
fs/ceph/super.c        +14  -1
fs/ceph/super.h        +12  -5
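In brief: before handing out read or write caps on a file, the client now probes the file's data pool with two OSD requests, a zero-length STAT (read probe) and an exclusive CREATE (write probe), and caches the verdict per pool in an rbtree on the MDS client. How the probe results are decoded, paraphrasing the logic of __ceph_pool_perm_get() in fs/ceph/addr.c below:

	/* err: STAT (read probe) result; err2: exclusive-CREATE (write probe)
	 * result. Any outcome other than -EPERM that proves the OSD admitted
	 * the op counts as having the capability. */
	if (err >= 0 || err == -ENOENT)		/* stat OK, or object absent */
		have |= POOL_READ;
	if (err2 == 0 || err2 == -EEXIST)	/* created, or already existed */
		have |= POOL_WRITE;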
fs/ceph/addr.c

@@ -1598,3 +1598,206 @@ int ceph_mmap(struct file *file, struct vm_area_struct *vma)
 	vma->vm_ops = &ceph_vmops;
 	return 0;
 }
+
+enum {
+	POOL_READ	= 1,
+	POOL_WRITE	= 2,
+};
+
+static int __ceph_pool_perm_get(struct ceph_inode_info *ci, u32 pool)
+{
+	struct ceph_fs_client *fsc = ceph_inode_to_client(&ci->vfs_inode);
+	struct ceph_mds_client *mdsc = fsc->mdsc;
+	struct ceph_osd_request *rd_req = NULL, *wr_req = NULL;
+	struct rb_node **p, *parent;
+	struct ceph_pool_perm *perm;
+	struct page **pages;
+	int err = 0, err2 = 0, have = 0;
+
+	down_read(&mdsc->pool_perm_rwsem);
+	p = &mdsc->pool_perm_tree.rb_node;
+	while (*p) {
+		perm = rb_entry(*p, struct ceph_pool_perm, node);
+		if (pool < perm->pool)
+			p = &(*p)->rb_left;
+		else if (pool > perm->pool)
+			p = &(*p)->rb_right;
+		else {
+			have = perm->perm;
+			break;
+		}
+	}
+	up_read(&mdsc->pool_perm_rwsem);
+	if (*p)
+		goto out;
+
+	dout("__ceph_pool_perm_get pool %u no perm cached\n", pool);
+
+	down_write(&mdsc->pool_perm_rwsem);
+	parent = NULL;
+	while (*p) {
+		parent = *p;
+		perm = rb_entry(parent, struct ceph_pool_perm, node);
+		if (pool < perm->pool)
+			p = &(*p)->rb_left;
+		else if (pool > perm->pool)
+			p = &(*p)->rb_right;
+		else {
+			have = perm->perm;
+			break;
+		}
+	}
+	if (*p) {
+		up_write(&mdsc->pool_perm_rwsem);
+		goto out;
+	}
+
+	rd_req = ceph_osdc_alloc_request(&fsc->client->osdc,
+					 ci->i_snap_realm->cached_context,
+					 1, false, GFP_NOFS);
+	if (!rd_req) {
+		err = -ENOMEM;
+		goto out_unlock;
+	}
+
+	rd_req->r_flags = CEPH_OSD_FLAG_READ;
+	osd_req_op_init(rd_req, 0, CEPH_OSD_OP_STAT, 0);
+	rd_req->r_base_oloc.pool = pool;
+	snprintf(rd_req->r_base_oid.name, sizeof(rd_req->r_base_oid.name),
+		 "%llx.00000000", ci->i_vino.ino);
+	rd_req->r_base_oid.name_len = strlen(rd_req->r_base_oid.name);
+
+	wr_req = ceph_osdc_alloc_request(&fsc->client->osdc,
+					 ci->i_snap_realm->cached_context,
+					 1, false, GFP_NOFS);
+	if (!wr_req) {
+		err = -ENOMEM;
+		goto out_unlock;
+	}
+
+	wr_req->r_flags = CEPH_OSD_FLAG_WRITE |
+			  CEPH_OSD_FLAG_ACK | CEPH_OSD_FLAG_ONDISK;
+	osd_req_op_init(wr_req, 0, CEPH_OSD_OP_CREATE, CEPH_OSD_OP_FLAG_EXCL);
+	wr_req->r_base_oloc.pool = pool;
+	wr_req->r_base_oid = rd_req->r_base_oid;
+
+	/* one page should be large enough for STAT data */
+	pages = ceph_alloc_page_vector(1, GFP_KERNEL);
+	if (IS_ERR(pages)) {
+		err = PTR_ERR(pages);
+		goto out_unlock;
+	}
+
+	osd_req_op_raw_data_in_pages(rd_req, 0, pages, PAGE_SIZE,
+				     0, false, true);
+	ceph_osdc_build_request(rd_req, 0, NULL, CEPH_NOSNAP,
+				&ci->vfs_inode.i_mtime);
+	err = ceph_osdc_start_request(&fsc->client->osdc, rd_req, false);
+
+	ceph_osdc_build_request(wr_req, 0, NULL, CEPH_NOSNAP,
+				&ci->vfs_inode.i_mtime);
+	err2 = ceph_osdc_start_request(&fsc->client->osdc, wr_req, false);
+
+	if (!err)
+		err = ceph_osdc_wait_request(&fsc->client->osdc, rd_req);
+	if (!err2)
+		err2 = ceph_osdc_wait_request(&fsc->client->osdc, wr_req);
+
+	if (err >= 0 || err == -ENOENT)
+		have |= POOL_READ;
+	else if (err != -EPERM)
+		goto out_unlock;
+
+	if (err2 == 0 || err2 == -EEXIST)
+		have |= POOL_WRITE;
+	else if (err2 != -EPERM) {
+		err = err2;
+		goto out_unlock;
+	}
+
+	perm = kmalloc(sizeof(*perm), GFP_NOFS);
+	if (!perm) {
+		err = -ENOMEM;
+		goto out_unlock;
+	}
+
+	perm->pool = pool;
+	perm->perm = have;
+	rb_link_node(&perm->node, parent, p);
+	rb_insert_color(&perm->node, &mdsc->pool_perm_tree);
+	err = 0;
+out_unlock:
+	up_write(&mdsc->pool_perm_rwsem);
+
+	if (rd_req)
+		ceph_osdc_put_request(rd_req);
+	if (wr_req)
+		ceph_osdc_put_request(wr_req);
+out:
+	if (!err)
+		err = have;
+	dout("__ceph_pool_perm_get pool %u result = %d\n", pool, err);
+	return err;
+}
+
+int ceph_pool_perm_check(struct ceph_inode_info *ci, int need)
+{
+	u32 pool;
+	int ret, flags;
+
+	if (ceph_test_mount_opt(ceph_inode_to_client(&ci->vfs_inode),
+				NOPOOLPERM))
+		return 0;
+
+	spin_lock(&ci->i_ceph_lock);
+	flags = ci->i_ceph_flags;
+	pool = ceph_file_layout_pg_pool(ci->i_layout);
+	spin_unlock(&ci->i_ceph_lock);
+check:
+	if (flags & CEPH_I_POOL_PERM) {
+		if ((need & CEPH_CAP_FILE_RD) && !(flags & CEPH_I_POOL_RD)) {
+			dout("ceph_pool_perm_check pool %u no read perm\n",
+			     pool);
+			return -EPERM;
+		}
+		if ((need & CEPH_CAP_FILE_WR) && !(flags & CEPH_I_POOL_WR)) {
+			dout("ceph_pool_perm_check pool %u no write perm\n",
+			     pool);
+			return -EPERM;
+		}
+		return 0;
+	}
+
+	ret = __ceph_pool_perm_get(ci, pool);
+	if (ret < 0)
+		return ret;
+
+	flags = CEPH_I_POOL_PERM;
+	if (ret & POOL_READ)
+		flags |= CEPH_I_POOL_RD;
+	if (ret & POOL_WRITE)
+		flags |= CEPH_I_POOL_WR;
+
+	spin_lock(&ci->i_ceph_lock);
+	if (pool == ceph_file_layout_pg_pool(ci->i_layout)) {
+		ci->i_ceph_flags = flags;
+	} else {
+		pool = ceph_file_layout_pg_pool(ci->i_layout);
+		flags = ci->i_ceph_flags;
+	}
+	spin_unlock(&ci->i_ceph_lock);
+	goto check;
+}
+
+void ceph_pool_perm_destroy(struct ceph_mds_client *mdsc)
+{
+	struct ceph_pool_perm *perm;
+	struct rb_node *n;
+
+	while (!RB_EMPTY_ROOT(&mdsc->pool_perm_tree)) {
+		n = rb_first(&mdsc->pool_perm_tree);
+		perm = rb_entry(n, struct ceph_pool_perm, node);
+		rb_erase(n, &mdsc->pool_perm_tree);
+		kfree(perm);
+	}
+}
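The lookup/insert sequence above is a double-checked cache: an optimistic lookup under the read side of pool_perm_rwsem, then, on a miss, a second lookup under the write side before probing and inserting. Holding the write side across the OSD round trip is deliberate: concurrent callers block rather than issue duplicate probes for the same pool. A minimal userspace sketch of the same pattern (hypothetical code: pthreads in place of the kernel rwsem, a linked list standing in for the rbtree, and a stubbed-out probe):

	/*
	 * Illustrative userspace analogue of __ceph_pool_perm_get()'s caching
	 * pattern; not kernel code. Compile with: cc -pthread sketch.c
	 */
	#include <pthread.h>
	#include <stdint.h>
	#include <stdlib.h>

	enum { POOL_READ = 1, POOL_WRITE = 2 };	/* mirrors the patch's enum */

	struct pool_perm {
		uint32_t pool;
		int perm;
		struct pool_perm *next;		/* list stands in for rbtree */
	};

	static pthread_rwlock_t perm_lock = PTHREAD_RWLOCK_INITIALIZER;
	static struct pool_perm *perm_cache;

	static int probe_pool(uint32_t pool)
	{
		/* Stub for the STAT + exclusive-CREATE probes in the patch. */
		return POOL_READ | POOL_WRITE;
	}

	static struct pool_perm *lookup(uint32_t pool)
	{
		struct pool_perm *p;

		for (p = perm_cache; p; p = p->next)
			if (p->pool == pool)
				return p;
		return NULL;
	}

	int pool_perm_get(uint32_t pool)
	{
		struct pool_perm *p;
		int perm;

		/* Fast path: read-locked lookup, like the down_read() pass. */
		pthread_rwlock_rdlock(&perm_lock);
		p = lookup(pool);
		perm = p ? p->perm : -1;
		pthread_rwlock_unlock(&perm_lock);
		if (perm >= 0)
			return perm;

		/* Slow path: re-check under the write lock, then probe and
		 * insert. The probe runs with the lock held so racing callers
		 * wait instead of probing twice, as in the kernel code. */
		pthread_rwlock_wrlock(&perm_lock);
		p = lookup(pool);
		if (p) {
			perm = p->perm;
		} else {
			perm = probe_pool(pool);
			p = malloc(sizeof(*p));
			if (p) {
				p->pool = pool;
				p->perm = perm;
				p->next = perm_cache;
				perm_cache = p;
			}
		}
		pthread_rwlock_unlock(&perm_lock);
		return perm;
	}

	int main(void)
	{
		return pool_perm_get(42) == (POOL_READ | POOL_WRITE) ? 0 : 1;
	}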
fs/ceph/caps.c

@@ -2233,6 +2233,10 @@ int ceph_get_caps(struct ceph_inode_info *ci, int need, int want,
 {
 	int _got, check_max, ret, err = 0;
 
+	ret = ceph_pool_perm_check(ci, need);
+	if (ret < 0)
+		return ret;
+
 retry:
 	if (endoff > 0)
 		check_max_size(&ci->vfs_inode, endoff);
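With this hook, every reader and writer that acquires file caps through ceph_get_caps() is gated by the pool check. A hypothetical call site (the first three parameters match the hunk header above; the trailing ones follow the v4.1-era signature, which is an assumption here):

	/* e.g. in a read path: ask for FILE_RD caps; the patch makes this
	 * fail fast with -EPERM when OSD caps forbid reads from the pool. */
	ret = ceph_get_caps(ci, CEPH_CAP_FILE_RD, CEPH_CAP_FILE_CACHE,
			    -1, &got, &pinned_page);
	if (ret < 0)
		return ret;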
fs/ceph/inode.c

@@ -753,7 +753,10 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
 	if (new_version ||
 	    (new_issued & (CEPH_CAP_ANY_FILE_RD | CEPH_CAP_ANY_FILE_WR))) {
+		if (ci->i_layout.fl_pg_pool != info->layout.fl_pg_pool)
+			ci->i_ceph_flags &= ~CEPH_I_POOL_PERM;
+
 		ci->i_layout = info->layout;
 		queue_trunc = ceph_fill_file_size(inode, issued,
 					le32_to_cpu(info->truncate_seq),
 					le64_to_cpu(info->truncate_size),
fs/ceph/mds_client.c

@@ -3414,6 +3414,9 @@ int ceph_mdsc_init(struct ceph_fs_client *fsc)
 	ceph_caps_init(mdsc);
 	ceph_adjust_min_caps(mdsc, fsc->min_caps);
 
+	init_rwsem(&mdsc->pool_perm_rwsem);
+	mdsc->pool_perm_tree = RB_ROOT;
+
 	return 0;
 }

@@ -3607,6 +3610,7 @@ static void ceph_mdsc_stop(struct ceph_mds_client *mdsc)
 		ceph_mdsmap_destroy(mdsc->mdsmap);
 	kfree(mdsc->sessions);
 	ceph_caps_finalize(mdsc);
+	ceph_pool_perm_destroy(mdsc);
 }
 
 void ceph_mdsc_destroy(struct ceph_fs_client *fsc)
fs/ceph/mds_client.h

@@ -260,6 +260,12 @@ struct ceph_mds_request {
 	int               r_num_caps;
 };
 
+struct ceph_pool_perm {
+	struct rb_node node;
+	u32 pool;
+	int perm;
+};
+
 /*
  * mds client state
  */

@@ -328,6 +334,9 @@ struct ceph_mds_client {
 	spinlock_t        dentry_lru_lock;
 	struct list_head  dentry_lru;
 	int               num_dentry;
+
+	struct rw_semaphore     pool_perm_rwsem;
+	struct rb_root          pool_perm_tree;
 };
 
 extern const char *ceph_mds_op_name(int op);
fs/ceph/super.c

@@ -134,10 +134,12 @@ enum {
 	Opt_noino32,
 	Opt_fscache,
 	Opt_nofscache,
+	Opt_poolperm,
+	Opt_nopoolperm,
 #ifdef CONFIG_CEPH_FS_POSIX_ACL
 	Opt_acl,
 #endif
-	Opt_noacl
+	Opt_noacl,
 };
 
 static match_table_t fsopt_tokens = {

@@ -165,6 +167,8 @@ static match_table_t fsopt_tokens = {
 	{Opt_noino32, "noino32"},
 	{Opt_fscache, "fsc"},
 	{Opt_nofscache, "nofsc"},
+	{Opt_poolperm, "poolperm"},
+	{Opt_nopoolperm, "nopoolperm"},
 #ifdef CONFIG_CEPH_FS_POSIX_ACL
 	{Opt_acl, "acl"},
 #endif

@@ -268,6 +272,13 @@ static int parse_fsopt_token(char *c, void *private)
 	case Opt_nofscache:
 		fsopt->flags &= ~CEPH_MOUNT_OPT_FSCACHE;
 		break;
+	case Opt_poolperm:
+		fsopt->flags &= ~CEPH_MOUNT_OPT_NOPOOLPERM;
+		printk("pool perm");
+		break;
+	case Opt_nopoolperm:
+		fsopt->flags |= CEPH_MOUNT_OPT_NOPOOLPERM;
+		break;
 #ifdef CONFIG_CEPH_FS_POSIX_ACL
 	case Opt_acl:
 		fsopt->sb_flags |= MS_POSIXACL;

@@ -436,6 +447,8 @@ static int ceph_show_options(struct seq_file *m, struct dentry *root)
 		seq_puts(m, ",nodcache");
 	if (fsopt->flags & CEPH_MOUNT_OPT_FSCACHE)
 		seq_puts(m, ",fsc");
+	if (fsopt->flags & CEPH_MOUNT_OPT_NOPOOLPERM)
+		seq_puts(m, ",nopoolperm");
 #ifdef CONFIG_CEPH_FS_POSIX_ACL
 	if (fsopt->sb_flags & MS_POSIXACL)
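Usage note: pool-permission checking is enabled by default, and the new nopoolperm option switches it off for clients whose OSD caps are known to be broad enough (the option names come from the fsopt_tokens table above; the mount invocation itself is an illustrative assumption):

	# hypothetical example; skips the OSD permission probe
	mount -t ceph mon.example.com:/ /mnt/ceph -o name=admin,nopoolperm

Per ceph_show_options() above, the choice is visible afterwards as ",nopoolperm" in /proc/mounts.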
fs/ceph/super.h

@@ -35,6 +35,7 @@
 #define CEPH_MOUNT_OPT_INO32           (1<<8) /* 32 bit inos */
 #define CEPH_MOUNT_OPT_DCACHE          (1<<9) /* use dcache for readdir etc */
 #define CEPH_MOUNT_OPT_FSCACHE         (1<<10) /* use fscache */
+#define CEPH_MOUNT_OPT_NOPOOLPERM      (1<<11) /* no pool permission check */
 
 #define CEPH_MOUNT_OPT_DEFAULT    (CEPH_MOUNT_OPT_RBYTES | \
 				   CEPH_MOUNT_OPT_DCACHE)

@@ -438,10 +439,14 @@ static inline struct inode *ceph_find_inode(struct super_block *sb,
 /*
  * Ceph inode.
  */
-#define CEPH_I_DIR_ORDERED	1  /* dentries in dir are ordered */
-#define CEPH_I_NODELAY		4  /* do not delay cap release */
-#define CEPH_I_FLUSH		8  /* do not delay flush of dirty metadata */
-#define CEPH_I_NOFLUSH		16 /* do not flush dirty caps */
+#define CEPH_I_DIR_ORDERED	(1 << 0)  /* dentries in dir are ordered */
+#define CEPH_I_NODELAY		(1 << 1)  /* do not delay cap release */
+#define CEPH_I_FLUSH		(1 << 2)  /* do not delay flush of dirty metadata */
+#define CEPH_I_NOFLUSH		(1 << 3)  /* do not flush dirty caps */
+#define CEPH_I_POOL_PERM	(1 << 4)  /* pool rd/wr bits are valid */
+#define CEPH_I_POOL_RD		(1 << 5)  /* can read from pool */
+#define CEPH_I_POOL_WR		(1 << 6)  /* can write to pool */
 
 static inline void __ceph_dir_set_complete(struct ceph_inode_info *ci,
 					   int release_count, int ordered_count)

@@ -879,6 +884,9 @@ extern void ceph_put_fmode(struct ceph_inode_info *ci, int mode);
 /* addr.c */
 extern const struct address_space_operations ceph_aops;
 extern int ceph_mmap(struct file *file, struct vm_area_struct *vma);
+extern int ceph_uninline_data(struct file *filp, struct page *locked_page);
+extern int ceph_pool_perm_check(struct ceph_inode_info *ci, int need);
+extern void ceph_pool_perm_destroy(struct ceph_mds_client *mdsc);
 
 /* file.c */
 extern const struct file_operations ceph_file_fops;

@@ -890,7 +898,6 @@ extern int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
 extern int ceph_release(struct inode *inode, struct file *filp);
 extern void ceph_fill_inline_data(struct inode *inode, struct page *locked_page,
 				  char *data, size_t len);
-int ceph_uninline_data(struct file *filp, struct page *locked_page);
 
 /* dir.c */
 extern const struct file_operations ceph_dir_fops;
 extern const struct file_operations ceph_snapdir_fops;
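The hunk above also rewrites the existing CEPH_I_* constants in the (1 << n) idiom, making it explicit that each is a distinct bit of i_ceph_flags. A small restatement of how ceph_pool_perm_check() consumes the three new bits (illustrative fragment, not new kernel code):

	/* CEPH_I_POOL_PERM marks the cached rd/wr bits as valid; a valid
	 * cache that lacks the needed bit yields -EPERM immediately. */
	int flags = CEPH_I_POOL_PERM | CEPH_I_POOL_RD;	/* read-only pool */
	if ((need & CEPH_CAP_FILE_WR) && !(flags & CEPH_I_POOL_WR))
		return -EPERM;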