未验证 提交 61b43196 编写于 作者: O openeuler-ci-bot 提交者: Gitee

!341 Backport CVEs and fs bugfixes

Merge Pull Request from: @openeuler-sync-bot 
 
Auto sync pull request https://gitee.com/openeuler/kernel/pulls/340 from openEuler-22.03-LTS.

Original pull request related commit(s) at openEuler-22.03-LTS:
04856b0e KVM: VMX: Execute IBPB on emulated VM-exit when guest has IBRS
a9d49f94 bfq: fix null-ptr-deref in bfq_pd_offline
9da915fa i2c: ismt: Fix an out-of-bounds bug in ismt_access()
9bb7487f ksmbd: fix heap-based overflow in set_ntacl_dacl()
6bd39552 ksmbd: prevent out of bound read for SMB2_WRITE
61dc2a2e ksmbd: validate length in smb2_write()
5a5e896a xfs: fix super block buf log item UAF during force shutdown
1146fdf4 xfs: wait iclog complete before tearing down AIL
be18cd15 xfs: get rid of assert from xfs_btree_islastblock

Pull new CVEs:
CVE-2022-2196
CVE-2022-2873
CVE-2022-47942
CVE-2022-47943
CVE-2022-47940

fs bugfixes from Guo Xuenan and Li Nan:
xfs: fix super block buf log item UAF during force shutdown
xfs: wait iclog complete before tearing down AIL
xfs: get rid of assert from xfs_btree_islastblock
bfq: fix null-ptr-deref in bfq_pd_offline 
 
Link: https://gitee.com/openeuler/kernel/pulls/341 
Reviewed-by: Zheng Zengkai <zhengzengkai@huawei.com> 
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com> 
......@@ -4543,6 +4543,17 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
vmx_switch_vmcs(vcpu, &vmx->vmcs01);
/*
* If IBRS is advertised to the vCPU, KVM must flush the indirect
* branch predictors when transitioning from L2 to L1, as L1 expects
* hardware (KVM in this case) to provide separate predictor modes.
* Bare metal isolates VMX root (host) from VMX non-root (guest), but
* doesn't isolate different VMCSs, i.e. in this case, doesn't provide
* separate modes for L2 vs L1.
*/
if (guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
indirect_branch_prediction_barrier();
/* Update any VMCS fields that might have changed while L2 ran */
vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
......
......@@ -1454,8 +1454,10 @@ void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu,
/*
* No indirect branch prediction barrier needed when switching
* the active VMCS within a guest, e.g. on nested VM-Enter.
* The L1 VMM can protect itself with retpolines, IBPB or IBRS.
* the active VMCS within a vCPU, unless IBRS is advertised to
* the vCPU. To minimize the number of IBPBs executed, KVM
* performs IBPB on nested VM-Exit (a single nested transition
* may switch the active VMCS multiple times).
*/
if (!buddy || WARN_ON_ONCE(buddy->vmcs != prev))
indirect_branch_prediction_barrier();
......
......@@ -911,6 +911,9 @@ static void bfq_pd_offline(struct blkg_policy_data *pd)
unsigned long flags;
int i;
if (!bfqg->online)
return;
spin_lock_irqsave(&bfqd->lock, flags);
if (!entity) /* root group */
......
......@@ -507,6 +507,9 @@ static int ismt_access(struct i2c_adapter *adap, u16 addr,
if (read_write == I2C_SMBUS_WRITE) {
/* Block Write */
dev_dbg(dev, "I2C_SMBUS_BLOCK_DATA: WRITE\n");
if (data->block[0] < 1 || data->block[0] > I2C_SMBUS_BLOCK_MAX)
return -EINVAL;
dma_size = data->block[0] + 1;
dma_direction = DMA_TO_DEVICE;
desc->wr_len_cmd = dma_size;
......
......@@ -132,8 +132,11 @@ static int smb2_get_data_area_len(unsigned int *off, unsigned int *len,
*len = le16_to_cpu(((struct smb2_read_req *)hdr)->ReadChannelInfoLength);
break;
case SMB2_WRITE:
if (((struct smb2_write_req *)hdr)->DataOffset) {
*off = le16_to_cpu(((struct smb2_write_req *)hdr)->DataOffset);
if (((struct smb2_write_req *)hdr)->DataOffset ||
((struct smb2_write_req *)hdr)->Length) {
*off = max_t(unsigned int,
le16_to_cpu(((struct smb2_write_req *)hdr)->DataOffset),
offsetof(struct smb2_write_req, Buffer));
*len = le32_to_cpu(((struct smb2_write_req *)hdr)->Length);
break;
}
......
......@@ -539,9 +539,10 @@ int smb2_allocate_rsp_buf(struct ksmbd_work *work)
struct smb2_query_info_req *req;
req = smb2_get_msg(work->request_buf);
if (req->InfoType == SMB2_O_INFO_FILE &&
(req->FileInfoClass == FILE_FULL_EA_INFORMATION ||
req->FileInfoClass == FILE_ALL_INFORMATION))
if ((req->InfoType == SMB2_O_INFO_FILE &&
(req->FileInfoClass == FILE_FULL_EA_INFORMATION ||
req->FileInfoClass == FILE_ALL_INFORMATION)) ||
req->InfoType == SMB2_O_INFO_SECURITY)
sz = large_sz;
}
......@@ -2972,7 +2973,7 @@ int smb2_open(struct ksmbd_work *work)
if (!pntsd)
goto err_out;
rc = build_sec_desc(pntsd, NULL,
rc = build_sec_desc(pntsd, NULL, 0,
OWNER_SECINFO |
GROUP_SECINFO |
DACL_SECINFO,
......@@ -3807,6 +3808,15 @@ static int verify_info_level(int info_level)
return 0;
}
/*
 * Return how many bytes are still free in the SMB2 response buffer after
 * reserving @hdr2_len bytes for the next header.  The result may be
 * negative when the response has already outgrown the buffer, so callers
 * must check for < 0 before using it as a length.
 */
static int smb2_resp_buf_len(struct ksmbd_work *work, unsigned short hdr2_len)
{
	unsigned int used;

	/* +4 accounts for the RFC1002 length field preceding the SMB2 PDU */
	used = get_rfc1002_len(work->response_buf) + 4;
	return (int)(work->response_sz - used) - hdr2_len;
}
static int smb2_calc_max_out_buf_len(struct ksmbd_work *work,
unsigned short hdr2_len,
unsigned int out_buf_len)
......@@ -3816,9 +3826,7 @@ static int smb2_calc_max_out_buf_len(struct ksmbd_work *work,
if (out_buf_len > work->conn->vals->max_trans_size)
return -EINVAL;
free_len = (int)(work->response_sz -
(get_rfc1002_len(work->response_buf) + 4)) -
hdr2_len;
free_len = smb2_resp_buf_len(work, hdr2_len);
if (free_len < 0)
return -EINVAL;
......@@ -5074,10 +5082,10 @@ static int smb2_get_info_sec(struct ksmbd_work *work,
struct smb_ntsd *pntsd = (struct smb_ntsd *)rsp->Buffer, *ppntsd = NULL;
struct smb_fattr fattr = {{0}};
struct inode *inode;
__u32 secdesclen;
__u32 secdesclen = 0;
unsigned int id = KSMBD_NO_FID, pid = KSMBD_NO_FID;
int addition_info = le32_to_cpu(req->AdditionalInformation);
int rc;
int rc = 0, ppntsd_size = 0;
if (addition_info & ~(OWNER_SECINFO | GROUP_SECINFO | DACL_SECINFO |
PROTECTED_DACL_SECINFO |
......@@ -5122,9 +5130,14 @@ static int smb2_get_info_sec(struct ksmbd_work *work,
if (test_share_config_flag(work->tcon->share_conf,
KSMBD_SHARE_FLAG_ACL_XATTR))
ksmbd_vfs_get_sd_xattr(work->conn, fp->filp->f_path.dentry, &ppntsd);
rc = build_sec_desc(pntsd, ppntsd, addition_info, &secdesclen, &fattr);
ppntsd_size = ksmbd_vfs_get_sd_xattr(work->conn,
fp->filp->f_path.dentry,
&ppntsd);
/* Check if sd buffer size exceeds response buffer size */
if (smb2_resp_buf_len(work, 8) > ppntsd_size)
rc = build_sec_desc(pntsd, ppntsd, ppntsd_size,
addition_info, &secdesclen, &fattr);
posix_acl_release(fattr.cf_acls);
posix_acl_release(fattr.cf_dacls);
kfree(ppntsd);
......@@ -6315,23 +6328,18 @@ static noinline int smb2_write_pipe(struct ksmbd_work *work)
length = le32_to_cpu(req->Length);
id = le64_to_cpu(req->VolatileFileId);
if (le16_to_cpu(req->DataOffset) ==
offsetof(struct smb2_write_req, Buffer)) {
data_buf = (char *)&req->Buffer[0];
} else {
if ((u64)le16_to_cpu(req->DataOffset) + length >
get_rfc1002_len(work->request_buf)) {
pr_err("invalid write data offset %u, smb_len %u\n",
le16_to_cpu(req->DataOffset),
get_rfc1002_len(work->request_buf));
err = -EINVAL;
goto out;
}
data_buf = (char *)(((char *)&req->hdr.ProtocolId) +
le16_to_cpu(req->DataOffset));
if ((u64)le16_to_cpu(req->DataOffset) + length >
get_rfc1002_len(work->request_buf)) {
pr_err("invalid write data offset %u, smb_len %u\n",
le16_to_cpu(req->DataOffset),
get_rfc1002_len(work->request_buf));
err = -EINVAL;
goto out;
}
data_buf = (char *)(((char *)&req->hdr.ProtocolId) +
le16_to_cpu(req->DataOffset));
rpc_resp = ksmbd_rpc_write(work->sess, id, data_buf, length);
if (rpc_resp) {
if (rpc_resp->flags == KSMBD_RPC_ENOTIMPLEMENTED) {
......@@ -6477,23 +6485,15 @@ int smb2_write(struct ksmbd_work *work)
if (req->Channel != SMB2_CHANNEL_RDMA_V1 &&
req->Channel != SMB2_CHANNEL_RDMA_V1_INVALIDATE) {
if (le16_to_cpu(req->DataOffset) ==
if (le16_to_cpu(req->DataOffset) <
offsetof(struct smb2_write_req, Buffer)) {
data_buf = (char *)&req->Buffer[0];
} else {
if ((u64)le16_to_cpu(req->DataOffset) + length >
get_rfc1002_len(work->request_buf)) {
pr_err("invalid write data offset %u, smb_len %u\n",
le16_to_cpu(req->DataOffset),
get_rfc1002_len(work->request_buf));
err = -EINVAL;
goto out;
}
data_buf = (char *)(((char *)&req->hdr.ProtocolId) +
le16_to_cpu(req->DataOffset));
err = -EINVAL;
goto out;
}
data_buf = (char *)(((char *)&req->hdr.ProtocolId) +
le16_to_cpu(req->DataOffset));
ksmbd_debug(SMB, "flags %u\n", le32_to_cpu(req->Flags));
if (le32_to_cpu(req->Flags) & SMB2_WRITEFLAG_WRITE_THROUGH)
writethrough = true;
......
......@@ -688,6 +688,7 @@ static void set_posix_acl_entries_dacl(struct smb_ace *pndace,
}
static void set_ntacl_dacl(struct smb_acl *pndacl, struct smb_acl *nt_dacl,
unsigned int aces_size,
const struct smb_sid *pownersid,
const struct smb_sid *pgrpsid,
struct smb_fattr *fattr)
......@@ -701,9 +702,19 @@ static void set_ntacl_dacl(struct smb_acl *pndacl, struct smb_acl *nt_dacl,
if (nt_num_aces) {
ntace = (struct smb_ace *)((char *)nt_dacl + sizeof(struct smb_acl));
for (i = 0; i < nt_num_aces; i++) {
memcpy((char *)pndace + size, ntace, le16_to_cpu(ntace->size));
size += le16_to_cpu(ntace->size);
ntace = (struct smb_ace *)((char *)ntace + le16_to_cpu(ntace->size));
unsigned short nt_ace_size;
if (offsetof(struct smb_ace, access_req) > aces_size)
break;
nt_ace_size = le16_to_cpu(ntace->size);
if (nt_ace_size > aces_size)
break;
memcpy((char *)pndace + size, ntace, nt_ace_size);
size += nt_ace_size;
aces_size -= nt_ace_size;
ntace = (struct smb_ace *)((char *)ntace + nt_ace_size);
num_aces++;
}
}
......@@ -872,7 +883,7 @@ int parse_sec_desc(struct smb_ntsd *pntsd, int acl_len,
/* Convert permission bits from mode to equivalent CIFS ACL */
int build_sec_desc(struct smb_ntsd *pntsd, struct smb_ntsd *ppntsd,
int addition_info, __u32 *secdesclen,
int ppntsd_size, int addition_info, __u32 *secdesclen,
struct smb_fattr *fattr)
{
int rc = 0;
......@@ -932,15 +943,25 @@ int build_sec_desc(struct smb_ntsd *pntsd, struct smb_ntsd *ppntsd,
if (!ppntsd) {
set_mode_dacl(dacl_ptr, fattr);
} else if (!ppntsd->dacloffset) {
goto out;
} else {
struct smb_acl *ppdacl_ptr;
ppdacl_ptr = (struct smb_acl *)((char *)ppntsd +
le32_to_cpu(ppntsd->dacloffset));
set_ntacl_dacl(dacl_ptr, ppdacl_ptr, nowner_sid_ptr,
ngroup_sid_ptr, fattr);
unsigned int dacl_offset = le32_to_cpu(ppntsd->dacloffset);
int ppdacl_size, ntacl_size = ppntsd_size - dacl_offset;
if (!dacl_offset ||
(dacl_offset + sizeof(struct smb_acl) > ppntsd_size))
goto out;
ppdacl_ptr = (struct smb_acl *)((char *)ppntsd + dacl_offset);
ppdacl_size = le16_to_cpu(ppdacl_ptr->size);
if (ppdacl_size > ntacl_size ||
ppdacl_size < sizeof(struct smb_acl))
goto out;
set_ntacl_dacl(dacl_ptr, ppdacl_ptr,
ntacl_size - sizeof(struct smb_acl),
nowner_sid_ptr, ngroup_sid_ptr,
fattr);
}
pntsd->dacloffset = cpu_to_le32(offset);
offset += le16_to_cpu(dacl_ptr->size);
......@@ -973,23 +994,31 @@ int smb_inherit_dacl(struct ksmbd_conn *conn,
struct smb_ntsd *parent_pntsd = NULL;
struct smb_sid owner_sid, group_sid;
struct dentry *parent = path->dentry->d_parent;
int inherited_flags = 0, flags = 0, i, ace_cnt = 0, nt_size = 0;
int rc = 0, num_aces, dacloffset, pntsd_type, acl_len;
int inherited_flags = 0, flags = 0, i, ace_cnt = 0, nt_size = 0, pdacl_size;
int rc = 0, num_aces, dacloffset, pntsd_type, pntsd_size, acl_len, aces_size;
char *aces_base;
bool is_dir = S_ISDIR(d_inode(path->dentry)->i_mode);
acl_len = ksmbd_vfs_get_sd_xattr(conn, parent, &parent_pntsd);
if (acl_len <= 0)
pntsd_size = ksmbd_vfs_get_sd_xattr(conn,
parent, &parent_pntsd);
if (pntsd_size <= 0)
return -ENOENT;
dacloffset = le32_to_cpu(parent_pntsd->dacloffset);
if (!dacloffset) {
if (!dacloffset || (dacloffset + sizeof(struct smb_acl) > pntsd_size)) {
rc = -EINVAL;
goto free_parent_pntsd;
}
parent_pdacl = (struct smb_acl *)((char *)parent_pntsd + dacloffset);
acl_len = pntsd_size - dacloffset;
num_aces = le32_to_cpu(parent_pdacl->num_aces);
pntsd_type = le16_to_cpu(parent_pntsd->type);
pdacl_size = le16_to_cpu(parent_pdacl->size);
if (pdacl_size > acl_len || pdacl_size < sizeof(struct smb_acl)) {
rc = -EINVAL;
goto free_parent_pntsd;
}
aces_base = kmalloc(sizeof(struct smb_ace) * num_aces * 2, GFP_KERNEL);
if (!aces_base) {
......@@ -1000,11 +1029,23 @@ int smb_inherit_dacl(struct ksmbd_conn *conn,
aces = (struct smb_ace *)aces_base;
parent_aces = (struct smb_ace *)((char *)parent_pdacl +
sizeof(struct smb_acl));
aces_size = acl_len - sizeof(struct smb_acl);
if (pntsd_type & DACL_AUTO_INHERITED)
inherited_flags = INHERITED_ACE;
for (i = 0; i < num_aces; i++) {
int pace_size;
if (offsetof(struct smb_ace, access_req) > aces_size)
break;
pace_size = le16_to_cpu(parent_aces->size);
if (pace_size > aces_size)
break;
aces_size -= pace_size;
flags = parent_aces->flags;
if (!smb_inherit_flags(flags, is_dir))
goto pass;
......@@ -1049,8 +1090,7 @@ int smb_inherit_dacl(struct ksmbd_conn *conn,
aces = (struct smb_ace *)((char *)aces + le16_to_cpu(aces->size));
ace_cnt++;
pass:
parent_aces =
(struct smb_ace *)((char *)parent_aces + le16_to_cpu(parent_aces->size));
parent_aces = (struct smb_ace *)((char *)parent_aces + pace_size);
}
if (nt_size > 0) {
......@@ -1143,7 +1183,7 @@ int smb_check_perm_dacl(struct ksmbd_conn *conn, struct path *path,
struct smb_ntsd *pntsd = NULL;
struct smb_acl *pdacl;
struct posix_acl *posix_acls;
int rc = 0, acl_size;
int rc = 0, pntsd_size, acl_size, aces_size, pdacl_size, dacl_offset;
struct smb_sid sid;
int granted = le32_to_cpu(*pdaccess & ~FILE_MAXIMAL_ACCESS_LE);
struct smb_ace *ace;
......@@ -1152,36 +1192,33 @@ int smb_check_perm_dacl(struct ksmbd_conn *conn, struct path *path,
struct smb_ace *others_ace = NULL;
struct posix_acl_entry *pa_entry;
unsigned int sid_type = SIDOWNER;
char *end_of_acl;
unsigned short ace_size;
ksmbd_debug(SMB, "check permission using windows acl\n");
acl_size = ksmbd_vfs_get_sd_xattr(conn, path->dentry, &pntsd);
if (acl_size <= 0 || !pntsd || !pntsd->dacloffset) {
kfree(pntsd);
return 0;
}
pntsd_size = ksmbd_vfs_get_sd_xattr(conn,
path->dentry, &pntsd);
if (pntsd_size <= 0 || !pntsd)
goto err_out;
dacl_offset = le32_to_cpu(pntsd->dacloffset);
if (!dacl_offset ||
(dacl_offset + sizeof(struct smb_acl) > pntsd_size))
goto err_out;
pdacl = (struct smb_acl *)((char *)pntsd + le32_to_cpu(pntsd->dacloffset));
end_of_acl = ((char *)pntsd) + acl_size;
if (end_of_acl <= (char *)pdacl) {
kfree(pntsd);
return 0;
}
acl_size = pntsd_size - dacl_offset;
pdacl_size = le16_to_cpu(pdacl->size);
if (end_of_acl < (char *)pdacl + le16_to_cpu(pdacl->size) ||
le16_to_cpu(pdacl->size) < sizeof(struct smb_acl)) {
kfree(pntsd);
return 0;
}
if (pdacl_size > acl_size || pdacl_size < sizeof(struct smb_acl))
goto err_out;
if (!pdacl->num_aces) {
if (!(le16_to_cpu(pdacl->size) - sizeof(struct smb_acl)) &&
if (!(pdacl_size - sizeof(struct smb_acl)) &&
*pdaccess & ~(FILE_READ_CONTROL_LE | FILE_WRITE_DAC_LE)) {
rc = -EACCES;
goto err_out;
}
kfree(pntsd);
return 0;
goto err_out;
}
if (*pdaccess & FILE_MAXIMAL_ACCESS_LE) {
......@@ -1189,11 +1226,16 @@ int smb_check_perm_dacl(struct ksmbd_conn *conn, struct path *path,
DELETE;
ace = (struct smb_ace *)((char *)pdacl + sizeof(struct smb_acl));
aces_size = acl_size - sizeof(struct smb_acl);
for (i = 0; i < le32_to_cpu(pdacl->num_aces); i++) {
if (offsetof(struct smb_ace, access_req) > aces_size)
break;
ace_size = le16_to_cpu(ace->size);
if (ace_size > aces_size)
break;
aces_size -= ace_size;
granted |= le32_to_cpu(ace->access_req);
ace = (struct smb_ace *)((char *)ace + le16_to_cpu(ace->size));
if (end_of_acl < (char *)ace)
goto err_out;
}
if (!pdacl->num_aces)
......@@ -1205,7 +1247,15 @@ int smb_check_perm_dacl(struct ksmbd_conn *conn, struct path *path,
id_to_sid(uid, sid_type, &sid);
ace = (struct smb_ace *)((char *)pdacl + sizeof(struct smb_acl));
aces_size = acl_size - sizeof(struct smb_acl);
for (i = 0; i < le32_to_cpu(pdacl->num_aces); i++) {
if (offsetof(struct smb_ace, access_req) > aces_size)
break;
ace_size = le16_to_cpu(ace->size);
if (ace_size > aces_size)
break;
aces_size -= ace_size;
if (!compare_sids(&sid, &ace->sid) ||
!compare_sids(&sid_unix_NFS_mode, &ace->sid)) {
found = 1;
......@@ -1215,8 +1265,6 @@ int smb_check_perm_dacl(struct ksmbd_conn *conn, struct path *path,
others_ace = ace;
ace = (struct smb_ace *)((char *)ace + le16_to_cpu(ace->size));
if (end_of_acl < (char *)ace)
goto err_out;
}
if (*pdaccess & FILE_MAXIMAL_ACCESS_LE && found) {
......
......@@ -192,7 +192,7 @@ struct posix_acl_state {
int parse_sec_desc(struct smb_ntsd *pntsd, int acl_len,
struct smb_fattr *fattr);
int build_sec_desc(struct smb_ntsd *pntsd, struct smb_ntsd *ppntsd,
int addition_info, __u32 *secdesclen,
int ppntsd_size, int addition_info, __u32 *secdesclen,
struct smb_fattr *fattr);
int init_acl_state(struct posix_acl_state *state, int cnt);
void free_acl_state(struct posix_acl_state *state);
......
......@@ -1495,6 +1495,11 @@ int ksmbd_vfs_get_sd_xattr(struct ksmbd_conn *conn, struct dentry *dentry,
}
*pntsd = acl.sd_buf;
if (acl.sd_size < sizeof(struct smb_ntsd)) {
pr_err("sd size is invalid\n");
goto out_free;
}
(*pntsd)->osidoffset =
cpu_to_le32(le32_to_cpu((*pntsd)->osidoffset) - NDR_NTSD_OFFSETOF);
(*pntsd)->gsidoffset =
......
......@@ -523,7 +523,6 @@ xfs_btree_islastblock(
struct xfs_buf *bp;
block = xfs_btree_get_block(cur, level, &bp);
ASSERT(block && xfs_btree_check_block(cur, block, level, bp) == 0);
if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
return block->bb_u.l.bb_rightsib == cpu_to_be64(NULLFSBLOCK);
......
......@@ -936,6 +936,8 @@ xfs_buf_item_relse(
trace_xfs_buf_item_relse(bp, _RET_IP_);
ASSERT(!test_bit(XFS_LI_IN_AIL, &bip->bli_item.li_flags));
if (atomic_read(&bip->bli_refcount))
return;
bp->b_log_item = NULL;
xfs_buf_rele(bp);
xfs_buf_item_free(bip);
......
......@@ -847,6 +847,24 @@ xlog_force_iclog(
return xlog_state_release_iclog(iclog->ic_log, iclog, 0);
}
/*
 * Cycle all the iclogbuf locks to make sure all log IO completion
 * is done before we tear down these buffers.
 */
static void
xlog_wait_iclog_completion(struct xlog *log)
{
	struct xlog_in_core	*ic = log->l_iclog;
	int			n;

	for (n = 0; n < log->l_iclog_bufs; n++) {
		/*
		 * Taking and immediately releasing ic_sema waits out any
		 * log IO still holding the semaphore on this iclog.
		 */
		down(&ic->ic_sema);
		up(&ic->ic_sema);
		ic = ic->ic_next;
	}
}
/*
* Wait for the iclog and all prior iclogs to be written disk as required by the
* log force state machine. Waiting on ic_force_wait ensures iclog completions
......@@ -1034,6 +1052,13 @@ xfs_log_unmount(
struct xfs_mount *mp)
{
xfs_log_quiesce(mp);
/*
* If shutdown has come from iclog IO context, the log
* cleaning will have been skipped and so we need to wait
* for the iclog to complete shutdown processing before we
* tear anything down.
*/
xlog_wait_iclog_completion(mp->m_log);
xfs_trans_ail_destroy(mp);
......@@ -1941,17 +1966,6 @@ xlog_dealloc_log(
xlog_in_core_t *iclog, *next_iclog;
int i;
/*
* Cycle all the iclogbuf locks to make sure all log IO completion
* is done before we tear down these buffers.
*/
iclog = log->l_iclog;
for (i = 0; i < log->l_iclog_bufs; i++) {
down(&iclog->ic_sema);
up(&iclog->ic_sema);
iclog = iclog->ic_next;
}
/*
* Destroy the CIL after waiting for iclog IO completion because an
* iclog EIO error will try to shut down the log, which accesses the
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册