diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index 6c4277e99d586930df9d74d2a953a7eb6887fbac..42d9345b10c332e90a4a2f5cf2411c0069e536d4 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -4532,6 +4532,17 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
 
         vmx_switch_vmcs(vcpu, &vmx->vmcs01);
 
+        /*
+         * If IBRS is advertised to the vCPU, KVM must flush the indirect
+         * branch predictors when transitioning from L2 to L1, as L1 expects
+         * hardware (KVM in this case) to provide separate predictor modes.
+         * Bare metal isolates VMX root (host) from VMX non-root (guest), but
+         * doesn't isolate different VMCSs, i.e. in this case, doesn't provide
+         * separate modes for L2 vs L1.
+         */
+        if (guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
+                indirect_branch_prediction_barrier();
+
         /* Update any VMCS fields that might have changed while L2 ran */
         vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
         vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index e0e3eab6910cb6b651db6ff019460e725636bd59..e47c4324d9ebb4a8e6cfd491fec2133e58b250e8 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -1439,8 +1439,10 @@ void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu,
 
                 /*
                  * No indirect branch prediction barrier needed when switching
-                 * the active VMCS within a guest, e.g. on nested VM-Enter.
-                 * The L1 VMM can protect itself with retpolines, IBPB or IBRS.
+                 * the active VMCS within a vCPU, unless IBRS is advertised to
+                 * the vCPU.  To minimize the number of IBPBs executed, KVM
+                 * performs IBPB on nested VM-Exit (a single nested transition
+                 * may switch the active VMCS multiple times).
                  */
                 if (!buddy || WARN_ON_ONCE(buddy->vmcs != prev))
                         indirect_branch_prediction_barrier();
diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c
index 36ba7324f685385bbd6b0c4732ff233c70238332..a285711f1b4834b942d82cd0239c1412acfa39e5 100644
--- a/block/bfq-cgroup.c
+++ b/block/bfq-cgroup.c
@@ -911,6 +911,9 @@ static void bfq_pd_offline(struct blkg_policy_data *pd)
         unsigned long flags;
         int i;
 
+        if (!bfqg->online)
+                return;
+
         spin_lock_irqsave(&bfqd->lock, flags);
 
         if (!entity) /* root group */
diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c
index 3d2d92640651e698bcc6f01685a7dbfab5ae1fa2..cec2b2ae7684fc63e2a1f9a6fbc4267744ccf323 100644
--- a/drivers/i2c/busses/i2c-ismt.c
+++ b/drivers/i2c/busses/i2c-ismt.c
@@ -507,6 +507,9 @@ static int ismt_access(struct i2c_adapter *adap, u16 addr,
                 if (read_write == I2C_SMBUS_WRITE) {
                         /* Block Write */
                         dev_dbg(dev, "I2C_SMBUS_BLOCK_DATA: WRITE\n");
+                        if (data->block[0] < 1 || data->block[0] > I2C_SMBUS_BLOCK_MAX)
+                                return -EINVAL;
+
                         dma_size = data->block[0] + 1;
                         dma_direction = DMA_TO_DEVICE;
                         desc->wr_len_cmd = dma_size;
diff --git a/fs/ksmbd/smb2misc.c b/fs/ksmbd/smb2misc.c
index 81b227034865e2dab935998170f1e85dd3ff5eb1..62e90b229ad7275e3d7554ac076149c096fbabc0 100644
--- a/fs/ksmbd/smb2misc.c
+++ b/fs/ksmbd/smb2misc.c
@@ -132,8 +132,11 @@ static int smb2_get_data_area_len(unsigned int *off, unsigned int *len,
                 *len = le16_to_cpu(((struct smb2_read_req *)hdr)->ReadChannelInfoLength);
                 break;
         case SMB2_WRITE:
-                if (((struct smb2_write_req *)hdr)->DataOffset) {
-                        *off = le16_to_cpu(((struct smb2_write_req *)hdr)->DataOffset);
+                if (((struct smb2_write_req *)hdr)->DataOffset ||
+                    ((struct smb2_write_req *)hdr)->Length) {
+                        *off = max_t(unsigned int,
+                                     le16_to_cpu(((struct smb2_write_req *)hdr)->DataOffset),
+                                     offsetof(struct smb2_write_req, Buffer));
                         *len = le32_to_cpu(((struct smb2_write_req *)hdr)->Length);
                         break;
                 }
diff --git a/fs/ksmbd/smb2pdu.c b/fs/ksmbd/smb2pdu.c
index 5a7eab3991db5d604278a2b4b175342e452ff116..15b7f876cb01e96a002ad32ed6bd61983589b693 100644
--- a/fs/ksmbd/smb2pdu.c
+++ b/fs/ksmbd/smb2pdu.c
@@ -539,9 +539,10 @@ int smb2_allocate_rsp_buf(struct ksmbd_work *work)
                 struct smb2_query_info_req *req;
 
                 req = smb2_get_msg(work->request_buf);
-                if (req->InfoType == SMB2_O_INFO_FILE &&
-                    (req->FileInfoClass == FILE_FULL_EA_INFORMATION ||
-                     req->FileInfoClass == FILE_ALL_INFORMATION))
+                if ((req->InfoType == SMB2_O_INFO_FILE &&
+                     (req->FileInfoClass == FILE_FULL_EA_INFORMATION ||
+                     req->FileInfoClass == FILE_ALL_INFORMATION)) ||
+                    req->InfoType == SMB2_O_INFO_SECURITY)
                         sz = large_sz;
         }
 
@@ -2972,7 +2973,7 @@ int smb2_open(struct ksmbd_work *work)
                                 if (!pntsd)
                                         goto err_out;
 
-                                rc = build_sec_desc(pntsd, NULL,
+                                rc = build_sec_desc(pntsd, NULL, 0,
                                                     OWNER_SECINFO |
                                                     GROUP_SECINFO |
                                                     DACL_SECINFO,
@@ -3807,6 +3808,15 @@ static int verify_info_level(int info_level)
         return 0;
 }
 
+static int smb2_resp_buf_len(struct ksmbd_work *work, unsigned short hdr2_len)
+{
+        int free_len;
+
+        free_len = (int)(work->response_sz -
+                   (get_rfc1002_len(work->response_buf) + 4)) - hdr2_len;
+        return free_len;
+}
+
 static int smb2_calc_max_out_buf_len(struct ksmbd_work *work,
                                      unsigned short hdr2_len,
                                      unsigned int out_buf_len)
@@ -3816,9 +3826,7 @@ static int smb2_calc_max_out_buf_len(struct ksmbd_work *work,
         if (out_buf_len > work->conn->vals->max_trans_size)
                 return -EINVAL;
 
-        free_len = (int)(work->response_sz -
-                         (get_rfc1002_len(work->response_buf) + 4)) -
-                   hdr2_len;
+        free_len = smb2_resp_buf_len(work, hdr2_len);
         if (free_len < 0)
                 return -EINVAL;
 
@@ -5074,10 +5082,10 @@ static int smb2_get_info_sec(struct ksmbd_work *work,
         struct smb_ntsd *pntsd = (struct smb_ntsd *)rsp->Buffer, *ppntsd = NULL;
         struct smb_fattr fattr = {{0}};
         struct inode *inode;
-        __u32 secdesclen;
+        __u32 secdesclen = 0;
         unsigned int id = KSMBD_NO_FID, pid = KSMBD_NO_FID;
         int addition_info = le32_to_cpu(req->AdditionalInformation);
-        int rc;
+        int rc = 0, ppntsd_size = 0;
 
         if (addition_info & ~(OWNER_SECINFO | GROUP_SECINFO | DACL_SECINFO |
                               PROTECTED_DACL_SECINFO |
@@ -5122,9 +5130,14 @@ static int smb2_get_info_sec(struct ksmbd_work *work,
 
         if (test_share_config_flag(work->tcon->share_conf,
                                    KSMBD_SHARE_FLAG_ACL_XATTR))
-                ksmbd_vfs_get_sd_xattr(work->conn, fp->filp->f_path.dentry, &ppntsd);
-
-        rc = build_sec_desc(pntsd, ppntsd, addition_info, &secdesclen, &fattr);
+                ppntsd_size = ksmbd_vfs_get_sd_xattr(work->conn,
+                                                     fp->filp->f_path.dentry,
+                                                     &ppntsd);
+
+        /* Check if sd buffer size exceeds response buffer size */
+        if (smb2_resp_buf_len(work, 8) > ppntsd_size)
+                rc = build_sec_desc(pntsd, ppntsd, ppntsd_size,
+                                    addition_info, &secdesclen, &fattr);
         posix_acl_release(fattr.cf_acls);
         posix_acl_release(fattr.cf_dacls);
         kfree(ppntsd);
@@ -6315,23 +6328,18 @@ static noinline int smb2_write_pipe(struct ksmbd_work *work)
         length = le32_to_cpu(req->Length);
         id = le64_to_cpu(req->VolatileFileId);
 
-        if (le16_to_cpu(req->DataOffset) ==
-            offsetof(struct smb2_write_req, Buffer)) {
-                data_buf = (char *)&req->Buffer[0];
-        } else {
-                if ((u64)le16_to_cpu(req->DataOffset) + length >
-                    get_rfc1002_len(work->request_buf)) {
-                        pr_err("invalid write data offset %u, smb_len %u\n",
-                               le16_to_cpu(req->DataOffset),
-                               get_rfc1002_len(work->request_buf));
-                        err = -EINVAL;
-                        goto out;
-                }
-
-                data_buf = (char *)(((char *)&req->hdr.ProtocolId) +
-                                    le16_to_cpu(req->DataOffset));
+        if ((u64)le16_to_cpu(req->DataOffset) + length >
+            get_rfc1002_len(work->request_buf)) {
+                pr_err("invalid write data offset %u, smb_len %u\n",
+                       le16_to_cpu(req->DataOffset),
+                       get_rfc1002_len(work->request_buf));
+                err = -EINVAL;
+                goto out;
         }
+        data_buf = (char *)(((char *)&req->hdr.ProtocolId) +
+                            le16_to_cpu(req->DataOffset));
+
         rpc_resp = ksmbd_rpc_write(work->sess, id, data_buf, length);
         if (rpc_resp) {
                 if (rpc_resp->flags == KSMBD_RPC_ENOTIMPLEMENTED) {
@@ -6477,23 +6485,15 @@ int smb2_write(struct ksmbd_work *work)
 
         if (req->Channel != SMB2_CHANNEL_RDMA_V1 &&
             req->Channel != SMB2_CHANNEL_RDMA_V1_INVALIDATE) {
-                if (le16_to_cpu(req->DataOffset) ==
+                if (le16_to_cpu(req->DataOffset) <
                     offsetof(struct smb2_write_req, Buffer)) {
-                        data_buf = (char *)&req->Buffer[0];
-                } else {
-                        if ((u64)le16_to_cpu(req->DataOffset) + length >
-                            get_rfc1002_len(work->request_buf)) {
-                                pr_err("invalid write data offset %u, smb_len %u\n",
-                                       le16_to_cpu(req->DataOffset),
-                                       get_rfc1002_len(work->request_buf));
-                                err = -EINVAL;
-                                goto out;
-                        }
-
-                        data_buf = (char *)(((char *)&req->hdr.ProtocolId) +
-                                            le16_to_cpu(req->DataOffset));
+                        err = -EINVAL;
+                        goto out;
                 }
+                data_buf = (char *)(((char *)&req->hdr.ProtocolId) +
+                                    le16_to_cpu(req->DataOffset));
+
                 ksmbd_debug(SMB, "flags %u\n", le32_to_cpu(req->Flags));
                 if (le32_to_cpu(req->Flags) & SMB2_WRITEFLAG_WRITE_THROUGH)
                         writethrough = true;
diff --git a/fs/ksmbd/smbacl.c b/fs/ksmbd/smbacl.c
index aa2c76e1bcbe695f6d099e0b84bcbd1329b9ae37..d76cee773b826a0a94ffb5173e701bf95e3ad55b 100644
--- a/fs/ksmbd/smbacl.c
+++ b/fs/ksmbd/smbacl.c
@@ -688,6 +688,7 @@ static void set_posix_acl_entries_dacl(struct smb_ace *pndace,
 }
 
 static void set_ntacl_dacl(struct smb_acl *pndacl, struct smb_acl *nt_dacl,
+                           unsigned int aces_size,
                            const struct smb_sid *pownersid,
                            const struct smb_sid *pgrpsid,
                            struct smb_fattr *fattr)
@@ -701,9 +702,19 @@ static void set_ntacl_dacl(struct smb_acl *pndacl, struct smb_acl *nt_dacl,
         if (nt_num_aces) {
                 ntace = (struct smb_ace *)((char *)nt_dacl + sizeof(struct smb_acl));
                 for (i = 0; i < nt_num_aces; i++) {
-                        memcpy((char *)pndace + size, ntace, le16_to_cpu(ntace->size));
-                        size += le16_to_cpu(ntace->size);
-                        ntace = (struct smb_ace *)((char *)ntace + le16_to_cpu(ntace->size));
+                        unsigned short nt_ace_size;
+
+                        if (offsetof(struct smb_ace, access_req) > aces_size)
+                                break;
+
+                        nt_ace_size = le16_to_cpu(ntace->size);
+                        if (nt_ace_size > aces_size)
+                                break;
+
+                        memcpy((char *)pndace + size, ntace, nt_ace_size);
+                        size += nt_ace_size;
+                        aces_size -= nt_ace_size;
+                        ntace = (struct smb_ace *)((char *)ntace + nt_ace_size);
                         num_aces++;
                 }
         }
@@ -872,7 +883,7 @@ int parse_sec_desc(struct smb_ntsd *pntsd, int acl_len,
 
 /* Convert permission bits from mode to equivalent CIFS ACL */
 int build_sec_desc(struct smb_ntsd *pntsd, struct smb_ntsd *ppntsd,
-                   int addition_info, __u32 *secdesclen,
+                   int ppntsd_size, int addition_info, __u32 *secdesclen,
                    struct smb_fattr *fattr)
 {
         int rc = 0;
@@ -932,15 +943,25 @@ int build_sec_desc(struct smb_ntsd *pntsd, struct smb_ntsd *ppntsd,
 
                 if (!ppntsd) {
                         set_mode_dacl(dacl_ptr, fattr);
-                } else if (!ppntsd->dacloffset) {
-                        goto out;
                 } else {
                         struct smb_acl *ppdacl_ptr;
-
-                        ppdacl_ptr = (struct smb_acl *)((char *)ppntsd +
-                                                le32_to_cpu(ppntsd->dacloffset));
-                        set_ntacl_dacl(dacl_ptr, ppdacl_ptr, nowner_sid_ptr,
-                                       ngroup_sid_ptr, fattr);
+                        unsigned int dacl_offset = le32_to_cpu(ppntsd->dacloffset);
+                        int ppdacl_size, ntacl_size = ppntsd_size - dacl_offset;
+
+                        if (!dacl_offset ||
+                            (dacl_offset + sizeof(struct smb_acl) > ppntsd_size))
+                                goto out;
+
+                        ppdacl_ptr = (struct smb_acl *)((char *)ppntsd + dacl_offset);
+                        ppdacl_size = le16_to_cpu(ppdacl_ptr->size);
+                        if (ppdacl_size > ntacl_size ||
+                            ppdacl_size < sizeof(struct smb_acl))
+                                goto out;
+
+                        set_ntacl_dacl(dacl_ptr, ppdacl_ptr,
+                                       ntacl_size - sizeof(struct smb_acl),
+                                       nowner_sid_ptr, ngroup_sid_ptr,
+                                       fattr);
                 }
                 pntsd->dacloffset = cpu_to_le32(offset);
                 offset += le16_to_cpu(dacl_ptr->size);
@@ -973,23 +994,31 @@ int smb_inherit_dacl(struct ksmbd_conn *conn,
         struct smb_ntsd *parent_pntsd = NULL;
         struct smb_sid owner_sid, group_sid;
         struct dentry *parent = path->dentry->d_parent;
-        int inherited_flags = 0, flags = 0, i, ace_cnt = 0, nt_size = 0;
-        int rc = 0, num_aces, dacloffset, pntsd_type, acl_len;
+        int inherited_flags = 0, flags = 0, i, ace_cnt = 0, nt_size = 0, pdacl_size;
+        int rc = 0, num_aces, dacloffset, pntsd_type, pntsd_size, acl_len, aces_size;
         char *aces_base;
         bool is_dir = S_ISDIR(d_inode(path->dentry)->i_mode);
 
-        acl_len = ksmbd_vfs_get_sd_xattr(conn, parent, &parent_pntsd);
-        if (acl_len <= 0)
+        pntsd_size = ksmbd_vfs_get_sd_xattr(conn,
+                                            parent, &parent_pntsd);
+        if (pntsd_size <= 0)
                 return -ENOENT;
         dacloffset = le32_to_cpu(parent_pntsd->dacloffset);
-        if (!dacloffset) {
+        if (!dacloffset || (dacloffset + sizeof(struct smb_acl) > pntsd_size)) {
                 rc = -EINVAL;
                 goto free_parent_pntsd;
         }
 
         parent_pdacl = (struct smb_acl *)((char *)parent_pntsd + dacloffset);
+        acl_len = pntsd_size - dacloffset;
         num_aces = le32_to_cpu(parent_pdacl->num_aces);
         pntsd_type = le16_to_cpu(parent_pntsd->type);
+        pdacl_size = le16_to_cpu(parent_pdacl->size);
+
+        if (pdacl_size > acl_len || pdacl_size < sizeof(struct smb_acl)) {
+                rc = -EINVAL;
+                goto free_parent_pntsd;
+        }
 
         aces_base = kmalloc(sizeof(struct smb_ace) * num_aces * 2, GFP_KERNEL);
         if (!aces_base) {
@@ -1000,11 +1029,23 @@ int smb_inherit_dacl(struct ksmbd_conn *conn,
         aces = (struct smb_ace *)aces_base;
         parent_aces = (struct smb_ace *)((char *)parent_pdacl +
                         sizeof(struct smb_acl));
+        aces_size = acl_len - sizeof(struct smb_acl);
 
         if (pntsd_type & DACL_AUTO_INHERITED)
                 inherited_flags = INHERITED_ACE;
 
         for (i = 0; i < num_aces; i++) {
+                int pace_size;
+
+                if (offsetof(struct smb_ace, access_req) > aces_size)
+                        break;
+
+                pace_size = le16_to_cpu(parent_aces->size);
+                if (pace_size > aces_size)
+                        break;
+
+                aces_size -= pace_size;
+
                 flags = parent_aces->flags;
                 if (!smb_inherit_flags(flags, is_dir))
                         goto pass;
@@ -1049,8 +1090,7 @@ int smb_inherit_dacl(struct ksmbd_conn *conn,
                 aces = (struct smb_ace *)((char *)aces + le16_to_cpu(aces->size));
                 ace_cnt++;
 pass:
-                parent_aces =
-                        (struct smb_ace *)((char *)parent_aces + le16_to_cpu(parent_aces->size));
+                parent_aces = (struct smb_ace *)((char *)parent_aces + pace_size);
         }
 
         if (nt_size > 0) {
@@ -1143,7 +1183,7 @@ int smb_check_perm_dacl(struct ksmbd_conn *conn, struct path *path,
         struct smb_ntsd *pntsd = NULL;
         struct smb_acl *pdacl;
         struct posix_acl *posix_acls;
-        int rc = 0, acl_size;
+        int rc = 0, pntsd_size, acl_size, aces_size, pdacl_size, dacl_offset;
         struct smb_sid sid;
         int granted = le32_to_cpu(*pdaccess & ~FILE_MAXIMAL_ACCESS_LE);
         struct smb_ace *ace;
@@ -1152,36 +1192,33 @@ int smb_check_perm_dacl(struct ksmbd_conn *conn, struct path *path,
         struct smb_ace *others_ace = NULL;
         struct posix_acl_entry *pa_entry;
         unsigned int sid_type = SIDOWNER;
-        char *end_of_acl;
+        unsigned short ace_size;
 
         ksmbd_debug(SMB, "check permission using windows acl\n");
-        acl_size = ksmbd_vfs_get_sd_xattr(conn, path->dentry, &pntsd);
-        if (acl_size <= 0 || !pntsd || !pntsd->dacloffset) {
-                kfree(pntsd);
-                return 0;
-        }
+        pntsd_size = ksmbd_vfs_get_sd_xattr(conn,
+                                            path->dentry, &pntsd);
+        if (pntsd_size <= 0 || !pntsd)
+                goto err_out;
+
+        dacl_offset = le32_to_cpu(pntsd->dacloffset);
+        if (!dacl_offset ||
+            (dacl_offset + sizeof(struct smb_acl) > pntsd_size))
+                goto err_out;
 
         pdacl = (struct smb_acl *)((char *)pntsd + le32_to_cpu(pntsd->dacloffset));
-        end_of_acl = ((char *)pntsd) + acl_size;
-        if (end_of_acl <= (char *)pdacl) {
-                kfree(pntsd);
-                return 0;
-        }
+        acl_size = pntsd_size - dacl_offset;
+        pdacl_size = le16_to_cpu(pdacl->size);
 
-        if (end_of_acl < (char *)pdacl + le16_to_cpu(pdacl->size) ||
-            le16_to_cpu(pdacl->size) < sizeof(struct smb_acl)) {
-                kfree(pntsd);
-                return 0;
-        }
+        if (pdacl_size > acl_size || pdacl_size < sizeof(struct smb_acl))
+                goto err_out;
 
         if (!pdacl->num_aces) {
-                if (!(le16_to_cpu(pdacl->size) - sizeof(struct smb_acl)) &&
+                if (!(pdacl_size - sizeof(struct smb_acl)) &&
                     *pdaccess & ~(FILE_READ_CONTROL_LE | FILE_WRITE_DAC_LE)) {
                         rc = -EACCES;
                         goto err_out;
                 }
-                kfree(pntsd);
-                return 0;
+                goto err_out;
         }
 
         if (*pdaccess & FILE_MAXIMAL_ACCESS_LE) {
@@ -1189,11 +1226,16 @@ int smb_check_perm_dacl(struct ksmbd_conn *conn, struct path *path,
                         DELETE;
 
                 ace = (struct smb_ace *)((char *)pdacl + sizeof(struct smb_acl));
+                aces_size = acl_size - sizeof(struct smb_acl);
                 for (i = 0; i < le32_to_cpu(pdacl->num_aces); i++) {
+                        if (offsetof(struct smb_ace, access_req) > aces_size)
+                                break;
+                        ace_size = le16_to_cpu(ace->size);
+                        if (ace_size > aces_size)
+                                break;
+                        aces_size -= ace_size;
                         granted |= le32_to_cpu(ace->access_req);
                         ace = (struct smb_ace *)((char *)ace + le16_to_cpu(ace->size));
-                        if (end_of_acl < (char *)ace)
-                                goto err_out;
                 }
 
                 if (!pdacl->num_aces)
@@ -1205,7 +1247,15 @@ int smb_check_perm_dacl(struct ksmbd_conn *conn, struct path *path,
         id_to_sid(uid, sid_type, &sid);
 
         ace = (struct smb_ace *)((char *)pdacl + sizeof(struct smb_acl));
+        aces_size = acl_size - sizeof(struct smb_acl);
         for (i = 0; i < le32_to_cpu(pdacl->num_aces); i++) {
+                if (offsetof(struct smb_ace, access_req) > aces_size)
+                        break;
+                ace_size = le16_to_cpu(ace->size);
+                if (ace_size > aces_size)
+                        break;
+                aces_size -= ace_size;
+
                 if (!compare_sids(&sid, &ace->sid) ||
                     !compare_sids(&sid_unix_NFS_mode, &ace->sid)) {
                         found = 1;
                         break;
@@ -1215,8 +1265,6 @@ int smb_check_perm_dacl(struct ksmbd_conn *conn, struct path *path,
                         others_ace = ace;
 
                 ace = (struct smb_ace *)((char *)ace + le16_to_cpu(ace->size));
-                if (end_of_acl < (char *)ace)
-                        goto err_out;
         }
 
         if (*pdaccess & FILE_MAXIMAL_ACCESS_LE && found) {
diff --git a/fs/ksmbd/smbacl.h b/fs/ksmbd/smbacl.h
index 4ee7bda32e5f38d1239e256c3c69a9765f9d6bc4..4bfcd468597774c0c1aec112f156827636729964 100644
--- a/fs/ksmbd/smbacl.h
+++ b/fs/ksmbd/smbacl.h
@@ -192,7 +192,7 @@ struct posix_acl_state {
 int parse_sec_desc(struct smb_ntsd *pntsd, int acl_len,
                    struct smb_fattr *fattr);
 int build_sec_desc(struct smb_ntsd *pntsd, struct smb_ntsd *ppntsd,
-                   int addition_info, __u32 *secdesclen,
+                   int ppntsd_size, int addition_info, __u32 *secdesclen,
                    struct smb_fattr *fattr);
 int init_acl_state(struct posix_acl_state *state, int cnt);
 void free_acl_state(struct posix_acl_state *state);
diff --git a/fs/ksmbd/vfs.c b/fs/ksmbd/vfs.c
index bf0fdf90bbfa06ad8bf842c89f14fc20d3a27b88..7fd796880c7c45e74fbb57ea5bd39c6e9033663b 100644
--- a/fs/ksmbd/vfs.c
+++ b/fs/ksmbd/vfs.c
@@ -1495,6 +1495,11 @@ int ksmbd_vfs_get_sd_xattr(struct ksmbd_conn *conn, struct dentry *dentry,
         }
 
         *pntsd = acl.sd_buf;
+        if (acl.sd_size < sizeof(struct smb_ntsd)) {
+                pr_err("sd size is invalid\n");
+                goto out_free;
+        }
+
         (*pntsd)->osidoffset = cpu_to_le32(le32_to_cpu((*pntsd)->osidoffset) -
                                            NDR_NTSD_OFFSETOF);
         (*pntsd)->gsidoffset =
diff --git a/fs/xfs/libxfs/xfs_btree.h b/fs/xfs/libxfs/xfs_btree.h
index 10e50cbacacf5ddf5af7ceb2ad8429ac5028b8ae..ba11d2a4b686e4ff30ae0b06acd28929dcc0a9de 100644
--- a/fs/xfs/libxfs/xfs_btree.h
+++ b/fs/xfs/libxfs/xfs_btree.h
@@ -523,7 +523,6 @@ xfs_btree_islastblock(
         struct xfs_buf          *bp;
 
         block = xfs_btree_get_block(cur, level, &bp);
-        ASSERT(block && xfs_btree_check_block(cur, block, level, bp) == 0);
 
         if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
                 return block->bb_u.l.bb_rightsib == cpu_to_be64(NULLFSBLOCK);
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index a3d5ecccfc2ccf72f20e50eea674f59e60b8e6c3..b89e8fcb49c94d1294bc395fb98801d95ee80e97 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -936,6 +936,8 @@ xfs_buf_item_relse(
         trace_xfs_buf_item_relse(bp, _RET_IP_);
         ASSERT(!test_bit(XFS_LI_IN_AIL, &bip->bli_item.li_flags));
 
+        if (atomic_read(&bip->bli_refcount))
+                return;
         bp->b_log_item = NULL;
         xfs_buf_rele(bp);
         xfs_buf_item_free(bip);
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 681bdcbe226584f3a9e731c884e18326587304f4..92c5d6ef47d67372256886b6f3ea6a1fe5afff1f 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -847,6 +847,24 @@ xlog_force_iclog(
         return xlog_state_release_iclog(iclog->ic_log, iclog, 0);
 }
 
+/*
+ * Cycle all the iclogbuf locks to make sure all log IO completion
+ * is done before we tear down these buffers.
+ */
+static void
+xlog_wait_iclog_completion(struct xlog *log)
+{
+        int             i;
+        struct xlog_in_core     *iclog = log->l_iclog;
+
+        for (i = 0; i < log->l_iclog_bufs; i++) {
+                down(&iclog->ic_sema);
+                up(&iclog->ic_sema);
+                iclog = iclog->ic_next;
+        }
+}
+
+
 /*
  * Wait for the iclog and all prior iclogs to be written disk as required by the
  * log force state machine. Waiting on ic_force_wait ensures iclog completions
@@ -1034,6 +1052,13 @@ xfs_log_unmount(
         struct xfs_mount        *mp)
 {
         xfs_log_quiesce(mp);
 
+        /*
+         * If shutdown has come from iclog IO context, the log
+         * cleaning will have been skipped and so we need to wait
+         * for the iclog to complete shutdown processing before we
+         * tear anything down.
+         */
+        xlog_wait_iclog_completion(mp->m_log);
+
         xfs_trans_ail_destroy(mp);
 
@@ -1941,17 +1966,6 @@ xlog_dealloc_log(
         xlog_in_core_t  *iclog, *next_iclog;
         int             i;
 
-        /*
-         * Cycle all the iclogbuf locks to make sure all log IO completion
-         * is done before we tear down these buffers.
-         */
-        iclog = log->l_iclog;
-        for (i = 0; i < log->l_iclog_bufs; i++) {
-                down(&iclog->ic_sema);
-                up(&iclog->ic_sema);
-                iclog = iclog->ic_next;
-        }
-
         /*
          * Destroy the CIL after waiting for iclog IO completion because an
          * iclog EIO error will try to shut down the log, which accesses the