Commit 5227b315 authored by Cheng Chang, committed by Facebook GitHub Bot

Fix unchecked statuses for transaction_test (#7572)

Summary:
When `ASSERT_STATUS_CHECKED` is enabled, `transaction_test` does not pass without this PR.

Pull Request resolved: https://github.com/facebook/rocksdb/pull/7572

Test Plan: `ASSERT_STATUS_CHECKED=1 make -j32 transaction_test && ./transaction_test`

Reviewed By: zhichao-cao

Differential Revision: D24404319

Pulled By: cheng-chang

fbshipit-source-id: 13689035995366ab06d8eada3ea404e45fef8bc5
Parent 73dbe10b
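For context: in an `ASSERT_STATUS_CHECKED` build, `rocksdb::Status` tracks whether it was ever inspected and aborts if a status object is destroyed unchecked. Each hunk below therefore either checks the returned status (`assert(s.ok())` or an early `return s`) or explicitly discards it with `PermitUncheckedError()`. The sketch below illustrates the pattern with a hypothetical `MustCheckStatus` class; it is a minimal stand-in, not RocksDB's actual `Status` implementation:

```cpp
#include <cassert>
#include <string>
#include <utility>

// Minimal sketch of a "must-check" status type (hypothetical class, not
// RocksDB's real rocksdb::Status). Destroying an instance that was never
// inspected fires an assertion, which is how ASSERT_STATUS_CHECKED builds
// surface the call sites patched in this commit.
class MustCheckStatus {
 public:
  static MustCheckStatus OK() { return MustCheckStatus(0, ""); }
  static MustCheckStatus IOError(std::string msg) {
    return MustCheckStatus(1, std::move(msg));
  }

  MustCheckStatus(MustCheckStatus&& other) noexcept
      : code_(other.code_), msg_(std::move(other.msg_)) {
    other.checked_ = true;  // a moved-from status no longer needs checking
  }

  ~MustCheckStatus() { assert(checked_); }  // unchecked status aborts here

  bool ok() const {
    checked_ = true;  // inspecting the status counts as checking it
    return code_ == 0;
  }

  // Explicit opt-out for statuses that are safe to ignore, e.g. the iterator
  // status read right before `delete iter` in the Verify() hunk below.
  void PermitUncheckedError() const { checked_ = true; }

 private:
  MustCheckStatus(int code, std::string msg)
      : code_(code), msg_(std::move(msg)) {}

  int code_;
  std::string msg_;
  mutable bool checked_ = false;
};

MustCheckStatus DoWrite(bool fail) {
  return fail ? MustCheckStatus::IOError("disk full") : MustCheckStatus::OK();
}

int main() {
  MustCheckStatus s = DoWrite(false);
  if (!s.ok()) return 1;                  // checked: destructor stays quiet

  DoWrite(false).PermitUncheckedError();  // deliberately ignored, but marked
  // DoWrite(false);  // destroyed unchecked: would assert when enabled
  return 0;
}
```

This is why the fixes below fall into two buckets: statuses that matter get `assert(s.ok())` or an `if (!s.ok()) return s;`, while statuses that are intentionally ignored (iterator status during verification, mutex `Lock()` in unlock paths) get `PermitUncheckedError()`.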
@@ -349,6 +349,7 @@ Status RandomTransactionInserter::Verify(DB* db, uint16_t num_sets,
                static_cast<int>(key.size()), key.data(), int_value);
       total += int_value;
     }
+    iter->status().PermitUncheckedError();
     delete iter;
   }
......
@@ -635,7 +635,7 @@ void PointLockManager::UnLock(PessimisticTransaction* txn,
   assert(lock_map->lock_map_stripes_.size() > stripe_num);
   LockMapStripe* stripe = lock_map->lock_map_stripes_.at(stripe_num);
-  stripe->stripe_mutex->Lock();
+  stripe->stripe_mutex->Lock().PermitUncheckedError();
   UnLockKey(txn, key, stripe, lock_map, env);
   stripe->stripe_mutex->UnLock();
@@ -677,7 +677,7 @@ void PointLockManager::UnLock(PessimisticTransaction* txn,
   assert(lock_map->lock_map_stripes_.size() > stripe_num);
   LockMapStripe* stripe = lock_map->lock_map_stripes_.at(stripe_num);
-  stripe->stripe_mutex->Lock();
+  stripe->stripe_mutex->Lock().PermitUncheckedError();
   for (const std::string* key : stripe_keys) {
     UnLockKey(txn, *key, stripe, lock_map, env);
@@ -708,7 +708,7 @@ PointLockManager::PointLockStatus PointLockManager::GetPointLockStatus() {
     const auto& stripes = lock_maps_[i]->lock_map_stripes_;
     // Iterate and lock all stripes in ascending order.
     for (const auto& j : stripes) {
-      j->stripe_mutex->Lock();
+      j->stripe_mutex->Lock().PermitUncheckedError();
       for (const auto& it : j->keys) {
         struct KeyLockInfo info;
         info.exclusive = it.second.exclusive;
......
@@ -173,7 +173,6 @@ Status PessimisticTransaction::CommitBatch(WriteBatch* batch) {
 }
 Status PessimisticTransaction::Prepare() {
-  Status s;
   if (name_.empty()) {
     return Status::InvalidArgument(
@@ -184,6 +183,7 @@ Status PessimisticTransaction::Prepare() {
     return Status::Expired();
   }
+  Status s;
   bool can_prepare = false;
   if (expiration_time_ > 0) {
@@ -226,7 +226,9 @@
 Status WriteCommittedTxn::PrepareInternal() {
   WriteOptions write_options = write_options_;
   write_options.disableWAL = false;
-  WriteBatchInternal::MarkEndPrepare(GetWriteBatch()->GetWriteBatch(), name_);
+  auto s = WriteBatchInternal::MarkEndPrepare(GetWriteBatch()->GetWriteBatch(),
+                                              name_);
+  assert(s.ok());
   class MarkLogCallback : public PreReleaseCallback {
    public:
    MarkLogCallback(DBImpl* db, bool two_write_queues)
@@ -256,15 +258,14 @@ Status WriteCommittedTxn::PrepareInternal() {
   const bool kDisableMemtable = true;
   SequenceNumber* const KIgnoreSeqUsed = nullptr;
   const size_t kNoBatchCount = 0;
-  Status s = db_impl_->WriteImpl(
-      write_options, GetWriteBatch()->GetWriteBatch(), kNoWriteCallback,
-      &log_number_, kRefNoLog, kDisableMemtable, KIgnoreSeqUsed, kNoBatchCount,
-      &mark_log_callback);
+  s = db_impl_->WriteImpl(write_options, GetWriteBatch()->GetWriteBatch(),
+                          kNoWriteCallback, &log_number_, kRefNoLog,
+                          kDisableMemtable, KIgnoreSeqUsed, kNoBatchCount,
+                          &mark_log_callback);
   return s;
 }
 Status PessimisticTransaction::Commit() {
-  Status s;
   bool commit_without_prepare = false;
   bool commit_prepared = false;
@@ -294,6 +295,7 @@ Status PessimisticTransaction::Commit() {
     }
   }
+  Status s;
   if (commit_without_prepare) {
     assert(!commit_prepared);
     if (WriteBatchInternal::Count(GetCommitTimeWriteBatch()) > 0) {
@@ -377,7 +379,8 @@ Status WriteCommittedTxn::CommitInternal() {
   // We take the commit-time batch and append the Commit marker.
   // The Memtable will ignore the Commit marker in non-recovery mode
   WriteBatch* working_batch = GetCommitTimeWriteBatch();
-  WriteBatchInternal::MarkCommit(working_batch, name_);
+  auto s = WriteBatchInternal::MarkCommit(working_batch, name_);
+  assert(s.ok());
   // any operations appended to this working_batch will be ignored from WAL
   working_batch->MarkWalTerminationPoint();
@@ -385,13 +388,14 @@ Status WriteCommittedTxn::CommitInternal() {
   // insert prepared batch into Memtable only skipping WAL.
   // Memtable will ignore BeginPrepare/EndPrepare markers
   // in non recovery mode and simply insert the values
-  WriteBatchInternal::Append(working_batch, GetWriteBatch()->GetWriteBatch());
+  s = WriteBatchInternal::Append(working_batch,
+                                 GetWriteBatch()->GetWriteBatch());
+  assert(s.ok());
   uint64_t seq_used = kMaxSequenceNumber;
-  auto s =
-      db_impl_->WriteImpl(write_options_, working_batch, /*callback*/ nullptr,
+  s = db_impl_->WriteImpl(write_options_, working_batch, /*callback*/ nullptr,
                           /*log_used*/ nullptr, /*log_ref*/ log_number_,
-                          /*disable_memtable*/ false, &seq_used);
+                          /*disable_memtable*/ false, &seq_used);
   assert(!s.ok() || seq_used != kMaxSequenceNumber);
   if (s.ok()) {
     SetId(seq_used);
@@ -439,8 +443,9 @@ Status PessimisticTransaction::Rollback() {
 Status WriteCommittedTxn::RollbackInternal() {
   WriteBatch rollback_marker;
-  WriteBatchInternal::MarkRollback(&rollback_marker, name_);
-  auto s = db_impl_->WriteImpl(write_options_, &rollback_marker);
+  auto s = WriteBatchInternal::MarkRollback(&rollback_marker, name_);
+  assert(s.ok());
+  s = db_impl_->WriteImpl(write_options_, &rollback_marker);
   return s;
 }
@@ -505,9 +510,10 @@ Status PessimisticTransaction::LockBatch(WriteBatch* batch,
   // Iterating on this handler will add all keys in this batch into keys
   Handler handler;
-  batch->Iterate(&handler);
-  Status s;
+  Status s = batch->Iterate(&handler);
+  if (!s.ok()) {
+    return s;
+  }
   // Attempt to lock all keys
   for (const auto& cf_iter : handler.keys_) {
......
@@ -530,7 +530,9 @@ Status TransactionBaseImpl::SingleDeleteUntracked(
 }
 void TransactionBaseImpl::PutLogData(const Slice& blob) {
-  write_batch_.PutLogData(blob);
+  auto s = write_batch_.PutLogData(blob);
+  (void)s;
+  assert(s.ok());
 }
 WriteBatchWithIndex* TransactionBaseImpl::GetWriteBatch() {
......
@@ -271,7 +271,8 @@ class TransactionBaseImpl : public Transaction {
       write_batch_.Clear();
     }
     assert(write_batch_.GetDataSize() == WriteBatchInternal::kHeader);
-    WriteBatchInternal::InsertNoop(write_batch_.GetWriteBatch());
+    auto s = WriteBatchInternal::InsertNoop(write_batch_.GetWriteBatch());
+    assert(s.ok());
   }
   DB* db_;
......
@@ -104,8 +104,9 @@ Status WritePreparedTxn::PrepareInternal() {
   write_options.disableWAL = false;
   const bool WRITE_AFTER_COMMIT = true;
   const bool kFirstPrepareBatch = true;
-  WriteBatchInternal::MarkEndPrepare(GetWriteBatch()->GetWriteBatch(), name_,
-                                     !WRITE_AFTER_COMMIT);
+  auto s = WriteBatchInternal::MarkEndPrepare(GetWriteBatch()->GetWriteBatch(),
+                                              name_, !WRITE_AFTER_COMMIT);
+  assert(s.ok());
   // For each duplicate key we account for a new sub-batch
   prepare_batch_cnt_ = GetWriteBatch()->SubBatchCnt();
   // Having AddPrepared in the PreReleaseCallback allows in-order addition of
@@ -116,10 +117,10 @@ Status WritePreparedTxn::PrepareInternal() {
       db_impl_->immutable_db_options().two_write_queues, kFirstPrepareBatch);
   const bool DISABLE_MEMTABLE = true;
   uint64_t seq_used = kMaxSequenceNumber;
-  Status s = db_impl_->WriteImpl(
-      write_options, GetWriteBatch()->GetWriteBatch(),
-      /*callback*/ nullptr, &log_number_, /*log ref*/ 0, !DISABLE_MEMTABLE,
-      &seq_used, prepare_batch_cnt_, &add_prepared_callback);
+  s = db_impl_->WriteImpl(write_options, GetWriteBatch()->GetWriteBatch(),
+                          /*callback*/ nullptr, &log_number_, /*log ref*/ 0,
+                          !DISABLE_MEMTABLE, &seq_used, prepare_batch_cnt_,
+                          &add_prepared_callback);
   assert(!s.ok() || seq_used != kMaxSequenceNumber);
   auto prepare_seq = seq_used;
   SetId(prepare_seq);
@@ -144,7 +145,8 @@ Status WritePreparedTxn::CommitInternal() {
   // The Memtable will ignore the Commit marker in non-recovery mode
   WriteBatch* working_batch = GetCommitTimeWriteBatch();
   const bool empty = working_batch->Count() == 0;
-  WriteBatchInternal::MarkCommit(working_batch, name_);
+  auto s = WriteBatchInternal::MarkCommit(working_batch, name_);
+  assert(s.ok());
   const bool for_recovery = use_only_the_last_commit_time_batch_for_recovery_;
   if (!empty && for_recovery) {
@@ -162,7 +164,7 @@ Status WritePreparedTxn::CommitInternal() {
     ROCKS_LOG_WARN(db_impl_->immutable_db_options().info_log,
                    "Duplicate key overhead");
     SubBatchCounter counter(*wpt_db_->GetCFComparatorMap());
-    auto s = working_batch->Iterate(&counter);
+    s = working_batch->Iterate(&counter);
     assert(s.ok());
     commit_batch_cnt = counter.BatchCount();
   }
@@ -188,9 +190,9 @@ Status WritePreparedTxn::CommitInternal() {
   // redundantly reference the log that contains the prepared data.
   const uint64_t zero_log_number = 0ull;
   size_t batch_cnt = UNLIKELY(commit_batch_cnt) ? commit_batch_cnt : 1;
-  auto s = db_impl_->WriteImpl(write_options_, working_batch, nullptr, nullptr,
-                               zero_log_number, disable_memtable, &seq_used,
-                               batch_cnt, pre_release_callback);
+  s = db_impl_->WriteImpl(write_options_, working_batch, nullptr, nullptr,
+                          zero_log_number, disable_memtable, &seq_used,
+                          batch_cnt, pre_release_callback);
   assert(!s.ok() || seq_used != kMaxSequenceNumber);
   const SequenceNumber commit_batch_seq = seq_used;
   if (LIKELY(do_one_write || !s.ok())) {
@@ -217,9 +219,11 @@ Status WritePreparedTxn::CommitInternal() {
       wpt_db_, db_impl_, prepare_seq, prepare_batch_cnt_, kZeroData,
       commit_batch_seq, commit_batch_cnt);
   WriteBatch empty_batch;
-  empty_batch.PutLogData(Slice());
+  s = empty_batch.PutLogData(Slice());
+  assert(s.ok());
   // In the absence of Prepare markers, use Noop as a batch separator
-  WriteBatchInternal::InsertNoop(&empty_batch);
+  s = WriteBatchInternal::InsertNoop(&empty_batch);
+  assert(s.ok());
   const bool DISABLE_MEMTABLE = true;
   const size_t ONE_BATCH = 1;
   const uint64_t NO_REF_LOG = 0;
@@ -347,12 +351,12 @@ Status WritePreparedTxn::RollbackInternal() {
                  wpt_db_->txn_db_options_.rollback_merge_operands,
                  roptions);
   auto s = GetWriteBatch()->GetWriteBatch()->Iterate(&rollback_handler);
-  assert(s.ok());
+  if (!s.ok()) {
+    return s;
+  }
   // The Rollback marker will be used as a batch separator
-  WriteBatchInternal::MarkRollback(&rollback_batch, name_);
+  s = WriteBatchInternal::MarkRollback(&rollback_batch, name_);
+  assert(s.ok());
   bool do_one_write = !db_impl_->immutable_db_options().two_write_queues;
   const bool DISABLE_MEMTABLE = true;
   const uint64_t NO_REF_LOG = 0;
@@ -402,9 +406,11 @@ Status WritePreparedTxn::RollbackInternal() {
   WritePreparedRollbackPreReleaseCallback update_commit_map_with_prepare(
       wpt_db_, db_impl_, GetId(), rollback_seq, prepare_batch_cnt_);
   WriteBatch empty_batch;
-  empty_batch.PutLogData(Slice());
+  s = empty_batch.PutLogData(Slice());
+  assert(s.ok());
   // In the absence of Prepare markers, use Noop as a batch separator
-  WriteBatchInternal::InsertNoop(&empty_batch);
+  s = WriteBatchInternal::InsertNoop(&empty_batch);
+  assert(s.ok());
   s = db_impl_->WriteImpl(write_options_, &empty_batch, nullptr, nullptr,
                           NO_REF_LOG, DISABLE_MEMTABLE, &seq_used, ONE_BATCH,
                           &update_commit_map_with_prepare);
......
@@ -168,7 +168,8 @@ Status WritePreparedTxnDB::WriteInternal(const WriteOptions& write_options_orig,
   bool do_one_write = !db_impl_->immutable_db_options().two_write_queues;
   WriteOptions write_options(write_options_orig);
   // In the absence of Prepare markers, use Noop as a batch separator
-  WriteBatchInternal::InsertNoop(batch);
+  auto s = WriteBatchInternal::InsertNoop(batch);
+  assert(s.ok());
   const bool DISABLE_MEMTABLE = true;
   const uint64_t no_log_ref = 0;
   uint64_t seq_used = kMaxSequenceNumber;
@@ -189,9 +190,9 @@ Status WritePreparedTxnDB::WriteInternal(const WriteOptions& write_options_orig,
   } else {
     pre_release_callback = &add_prepared_callback;
   }
-  auto s = db_impl_->WriteImpl(write_options, batch, nullptr, nullptr,
-                               no_log_ref, !DISABLE_MEMTABLE, &seq_used,
-                               batch_cnt, pre_release_callback);
+  s = db_impl_->WriteImpl(write_options, batch, nullptr, nullptr, no_log_ref,
+                          !DISABLE_MEMTABLE, &seq_used, batch_cnt,
+                          pre_release_callback);
   assert(!s.ok() || seq_used != kMaxSequenceNumber);
   uint64_t prepare_seq = seq_used;
   if (txn != nullptr) {
......
@@ -279,7 +279,9 @@ Status WriteUnpreparedTxn::FlushWriteBatchToDBInternal(bool prepared) {
   static std::atomic_ullong autogen_id{0};
   // To avoid changing all tests to call SetName, just autogenerate one.
   if (wupt_db_->txn_db_options_.autogenerate_name) {
-    SetName(std::string("autoxid") + ToString(autogen_id.fetch_add(1)));
+    auto s =
+        SetName(std::string("autoxid") + ToString(autogen_id.fetch_add(1)));
+    assert(s.ok());
   } else
 #endif
   {
@@ -354,8 +356,9 @@ Status WriteUnpreparedTxn::FlushWriteBatchToDBInternal(bool prepared) {
   const bool WRITE_AFTER_COMMIT = true;
   const bool first_prepare_batch = log_number_ == 0;
   // MarkEndPrepare will change Noop marker to the appropriate marker.
-  WriteBatchInternal::MarkEndPrepare(GetWriteBatch()->GetWriteBatch(), name_,
-                                     !WRITE_AFTER_COMMIT, !prepared);
+  s = WriteBatchInternal::MarkEndPrepare(GetWriteBatch()->GetWriteBatch(),
+                                         name_, !WRITE_AFTER_COMMIT, !prepared);
+  assert(s.ok());
   // For each duplicate key we account for a new sub-batch
   prepare_batch_cnt_ = GetWriteBatch()->SubBatchCnt();
   // AddPrepared better to be called in the pre-release callback otherwise there
@@ -541,7 +544,8 @@ Status WriteUnpreparedTxn::CommitInternal() {
   // will ignore the Commit marker in non-recovery mode
   WriteBatch* working_batch = GetCommitTimeWriteBatch();
   const bool empty = working_batch->Count() == 0;
-  WriteBatchInternal::MarkCommit(working_batch, name_);
+  auto s = WriteBatchInternal::MarkCommit(working_batch, name_);
+  assert(s.ok());
   const bool for_recovery = use_only_the_last_commit_time_batch_for_recovery_;
   if (!empty && for_recovery) {
@@ -557,7 +561,7 @@ Status WriteUnpreparedTxn::CommitInternal() {
     ROCKS_LOG_WARN(db_impl_->immutable_db_options().info_log,
                    "Duplicate key overhead");
     SubBatchCounter counter(*wpt_db_->GetCFComparatorMap());
-    auto s = working_batch->Iterate(&counter);
+    s = working_batch->Iterate(&counter);
     assert(s.ok());
     commit_batch_cnt = counter.BatchCount();
   }
@@ -583,9 +587,9 @@ Status WriteUnpreparedTxn::CommitInternal() {
   // need to redundantly reference the log that contains the prepared data.
   const uint64_t zero_log_number = 0ull;
   size_t batch_cnt = UNLIKELY(commit_batch_cnt) ? commit_batch_cnt : 1;
-  auto s = db_impl_->WriteImpl(write_options_, working_batch, nullptr, nullptr,
-                               zero_log_number, disable_memtable, &seq_used,
-                               batch_cnt, pre_release_callback);
+  s = db_impl_->WriteImpl(write_options_, working_batch, nullptr, nullptr,
+                          zero_log_number, disable_memtable, &seq_used,
+                          batch_cnt, pre_release_callback);
   assert(!s.ok() || seq_used != kMaxSequenceNumber);
   const SequenceNumber commit_batch_seq = seq_used;
   if (LIKELY(do_one_write || !s.ok())) {
@@ -619,9 +623,11 @@ Status WriteUnpreparedTxn::CommitInternal() {
     // Update commit map only from the 2nd queue
     WriteBatch empty_batch;
-    empty_batch.PutLogData(Slice());
+    s = empty_batch.PutLogData(Slice());
+    assert(s.ok());
     // In the absence of Prepare markers, use Noop as a batch separator
-    WriteBatchInternal::InsertNoop(&empty_batch);
+    s = WriteBatchInternal::InsertNoop(&empty_batch);
+    assert(s.ok());
     const bool DISABLE_MEMTABLE = true;
     const size_t ONE_BATCH = 1;
     const uint64_t NO_REF_LOG = 0;
@@ -719,10 +725,14 @@ Status WriteUnpreparedTxn::RollbackInternal() {
   // TODO(lth): We write rollback batch all in a single batch here, but this
   // should be subdivded into multiple batches as well. In phase 2, when key
   // sets are read from WAL, this will happen naturally.
-  WriteRollbackKeys(*tracked_locks_, &rollback_batch, &callback, roptions);
+  s = WriteRollbackKeys(*tracked_locks_, &rollback_batch, &callback, roptions);
+  if (!s.ok()) {
+    return s;
+  }
   // The Rollback marker will be used as a batch separator
-  WriteBatchInternal::MarkRollback(rollback_batch.GetWriteBatch(), name_);
+  s = WriteBatchInternal::MarkRollback(rollback_batch.GetWriteBatch(), name_);
+  assert(s.ok());
   bool do_one_write = !db_impl_->immutable_db_options().two_write_queues;
   const bool DISABLE_MEMTABLE = true;
   const uint64_t NO_REF_LOG = 0;
@@ -778,9 +788,11 @@ Status WriteUnpreparedTxn::RollbackInternal() {
                                prepare_seq);
     WriteBatch empty_batch;
     const size_t ONE_BATCH = 1;
-    empty_batch.PutLogData(Slice());
+    s = empty_batch.PutLogData(Slice());
+    assert(s.ok());
     // In the absence of Prepare markers, use Noop as a batch separator
-    WriteBatchInternal::InsertNoop(&empty_batch);
+    s = WriteBatchInternal::InsertNoop(&empty_batch);
+    assert(s.ok());
     s = db_impl_->WriteImpl(write_options_, &empty_batch, nullptr, nullptr,
                             NO_REF_LOG, DISABLE_MEMTABLE, &seq_used, ONE_BATCH,
                             &update_commit_map_with_rollback_batch);
@@ -863,11 +875,13 @@ Status WriteUnpreparedTxn::RollbackToSavePointInternal() {
   WriteUnpreparedTxnReadCallback callback(wupt_db_, snap_seq, min_uncommitted,
                                           top.unprep_seqs_,
                                           kBackedByDBSnapshot);
-  WriteRollbackKeys(tracked_keys, &write_batch_, &callback, roptions);
+  s = WriteRollbackKeys(tracked_keys, &write_batch_, &callback, roptions);
+  if (!s.ok()) {
+    return s;
+  }
   const bool kPrepared = true;
   s = FlushWriteBatchToDBInternal(!kPrepared);
-  assert(s.ok());
   if (!s.ok()) {
     return s;
   }
......