From 1bdaef7a06ee2fc147f1188cec6781905849c71b Mon Sep 17 00:00:00 2001
From: Jay Zhuang
Date: Tue, 29 Sep 2020 18:21:49 -0700
Subject: [PATCH] Status check enforcement for timestamp_basic_test (#7454)

Summary:
Pull Request resolved: https://github.com/facebook/rocksdb/pull/7454

Reviewed By: riversand963

Differential Revision: D23981719

Pulled By: jay-zhuang

fbshipit-source-id: 01073f73e54c17067b886c4a2f179b2804198399
---
 Makefile                                       |  2 ++
 db/db_impl/db_impl_compaction_flush.cc         |  1 +
 db/db_with_timestamp_basic_test.cc             | 12 ++++++------
 db/error_handler.cc                            |  2 +-
 table/block_based/block_based_table_builder.cc |  7 ++++++-
 table/block_based/block_based_table_reader.cc  |  5 ++++-
 table/multiget_context.h                       |  4 ++--
 7 files changed, 22 insertions(+), 11 deletions(-)

diff --git a/Makefile b/Makefile
index 5d8b2bc3d..876d056e4 100644
--- a/Makefile
+++ b/Makefile
@@ -587,6 +587,8 @@ ifdef ASSERT_STATUS_CHECKED
   crc32c_test \
   dbformat_test \
   db_basic_test \
+  db_with_timestamp_basic_test \
+  db_with_timestamp_compaction_test \
   db_options_test \
   options_file_test \
   defer_test \
diff --git a/db/db_impl/db_impl_compaction_flush.cc b/db/db_impl/db_impl_compaction_flush.cc
index 50783488b..7c2533bc5 100644
--- a/db/db_impl/db_impl_compaction_flush.cc
+++ b/db/db_impl/db_impl_compaction_flush.cc
@@ -1162,6 +1162,7 @@ Status DBImpl::CompactFilesImpl(
   Status status = compaction_job.Install(*c->mutable_cf_options());
   if (status.ok()) {
+    assert(compaction_job.io_status().ok());
     InstallSuperVersionAndScheduleWork(c->column_family_data(),
                                        &job_context->superversion_contexts[0],
                                        *c->mutable_cf_options());

diff --git a/db/db_with_timestamp_basic_test.cc b/db/db_with_timestamp_basic_test.cc
index 9235acf34..8d71a3d91 100644
--- a/db/db_with_timestamp_basic_test.cc
+++ b/db/db_with_timestamp_basic_test.cc
@@ -468,8 +468,8 @@ TEST_F(DBBasicTestWithTimestamp, ReseekToNextUserKey) {
   {
     std::string ts_str = Timestamp(static_cast<uint64_t>(kNumKeys + 1), 0);
     WriteBatch batch(0, 0, kTimestampSize);
-    batch.Put("a", "new_value");
-    batch.Put("b", "new_value");
+    ASSERT_OK(batch.Put("a", "new_value"));
+    ASSERT_OK(batch.Put("b", "new_value"));
     s = batch.AssignTimestamp(ts_str);
     ASSERT_OK(s);
     s = db_->Write(write_opts, &batch);
@@ -1485,9 +1485,9 @@ TEST_P(DBBasicTestWithTimestampCompressionSettings, PutAndGetWithCompaction) {
         // at higher levels.
         CompactionOptions compact_opt;
         compact_opt.compression = kNoCompression;
-        db_->CompactFiles(compact_opt, handles_[cf],
-                          collector->GetFlushedFiles(),
-                          static_cast<int>(kNumTimestamps - i));
+        ASSERT_OK(db_->CompactFiles(compact_opt, handles_[cf],
+                                    collector->GetFlushedFiles(),
+                                    static_cast<int>(kNumTimestamps - i)));
         collector->ClearFlushedFiles();
       }
     }
@@ -1576,7 +1576,7 @@ TEST_F(DBBasicTestWithTimestamp, BatchWriteAndMultiGet) {
           batch.Put(handles_[cf], Key1(j),
                     "value_" + std::to_string(j) + "_" + std::to_string(i)));
     }
-    batch.AssignTimestamp(write_ts);
+    ASSERT_OK(batch.AssignTimestamp(write_ts));
     ASSERT_OK(db_->Write(wopts, &batch));
     verify_records_func(i, handles_[cf]);

diff --git a/db/error_handler.cc b/db/error_handler.cc
index d44af490f..7aa4aa826 100644
--- a/db/error_handler.cc
+++ b/db/error_handler.cc
@@ -345,7 +345,7 @@ Status ErrorHandler::SetBGError(const IOStatus& bg_io_err,
   Status s;
   DBRecoverContext context;
   if (bg_io_err.GetDataLoss()) {
-    // FIrst, data loss is treated as unrecoverable error. So it can directly
+    // First, data loss is treated as unrecoverable error. So it can directly
     // overwrite any existing bg_error_.
     bool auto_recovery = false;
     Status bg_err(new_bg_io_err, Status::Severity::kUnrecoverableError);
diff --git a/table/block_based/block_based_table_builder.cc b/table/block_based/block_based_table_builder.cc
index 53ebfead2..3d92891ca 100644
--- a/table/block_based/block_based_table_builder.cc
+++ b/table/block_based/block_based_table_builder.cc
@@ -727,7 +727,7 @@ void BlockBasedTableBuilder::Add(const Slice& key, const Slice& value) {
     if (r->props.num_entries > r->props.num_range_deletions) {
       assert(r->internal_comparator.Compare(key, Slice(r->last_key)) > 0);
     }
-#endif  // NDEBUG
+#endif  // !NDEBUG

     auto should_flush = r->flush_block_policy->Update(key, value);
     if (should_flush) {
@@ -1653,6 +1653,11 @@ Status BlockBasedTableBuilder::Finish() {
     r->pc_rep->write_queue.finish();
     r->pc_rep->write_thread->join();
     r->pc_rep->finished = true;
+#ifndef NDEBUG
+    for (const auto& br : r->pc_rep->block_rep_buf) {
+      assert(br.status.ok());
+    }
+#endif  // !NDEBUG
   } else {
     // To make sure properties block is able to keep the accurate size of index
     // block, we will finish writing all index entries first.
diff --git a/table/block_based/block_based_table_reader.cc b/table/block_based/block_based_table_reader.cc
index a98167f1c..e12c32813 100644
--- a/table/block_based/block_based_table_reader.cc
+++ b/table/block_based/block_based_table_reader.cc
@@ -1999,7 +1999,6 @@ bool BlockBasedTable::PrefixMayMatch(
   }

   bool may_match = true;
-  Status s;

   // First, try check with full filter
   FilterBlockReader* const filter = rep_->filter.get();
@@ -2585,6 +2584,10 @@ void BlockBasedTable::MultiGet(const ReadOptions& read_options,
               read_options, results[idx_in_batch], &first_biter,
               statuses[idx_in_batch]);
           reusing_block = false;
+        } else {
+          // If handler is null and result is empty, then the status is never
+          // set, which should be the initial value: ok().
+          assert(statuses[idx_in_batch].ok());
         }
         biter = &first_biter;
         idx_in_batch++;
diff --git a/table/multiget_context.h b/table/multiget_context.h
index 964544d07..604a26f8b 100644
--- a/table/multiget_context.h
+++ b/table/multiget_context.h
@@ -87,9 +87,9 @@ struct KeyContext {
 class MultiGetContext {
  public:
   // Limit the number of keys in a batch to this number. Benchmarks show that
-  // there is negligible benefit for batches exceeding this. Keeping this < 64
+  // there is negligible benefit for batches exceeding this. Keeping this < 32
   // simplifies iteration, as well as reduces the amount of stack allocations
-  // htat need to be performed
+  // that need to be performed
   static const int MAX_BATCH_SIZE = 32;

   MultiGetContext(autovector<KeyContext*, MAX_BATCH_SIZE>* sorted_keys,
--
GitLab
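
Note on the ASSERT_OK changes above: the ASSERT_STATUS_CHECKED build mode that this patch adds db_with_timestamp_basic_test and db_with_timestamp_compaction_test to compiles rocksdb::Status so that destroying an instance that was never examined trips an assertion in debug builds. That is why return values the tests previously dropped, such as batch.Put() and batch.AssignTimestamp(), are now wrapped in ASSERT_OK. Below is a minimal self-contained sketch of that checked-status idiom; the simplified Status class and the DoWrite() helper are illustrative stand-ins, not RocksDB's actual implementation.

#include <cassert>
#include <string>
#include <utility>

// Simplified stand-in for a checked status type (NOT RocksDB's actual
// Status): the destructor asserts if the instance is destroyed without
// ever having been examined, which is what ASSERT_STATUS_CHECKED-style
// builds enforce.
class Status {
 public:
  static Status OK() { return Status(0, ""); }
  static Status IOError(const std::string& msg) { return Status(1, msg); }

  Status(Status&& other) noexcept
      : code_(other.code_), msg_(std::move(other.msg_)) {
    other.checked_ = true;  // a moved-from status no longer needs checking
  }

  ~Status() { assert(checked_); }  // fires if the status was never examined

  bool ok() const {
    checked_ = true;  // any inspection marks the status as checked
    return code_ == 0;
  }

 private:
  Status(int code, std::string msg) : code_(code), msg_(std::move(msg)) {}
  int code_;
  std::string msg_;
  mutable bool checked_ = false;
};

// Hypothetical callee whose returned status the caller must consume.
Status DoWrite() { return Status::IOError("disk full"); }

int main() {
  // DoWrite();  // would abort a debug build: returned status never checked
  Status s = DoWrite();
  return s.ok() ? 0 : 1;  // inspecting the status satisfies the destructor
}

RocksDB's real Status additionally offers PermitUncheckedError() for return values that are deliberately ignored; the sketch omits that escape hatch for brevity.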