diff --git a/Makefile b/Makefile
index 5d8b2bc3d6af2be6d8aabea64b5b3fbec7bbf099..876d056e427092e18ac1fae51fa5010f59becf3a 100644
--- a/Makefile
+++ b/Makefile
@@ -587,6 +587,8 @@ ifdef ASSERT_STATUS_CHECKED
 	crc32c_test \
 	dbformat_test \
 	db_basic_test \
+	db_with_timestamp_basic_test \
+	db_with_timestamp_compaction_test \
 	db_options_test \
 	options_file_test \
 	defer_test \
diff --git a/db/db_impl/db_impl_compaction_flush.cc b/db/db_impl/db_impl_compaction_flush.cc
index 50783488b4e37af984d2af08c3470435c3ee475c..7c2533bc5f70fba4b6c419d7104993abb2ae243f 100644
--- a/db/db_impl/db_impl_compaction_flush.cc
+++ b/db/db_impl/db_impl_compaction_flush.cc
@@ -1162,6 +1162,7 @@ Status DBImpl::CompactFilesImpl(
   Status status = compaction_job.Install(*c->mutable_cf_options());
   if (status.ok()) {
+    assert(compaction_job.io_status().ok());
     InstallSuperVersionAndScheduleWork(c->column_family_data(),
                                        &job_context->superversion_contexts[0],
                                        *c->mutable_cf_options());
   }
diff --git a/db/db_with_timestamp_basic_test.cc b/db/db_with_timestamp_basic_test.cc
index 9235acf34a3779cba72f6ac92592b0b080f1684b..8d71a3d91d39ff959d4bfd5830b0caf103fa11f5 100644
--- a/db/db_with_timestamp_basic_test.cc
+++ b/db/db_with_timestamp_basic_test.cc
@@ -468,8 +468,8 @@ TEST_F(DBBasicTestWithTimestamp, ReseekToNextUserKey) {
   {
     std::string ts_str = Timestamp(static_cast<uint64_t>(kNumKeys + 1), 0);
     WriteBatch batch(0, 0, kTimestampSize);
-    batch.Put("a", "new_value");
-    batch.Put("b", "new_value");
+    ASSERT_OK(batch.Put("a", "new_value"));
+    ASSERT_OK(batch.Put("b", "new_value"));
     s = batch.AssignTimestamp(ts_str);
     ASSERT_OK(s);
     s = db_->Write(write_opts, &batch);
@@ -1485,9 +1485,9 @@ TEST_P(DBBasicTestWithTimestampCompressionSettings, PutAndGetWithCompaction) {
       // at higher levels.
       CompactionOptions compact_opt;
       compact_opt.compression = kNoCompression;
-      db_->CompactFiles(compact_opt, handles_[cf],
-                        collector->GetFlushedFiles(),
-                        static_cast<int>(kNumTimestamps - i));
+      ASSERT_OK(db_->CompactFiles(compact_opt, handles_[cf],
+                                  collector->GetFlushedFiles(),
+                                  static_cast<int>(kNumTimestamps - i)));
       collector->ClearFlushedFiles();
     }
   }
@@ -1576,7 +1576,7 @@ TEST_F(DBBasicTestWithTimestamp, BatchWriteAndMultiGet) {
           batch.Put(handles_[cf], Key1(j),
                     "value_" + std::to_string(j) + "_" + std::to_string(i)));
     }
-    batch.AssignTimestamp(write_ts);
+    ASSERT_OK(batch.AssignTimestamp(write_ts));
     ASSERT_OK(db_->Write(wopts, &batch));
 
     verify_records_func(i, handles_[cf]);
diff --git a/db/error_handler.cc b/db/error_handler.cc
index d44af490fffda6fc85d5995d088a0700bf23aec1..7aa4aa82689c7a43dcf745f10f2660a5d2491013 100644
--- a/db/error_handler.cc
+++ b/db/error_handler.cc
@@ -345,7 +345,7 @@ Status ErrorHandler::SetBGError(const IOStatus& bg_io_err,
   Status s;
   DBRecoverContext context;
   if (bg_io_err.GetDataLoss()) {
-    // FIrst, data loss is treated as unrecoverable error. So it can directly
+    // First, data loss is treated as unrecoverable error. So it can directly
     // overwrite any existing bg_error_.
     bool auto_recovery = false;
     Status bg_err(new_bg_io_err, Status::Severity::kUnrecoverableError);
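Note on the ASSERT_OK() wrappers added in db_with_timestamp_basic_test.cc above: under the ASSERT_STATUS_CHECKED build now enabled for these tests in the Makefile, a rocksdb::Status asserts on destruction if it was never inspected, so every Status returned by WriteBatch::Put(), AssignTimestamp(), CompactFiles(), etc. must be consumed. A minimal sketch of the same rule outside the gtest harness; WriteExample is a hypothetical name, and PermitUncheckedError() is the explicit opt-out available in current RocksDB releases:

```cpp
#include <rocksdb/db.h>
#include <rocksdb/options.h>
#include <rocksdb/write_batch.h>

// Sketch: satisfying the status-checked build without gtest's ASSERT_OK().
// Every returned Status is either branched on or explicitly waived before
// it goes out of scope.
void WriteExample(rocksdb::DB* db) {
  rocksdb::WriteBatch batch;
  rocksdb::Status s = batch.Put("a", "new_value");
  if (!s.ok()) {
    return;  // inspecting s via ok() marks it as checked
  }
  s = db->Write(rocksdb::WriteOptions(), &batch);
  // Deliberately waive the check when the caller does not care about the
  // outcome; this also counts as "checked" in ASSERT_STATUS_CHECKED builds.
  s.PermitUncheckedError();
}
```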
diff --git a/table/block_based/block_based_table_builder.cc b/table/block_based/block_based_table_builder.cc
index 53ebfead21f951b5c8ad5d16827f882cfee49903..3d92891ca8317fed7e8dace4988638eddf0917c5 100644
--- a/table/block_based/block_based_table_builder.cc
+++ b/table/block_based/block_based_table_builder.cc
@@ -727,7 +727,7 @@ void BlockBasedTableBuilder::Add(const Slice& key, const Slice& value) {
     if (r->props.num_entries > r->props.num_range_deletions) {
       assert(r->internal_comparator.Compare(key, Slice(r->last_key)) > 0);
     }
-#endif  // NDEBUG
+#endif  // !NDEBUG
 
     auto should_flush = r->flush_block_policy->Update(key, value);
     if (should_flush) {
@@ -1653,6 +1653,11 @@ Status BlockBasedTableBuilder::Finish() {
     r->pc_rep->write_queue.finish();
     r->pc_rep->write_thread->join();
     r->pc_rep->finished = true;
+#ifndef NDEBUG
+    for (const auto& br : r->pc_rep->block_rep_buf) {
+      assert(br.status.ok());
+    }
+#endif  // !NDEBUG
   } else {
     // To make sure properties block is able to keep the accurate size of index
     // block, we will finish writing all index entries first.
diff --git a/table/block_based/block_based_table_reader.cc b/table/block_based/block_based_table_reader.cc
index a98167f1c89ef5fd70be81e4a3df075d1f97cb2b..e12c328131be641d54fe760593b9542f28ac5ce4 100644
--- a/table/block_based/block_based_table_reader.cc
+++ b/table/block_based/block_based_table_reader.cc
@@ -1999,7 +1999,6 @@ bool BlockBasedTable::PrefixMayMatch(
   }
 
   bool may_match = true;
-  Status s;
 
   // First, try check with full filter
   FilterBlockReader* const filter = rep_->filter.get();
@@ -2585,6 +2584,10 @@ void BlockBasedTable::MultiGet(const ReadOptions& read_options,
               read_options, results[idx_in_batch], &first_biter,
               statuses[idx_in_batch]);
           reusing_block = false;
+        } else {
+          // If handler is null and result is empty, then the status is never
+          // set, which should be the initial value: ok().
+          assert(statuses[idx_in_batch].ok());
         }
         biter = &first_biter;
         idx_in_batch++;
diff --git a/table/multiget_context.h b/table/multiget_context.h
index 964544d079cfcf029d8208c80cf1db6f849c6fec..604a26f8b4e243157dde69db3a6a0294f9e7a467 100644
--- a/table/multiget_context.h
+++ b/table/multiget_context.h
@@ -87,9 +87,9 @@ struct KeyContext {
 class MultiGetContext {
  public:
   // Limit the number of keys in a batch to this number. Benchmarks show that
-  // there is negligible benefit for batches exceeding this. Keeping this < 64
+  // there is negligible benefit for batches exceeding this. Keeping this < 32
   // simplifies iteration, as well as reduces the amount of stack allocations
-  // htat need to be performed
+  // that need to be performed
   static const int MAX_BATCH_SIZE = 32;
 
   MultiGetContext(autovector<KeyContext*, MAX_BATCH_SIZE>* sorted_keys,
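Note on the `#endif // !NDEBUG` fixes in block_based_table_builder.cc: the trailing comment conventionally restates the condition that opened the block (`#ifndef NDEBUG`, i.e. the block compiles only in debug builds), not the macro itself. A self-contained sketch of the same debug-only verification pattern that Finish() now uses for per-block statuses; BlockResult and VerifyAllBlocksOk are hypothetical names, not RocksDB code:

```cpp
#include <cassert>
#include <vector>

struct BlockResult {
  bool ok;  // stand-in for the per-block status kept in block_rep_buf
};

// Debug-only invariant sweep: when NDEBUG is defined (release builds) the
// whole loop compiles away, matching the pattern added to Finish() above.
void VerifyAllBlocksOk(const std::vector<BlockResult>& blocks) {
#ifndef NDEBUG
  for (const auto& b : blocks) {
    assert(b.ok);
  }
#else
  (void)blocks;  // avoid an unused-parameter warning in release builds
#endif  // !NDEBUG
}
```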
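Note on the MAX_BATCH_SIZE comment in multiget_context.h: MultiGetContext handles at most 32 keys per batch, so larger MultiGet calls are split into successive batches. A rough caller-side sketch of the same chunking idea, using the std::vector overload of DB::MultiGet; ChunkedMultiGet is a hypothetical helper, not a RocksDB API:

```cpp
#include <rocksdb/db.h>
#include <algorithm>
#include <string>
#include <vector>

// Illustration only: split a large key set into MAX_BATCH_SIZE-sized chunks,
// mirroring what MultiGetContext does internally per batch.
void ChunkedMultiGet(rocksdb::DB* db,
                     const std::vector<rocksdb::Slice>& keys) {
  constexpr size_t kBatch = 32;  // mirrors MultiGetContext::MAX_BATCH_SIZE
  for (size_t i = 0; i < keys.size(); i += kBatch) {
    const size_t n = std::min(kBatch, keys.size() - i);
    std::vector<rocksdb::Slice> chunk(keys.begin() + i, keys.begin() + i + n);
    std::vector<std::string> values;
    std::vector<rocksdb::Status> statuses =
        db->MultiGet(rocksdb::ReadOptions(), chunk, &values);
    for (auto& s : statuses) {
      s.PermitUncheckedError();  // per-key statuses must also be checked
    }
  }
}
```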