db_test.cc 225.9 KB
Newer Older
1
//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
S
Siying Dong 已提交
2 3 4
//  This source code is licensed under both the GPLv2 (found in the
//  COPYING file in the root directory) and Apache 2.0 License
//  (found in the LICENSE.Apache file in the root directory).
5
//
J
jorlow@chromium.org 已提交
6 7 8 9
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

S
sdong 已提交
10 11
// Introduction of SyncPoint effectively disabled building and running this test
// in Release build.
D
Dmitri Smirnov 已提交
12
// which is a pity, it is a good test
A
Aaron Gao 已提交
13
#include <fcntl.h>
14
#include <algorithm>
15
#include <set>
16
#include <thread>
17
#include <unordered_set>
S
Stanislau Hlebik 已提交
18
#include <utility>
S
sdong 已提交
19 20 21
#ifndef OS_WIN
#include <unistd.h>
#endif
D
David Bernard 已提交
22
#ifdef OS_SOLARIS
D
David Bernard 已提交
23
#include <alloca.h>
D
David Bernard 已提交
24
#endif
25

26
#include "cache/lru_cache.h"
27
#include "db/blob/blob_index.h"
28
#include "db/db_impl/db_impl.h"
29
#include "db/db_test_util.h"
A
Aaron Gao 已提交
30
#include "db/dbformat.h"
I
Igor Canadi 已提交
31
#include "db/job_context.h"
J
jorlow@chromium.org 已提交
32 33
#include "db/version_set.h"
#include "db/write_batch_internal.h"
34
#include "env/mock_env.h"
35
#include "file/filename.h"
36
#include "memtable/hash_linklist_rep.h"
37
#include "monitoring/thread_status_util.h"
D
Dmitri Smirnov 已提交
38
#include "port/port.h"
I
Igor Canadi 已提交
39
#include "port/stack_trace.h"
40 41
#include "rocksdb/cache.h"
#include "rocksdb/compaction_filter.h"
A
agiardullo 已提交
42
#include "rocksdb/convenience.h"
K
Kai Liu 已提交
43
#include "rocksdb/db.h"
44
#include "rocksdb/env.h"
45
#include "rocksdb/experimental.h"
K
Kai Liu 已提交
46
#include "rocksdb/filter_policy.h"
A
agiardullo 已提交
47
#include "rocksdb/options.h"
48
#include "rocksdb/perf_context.h"
49 50
#include "rocksdb/slice.h"
#include "rocksdb/slice_transform.h"
A
agiardullo 已提交
51
#include "rocksdb/snapshot.h"
S
Siying Dong 已提交
52
#include "rocksdb/table.h"
53
#include "rocksdb/table_properties.h"
Y
Yueh-Hsuan Chiang 已提交
54
#include "rocksdb/thread_status.h"
55
#include "rocksdb/utilities/checkpoint.h"
A
agiardullo 已提交
56
#include "rocksdb/utilities/optimistic_transaction_db.h"
A
Aaron Gao 已提交
57
#include "rocksdb/utilities/write_batch_with_index.h"
58
#include "table/mock_table.h"
S
sdong 已提交
59
#include "table/scoped_arena_iterator.h"
60 61 62
#include "test_util/sync_point.h"
#include "test_util/testharness.h"
#include "test_util/testutil.h"
A
Aaron Gao 已提交
63
#include "util/compression.h"
64
#include "util/mutexlock.h"
M
mrambacher 已提交
65
#include "util/random.h"
L
Lei Jin 已提交
66
#include "util/rate_limiter.h"
A
Aaron Gao 已提交
67 68
#include "util/string_util.h"
#include "utilities/merge_operators.h"
J
jorlow@chromium.org 已提交
69

70
namespace ROCKSDB_NAMESPACE {
J
jorlow@chromium.org 已提交
71

S
sdong 已提交
72 73 74
// Note that whole DBTest and its child classes disable fsync on files
// and directories for speed.
// If fsync needs to be covered in a test, put it in other places.
75 76
class DBTest : public DBTestBase {
 public:
S
sdong 已提交
77
  DBTest() : DBTestBase("/db_test", /*env_do_fsync=*/false) {}
78 79
};

80 81 82
class DBTestWithParam
    : public DBTest,
      public testing::WithParamInterface<std::tuple<uint32_t, bool>> {
83
 public:
84 85 86 87
  DBTestWithParam() {
    max_subcompactions_ = std::get<0>(GetParam());
    exclusive_manual_compaction_ = std::get<1>(GetParam());
  }
88 89 90 91 92

  // Required if inheriting from testing::WithParamInterface<>
  static void SetUpTestCase() {}
  static void TearDownTestCase() {}

93
  uint32_t max_subcompactions_;
94
  bool exclusive_manual_compaction_;
95
};
J
jorlow@chromium.org 已提交
96

97
TEST_F(DBTest, MockEnvTest) {
98
  std::unique_ptr<MockEnv> env{new MockEnv(Env::Default())};
99 100 101 102
  Options options;
  options.create_if_missing = true;
  options.env = env.get();
  DB* db;
103

104 105
  const Slice keys[] = {Slice("aaa"), Slice("bbb"), Slice("ccc")};
  const Slice vals[] = {Slice("foo"), Slice("bar"), Slice("baz")};
K
kailiu 已提交
106

107 108 109 110
  ASSERT_OK(DB::Open(options, "/dir/db", &db));
  for (size_t i = 0; i < 3; ++i) {
    ASSERT_OK(db->Put(WriteOptions(), keys[i], vals[i]));
  }
111

112 113 114 115 116
  for (size_t i = 0; i < 3; ++i) {
    std::string res;
    ASSERT_OK(db->Get(ReadOptions(), keys[i], &res));
    ASSERT_TRUE(res == vals[i]);
  }
117

118 119 120 121 122 123 124 125 126 127
  Iterator* iterator = db->NewIterator(ReadOptions());
  iterator->SeekToFirst();
  for (size_t i = 0; i < 3; ++i) {
    ASSERT_TRUE(iterator->Valid());
    ASSERT_TRUE(keys[i] == iterator->key());
    ASSERT_TRUE(vals[i] == iterator->value());
    iterator->Next();
  }
  ASSERT_TRUE(!iterator->Valid());
  delete iterator;
128

A
Aaron Gao 已提交
129 130
// TEST_FlushMemTable() is not supported in ROCKSDB_LITE
#ifndef ROCKSDB_LITE
131
  DBImpl* dbi = static_cast_with_check<DBImpl>(db);
132
  ASSERT_OK(dbi->TEST_FlushMemTable());
133

134 135 136 137 138
  for (size_t i = 0; i < 3; ++i) {
    std::string res;
    ASSERT_OK(db->Get(ReadOptions(), keys[i], &res));
    ASSERT_TRUE(res == vals[i]);
  }
A
Aaron Gao 已提交
139
#endif  // ROCKSDB_LITE
140

141 142
  delete db;
}
143

A
Andrew Kryczka 已提交
144 145 146
// NewMemEnv returns nullptr in ROCKSDB_LITE since class InMemoryEnv isn't
// defined.
#ifndef ROCKSDB_LITE
147
TEST_F(DBTest, MemEnvTest) {
148
  std::unique_ptr<Env> env{NewMemEnv(Env::Default())};
149 150 151 152
  Options options;
  options.create_if_missing = true;
  options.env = env.get();
  DB* db;
153

154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178
  const Slice keys[] = {Slice("aaa"), Slice("bbb"), Slice("ccc")};
  const Slice vals[] = {Slice("foo"), Slice("bar"), Slice("baz")};

  ASSERT_OK(DB::Open(options, "/dir/db", &db));
  for (size_t i = 0; i < 3; ++i) {
    ASSERT_OK(db->Put(WriteOptions(), keys[i], vals[i]));
  }

  for (size_t i = 0; i < 3; ++i) {
    std::string res;
    ASSERT_OK(db->Get(ReadOptions(), keys[i], &res));
    ASSERT_TRUE(res == vals[i]);
  }

  Iterator* iterator = db->NewIterator(ReadOptions());
  iterator->SeekToFirst();
  for (size_t i = 0; i < 3; ++i) {
    ASSERT_TRUE(iterator->Valid());
    ASSERT_TRUE(keys[i] == iterator->key());
    ASSERT_TRUE(vals[i] == iterator->value());
    iterator->Next();
  }
  ASSERT_TRUE(!iterator->Valid());
  delete iterator;

179
  DBImpl* dbi = static_cast_with_check<DBImpl>(db);
180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197
  ASSERT_OK(dbi->TEST_FlushMemTable());

  for (size_t i = 0; i < 3; ++i) {
    std::string res;
    ASSERT_OK(db->Get(ReadOptions(), keys[i], &res));
    ASSERT_TRUE(res == vals[i]);
  }

  delete db;

  options.create_if_missing = false;
  ASSERT_OK(DB::Open(options, "/dir/db", &db));
  for (size_t i = 0; i < 3; ++i) {
    std::string res;
    ASSERT_OK(db->Get(ReadOptions(), keys[i], &res));
    ASSERT_TRUE(res == vals[i]);
  }
  delete db;
J
jorlow@chromium.org 已提交
198
}
I
Islam AbdelRahman 已提交
199
#endif  // ROCKSDB_LITE
J
jorlow@chromium.org 已提交
200

I
Igor Sugak 已提交
201
TEST_F(DBTest, WriteEmptyBatch) {
S
sdong 已提交
202
  Options options = CurrentOptions();
203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218
  options.env = env_;
  options.write_buffer_size = 100000;
  CreateAndReopenWithCF({"pikachu"}, options);

  ASSERT_OK(Put(1, "foo", "bar"));
  WriteOptions wo;
  wo.sync = true;
  wo.disableWAL = false;
  WriteBatch empty_batch;
  ASSERT_OK(dbfull()->Write(wo, &empty_batch));

  // make sure we can re-open it.
  ASSERT_OK(TryReopenWithColumnFamilies({"default", "pikachu"}, options));
  ASSERT_EQ("bar", Get(1, "foo"));
}

M
Maysam Yabandeh 已提交
219 220 221 222 223 224 225 226
TEST_F(DBTest, SkipDelay) {
  Options options = CurrentOptions();
  options.env = env_;
  options.write_buffer_size = 100000;
  CreateAndReopenWithCF({"pikachu"}, options);

  for (bool sync : {true, false}) {
    for (bool disableWAL : {true, false}) {
227 228 229 230
      if (sync && disableWAL) {
        // sync and disableWAL is incompatible.
        continue;
      }
M
Maysam Yabandeh 已提交
231 232 233 234 235 236
      // Use a small number to ensure a large delay that is still effective
      // when we do Put
      // TODO(myabandeh): this is time dependent and could potentially make
      // the test flaky
      auto token = dbfull()->TEST_write_controler().GetDelayToken(1);
      std::atomic<int> sleep_count(0);
237
      ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
M
Maysam Yabandeh 已提交
238
          "DBImpl::DelayWrite:Sleep",
239
          [&](void* /*arg*/) { sleep_count.fetch_add(1); });
M
Maysam Yabandeh 已提交
240
      std::atomic<int> wait_count(0);
241
      ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
M
Maysam Yabandeh 已提交
242
          "DBImpl::DelayWrite:Wait",
243
          [&](void* /*arg*/) { wait_count.fetch_add(1); });
244
      ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
M
Maysam Yabandeh 已提交
245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266

      WriteOptions wo;
      wo.sync = sync;
      wo.disableWAL = disableWAL;
      wo.no_slowdown = true;
      dbfull()->Put(wo, "foo", "bar");
      // We need the 2nd write to trigger delay. This is because delay is
      // estimated based on the last write size which is 0 for the first write.
      ASSERT_NOK(dbfull()->Put(wo, "foo2", "bar2"));
      ASSERT_GE(sleep_count.load(), 0);
      ASSERT_GE(wait_count.load(), 0);
      token.reset();

      token = dbfull()->TEST_write_controler().GetDelayToken(1000000000);
      wo.no_slowdown = false;
      ASSERT_OK(dbfull()->Put(wo, "foo3", "bar3"));
      ASSERT_GE(sleep_count.load(), 1);
      token.reset();
    }
  }
}

267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294
TEST_F(DBTest, MixedSlowdownOptions) {
  Options options = CurrentOptions();
  options.env = env_;
  options.write_buffer_size = 100000;
  CreateAndReopenWithCF({"pikachu"}, options);
  std::vector<port::Thread> threads;
  std::atomic<int> thread_num(0);

  std::function<void()> write_slowdown_func = [&]() {
    int a = thread_num.fetch_add(1);
    std::string key = "foo" + std::to_string(a);
    WriteOptions wo;
    wo.no_slowdown = false;
    ASSERT_OK(dbfull()->Put(wo, key, "bar"));
  };
  std::function<void()> write_no_slowdown_func = [&]() {
    int a = thread_num.fetch_add(1);
    std::string key = "foo" + std::to_string(a);
    WriteOptions wo;
    wo.no_slowdown = true;
    ASSERT_NOK(dbfull()->Put(wo, key, "bar"));
  };
  // Use a small number to ensure a large delay that is still effective
  // when we do Put
  // TODO(myabandeh): this is time dependent and could potentially make
  // the test flaky
  auto token = dbfull()->TEST_write_controler().GetDelayToken(1);
  std::atomic<int> sleep_count(0);
295 296
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
      "DBImpl::DelayWrite:BeginWriteStallDone", [&](void* /*arg*/) {
297 298 299 300 301 302 303 304 305 306
        sleep_count.fetch_add(1);
        if (threads.empty()) {
          for (int i = 0; i < 2; ++i) {
            threads.emplace_back(write_slowdown_func);
          }
          for (int i = 0; i < 2; ++i) {
            threads.emplace_back(write_no_slowdown_func);
          }
        }
      });
307
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348

  WriteOptions wo;
  wo.sync = false;
  wo.disableWAL = false;
  wo.no_slowdown = false;
  dbfull()->Put(wo, "foo", "bar");
  // We need the 2nd write to trigger delay. This is because delay is
  // estimated based on the last write size which is 0 for the first write.
  ASSERT_OK(dbfull()->Put(wo, "foo2", "bar2"));
          token.reset();

  for (auto& t : threads) {
    t.join();
  }
  ASSERT_GE(sleep_count.load(), 1);

  wo.no_slowdown = true;
  ASSERT_OK(dbfull()->Put(wo, "foo3", "bar"));
}

TEST_F(DBTest, MixedSlowdownOptionsInQueue) {
  Options options = CurrentOptions();
  options.env = env_;
  options.write_buffer_size = 100000;
  CreateAndReopenWithCF({"pikachu"}, options);
  std::vector<port::Thread> threads;
  std::atomic<int> thread_num(0);

  std::function<void()> write_no_slowdown_func = [&]() {
    int a = thread_num.fetch_add(1);
    std::string key = "foo" + std::to_string(a);
    WriteOptions wo;
    wo.no_slowdown = true;
    ASSERT_NOK(dbfull()->Put(wo, key, "bar"));
  };
  // Use a small number to ensure a large delay that is still effective
  // when we do Put
  // TODO(myabandeh): this is time dependent and could potentially make
  // the test flaky
  auto token = dbfull()->TEST_write_controler().GetDelayToken(1);
  std::atomic<int> sleep_count(0);
349 350
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
      "DBImpl::DelayWrite:Sleep", [&](void* /*arg*/) {
351 352 353 354 355 356 357 358 359 360 361
        sleep_count.fetch_add(1);
        if (threads.empty()) {
          for (int i = 0; i < 2; ++i) {
            threads.emplace_back(write_no_slowdown_func);
          }
          // Sleep for 2s to allow the threads to insert themselves into the
          // write queue
          env_->SleepForMicroseconds(3000000ULL);
        }
      });
  std::atomic<int> wait_count(0);
362
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
363 364
      "DBImpl::DelayWrite:Wait",
      [&](void* /*arg*/) { wait_count.fetch_add(1); });
365
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416

  WriteOptions wo;
  wo.sync = false;
  wo.disableWAL = false;
  wo.no_slowdown = false;
  dbfull()->Put(wo, "foo", "bar");
  // We need the 2nd write to trigger delay. This is because delay is
  // estimated based on the last write size which is 0 for the first write.
  ASSERT_OK(dbfull()->Put(wo, "foo2", "bar2"));
          token.reset();

  for (auto& t : threads) {
    t.join();
  }
  ASSERT_EQ(sleep_count.load(), 1);
  ASSERT_GE(wait_count.load(), 0);
}

TEST_F(DBTest, MixedSlowdownOptionsStop) {
  Options options = CurrentOptions();
  options.env = env_;
  options.write_buffer_size = 100000;
  CreateAndReopenWithCF({"pikachu"}, options);
  std::vector<port::Thread> threads;
  std::atomic<int> thread_num(0);

  std::function<void()> write_slowdown_func = [&]() {
    int a = thread_num.fetch_add(1);
    std::string key = "foo" + std::to_string(a);
    WriteOptions wo;
    wo.no_slowdown = false;
    ASSERT_OK(dbfull()->Put(wo, key, "bar"));
  };
  std::function<void()> write_no_slowdown_func = [&]() {
    int a = thread_num.fetch_add(1);
    std::string key = "foo" + std::to_string(a);
    WriteOptions wo;
    wo.no_slowdown = true;
    ASSERT_NOK(dbfull()->Put(wo, key, "bar"));
  };
  std::function<void()> wakeup_writer = [&]() {
    dbfull()->mutex_.Lock();
    dbfull()->bg_cv_.SignalAll();
    dbfull()->mutex_.Unlock();
  };
  // Use a small number to ensure a large delay that is still effective
  // when we do Put
  // TODO(myabandeh): this is time dependent and could potentially make
  // the test flaky
  auto token = dbfull()->TEST_write_controler().GetStopToken();
  std::atomic<int> wait_count(0);
417 418
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
      "DBImpl::DelayWrite:Wait", [&](void* /*arg*/) {
419 420 421 422 423 424 425 426 427 428 429 430 431 432 433
        wait_count.fetch_add(1);
        if (threads.empty()) {
          for (int i = 0; i < 2; ++i) {
            threads.emplace_back(write_slowdown_func);
          }
          for (int i = 0; i < 2; ++i) {
            threads.emplace_back(write_no_slowdown_func);
          }
          // Sleep for 2s to allow the threads to insert themselves into the
          // write queue
          env_->SleepForMicroseconds(3000000ULL);
        }
        token.reset();
        threads.emplace_back(wakeup_writer);
      });
434
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453

  WriteOptions wo;
  wo.sync = false;
  wo.disableWAL = false;
  wo.no_slowdown = false;
  dbfull()->Put(wo, "foo", "bar");
  // We need the 2nd write to trigger delay. This is because delay is
  // estimated based on the last write size which is 0 for the first write.
  ASSERT_OK(dbfull()->Put(wo, "foo2", "bar2"));
          token.reset();

  for (auto& t : threads) {
    t.join();
  }
  ASSERT_GE(wait_count.load(), 1);

  wo.no_slowdown = true;
  ASSERT_OK(dbfull()->Put(wo, "foo3", "bar"));
}
I
Islam AbdelRahman 已提交
454
#ifndef ROCKSDB_LITE
L
Lei Jin 已提交
455

I
Igor Sugak 已提交
456
TEST_F(DBTest, LevelLimitReopen) {
457
  Options options = CurrentOptions();
L
Lei Jin 已提交
458
  CreateAndReopenWithCF({"pikachu"}, options);
459 460 461

  const std::string value(1024 * 1024, ' ');
  int i = 0;
462 463
  while (NumTableFilesAtLevel(2, 1) == 0) {
    ASSERT_OK(Put(1, Key(i++), value));
464 465 466
  }

  options.num_levels = 1;
467
  options.max_bytes_for_level_multiplier_additional.resize(1, 1);
L
Lei Jin 已提交
468
  Status s = TryReopenWithColumnFamilies({"default", "pikachu"}, options);
469
  ASSERT_EQ(s.IsInvalidArgument(), true);
470
  ASSERT_EQ(s.ToString(),
471
            "Invalid argument: db has more levels than options.num_levels");
472 473

  options.num_levels = 10;
474
  options.max_bytes_for_level_multiplier_additional.resize(10, 1);
L
Lei Jin 已提交
475
  ASSERT_OK(TryReopenWithColumnFamilies({"default", "pikachu"}, options));
476
}
I
Islam AbdelRahman 已提交
477
#endif  // ROCKSDB_LITE
478

J
jorlow@chromium.org 已提交
479

A
Andres Noetzli 已提交
480 481 482 483 484 485 486 487 488
TEST_F(DBTest, PutSingleDeleteGet) {
  do {
    CreateAndReopenWithCF({"pikachu"}, CurrentOptions());
    ASSERT_OK(Put(1, "foo", "v1"));
    ASSERT_EQ("v1", Get(1, "foo"));
    ASSERT_OK(Put(1, "foo2", "v2"));
    ASSERT_EQ("v2", Get(1, "foo2"));
    ASSERT_OK(SingleDelete(1, "foo"));
    ASSERT_EQ("NOT_FOUND", Get(1, "foo"));
489 490 491 492 493
    // Skip FIFO and universal compaction beccause they do not apply to the test
    // case. Skip MergePut because single delete does not get removed when it
    // encounters a merge.
  } while (ChangeOptions(kSkipFIFOCompaction | kSkipUniversalCompaction |
                         kSkipMergePut));
A
Andres Noetzli 已提交
494 495
}

496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605
TEST_F(DBTest, ReadFromPersistedTier) {
  do {
    Random rnd(301);
    Options options = CurrentOptions();
    for (int disableWAL = 0; disableWAL <= 1; ++disableWAL) {
      CreateAndReopenWithCF({"pikachu"}, options);
      WriteOptions wopt;
      wopt.disableWAL = (disableWAL == 1);
      // 1st round: put but not flush
      ASSERT_OK(db_->Put(wopt, handles_[1], "foo", "first"));
      ASSERT_OK(db_->Put(wopt, handles_[1], "bar", "one"));
      ASSERT_EQ("first", Get(1, "foo"));
      ASSERT_EQ("one", Get(1, "bar"));

      // Read directly from persited data.
      ReadOptions ropt;
      ropt.read_tier = kPersistedTier;
      std::string value;
      if (wopt.disableWAL) {
        // as data has not yet being flushed, we expect not found.
        ASSERT_TRUE(db_->Get(ropt, handles_[1], "foo", &value).IsNotFound());
        ASSERT_TRUE(db_->Get(ropt, handles_[1], "bar", &value).IsNotFound());
      } else {
        ASSERT_OK(db_->Get(ropt, handles_[1], "foo", &value));
        ASSERT_OK(db_->Get(ropt, handles_[1], "bar", &value));
      }

      // Multiget
      std::vector<ColumnFamilyHandle*> multiget_cfs;
      multiget_cfs.push_back(handles_[1]);
      multiget_cfs.push_back(handles_[1]);
      std::vector<Slice> multiget_keys;
      multiget_keys.push_back("foo");
      multiget_keys.push_back("bar");
      std::vector<std::string> multiget_values;
      auto statuses =
          db_->MultiGet(ropt, multiget_cfs, multiget_keys, &multiget_values);
      if (wopt.disableWAL) {
        ASSERT_TRUE(statuses[0].IsNotFound());
        ASSERT_TRUE(statuses[1].IsNotFound());
      } else {
        ASSERT_OK(statuses[0]);
        ASSERT_OK(statuses[1]);
      }

      // 2nd round: flush and put a new value in memtable.
      ASSERT_OK(Flush(1));
      ASSERT_OK(db_->Put(wopt, handles_[1], "rocksdb", "hello"));

      // once the data has been flushed, we are able to get the
      // data when kPersistedTier is used.
      ASSERT_TRUE(db_->Get(ropt, handles_[1], "foo", &value).ok());
      ASSERT_EQ(value, "first");
      ASSERT_TRUE(db_->Get(ropt, handles_[1], "bar", &value).ok());
      ASSERT_EQ(value, "one");
      if (wopt.disableWAL) {
        ASSERT_TRUE(
            db_->Get(ropt, handles_[1], "rocksdb", &value).IsNotFound());
      } else {
        ASSERT_OK(db_->Get(ropt, handles_[1], "rocksdb", &value));
        ASSERT_EQ(value, "hello");
      }

      // Expect same result in multiget
      multiget_cfs.push_back(handles_[1]);
      multiget_keys.push_back("rocksdb");
      statuses =
          db_->MultiGet(ropt, multiget_cfs, multiget_keys, &multiget_values);
      ASSERT_TRUE(statuses[0].ok());
      ASSERT_EQ("first", multiget_values[0]);
      ASSERT_TRUE(statuses[1].ok());
      ASSERT_EQ("one", multiget_values[1]);
      if (wopt.disableWAL) {
        ASSERT_TRUE(statuses[2].IsNotFound());
      } else {
        ASSERT_OK(statuses[2]);
      }

      // 3rd round: delete and flush
      ASSERT_OK(db_->Delete(wopt, handles_[1], "foo"));
      Flush(1);
      ASSERT_OK(db_->Delete(wopt, handles_[1], "bar"));

      ASSERT_TRUE(db_->Get(ropt, handles_[1], "foo", &value).IsNotFound());
      if (wopt.disableWAL) {
        // Still expect finding the value as its delete has not yet being
        // flushed.
        ASSERT_TRUE(db_->Get(ropt, handles_[1], "bar", &value).ok());
        ASSERT_EQ(value, "one");
      } else {
        ASSERT_TRUE(db_->Get(ropt, handles_[1], "bar", &value).IsNotFound());
      }
      ASSERT_TRUE(db_->Get(ropt, handles_[1], "rocksdb", &value).ok());
      ASSERT_EQ(value, "hello");

      statuses =
          db_->MultiGet(ropt, multiget_cfs, multiget_keys, &multiget_values);
      ASSERT_TRUE(statuses[0].IsNotFound());
      if (wopt.disableWAL) {
        ASSERT_TRUE(statuses[1].ok());
        ASSERT_EQ("one", multiget_values[1]);
      } else {
        ASSERT_TRUE(statuses[1].IsNotFound());
      }
      ASSERT_TRUE(statuses[2].ok());
      ASSERT_EQ("hello", multiget_values[2]);
      if (wopt.disableWAL == 0) {
        DestroyAndReopen(options);
      }
    }
606
  } while (ChangeOptions());
607 608
}

A
Andres Noetzli 已提交
609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641
TEST_F(DBTest, SingleDeleteFlush) {
  // Test to check whether flushing preserves a single delete hidden
  // behind a put.
  do {
    Random rnd(301);

    Options options = CurrentOptions();
    options.disable_auto_compactions = true;
    CreateAndReopenWithCF({"pikachu"}, options);

    // Put values on second level (so that they will not be in the same
    // compaction as the other operations.
    Put(1, "foo", "first");
    Put(1, "bar", "one");
    ASSERT_OK(Flush(1));
    MoveFilesToLevel(2, 1);

    // (Single) delete hidden by a put
    SingleDelete(1, "foo");
    Put(1, "foo", "second");
    Delete(1, "bar");
    Put(1, "bar", "two");
    ASSERT_OK(Flush(1));

    SingleDelete(1, "foo");
    Delete(1, "bar");
    ASSERT_OK(Flush(1));

    dbfull()->CompactRange(CompactRangeOptions(), handles_[1], nullptr,
                           nullptr);

    ASSERT_EQ("NOT_FOUND", Get(1, "bar"));
    ASSERT_EQ("NOT_FOUND", Get(1, "foo"));
642 643 644 645 646
    // Skip FIFO and universal compaction beccause they do not apply to the test
    // case. Skip MergePut because single delete does not get removed when it
    // encounters a merge.
  } while (ChangeOptions(kSkipFIFOCompaction | kSkipUniversalCompaction |
                         kSkipMergePut));
A
Andres Noetzli 已提交
647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664
}

TEST_F(DBTest, SingleDeletePutFlush) {
  // Single deletes that encounter the matching put in a flush should get
  // removed.
  do {
    Random rnd(301);

    Options options = CurrentOptions();
    options.disable_auto_compactions = true;
    CreateAndReopenWithCF({"pikachu"}, options);

    Put(1, "foo", Slice());
    Put(1, "a", Slice());
    SingleDelete(1, "a");
    ASSERT_OK(Flush(1));

    ASSERT_EQ("[ ]", AllEntriesFor("a", 1));
665 666 667 668 669
    // Skip FIFO and universal compaction beccause they do not apply to the test
    // case. Skip MergePut because single delete does not get removed when it
    // encounters a merge.
  } while (ChangeOptions(kSkipFIFOCompaction | kSkipUniversalCompaction |
                         kSkipMergePut));
A
Andres Noetzli 已提交
670 671
}

672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701
// Disable because not all platform can run it.
// It requires more than 9GB memory to run it, With single allocation
// of more than 3GB.
TEST_F(DBTest, DISABLED_SanitizeVeryVeryLargeValue) {
  const size_t kValueSize = 4 * size_t{1024 * 1024 * 1024};  // 4GB value
  std::string raw(kValueSize, 'v');
  Options options = CurrentOptions();
  options.env = env_;
  options.merge_operator = MergeOperators::CreatePutOperator();
  options.write_buffer_size = 100000;  // Small write buffer
  options.paranoid_checks = true;
  DestroyAndReopen(options);

  ASSERT_OK(Put("boo", "v1"));
  ASSERT_TRUE(Put("foo", raw).IsInvalidArgument());
  ASSERT_TRUE(Merge("foo", raw).IsInvalidArgument());

  WriteBatch wb;
  ASSERT_TRUE(wb.Put("foo", raw).IsInvalidArgument());
  ASSERT_TRUE(wb.Merge("foo", raw).IsInvalidArgument());

  Slice value_slice = raw;
  Slice key_slice = "foo";
  SliceParts sp_key(&key_slice, 1);
  SliceParts sp_value(&value_slice, 1);

  ASSERT_TRUE(wb.Put(sp_key, sp_value).IsInvalidArgument());
  ASSERT_TRUE(wb.Merge(sp_key, sp_value).IsInvalidArgument());
}

S
sdong 已提交
702 703 704 705 706 707 708 709 710 711
// Disable because not all platform can run it.
// It requires more than 9GB memory to run it, With single allocation
// of more than 3GB.
TEST_F(DBTest, DISABLED_VeryLargeValue) {
  const size_t kValueSize = 3221225472u;  // 3GB value
  const size_t kKeySize = 8388608u;       // 8MB key
  std::string raw(kValueSize, 'v');
  std::string key1(kKeySize, 'c');
  std::string key2(kKeySize, 'd');

S
sdong 已提交
712
  Options options = CurrentOptions();
S
sdong 已提交
713 714 715 716 717 718 719 720 721 722 723 724
  options.env = env_;
  options.write_buffer_size = 100000;  // Small write buffer
  options.paranoid_checks = true;
  DestroyAndReopen(options);

  ASSERT_OK(Put("boo", "v1"));
  ASSERT_OK(Put("foo", "v1"));
  ASSERT_OK(Put(key1, raw));
  raw[0] = 'w';
  ASSERT_OK(Put(key2, raw));
  dbfull()->TEST_WaitForFlushMemTable();

Y
Yi Wu 已提交
725
#ifndef ROCKSDB_LITE
S
sdong 已提交
726
  ASSERT_EQ(1, NumTableFilesAtLevel(0));
Y
Yi Wu 已提交
727
#endif  // !ROCKSDB_LITE
S
sdong 已提交
728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757

  std::string value;
  Status s = db_->Get(ReadOptions(), key1, &value);
  ASSERT_OK(s);
  ASSERT_EQ(kValueSize, value.size());
  ASSERT_EQ('v', value[0]);

  s = db_->Get(ReadOptions(), key2, &value);
  ASSERT_OK(s);
  ASSERT_EQ(kValueSize, value.size());
  ASSERT_EQ('w', value[0]);

  // Compact all files.
  Flush();
  db_->CompactRange(CompactRangeOptions(), nullptr, nullptr);

  // Check DB is not in read-only state.
  ASSERT_OK(Put("boo", "v1"));

  s = db_->Get(ReadOptions(), key1, &value);
  ASSERT_OK(s);
  ASSERT_EQ(kValueSize, value.size());
  ASSERT_EQ('v', value[0]);

  s = db_->Get(ReadOptions(), key2, &value);
  ASSERT_OK(s);
  ASSERT_EQ(kValueSize, value.size());
  ASSERT_EQ('w', value[0]);
}

I
Igor Sugak 已提交
758
TEST_F(DBTest, GetFromImmutableLayer) {
S
Sanjay Ghemawat 已提交
759
  do {
S
sdong 已提交
760
    Options options = CurrentOptions();
S
Sanjay Ghemawat 已提交
761
    options.env = env_;
L
Lei Jin 已提交
762
    CreateAndReopenWithCF({"pikachu"}, options);
763

764 765
    ASSERT_OK(Put(1, "foo", "v1"));
    ASSERT_EQ("v1", Get(1, "foo"));
766

I
Igor Canadi 已提交
767 768
    // Block sync calls
    env_->delay_sstable_sync_.store(true, std::memory_order_release);
A
Aaron Gao 已提交
769 770
    Put(1, "k1", std::string(100000, 'x'));  // Fill memtable
    Put(1, "k2", std::string(100000, 'y'));  // Trigger flush
771 772
    ASSERT_EQ("v1", Get(1, "foo"));
    ASSERT_EQ("NOT_FOUND", Get(0, "foo"));
I
Igor Canadi 已提交
773 774
    // Release sync calls
    env_->delay_sstable_sync_.store(false, std::memory_order_release);
S
Sanjay Ghemawat 已提交
775
  } while (ChangeOptions());
776 777 778
}


I
Igor Sugak 已提交
779
TEST_F(DBTest, GetLevel0Ordering) {
S
Sanjay Ghemawat 已提交
780
  do {
L
Lei Jin 已提交
781
    CreateAndReopenWithCF({"pikachu"}, CurrentOptions());
S
Sanjay Ghemawat 已提交
782 783 784 785
    // Check that we process level-0 files in correct order.  The code
    // below generates two level-0 files where the earlier one comes
    // before the later one in the level-0 file list since the earlier
    // one has a smaller "smallest" key.
786 787 788 789 790 791
    ASSERT_OK(Put(1, "bar", "b"));
    ASSERT_OK(Put(1, "foo", "v1"));
    ASSERT_OK(Flush(1));
    ASSERT_OK(Put(1, "foo", "v2"));
    ASSERT_OK(Flush(1));
    ASSERT_EQ("v2", Get(1, "foo"));
S
Sanjay Ghemawat 已提交
792
  } while (ChangeOptions());
793 794
}

I
Igor Sugak 已提交
795
TEST_F(DBTest, WrongLevel0Config) {
796 797 798 799 800 801 802 803 804
  Options options = CurrentOptions();
  Close();
  ASSERT_OK(DestroyDB(dbname_, options));
  options.level0_stop_writes_trigger = 1;
  options.level0_slowdown_writes_trigger = 2;
  options.level0_file_num_compaction_trigger = 3;
  ASSERT_OK(DB::Open(options, dbname_, &db_));
}

I
Islam AbdelRahman 已提交
805
#ifndef ROCKSDB_LITE
I
Igor Sugak 已提交
806
TEST_F(DBTest, GetOrderedByLevels) {
S
Sanjay Ghemawat 已提交
807
  do {
L
Lei Jin 已提交
808
    CreateAndReopenWithCF({"pikachu"}, CurrentOptions());
809 810 811 812 813 814 815
    ASSERT_OK(Put(1, "foo", "v1"));
    Compact(1, "a", "z");
    ASSERT_EQ("v1", Get(1, "foo"));
    ASSERT_OK(Put(1, "foo", "v2"));
    ASSERT_EQ("v2", Get(1, "foo"));
    ASSERT_OK(Flush(1));
    ASSERT_EQ("v2", Get(1, "foo"));
S
Sanjay Ghemawat 已提交
816
  } while (ChangeOptions());
817 818
}

I
Igor Sugak 已提交
819
TEST_F(DBTest, GetPicksCorrectFile) {
S
Sanjay Ghemawat 已提交
820
  do {
L
Lei Jin 已提交
821
    CreateAndReopenWithCF({"pikachu"}, CurrentOptions());
S
Sanjay Ghemawat 已提交
822
    // Arrange to have multiple files in a non-level-0 level.
823 824 825 826 827 828 829 830 831
    ASSERT_OK(Put(1, "a", "va"));
    Compact(1, "a", "b");
    ASSERT_OK(Put(1, "x", "vx"));
    Compact(1, "x", "y");
    ASSERT_OK(Put(1, "f", "vf"));
    Compact(1, "f", "g");
    ASSERT_EQ("va", Get(1, "a"));
    ASSERT_EQ("vf", Get(1, "f"));
    ASSERT_EQ("vx", Get(1, "x"));
S
Sanjay Ghemawat 已提交
832
  } while (ChangeOptions());
833 834
}

I
Igor Sugak 已提交
835
TEST_F(DBTest, GetEncountersEmptyLevel) {
S
Sanjay Ghemawat 已提交
836
  do {
I
Igor Canadi 已提交
837
    Options options = CurrentOptions();
L
Lei Jin 已提交
838
    CreateAndReopenWithCF({"pikachu"}, options);
S
Sanjay Ghemawat 已提交
839 840 841 842 843 844
    // Arrange for the following to happen:
    //   * sstable A in level 0
    //   * nothing in level 1
    //   * sstable B in level 2
    // Then do enough Get() calls to arrange for an automatic compaction
    // of sstable A.  A bug would cause the compaction to be marked as
C
clark.kang 已提交
845
    // occurring at level 1 (instead of the correct level 0).
S
Sanjay Ghemawat 已提交
846 847

    // Step 1: First place sstables in levels 0 and 2
848 849 850 851 852 853 854 855 856 857
    Put(1, "a", "begin");
    Put(1, "z", "end");
    ASSERT_OK(Flush(1));
    dbfull()->TEST_CompactRange(0, nullptr, nullptr, handles_[1]);
    dbfull()->TEST_CompactRange(1, nullptr, nullptr, handles_[1]);
    Put(1, "a", "begin");
    Put(1, "z", "end");
    ASSERT_OK(Flush(1));
    ASSERT_GT(NumTableFilesAtLevel(0, 1), 0);
    ASSERT_GT(NumTableFilesAtLevel(2, 1), 0);
858

S
Sanjay Ghemawat 已提交
859
    // Step 2: clear level 1 if necessary.
860 861 862 863
    dbfull()->TEST_CompactRange(1, nullptr, nullptr, handles_[1]);
    ASSERT_EQ(NumTableFilesAtLevel(0, 1), 1);
    ASSERT_EQ(NumTableFilesAtLevel(1, 1), 0);
    ASSERT_EQ(NumTableFilesAtLevel(2, 1), 1);
S
Sanjay Ghemawat 已提交
864

H
heyongqiang 已提交
865 866
    // Step 3: read a bunch of times
    for (int i = 0; i < 1000; i++) {
867
      ASSERT_EQ("NOT_FOUND", Get(1, "missing"));
S
Sanjay Ghemawat 已提交
868
    }
H
heyongqiang 已提交
869 870

    // Step 4: Wait for compaction to finish
871
    dbfull()->TEST_WaitForCompact();
H
heyongqiang 已提交
872

873
    ASSERT_EQ(NumTableFilesAtLevel(0, 1), 1);  // XXX
I
Igor Canadi 已提交
874
  } while (ChangeOptions(kSkipUniversalCompaction | kSkipFIFOCompaction));
875
}
I
Islam AbdelRahman 已提交
876
#endif  // ROCKSDB_LITE
877

Y
Yi Wu 已提交
878
TEST_F(DBTest, FlushMultipleMemtable) {
V
Venkatesh Radhakrishnan 已提交
879 880
  do {
    Options options = CurrentOptions();
Y
Yi Wu 已提交
881 882 883 884
    WriteOptions writeOpt = WriteOptions();
    writeOpt.disableWAL = true;
    options.max_write_buffer_number = 4;
    options.min_write_buffer_number_to_merge = 3;
885
    options.max_write_buffer_size_to_maintain = -1;
V
Venkatesh Radhakrishnan 已提交
886
    CreateAndReopenWithCF({"pikachu"}, options);
Y
Yi Wu 已提交
887
    ASSERT_OK(dbfull()->Put(writeOpt, handles_[1], "foo", "v1"));
V
Venkatesh Radhakrishnan 已提交
888
    ASSERT_OK(Flush(1));
Y
Yi Wu 已提交
889
    ASSERT_OK(dbfull()->Put(writeOpt, handles_[1], "bar", "v1"));
V
Venkatesh Radhakrishnan 已提交
890

Y
Yi Wu 已提交
891 892 893
    ASSERT_EQ("v1", Get(1, "foo"));
    ASSERT_EQ("v1", Get(1, "bar"));
    ASSERT_OK(Flush(1));
894
  } while (ChangeCompactOptions());
895
}
Y
Yi Wu 已提交
896 897 898 899 900 901 902
#ifndef ROCKSDB_LITE
TEST_F(DBTest, FlushSchedule) {
  Options options = CurrentOptions();
  options.disable_auto_compactions = true;
  options.level0_stop_writes_trigger = 1 << 10;
  options.level0_slowdown_writes_trigger = 1 << 10;
  options.min_write_buffer_number_to_merge = 1;
903 904
  options.max_write_buffer_size_to_maintain =
      static_cast<int64_t>(options.write_buffer_size);
Y
Yi Wu 已提交
905 906 907
  options.max_write_buffer_number = 2;
  options.write_buffer_size = 120 * 1024;
  CreateAndReopenWithCF({"pikachu"}, options);
D
Dmitri Smirnov 已提交
908
  std::vector<port::Thread> threads;
909

Y
Yi Wu 已提交
910 911 912 913 914 915 916 917 918
  std::atomic<int> thread_num(0);
  // each column family will have 5 thread, each thread generating 2 memtables.
  // each column family should end up with 10 table files
  std::function<void()> fill_memtable_func = [&]() {
    int a = thread_num.fetch_add(1);
    Random rnd(a);
    WriteOptions wo;
    // this should fill up 2 memtables
    for (int k = 0; k < 5000; ++k) {
M
mrambacher 已提交
919
      ASSERT_OK(db_->Put(wo, handles_[a & 1], rnd.RandomString(13), ""));
Y
Yi Wu 已提交
920 921
    }
  };
S
sdong 已提交
922

Y
Yi Wu 已提交
923 924
  for (int i = 0; i < 10; ++i) {
    threads.emplace_back(fill_memtable_func);
S
sdong 已提交
925 926
  }

Y
Yi Wu 已提交
927 928 929
  for (auto& t : threads) {
    t.join();
  }
S
sdong 已提交
930

Y
Yi Wu 已提交
931 932 933 934 935 936
  auto default_tables = GetNumberOfSstFilesForColumnFamily(db_, "default");
  auto pikachu_tables = GetNumberOfSstFilesForColumnFamily(db_, "pikachu");
  ASSERT_LE(default_tables, static_cast<uint64_t>(10));
  ASSERT_GT(default_tables, static_cast<uint64_t>(0));
  ASSERT_LE(pikachu_tables, static_cast<uint64_t>(10));
  ASSERT_GT(pikachu_tables, static_cast<uint64_t>(0));
S
sdong 已提交
937
}
Y
Yi Wu 已提交
938
#endif  // ROCKSDB_LITE
S
sdong 已提交
939

Y
Yi Wu 已提交
940 941 942
namespace {
class KeepFilter : public CompactionFilter {
 public:
943 944 945
  bool Filter(int /*level*/, const Slice& /*key*/, const Slice& /*value*/,
              std::string* /*new_value*/,
              bool* /*value_changed*/) const override {
Y
Yi Wu 已提交
946 947
    return false;
  }
J
jorlow@chromium.org 已提交
948

949
  const char* Name() const override { return "KeepFilter"; }
Y
Yi Wu 已提交
950
};
951

Y
Yi Wu 已提交
952 953 954 955
class KeepFilterFactory : public CompactionFilterFactory {
 public:
  explicit KeepFilterFactory(bool check_context = false)
      : check_context_(check_context) {}
J
jorlow@chromium.org 已提交
956

957
  std::unique_ptr<CompactionFilter> CreateCompactionFilter(
Y
Yi Wu 已提交
958 959 960 961 962 963 964
      const CompactionFilter::Context& context) override {
    if (check_context_) {
      EXPECT_EQ(expect_full_compaction_.load(), context.is_full_compaction);
      EXPECT_EQ(expect_manual_compaction_.load(), context.is_manual_compaction);
    }
    return std::unique_ptr<CompactionFilter>(new KeepFilter());
  }
965

966
  const char* Name() const override { return "KeepFilterFactory"; }
Y
Yi Wu 已提交
967 968 969 970
  bool check_context_;
  std::atomic_bool expect_full_compaction_;
  std::atomic_bool expect_manual_compaction_;
};
971

Y
Yi Wu 已提交
972 973 974
class DelayFilter : public CompactionFilter {
 public:
  explicit DelayFilter(DBTestBase* d) : db_test(d) {}
975 976 977
  bool Filter(int /*level*/, const Slice& /*key*/, const Slice& /*value*/,
              std::string* /*new_value*/,
              bool* /*value_changed*/) const override {
978
    db_test->env_->MockSleepForMicroseconds(1000);
Y
Yi Wu 已提交
979 980
    return true;
  }
981

982
  const char* Name() const override { return "DelayFilter"; }
983

Y
Yi Wu 已提交
984 985 986
 private:
  DBTestBase* db_test;
};
987

Y
Yi Wu 已提交
988 989 990
class DelayFilterFactory : public CompactionFilterFactory {
 public:
  explicit DelayFilterFactory(DBTestBase* d) : db_test(d) {}
991
  std::unique_ptr<CompactionFilter> CreateCompactionFilter(
A
Andrew Kryczka 已提交
992
      const CompactionFilter::Context& /*context*/) override {
Y
Yi Wu 已提交
993 994
    return std::unique_ptr<CompactionFilter>(new DelayFilter(db_test));
  }
995

996
  const char* Name() const override { return "DelayFilterFactory"; }
J
jorlow@chromium.org 已提交
997

Y
Yi Wu 已提交
998 999 1000 1001
 private:
  DBTestBase* db_test;
};
}  // namespace
J
jorlow@chromium.org 已提交
1002

Y
Yi Wu 已提交
1003 1004 1005 1006 1007 1008
#ifndef ROCKSDB_LITE

static std::string CompressibleString(Random* rnd, int len) {
  std::string r;
  test::CompressibleString(rnd, 0.8, len, &r);
  return r;
J
jorlow@chromium.org 已提交
1009
}
Y
Yi Wu 已提交
1010
#endif  // ROCKSDB_LITE
J
jorlow@chromium.org 已提交
1011

Y
Yi Wu 已提交
1012 1013 1014 1015 1016 1017 1018 1019
TEST_F(DBTest, FailMoreDbPaths) {
  Options options = CurrentOptions();
  options.db_paths.emplace_back(dbname_, 10000000);
  options.db_paths.emplace_back(dbname_ + "_2", 1000000);
  options.db_paths.emplace_back(dbname_ + "_3", 1000000);
  options.db_paths.emplace_back(dbname_ + "_4", 1000000);
  options.db_paths.emplace_back(dbname_ + "_5", 1000000);
  ASSERT_TRUE(TryReopen(options).IsNotSupported());
1020 1021
}

1022 1023
void CheckColumnFamilyMeta(
    const ColumnFamilyMetaData& cf_meta,
1024 1025
    const std::vector<std::vector<FileMetaData>>& files_by_level,
    uint64_t start_time, uint64_t end_time) {
1026 1027 1028
  ASSERT_EQ(cf_meta.name, kDefaultColumnFamilyName);
  ASSERT_EQ(cf_meta.levels.size(), files_by_level.size());

Y
Yi Wu 已提交
1029 1030
  uint64_t cf_size = 0;
  size_t file_count = 0;
1031 1032 1033 1034 1035 1036 1037 1038 1039 1040

  for (size_t i = 0; i < cf_meta.levels.size(); ++i) {
    const auto& level_meta_from_cf = cf_meta.levels[i];
    const auto& level_meta_from_files = files_by_level[i];

    ASSERT_EQ(level_meta_from_cf.level, i);
    ASSERT_EQ(level_meta_from_cf.files.size(), level_meta_from_files.size());

    file_count += level_meta_from_cf.files.size();

Y
Yi Wu 已提交
1041
    uint64_t level_size = 0;
1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062
    for (size_t j = 0; j < level_meta_from_cf.files.size(); ++j) {
      const auto& file_meta_from_cf = level_meta_from_cf.files[j];
      const auto& file_meta_from_files = level_meta_from_files[j];

      level_size += file_meta_from_cf.size;

      ASSERT_EQ(file_meta_from_cf.file_number,
                file_meta_from_files.fd.GetNumber());
      ASSERT_EQ(file_meta_from_cf.file_number,
                TableFileNameToNumber(file_meta_from_cf.name));
      ASSERT_EQ(file_meta_from_cf.size, file_meta_from_files.fd.file_size);
      ASSERT_EQ(file_meta_from_cf.smallest_seqno,
                file_meta_from_files.fd.smallest_seqno);
      ASSERT_EQ(file_meta_from_cf.largest_seqno,
                file_meta_from_files.fd.largest_seqno);
      ASSERT_EQ(file_meta_from_cf.smallestkey,
                file_meta_from_files.smallest.user_key().ToString());
      ASSERT_EQ(file_meta_from_cf.largestkey,
                file_meta_from_files.largest.user_key().ToString());
      ASSERT_EQ(file_meta_from_cf.oldest_blob_file_number,
                file_meta_from_files.oldest_blob_file_number);
1063 1064 1065 1066 1067 1068 1069 1070
      ASSERT_EQ(file_meta_from_cf.oldest_ancester_time,
                file_meta_from_files.oldest_ancester_time);
      ASSERT_EQ(file_meta_from_cf.file_creation_time,
                file_meta_from_files.file_creation_time);
      ASSERT_GE(file_meta_from_cf.file_creation_time, start_time);
      ASSERT_LE(file_meta_from_cf.file_creation_time, end_time);
      ASSERT_GE(file_meta_from_cf.oldest_ancester_time, start_time);
      ASSERT_LE(file_meta_from_cf.oldest_ancester_time, end_time);
1071
    }
1072 1073

    ASSERT_EQ(level_meta_from_cf.size, level_size);
Y
Yi Wu 已提交
1074 1075
    cf_size += level_size;
  }
1076

Y
Yi Wu 已提交
1077 1078 1079
  ASSERT_EQ(cf_meta.file_count, file_count);
  ASSERT_EQ(cf_meta.size, cf_size);
}
1080

1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110 1111 1112 1113 1114 1115 1116 1117 1118
void CheckLiveFilesMeta(
    const std::vector<LiveFileMetaData>& live_file_meta,
    const std::vector<std::vector<FileMetaData>>& files_by_level) {
  size_t total_file_count = 0;
  for (const auto& f : files_by_level) {
    total_file_count += f.size();
  }

  ASSERT_EQ(live_file_meta.size(), total_file_count);

  int level = 0;
  int i = 0;

  for (const auto& meta : live_file_meta) {
    if (level != meta.level) {
      level = meta.level;
      i = 0;
    }

    ASSERT_LT(i, files_by_level[level].size());

    const auto& expected_meta = files_by_level[level][i];

    ASSERT_EQ(meta.column_family_name, kDefaultColumnFamilyName);
    ASSERT_EQ(meta.file_number, expected_meta.fd.GetNumber());
    ASSERT_EQ(meta.file_number, TableFileNameToNumber(meta.name));
    ASSERT_EQ(meta.size, expected_meta.fd.file_size);
    ASSERT_EQ(meta.smallest_seqno, expected_meta.fd.smallest_seqno);
    ASSERT_EQ(meta.largest_seqno, expected_meta.fd.largest_seqno);
    ASSERT_EQ(meta.smallestkey, expected_meta.smallest.user_key().ToString());
    ASSERT_EQ(meta.largestkey, expected_meta.largest.user_key().ToString());
    ASSERT_EQ(meta.oldest_blob_file_number,
              expected_meta.oldest_blob_file_number);

    ++i;
  }
}

Y
Yi Wu 已提交
1119
#ifndef ROCKSDB_LITE
1120
TEST_F(DBTest, MetaDataTest) {
Y
Yi Wu 已提交
1121 1122
  Options options = CurrentOptions();
  options.create_if_missing = true;
1123
  options.disable_auto_compactions = true;
1124 1125 1126 1127 1128

  int64_t temp_time = 0;
  options.env->GetCurrentTime(&temp_time);
  uint64_t start_time = static_cast<uint64_t>(temp_time);

Y
Yi Wu 已提交
1129
  DestroyAndReopen(options);
1130

Y
Yi Wu 已提交
1131 1132 1133
  Random rnd(301);
  int key_index = 0;
  for (int i = 0; i < 100; ++i) {
1134 1135 1136 1137 1138 1139 1140 1141 1142 1143 1144 1145 1146 1147 1148 1149
    // Add a single blob reference to each file
    std::string blob_index;
    BlobIndex::EncodeBlob(&blob_index, /* blob_file_number */ i + 1000,
                          /* offset */ 1234, /* size */ 5678, kNoCompression);

    WriteBatch batch;
    ASSERT_OK(WriteBatchInternal::PutBlobIndex(&batch, 0, Key(key_index),
                                               blob_index));
    ASSERT_OK(dbfull()->Write(WriteOptions(), &batch));

    ++key_index;

    // Fill up the rest of the file with random values.
    GenerateNewFile(&rnd, &key_index, /* nowait */ true);

    Flush();
Y
Yi Wu 已提交
1150
  }
1151 1152 1153 1154

  std::vector<std::vector<FileMetaData>> files_by_level;
  dbfull()->TEST_GetFilesMetaData(db_->DefaultColumnFamily(), &files_by_level);

1155 1156 1157
  options.env->GetCurrentTime(&temp_time);
  uint64_t end_time = static_cast<uint64_t>(temp_time);

1158 1159
  ColumnFamilyMetaData cf_meta;
  db_->GetColumnFamilyMetaData(&cf_meta);
1160
  CheckColumnFamilyMeta(cf_meta, files_by_level, start_time, end_time);
1161 1162 1163 1164

  std::vector<LiveFileMetaData> live_file_meta;
  db_->GetLiveFilesMetaData(&live_file_meta);
  CheckLiveFilesMeta(live_file_meta, files_by_level);
Y
Yi Wu 已提交
1165
}
1166

Y
Yi Wu 已提交
1167 1168 1169
namespace {
void MinLevelHelper(DBTest* self, Options& options) {
  Random rnd(301);
1170

Y
Yi Wu 已提交
1171 1172 1173 1174 1175
  for (int num = 0; num < options.level0_file_num_compaction_trigger - 1;
       num++) {
    std::vector<std::string> values;
    // Write 120KB (12 values, each 10K)
    for (int i = 0; i < 12; i++) {
M
mrambacher 已提交
1176
      values.push_back(rnd.RandomString(10000));
Y
Yi Wu 已提交
1177 1178 1179 1180 1181
      ASSERT_OK(self->Put(DBTestBase::Key(i), values[i]));
    }
    self->dbfull()->TEST_WaitForFlushMemTable();
    ASSERT_EQ(self->NumTableFilesAtLevel(0), num + 1);
  }
1182

Y
Yi Wu 已提交
1183 1184 1185
  // generate one more file in level-0, and should trigger level-0 compaction
  std::vector<std::string> values;
  for (int i = 0; i < 12; i++) {
M
mrambacher 已提交
1186
    values.push_back(rnd.RandomString(10000));
Y
Yi Wu 已提交
1187 1188 1189
    ASSERT_OK(self->Put(DBTestBase::Key(i), values[i]));
  }
  self->dbfull()->TEST_WaitForCompact();
1190

Y
Yi Wu 已提交
1191 1192
  ASSERT_EQ(self->NumTableFilesAtLevel(0), 0);
  ASSERT_EQ(self->NumTableFilesAtLevel(1), 1);
1193 1194
}

Y
Yi Wu 已提交
1195 1196 1197 1198 1199 1200 1201 1202 1203 1204 1205 1206
// returns false if the calling-Test should be skipped
bool MinLevelToCompress(CompressionType& type, Options& options, int wbits,
                        int lev, int strategy) {
  fprintf(stderr,
          "Test with compression options : window_bits = %d, level =  %d, "
          "strategy = %d}\n",
          wbits, lev, strategy);
  options.write_buffer_size = 100 << 10;  // 100KB
  options.arena_block_size = 4096;
  options.num_levels = 3;
  options.level0_file_num_compaction_trigger = 3;
  options.create_if_missing = true;
1207

Y
Yi Wu 已提交
1208 1209 1210 1211 1212 1213 1214 1215 1216 1217 1218 1219
  if (Snappy_Supported()) {
    type = kSnappyCompression;
    fprintf(stderr, "using snappy\n");
  } else if (Zlib_Supported()) {
    type = kZlibCompression;
    fprintf(stderr, "using zlib\n");
  } else if (BZip2_Supported()) {
    type = kBZip2Compression;
    fprintf(stderr, "using bzip2\n");
  } else if (LZ4_Supported()) {
    type = kLZ4Compression;
    fprintf(stderr, "using lz4\n");
1220 1221 1222 1223
  } else if (XPRESS_Supported()) {
    type = kXpressCompression;
    fprintf(stderr, "using xpress\n");
  } else if (ZSTD_Supported()) {
S
sdong 已提交
1224
    type = kZSTD;
1225
    fprintf(stderr, "using ZSTD\n");
Y
Yi Wu 已提交
1226 1227 1228 1229 1230
  } else {
    fprintf(stderr, "skipping test, compression disabled\n");
    return false;
  }
  options.compression_per_level.resize(options.num_levels);
1231

Y
Yi Wu 已提交
1232 1233 1234 1235 1236 1237 1238 1239
  // do not compress L0
  for (int i = 0; i < 1; i++) {
    options.compression_per_level[i] = kNoCompression;
  }
  for (int i = 1; i < options.num_levels; i++) {
    options.compression_per_level[i] = type;
  }
  return true;
J
jorlow@chromium.org 已提交
1240
}
Y
Yi Wu 已提交
1241
}  // namespace
J
jorlow@chromium.org 已提交
1242

Y
Yi Wu 已提交
1243 1244 1245 1246 1247 1248 1249 1250
TEST_F(DBTest, MinLevelToCompress1) {
  Options options = CurrentOptions();
  CompressionType type = kSnappyCompression;
  if (!MinLevelToCompress(type, options, -14, -1, 0)) {
    return;
  }
  Reopen(options);
  MinLevelHelper(this, options);
1251

Y
Yi Wu 已提交
1252 1253 1254 1255 1256 1257 1258 1259 1260
  // do not compress L0 and L1
  for (int i = 0; i < 2; i++) {
    options.compression_per_level[i] = kNoCompression;
  }
  for (int i = 2; i < options.num_levels; i++) {
    options.compression_per_level[i] = type;
  }
  DestroyAndReopen(options);
  MinLevelHelper(this, options);
1261 1262
}

Y
Yi Wu 已提交
1263 1264 1265 1266 1267 1268 1269 1270
TEST_F(DBTest, MinLevelToCompress2) {
  Options options = CurrentOptions();
  CompressionType type = kSnappyCompression;
  if (!MinLevelToCompress(type, options, 15, -1, 0)) {
    return;
  }
  Reopen(options);
  MinLevelHelper(this, options);
I
Igor Canadi 已提交
1271

Y
Yi Wu 已提交
1272 1273 1274 1275 1276 1277
  // do not compress L0 and L1
  for (int i = 0; i < 2; i++) {
    options.compression_per_level[i] = kNoCompression;
  }
  for (int i = 2; i < options.num_levels; i++) {
    options.compression_per_level[i] = type;
I
Igor Canadi 已提交
1278
  }
Y
Yi Wu 已提交
1279 1280 1281
  DestroyAndReopen(options);
  MinLevelHelper(this, options);
}
I
Igor Canadi 已提交
1282

1283 1284 1285
// This test may fail because of a legit case that multiple L0 files
// are trivial moved to L1.
TEST_F(DBTest, DISABLED_RepeatedWritesToSameKey) {
I
Igor Canadi 已提交
1286 1287
  do {
    Options options = CurrentOptions();
Y
Yi Wu 已提交
1288 1289 1290
    options.env = env_;
    options.write_buffer_size = 100000;  // Small write buffer
    CreateAndReopenWithCF({"pikachu"}, options);
I
Igor Canadi 已提交
1291

Y
Yi Wu 已提交
1292 1293 1294 1295
    // We must have at most one file per level except for level-0,
    // which may have up to kL0_StopWritesTrigger files.
    const int kMaxFiles =
        options.num_levels + options.level0_stop_writes_trigger;
1296

Y
Yi Wu 已提交
1297 1298
    Random rnd(301);
    std::string value =
M
mrambacher 已提交
1299
        rnd.RandomString(static_cast<int>(2 * options.write_buffer_size));
Y
Yi Wu 已提交
1300 1301 1302
    for (int i = 0; i < 5 * kMaxFiles; i++) {
      ASSERT_OK(Put(1, "key", value));
      ASSERT_LE(TotalTableFiles(1), kMaxFiles);
1303
    }
1304
  } while (ChangeCompactOptions());
1305
}
Y
Yi Wu 已提交
1306
#endif  // ROCKSDB_LITE
1307

Y
Yi Wu 已提交
1308
TEST_F(DBTest, SparseMerge) {
1309 1310
  do {
    Options options = CurrentOptions();
Y
Yi Wu 已提交
1311
    options.compression = kNoCompression;
L
Lei Jin 已提交
1312
    CreateAndReopenWithCF({"pikachu"}, options);
Y
Yi Wu 已提交
1313 1314 1315 1316 1317 1318 1319 1320 1321 1322 1323 1324 1325 1326 1327 1328 1329 1330

    FillLevels("A", "Z", 1);

    // Suppose there is:
    //    small amount of data with prefix A
    //    large amount of data with prefix B
    //    small amount of data with prefix C
    // and that recent updates have made small changes to all three prefixes.
    // Check that we do not do a compaction that merges all of B in one shot.
    const std::string value(1000, 'x');
    Put(1, "A", "va");
    // Write approximately 100MB of "B" values
    for (int i = 0; i < 100000; i++) {
      char key[100];
      snprintf(key, sizeof(key), "B%010d", i);
      Put(1, key, value);
    }
    Put(1, "C", "vc");
1331
    ASSERT_OK(Flush(1));
Y
Yi Wu 已提交
1332
    dbfull()->TEST_CompactRange(0, nullptr, nullptr, handles_[1]);
1333

Y
Yi Wu 已提交
1334 1335 1336 1337
    // Make sparse update
    Put(1, "A", "va2");
    Put(1, "B100", "bvalue2");
    Put(1, "C", "vc2");
1338
    ASSERT_OK(Flush(1));
Y
Yi Wu 已提交
1339 1340 1341 1342 1343 1344 1345 1346 1347 1348 1349

    // Compactions should not cause us to create a situation where
    // a file overlaps too much data at the next level.
    ASSERT_LE(dbfull()->TEST_MaxNextLevelOverlappingBytes(handles_[1]),
              20 * 1048576);
    dbfull()->TEST_CompactRange(0, nullptr, nullptr);
    ASSERT_LE(dbfull()->TEST_MaxNextLevelOverlappingBytes(handles_[1]),
              20 * 1048576);
    dbfull()->TEST_CompactRange(1, nullptr, nullptr);
    ASSERT_LE(dbfull()->TEST_MaxNextLevelOverlappingBytes(handles_[1]),
              20 * 1048576);
1350
  } while (ChangeCompactOptions());
1351 1352
}

Y
Yi Wu 已提交
1353 1354 1355 1356 1357 1358 1359 1360 1361 1362
#ifndef ROCKSDB_LITE
static bool Between(uint64_t val, uint64_t low, uint64_t high) {
  bool result = (val >= low) && (val <= high);
  if (!result) {
    fprintf(stderr, "Value %llu is not in range [%llu, %llu]\n",
            (unsigned long long)(val), (unsigned long long)(low),
            (unsigned long long)(high));
  }
  return result;
}
1363

Y
Yi Wu 已提交
1364
TEST_F(DBTest, ApproximateSizesMemTable) {
1365
  Options options = CurrentOptions();
Y
Yi Wu 已提交
1366 1367 1368 1369
  options.write_buffer_size = 100000000;  // Large write buffer
  options.compression = kNoCompression;
  options.create_if_missing = true;
  DestroyAndReopen(options);
1370
  auto default_cf = db_->DefaultColumnFamily();
1371

Y
Yi Wu 已提交
1372 1373 1374
  const int N = 128;
  Random rnd(301);
  for (int i = 0; i < N; i++) {
M
mrambacher 已提交
1375
    ASSERT_OK(Put(Key(i), rnd.RandomString(1024)));
Y
Yi Wu 已提交
1376
  }
1377

Y
Yi Wu 已提交
1378 1379 1380 1381
  uint64_t size;
  std::string start = Key(50);
  std::string end = Key(60);
  Range r(start, end);
1382 1383 1384 1385
  SizeApproximationOptions size_approx_options;
  size_approx_options.include_memtabtles = true;
  size_approx_options.include_files = true;
  db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1, &size);
Y
Yi Wu 已提交
1386 1387 1388
  ASSERT_GT(size, 6000);
  ASSERT_LT(size, 204800);
  // Zero if not including mem table
1389
  db_->GetApproximateSizes(&r, 1, &size);
Y
Yi Wu 已提交
1390
  ASSERT_EQ(size, 0);
1391

Y
Yi Wu 已提交
1392 1393 1394
  start = Key(500);
  end = Key(600);
  r = Range(start, end);
1395
  db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1, &size);
Y
Yi Wu 已提交
1396
  ASSERT_EQ(size, 0);
1397

Y
Yi Wu 已提交
1398
  for (int i = 0; i < N; i++) {
M
mrambacher 已提交
1399
    ASSERT_OK(Put(Key(1000 + i), rnd.RandomString(1024)));
Y
Yi Wu 已提交
1400
  }
1401

Y
Yi Wu 已提交
1402 1403 1404
  start = Key(500);
  end = Key(600);
  r = Range(start, end);
1405
  db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1, &size);
Y
Yi Wu 已提交
1406
  ASSERT_EQ(size, 0);
1407

Y
Yi Wu 已提交
1408 1409 1410
  start = Key(100);
  end = Key(1020);
  r = Range(start, end);
1411
  db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1, &size);
Y
Yi Wu 已提交
1412
  ASSERT_GT(size, 6000);
H
heyongqiang 已提交
1413

Y
Yi Wu 已提交
1414 1415 1416 1417
  options.max_write_buffer_number = 8;
  options.min_write_buffer_number_to_merge = 5;
  options.write_buffer_size = 1024 * N;  // Not very large
  DestroyAndReopen(options);
1418
  default_cf = db_->DefaultColumnFamily();
1419

Y
Yi Wu 已提交
1420 1421 1422 1423 1424 1425
  int keys[N * 3];
  for (int i = 0; i < N; i++) {
    keys[i * 3] = i * 5;
    keys[i * 3 + 1] = i * 5 + 1;
    keys[i * 3 + 2] = i * 5 + 2;
  }
1426 1427 1428
  // MemTable entry counting is estimated and can vary greatly depending on
  // layout. Thus, using deterministic seed for test stability.
  RandomShuffle(std::begin(keys), std::end(keys), rnd.Next());
H
heyongqiang 已提交
1429

Y
Yi Wu 已提交
1430
  for (int i = 0; i < N * 3; i++) {
M
mrambacher 已提交
1431
    ASSERT_OK(Put(Key(keys[i] + 1000), rnd.RandomString(1024)));
Y
Yi Wu 已提交
1432
  }
H
heyongqiang 已提交
1433

Y
Yi Wu 已提交
1434 1435 1436
  start = Key(100);
  end = Key(300);
  r = Range(start, end);
1437
  db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1, &size);
Y
Yi Wu 已提交
1438
  ASSERT_EQ(size, 0);
H
heyongqiang 已提交
1439

Y
Yi Wu 已提交
1440 1441 1442
  start = Key(1050);
  end = Key(1080);
  r = Range(start, end);
1443
  db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1, &size);
Y
Yi Wu 已提交
1444
  ASSERT_GT(size, 6000);
H
heyongqiang 已提交
1445

Y
Yi Wu 已提交
1446 1447 1448
  start = Key(2100);
  end = Key(2300);
  r = Range(start, end);
1449
  db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1, &size);
Y
Yi Wu 已提交
1450
  ASSERT_EQ(size, 0);
1451

Y
Yi Wu 已提交
1452 1453 1454 1455
  start = Key(1050);
  end = Key(1080);
  r = Range(start, end);
  uint64_t size_with_mt, size_without_mt;
1456 1457
  db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1,
                           &size_with_mt);
Y
Yi Wu 已提交
1458
  ASSERT_GT(size_with_mt, 6000);
1459
  db_->GetApproximateSizes(&r, 1, &size_without_mt);
Y
Yi Wu 已提交
1460 1461 1462 1463 1464
  ASSERT_EQ(size_without_mt, 0);

  Flush();

  for (int i = 0; i < N; i++) {
M
mrambacher 已提交
1465
    ASSERT_OK(Put(Key(i + 1000), rnd.RandomString(1024)));
Y
Yi Wu 已提交
1466 1467 1468 1469 1470
  }

  start = Key(1050);
  end = Key(1080);
  r = Range(start, end);
1471 1472
  db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1,
                           &size_with_mt);
1473
  db_->GetApproximateSizes(&r, 1, &size_without_mt);
Y
Yi Wu 已提交
1474 1475
  ASSERT_GT(size_with_mt, size_without_mt);
  ASSERT_GT(size_without_mt, 6000);
1476 1477 1478 1479 1480 1481 1482 1483 1484 1485 1486 1487 1488 1489 1490 1491 1492 1493 1494 1495

  // Check that include_memtabtles flag works as expected
  size_approx_options.include_memtabtles = false;
  db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1, &size);
  ASSERT_EQ(size, size_without_mt);

  // Check that files_size_error_margin works as expected, when the heuristic
  // conditions are not met
  start = Key(1);
  end = Key(1000 + N - 2);
  r = Range(start, end);
  size_approx_options.files_size_error_margin = -1.0;  // disabled
  db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1, &size);
  uint64_t size2;
  size_approx_options.files_size_error_margin = 0.5;  // enabled, but not used
  db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1, &size2);
  ASSERT_EQ(size, size2);
}

TEST_F(DBTest, ApproximateSizesFilesWithErrorMargin) {
1496 1497 1498 1499 1500
  // Roughly 4 keys per data block, 1000 keys per file,
  // with filter substantially larger than a data block
  BlockBasedTableOptions table_options;
  table_options.filter_policy.reset(NewBloomFilterPolicy(16));
  table_options.block_size = 100;
1501
  Options options = CurrentOptions();
1502 1503
  options.table_factory.reset(NewBlockBasedTableFactory(table_options));
  options.write_buffer_size = 24 * 1024;
1504 1505
  options.compression = kNoCompression;
  options.create_if_missing = true;
1506
  options.target_file_size_base = 24 * 1024;
1507 1508 1509 1510 1511 1512
  DestroyAndReopen(options);
  const auto default_cf = db_->DefaultColumnFamily();

  const int N = 64000;
  Random rnd(301);
  for (int i = 0; i < N; i++) {
M
mrambacher 已提交
1513
    ASSERT_OK(Put(Key(i), rnd.RandomString(24)));
1514 1515 1516 1517 1518 1519 1520 1521
  }
  // Flush everything to files
  Flush();
  // Compact the entire key space into the next level
  db_->CompactRange(CompactRangeOptions(), default_cf, nullptr, nullptr);

  // Write more keys
  for (int i = N; i < (N + N / 4); i++) {
M
mrambacher 已提交
1522
    ASSERT_OK(Put(Key(i), rnd.RandomString(24)));
1523 1524 1525 1526 1527 1528 1529
  }
  // Flush everything to files again
  Flush();

  // Wait for compaction to finish
  ASSERT_OK(dbfull()->TEST_WaitForCompact());

1530 1531 1532 1533 1534 1535 1536 1537 1538 1539 1540 1541 1542 1543 1544 1545 1546 1547 1548 1549 1550 1551 1552
  {
    const std::string start = Key(0);
    const std::string end = Key(2 * N);
    const Range r(start, end);

    SizeApproximationOptions size_approx_options;
    size_approx_options.include_memtabtles = false;
    size_approx_options.include_files = true;
    size_approx_options.files_size_error_margin = -1.0;  // disabled

    // Get the precise size without any approximation heuristic
    uint64_t size;
    db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1, &size);
    ASSERT_NE(size, 0);

    // Get the size with an approximation heuristic
    uint64_t size2;
    const double error_margin = 0.2;
    size_approx_options.files_size_error_margin = error_margin;
    db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1, &size2);
    ASSERT_LT(size2, size * (1 + error_margin));
    ASSERT_GT(size2, size * (1 - error_margin));
  }
1553

1554 1555 1556 1557 1558 1559 1560 1561 1562 1563 1564 1565 1566 1567 1568
  {
    // Ensure that metadata is not falsely attributed only to the last data in
    // the file. (In some applications, filters can be large portion of data
    // size.)
    // Perform many queries over small range, enough to ensure crossing file
    // boundary, and make sure we never see a spike for large filter.
    for (int i = 0; i < 3000; i += 10) {
      const std::string start = Key(i);
      const std::string end = Key(i + 11);  // overlap by 1 key
      const Range r(start, end);
      uint64_t size;
      db_->GetApproximateSizes(&r, 1, &size);
      ASSERT_LE(size, 11 * 100);
    }
  }
H
heyongqiang 已提交
1569 1570
}

1571 1572 1573 1574 1575 1576 1577 1578 1579 1580
TEST_F(DBTest, GetApproximateMemTableStats) {
  Options options = CurrentOptions();
  options.write_buffer_size = 100000000;
  options.compression = kNoCompression;
  options.create_if_missing = true;
  DestroyAndReopen(options);

  const int N = 128;
  Random rnd(301);
  for (int i = 0; i < N; i++) {
M
mrambacher 已提交
1581
    ASSERT_OK(Put(Key(i), rnd.RandomString(1024)));
1582 1583 1584 1585 1586 1587 1588 1589 1590 1591 1592 1593 1594 1595 1596 1597 1598 1599 1600 1601 1602 1603 1604 1605 1606 1607 1608 1609 1610 1611 1612
  }

  uint64_t count;
  uint64_t size;

  std::string start = Key(50);
  std::string end = Key(60);
  Range r(start, end);
  db_->GetApproximateMemTableStats(r, &count, &size);
  ASSERT_GT(count, 0);
  ASSERT_LE(count, N);
  ASSERT_GT(size, 6000);
  ASSERT_LT(size, 204800);

  start = Key(500);
  end = Key(600);
  r = Range(start, end);
  db_->GetApproximateMemTableStats(r, &count, &size);
  ASSERT_EQ(count, 0);
  ASSERT_EQ(size, 0);

  Flush();

  start = Key(50);
  end = Key(60);
  r = Range(start, end);
  db_->GetApproximateMemTableStats(r, &count, &size);
  ASSERT_EQ(count, 0);
  ASSERT_EQ(size, 0);

  for (int i = 0; i < N; i++) {
M
mrambacher 已提交
1613
    ASSERT_OK(Put(Key(1000 + i), rnd.RandomString(1024)));
1614 1615 1616 1617 1618 1619 1620 1621 1622 1623
  }

  start = Key(100);
  end = Key(1020);
  r = Range(start, end);
  db_->GetApproximateMemTableStats(r, &count, &size);
  ASSERT_GT(count, 20);
  ASSERT_GT(size, 6000);
}

Y
Yi Wu 已提交
1624
TEST_F(DBTest, ApproximateSizes) {
S
Sanjay Ghemawat 已提交
1625
  do {
Y
Yi Wu 已提交
1626 1627 1628 1629 1630 1631
    Options options = CurrentOptions();
    options.write_buffer_size = 100000000;  // Large write buffer
    options.compression = kNoCompression;
    options.create_if_missing = true;
    DestroyAndReopen(options);
    CreateAndReopenWithCF({"pikachu"}, options);
J
jorlow@chromium.org 已提交
1632

Y
Yi Wu 已提交
1633 1634 1635
    ASSERT_TRUE(Between(Size("", "xyz", 1), 0, 0));
    ReopenWithColumnFamilies({"default", "pikachu"}, options);
    ASSERT_TRUE(Between(Size("", "xyz", 1), 0, 0));
I
Igor Canadi 已提交
1636

Y
Yi Wu 已提交
1637 1638 1639 1640 1641 1642 1643
    // Write 8MB (80 values, each 100K)
    ASSERT_EQ(NumTableFilesAtLevel(0, 1), 0);
    const int N = 80;
    static const int S1 = 100000;
    static const int S2 = 105000;  // Allow some expansion from metadata
    Random rnd(301);
    for (int i = 0; i < N; i++) {
M
mrambacher 已提交
1644
      ASSERT_OK(Put(1, Key(i), rnd.RandomString(S1)));
1645 1646
    }

Y
Yi Wu 已提交
1647 1648
    // 0 because GetApproximateSizes() does not account for memtable space
    ASSERT_TRUE(Between(Size("", Key(50), 1), 0, 0));
I
Igor Canadi 已提交
1649

Y
Yi Wu 已提交
1650 1651 1652
    // Check sizes across recovery by reopening a few times
    for (int run = 0; run < 3; run++) {
      ReopenWithColumnFamilies({"default", "pikachu"}, options);
I
Igor Canadi 已提交
1653

Y
Yi Wu 已提交
1654 1655 1656 1657 1658 1659 1660 1661 1662 1663 1664 1665 1666 1667 1668 1669 1670 1671 1672 1673 1674 1675 1676 1677
      for (int compact_start = 0; compact_start < N; compact_start += 10) {
        for (int i = 0; i < N; i += 10) {
          ASSERT_TRUE(Between(Size("", Key(i), 1), S1 * i, S2 * i));
          ASSERT_TRUE(Between(Size("", Key(i) + ".suffix", 1), S1 * (i + 1),
                              S2 * (i + 1)));
          ASSERT_TRUE(Between(Size(Key(i), Key(i + 10), 1), S1 * 10, S2 * 10));
        }
        ASSERT_TRUE(Between(Size("", Key(50), 1), S1 * 50, S2 * 50));
        ASSERT_TRUE(
            Between(Size("", Key(50) + ".suffix", 1), S1 * 50, S2 * 50));

        std::string cstart_str = Key(compact_start);
        std::string cend_str = Key(compact_start + 9);
        Slice cstart = cstart_str;
        Slice cend = cend_str;
        dbfull()->TEST_CompactRange(0, &cstart, &cend, handles_[1]);
      }

      ASSERT_EQ(NumTableFilesAtLevel(0, 1), 0);
      ASSERT_GT(NumTableFilesAtLevel(1, 1), 0);
    }
    // ApproximateOffsetOf() is not yet implemented in plain table format.
  } while (ChangeOptions(kSkipUniversalCompaction | kSkipFIFOCompaction |
                         kSkipPlainTable | kSkipHashIndex));
I
Igor Canadi 已提交
1678
}
J
jorlow@chromium.org 已提交
1679

Y
Yi Wu 已提交
1680
TEST_F(DBTest, ApproximateSizes_MixOfSmallAndLarge) {
1681
  do {
Y
Yi Wu 已提交
1682 1683
    Options options = CurrentOptions();
    options.compression = kNoCompression;
L
Lei Jin 已提交
1684
    CreateAndReopenWithCF({"pikachu"}, options);
Y
Yi Wu 已提交
1685 1686

    Random rnd(301);
M
mrambacher 已提交
1687 1688 1689
    std::string big1 = rnd.RandomString(100000);
    ASSERT_OK(Put(1, Key(0), rnd.RandomString(10000)));
    ASSERT_OK(Put(1, Key(1), rnd.RandomString(10000)));
Y
Yi Wu 已提交
1690
    ASSERT_OK(Put(1, Key(2), big1));
M
mrambacher 已提交
1691
    ASSERT_OK(Put(1, Key(3), rnd.RandomString(10000)));
Y
Yi Wu 已提交
1692
    ASSERT_OK(Put(1, Key(4), big1));
M
mrambacher 已提交
1693 1694 1695
    ASSERT_OK(Put(1, Key(5), rnd.RandomString(10000)));
    ASSERT_OK(Put(1, Key(6), rnd.RandomString(300000)));
    ASSERT_OK(Put(1, Key(7), rnd.RandomString(10000)));
Y
Yi Wu 已提交
1696 1697 1698

    // Check sizes across recovery by reopening a few times
    for (int run = 0; run < 3; run++) {
L
Lei Jin 已提交
1699
      ReopenWithColumnFamilies({"default", "pikachu"}, options);
A
Abhishek Kona 已提交
1700

Y
Yi Wu 已提交
1701 1702 1703 1704 1705
      ASSERT_TRUE(Between(Size("", Key(0), 1), 0, 0));
      ASSERT_TRUE(Between(Size("", Key(1), 1), 10000, 11000));
      ASSERT_TRUE(Between(Size("", Key(2), 1), 20000, 21000));
      ASSERT_TRUE(Between(Size("", Key(3), 1), 120000, 121000));
      ASSERT_TRUE(Between(Size("", Key(4), 1), 130000, 131000));
1706 1707 1708 1709 1710
      ASSERT_TRUE(Between(Size("", Key(5), 1), 230000, 232000));
      ASSERT_TRUE(Between(Size("", Key(6), 1), 240000, 242000));
      // Ensure some overhead is accounted for, even without including all
      ASSERT_TRUE(Between(Size("", Key(7), 1), 540500, 545000));
      ASSERT_TRUE(Between(Size("", Key(8), 1), 550500, 555000));
M
Mayank Agarwal 已提交
1711

1712
      ASSERT_TRUE(Between(Size(Key(3), Key(5), 1), 110100, 111000));
M
Mayank Agarwal 已提交
1713

Y
Yi Wu 已提交
1714 1715 1716 1717
      dbfull()->TEST_CompactRange(0, nullptr, nullptr, handles_[1]);
    }
    // ApproximateOffsetOf() is not yet implemented in plain table format.
  } while (ChangeOptions(kSkipPlainTable));
M
Mayank Agarwal 已提交
1718
}
Y
Yi Wu 已提交
1719
#endif  // ROCKSDB_LITE
M
Mayank Agarwal 已提交
1720

I
Islam AbdelRahman 已提交
1721
#ifndef ROCKSDB_LITE
Y
Yi Wu 已提交
1722
TEST_F(DBTest, Snapshot) {
1723
  env_->SetMockSleep();
Y
Yi Wu 已提交
1724 1725
  anon::OptionsOverride options_override;
  options_override.skip_policy = kSkipNoSnapshot;
1726
  do {
Y
Yi Wu 已提交
1727 1728 1729
    CreateAndReopenWithCF({"pikachu"}, CurrentOptions(options_override));
    Put(0, "foo", "0v1");
    Put(1, "foo", "1v1");
1730

Y
Yi Wu 已提交
1731 1732 1733 1734
    const Snapshot* s1 = db_->GetSnapshot();
    ASSERT_EQ(1U, GetNumSnapshots());
    uint64_t time_snap1 = GetTimeOldestSnapshots();
    ASSERT_GT(time_snap1, 0U);
1735
    ASSERT_EQ(GetSequenceOldestSnapshots(), s1->GetSequenceNumber());
Y
Yi Wu 已提交
1736 1737
    Put(0, "foo", "0v2");
    Put(1, "foo", "1v2");
J
jorlow@chromium.org 已提交
1738

1739
    env_->MockSleepForSeconds(1);
1740

Y
Yi Wu 已提交
1741 1742 1743
    const Snapshot* s2 = db_->GetSnapshot();
    ASSERT_EQ(2U, GetNumSnapshots());
    ASSERT_EQ(time_snap1, GetTimeOldestSnapshots());
1744
    ASSERT_EQ(GetSequenceOldestSnapshots(), s1->GetSequenceNumber());
Y
Yi Wu 已提交
1745 1746
    Put(0, "foo", "0v3");
    Put(1, "foo", "1v3");
1747

Y
Yi Wu 已提交
1748 1749 1750 1751
    {
      ManagedSnapshot s3(db_);
      ASSERT_EQ(3U, GetNumSnapshots());
      ASSERT_EQ(time_snap1, GetTimeOldestSnapshots());
1752
      ASSERT_EQ(GetSequenceOldestSnapshots(), s1->GetSequenceNumber());
1753

Y
Yi Wu 已提交
1754 1755 1756 1757 1758 1759 1760 1761 1762 1763
      Put(0, "foo", "0v4");
      Put(1, "foo", "1v4");
      ASSERT_EQ("0v1", Get(0, "foo", s1));
      ASSERT_EQ("1v1", Get(1, "foo", s1));
      ASSERT_EQ("0v2", Get(0, "foo", s2));
      ASSERT_EQ("1v2", Get(1, "foo", s2));
      ASSERT_EQ("0v3", Get(0, "foo", s3.snapshot()));
      ASSERT_EQ("1v3", Get(1, "foo", s3.snapshot()));
      ASSERT_EQ("0v4", Get(0, "foo"));
      ASSERT_EQ("1v4", Get(1, "foo"));
1764 1765
    }

Y
Yi Wu 已提交
1766 1767
    ASSERT_EQ(2U, GetNumSnapshots());
    ASSERT_EQ(time_snap1, GetTimeOldestSnapshots());
1768
    ASSERT_EQ(GetSequenceOldestSnapshots(), s1->GetSequenceNumber());
Y
Yi Wu 已提交
1769 1770 1771 1772 1773 1774
    ASSERT_EQ("0v1", Get(0, "foo", s1));
    ASSERT_EQ("1v1", Get(1, "foo", s1));
    ASSERT_EQ("0v2", Get(0, "foo", s2));
    ASSERT_EQ("1v2", Get(1, "foo", s2));
    ASSERT_EQ("0v4", Get(0, "foo"));
    ASSERT_EQ("1v4", Get(1, "foo"));
1775

Y
Yi Wu 已提交
1776 1777 1778 1779 1780 1781 1782
    db_->ReleaseSnapshot(s1);
    ASSERT_EQ("0v2", Get(0, "foo", s2));
    ASSERT_EQ("1v2", Get(1, "foo", s2));
    ASSERT_EQ("0v4", Get(0, "foo"));
    ASSERT_EQ("1v4", Get(1, "foo"));
    ASSERT_EQ(1U, GetNumSnapshots());
    ASSERT_LT(time_snap1, GetTimeOldestSnapshots());
1783
    ASSERT_EQ(GetSequenceOldestSnapshots(), s2->GetSequenceNumber());
1784

Y
Yi Wu 已提交
1785 1786
    db_->ReleaseSnapshot(s2);
    ASSERT_EQ(0U, GetNumSnapshots());
1787
    ASSERT_EQ(GetSequenceOldestSnapshots(), 0);
Y
Yi Wu 已提交
1788 1789
    ASSERT_EQ("0v4", Get(0, "foo"));
    ASSERT_EQ("1v4", Get(1, "foo"));
1790
  } while (ChangeOptions());
Y
Yi Wu 已提交
1791
}
1792

Y
Yi Wu 已提交
1793 1794 1795 1796 1797 1798 1799 1800
TEST_F(DBTest, HiddenValuesAreRemoved) {
  anon::OptionsOverride options_override;
  options_override.skip_policy = kSkipNoSnapshot;
  do {
    Options options = CurrentOptions(options_override);
    CreateAndReopenWithCF({"pikachu"}, options);
    Random rnd(301);
    FillLevels("a", "z", 1);
1801

M
mrambacher 已提交
1802
    std::string big = rnd.RandomString(50000);
Y
Yi Wu 已提交
1803 1804 1805 1806 1807
    Put(1, "foo", big);
    Put(1, "pastfoo", "v");
    const Snapshot* snapshot = db_->GetSnapshot();
    Put(1, "foo", "tiny");
    Put(1, "pastfoo2", "v2");  // Advance sequence number one more
1808

Y
Yi Wu 已提交
1809 1810
    ASSERT_OK(Flush(1));
    ASSERT_GT(NumTableFilesAtLevel(0, 1), 0);
S
sdong 已提交
1811

Y
Yi Wu 已提交
1812 1813 1814 1815 1816 1817 1818 1819 1820 1821 1822
    ASSERT_EQ(big, Get(1, "foo", snapshot));
    ASSERT_TRUE(Between(Size("", "pastfoo", 1), 50000, 60000));
    db_->ReleaseSnapshot(snapshot);
    ASSERT_EQ(AllEntriesFor("foo", 1), "[ tiny, " + big + " ]");
    Slice x("x");
    dbfull()->TEST_CompactRange(0, nullptr, &x, handles_[1]);
    ASSERT_EQ(AllEntriesFor("foo", 1), "[ tiny ]");
    ASSERT_EQ(NumTableFilesAtLevel(0, 1), 0);
    ASSERT_GE(NumTableFilesAtLevel(1, 1), 1);
    dbfull()->TEST_CompactRange(1, nullptr, &x, handles_[1]);
    ASSERT_EQ(AllEntriesFor("foo", 1), "[ tiny ]");
1823

Y
Yi Wu 已提交
1824 1825 1826 1827
    ASSERT_TRUE(Between(Size("", "pastfoo", 1), 0, 1000));
    // ApproximateOffsetOf() is not yet implemented in plain table format,
    // which is used by Size().
  } while (ChangeOptions(kSkipUniversalCompaction | kSkipFIFOCompaction |
1828
                         kSkipPlainTable));
Y
Yi Wu 已提交
1829 1830
}
#endif  // ROCKSDB_LITE
1831

Y
Yi Wu 已提交
1832 1833 1834 1835 1836 1837 1838 1839 1840 1841 1842 1843 1844 1845 1846 1847 1848
TEST_F(DBTest, UnremovableSingleDelete) {
  // If we compact:
  //
  // Put(A, v1) Snapshot SingleDelete(A) Put(A, v2)
  //
  // We do not want to end up with:
  //
  // Put(A, v1) Snapshot Put(A, v2)
  //
  // Because a subsequent SingleDelete(A) would delete the Put(A, v2)
  // but not Put(A, v1), so Get(A) would return v1.
  anon::OptionsOverride options_override;
  options_override.skip_policy = kSkipNoSnapshot;
  do {
    Options options = CurrentOptions(options_override);
    options.disable_auto_compactions = true;
    CreateAndReopenWithCF({"pikachu"}, options);
K
kailiu 已提交
1849

Y
Yi Wu 已提交
1850 1851 1852 1853 1854
    Put(1, "foo", "first");
    const Snapshot* snapshot = db_->GetSnapshot();
    SingleDelete(1, "foo");
    Put(1, "foo", "second");
    ASSERT_OK(Flush(1));
1855

Y
Yi Wu 已提交
1856 1857
    ASSERT_EQ("first", Get(1, "foo", snapshot));
    ASSERT_EQ("second", Get(1, "foo"));
1858

Y
Yi Wu 已提交
1859 1860 1861
    dbfull()->CompactRange(CompactRangeOptions(), handles_[1], nullptr,
                           nullptr);
    ASSERT_EQ("[ second, SDEL, first ]", AllEntriesFor("foo", 1));
1862

Y
Yi Wu 已提交
1863
    SingleDelete(1, "foo");
1864

Y
Yi Wu 已提交
1865 1866
    ASSERT_EQ("first", Get(1, "foo", snapshot));
    ASSERT_EQ("NOT_FOUND", Get(1, "foo"));
1867

Y
Yi Wu 已提交
1868 1869
    dbfull()->CompactRange(CompactRangeOptions(), handles_[1], nullptr,
                           nullptr);
1870

Y
Yi Wu 已提交
1871 1872 1873
    ASSERT_EQ("first", Get(1, "foo", snapshot));
    ASSERT_EQ("NOT_FOUND", Get(1, "foo"));
    db_->ReleaseSnapshot(snapshot);
1874 1875 1876 1877 1878
    // Skip FIFO and universal compaction beccause they do not apply to the test
    // case. Skip MergePut because single delete does not get removed when it
    // encounters a merge.
  } while (ChangeOptions(kSkipFIFOCompaction | kSkipUniversalCompaction |
                         kSkipMergePut));
1879 1880
}

Y
Yi Wu 已提交
1881 1882 1883 1884 1885 1886 1887 1888 1889 1890
#ifndef ROCKSDB_LITE
TEST_F(DBTest, DeletionMarkers1) {
  Options options = CurrentOptions();
  CreateAndReopenWithCF({"pikachu"}, options);
  Put(1, "foo", "v1");
  ASSERT_OK(Flush(1));
  const int last = 2;
  MoveFilesToLevel(last, 1);
  // foo => v1 is now in last level
  ASSERT_EQ(NumTableFilesAtLevel(last, 1), 1);
1891

Y
Yi Wu 已提交
1892 1893 1894 1895 1896 1897 1898
  // Place a table at level last-1 to prevent merging with preceding mutation
  Put(1, "a", "begin");
  Put(1, "z", "end");
  Flush(1);
  MoveFilesToLevel(last - 1, 1);
  ASSERT_EQ(NumTableFilesAtLevel(last, 1), 1);
  ASSERT_EQ(NumTableFilesAtLevel(last - 1, 1), 1);
1899

Y
Yi Wu 已提交
1900 1901 1902 1903 1904 1905 1906 1907 1908 1909 1910 1911 1912 1913
  Delete(1, "foo");
  Put(1, "foo", "v2");
  ASSERT_EQ(AllEntriesFor("foo", 1), "[ v2, DEL, v1 ]");
  ASSERT_OK(Flush(1));  // Moves to level last-2
  ASSERT_EQ(AllEntriesFor("foo", 1), "[ v2, v1 ]");
  Slice z("z");
  dbfull()->TEST_CompactRange(last - 2, nullptr, &z, handles_[1]);
  // DEL eliminated, but v1 remains because we aren't compacting that level
  // (DEL can be eliminated because v2 hides v1).
  ASSERT_EQ(AllEntriesFor("foo", 1), "[ v2, v1 ]");
  dbfull()->TEST_CompactRange(last - 1, nullptr, nullptr, handles_[1]);
  // Merging last-1 w/ last, so we are the base level for "foo", so
  // DEL is removed.  (as is v1).
  ASSERT_EQ(AllEntriesFor("foo", 1), "[ v2 ]");
1914
}
1915

Y
Yi Wu 已提交
1916
TEST_F(DBTest, DeletionMarkers2) {
1917
  Options options = CurrentOptions();
Y
Yi Wu 已提交
1918 1919 1920 1921 1922 1923 1924
  CreateAndReopenWithCF({"pikachu"}, options);
  Put(1, "foo", "v1");
  ASSERT_OK(Flush(1));
  const int last = 2;
  MoveFilesToLevel(last, 1);
  // foo => v1 is now in last level
  ASSERT_EQ(NumTableFilesAtLevel(last, 1), 1);
1925

Y
Yi Wu 已提交
1926 1927 1928 1929 1930 1931 1932
  // Place a table at level last-1 to prevent merging with preceding mutation
  Put(1, "a", "begin");
  Put(1, "z", "end");
  Flush(1);
  MoveFilesToLevel(last - 1, 1);
  ASSERT_EQ(NumTableFilesAtLevel(last, 1), 1);
  ASSERT_EQ(NumTableFilesAtLevel(last - 1, 1), 1);
1933

Y
Yi Wu 已提交
1934 1935 1936 1937 1938 1939 1940 1941 1942 1943 1944
  Delete(1, "foo");
  ASSERT_EQ(AllEntriesFor("foo", 1), "[ DEL, v1 ]");
  ASSERT_OK(Flush(1));  // Moves to level last-2
  ASSERT_EQ(AllEntriesFor("foo", 1), "[ DEL, v1 ]");
  dbfull()->TEST_CompactRange(last - 2, nullptr, nullptr, handles_[1]);
  // DEL kept: "last" file overlaps
  ASSERT_EQ(AllEntriesFor("foo", 1), "[ DEL, v1 ]");
  dbfull()->TEST_CompactRange(last - 1, nullptr, nullptr, handles_[1]);
  // Merging last-1 w/ last, so we are the base level for "foo", so
  // DEL is removed.  (as is v1).
  ASSERT_EQ(AllEntriesFor("foo", 1), "[ ]");
1945
}
1946

Y
Yi Wu 已提交
1947
TEST_F(DBTest, OverlapInLevel0) {
1948
  do {
S
sdong 已提交
1949
    Options options = CurrentOptions();
L
Lei Jin 已提交
1950
    CreateAndReopenWithCF({"pikachu"}, options);
1951

Y
Yi Wu 已提交
1952 1953 1954 1955 1956 1957 1958 1959 1960 1961 1962
    // Fill levels 1 and 2 to disable the pushing of new memtables to levels >
    // 0.
    ASSERT_OK(Put(1, "100", "v100"));
    ASSERT_OK(Put(1, "999", "v999"));
    Flush(1);
    MoveFilesToLevel(2, 1);
    ASSERT_OK(Delete(1, "100"));
    ASSERT_OK(Delete(1, "999"));
    Flush(1);
    MoveFilesToLevel(1, 1);
    ASSERT_EQ("0,1,1", FilesPerLevel(1));
1963

Y
Yi Wu 已提交
1964 1965 1966 1967 1968 1969 1970 1971 1972 1973 1974 1975
    // Make files spanning the following ranges in level-0:
    //  files[0]  200 .. 900
    //  files[1]  300 .. 500
    // Note that files are sorted by smallest key.
    ASSERT_OK(Put(1, "300", "v300"));
    ASSERT_OK(Put(1, "500", "v500"));
    Flush(1);
    ASSERT_OK(Put(1, "200", "v200"));
    ASSERT_OK(Put(1, "600", "v600"));
    ASSERT_OK(Put(1, "900", "v900"));
    Flush(1);
    ASSERT_EQ("2,1,1", FilesPerLevel(1));
1976

Y
Yi Wu 已提交
1977 1978 1979 1980
    // Compact away the placeholder files we created initially
    dbfull()->TEST_CompactRange(1, nullptr, nullptr, handles_[1]);
    dbfull()->TEST_CompactRange(2, nullptr, nullptr, handles_[1]);
    ASSERT_EQ("2", FilesPerLevel(1));
1981

Y
Yi Wu 已提交
1982 1983 1984 1985 1986 1987 1988 1989 1990 1991
    // Do a memtable compaction.  Before bug-fix, the compaction would
    // not detect the overlap with level-0 files and would incorrectly place
    // the deletion in a deeper level.
    ASSERT_OK(Delete(1, "600"));
    Flush(1);
    ASSERT_EQ("3", FilesPerLevel(1));
    ASSERT_EQ("NOT_FOUND", Get(1, "600"));
  } while (ChangeOptions(kSkipUniversalCompaction | kSkipFIFOCompaction));
}
#endif  // ROCKSDB_LITE
1992

Y
Yi Wu 已提交
1993 1994 1995
TEST_F(DBTest, ComparatorCheck) {
  class NewComparator : public Comparator {
   public:
1996 1997
    const char* Name() const override { return "rocksdb.NewComparator"; }
    int Compare(const Slice& a, const Slice& b) const override {
Y
Yi Wu 已提交
1998 1999
      return BytewiseComparator()->Compare(a, b);
    }
2000
    void FindShortestSeparator(std::string* s, const Slice& l) const override {
Y
Yi Wu 已提交
2001 2002
      BytewiseComparator()->FindShortestSeparator(s, l);
    }
2003
    void FindShortSuccessor(std::string* key) const override {
Y
Yi Wu 已提交
2004 2005 2006 2007 2008 2009 2010 2011 2012 2013 2014 2015 2016 2017 2018 2019
      BytewiseComparator()->FindShortSuccessor(key);
    }
  };
  Options new_options, options;
  NewComparator cmp;
  do {
    options = CurrentOptions();
    CreateAndReopenWithCF({"pikachu"}, options);
    new_options = CurrentOptions();
    new_options.comparator = &cmp;
    // only the non-default column family has non-matching comparator
    Status s = TryReopenWithColumnFamilies(
        {"default", "pikachu"}, std::vector<Options>({options, new_options}));
    ASSERT_TRUE(!s.ok());
    ASSERT_TRUE(s.ToString().find("comparator") != std::string::npos)
        << s.ToString();
2020
  } while (ChangeCompactOptions());
2021 2022
}

Y
Yi Wu 已提交
2023 2024 2025
TEST_F(DBTest, CustomComparator) {
  class NumberComparator : public Comparator {
   public:
2026 2027
    const char* Name() const override { return "test.NumberComparator"; }
    int Compare(const Slice& a, const Slice& b) const override {
Y
Yi Wu 已提交
2028 2029
      return ToNumber(a) - ToNumber(b);
    }
2030
    void FindShortestSeparator(std::string* s, const Slice& l) const override {
Y
Yi Wu 已提交
2031 2032 2033
      ToNumber(*s);  // Check format
      ToNumber(l);   // Check format
    }
2034
    void FindShortSuccessor(std::string* key) const override {
Y
Yi Wu 已提交
2035 2036 2037 2038 2039 2040 2041 2042 2043 2044 2045 2046 2047 2048 2049 2050 2051 2052 2053 2054 2055 2056 2057 2058 2059 2060 2061 2062 2063 2064 2065 2066 2067 2068 2069 2070 2071 2072 2073 2074 2075 2076 2077 2078 2079 2080 2081
      ToNumber(*key);  // Check format
    }

   private:
    static int ToNumber(const Slice& x) {
      // Check that there are no extra characters.
      EXPECT_TRUE(x.size() >= 2 && x[0] == '[' && x[x.size() - 1] == ']')
          << EscapeString(x);
      int val;
      char ignored;
      EXPECT_TRUE(sscanf(x.ToString().c_str(), "[%i]%c", &val, &ignored) == 1)
          << EscapeString(x);
      return val;
    }
  };
  Options new_options;
  NumberComparator cmp;
  do {
    new_options = CurrentOptions();
    new_options.create_if_missing = true;
    new_options.comparator = &cmp;
    new_options.write_buffer_size = 4096;  // Compact more often
    new_options.arena_block_size = 4096;
    new_options = CurrentOptions(new_options);
    DestroyAndReopen(new_options);
    CreateAndReopenWithCF({"pikachu"}, new_options);
    ASSERT_OK(Put(1, "[10]", "ten"));
    ASSERT_OK(Put(1, "[0x14]", "twenty"));
    for (int i = 0; i < 2; i++) {
      ASSERT_EQ("ten", Get(1, "[10]"));
      ASSERT_EQ("ten", Get(1, "[0xa]"));
      ASSERT_EQ("twenty", Get(1, "[20]"));
      ASSERT_EQ("twenty", Get(1, "[0x14]"));
      ASSERT_EQ("NOT_FOUND", Get(1, "[15]"));
      ASSERT_EQ("NOT_FOUND", Get(1, "[0xf]"));
      Compact(1, "[0]", "[9999]");
    }

    for (int run = 0; run < 2; run++) {
      for (int i = 0; i < 1000; i++) {
        char buf[100];
        snprintf(buf, sizeof(buf), "[%d]", i * 10);
        ASSERT_OK(Put(1, buf, buf));
      }
      Compact(1, "[0]", "[1000000]");
    }
  } while (ChangeCompactOptions());
J
jorlow@chromium.org 已提交
2082 2083
}

Y
Yi Wu 已提交
2084
TEST_F(DBTest, DBOpen_Options) {
S
sdong 已提交
2085
  Options options = CurrentOptions();
2086
  std::string dbname = test::PerThreadDBPath("db_options_test");
Y
Yi Wu 已提交
2087
  ASSERT_OK(DestroyDB(dbname, options));
2088

Y
Yi Wu 已提交
2089 2090 2091 2092 2093 2094
  // Does not exist, and create_if_missing == false: error
  DB* db = nullptr;
  options.create_if_missing = false;
  Status s = DB::Open(options, dbname, &db);
  ASSERT_TRUE(strstr(s.ToString().c_str(), "does not exist") != nullptr);
  ASSERT_TRUE(db == nullptr);
2095

Y
Yi Wu 已提交
2096 2097 2098 2099 2100
  // Does not exist, and create_if_missing == true: OK
  options.create_if_missing = true;
  s = DB::Open(options, dbname, &db);
  ASSERT_OK(s);
  ASSERT_TRUE(db != nullptr);
2101

Y
Yi Wu 已提交
2102 2103
  delete db;
  db = nullptr;
2104

Y
Yi Wu 已提交
2105 2106 2107 2108 2109 2110
  // Does exist, and error_if_exists == true: error
  options.create_if_missing = false;
  options.error_if_exists = true;
  s = DB::Open(options, dbname, &db);
  ASSERT_TRUE(strstr(s.ToString().c_str(), "exists") != nullptr);
  ASSERT_TRUE(db == nullptr);
2111

Y
Yi Wu 已提交
2112 2113 2114 2115 2116 2117
  // Does exist, and error_if_exists == false: OK
  options.create_if_missing = true;
  options.error_if_exists = false;
  s = DB::Open(options, dbname, &db);
  ASSERT_OK(s);
  ASSERT_TRUE(db != nullptr);
2118

Y
Yi Wu 已提交
2119 2120 2121
  delete db;
  db = nullptr;
}
2122

Y
Yi Wu 已提交
2123 2124 2125
TEST_F(DBTest, DBOpen_Change_NumLevels) {
  Options options = CurrentOptions();
  options.create_if_missing = true;
2126
  DestroyAndReopen(options);
Y
Yi Wu 已提交
2127 2128
  ASSERT_TRUE(db_ != nullptr);
  CreateAndReopenWithCF({"pikachu"}, options);
2129

Y
Yi Wu 已提交
2130 2131 2132 2133 2134
  ASSERT_OK(Put(1, "a", "123"));
  ASSERT_OK(Put(1, "b", "234"));
  Flush(1);
  MoveFilesToLevel(3, 1);
  Close();
2135

Y
Yi Wu 已提交
2136 2137 2138 2139 2140 2141
  options.create_if_missing = false;
  options.num_levels = 2;
  Status s = TryReopenWithColumnFamilies({"default", "pikachu"}, options);
  ASSERT_TRUE(strstr(s.ToString().c_str(), "Invalid argument") != nullptr);
  ASSERT_TRUE(db_ == nullptr);
}
2142

Y
Yi Wu 已提交
2143
TEST_F(DBTest, DestroyDBMetaDatabase) {
2144
  std::string dbname = test::PerThreadDBPath("db_meta");
Y
Yi Wu 已提交
2145 2146 2147 2148 2149
  ASSERT_OK(env_->CreateDirIfMissing(dbname));
  std::string metadbname = MetaDatabaseName(dbname, 0);
  ASSERT_OK(env_->CreateDirIfMissing(metadbname));
  std::string metametadbname = MetaDatabaseName(metadbname, 0);
  ASSERT_OK(env_->CreateDirIfMissing(metametadbname));
2150

Y
Yi Wu 已提交
2151 2152 2153 2154 2155
  // Destroy previous versions if they exist. Using the long way.
  Options options = CurrentOptions();
  ASSERT_OK(DestroyDB(metametadbname, options));
  ASSERT_OK(DestroyDB(metadbname, options));
  ASSERT_OK(DestroyDB(dbname, options));
2156

Y
Yi Wu 已提交
2157 2158 2159 2160 2161 2162 2163 2164 2165 2166 2167
  // Setup databases
  DB* db = nullptr;
  ASSERT_OK(DB::Open(options, dbname, &db));
  delete db;
  db = nullptr;
  ASSERT_OK(DB::Open(options, metadbname, &db));
  delete db;
  db = nullptr;
  ASSERT_OK(DB::Open(options, metametadbname, &db));
  delete db;
  db = nullptr;
2168

Y
Yi Wu 已提交
2169 2170
  // Delete databases
  ASSERT_OK(DestroyDB(dbname, options));
2171

Y
Yi Wu 已提交
2172 2173 2174 2175 2176
  // Check if deletion worked.
  options.create_if_missing = false;
  ASSERT_TRUE(!(DB::Open(options, dbname, &db)).ok());
  ASSERT_TRUE(!(DB::Open(options, metadbname, &db)).ok());
  ASSERT_TRUE(!(DB::Open(options, metametadbname, &db)).ok());
2177 2178
}

Y
Yi Wu 已提交
2179 2180
#ifndef ROCKSDB_LITE
TEST_F(DBTest, SnapshotFiles) {
A
Andres Noetzli 已提交
2181
  do {
Y
Yi Wu 已提交
2182 2183
    Options options = CurrentOptions();
    options.write_buffer_size = 100000000;  // Large write buffer
A
Andres Noetzli 已提交
2184 2185
    CreateAndReopenWithCF({"pikachu"}, options);

Y
Yi Wu 已提交
2186
    Random rnd(301);
A
Andres Noetzli 已提交
2187

Y
Yi Wu 已提交
2188 2189 2190 2191
    // Write 8MB (80 values, each 100K)
    ASSERT_EQ(NumTableFilesAtLevel(0, 1), 0);
    std::vector<std::string> values;
    for (int i = 0; i < 80; i++) {
M
mrambacher 已提交
2192
      values.push_back(rnd.RandomString(100000));
Y
Yi Wu 已提交
2193 2194
      ASSERT_OK(Put((i < 40), Key(i), values[i]));
    }
A
Andres Noetzli 已提交
2195

Y
Yi Wu 已提交
2196 2197
    // assert that nothing makes it to disk yet.
    ASSERT_EQ(NumTableFilesAtLevel(0, 1), 0);
A
Andres Noetzli 已提交
2198

Y
Yi Wu 已提交
2199 2200 2201 2202 2203 2204
    // get a file snapshot
    uint64_t manifest_number = 0;
    uint64_t manifest_size = 0;
    std::vector<std::string> files;
    dbfull()->DisableFileDeletions();
    dbfull()->GetLiveFiles(files, &manifest_size);
A
Andres Noetzli 已提交
2205

W
Wanning Jiang 已提交
2206 2207
    // CURRENT, MANIFEST, OPTIONS, *.sst files (one for each CF)
    ASSERT_EQ(files.size(), 5U);
A
Andres Noetzli 已提交
2208

Y
Yi Wu 已提交
2209 2210
    uint64_t number = 0;
    FileType type;
A
Andres Noetzli 已提交
2211

Y
Yi Wu 已提交
2212 2213
    // copy these files to a new snapshot directory
    std::string snapdir = dbname_ + ".snapdir/";
2214 2215 2216 2217
    if (env_->FileExists(snapdir).ok()) {
      ASSERT_OK(DestroyDir(env_, snapdir));
    }
    ASSERT_OK(env_->CreateDir(snapdir));
A
Andres Noetzli 已提交
2218

Y
Yi Wu 已提交
2219 2220 2221 2222 2223 2224
    for (size_t i = 0; i < files.size(); i++) {
      // our clients require that GetLiveFiles returns
      // files with "/" as first character!
      ASSERT_EQ(files[i][0], '/');
      std::string src = dbname_ + files[i];
      std::string dest = snapdir + files[i];
2225

Y
Yi Wu 已提交
2226 2227
      uint64_t size;
      ASSERT_OK(env_->GetFileSize(src, &size));
2228

Y
Yi Wu 已提交
2229 2230 2231 2232 2233 2234 2235 2236 2237 2238 2239 2240
      // record the number and the size of the
      // latest manifest file
      if (ParseFileName(files[i].substr(1), &number, &type)) {
        if (type == kDescriptorFile) {
          if (number > manifest_number) {
            manifest_number = number;
            ASSERT_GE(size, manifest_size);
            size = manifest_size;  // copy only valid MANIFEST data
          }
        }
      }
      CopyFile(src, dest, size);
2241
    }
2242

Y
Yi Wu 已提交
2243 2244 2245 2246 2247
    // release file snapshot
    dbfull()->DisableFileDeletions();
    // overwrite one key, this key should not appear in the snapshot
    std::vector<std::string> extras;
    for (unsigned int i = 0; i < 1; i++) {
M
mrambacher 已提交
2248
      extras.push_back(rnd.RandomString(100000));
Y
Yi Wu 已提交
2249 2250
      ASSERT_OK(Put(0, Key(i), extras[i]));
    }
L
Lei Jin 已提交
2251

Y
Yi Wu 已提交
2252 2253 2254 2255 2256 2257 2258 2259 2260 2261 2262 2263
    // verify that data in the snapshot are correct
    std::vector<ColumnFamilyDescriptor> column_families;
    column_families.emplace_back("default", ColumnFamilyOptions());
    column_families.emplace_back("pikachu", ColumnFamilyOptions());
    std::vector<ColumnFamilyHandle*> cf_handles;
    DB* snapdb;
    DBOptions opts;
    opts.env = env_;
    opts.create_if_missing = false;
    Status stat =
        DB::Open(opts, snapdir, column_families, &cf_handles, &snapdb);
    ASSERT_OK(stat);
2264

Y
Yi Wu 已提交
2265 2266 2267 2268 2269
    ReadOptions roptions;
    std::string val;
    for (unsigned int i = 0; i < 80; i++) {
      stat = snapdb->Get(roptions, cf_handles[i < 40], Key(i), &val);
      ASSERT_EQ(values[i].compare(val), 0);
L
Lei Jin 已提交
2270
    }
Y
Yi Wu 已提交
2271 2272 2273 2274
    for (auto cfh : cf_handles) {
      delete cfh;
    }
    delete snapdb;
L
Lei Jin 已提交
2275

Y
Yi Wu 已提交
2276 2277 2278 2279 2280 2281 2282
    // look at the new live files after we added an 'extra' key
    // and after we took the first snapshot.
    uint64_t new_manifest_number = 0;
    uint64_t new_manifest_size = 0;
    std::vector<std::string> newfiles;
    dbfull()->DisableFileDeletions();
    dbfull()->GetLiveFiles(newfiles, &new_manifest_size);
L
Lei Jin 已提交
2283

Y
Yi Wu 已提交
2284 2285 2286 2287 2288 2289 2290 2291 2292 2293 2294 2295 2296 2297 2298 2299 2300 2301 2302 2303 2304
    // find the new manifest file. assert that this manifest file is
    // the same one as in the previous snapshot. But its size should be
    // larger because we added an extra key after taking the
    // previous shapshot.
    for (size_t i = 0; i < newfiles.size(); i++) {
      std::string src = dbname_ + "/" + newfiles[i];
      // record the lognumber and the size of the
      // latest manifest file
      if (ParseFileName(newfiles[i].substr(1), &number, &type)) {
        if (type == kDescriptorFile) {
          if (number > new_manifest_number) {
            uint64_t size;
            new_manifest_number = number;
            ASSERT_OK(env_->GetFileSize(src, &size));
            ASSERT_GE(size, new_manifest_size);
          }
        }
      }
    }
    ASSERT_EQ(manifest_number, new_manifest_number);
    ASSERT_GT(new_manifest_size, manifest_size);
L
Lei Jin 已提交
2305

Y
Yi Wu 已提交
2306 2307 2308 2309
    // release file snapshot
    dbfull()->DisableFileDeletions();
  } while (ChangeCompactOptions());
}
2310 2311 2312 2313 2314 2315 2316 2317 2318 2319 2320 2321 2322 2323 2324 2325 2326 2327 2328 2329 2330 2331 2332 2333 2334 2335 2336 2337 2338 2339 2340 2341 2342 2343 2344

TEST_F(DBTest, ReadonlyDBGetLiveManifestSize) {
  do {
    Options options = CurrentOptions();
    options.level0_file_num_compaction_trigger = 2;
    DestroyAndReopen(options);

    ASSERT_OK(Put("foo", "bar"));
    ASSERT_OK(Flush());
    ASSERT_OK(Put("foo", "bar"));
    ASSERT_OK(Flush());
    ASSERT_OK(dbfull()->TEST_WaitForCompact());

    Close();
    ASSERT_OK(ReadOnlyReopen(options));

    uint64_t manifest_size = 0;
    std::vector<std::string> files;
    dbfull()->GetLiveFiles(files, &manifest_size);

    for (const std::string& f : files) {
      uint64_t number = 0;
      FileType type;
      if (ParseFileName(f.substr(1), &number, &type)) {
        if (type == kDescriptorFile) {
          uint64_t size_on_disk;
          env_->GetFileSize(dbname_ + "/" + f, &size_on_disk);
          ASSERT_EQ(manifest_size, size_on_disk);
          break;
        }
      }
    }
    Close();
  } while (ChangeCompactOptions());
}
2345 2346 2347 2348 2349 2350 2351 2352 2353

TEST_F(DBTest, GetLiveBlobFiles) {
  VersionSet* const versions = dbfull()->TEST_GetVersionSet();
  assert(versions);
  assert(versions->GetColumnFamilySet());

  ColumnFamilyData* const cfd = versions->GetColumnFamilySet()->GetDefault();
  assert(cfd);

2354 2355 2356 2357 2358
  Version* const version = cfd->current();
  assert(version);

  VersionStorageInfo* const storage_info = version->storage_info();
  assert(storage_info);
2359

2360
  // Add a live blob file.
2361 2362 2363 2364 2365 2366
  constexpr uint64_t blob_file_number = 234;
  constexpr uint64_t total_blob_count = 555;
  constexpr uint64_t total_blob_bytes = 66666;
  constexpr char checksum_method[] = "CRC32";
  constexpr char checksum_value[] = "3d87ff57";

2367 2368 2369
  auto shared_meta = SharedBlobFileMetaData::Create(
      blob_file_number, total_blob_count, total_blob_bytes, checksum_method,
      checksum_value);
2370

2371 2372
  constexpr uint64_t garbage_blob_count = 0;
  constexpr uint64_t garbage_blob_bytes = 0;
2373

2374 2375 2376 2377 2378
  auto meta = BlobFileMetaData::Create(std::move(shared_meta),
                                       BlobFileMetaData::LinkedSsts(),
                                       garbage_blob_count, garbage_blob_bytes);

  storage_info->AddBlobFile(std::move(meta));
2379 2380 2381 2382 2383 2384 2385 2386 2387

  // Make sure it appears in the results returned by GetLiveFiles.
  uint64_t manifest_size = 0;
  std::vector<std::string> files;
  ASSERT_OK(dbfull()->GetLiveFiles(files, &manifest_size));

  ASSERT_FALSE(files.empty());
  ASSERT_EQ(files[0], BlobFileName("", blob_file_number));
}
Y
Yi Wu 已提交
2388
#endif
L
Lei Jin 已提交
2389

Y
Yi Wu 已提交
2390 2391 2392
TEST_F(DBTest, PurgeInfoLogs) {
  Options options = CurrentOptions();
  options.keep_log_file_num = 5;
2393
  options.create_if_missing = true;
Y
Yi Wu 已提交
2394 2395 2396 2397 2398 2399 2400 2401 2402 2403
  for (int mode = 0; mode <= 1; mode++) {
    if (mode == 1) {
      options.db_log_dir = dbname_ + "_logs";
      env_->CreateDirIfMissing(options.db_log_dir);
    } else {
      options.db_log_dir = "";
    }
    for (int i = 0; i < 8; i++) {
      Reopen(options);
    }
2404

Y
Yi Wu 已提交
2405 2406 2407 2408 2409 2410 2411
    std::vector<std::string> files;
    env_->GetChildren(options.db_log_dir.empty() ? dbname_ : options.db_log_dir,
                      &files);
    int info_log_count = 0;
    for (std::string file : files) {
      if (file.find("LOG") != std::string::npos) {
        info_log_count++;
2412
      }
2413
    }
Y
Yi Wu 已提交
2414
    ASSERT_EQ(5, info_log_count);
2415

Y
Yi Wu 已提交
2416 2417 2418 2419 2420 2421 2422
    Destroy(options);
    // For mode (1), test DestroyDB() to delete all the logs under DB dir.
    // For mode (2), no info log file should have been put under DB dir.
    std::vector<std::string> db_files;
    env_->GetChildren(dbname_, &db_files);
    for (std::string file : db_files) {
      ASSERT_TRUE(file.find("LOG") == std::string::npos);
2423 2424
    }

Y
Yi Wu 已提交
2425 2426 2427 2428 2429 2430 2431 2432
    if (mode == 1) {
      // Cleaning up
      env_->GetChildren(options.db_log_dir, &files);
      for (std::string file : files) {
        env_->DeleteFile(options.db_log_dir + "/" + file);
      }
      env_->DeleteDir(options.db_log_dir);
    }
2433 2434 2435
  }
}

Y
Yi Wu 已提交
2436 2437 2438
#ifndef ROCKSDB_LITE
// Multi-threaded test:
namespace {
2439

Y
Yi Wu 已提交
2440 2441 2442 2443
static const int kColumnFamilies = 10;
static const int kNumThreads = 10;
static const int kTestSeconds = 10;
static const int kNumKeys = 1000;
2444

Y
Yi Wu 已提交
2445 2446 2447 2448 2449 2450
struct MTState {
  DBTest* test;
  std::atomic<bool> stop;
  std::atomic<int> counter[kNumThreads];
  std::atomic<bool> thread_done[kNumThreads];
};
2451

Y
Yi Wu 已提交
2452 2453 2454
struct MTThread {
  MTState* state;
  int id;
2455
  bool multiget_batched;
Y
Yi Wu 已提交
2456
};
2457

Y
Yi Wu 已提交
2458 2459 2460 2461 2462 2463 2464 2465 2466 2467
static void MTThreadBody(void* arg) {
  MTThread* t = reinterpret_cast<MTThread*>(arg);
  int id = t->id;
  DB* db = t->state->test->db_;
  int counter = 0;
  fprintf(stderr, "... starting thread %d\n", id);
  Random rnd(1000 + id);
  char valbuf[1500];
  while (t->state->stop.load(std::memory_order_acquire) == false) {
    t->state->counter[id].store(counter, std::memory_order_release);
2468

Y
Yi Wu 已提交
2469 2470 2471
    int key = rnd.Uniform(kNumKeys);
    char keybuf[20];
    snprintf(keybuf, sizeof(keybuf), "%016d", key);
2472

Y
Yi Wu 已提交
2473 2474 2475 2476 2477
    if (rnd.OneIn(2)) {
      // Write values of the form <key, my id, counter, cf, unique_id>.
      // into each of the CFs
      // We add some padding for force compactions.
      int unique_id = rnd.Uniform(1000000);
2478

Y
Yi Wu 已提交
2479 2480 2481 2482 2483 2484 2485 2486 2487 2488 2489 2490 2491 2492 2493 2494 2495 2496 2497 2498 2499 2500 2501 2502 2503
      // Half of the time directly use WriteBatch. Half of the time use
      // WriteBatchWithIndex.
      if (rnd.OneIn(2)) {
        WriteBatch batch;
        for (int cf = 0; cf < kColumnFamilies; ++cf) {
          snprintf(valbuf, sizeof(valbuf), "%d.%d.%d.%d.%-1000d", key, id,
                   static_cast<int>(counter), cf, unique_id);
          batch.Put(t->state->test->handles_[cf], Slice(keybuf), Slice(valbuf));
        }
        ASSERT_OK(db->Write(WriteOptions(), &batch));
      } else {
        WriteBatchWithIndex batch(db->GetOptions().comparator);
        for (int cf = 0; cf < kColumnFamilies; ++cf) {
          snprintf(valbuf, sizeof(valbuf), "%d.%d.%d.%d.%-1000d", key, id,
                   static_cast<int>(counter), cf, unique_id);
          batch.Put(t->state->test->handles_[cf], Slice(keybuf), Slice(valbuf));
        }
        ASSERT_OK(db->Write(WriteOptions(), batch.GetWriteBatch()));
      }
    } else {
      // Read a value and verify that it matches the pattern written above
      // and that writes to all column families were atomic (unique_id is the
      // same)
      std::vector<Slice> keys(kColumnFamilies, Slice(keybuf));
      std::vector<std::string> values;
2504 2505 2506 2507 2508 2509 2510 2511 2512 2513 2514 2515 2516 2517 2518 2519 2520 2521 2522 2523 2524 2525
      std::vector<Status> statuses;
      if (!t->multiget_batched) {
        statuses = db->MultiGet(ReadOptions(), t->state->test->handles_, keys,
                                &values);
      } else {
        std::vector<PinnableSlice> pin_values(keys.size());
        statuses.resize(keys.size());
        const Snapshot* snapshot = db->GetSnapshot();
        ReadOptions ro;
        ro.snapshot = snapshot;
        for (int cf = 0; cf < kColumnFamilies; ++cf) {
          db->MultiGet(ro, t->state->test->handles_[cf], 1, &keys[cf],
                       &pin_values[cf], &statuses[cf]);
        }
        db->ReleaseSnapshot(snapshot);
        values.resize(keys.size());
        for (int cf = 0; cf < kColumnFamilies; ++cf) {
          if (statuses[cf].ok()) {
            values[cf].assign(pin_values[cf].data(), pin_values[cf].size());
          }
        }
      }
Y
Yi Wu 已提交
2526 2527 2528 2529 2530 2531 2532 2533 2534 2535 2536 2537 2538 2539 2540 2541 2542 2543 2544 2545 2546 2547 2548 2549 2550 2551 2552 2553 2554 2555 2556 2557 2558 2559 2560 2561 2562 2563
      Status s = statuses[0];
      // all statuses have to be the same
      for (size_t i = 1; i < statuses.size(); ++i) {
        // they are either both ok or both not-found
        ASSERT_TRUE((s.ok() && statuses[i].ok()) ||
                    (s.IsNotFound() && statuses[i].IsNotFound()));
      }
      if (s.IsNotFound()) {
        // Key has not yet been written
      } else {
        // Check that the writer thread counter is >= the counter in the value
        ASSERT_OK(s);
        int unique_id = -1;
        for (int i = 0; i < kColumnFamilies; ++i) {
          int k, w, c, cf, u;
          ASSERT_EQ(5, sscanf(values[i].c_str(), "%d.%d.%d.%d.%d", &k, &w, &c,
                              &cf, &u))
              << values[i];
          ASSERT_EQ(k, key);
          ASSERT_GE(w, 0);
          ASSERT_LT(w, kNumThreads);
          ASSERT_LE(c, t->state->counter[w].load(std::memory_order_acquire));
          ASSERT_EQ(cf, i);
          if (i == 0) {
            unique_id = u;
          } else {
            // this checks that updates across column families happened
            // atomically -- all unique ids are the same
            ASSERT_EQ(u, unique_id);
          }
        }
      }
    }
    counter++;
  }
  t->state->thread_done[id].store(true, std::memory_order_release);
  fprintf(stderr, "... stopping thread %d after %d ops\n", id, int(counter));
}
2564

Y
Yi Wu 已提交
2565
}  // namespace
2566

2567 2568 2569
class MultiThreadedDBTest
    : public DBTest,
      public ::testing::WithParamInterface<std::tuple<int, bool>> {
Y
Yi Wu 已提交
2570
 public:
2571 2572 2573
  void SetUp() override {
    std::tie(option_config_, multiget_batched_) = GetParam();
  }
2574

Y
Yi Wu 已提交
2575 2576 2577
  static std::vector<int> GenerateOptionConfigs() {
    std::vector<int> optionConfigs;
    for (int optionConfig = kDefault; optionConfig < kEnd; ++optionConfig) {
2578
      optionConfigs.push_back(optionConfig);
2579
    }
Y
Yi Wu 已提交
2580 2581
    return optionConfigs;
  }
2582 2583

  bool multiget_batched_;
Y
Yi Wu 已提交
2584
};
2585

Y
Yi Wu 已提交
2586
TEST_P(MultiThreadedDBTest, MultiThreaded) {
2587
  if (option_config_ == kPipelinedWrite) return;
Y
Yi Wu 已提交
2588 2589
  anon::OptionsOverride options_override;
  options_override.skip_policy = kSkipNoSnapshot;
2590
  Options options = CurrentOptions(options_override);
Y
Yi Wu 已提交
2591 2592 2593 2594
  std::vector<std::string> cfs;
  for (int i = 1; i < kColumnFamilies; ++i) {
    cfs.push_back(ToString(i));
  }
2595 2596
  Reopen(options);
  CreateAndReopenWithCF(cfs, options);
Y
Yi Wu 已提交
2597 2598 2599 2600 2601 2602 2603
  // Initialize state
  MTState mt;
  mt.test = this;
  mt.stop.store(false, std::memory_order_release);
  for (int id = 0; id < kNumThreads; id++) {
    mt.counter[id].store(0, std::memory_order_release);
    mt.thread_done[id].store(false, std::memory_order_release);
2604 2605
  }

Y
Yi Wu 已提交
2606 2607 2608 2609 2610
  // Start threads
  MTThread thread[kNumThreads];
  for (int id = 0; id < kNumThreads; id++) {
    thread[id].state = &mt;
    thread[id].id = id;
2611
    thread[id].multiget_batched = multiget_batched_;
Y
Yi Wu 已提交
2612
    env_->StartThread(MTThreadBody, &thread[id]);
2613
  }
Y
Yi Wu 已提交
2614 2615 2616 2617 2618 2619 2620 2621 2622 2623

  // Let them run for a while
  env_->SleepForMicroseconds(kTestSeconds * 1000000);

  // Stop the threads and wait for them to finish
  mt.stop.store(true, std::memory_order_release);
  for (int id = 0; id < kNumThreads; id++) {
    while (mt.thread_done[id].load(std::memory_order_acquire) == false) {
      env_->SleepForMicroseconds(100000);
    }
2624 2625 2626
  }
}

2627
INSTANTIATE_TEST_CASE_P(
Y
Yi Wu 已提交
2628
    MultiThreaded, MultiThreadedDBTest,
2629 2630 2631
    ::testing::Combine(
        ::testing::ValuesIn(MultiThreadedDBTest::GenerateOptionConfigs()),
        ::testing::Bool()));
Y
Yi Wu 已提交
2632
#endif  // ROCKSDB_LITE
2633

Y
Yi Wu 已提交
2634
// Group commit test:
2635 2636 2637
#if !defined(TRAVIS) && !defined(OS_WIN)
// Disable this test temporarily on Travis and appveyor as it fails
// intermittently. Github issue: #4151
Y
Yi Wu 已提交
2638
namespace {
2639

Y
Yi Wu 已提交
2640 2641
static const int kGCNumThreads = 4;
static const int kGCNumKeys = 1000;
2642

Y
Yi Wu 已提交
2643 2644 2645 2646 2647
struct GCThread {
  DB* db;
  int id;
  std::atomic<bool> done;
};
2648

Y
Yi Wu 已提交
2649 2650 2651 2652 2653
static void GCThreadBody(void* arg) {
  GCThread* t = reinterpret_cast<GCThread*>(arg);
  int id = t->id;
  DB* db = t->db;
  WriteOptions wo;
2654

Y
Yi Wu 已提交
2655 2656 2657 2658 2659 2660
  for (int i = 0; i < kGCNumKeys; ++i) {
    std::string kv(ToString(i + id * kGCNumKeys));
    ASSERT_OK(db->Put(wo, kv, kv));
  }
  t->done = true;
}
2661

Y
Yi Wu 已提交
2662 2663 2664 2665 2666 2667
}  // namespace

TEST_F(DBTest, GroupCommitTest) {
  do {
    Options options = CurrentOptions();
    options.env = env_;
2668
    options.statistics = ROCKSDB_NAMESPACE::CreateDBStatistics();
Y
Yi Wu 已提交
2669 2670
    Reopen(options);

2671
    ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->LoadDependency(
2672
        {{"WriteThread::JoinBatchGroup:BeganWaiting",
2673
          "DBImpl::WriteImpl:BeforeLeaderEnters"},
2674
         {"WriteThread::AwaitState:BlockingWaiting",
2675
          "WriteThread::EnterAsBatchGroupLeader:End"}});
2676
    ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
2677

Y
Yi Wu 已提交
2678 2679 2680 2681 2682 2683 2684
    // Start threads
    GCThread thread[kGCNumThreads];
    for (int id = 0; id < kGCNumThreads; id++) {
      thread[id].id = id;
      thread[id].db = db_;
      thread[id].done = false;
      env_->StartThread(GCThreadBody, &thread[id]);
2685
    }
2686
    env_->WaitForJoin();
2687

Y
Yi Wu 已提交
2688 2689 2690 2691 2692
    ASSERT_GT(TestGetTickerCount(options, WRITE_DONE_BY_OTHER), 0);

    std::vector<std::string> expected_db;
    for (int i = 0; i < kGCNumThreads * kGCNumKeys; ++i) {
      expected_db.push_back(ToString(i));
2693
    }
2694
    std::sort(expected_db.begin(), expected_db.end());
Y
Yi Wu 已提交
2695 2696 2697 2698 2699 2700 2701 2702

    Iterator* itr = db_->NewIterator(ReadOptions());
    itr->SeekToFirst();
    for (auto x : expected_db) {
      ASSERT_TRUE(itr->Valid());
      ASSERT_EQ(itr->key().ToString(), x);
      ASSERT_EQ(itr->value().ToString(), x);
      itr->Next();
2703
    }
Y
Yi Wu 已提交
2704 2705
    ASSERT_TRUE(!itr->Valid());
    delete itr;
2706

A
Andrew Kryczka 已提交
2707
    HistogramData hist_data;
Y
Yi Wu 已提交
2708 2709 2710
    options.statistics->histogramData(DB_WRITE, &hist_data);
    ASSERT_GT(hist_data.average, 0.0);
  } while (ChangeOptions(kSkipNoSeekToLast));
2711
}
2712
#endif  // TRAVIS
2713

Y
Yi Wu 已提交
2714 2715
namespace {
typedef std::map<std::string, std::string> KVMap;
2716 2717
}

Y
Yi Wu 已提交
2718
class ModelDB : public DB {
2719
 public:
Y
Yi Wu 已提交
2720 2721 2722
  class ModelSnapshot : public Snapshot {
   public:
    KVMap map_;
2723

2724
    SequenceNumber GetSequenceNumber() const override {
Y
Yi Wu 已提交
2725 2726 2727 2728 2729
      // no need to call this
      assert(false);
      return 0;
    }
  };
2730

Y
Yi Wu 已提交
2731 2732
  explicit ModelDB(const Options& options) : options_(options) {}
  using DB::Put;
2733 2734
  Status Put(const WriteOptions& o, ColumnFamilyHandle* cf, const Slice& k,
             const Slice& v) override {
Y
Yi Wu 已提交
2735 2736 2737
    WriteBatch batch;
    batch.Put(cf, k, v);
    return Write(o, &batch);
2738
  }
2739
  using DB::Close;
2740
  Status Close() override { return Status::OK(); }
Y
Yi Wu 已提交
2741
  using DB::Delete;
2742 2743
  Status Delete(const WriteOptions& o, ColumnFamilyHandle* cf,
                const Slice& key) override {
Y
Yi Wu 已提交
2744 2745 2746
    WriteBatch batch;
    batch.Delete(cf, key);
    return Write(o, &batch);
2747
  }
Y
Yi Wu 已提交
2748
  using DB::SingleDelete;
2749 2750
  Status SingleDelete(const WriteOptions& o, ColumnFamilyHandle* cf,
                      const Slice& key) override {
Y
Yi Wu 已提交
2751 2752 2753
    WriteBatch batch;
    batch.SingleDelete(cf, key);
    return Write(o, &batch);
2754
  }
Y
Yi Wu 已提交
2755
  using DB::Merge;
2756 2757
  Status Merge(const WriteOptions& o, ColumnFamilyHandle* cf, const Slice& k,
               const Slice& v) override {
Y
Yi Wu 已提交
2758 2759 2760 2761 2762
    WriteBatch batch;
    batch.Merge(cf, k, v);
    return Write(o, &batch);
  }
  using DB::Get;
2763 2764
  Status Get(const ReadOptions& /*options*/, ColumnFamilyHandle* /*cf*/,
             const Slice& key, PinnableSlice* /*value*/) override {
Y
Yi Wu 已提交
2765
    return Status::NotSupported(key);
2766 2767
  }

2768 2769 2770 2771 2772 2773 2774 2775 2776
  using DB::GetMergeOperands;
  virtual Status GetMergeOperands(
      const ReadOptions& /*options*/, ColumnFamilyHandle* /*column_family*/,
      const Slice& key, PinnableSlice* /*slice*/,
      GetMergeOperandsOptions* /*merge_operands_options*/,
      int* /*number_of_operands*/) override {
    return Status::NotSupported(key);
  }

Y
Yi Wu 已提交
2777
  using DB::MultiGet;
2778
  std::vector<Status> MultiGet(
A
Andrew Kryczka 已提交
2779 2780
      const ReadOptions& /*options*/,
      const std::vector<ColumnFamilyHandle*>& /*column_family*/,
Y
Yi Wu 已提交
2781
      const std::vector<Slice>& keys,
A
Andrew Kryczka 已提交
2782
      std::vector<std::string>* /*values*/) override {
Y
Yi Wu 已提交
2783 2784 2785 2786
    std::vector<Status> s(keys.size(),
                          Status::NotSupported("Not implemented."));
    return s;
  }
2787

Y
Yi Wu 已提交
2788
#ifndef ROCKSDB_LITE
2789
  using DB::IngestExternalFile;
2790
  Status IngestExternalFile(
A
Andrew Kryczka 已提交
2791 2792 2793
      ColumnFamilyHandle* /*column_family*/,
      const std::vector<std::string>& /*external_files*/,
      const IngestExternalFileOptions& /*options*/) override {
Y
Yi Wu 已提交
2794 2795
    return Status::NotSupported("Not implemented.");
  }
2796

Y
Yanqin Jin 已提交
2797
  using DB::IngestExternalFiles;
2798
  Status IngestExternalFiles(
Y
Yanqin Jin 已提交
2799 2800 2801 2802
      const std::vector<IngestExternalFileArg>& /*args*/) override {
    return Status::NotSupported("Not implemented");
  }

2803 2804 2805 2806 2807 2808 2809 2810 2811 2812
  using DB::CreateColumnFamilyWithImport;
  virtual Status CreateColumnFamilyWithImport(
      const ColumnFamilyOptions& /*options*/,
      const std::string& /*column_family_name*/,
      const ImportColumnFamilyOptions& /*import_options*/,
      const ExportImportFilesMetaData& /*metadata*/,
      ColumnFamilyHandle** /*handle*/) override {
    return Status::NotSupported("Not implemented.");
  }

S
sdong 已提交
2813 2814
  using DB::VerifyChecksum;
  Status VerifyChecksum(const ReadOptions&) override {
A
Aaron G 已提交
2815 2816 2817
    return Status::NotSupported("Not implemented.");
  }

Y
Yi Wu 已提交
2818
  using DB::GetPropertiesOfAllTables;
2819
  Status GetPropertiesOfAllTables(
A
Andrew Kryczka 已提交
2820 2821
      ColumnFamilyHandle* /*column_family*/,
      TablePropertiesCollection* /*props*/) override {
Y
Yi Wu 已提交
2822
    return Status();
2823 2824
  }

2825
  Status GetPropertiesOfTablesInRange(
A
Andrew Kryczka 已提交
2826 2827
      ColumnFamilyHandle* /*column_family*/, const Range* /*range*/,
      std::size_t /*n*/, TablePropertiesCollection* /*props*/) override {
Y
Yi Wu 已提交
2828 2829 2830
    return Status();
  }
#endif  // ROCKSDB_LITE
2831

Y
Yi Wu 已提交
2832
  using DB::KeyMayExist;
2833 2834 2835 2836
  bool KeyMayExist(const ReadOptions& /*options*/,
                   ColumnFamilyHandle* /*column_family*/, const Slice& /*key*/,
                   std::string* /*value*/,
                   bool* value_found = nullptr) override {
Y
Yi Wu 已提交
2837 2838 2839 2840
    if (value_found != nullptr) {
      *value_found = false;
    }
    return true;  // Not Supported directly
2841
  }
Y
Yi Wu 已提交
2842
  using DB::NewIterator;
2843 2844
  Iterator* NewIterator(const ReadOptions& options,
                        ColumnFamilyHandle* /*column_family*/) override {
Y
Yi Wu 已提交
2845 2846 2847 2848 2849 2850 2851 2852 2853
    if (options.snapshot == nullptr) {
      KVMap* saved = new KVMap;
      *saved = map_;
      return new ModelIter(saved, true);
    } else {
      const KVMap* snapshot_state =
          &(reinterpret_cast<const ModelSnapshot*>(options.snapshot)->map_);
      return new ModelIter(snapshot_state, false);
    }
2854
  }
2855 2856 2857
  Status NewIterators(const ReadOptions& /*options*/,
                      const std::vector<ColumnFamilyHandle*>& /*column_family*/,
                      std::vector<Iterator*>* /*iterators*/) override {
Y
Yi Wu 已提交
2858 2859
    return Status::NotSupported("Not supported yet");
  }
2860
  const Snapshot* GetSnapshot() override {
Y
Yi Wu 已提交
2861 2862 2863
    ModelSnapshot* snapshot = new ModelSnapshot;
    snapshot->map_ = map_;
    return snapshot;
2864 2865
  }

2866
  void ReleaseSnapshot(const Snapshot* snapshot) override {
Y
Yi Wu 已提交
2867
    delete reinterpret_cast<const ModelSnapshot*>(snapshot);
2868
  }
Y
Yi Wu 已提交
2869

2870
  Status Write(const WriteOptions& /*options*/, WriteBatch* batch) override {
Y
Yi Wu 已提交
2871 2872 2873
    class Handler : public WriteBatch::Handler {
     public:
      KVMap* map_;
2874
      void Put(const Slice& key, const Slice& value) override {
Y
Yi Wu 已提交
2875 2876
        (*map_)[key.ToString()] = value.ToString();
      }
2877
      void Merge(const Slice& /*key*/, const Slice& /*value*/) override {
Y
Yi Wu 已提交
2878 2879 2880
        // ignore merge for now
        // (*map_)[key.ToString()] = value.ToString();
      }
2881
      void Delete(const Slice& key) override { map_->erase(key.ToString()); }
Y
Yi Wu 已提交
2882 2883 2884 2885
    };
    Handler handler;
    handler.map_ = &map_;
    return batch->Iterate(&handler);
2886 2887
  }

Y
Yi Wu 已提交
2888
  using DB::GetProperty;
2889 2890
  bool GetProperty(ColumnFamilyHandle* /*column_family*/,
                   const Slice& /*property*/, std::string* /*value*/) override {
Y
Yi Wu 已提交
2891 2892 2893
    return false;
  }
  using DB::GetIntProperty;
2894 2895
  bool GetIntProperty(ColumnFamilyHandle* /*column_family*/,
                      const Slice& /*property*/, uint64_t* /*value*/) override {
2896 2897 2898
    return false;
  }
  using DB::GetMapProperty;
2899 2900 2901
  bool GetMapProperty(ColumnFamilyHandle* /*column_family*/,
                      const Slice& /*property*/,
                      std::map<std::string, std::string>* /*value*/) override {
Y
Yi Wu 已提交
2902 2903 2904
    return false;
  }
  using DB::GetAggregatedIntProperty;
2905 2906
  bool GetAggregatedIntProperty(const Slice& /*property*/,
                                uint64_t* /*value*/) override {
Y
Yi Wu 已提交
2907 2908 2909
    return false;
  }
  using DB::GetApproximateSizes;
2910 2911 2912 2913
  Status GetApproximateSizes(const SizeApproximationOptions& /*options*/,
                             ColumnFamilyHandle* /*column_family*/,
                             const Range* /*range*/, int n,
                             uint64_t* sizes) override {
Y
Yi Wu 已提交
2914 2915 2916
    for (int i = 0; i < n; i++) {
      sizes[i] = 0;
    }
2917
    return Status::OK();
Y
Yi Wu 已提交
2918
  }
2919
  using DB::GetApproximateMemTableStats;
2920 2921 2922 2923
  void GetApproximateMemTableStats(ColumnFamilyHandle* /*column_family*/,
                                   const Range& /*range*/,
                                   uint64_t* const count,
                                   uint64_t* const size) override {
2924 2925 2926
    *count = 0;
    *size = 0;
  }
Y
Yi Wu 已提交
2927
  using DB::CompactRange;
2928 2929 2930
  Status CompactRange(const CompactRangeOptions& /*options*/,
                      ColumnFamilyHandle* /*column_family*/,
                      const Slice* /*start*/, const Slice* /*end*/) override {
Y
Yi Wu 已提交
2931 2932
    return Status::NotSupported("Not supported operation.");
  }
2933

2934
  Status SetDBOptions(
A
Andrew Kryczka 已提交
2935
      const std::unordered_map<std::string, std::string>& /*new_options*/)
2936 2937 2938 2939
      override {
    return Status::NotSupported("Not supported operation.");
  }

Y
Yi Wu 已提交
2940
  using DB::CompactFiles;
2941
  Status CompactFiles(
A
Andrew Kryczka 已提交
2942 2943 2944
      const CompactionOptions& /*compact_options*/,
      ColumnFamilyHandle* /*column_family*/,
      const std::vector<std::string>& /*input_file_names*/,
2945
      const int /*output_level*/, const int /*output_path_id*/ = -1,
2946 2947
      std::vector<std::string>* const /*output_file_names*/ = nullptr,
      CompactionJobInfo* /*compaction_job_info*/ = nullptr) override {
Y
Yi Wu 已提交
2948 2949
    return Status::NotSupported("Not supported operation.");
  }
2950

Y
Yi Wu 已提交
2951 2952 2953
  Status PauseBackgroundWork() override {
    return Status::NotSupported("Not supported operation.");
  }
2954

Y
Yi Wu 已提交
2955 2956 2957
  Status ContinueBackgroundWork() override {
    return Status::NotSupported("Not supported operation.");
  }
2958

Y
Yi Wu 已提交
2959
  Status EnableAutoCompaction(
A
Andrew Kryczka 已提交
2960 2961
      const std::vector<ColumnFamilyHandle*>& /*column_family_handles*/)
      override {
Y
Yi Wu 已提交
2962 2963
    return Status::NotSupported("Not supported operation.");
  }
2964

2965 2966 2967 2968
  void EnableManualCompaction() override { return; }

  void DisableManualCompaction() override { return; }

Y
Yi Wu 已提交
2969
  using DB::NumberLevels;
2970
  int NumberLevels(ColumnFamilyHandle* /*column_family*/) override { return 1; }
2971

Y
Yi Wu 已提交
2972
  using DB::MaxMemCompactionLevel;
2973
  int MaxMemCompactionLevel(ColumnFamilyHandle* /*column_family*/) override {
Y
Yi Wu 已提交
2974
    return 1;
2975
  }
2976

Y
Yi Wu 已提交
2977
  using DB::Level0StopWriteTrigger;
2978
  int Level0StopWriteTrigger(ColumnFamilyHandle* /*column_family*/) override {
Y
Yi Wu 已提交
2979 2980
    return -1;
  }
2981

2982
  const std::string& GetName() const override { return name_; }
2983

2984
  Env* GetEnv() const override { return nullptr; }
2985

Y
Yi Wu 已提交
2986
  using DB::GetOptions;
2987
  Options GetOptions(ColumnFamilyHandle* /*column_family*/) const override {
Y
Yi Wu 已提交
2988
    return options_;
2989
  }
2990

Y
Yi Wu 已提交
2991
  using DB::GetDBOptions;
2992
  DBOptions GetDBOptions() const override { return options_; }
2993

Y
Yi Wu 已提交
2994
  using DB::Flush;
2995
  Status Flush(const ROCKSDB_NAMESPACE::FlushOptions& /*options*/,
2996
               ColumnFamilyHandle* /*column_family*/) override {
Y
Yi Wu 已提交
2997 2998 2999
    Status ret;
    return ret;
  }
3000
  Status Flush(
3001
      const ROCKSDB_NAMESPACE::FlushOptions& /*options*/,
Y
Yanqin Jin 已提交
3002 3003 3004
      const std::vector<ColumnFamilyHandle*>& /*column_families*/) override {
    return Status::OK();
  }
L
Lei Jin 已提交
3005

3006
  Status SyncWAL() override { return Status::OK(); }
3007

3008
  Status DisableFileDeletions() override { return Status::OK(); }
3009

3010
  Status EnableFileDeletions(bool /*force*/) override { return Status::OK(); }
3011 3012
#ifndef ROCKSDB_LITE

3013 3014
  Status GetLiveFiles(std::vector<std::string>&, uint64_t* /*size*/,
                      bool /*flush_memtable*/ = true) override {
Y
Yi Wu 已提交
3015 3016
    return Status::OK();
  }
3017

3018 3019 3020 3021 3022
  Status GetLiveFilesChecksumInfo(
      FileChecksumList* /*checksum_list*/) override {
    return Status::OK();
  }

3023
  Status GetSortedWalFiles(VectorLogPtr& /*files*/) override {
Y
Yi Wu 已提交
3024 3025
    return Status::OK();
  }
3026

3027 3028
  Status GetCurrentWalFile(
      std::unique_ptr<LogFile>* /*current_log_file*/) override {
3029 3030 3031
    return Status::OK();
  }

3032 3033 3034 3035 3036
  virtual Status GetCreationTimeOfOldestFile(
      uint64_t* /*creation_time*/) override {
    return Status::NotSupported();
  }

3037
  Status DeleteFile(std::string /*name*/) override { return Status::OK(); }
3038

3039
  Status GetUpdatesSince(
3040 3041
      ROCKSDB_NAMESPACE::SequenceNumber,
      std::unique_ptr<ROCKSDB_NAMESPACE::TransactionLogIterator>*,
A
Andrew Kryczka 已提交
3042
      const TransactionLogIterator::ReadOptions& /*read_options*/ =
A
Aaron Gao 已提交
3043
          TransactionLogIterator::ReadOptions()) override {
Y
Yi Wu 已提交
3044 3045
    return Status::NotSupported("Not supported in Model DB");
  }
3046

3047 3048
  void GetColumnFamilyMetaData(ColumnFamilyHandle* /*column_family*/,
                               ColumnFamilyMetaData* /*metadata*/) override {}
Y
Yi Wu 已提交
3049 3050
#endif  // ROCKSDB_LITE

3051
  Status GetDbIdentity(std::string& /*identity*/) const override {
Y
Yi Wu 已提交
3052
    return Status::OK();
3053
  }
3054

Z
Zitan Chen 已提交
3055 3056 3057 3058
  Status GetDbSessionId(std::string& /*session_id*/) const override {
    return Status::OK();
  }

3059
  SequenceNumber GetLatestSequenceNumber() const override { return 0; }
Y
Yi Wu 已提交
3060

3061
  bool SetPreserveDeletesSequenceNumber(SequenceNumber /*seqnum*/) override {
3062 3063 3064
    return true;
  }

3065
  ColumnFamilyHandle* DefaultColumnFamily() const override { return nullptr; }
Y
Yi Wu 已提交
3066 3067 3068 3069 3070 3071

 private:
  class ModelIter : public Iterator {
   public:
    ModelIter(const KVMap* map, bool owned)
        : map_(map), owned_(owned), iter_(map_->end()) {}
3072
    ~ModelIter() override {
Y
Yi Wu 已提交
3073 3074
      if (owned_) delete map_;
    }
3075 3076 3077
    bool Valid() const override { return iter_ != map_->end(); }
    void SeekToFirst() override { iter_ = map_->begin(); }
    void SeekToLast() override {
Y
Yi Wu 已提交
3078 3079 3080 3081 3082 3083
      if (map_->empty()) {
        iter_ = map_->end();
      } else {
        iter_ = map_->find(map_->rbegin()->first);
      }
    }
3084
    void Seek(const Slice& k) override {
Y
Yi Wu 已提交
3085 3086
      iter_ = map_->lower_bound(k.ToString());
    }
3087
    void SeekForPrev(const Slice& k) override {
A
Aaron Gao 已提交
3088 3089 3090
      iter_ = map_->upper_bound(k.ToString());
      Prev();
    }
3091 3092
    void Next() override { ++iter_; }
    void Prev() override {
Y
Yi Wu 已提交
3093 3094 3095 3096 3097 3098 3099
      if (iter_ == map_->begin()) {
        iter_ = map_->end();
        return;
      }
      --iter_;
    }

3100 3101 3102
    Slice key() const override { return iter_->first; }
    Slice value() const override { return iter_->second; }
    Status status() const override { return Status::OK(); }
3103

Y
Yi Wu 已提交
3104 3105 3106 3107 3108 3109 3110 3111 3112
   private:
    const KVMap* const map_;
    const bool owned_;  // Do we own map_
    KVMap::const_iterator iter_;
  };
  const Options options_;
  KVMap map_;
  std::string name_ = "";
};
3113

3114
#ifndef ROCKSDB_VALGRIND_RUN
Y
Yi Wu 已提交
3115 3116 3117 3118 3119 3120 3121 3122 3123
static std::string RandomKey(Random* rnd, int minimum = 0) {
  int len;
  do {
    len = (rnd->OneIn(3)
               ? 1  // Short sometimes to encourage collisions
               : (rnd->OneIn(100) ? rnd->Skewed(10) : rnd->Uniform(10)));
  } while (len < minimum);
  return test::RandomKey(rnd, len);
}
L
Lei Jin 已提交
3124

Y
Yi Wu 已提交
3125 3126 3127 3128 3129 3130 3131 3132 3133
static bool CompareIterators(int step, DB* model, DB* db,
                             const Snapshot* model_snap,
                             const Snapshot* db_snap) {
  ReadOptions options;
  options.snapshot = model_snap;
  Iterator* miter = model->NewIterator(options);
  options.snapshot = db_snap;
  Iterator* dbiter = db->NewIterator(options);
  bool ok = true;
L
Lei Jin 已提交
3134
  int count = 0;
Y
Yi Wu 已提交
3135 3136
  for (miter->SeekToFirst(), dbiter->SeekToFirst();
       ok && miter->Valid() && dbiter->Valid(); miter->Next(), dbiter->Next()) {
L
Lei Jin 已提交
3137
    count++;
Y
Yi Wu 已提交
3138
    if (miter->key().compare(dbiter->key()) != 0) {
A
Aaron Gao 已提交
3139
      fprintf(stderr, "step %d: Key mismatch: '%s' vs. '%s'\n", step,
Y
Yi Wu 已提交
3140 3141 3142
              EscapeString(miter->key()).c_str(),
              EscapeString(dbiter->key()).c_str());
      ok = false;
3143 3144
      break;
    }
L
Lei Jin 已提交
3145

Y
Yi Wu 已提交
3146 3147 3148 3149
    if (miter->value().compare(dbiter->value()) != 0) {
      fprintf(stderr, "step %d: Value mismatch for key '%s': '%s' vs. '%s'\n",
              step, EscapeString(miter->key()).c_str(),
              EscapeString(miter->value()).c_str(),
3150
              EscapeString(dbiter->value()).c_str());
Y
Yi Wu 已提交
3151
      ok = false;
3152
    }
L
Lei Jin 已提交
3153 3154
  }

Y
Yi Wu 已提交
3155 3156 3157 3158 3159 3160
  if (ok) {
    if (miter->Valid() != dbiter->Valid()) {
      fprintf(stderr, "step %d: Mismatch at end of iterators: %d vs. %d\n",
              step, miter->Valid(), dbiter->Valid());
      ok = false;
    }
L
Lei Jin 已提交
3161
  }
Y
Yi Wu 已提交
3162 3163 3164 3165
  delete miter;
  delete dbiter;
  return ok;
}
L
Lei Jin 已提交
3166

Y
Yi Wu 已提交
3167 3168 3169
class DBTestRandomized : public DBTest,
                         public ::testing::WithParamInterface<int> {
 public:
3170
  void SetUp() override { option_config_ = GetParam(); }
L
Lei Jin 已提交
3171

Y
Yi Wu 已提交
3172 3173 3174 3175
  static std::vector<int> GenerateOptionConfigs() {
    std::vector<int> option_configs;
    // skip cuckoo hash as it does not support snapshot.
    for (int option_config = kDefault; option_config < kEnd; ++option_config) {
3176 3177
      if (!ShouldSkipOptions(option_config,
                             kSkipDeletesFilterFirst | kSkipNoSeekToLast)) {
Y
Yi Wu 已提交
3178 3179 3180 3181 3182
        option_configs.push_back(option_config);
      }
    }
    option_configs.push_back(kBlockBasedTableWithIndexRestartInterval);
    return option_configs;
L
Lei Jin 已提交
3183
  }
Y
Yi Wu 已提交
3184
};
3185

3186
INSTANTIATE_TEST_CASE_P(
Y
Yi Wu 已提交
3187 3188
    DBTestRandomized, DBTestRandomized,
    ::testing::ValuesIn(DBTestRandomized::GenerateOptionConfigs()));
3189

Y
Yi Wu 已提交
3190 3191 3192 3193
TEST_P(DBTestRandomized, Randomized) {
  anon::OptionsOverride options_override;
  options_override.skip_policy = kSkipNoSnapshot;
  Options options = CurrentOptions(options_override);
L
Lei Jin 已提交
3194
  DestroyAndReopen(options);
3195

Y
Yi Wu 已提交
3196 3197 3198 3199 3200 3201 3202 3203 3204 3205 3206 3207 3208 3209 3210 3211 3212 3213
  Random rnd(test::RandomSeed() + GetParam());
  ModelDB model(options);
  const int N = 10000;
  const Snapshot* model_snap = nullptr;
  const Snapshot* db_snap = nullptr;
  std::string k, v;
  for (int step = 0; step < N; step++) {
    // TODO(sanjay): Test Get() works
    int p = rnd.Uniform(100);
    int minimum = 0;
    if (option_config_ == kHashSkipList || option_config_ == kHashLinkList ||
        option_config_ == kPlainTableFirstBytePrefix ||
        option_config_ == kBlockBasedTableWithWholeKeyHashIndex ||
        option_config_ == kBlockBasedTableWithPrefixHashIndex) {
      minimum = 1;
    }
    if (p < 45) {  // Put
      k = RandomKey(&rnd, minimum);
M
mrambacher 已提交
3214 3215
      v = rnd.RandomString(rnd.OneIn(20) ? 100 + rnd.Uniform(100)
                                         : rnd.Uniform(8));
Y
Yi Wu 已提交
3216 3217 3218 3219 3220 3221 3222 3223 3224 3225 3226 3227 3228 3229 3230 3231 3232
      ASSERT_OK(model.Put(WriteOptions(), k, v));
      ASSERT_OK(db_->Put(WriteOptions(), k, v));
    } else if (p < 90) {  // Delete
      k = RandomKey(&rnd, minimum);
      ASSERT_OK(model.Delete(WriteOptions(), k));
      ASSERT_OK(db_->Delete(WriteOptions(), k));
    } else {  // Multi-element batch
      WriteBatch b;
      const int num = rnd.Uniform(8);
      for (int i = 0; i < num; i++) {
        if (i == 0 || !rnd.OneIn(10)) {
          k = RandomKey(&rnd, minimum);
        } else {
          // Periodically re-use the same key from the previous iter, so
          // we have multiple entries in the write batch for the same key
        }
        if (rnd.OneIn(2)) {
M
mrambacher 已提交
3233
          v = rnd.RandomString(rnd.Uniform(10));
Y
Yi Wu 已提交
3234 3235 3236 3237
          b.Put(k, v);
        } else {
          b.Delete(k);
        }
3238
      }
Y
Yi Wu 已提交
3239 3240 3241 3242 3243 3244 3245 3246 3247 3248 3249 3250
      ASSERT_OK(model.Write(WriteOptions(), &b));
      ASSERT_OK(db_->Write(WriteOptions(), &b));
    }

    if ((step % 100) == 0) {
      // For DB instances that use the hash index + block-based table, the
      // iterator will be invalid right when seeking a non-existent key, right
      // than return a key that is close to it.
      if (option_config_ != kBlockBasedTableWithWholeKeyHashIndex &&
          option_config_ != kBlockBasedTableWithPrefixHashIndex) {
        ASSERT_TRUE(CompareIterators(step, &model, db_, nullptr, nullptr));
        ASSERT_TRUE(CompareIterators(step, &model, db_, model_snap, db_snap));
3251
      }
Y
Yi Wu 已提交
3252 3253 3254 3255 3256 3257 3258 3259 3260 3261 3262 3263

      // Save a snapshot from each DB this time that we'll use next
      // time we compare things, to make sure the current state is
      // preserved with the snapshot
      if (model_snap != nullptr) model.ReleaseSnapshot(model_snap);
      if (db_snap != nullptr) db_->ReleaseSnapshot(db_snap);

      Reopen(options);
      ASSERT_TRUE(CompareIterators(step, &model, db_, nullptr, nullptr));

      model_snap = model.GetSnapshot();
      db_snap = db_->GetSnapshot();
3264 3265
    }
  }
Y
Yi Wu 已提交
3266 3267 3268
  if (model_snap != nullptr) model.ReleaseSnapshot(model_snap);
  if (db_snap != nullptr) db_->ReleaseSnapshot(db_snap);
}
3269
#endif  // ROCKSDB_VALGRIND_RUN
Y
Yi Wu 已提交
3270 3271 3272 3273 3274 3275 3276 3277

TEST_F(DBTest, BlockBasedTablePrefixIndexTest) {
  // create a DB with block prefix index
  BlockBasedTableOptions table_options;
  Options options = CurrentOptions();
  table_options.index_type = BlockBasedTableOptions::kHashSearch;
  options.table_factory.reset(NewBlockBasedTableFactory(table_options));
  options.prefix_extractor.reset(NewFixedPrefixTransform(1));
3278

Y
Yi Wu 已提交
3279 3280 3281 3282
  Reopen(options);
  ASSERT_OK(Put("k1", "v1"));
  Flush();
  ASSERT_OK(Put("k2", "v2"));
3283

Y
Yi Wu 已提交
3284 3285 3286 3287 3288
  // Reopen it without prefix extractor, make sure everything still works.
  // RocksDB should just fall back to the binary index.
  table_options.index_type = BlockBasedTableOptions::kBinarySearch;
  options.table_factory.reset(NewBlockBasedTableFactory(table_options));
  options.prefix_extractor.reset();
3289

Y
Yi Wu 已提交
3290 3291 3292
  Reopen(options);
  ASSERT_EQ("v1", Get("k1"));
  ASSERT_EQ("v2", Get("k2"));
3293 3294
}

3295 3296 3297 3298 3299 3300 3301 3302 3303 3304
TEST_F(DBTest, BlockBasedTablePrefixIndexTotalOrderSeek) {
  // create a DB with block prefix index
  BlockBasedTableOptions table_options;
  Options options = CurrentOptions();
  options.max_open_files = 10;
  table_options.index_type = BlockBasedTableOptions::kHashSearch;
  options.table_factory.reset(NewBlockBasedTableFactory(table_options));
  options.prefix_extractor.reset(NewFixedPrefixTransform(1));

  // RocksDB sanitize max open files to at least 20. Modify it back.
3305
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
3306 3307 3308 3309
      "SanitizeOptions::AfterChangeMaxOpenFiles", [&](void* arg) {
        int* max_open_files = static_cast<int*>(arg);
        *max_open_files = 11;
      });
3310
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
3311 3312 3313 3314 3315 3316 3317 3318 3319 3320 3321 3322 3323 3324 3325 3326 3327 3328 3329 3330 3331 3332 3333 3334 3335 3336 3337 3338 3339 3340 3341 3342

  Reopen(options);
  ASSERT_OK(Put("k1", "v1"));
  Flush();

  CompactRangeOptions cro;
  cro.change_level = true;
  cro.target_level = 1;
  ASSERT_OK(db_->CompactRange(cro, nullptr, nullptr));

  // Force evict tables
  dbfull()->TEST_table_cache()->SetCapacity(0);
  // Make table cache to keep one entry.
  dbfull()->TEST_table_cache()->SetCapacity(1);

  ReadOptions read_options;
  read_options.total_order_seek = true;
  {
    std::unique_ptr<Iterator> iter(db_->NewIterator(read_options));
    iter->Seek("k1");
    ASSERT_TRUE(iter->Valid());
    ASSERT_EQ("k1", iter->key().ToString());
  }

  // After total order seek, prefix index should still be used.
  read_options.total_order_seek = false;
  {
    std::unique_ptr<Iterator> iter(db_->NewIterator(read_options));
    iter->Seek("k1");
    ASSERT_TRUE(iter->Valid());
    ASSERT_EQ("k1", iter->key().ToString());
  }
3343
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
3344 3345
}

Y
Yi Wu 已提交
3346 3347 3348
TEST_F(DBTest, ChecksumTest) {
  BlockBasedTableOptions table_options;
  Options options = CurrentOptions();
I
Igor Canadi 已提交
3349

Y
Yi Wu 已提交
3350 3351 3352 3353 3354 3355
  table_options.checksum = kCRC32c;
  options.table_factory.reset(NewBlockBasedTableFactory(table_options));
  Reopen(options);
  ASSERT_OK(Put("a", "b"));
  ASSERT_OK(Put("c", "d"));
  ASSERT_OK(Flush());  // table with crc checksum
I
Igor Canadi 已提交
3356

Y
Yi Wu 已提交
3357 3358 3359 3360 3361 3362
  table_options.checksum = kxxHash;
  options.table_factory.reset(NewBlockBasedTableFactory(table_options));
  Reopen(options);
  ASSERT_OK(Put("e", "f"));
  ASSERT_OK(Put("g", "h"));
  ASSERT_OK(Flush());  // table with xxhash checksum
I
Igor Canadi 已提交
3363

Y
Yi Wu 已提交
3364 3365 3366 3367 3368 3369 3370
  table_options.checksum = kCRC32c;
  options.table_factory.reset(NewBlockBasedTableFactory(table_options));
  Reopen(options);
  ASSERT_EQ("b", Get("a"));
  ASSERT_EQ("d", Get("c"));
  ASSERT_EQ("f", Get("e"));
  ASSERT_EQ("h", Get("g"));
I
Igor Canadi 已提交
3371

Y
Yi Wu 已提交
3372 3373 3374 3375 3376 3377 3378
  table_options.checksum = kCRC32c;
  options.table_factory.reset(NewBlockBasedTableFactory(table_options));
  Reopen(options);
  ASSERT_EQ("b", Get("a"));
  ASSERT_EQ("d", Get("c"));
  ASSERT_EQ("f", Get("e"));
  ASSERT_EQ("h", Get("g"));
I
Igor Canadi 已提交
3379 3380
}

I
Islam AbdelRahman 已提交
3381
#ifndef ROCKSDB_LITE
Y
Yi Wu 已提交
3382 3383 3384 3385 3386 3387 3388 3389 3390 3391 3392 3393 3394 3395 3396 3397 3398
TEST_P(DBTestWithParam, FIFOCompactionTest) {
  for (int iter = 0; iter < 2; ++iter) {
    // first iteration -- auto compaction
    // second iteration -- manual compaction
    Options options;
    options.compaction_style = kCompactionStyleFIFO;
    options.write_buffer_size = 100 << 10;  // 100KB
    options.arena_block_size = 4096;
    options.compaction_options_fifo.max_table_files_size = 500 << 10;  // 500KB
    options.compression = kNoCompression;
    options.create_if_missing = true;
    options.max_subcompactions = max_subcompactions_;
    if (iter == 1) {
      options.disable_auto_compactions = true;
    }
    options = CurrentOptions(options);
    DestroyAndReopen(options);
I
Igor Canadi 已提交
3399

Y
Yi Wu 已提交
3400 3401 3402
    Random rnd(301);
    for (int i = 0; i < 6; ++i) {
      for (int j = 0; j < 110; ++j) {
M
mrambacher 已提交
3403
        ASSERT_OK(Put(ToString(i * 100 + j), rnd.RandomString(980)));
Y
Yi Wu 已提交
3404 3405 3406 3407 3408 3409 3410 3411 3412 3413 3414 3415 3416 3417 3418 3419
      }
      // flush should happen here
      ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable());
    }
    if (iter == 0) {
      ASSERT_OK(dbfull()->TEST_WaitForCompact());
    } else {
      CompactRangeOptions cro;
      cro.exclusive_manual_compaction = exclusive_manual_compaction_;
      ASSERT_OK(db_->CompactRange(cro, nullptr, nullptr));
    }
    // only 5 files should survive
    ASSERT_EQ(NumTableFilesAtLevel(0), 5);
    for (int i = 0; i < 50; ++i) {
      // these keys should be deleted in previous compaction
      ASSERT_EQ("NOT_FOUND", Get(ToString(i)));
I
Igor Canadi 已提交
3420 3421
    }
  }
Y
Yi Wu 已提交
3422
}
3423 3424 3425 3426 3427 3428 3429 3430 3431 3432 3433 3434 3435 3436 3437 3438 3439 3440

TEST_F(DBTest, FIFOCompactionTestWithCompaction) {
  Options options;
  options.compaction_style = kCompactionStyleFIFO;
  options.write_buffer_size = 20 << 10;  // 20K
  options.arena_block_size = 4096;
  options.compaction_options_fifo.max_table_files_size = 1500 << 10;  // 1MB
  options.compaction_options_fifo.allow_compaction = true;
  options.level0_file_num_compaction_trigger = 6;
  options.compression = kNoCompression;
  options.create_if_missing = true;
  options = CurrentOptions(options);
  DestroyAndReopen(options);

  Random rnd(301);
  for (int i = 0; i < 60; i++) {
    // Generate and flush a file about 20KB.
    for (int j = 0; j < 20; j++) {
M
mrambacher 已提交
3441
      ASSERT_OK(Put(ToString(i * 20 + j), rnd.RandomString(980)));
3442 3443 3444 3445 3446 3447 3448 3449
    }
    Flush();
    ASSERT_OK(dbfull()->TEST_WaitForCompact());
  }
  // It should be compacted to 10 files.
  ASSERT_EQ(NumTableFilesAtLevel(0), 10);

  for (int i = 0; i < 60; i++) {
S
Sagar Vemuri 已提交
3450
    // Generate and flush a file about 20KB.
3451
    for (int j = 0; j < 20; j++) {
M
mrambacher 已提交
3452
      ASSERT_OK(Put(ToString(i * 20 + j + 2000), rnd.RandomString(980)));
3453 3454 3455 3456 3457 3458 3459 3460 3461 3462 3463 3464
    }
    Flush();
    ASSERT_OK(dbfull()->TEST_WaitForCompact());
  }

  // It should be compacted to no more than 20 files.
  ASSERT_GT(NumTableFilesAtLevel(0), 10);
  ASSERT_LT(NumTableFilesAtLevel(0), 18);
  // Size limit is still guaranteed.
  ASSERT_LE(SizeAtLevel(0),
            options.compaction_options_fifo.max_table_files_size);
}
S
Sagar Vemuri 已提交
3465

3466 3467 3468 3469 3470 3471 3472 3473 3474 3475 3476 3477 3478 3479 3480 3481
TEST_F(DBTest, FIFOCompactionStyleWithCompactionAndDelete) {
  Options options;
  options.compaction_style = kCompactionStyleFIFO;
  options.write_buffer_size = 20 << 10;  // 20K
  options.arena_block_size = 4096;
  options.compaction_options_fifo.max_table_files_size = 1500 << 10;  // 1MB
  options.compaction_options_fifo.allow_compaction = true;
  options.level0_file_num_compaction_trigger = 3;
  options.compression = kNoCompression;
  options.create_if_missing = true;
  options = CurrentOptions(options);
  DestroyAndReopen(options);

  Random rnd(301);
  for (int i = 0; i < 3; i++) {
    // Each file contains a different key which will be dropped later.
M
mrambacher 已提交
3482
    ASSERT_OK(Put("a" + ToString(i), rnd.RandomString(500)));
3483
    ASSERT_OK(Put("key" + ToString(i), ""));
M
mrambacher 已提交
3484
    ASSERT_OK(Put("z" + ToString(i), rnd.RandomString(500)));
3485 3486 3487 3488 3489 3490 3491 3492 3493
    Flush();
    ASSERT_OK(dbfull()->TEST_WaitForCompact());
  }
  ASSERT_EQ(NumTableFilesAtLevel(0), 1);
  for (int i = 0; i < 3; i++) {
    ASSERT_EQ("", Get("key" + ToString(i)));
  }
  for (int i = 0; i < 3; i++) {
    // Each file contains a different key which will be dropped later.
M
mrambacher 已提交
3494
    ASSERT_OK(Put("a" + ToString(i), rnd.RandomString(500)));
3495
    ASSERT_OK(Delete("key" + ToString(i)));
M
mrambacher 已提交
3496
    ASSERT_OK(Put("z" + ToString(i), rnd.RandomString(500)));
3497 3498 3499 3500 3501 3502 3503 3504 3505
    Flush();
    ASSERT_OK(dbfull()->TEST_WaitForCompact());
  }
  ASSERT_EQ(NumTableFilesAtLevel(0), 2);
  for (int i = 0; i < 3; i++) {
    ASSERT_EQ("NOT_FOUND", Get("key" + ToString(i)));
  }
}

3506 3507 3508 3509 3510 3511 3512 3513 3514 3515 3516 3517 3518 3519 3520 3521
// Check that FIFO-with-TTL is not supported with max_open_files != -1.
TEST_F(DBTest, FIFOCompactionWithTTLAndMaxOpenFilesTest) {
  Options options;
  options.compaction_style = kCompactionStyleFIFO;
  options.create_if_missing = true;
  options.ttl = 600;  // seconds

  // TTL is now supported with max_open_files != -1.
  options.max_open_files = 100;
  options = CurrentOptions(options);
  ASSERT_OK(TryReopen(options));

  options.max_open_files = -1;
  ASSERT_OK(TryReopen(options));
}

S
Sagar Vemuri 已提交
3522 3523 3524 3525 3526
// Check that FIFO-with-TTL is supported only with BlockBasedTableFactory.
TEST_F(DBTest, FIFOCompactionWithTTLAndVariousTableFormatsTest) {
  Options options;
  options.compaction_style = kCompactionStyleFIFO;
  options.create_if_missing = true;
3527
  options.ttl = 600;  // seconds
S
Sagar Vemuri 已提交
3528 3529 3530 3531 3532 3533 3534 3535 3536 3537 3538 3539 3540 3541

  options = CurrentOptions(options);
  options.table_factory.reset(NewBlockBasedTableFactory());
  ASSERT_OK(TryReopen(options));

  Destroy(options);
  options.table_factory.reset(NewPlainTableFactory());
  ASSERT_TRUE(TryReopen(options).IsNotSupported());

  Destroy(options);
  options.table_factory.reset(NewAdaptiveTableFactory());
  ASSERT_TRUE(TryReopen(options).IsNotSupported());
}

3542
TEST_F(DBTest, FIFOCompactionWithTTLTest) {
S
Sagar Vemuri 已提交
3543 3544 3545 3546 3547 3548
  Options options;
  options.compaction_style = kCompactionStyleFIFO;
  options.write_buffer_size = 10 << 10;  // 10KB
  options.arena_block_size = 4096;
  options.compression = kNoCompression;
  options.create_if_missing = true;
3549
  env_->SetMockSleep();
3550
  options.env = env_;
S
Sagar Vemuri 已提交
3551 3552 3553 3554

  // Test to make sure that all files with expired ttl are deleted on next
  // manual compaction.
  {
3555 3556
    // NOTE: Presumed unnecessary and removed: resetting mock time in env

S
Sagar Vemuri 已提交
3557 3558
    options.compaction_options_fifo.max_table_files_size = 150 << 10;  // 150KB
    options.compaction_options_fifo.allow_compaction = false;
3559
    options.ttl = 1 * 60 * 60 ;  // 1 hour
S
Sagar Vemuri 已提交
3560 3561 3562 3563 3564 3565 3566
    options = CurrentOptions(options);
    DestroyAndReopen(options);

    Random rnd(301);
    for (int i = 0; i < 10; i++) {
      // Generate and flush a file about 10KB.
      for (int j = 0; j < 10; j++) {
M
mrambacher 已提交
3567
        ASSERT_OK(Put(ToString(i * 20 + j), rnd.RandomString(980)));
S
Sagar Vemuri 已提交
3568 3569
      }
      Flush();
3570
      ASSERT_OK(dbfull()->TEST_WaitForCompact());
S
Sagar Vemuri 已提交
3571 3572 3573
    }
    ASSERT_EQ(NumTableFilesAtLevel(0), 10);

3574
    // Sleep for 2 hours -- which is much greater than TTL.
3575
    env_->MockSleepForSeconds(2 * 60 * 60);
3576 3577 3578

    // Since no flushes and compactions have run, the db should still be in
    // the same state even after considerable time has passed.
S
Sagar Vemuri 已提交
3579 3580 3581 3582 3583 3584 3585 3586 3587 3588 3589 3590
    ASSERT_OK(dbfull()->TEST_WaitForCompact());
    ASSERT_EQ(NumTableFilesAtLevel(0), 10);

    dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);
    ASSERT_EQ(NumTableFilesAtLevel(0), 0);
  }

  // Test to make sure that all files with expired ttl are deleted on next
  // automatic compaction.
  {
    options.compaction_options_fifo.max_table_files_size = 150 << 10;  // 150KB
    options.compaction_options_fifo.allow_compaction = false;
3591
    options.ttl = 1 * 60 * 60;  // 1 hour
S
Sagar Vemuri 已提交
3592 3593 3594 3595 3596 3597 3598
    options = CurrentOptions(options);
    DestroyAndReopen(options);

    Random rnd(301);
    for (int i = 0; i < 10; i++) {
      // Generate and flush a file about 10KB.
      for (int j = 0; j < 10; j++) {
M
mrambacher 已提交
3599
        ASSERT_OK(Put(ToString(i * 20 + j), rnd.RandomString(980)));
S
Sagar Vemuri 已提交
3600 3601
      }
      Flush();
3602
      ASSERT_OK(dbfull()->TEST_WaitForCompact());
S
Sagar Vemuri 已提交
3603 3604 3605
    }
    ASSERT_EQ(NumTableFilesAtLevel(0), 10);

3606
    // Sleep for 2 hours -- which is much greater than TTL.
3607
    env_->MockSleepForSeconds(2 * 60 * 60);
3608
    // Just to make sure that we are in the same state even after sleeping.
S
Sagar Vemuri 已提交
3609 3610 3611 3612 3613 3614
    ASSERT_OK(dbfull()->TEST_WaitForCompact());
    ASSERT_EQ(NumTableFilesAtLevel(0), 10);

    // Create 1 more file to trigger TTL compaction. The old files are dropped.
    for (int i = 0; i < 1; i++) {
      for (int j = 0; j < 10; j++) {
M
mrambacher 已提交
3615
        ASSERT_OK(Put(ToString(i * 20 + j), rnd.RandomString(980)));
S
Sagar Vemuri 已提交
3616 3617 3618 3619 3620 3621 3622 3623 3624 3625 3626 3627 3628 3629
      }
      Flush();
    }

    ASSERT_OK(dbfull()->TEST_WaitForCompact());
    // Only the new 10 files remain.
    ASSERT_EQ(NumTableFilesAtLevel(0), 1);
    ASSERT_LE(SizeAtLevel(0),
              options.compaction_options_fifo.max_table_files_size);
  }

  // Test that shows the fall back to size-based FIFO compaction if TTL-based
  // deletion doesn't move the total size to be less than max_table_files_size.
  {
3630
    options.write_buffer_size = 10 << 10;                              // 10KB
S
Sagar Vemuri 已提交
3631 3632
    options.compaction_options_fifo.max_table_files_size = 150 << 10;  // 150KB
    options.compaction_options_fifo.allow_compaction = false;
3633
    options.ttl =  1 * 60 * 60;  // 1 hour
S
Sagar Vemuri 已提交
3634 3635 3636 3637 3638 3639 3640
    options = CurrentOptions(options);
    DestroyAndReopen(options);

    Random rnd(301);
    for (int i = 0; i < 3; i++) {
      // Generate and flush a file about 10KB.
      for (int j = 0; j < 10; j++) {
M
mrambacher 已提交
3641
        ASSERT_OK(Put(ToString(i * 20 + j), rnd.RandomString(980)));
S
Sagar Vemuri 已提交
3642 3643
      }
      Flush();
3644
      ASSERT_OK(dbfull()->TEST_WaitForCompact());
S
Sagar Vemuri 已提交
3645 3646 3647
    }
    ASSERT_EQ(NumTableFilesAtLevel(0), 3);

3648
    // Sleep for 2 hours -- which is much greater than TTL.
3649
    env_->MockSleepForSeconds(2 * 60 * 60);
3650
    // Just to make sure that we are in the same state even after sleeping.
S
Sagar Vemuri 已提交
3651 3652 3653 3654 3655
    ASSERT_OK(dbfull()->TEST_WaitForCompact());
    ASSERT_EQ(NumTableFilesAtLevel(0), 3);

    for (int i = 0; i < 5; i++) {
      for (int j = 0; j < 140; j++) {
M
mrambacher 已提交
3656
        ASSERT_OK(Put(ToString(i * 20 + j), rnd.RandomString(980)));
S
Sagar Vemuri 已提交
3657 3658
      }
      Flush();
3659
      ASSERT_OK(dbfull()->TEST_WaitForCompact());
S
Sagar Vemuri 已提交
3660 3661 3662 3663 3664 3665 3666 3667 3668 3669
    }
    // Size limit is still guaranteed.
    ASSERT_LE(SizeAtLevel(0),
              options.compaction_options_fifo.max_table_files_size);
  }

  // Test with TTL + Intra-L0 compactions.
  {
    options.compaction_options_fifo.max_table_files_size = 150 << 10;  // 150KB
    options.compaction_options_fifo.allow_compaction = true;
3670
    options.ttl = 1 * 60 * 60;  // 1 hour
S
Sagar Vemuri 已提交
3671 3672 3673 3674 3675 3676 3677 3678
    options.level0_file_num_compaction_trigger = 6;
    options = CurrentOptions(options);
    DestroyAndReopen(options);

    Random rnd(301);
    for (int i = 0; i < 10; i++) {
      // Generate and flush a file about 10KB.
      for (int j = 0; j < 10; j++) {
M
mrambacher 已提交
3679
        ASSERT_OK(Put(ToString(i * 20 + j), rnd.RandomString(980)));
S
Sagar Vemuri 已提交
3680 3681
      }
      Flush();
3682
      ASSERT_OK(dbfull()->TEST_WaitForCompact());
S
Sagar Vemuri 已提交
3683 3684 3685 3686 3687 3688
    }
    // With Intra-L0 compaction, out of 10 files, 6 files will be compacted to 1
    // (due to level0_file_num_compaction_trigger = 6).
    // So total files = 1 + remaining 4 = 5.
    ASSERT_EQ(NumTableFilesAtLevel(0), 5);

3689
    // Sleep for 2 hours -- which is much greater than TTL.
3690
    env_->MockSleepForSeconds(2 * 60 * 60);
3691
    // Just to make sure that we are in the same state even after sleeping.
S
Sagar Vemuri 已提交
3692 3693 3694 3695 3696 3697
    ASSERT_OK(dbfull()->TEST_WaitForCompact());
    ASSERT_EQ(NumTableFilesAtLevel(0), 5);

    // Create 10 more files. The old 5 files are dropped as their ttl expired.
    for (int i = 0; i < 10; i++) {
      for (int j = 0; j < 10; j++) {
M
mrambacher 已提交
3698
        ASSERT_OK(Put(ToString(i * 20 + j), rnd.RandomString(980)));
S
Sagar Vemuri 已提交
3699 3700
      }
      Flush();
3701
      ASSERT_OK(dbfull()->TEST_WaitForCompact());
S
Sagar Vemuri 已提交
3702 3703 3704 3705 3706 3707 3708 3709 3710 3711 3712 3713
    }
    ASSERT_EQ(NumTableFilesAtLevel(0), 5);
    ASSERT_LE(SizeAtLevel(0),
              options.compaction_options_fifo.max_table_files_size);
  }

  // Test with large TTL + Intra-L0 compactions.
  // Files dropped based on size, as ttl doesn't kick in.
  {
    options.write_buffer_size = 20 << 10;                               // 20K
    options.compaction_options_fifo.max_table_files_size = 1500 << 10;  // 1.5MB
    options.compaction_options_fifo.allow_compaction = true;
3714
    options.ttl = 1 * 60 * 60;  // 1 hour
S
Sagar Vemuri 已提交
3715 3716 3717 3718 3719 3720 3721 3722
    options.level0_file_num_compaction_trigger = 6;
    options = CurrentOptions(options);
    DestroyAndReopen(options);

    Random rnd(301);
    for (int i = 0; i < 60; i++) {
      // Generate and flush a file about 20KB.
      for (int j = 0; j < 20; j++) {
M
mrambacher 已提交
3723
        ASSERT_OK(Put(ToString(i * 20 + j), rnd.RandomString(980)));
S
Sagar Vemuri 已提交
3724 3725
      }
      Flush();
3726
      ASSERT_OK(dbfull()->TEST_WaitForCompact());
S
Sagar Vemuri 已提交
3727 3728 3729 3730 3731 3732 3733
    }
    // It should be compacted to 10 files.
    ASSERT_EQ(NumTableFilesAtLevel(0), 10);

    for (int i = 0; i < 60; i++) {
      // Generate and flush a file about 20KB.
      for (int j = 0; j < 20; j++) {
M
mrambacher 已提交
3734
        ASSERT_OK(Put(ToString(i * 20 + j + 2000), rnd.RandomString(980)));
S
Sagar Vemuri 已提交
3735 3736
      }
      Flush();
3737
      ASSERT_OK(dbfull()->TEST_WaitForCompact());
S
Sagar Vemuri 已提交
3738 3739 3740 3741 3742 3743 3744 3745 3746 3747
    }

    // It should be compacted to no more than 20 files.
    ASSERT_GT(NumTableFilesAtLevel(0), 10);
    ASSERT_LT(NumTableFilesAtLevel(0), 18);
    // Size limit is still guaranteed.
    ASSERT_LE(SizeAtLevel(0),
              options.compaction_options_fifo.max_table_files_size);
  }
}
Y
Yi Wu 已提交
3748
#endif  // ROCKSDB_LITE
I
Igor Canadi 已提交
3749

Y
Yi Wu 已提交
3750 3751 3752
#ifndef ROCKSDB_LITE
/*
 * This test is not reliable enough as it heavily depends on disk behavior.
S
Siying Dong 已提交
3753
 * Disable as it is flaky.
Y
Yi Wu 已提交
3754
 */
S
Siying Dong 已提交
3755
TEST_F(DBTest, DISABLED_RateLimitingTest) {
3756
  Options options = CurrentOptions();
Y
Yi Wu 已提交
3757
  options.write_buffer_size = 1 << 20;  // 1MB
3758
  options.level0_file_num_compaction_trigger = 2;
Y
Yi Wu 已提交
3759 3760 3761
  options.target_file_size_base = 1 << 20;     // 1MB
  options.max_bytes_for_level_base = 4 << 20;  // 4MB
  options.max_bytes_for_level_multiplier = 4;
3762
  options.compression = kNoCompression;
Y
Yi Wu 已提交
3763 3764
  options.create_if_missing = true;
  options.env = env_;
3765
  options.statistics = ROCKSDB_NAMESPACE::CreateDBStatistics();
Y
Yi Wu 已提交
3766 3767
  options.IncreaseParallelism(4);
  DestroyAndReopen(options);
3768

Y
Yi Wu 已提交
3769 3770
  WriteOptions wo;
  wo.disableWAL = true;
3771

Y
Yi Wu 已提交
3772 3773 3774 3775 3776
  // # no rate limiting
  Random rnd(301);
  uint64_t start = env_->NowMicros();
  // Write ~96M data
  for (int64_t i = 0; i < (96 << 10); ++i) {
M
mrambacher 已提交
3777
    ASSERT_OK(Put(rnd.RandomString(32), rnd.RandomString((1 << 10) + 1), wo));
3778
  }
Y
Yi Wu 已提交
3779 3780
  uint64_t elapsed = env_->NowMicros() - start;
  double raw_rate = env_->bytes_written_ * 1000000.0 / elapsed;
3781 3782 3783
  uint64_t rate_limiter_drains =
      TestGetTickerCount(options, NUMBER_RATE_LIMITER_DRAINS);
  ASSERT_EQ(0, rate_limiter_drains);
Y
Yi Wu 已提交
3784 3785 3786 3787 3788 3789 3790 3791 3792 3793 3794
  Close();

  // # rate limiting with 0.7 x threshold
  options.rate_limiter.reset(
      NewGenericRateLimiter(static_cast<int64_t>(0.7 * raw_rate)));
  env_->bytes_written_ = 0;
  DestroyAndReopen(options);

  start = env_->NowMicros();
  // Write ~96M data
  for (int64_t i = 0; i < (96 << 10); ++i) {
M
mrambacher 已提交
3795
    ASSERT_OK(Put(rnd.RandomString(32), rnd.RandomString((1 << 10) + 1), wo));
Y
Yi Wu 已提交
3796
  }
3797 3798 3799
  rate_limiter_drains =
      TestGetTickerCount(options, NUMBER_RATE_LIMITER_DRAINS) -
      rate_limiter_drains;
3800
  elapsed = env_->NowMicros() - start;
Y
Yi Wu 已提交
3801 3802
  Close();
  ASSERT_EQ(options.rate_limiter->GetTotalBytesThrough(), env_->bytes_written_);
3803 3804
  // Most intervals should've been drained (interval time is 100ms, elapsed is
  // micros)
3805
  ASSERT_GT(rate_limiter_drains, 0);
A
Andrew Kryczka 已提交
3806
  ASSERT_LE(rate_limiter_drains, elapsed / 100000 + 1);
Y
Yi Wu 已提交
3807 3808 3809 3810 3811 3812 3813 3814 3815
  double ratio = env_->bytes_written_ * 1000000 / elapsed / raw_rate;
  fprintf(stderr, "write rate ratio = %.2lf, expected 0.7\n", ratio);
  ASSERT_TRUE(ratio < 0.8);

  // # rate limiting with half of the raw_rate
  options.rate_limiter.reset(
      NewGenericRateLimiter(static_cast<int64_t>(raw_rate / 2)));
  env_->bytes_written_ = 0;
  DestroyAndReopen(options);
3816

Y
Yi Wu 已提交
3817 3818 3819
  start = env_->NowMicros();
  // Write ~96M data
  for (int64_t i = 0; i < (96 << 10); ++i) {
M
mrambacher 已提交
3820
    ASSERT_OK(Put(rnd.RandomString(32), rnd.RandomString((1 << 10) + 1), wo));
3821
  }
Y
Yi Wu 已提交
3822
  elapsed = env_->NowMicros() - start;
3823 3824 3825
  rate_limiter_drains =
      TestGetTickerCount(options, NUMBER_RATE_LIMITER_DRAINS) -
      rate_limiter_drains;
Y
Yi Wu 已提交
3826 3827
  Close();
  ASSERT_EQ(options.rate_limiter->GetTotalBytesThrough(), env_->bytes_written_);
3828 3829 3830
  // Most intervals should've been drained (interval time is 100ms, elapsed is
  // micros)
  ASSERT_GT(rate_limiter_drains, elapsed / 100000 / 2);
A
Andrew Kryczka 已提交
3831
  ASSERT_LE(rate_limiter_drains, elapsed / 100000 + 1);
Y
Yi Wu 已提交
3832 3833 3834 3835
  ratio = env_->bytes_written_ * 1000000 / elapsed / raw_rate;
  fprintf(stderr, "write rate ratio = %.2lf, expected 0.5\n", ratio);
  ASSERT_LT(ratio, 0.6);
}
3836

Y
Yi Wu 已提交
3837 3838 3839 3840 3841
TEST_F(DBTest, TableOptionsSanitizeTest) {
  Options options = CurrentOptions();
  options.create_if_missing = true;
  DestroyAndReopen(options);
  ASSERT_EQ(db_->GetOptions().allow_mmap_reads, false);
3842

3843
  options.table_factory.reset(NewPlainTableFactory());
Y
Yi Wu 已提交
3844 3845 3846
  options.prefix_extractor.reset(NewNoopTransform());
  Destroy(options);
  ASSERT_TRUE(!TryReopen(options).IsNotSupported());
3847

Y
Yi Wu 已提交
3848 3849 3850 3851 3852 3853 3854 3855 3856 3857 3858
  // Test for check of prefix_extractor when hash index is used for
  // block-based table
  BlockBasedTableOptions to;
  to.index_type = BlockBasedTableOptions::kHashSearch;
  options = CurrentOptions();
  options.create_if_missing = true;
  options.table_factory.reset(NewBlockBasedTableFactory(to));
  ASSERT_TRUE(TryReopen(options).IsInvalidArgument());
  options.prefix_extractor.reset(NewFixedPrefixTransform(1));
  ASSERT_OK(TryReopen(options));
}
3859

Y
Yi Wu 已提交
3860
TEST_F(DBTest, ConcurrentMemtableNotSupported) {
3861
  Options options = CurrentOptions();
Y
Yi Wu 已提交
3862 3863 3864 3865
  options.allow_concurrent_memtable_write = true;
  options.soft_pending_compaction_bytes_limit = 0;
  options.hard_pending_compaction_bytes_limit = 100;
  options.create_if_missing = true;
3866

Y
Yi Wu 已提交
3867 3868 3869
  DestroyDB(dbname_, options);
  options.memtable_factory.reset(NewHashLinkListRepFactory(4, 0, 3, true, 4));
  ASSERT_NOK(TryReopen(options));
3870

Y
Yi Wu 已提交
3871 3872
  options.memtable_factory.reset(new SkipListFactory);
  ASSERT_OK(TryReopen(options));
3873

Y
Yi Wu 已提交
3874 3875 3876 3877 3878 3879
  ColumnFamilyOptions cf_options(options);
  cf_options.memtable_factory.reset(
      NewHashLinkListRepFactory(4, 0, 3, true, 4));
  ColumnFamilyHandle* handle;
  ASSERT_NOK(db_->CreateColumnFamily(cf_options, "name", &handle));
}
3880

Y
Yi Wu 已提交
3881
#endif  // ROCKSDB_LITE
3882

Y
Yi Wu 已提交
3883 3884 3885 3886
TEST_F(DBTest, SanitizeNumThreads) {
  for (int attempt = 0; attempt < 2; attempt++) {
    const size_t kTotalTasks = 8;
    test::SleepingBackgroundTask sleeping_tasks[kTotalTasks];
3887

Y
Yi Wu 已提交
3888 3889 3890 3891
    Options options = CurrentOptions();
    if (attempt == 0) {
      options.max_background_compactions = 3;
      options.max_background_flushes = 2;
3892
    }
Y
Yi Wu 已提交
3893 3894
    options.create_if_missing = true;
    DestroyAndReopen(options);
3895

Y
Yi Wu 已提交
3896 3897 3898 3899 3900 3901
    for (size_t i = 0; i < kTotalTasks; i++) {
      // Insert 5 tasks to low priority queue and 5 tasks to high priority queue
      env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask,
                     &sleeping_tasks[i],
                     (i < 4) ? Env::Priority::LOW : Env::Priority::HIGH);
    }
3902

3903 3904 3905 3906 3907 3908 3909 3910
    // Wait until 10s for they are scheduled.
    for (int i = 0; i < 10000; i++) {
      if (options.env->GetThreadPoolQueueLen(Env::Priority::LOW) <= 1 &&
          options.env->GetThreadPoolQueueLen(Env::Priority::HIGH) <= 2) {
        break;
      }
      env_->SleepForMicroseconds(1000);
    }
3911

Y
Yi Wu 已提交
3912 3913 3914 3915 3916 3917 3918 3919
    // pool size 3, total task 4. Queue size should be 1.
    ASSERT_EQ(1U, options.env->GetThreadPoolQueueLen(Env::Priority::LOW));
    // pool size 2, total task 4. Queue size should be 2.
    ASSERT_EQ(2U, options.env->GetThreadPoolQueueLen(Env::Priority::HIGH));

    for (size_t i = 0; i < kTotalTasks; i++) {
      sleeping_tasks[i].WakeUp();
      sleeping_tasks[i].WaitUntilDone();
3920
    }
Y
Yi Wu 已提交
3921 3922 3923 3924 3925

    ASSERT_OK(Put("abc", "def"));
    ASSERT_EQ("def", Get("abc"));
    Flush();
    ASSERT_EQ("def", Get("abc"));
3926 3927 3928
  }
}

Y
Yi Wu 已提交
3929
TEST_F(DBTest, WriteSingleThreadEntry) {
D
Dmitri Smirnov 已提交
3930
  std::vector<port::Thread> threads;
Y
Yi Wu 已提交
3931 3932 3933 3934 3935 3936 3937 3938 3939 3940
  dbfull()->TEST_LockMutex();
  auto w = dbfull()->TEST_BeginWrite();
  threads.emplace_back([&] { Put("a", "b"); });
  env_->SleepForMicroseconds(10000);
  threads.emplace_back([&] { Flush(); });
  env_->SleepForMicroseconds(10000);
  dbfull()->TEST_UnlockMutex();
  dbfull()->TEST_LockMutex();
  dbfull()->TEST_EndWrite(w);
  dbfull()->TEST_UnlockMutex();
3941

Y
Yi Wu 已提交
3942 3943 3944
  for (auto& t : threads) {
    t.join();
  }
3945 3946
}

3947 3948 3949 3950 3951 3952 3953 3954 3955 3956 3957 3958 3959 3960 3961 3962 3963 3964 3965 3966 3967 3968 3969 3970 3971 3972 3973 3974 3975 3976 3977 3978 3979 3980 3981 3982 3983 3984 3985 3986 3987 3988 3989 3990 3991 3992 3993 3994 3995 3996
TEST_F(DBTest, ConcurrentFlushWAL) {
  const size_t cnt = 100;
  Options options;
  WriteOptions wopt;
  ReadOptions ropt;
  for (bool two_write_queues : {false, true}) {
    for (bool manual_wal_flush : {false, true}) {
      options.two_write_queues = two_write_queues;
      options.manual_wal_flush = manual_wal_flush;
      options.create_if_missing = true;
      DestroyAndReopen(options);
      std::vector<port::Thread> threads;
      threads.emplace_back([&] {
        for (size_t i = 0; i < cnt; i++) {
          auto istr = ToString(i);
          db_->Put(wopt, db_->DefaultColumnFamily(), "a" + istr, "b" + istr);
        }
      });
      if (two_write_queues) {
        threads.emplace_back([&] {
          for (size_t i = cnt; i < 2 * cnt; i++) {
            auto istr = ToString(i);
            WriteBatch batch;
            batch.Put("a" + istr, "b" + istr);
            dbfull()->WriteImpl(wopt, &batch, nullptr, nullptr, 0, true);
          }
        });
      }
      threads.emplace_back([&] {
        for (size_t i = 0; i < cnt * 100; i++) {  // FlushWAL is faster than Put
          db_->FlushWAL(false);
        }
      });
      for (auto& t : threads) {
        t.join();
      }
      options.create_if_missing = false;
      // Recover from the wal and make sure that it is not corrupted
      Reopen(options);
      for (size_t i = 0; i < cnt; i++) {
        PinnableSlice pval;
        auto istr = ToString(i);
        ASSERT_OK(
            db_->Get(ropt, db_->DefaultColumnFamily(), "a" + istr, &pval));
        ASSERT_TRUE(pval == ("b" + istr));
      }
    }
  }
}

Y
Yi Wu 已提交
3997 3998 3999 4000 4001 4002 4003 4004 4005 4006 4007 4008 4009 4010 4011 4012 4013 4014
#ifndef ROCKSDB_LITE
TEST_F(DBTest, DynamicMemtableOptions) {
  const uint64_t k64KB = 1 << 16;
  const uint64_t k128KB = 1 << 17;
  const uint64_t k5KB = 5 * 1024;
  Options options;
  options.env = env_;
  options.create_if_missing = true;
  options.compression = kNoCompression;
  options.max_background_compactions = 1;
  options.write_buffer_size = k64KB;
  options.arena_block_size = 16 * 1024;
  options.max_write_buffer_number = 2;
  // Don't trigger compact/slowdown/stop
  options.level0_file_num_compaction_trigger = 1024;
  options.level0_slowdown_writes_trigger = 1024;
  options.level0_stop_writes_trigger = 1024;
  DestroyAndReopen(options);
I
Igor Canadi 已提交
4015

4016
  auto gen_l0_kb = [this](int size) {
4017
    const int kNumPutsBeforeWaitForFlush = 64;
Y
Yi Wu 已提交
4018 4019
    Random rnd(301);
    for (int i = 0; i < size; i++) {
M
mrambacher 已提交
4020
      ASSERT_OK(Put(Key(i), rnd.RandomString(1024)));
I
Igor Canadi 已提交
4021

Y
Yi Wu 已提交
4022 4023 4024 4025 4026 4027 4028 4029 4030 4031 4032
      // The following condition prevents a race condition between flush jobs
      // acquiring work and this thread filling up multiple memtables. Without
      // this, the flush might produce less files than expected because
      // multiple memtables are flushed into a single L0 file. This race
      // condition affects assertion (A).
      if (i % kNumPutsBeforeWaitForFlush == kNumPutsBeforeWaitForFlush - 1) {
        dbfull()->TEST_WaitForFlushMemTable();
      }
    }
    dbfull()->TEST_WaitForFlushMemTable();
  };
I
Igor Canadi 已提交
4033

Y
Yi Wu 已提交
4034 4035 4036 4037 4038
  // Test write_buffer_size
  gen_l0_kb(64);
  ASSERT_EQ(NumTableFilesAtLevel(0), 1);
  ASSERT_LT(SizeAtLevel(0), k64KB + k5KB);
  ASSERT_GT(SizeAtLevel(0), k64KB - k5KB * 2);
I
Igor Canadi 已提交
4039

Y
Yi Wu 已提交
4040 4041 4042
  // Clean up L0
  dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);
  ASSERT_EQ(NumTableFilesAtLevel(0), 0);
I
Igor Canadi 已提交
4043

Y
Yi Wu 已提交
4044 4045 4046 4047
  // Increase buffer size
  ASSERT_OK(dbfull()->SetOptions({
      {"write_buffer_size", "131072"},
  }));
I
Igor Canadi 已提交
4048

4049 4050 4051 4052 4053 4054 4055 4056 4057 4058 4059 4060 4061 4062 4063 4064 4065
  // The existing memtable inflated 64KB->128KB when we invoked SetOptions().
  // Write 192KB, we should have a 128KB L0 file and a memtable with 64KB data.
  gen_l0_kb(192);
  ASSERT_EQ(NumTableFilesAtLevel(0), 1);  // (A)
  ASSERT_LT(SizeAtLevel(0), k128KB + 2 * k5KB);
  ASSERT_GT(SizeAtLevel(0), k128KB - 4 * k5KB);

  // Decrease buffer size below current usage
  ASSERT_OK(dbfull()->SetOptions({
      {"write_buffer_size", "65536"},
  }));
  // The existing memtable became eligible for flush when we reduced its
  // capacity to 64KB. Two keys need to be added to trigger flush: first causes
  // memtable to be marked full, second schedules the flush. Then we should have
  // a 128KB L0 file, a 64KB L0 file, and a memtable with just one key.
  gen_l0_kb(2);
  ASSERT_EQ(NumTableFilesAtLevel(0), 2);
Y
Yi Wu 已提交
4066 4067
  ASSERT_LT(SizeAtLevel(0), k128KB + k64KB + 2 * k5KB);
  ASSERT_GT(SizeAtLevel(0), k128KB + k64KB - 4 * k5KB);
4068

Y
Yi Wu 已提交
4069 4070 4071 4072
  // Test max_write_buffer_number
  // Block compaction thread, which will also block the flushes because
  // max_background_flushes == 0, so flushes are getting executed by the
  // compaction thread
4073
  env_->SetBackgroundThreads(1, Env::LOW);
4074 4075
  test::SleepingBackgroundTask sleeping_task_low;
  env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask, &sleeping_task_low,
4076
                 Env::Priority::LOW);
Y
Yi Wu 已提交
4077 4078 4079
  // Start from scratch and disable compaction/flush. Flush can only happen
  // during compaction but trigger is pretty high
  options.disable_auto_compactions = true;
4080
  DestroyAndReopen(options);
4081
  env_->SetBackgroundThreads(0, Env::HIGH);
4082

Y
Yi Wu 已提交
4083 4084 4085
  // Put until writes are stopped, bounded by 256 puts. We should see stop at
  // ~128KB
  int count = 0;
4086 4087
  Random rnd(301);

4088
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
Y
Yi Wu 已提交
4089
      "DBImpl::DelayWrite:Wait",
4090
      [&](void* /*arg*/) { sleeping_task_low.WakeUp(); });
4091
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
Y
Yi Wu 已提交
4092 4093

  while (!sleeping_task_low.WokenUp() && count < 256) {
M
mrambacher 已提交
4094
    ASSERT_OK(Put(Key(count), rnd.RandomString(1024), WriteOptions()));
Y
Yi Wu 已提交
4095
    count++;
4096
  }
Y
Yi Wu 已提交
4097 4098
  ASSERT_GT(static_cast<double>(count), 128 * 0.8);
  ASSERT_LT(static_cast<double>(count), 128 * 1.2);
4099

Y
Yi Wu 已提交
4100
  sleeping_task_low.WaitUntilDone();
4101

Y
Yi Wu 已提交
4102 4103 4104 4105 4106 4107
  // Increase
  ASSERT_OK(dbfull()->SetOptions({
      {"max_write_buffer_number", "8"},
  }));
  // Clean up memtable and L0
  dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);
4108

Y
Yi Wu 已提交
4109 4110 4111 4112 4113
  sleeping_task_low.Reset();
  env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask, &sleeping_task_low,
                 Env::Priority::LOW);
  count = 0;
  while (!sleeping_task_low.WokenUp() && count < 1024) {
M
mrambacher 已提交
4114
    ASSERT_OK(Put(Key(count), rnd.RandomString(1024), WriteOptions()));
Y
Yi Wu 已提交
4115 4116 4117 4118 4119 4120 4121 4122
    count++;
  }
// Windows fails this test. Will tune in the future and figure out
// approp number
#ifndef OS_WIN
  ASSERT_GT(static_cast<double>(count), 512 * 0.8);
  ASSERT_LT(static_cast<double>(count), 512 * 1.2);
#endif
4123 4124
  sleeping_task_low.WaitUntilDone();

Y
Yi Wu 已提交
4125 4126 4127 4128 4129 4130
  // Decrease
  ASSERT_OK(dbfull()->SetOptions({
      {"max_write_buffer_number", "4"},
  }));
  // Clean up memtable and L0
  dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);
4131

Y
Yi Wu 已提交
4132 4133 4134
  sleeping_task_low.Reset();
  env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask, &sleeping_task_low,
                 Env::Priority::LOW);
4135

Y
Yi Wu 已提交
4136 4137
  count = 0;
  while (!sleeping_task_low.WokenUp() && count < 1024) {
M
mrambacher 已提交
4138
    ASSERT_OK(Put(Key(count), rnd.RandomString(1024), WriteOptions()));
Y
Yi Wu 已提交
4139
    count++;
4140
  }
Y
Yi Wu 已提交
4141 4142 4143 4144 4145 4146 4147
// Windows fails this test. Will tune in the future and figure out
// approp number
#ifndef OS_WIN
  ASSERT_GT(static_cast<double>(count), 256 * 0.8);
  ASSERT_LT(static_cast<double>(count), 266 * 1.2);
#endif
  sleeping_task_low.WaitUntilDone();
4148

4149
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
Y
Yi Wu 已提交
4150 4151
}
#endif  // ROCKSDB_LITE
4152

D
Daniel Black 已提交
4153
#ifdef ROCKSDB_USING_THREAD_STATUS
Y
Yi Wu 已提交
4154 4155 4156 4157 4158 4159 4160 4161 4162 4163 4164 4165 4166 4167
namespace {
void VerifyOperationCount(Env* env, ThreadStatus::OperationType op_type,
                          int expected_count) {
  int op_count = 0;
  std::vector<ThreadStatus> thread_list;
  ASSERT_OK(env->GetThreadList(&thread_list));
  for (auto thread : thread_list) {
    if (thread.operation_type == op_type) {
      op_count++;
    }
  }
  ASSERT_EQ(op_count, expected_count);
}
}  // namespace
4168

Y
Yi Wu 已提交
4169 4170 4171 4172 4173
TEST_F(DBTest, GetThreadStatus) {
  Options options;
  options.env = env_;
  options.enable_thread_tracking = true;
  TryReopen(options);
4174

Y
Yi Wu 已提交
4175 4176 4177 4178 4179 4180 4181 4182
  std::vector<ThreadStatus> thread_list;
  Status s = env_->GetThreadList(&thread_list);

  for (int i = 0; i < 2; ++i) {
    // repeat the test with differet number of high / low priority threads
    const int kTestCount = 3;
    const unsigned int kHighPriCounts[kTestCount] = {3, 2, 5};
    const unsigned int kLowPriCounts[kTestCount] = {10, 15, 3};
4183
    const unsigned int kBottomPriCounts[kTestCount] = {2, 1, 4};
Y
Yi Wu 已提交
4184 4185 4186 4187
    for (int test = 0; test < kTestCount; ++test) {
      // Change the number of threads in high / low priority pool.
      env_->SetBackgroundThreads(kHighPriCounts[test], Env::HIGH);
      env_->SetBackgroundThreads(kLowPriCounts[test], Env::LOW);
4188
      env_->SetBackgroundThreads(kBottomPriCounts[test], Env::BOTTOM);
Y
Yi Wu 已提交
4189 4190
      // Wait to ensure the all threads has been registered
      unsigned int thread_type_counts[ThreadStatus::NUM_THREAD_TYPES];
4191 4192
      // TODO(ajkr): it'd be better if SetBackgroundThreads returned only after
      // all threads have been registered.
4193 4194 4195 4196 4197 4198 4199 4200 4201 4202 4203 4204 4205 4206
      // Try up to 60 seconds.
      for (int num_try = 0; num_try < 60000; num_try++) {
        env_->SleepForMicroseconds(1000);
        thread_list.clear();
        s = env_->GetThreadList(&thread_list);
        ASSERT_OK(s);
        memset(thread_type_counts, 0, sizeof(thread_type_counts));
        for (auto thread : thread_list) {
          ASSERT_LT(thread.thread_type, ThreadStatus::NUM_THREAD_TYPES);
          thread_type_counts[thread.thread_type]++;
        }
        if (thread_type_counts[ThreadStatus::HIGH_PRIORITY] ==
                kHighPriCounts[test] &&
            thread_type_counts[ThreadStatus::LOW_PRIORITY] ==
4207 4208 4209
                kLowPriCounts[test] &&
            thread_type_counts[ThreadStatus::BOTTOM_PRIORITY] ==
                kBottomPriCounts[test]) {
4210 4211
          break;
        }
Y
Yi Wu 已提交
4212 4213 4214 4215 4216 4217 4218
      }
      // Verify the number of high-priority threads
      ASSERT_EQ(thread_type_counts[ThreadStatus::HIGH_PRIORITY],
                kHighPriCounts[test]);
      // Verify the number of low-priority threads
      ASSERT_EQ(thread_type_counts[ThreadStatus::LOW_PRIORITY],
                kLowPriCounts[test]);
4219 4220 4221
      // Verify the number of bottom-priority threads
      ASSERT_EQ(thread_type_counts[ThreadStatus::BOTTOM_PRIORITY],
                kBottomPriCounts[test]);
Y
Yi Wu 已提交
4222 4223 4224 4225 4226 4227 4228 4229 4230 4231 4232 4233 4234 4235 4236 4237 4238 4239 4240 4241 4242 4243 4244 4245 4246 4247 4248 4249
    }
    if (i == 0) {
      // repeat the test with multiple column families
      CreateAndReopenWithCF({"pikachu", "about-to-remove"}, options);
      env_->GetThreadStatusUpdater()->TEST_VerifyColumnFamilyInfoMap(handles_,
                                                                     true);
    }
  }
  db_->DropColumnFamily(handles_[2]);
  delete handles_[2];
  handles_.erase(handles_.begin() + 2);
  env_->GetThreadStatusUpdater()->TEST_VerifyColumnFamilyInfoMap(handles_,
                                                                 true);
  Close();
  env_->GetThreadStatusUpdater()->TEST_VerifyColumnFamilyInfoMap(handles_,
                                                                 true);
}

TEST_F(DBTest, DisableThreadStatus) {
  Options options;
  options.env = env_;
  options.enable_thread_tracking = false;
  TryReopen(options);
  CreateAndReopenWithCF({"pikachu", "about-to-remove"}, options);
  // Verify non of the column family info exists
  env_->GetThreadStatusUpdater()->TEST_VerifyColumnFamilyInfoMap(handles_,
                                                                 false);
}
4250

Y
Yi Wu 已提交
4251 4252 4253 4254 4255 4256
TEST_F(DBTest, ThreadStatusFlush) {
  Options options;
  options.env = env_;
  options.write_buffer_size = 100000;  // Small write buffer
  options.enable_thread_tracking = true;
  options = CurrentOptions(options);
4257

4258
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->LoadDependency({
Y
Yi Wu 已提交
4259
      {"FlushJob::FlushJob()", "DBTest::ThreadStatusFlush:1"},
4260
      {"DBTest::ThreadStatusFlush:2", "FlushJob::WriteLevel0Table"},
Y
Yi Wu 已提交
4261
  });
4262
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
4263

Y
Yi Wu 已提交
4264 4265
  CreateAndReopenWithCF({"pikachu"}, options);
  VerifyOperationCount(env_, ThreadStatus::OP_FLUSH, 0);
4266

Y
Yi Wu 已提交
4267 4268 4269
  ASSERT_OK(Put(1, "foo", "v1"));
  ASSERT_EQ("v1", Get(1, "foo"));
  VerifyOperationCount(env_, ThreadStatus::OP_FLUSH, 0);
4270

Y
Yi Wu 已提交
4271 4272 4273
  uint64_t num_running_flushes = 0;
  db_->GetIntProperty(DB::Properties::kNumRunningFlushes, &num_running_flushes);
  ASSERT_EQ(num_running_flushes, 0);
4274

Y
Yi Wu 已提交
4275 4276
  Put(1, "k1", std::string(100000, 'x'));  // Fill memtable
  Put(1, "k2", std::string(100000, 'y'));  // Trigger flush
4277

Y
Yi Wu 已提交
4278 4279 4280 4281 4282 4283 4284 4285 4286
  // The first sync point is to make sure there's one flush job
  // running when we perform VerifyOperationCount().
  TEST_SYNC_POINT("DBTest::ThreadStatusFlush:1");
  VerifyOperationCount(env_, ThreadStatus::OP_FLUSH, 1);
  db_->GetIntProperty(DB::Properties::kNumRunningFlushes, &num_running_flushes);
  ASSERT_EQ(num_running_flushes, 1);
  // This second sync point is to ensure the flush job will not
  // be completed until we already perform VerifyOperationCount().
  TEST_SYNC_POINT("DBTest::ThreadStatusFlush:2");
4287
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
4288
}
4289

Y
Yi Wu 已提交
4290 4291 4292 4293 4294 4295
TEST_P(DBTestWithParam, ThreadStatusSingleCompaction) {
  const int kTestKeySize = 16;
  const int kTestValueSize = 984;
  const int kEntrySize = kTestKeySize + kTestValueSize;
  const int kEntriesPerBuffer = 100;
  Options options;
4296
  options.create_if_missing = true;
Y
Yi Wu 已提交
4297 4298 4299 4300 4301 4302 4303 4304 4305 4306 4307
  options.write_buffer_size = kEntrySize * kEntriesPerBuffer;
  options.compaction_style = kCompactionStyleLevel;
  options.target_file_size_base = options.write_buffer_size;
  options.max_bytes_for_level_base = options.target_file_size_base * 2;
  options.max_bytes_for_level_multiplier = 2;
  options.compression = kNoCompression;
  options = CurrentOptions(options);
  options.env = env_;
  options.enable_thread_tracking = true;
  const int kNumL0Files = 4;
  options.level0_file_num_compaction_trigger = kNumL0Files;
4308
  options.max_subcompactions = max_subcompactions_;
4309

4310
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->LoadDependency({
Y
Yi Wu 已提交
4311 4312 4313 4314 4315 4316
      {"DBTest::ThreadStatusSingleCompaction:0", "DBImpl::BGWorkCompaction"},
      {"CompactionJob::Run():Start", "DBTest::ThreadStatusSingleCompaction:1"},
      {"DBTest::ThreadStatusSingleCompaction:2", "CompactionJob::Run():End"},
  });
  for (int tests = 0; tests < 2; ++tests) {
    DestroyAndReopen(options);
4317 4318
    ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->ClearTrace();
    ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
Y
Yi Wu 已提交
4319 4320 4321 4322 4323 4324

    Random rnd(301);
    // The Put Phase.
    for (int file = 0; file < kNumL0Files; ++file) {
      for (int key = 0; key < kEntriesPerBuffer; ++key) {
        ASSERT_OK(Put(ToString(key + file * kEntriesPerBuffer),
M
mrambacher 已提交
4325
                      rnd.RandomString(kTestValueSize)));
Y
Yi Wu 已提交
4326 4327
      }
      Flush();
4328
    }
Y
Yi Wu 已提交
4329 4330 4331 4332 4333 4334 4335 4336 4337
    // This makes sure a compaction won't be scheduled until
    // we have done with the above Put Phase.
    uint64_t num_running_compactions = 0;
    db_->GetIntProperty(DB::Properties::kNumRunningCompactions,
                        &num_running_compactions);
    ASSERT_EQ(num_running_compactions, 0);
    TEST_SYNC_POINT("DBTest::ThreadStatusSingleCompaction:0");
    ASSERT_GE(NumTableFilesAtLevel(0),
              options.level0_file_num_compaction_trigger);
4338

Y
Yi Wu 已提交
4339 4340
    // This makes sure at least one compaction is running.
    TEST_SYNC_POINT("DBTest::ThreadStatusSingleCompaction:1");
4341

Y
Yi Wu 已提交
4342 4343 4344 4345 4346 4347 4348 4349 4350 4351 4352 4353
    if (options.enable_thread_tracking) {
      // expecting one single L0 to L1 compaction
      VerifyOperationCount(env_, ThreadStatus::OP_COMPACTION, 1);
    } else {
      // If thread tracking is not enabled, compaction count should be 0.
      VerifyOperationCount(env_, ThreadStatus::OP_COMPACTION, 0);
    }
    db_->GetIntProperty(DB::Properties::kNumRunningCompactions,
                        &num_running_compactions);
    ASSERT_EQ(num_running_compactions, 1);
    // TODO(yhchiang): adding assert to verify each compaction stage.
    TEST_SYNC_POINT("DBTest::ThreadStatusSingleCompaction:2");
4354

Y
Yi Wu 已提交
4355 4356
    // repeat the test with disabling thread tracking.
    options.enable_thread_tracking = false;
4357
    ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
Y
Yi Wu 已提交
4358
  }
4359 4360
}

Y
Yi Wu 已提交
4361
TEST_P(DBTestWithParam, PreShutdownManualCompaction) {
4362
  Options options = CurrentOptions();
Y
Yi Wu 已提交
4363 4364
  options.max_subcompactions = max_subcompactions_;
  CreateAndReopenWithCF({"pikachu"}, options);
4365

Y
Yi Wu 已提交
4366 4367 4368 4369 4370
  // iter - 0 with 7 levels
  // iter - 1 with 3 levels
  for (int iter = 0; iter < 2; ++iter) {
    MakeTables(3, "p", "q", 1);
    ASSERT_EQ("1,1,1", FilesPerLevel(1));
4371

Y
Yi Wu 已提交
4372 4373 4374
    // Compaction range falls before files
    Compact(1, "", "c");
    ASSERT_EQ("1,1,1", FilesPerLevel(1));
4375

Y
Yi Wu 已提交
4376 4377 4378
    // Compaction range falls after files
    Compact(1, "r", "z");
    ASSERT_EQ("1,1,1", FilesPerLevel(1));
4379

Y
Yi Wu 已提交
4380
    // Compaction range overlaps files
4381
    Compact(1, "p", "q");
Y
Yi Wu 已提交
4382
    ASSERT_EQ("0,0,1", FilesPerLevel(1));
4383

Y
Yi Wu 已提交
4384 4385 4386
    // Populate a different range
    MakeTables(3, "c", "e", 1);
    ASSERT_EQ("1,1,2", FilesPerLevel(1));
4387

Y
Yi Wu 已提交
4388 4389 4390
    // Compact just the new range
    Compact(1, "b", "f");
    ASSERT_EQ("0,0,2", FilesPerLevel(1));
4391

Y
Yi Wu 已提交
4392 4393 4394 4395 4396 4397 4398 4399 4400 4401 4402 4403 4404 4405
    // Compact all
    MakeTables(1, "a", "z", 1);
    ASSERT_EQ("1,0,2", FilesPerLevel(1));
    CancelAllBackgroundWork(db_);
    db_->CompactRange(CompactRangeOptions(), handles_[1], nullptr, nullptr);
    ASSERT_EQ("1,0,2", FilesPerLevel(1));

    if (iter == 0) {
      options = CurrentOptions();
      options.num_levels = 3;
      options.create_if_missing = true;
      DestroyAndReopen(options);
      CreateAndReopenWithCF({"pikachu"}, options);
    }
4406
  }
Y
Yi Wu 已提交
4407
}
4408

Y
Yi Wu 已提交
4409 4410 4411 4412 4413 4414 4415 4416 4417
TEST_F(DBTest, PreShutdownFlush) {
  Options options = CurrentOptions();
  CreateAndReopenWithCF({"pikachu"}, options);
  ASSERT_OK(Put(1, "key", "value"));
  CancelAllBackgroundWork(db_);
  Status s =
      db_->CompactRange(CompactRangeOptions(), handles_[1], nullptr, nullptr);
  ASSERT_TRUE(s.IsShutdownInProgress());
}
4418

Y
Yi Wu 已提交
4419 4420 4421 4422 4423 4424
TEST_P(DBTestWithParam, PreShutdownMultipleCompaction) {
  const int kTestKeySize = 16;
  const int kTestValueSize = 984;
  const int kEntrySize = kTestKeySize + kTestValueSize;
  const int kEntriesPerBuffer = 40;
  const int kNumL0Files = 4;
4425

Y
Yi Wu 已提交
4426 4427 4428 4429
  const int kHighPriCount = 3;
  const int kLowPriCount = 5;
  env_->SetBackgroundThreads(kHighPriCount, Env::HIGH);
  env_->SetBackgroundThreads(kLowPriCount, Env::LOW);
4430

Y
Yi Wu 已提交
4431 4432 4433 4434 4435 4436 4437 4438 4439 4440 4441 4442 4443 4444 4445 4446 4447
  Options options;
  options.create_if_missing = true;
  options.write_buffer_size = kEntrySize * kEntriesPerBuffer;
  options.compaction_style = kCompactionStyleLevel;
  options.target_file_size_base = options.write_buffer_size;
  options.max_bytes_for_level_base =
      options.target_file_size_base * kNumL0Files;
  options.compression = kNoCompression;
  options = CurrentOptions(options);
  options.env = env_;
  options.enable_thread_tracking = true;
  options.level0_file_num_compaction_trigger = kNumL0Files;
  options.max_bytes_for_level_multiplier = 2;
  options.max_background_compactions = kLowPriCount;
  options.level0_stop_writes_trigger = 1 << 10;
  options.level0_slowdown_writes_trigger = 1 << 10;
  options.max_subcompactions = max_subcompactions_;
4448

Y
Yi Wu 已提交
4449 4450
  TryReopen(options);
  Random rnd(301);
4451

Y
Yi Wu 已提交
4452 4453
  std::vector<ThreadStatus> thread_list;
  // Delay both flush and compaction
4454
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->LoadDependency(
Y
Yi Wu 已提交
4455 4456 4457 4458 4459 4460 4461 4462 4463
      {{"FlushJob::FlushJob()", "CompactionJob::Run():Start"},
       {"CompactionJob::Run():Start",
        "DBTest::PreShutdownMultipleCompaction:Preshutdown"},
       {"CompactionJob::Run():Start",
        "DBTest::PreShutdownMultipleCompaction:VerifyCompaction"},
       {"DBTest::PreShutdownMultipleCompaction:Preshutdown",
        "CompactionJob::Run():End"},
       {"CompactionJob::Run():End",
        "DBTest::PreShutdownMultipleCompaction:VerifyPreshutdown"}});
4464

4465
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
4466

Y
Yi Wu 已提交
4467 4468 4469 4470 4471 4472
  // Make rocksdb busy
  int key = 0;
  // check how many threads are doing compaction using GetThreadList
  int operation_count[ThreadStatus::NUM_OP_TYPES] = {0};
  for (int file = 0; file < 16 * kNumL0Files; ++file) {
    for (int k = 0; k < kEntriesPerBuffer; ++k) {
M
mrambacher 已提交
4473
      ASSERT_OK(Put(ToString(key++), rnd.RandomString(kTestValueSize)));
Y
Yi Wu 已提交
4474
    }
4475

Y
Yi Wu 已提交
4476 4477 4478 4479
    Status s = env_->GetThreadList(&thread_list);
    for (auto thread : thread_list) {
      operation_count[thread.operation_type]++;
    }
4480

Y
Yi Wu 已提交
4481 4482 4483 4484 4485 4486 4487 4488 4489
    // Speed up the test
    if (operation_count[ThreadStatus::OP_FLUSH] > 1 &&
        operation_count[ThreadStatus::OP_COMPACTION] >
            0.6 * options.max_background_compactions) {
      break;
    }
    if (file == 15 * kNumL0Files) {
      TEST_SYNC_POINT("DBTest::PreShutdownMultipleCompaction:Preshutdown");
    }
4490 4491
  }

Y
Yi Wu 已提交
4492 4493 4494 4495
  TEST_SYNC_POINT("DBTest::PreShutdownMultipleCompaction:Preshutdown");
  ASSERT_GE(operation_count[ThreadStatus::OP_COMPACTION], 1);
  CancelAllBackgroundWork(db_);
  TEST_SYNC_POINT("DBTest::PreShutdownMultipleCompaction:VerifyPreshutdown");
4496
  dbfull()->TEST_WaitForCompact();
Y
Yi Wu 已提交
4497 4498 4499 4500 4501 4502 4503 4504 4505
  // Record the number of compactions at a time.
  for (int i = 0; i < ThreadStatus::NUM_OP_TYPES; ++i) {
    operation_count[i] = 0;
  }
  Status s = env_->GetThreadList(&thread_list);
  for (auto thread : thread_list) {
    operation_count[thread.operation_type]++;
  }
  ASSERT_EQ(operation_count[ThreadStatus::OP_COMPACTION], 0);
4506 4507
}

Y
Yi Wu 已提交
4508 4509 4510 4511 4512 4513
TEST_P(DBTestWithParam, PreShutdownCompactionMiddle) {
  const int kTestKeySize = 16;
  const int kTestValueSize = 984;
  const int kEntrySize = kTestKeySize + kTestValueSize;
  const int kEntriesPerBuffer = 40;
  const int kNumL0Files = 4;
4514

Y
Yi Wu 已提交
4515 4516 4517 4518
  const int kHighPriCount = 3;
  const int kLowPriCount = 5;
  env_->SetBackgroundThreads(kHighPriCount, Env::HIGH);
  env_->SetBackgroundThreads(kLowPriCount, Env::LOW);
4519

Y
Yi Wu 已提交
4520 4521 4522 4523 4524 4525 4526 4527 4528 4529 4530 4531 4532 4533 4534 4535 4536
  Options options;
  options.create_if_missing = true;
  options.write_buffer_size = kEntrySize * kEntriesPerBuffer;
  options.compaction_style = kCompactionStyleLevel;
  options.target_file_size_base = options.write_buffer_size;
  options.max_bytes_for_level_base =
      options.target_file_size_base * kNumL0Files;
  options.compression = kNoCompression;
  options = CurrentOptions(options);
  options.env = env_;
  options.enable_thread_tracking = true;
  options.level0_file_num_compaction_trigger = kNumL0Files;
  options.max_bytes_for_level_multiplier = 2;
  options.max_background_compactions = kLowPriCount;
  options.level0_stop_writes_trigger = 1 << 10;
  options.level0_slowdown_writes_trigger = 1 << 10;
  options.max_subcompactions = max_subcompactions_;
4537

Y
Yi Wu 已提交
4538
  TryReopen(options);
4539 4540
  Random rnd(301);

Y
Yi Wu 已提交
4541 4542
  std::vector<ThreadStatus> thread_list;
  // Delay both flush and compaction
4543
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->LoadDependency(
Y
Yi Wu 已提交
4544 4545 4546 4547 4548 4549 4550
      {{"DBTest::PreShutdownCompactionMiddle:Preshutdown",
        "CompactionJob::Run():Inprogress"},
       {"CompactionJob::Run():Start",
        "DBTest::PreShutdownCompactionMiddle:VerifyCompaction"},
       {"CompactionJob::Run():Inprogress", "CompactionJob::Run():End"},
       {"CompactionJob::Run():End",
        "DBTest::PreShutdownCompactionMiddle:VerifyPreshutdown"}});
4551

4552
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
4553

Y
Yi Wu 已提交
4554 4555 4556 4557 4558 4559
  // Make rocksdb busy
  int key = 0;
  // check how many threads are doing compaction using GetThreadList
  int operation_count[ThreadStatus::NUM_OP_TYPES] = {0};
  for (int file = 0; file < 16 * kNumL0Files; ++file) {
    for (int k = 0; k < kEntriesPerBuffer; ++k) {
M
mrambacher 已提交
4560
      ASSERT_OK(Put(ToString(key++), rnd.RandomString(kTestValueSize)));
Y
Yi Wu 已提交
4561
    }
4562

Y
Yi Wu 已提交
4563 4564 4565 4566
    Status s = env_->GetThreadList(&thread_list);
    for (auto thread : thread_list) {
      operation_count[thread.operation_type]++;
    }
4567

Y
Yi Wu 已提交
4568 4569 4570 4571 4572 4573 4574 4575 4576 4577
    // Speed up the test
    if (operation_count[ThreadStatus::OP_FLUSH] > 1 &&
        operation_count[ThreadStatus::OP_COMPACTION] >
            0.6 * options.max_background_compactions) {
      break;
    }
    if (file == 15 * kNumL0Files) {
      TEST_SYNC_POINT("DBTest::PreShutdownCompactionMiddle:VerifyCompaction");
    }
  }
4578

Y
Yi Wu 已提交
4579 4580 4581 4582 4583 4584 4585 4586 4587 4588 4589 4590 4591 4592 4593
  ASSERT_GE(operation_count[ThreadStatus::OP_COMPACTION], 1);
  CancelAllBackgroundWork(db_);
  TEST_SYNC_POINT("DBTest::PreShutdownCompactionMiddle:Preshutdown");
  TEST_SYNC_POINT("DBTest::PreShutdownCompactionMiddle:VerifyPreshutdown");
  dbfull()->TEST_WaitForCompact();
  // Record the number of compactions at a time.
  for (int i = 0; i < ThreadStatus::NUM_OP_TYPES; ++i) {
    operation_count[i] = 0;
  }
  Status s = env_->GetThreadList(&thread_list);
  for (auto thread : thread_list) {
    operation_count[thread.operation_type]++;
  }
  ASSERT_EQ(operation_count[ThreadStatus::OP_COMPACTION], 0);
}
4594

Y
Yi Wu 已提交
4595
#endif  // ROCKSDB_USING_THREAD_STATUS
4596

Y
Yi Wu 已提交
4597 4598 4599 4600 4601 4602
#ifndef ROCKSDB_LITE
TEST_F(DBTest, FlushOnDestroy) {
  WriteOptions wo;
  wo.disableWAL = true;
  ASSERT_OK(Put("foo", "v1", wo));
  CancelAllBackgroundWork(db_);
4603 4604
}

Y
Yi Wu 已提交
4605 4606 4607 4608 4609 4610 4611 4612 4613
TEST_F(DBTest, DynamicLevelCompressionPerLevel) {
  if (!Snappy_Supported()) {
    return;
  }
  const int kNKeys = 120;
  int keys[kNKeys];
  for (int i = 0; i < kNKeys; i++) {
    keys[i] = i;
  }
P
Peter Dillinger 已提交
4614
  RandomShuffle(std::begin(keys), std::end(keys));
4615 4616

  Random rnd(301);
Y
Yi Wu 已提交
4617 4618 4619 4620 4621 4622 4623 4624
  Options options;
  options.create_if_missing = true;
  options.db_write_buffer_size = 20480;
  options.write_buffer_size = 20480;
  options.max_write_buffer_number = 2;
  options.level0_file_num_compaction_trigger = 2;
  options.level0_slowdown_writes_trigger = 2;
  options.level0_stop_writes_trigger = 2;
4625
  options.target_file_size_base = 20480;
Y
Yi Wu 已提交
4626 4627 4628 4629 4630
  options.level_compaction_dynamic_level_bytes = true;
  options.max_bytes_for_level_base = 102400;
  options.max_bytes_for_level_multiplier = 4;
  options.max_background_compactions = 1;
  options.num_levels = 5;
4631

Y
Yi Wu 已提交
4632 4633 4634 4635 4636 4637 4638
  options.compression_per_level.resize(3);
  options.compression_per_level[0] = kNoCompression;
  options.compression_per_level[1] = kNoCompression;
  options.compression_per_level[2] = kSnappyCompression;

  OnFileDeletionListener* listener = new OnFileDeletionListener();
  options.listeners.emplace_back(listener);
4639

4640 4641
  DestroyAndReopen(options);

Y
Yi Wu 已提交
4642 4643 4644 4645
  // Insert more than 80K. L4 should be base level. Neither L0 nor L4 should
  // be compressed, so total data size should be more than 80K.
  for (int i = 0; i < 20; i++) {
    ASSERT_OK(Put(Key(keys[i]), CompressibleString(&rnd, 4000)));
4646
  }
Y
Yi Wu 已提交
4647 4648
  Flush();
  dbfull()->TEST_WaitForCompact();
4649

Y
Yi Wu 已提交
4650 4651 4652
  ASSERT_EQ(NumTableFilesAtLevel(1), 0);
  ASSERT_EQ(NumTableFilesAtLevel(2), 0);
  ASSERT_EQ(NumTableFilesAtLevel(3), 0);
4653 4654
  // Assuming each files' metadata is at least 50 bytes/
  ASSERT_GT(SizeAtLevel(0) + SizeAtLevel(4), 20U * 4000U + 50U * 4);
Y
Yi Wu 已提交
4655 4656 4657 4658

  // Insert 400KB. Some data will be compressed
  for (int i = 21; i < 120; i++) {
    ASSERT_OK(Put(Key(keys[i]), CompressibleString(&rnd, 4000)));
4659
  }
Y
Yi Wu 已提交
4660 4661 4662 4663
  Flush();
  dbfull()->TEST_WaitForCompact();
  ASSERT_EQ(NumTableFilesAtLevel(1), 0);
  ASSERT_EQ(NumTableFilesAtLevel(2), 0);
4664 4665
  ASSERT_LT(SizeAtLevel(0) + SizeAtLevel(3) + SizeAtLevel(4),
            120U * 4000U + 50U * 24);
Y
Yi Wu 已提交
4666 4667 4668 4669 4670 4671 4672 4673 4674 4675 4676 4677
  // Make sure data in files in L3 is not compacted by removing all files
  // in L4 and calculate number of rows
  ASSERT_OK(dbfull()->SetOptions({
      {"disable_auto_compactions", "true"},
  }));
  ColumnFamilyMetaData cf_meta;
  db_->GetColumnFamilyMetaData(&cf_meta);
  for (auto file : cf_meta.levels[4].files) {
    listener->SetExpectedFileName(dbname_ + file.name);
    ASSERT_OK(dbfull()->DeleteFile(file.name));
  }
  listener->VerifyMatchedCount(cf_meta.levels[4].files.size());
4678

Y
Yi Wu 已提交
4679 4680 4681 4682
  int num_keys = 0;
  std::unique_ptr<Iterator> iter(db_->NewIterator(ReadOptions()));
  for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
    num_keys++;
4683
  }
Y
Yi Wu 已提交
4684
  ASSERT_OK(iter->status());
4685
  ASSERT_GT(SizeAtLevel(0) + SizeAtLevel(3), num_keys * 4000U + num_keys * 10U);
4686 4687
}

Y
Yi Wu 已提交
4688 4689 4690 4691 4692 4693 4694 4695 4696
TEST_F(DBTest, DynamicLevelCompressionPerLevel2) {
  if (!Snappy_Supported() || !LZ4_Supported() || !Zlib_Supported()) {
    return;
  }
  const int kNKeys = 500;
  int keys[kNKeys];
  for (int i = 0; i < kNKeys; i++) {
    keys[i] = i;
  }
P
Peter Dillinger 已提交
4697
  RandomShuffle(std::begin(keys), std::end(keys));
4698

Y
Yi Wu 已提交
4699 4700 4701
  Random rnd(301);
  Options options;
  options.create_if_missing = true;
4702 4703
  options.db_write_buffer_size = 6000000;
  options.write_buffer_size = 600000;
Y
Yi Wu 已提交
4704 4705 4706 4707 4708
  options.max_write_buffer_number = 2;
  options.level0_file_num_compaction_trigger = 2;
  options.level0_slowdown_writes_trigger = 2;
  options.level0_stop_writes_trigger = 2;
  options.soft_pending_compaction_bytes_limit = 1024 * 1024;
4709
  options.target_file_size_base = 20;
4710

Y
Yi Wu 已提交
4711 4712 4713 4714 4715 4716 4717
  options.level_compaction_dynamic_level_bytes = true;
  options.max_bytes_for_level_base = 200;
  options.max_bytes_for_level_multiplier = 8;
  options.max_background_compactions = 1;
  options.num_levels = 5;
  std::shared_ptr<mock::MockTableFactory> mtf(new mock::MockTableFactory);
  options.table_factory = mtf;
4718

Y
Yi Wu 已提交
4719 4720 4721 4722
  options.compression_per_level.resize(3);
  options.compression_per_level[0] = kNoCompression;
  options.compression_per_level[1] = kLZ4Compression;
  options.compression_per_level[2] = kZlibCompression;
4723

Y
Yi Wu 已提交
4724 4725 4726 4727 4728
  DestroyAndReopen(options);
  // When base level is L4, L4 is LZ4.
  std::atomic<int> num_zlib(0);
  std::atomic<int> num_lz4(0);
  std::atomic<int> num_no(0);
4729
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
Y
Yi Wu 已提交
4730 4731 4732 4733 4734 4735 4736
      "LevelCompactionPicker::PickCompaction:Return", [&](void* arg) {
        Compaction* compaction = reinterpret_cast<Compaction*>(arg);
        if (compaction->output_level() == 4) {
          ASSERT_TRUE(compaction->output_compression() == kLZ4Compression);
          num_lz4.fetch_add(1);
        }
      });
4737
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
Y
Yi Wu 已提交
4738 4739 4740 4741 4742
      "FlushJob::WriteLevel0Table:output_compression", [&](void* arg) {
        auto* compression = reinterpret_cast<CompressionType*>(arg);
        ASSERT_TRUE(*compression == kNoCompression);
        num_no.fetch_add(1);
      });
4743
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
4744

Y
Yi Wu 已提交
4745
  for (int i = 0; i < 100; i++) {
M
mrambacher 已提交
4746
    std::string value = rnd.RandomString(200);
4747 4748 4749 4750
    ASSERT_OK(Put(Key(keys[i]), value));
    if (i % 25 == 24) {
      Flush();
      dbfull()->TEST_WaitForCompact();
4751 4752
    }
  }
S
sdong 已提交
4753

Y
Yi Wu 已提交
4754 4755 4756
  Flush();
  dbfull()->TEST_WaitForFlushMemTable();
  dbfull()->TEST_WaitForCompact();
4757 4758
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->ClearAllCallBacks();
S
sdong 已提交
4759

Y
Yi Wu 已提交
4760 4761 4762 4763 4764 4765 4766
  ASSERT_EQ(NumTableFilesAtLevel(1), 0);
  ASSERT_EQ(NumTableFilesAtLevel(2), 0);
  ASSERT_EQ(NumTableFilesAtLevel(3), 0);
  ASSERT_GT(NumTableFilesAtLevel(4), 0);
  ASSERT_GT(num_no.load(), 2);
  ASSERT_GT(num_lz4.load(), 0);
  int prev_num_files_l4 = NumTableFilesAtLevel(4);
4767

Y
Yi Wu 已提交
4768 4769 4770
  // After base level turn L4->L3, L3 becomes LZ4 and L4 becomes Zlib
  num_lz4.store(0);
  num_no.store(0);
4771
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
Y
Yi Wu 已提交
4772 4773 4774 4775 4776 4777 4778 4779 4780 4781
      "LevelCompactionPicker::PickCompaction:Return", [&](void* arg) {
        Compaction* compaction = reinterpret_cast<Compaction*>(arg);
        if (compaction->output_level() == 4 && compaction->start_level() == 3) {
          ASSERT_TRUE(compaction->output_compression() == kZlibCompression);
          num_zlib.fetch_add(1);
        } else {
          ASSERT_TRUE(compaction->output_compression() == kLZ4Compression);
          num_lz4.fetch_add(1);
        }
      });
4782
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
Y
Yi Wu 已提交
4783 4784 4785 4786 4787
      "FlushJob::WriteLevel0Table:output_compression", [&](void* arg) {
        auto* compression = reinterpret_cast<CompressionType*>(arg);
        ASSERT_TRUE(*compression == kNoCompression);
        num_no.fetch_add(1);
      });
4788
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
S
sdong 已提交
4789

Y
Yi Wu 已提交
4790
  for (int i = 101; i < 500; i++) {
M
mrambacher 已提交
4791
    std::string value = rnd.RandomString(200);
4792
    ASSERT_OK(Put(Key(keys[i]), value));
Y
Yi Wu 已提交
4793 4794 4795
    if (i % 100 == 99) {
      Flush();
      dbfull()->TEST_WaitForCompact();
S
sdong 已提交
4796 4797 4798
    }
  }

4799 4800
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->ClearAllCallBacks();
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
Y
Yi Wu 已提交
4801 4802 4803 4804 4805 4806 4807
  ASSERT_EQ(NumTableFilesAtLevel(1), 0);
  ASSERT_EQ(NumTableFilesAtLevel(2), 0);
  ASSERT_GT(NumTableFilesAtLevel(3), 0);
  ASSERT_GT(NumTableFilesAtLevel(4), prev_num_files_l4);
  ASSERT_GT(num_no.load(), 2);
  ASSERT_GT(num_lz4.load(), 0);
  ASSERT_GT(num_zlib.load(), 0);
S
sdong 已提交
4808 4809
}

Y
Yi Wu 已提交
4810 4811 4812 4813 4814 4815 4816 4817
TEST_F(DBTest, DynamicCompactionOptions) {
  // minimum write buffer size is enforced at 64KB
  const uint64_t k32KB = 1 << 15;
  const uint64_t k64KB = 1 << 16;
  const uint64_t k128KB = 1 << 17;
  const uint64_t k1MB = 1 << 20;
  const uint64_t k4KB = 1 << 12;
  Options options;
4818
  options.env = env_;
Y
Yi Wu 已提交
4819 4820 4821 4822 4823 4824 4825 4826 4827 4828 4829
  options.create_if_missing = true;
  options.compression = kNoCompression;
  options.soft_pending_compaction_bytes_limit = 1024 * 1024;
  options.write_buffer_size = k64KB;
  options.arena_block_size = 4 * k4KB;
  options.max_write_buffer_number = 2;
  // Compaction related options
  options.level0_file_num_compaction_trigger = 3;
  options.level0_slowdown_writes_trigger = 4;
  options.level0_stop_writes_trigger = 8;
  options.target_file_size_base = k64KB;
4830
  options.max_compaction_bytes = options.target_file_size_base * 10;
Y
Yi Wu 已提交
4831 4832 4833
  options.target_file_size_multiplier = 1;
  options.max_bytes_for_level_base = k128KB;
  options.max_bytes_for_level_multiplier = 4;
4834

Y
Yi Wu 已提交
4835
  // Block flush thread and disable compaction thread
4836
  env_->SetBackgroundThreads(1, Env::LOW);
Y
Yi Wu 已提交
4837 4838
  env_->SetBackgroundThreads(1, Env::HIGH);
  DestroyAndReopen(options);
4839

Y
Yi Wu 已提交
4840 4841 4842
  auto gen_l0_kb = [this](int start, int size, int stride) {
    Random rnd(301);
    for (int i = 0; i < size; i++) {
M
mrambacher 已提交
4843
      ASSERT_OK(Put(Key(start + stride * i), rnd.RandomString(1024)));
Y
Yi Wu 已提交
4844 4845 4846
    }
    dbfull()->TEST_WaitForFlushMemTable();
  };
4847

Y
Yi Wu 已提交
4848 4849 4850 4851 4852 4853 4854 4855 4856 4857 4858 4859 4860 4861 4862
  // Write 3 files that have the same key range.
  // Since level0_file_num_compaction_trigger is 3, compaction should be
  // triggered. The compaction should result in one L1 file
  gen_l0_kb(0, 64, 1);
  ASSERT_EQ(NumTableFilesAtLevel(0), 1);
  gen_l0_kb(0, 64, 1);
  ASSERT_EQ(NumTableFilesAtLevel(0), 2);
  gen_l0_kb(0, 64, 1);
  dbfull()->TEST_WaitForCompact();
  ASSERT_EQ("0,1", FilesPerLevel());
  std::vector<LiveFileMetaData> metadata;
  db_->GetLiveFilesMetaData(&metadata);
  ASSERT_EQ(1U, metadata.size());
  ASSERT_LE(metadata[0].size, k64KB + k4KB);
  ASSERT_GE(metadata[0].size, k64KB - k4KB);
4863

Y
Yi Wu 已提交
4864 4865 4866 4867 4868 4869 4870 4871 4872 4873 4874 4875 4876 4877 4878 4879 4880 4881 4882 4883 4884 4885 4886 4887 4888 4889 4890 4891 4892 4893 4894 4895
  // Test compaction trigger and target_file_size_base
  // Reduce compaction trigger to 2, and reduce L1 file size to 32KB.
  // Writing to 64KB L0 files should trigger a compaction. Since these
  // 2 L0 files have the same key range, compaction merge them and should
  // result in 2 32KB L1 files.
  ASSERT_OK(dbfull()->SetOptions({{"level0_file_num_compaction_trigger", "2"},
                                  {"target_file_size_base", ToString(k32KB)}}));

  gen_l0_kb(0, 64, 1);
  ASSERT_EQ("1,1", FilesPerLevel());
  gen_l0_kb(0, 64, 1);
  dbfull()->TEST_WaitForCompact();
  ASSERT_EQ("0,2", FilesPerLevel());
  metadata.clear();
  db_->GetLiveFilesMetaData(&metadata);
  ASSERT_EQ(2U, metadata.size());
  ASSERT_LE(metadata[0].size, k32KB + k4KB);
  ASSERT_GE(metadata[0].size, k32KB - k4KB);
  ASSERT_LE(metadata[1].size, k32KB + k4KB);
  ASSERT_GE(metadata[1].size, k32KB - k4KB);

  // Test max_bytes_for_level_base
  // Increase level base size to 256KB and write enough data that will
  // fill L1 and L2. L1 size should be around 256KB while L2 size should be
  // around 256KB x 4.
  ASSERT_OK(
      dbfull()->SetOptions({{"max_bytes_for_level_base", ToString(k1MB)}}));

  // writing 96 x 64KB => 6 * 1024KB
  // (L1 + L2) = (1 + 4) * 1024KB
  for (int i = 0; i < 96; ++i) {
    gen_l0_kb(i, 64, 96);
4896
  }
Y
Yi Wu 已提交
4897 4898 4899
  dbfull()->TEST_WaitForCompact();
  ASSERT_GT(SizeAtLevel(1), k1MB / 2);
  ASSERT_LT(SizeAtLevel(1), k1MB + k1MB / 2);
4900

Y
Yi Wu 已提交
4901 4902 4903
  // Within (0.5, 1.5) of 4MB.
  ASSERT_GT(SizeAtLevel(2), 2 * k1MB);
  ASSERT_LT(SizeAtLevel(2), 6 * k1MB);
4904

Y
Yi Wu 已提交
4905 4906 4907 4908 4909 4910 4911 4912 4913 4914 4915 4916 4917 4918 4919 4920 4921 4922 4923 4924 4925
  // Test max_bytes_for_level_multiplier and
  // max_bytes_for_level_base. Now, reduce both mulitplier and level base,
  // After filling enough data that can fit in L1 - L3, we should see L1 size
  // reduces to 128KB from 256KB which was asserted previously. Same for L2.
  ASSERT_OK(
      dbfull()->SetOptions({{"max_bytes_for_level_multiplier", "2"},
                            {"max_bytes_for_level_base", ToString(k128KB)}}));

  // writing 20 x 64KB = 10 x 128KB
  // (L1 + L2 + L3) = (1 + 2 + 4) * 128KB
  for (int i = 0; i < 20; ++i) {
    gen_l0_kb(i, 64, 32);
  }
  dbfull()->TEST_WaitForCompact();
  uint64_t total_size = SizeAtLevel(1) + SizeAtLevel(2) + SizeAtLevel(3);
  ASSERT_TRUE(total_size < k128KB * 7 * 1.5);

  // Test level0_stop_writes_trigger.
  // Clean up memtable and L0. Block compaction threads. If continue to write
  // and flush memtables. We should see put stop after 8 memtable flushes
  // since level0_stop_writes_trigger = 8
4926
  dbfull()->TEST_FlushMemTable(true, true);
Y
Yi Wu 已提交
4927 4928 4929 4930 4931 4932 4933 4934 4935 4936 4937
  dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);
  // Block compaction
  test::SleepingBackgroundTask sleeping_task_low;
  env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask, &sleeping_task_low,
                 Env::Priority::LOW);
  sleeping_task_low.WaitUntilSleeping();
  ASSERT_EQ(NumTableFilesAtLevel(0), 0);
  int count = 0;
  Random rnd(301);
  WriteOptions wo;
  while (count < 64) {
M
mrambacher 已提交
4938
    ASSERT_OK(Put(Key(count), rnd.RandomString(1024), wo));
4939
    dbfull()->TEST_FlushMemTable(true, true);
Y
Yi Wu 已提交
4940 4941 4942 4943 4944
    count++;
    if (dbfull()->TEST_write_controler().IsStopped()) {
      sleeping_task_low.WakeUp();
      break;
    }
4945
  }
Y
Yi Wu 已提交
4946 4947 4948
  // Stop trigger = 8
  ASSERT_EQ(count, 8);
  // Unblock
4949
  sleeping_task_low.WaitUntilDone();
S
sdong 已提交
4950

Y
Yi Wu 已提交
4951 4952 4953 4954 4955 4956 4957
  // Now reduce level0_stop_writes_trigger to 6. Clear up memtables and L0.
  // Block compaction thread again. Perform the put and memtable flushes
  // until we see the stop after 6 memtable flushes.
  ASSERT_OK(dbfull()->SetOptions({{"level0_stop_writes_trigger", "6"}}));
  dbfull()->TEST_FlushMemTable(true);
  dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);
  ASSERT_EQ(NumTableFilesAtLevel(0), 0);
S
sdong 已提交
4958

Y
Yi Wu 已提交
4959 4960
  // Block compaction again
  sleeping_task_low.Reset();
4961 4962 4963
  env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask, &sleeping_task_low,
                 Env::Priority::LOW);
  sleeping_task_low.WaitUntilSleeping();
Y
Yi Wu 已提交
4964 4965
  count = 0;
  while (count < 64) {
M
mrambacher 已提交
4966
    ASSERT_OK(Put(Key(count), rnd.RandomString(1024), wo));
4967
    dbfull()->TEST_FlushMemTable(true, true);
Y
Yi Wu 已提交
4968 4969 4970 4971 4972
    count++;
    if (dbfull()->TEST_write_controler().IsStopped()) {
      sleeping_task_low.WakeUp();
      break;
    }
S
sdong 已提交
4973
  }
Y
Yi Wu 已提交
4974 4975
  ASSERT_EQ(count, 6);
  // Unblock
4976 4977
  sleeping_task_low.WaitUntilDone();

Y
Yi Wu 已提交
4978 4979 4980 4981 4982 4983 4984 4985
  // Test disable_auto_compactions
  // Compaction thread is unblocked but auto compaction is disabled. Write
  // 4 L0 files and compaction should be triggered. If auto compaction is
  // disabled, then TEST_WaitForCompact will be waiting for nothing. Number of
  // L0 files do not change after the call.
  ASSERT_OK(dbfull()->SetOptions({{"disable_auto_compactions", "true"}}));
  dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);
  ASSERT_EQ(NumTableFilesAtLevel(0), 0);
S
sdong 已提交
4986

Y
Yi Wu 已提交
4987
  for (int i = 0; i < 4; ++i) {
M
mrambacher 已提交
4988
    ASSERT_OK(Put(Key(i), rnd.RandomString(1024)));
Y
Yi Wu 已提交
4989 4990 4991 4992 4993
    // Wait for compaction so that put won't stop
    dbfull()->TEST_FlushMemTable(true);
  }
  dbfull()->TEST_WaitForCompact();
  ASSERT_EQ(NumTableFilesAtLevel(0), 4);
4994

Y
Yi Wu 已提交
4995 4996 4997 4998 4999
  // Enable auto compaction and perform the same test, # of L0 files should be
  // reduced after compaction.
  ASSERT_OK(dbfull()->SetOptions({{"disable_auto_compactions", "false"}}));
  dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);
  ASSERT_EQ(NumTableFilesAtLevel(0), 0);
5000

Y
Yi Wu 已提交
5001
  for (int i = 0; i < 4; ++i) {
M
mrambacher 已提交
5002
    ASSERT_OK(Put(Key(i), rnd.RandomString(1024)));
Y
Yi Wu 已提交
5003 5004
    // Wait for compaction so that put won't stop
    dbfull()->TEST_FlushMemTable(true);
S
sdong 已提交
5005
  }
Y
Yi Wu 已提交
5006 5007 5008
  dbfull()->TEST_WaitForCompact();
  ASSERT_LT(NumTableFilesAtLevel(0), 4);
}
5009

5010
// Test dynamic FIFO compaction options.
5011 5012 5013 5014 5015 5016 5017
// This test covers just option parsing and makes sure that the options are
// correctly assigned. Also look at DBOptionsTest.SetFIFOCompactionOptions
// test which makes sure that the FIFO compaction funcionality is working
// as expected on dynamically changing the options.
// Even more FIFOCompactionTests are at DBTest.FIFOCompaction* .
TEST_F(DBTest, DynamicFIFOCompactionOptions) {
  Options options;
5018
  options.ttl = 0;
5019 5020 5021 5022 5023 5024
  options.create_if_missing = true;
  DestroyAndReopen(options);

  // Initial defaults
  ASSERT_EQ(dbfull()->GetOptions().compaction_options_fifo.max_table_files_size,
            1024 * 1024 * 1024);
5025
  ASSERT_EQ(dbfull()->GetOptions().ttl, 0);
5026 5027 5028 5029 5030 5031 5032
  ASSERT_EQ(dbfull()->GetOptions().compaction_options_fifo.allow_compaction,
            false);

  ASSERT_OK(dbfull()->SetOptions(
      {{"compaction_options_fifo", "{max_table_files_size=23;}"}}));
  ASSERT_EQ(dbfull()->GetOptions().compaction_options_fifo.max_table_files_size,
            23);
5033
  ASSERT_EQ(dbfull()->GetOptions().ttl, 0);
5034 5035 5036
  ASSERT_EQ(dbfull()->GetOptions().compaction_options_fifo.allow_compaction,
            false);

5037
  ASSERT_OK(dbfull()->SetOptions({{"ttl", "97"}}));
5038 5039
  ASSERT_EQ(dbfull()->GetOptions().compaction_options_fifo.max_table_files_size,
            23);
5040
  ASSERT_EQ(dbfull()->GetOptions().ttl, 97);
5041 5042 5043
  ASSERT_EQ(dbfull()->GetOptions().compaction_options_fifo.allow_compaction,
            false);

5044
  ASSERT_OK(dbfull()->SetOptions({{"ttl", "203"}}));
5045 5046
  ASSERT_EQ(dbfull()->GetOptions().compaction_options_fifo.max_table_files_size,
            23);
5047
  ASSERT_EQ(dbfull()->GetOptions().ttl, 203);
5048 5049 5050 5051 5052 5053 5054
  ASSERT_EQ(dbfull()->GetOptions().compaction_options_fifo.allow_compaction,
            false);

  ASSERT_OK(dbfull()->SetOptions(
      {{"compaction_options_fifo", "{allow_compaction=true;}"}}));
  ASSERT_EQ(dbfull()->GetOptions().compaction_options_fifo.max_table_files_size,
            23);
5055
  ASSERT_EQ(dbfull()->GetOptions().ttl, 203);
5056 5057 5058 5059
  ASSERT_EQ(dbfull()->GetOptions().compaction_options_fifo.allow_compaction,
            true);

  ASSERT_OK(dbfull()->SetOptions(
5060
      {{"compaction_options_fifo", "{max_table_files_size=31;}"}}));
5061 5062
  ASSERT_EQ(dbfull()->GetOptions().compaction_options_fifo.max_table_files_size,
            31);
5063
  ASSERT_EQ(dbfull()->GetOptions().ttl, 203);
5064 5065 5066 5067 5068
  ASSERT_EQ(dbfull()->GetOptions().compaction_options_fifo.allow_compaction,
            true);

  ASSERT_OK(dbfull()->SetOptions(
      {{"compaction_options_fifo",
5069 5070
        "{max_table_files_size=51;allow_compaction=true;}"}}));
  ASSERT_OK(dbfull()->SetOptions({{"ttl", "49"}}));
5071 5072
  ASSERT_EQ(dbfull()->GetOptions().compaction_options_fifo.max_table_files_size,
            51);
5073
  ASSERT_EQ(dbfull()->GetOptions().ttl, 49);
5074 5075 5076 5077
  ASSERT_EQ(dbfull()->GetOptions().compaction_options_fifo.allow_compaction,
            true);
}

5078 5079 5080 5081 5082 5083
TEST_F(DBTest, DynamicUniversalCompactionOptions) {
  Options options;
  options.create_if_missing = true;
  DestroyAndReopen(options);

  // Initial defaults
5084
  ASSERT_EQ(dbfull()->GetOptions().compaction_options_universal.size_ratio, 1U);
5085
  ASSERT_EQ(dbfull()->GetOptions().compaction_options_universal.min_merge_width,
5086
            2u);
5087 5088 5089 5090 5091
  ASSERT_EQ(dbfull()->GetOptions().compaction_options_universal.max_merge_width,
            UINT_MAX);
  ASSERT_EQ(dbfull()
                ->GetOptions()
                .compaction_options_universal.max_size_amplification_percent,
5092
            200u);
5093 5094 5095 5096 5097 5098 5099 5100 5101 5102 5103 5104
  ASSERT_EQ(dbfull()
                ->GetOptions()
                .compaction_options_universal.compression_size_percent,
            -1);
  ASSERT_EQ(dbfull()->GetOptions().compaction_options_universal.stop_style,
            kCompactionStopStyleTotalSize);
  ASSERT_EQ(
      dbfull()->GetOptions().compaction_options_universal.allow_trivial_move,
      false);

  ASSERT_OK(dbfull()->SetOptions(
      {{"compaction_options_universal", "{size_ratio=7;}"}}));
5105
  ASSERT_EQ(dbfull()->GetOptions().compaction_options_universal.size_ratio, 7u);
5106
  ASSERT_EQ(dbfull()->GetOptions().compaction_options_universal.min_merge_width,
5107
            2u);
5108 5109 5110 5111 5112
  ASSERT_EQ(dbfull()->GetOptions().compaction_options_universal.max_merge_width,
            UINT_MAX);
  ASSERT_EQ(dbfull()
                ->GetOptions()
                .compaction_options_universal.max_size_amplification_percent,
5113
            200u);
5114 5115 5116 5117 5118 5119 5120 5121 5122 5123 5124 5125
  ASSERT_EQ(dbfull()
                ->GetOptions()
                .compaction_options_universal.compression_size_percent,
            -1);
  ASSERT_EQ(dbfull()->GetOptions().compaction_options_universal.stop_style,
            kCompactionStopStyleTotalSize);
  ASSERT_EQ(
      dbfull()->GetOptions().compaction_options_universal.allow_trivial_move,
      false);

  ASSERT_OK(dbfull()->SetOptions(
      {{"compaction_options_universal", "{min_merge_width=11;}"}}));
5126
  ASSERT_EQ(dbfull()->GetOptions().compaction_options_universal.size_ratio, 7u);
5127
  ASSERT_EQ(dbfull()->GetOptions().compaction_options_universal.min_merge_width,
5128
            11u);
5129 5130 5131 5132 5133
  ASSERT_EQ(dbfull()->GetOptions().compaction_options_universal.max_merge_width,
            UINT_MAX);
  ASSERT_EQ(dbfull()
                ->GetOptions()
                .compaction_options_universal.max_size_amplification_percent,
5134
            200u);
5135 5136 5137 5138 5139 5140 5141 5142 5143 5144
  ASSERT_EQ(dbfull()
                ->GetOptions()
                .compaction_options_universal.compression_size_percent,
            -1);
  ASSERT_EQ(dbfull()->GetOptions().compaction_options_universal.stop_style,
            kCompactionStopStyleTotalSize);
  ASSERT_EQ(
      dbfull()->GetOptions().compaction_options_universal.allow_trivial_move,
      false);
}
5145
#endif  // ROCKSDB_LITE
5146

Y
Yi Wu 已提交
5147 5148 5149 5150 5151 5152 5153 5154
TEST_F(DBTest, FileCreationRandomFailure) {
  Options options;
  options.env = env_;
  options.create_if_missing = true;
  options.write_buffer_size = 100000;  // Small write buffer
  options.target_file_size_base = 200000;
  options.max_bytes_for_level_base = 1000000;
  options.max_bytes_for_level_multiplier = 2;
5155

Y
Yi Wu 已提交
5156 5157
  DestroyAndReopen(options);
  Random rnd(301);
S
sdong 已提交
5158

Y
Yi Wu 已提交
5159 5160 5161 5162 5163 5164 5165 5166 5167 5168 5169 5170 5171 5172 5173 5174
  const int kCDTKeysPerBuffer = 4;
  const int kTestSize = kCDTKeysPerBuffer * 4096;
  const int kTotalIteration = 100;
  // the second half of the test involves in random failure
  // of file creation.
  const int kRandomFailureTest = kTotalIteration / 2;
  std::vector<std::string> values;
  for (int i = 0; i < kTestSize; ++i) {
    values.push_back("NOT_FOUND");
  }
  for (int j = 0; j < kTotalIteration; ++j) {
    if (j == kRandomFailureTest) {
      env_->non_writeable_rate_.store(90);
    }
    for (int k = 0; k < kTestSize; ++k) {
      // here we expect some of the Put fails.
M
mrambacher 已提交
5175
      std::string value = rnd.RandomString(100);
Y
Yi Wu 已提交
5176 5177 5178 5179 5180 5181 5182 5183 5184 5185
      Status s = Put(Key(k), Slice(value));
      if (s.ok()) {
        // update the latest successful put
        values[k] = value;
      }
      // But everything before we simulate the failure-test should succeed.
      if (j < kRandomFailureTest) {
        ASSERT_OK(s);
      }
    }
S
sdong 已提交
5186 5187
  }

Y
Yi Wu 已提交
5188 5189 5190
  // If rocksdb does not do the correct job, internal assert will fail here.
  dbfull()->TEST_WaitForFlushMemTable();
  dbfull()->TEST_WaitForCompact();
S
sdong 已提交
5191

Y
Yi Wu 已提交
5192 5193 5194 5195 5196
  // verify we have the latest successful update
  for (int k = 0; k < kTestSize; ++k) {
    auto v = Get(Key(k));
    ASSERT_EQ(v, values[k]);
  }
5197

Y
Yi Wu 已提交
5198 5199 5200 5201 5202 5203 5204 5205
  // reopen and reverify we have the latest successful update
  env_->non_writeable_rate_.store(0);
  Reopen(options);
  for (int k = 0; k < kTestSize; ++k) {
    auto v = Get(Key(k));
    ASSERT_EQ(v, values[k]);
  }
}
S
sdong 已提交
5206

Y
Yi Wu 已提交
5207
#ifndef ROCKSDB_LITE
5208

Y
Yi Wu 已提交
5209 5210 5211 5212 5213 5214 5215
TEST_F(DBTest, DynamicMiscOptions) {
  // Test max_sequential_skip_in_iterations
  Options options;
  options.env = env_;
  options.create_if_missing = true;
  options.max_sequential_skip_in_iterations = 16;
  options.compression = kNoCompression;
5216
  options.statistics = ROCKSDB_NAMESPACE::CreateDBStatistics();
Y
Yi Wu 已提交
5217
  DestroyAndReopen(options);
S
sdong 已提交
5218

Y
Yi Wu 已提交
5219 5220 5221 5222 5223
  auto assert_reseek_count = [this, &options](int key_start, int num_reseek) {
    int key0 = key_start;
    int key1 = key_start + 1;
    int key2 = key_start + 2;
    Random rnd(301);
M
mrambacher 已提交
5224
    ASSERT_OK(Put(Key(key0), rnd.RandomString(8)));
Y
Yi Wu 已提交
5225
    for (int i = 0; i < 10; ++i) {
M
mrambacher 已提交
5226
      ASSERT_OK(Put(Key(key1), rnd.RandomString(8)));
Y
Yi Wu 已提交
5227
    }
M
mrambacher 已提交
5228
    ASSERT_OK(Put(Key(key2), rnd.RandomString(8)));
Y
Yi Wu 已提交
5229 5230 5231 5232 5233 5234 5235 5236 5237 5238 5239 5240
    std::unique_ptr<Iterator> iter(db_->NewIterator(ReadOptions()));
    iter->Seek(Key(key1));
    ASSERT_TRUE(iter->Valid());
    ASSERT_EQ(iter->key().compare(Key(key1)), 0);
    iter->Next();
    ASSERT_TRUE(iter->Valid());
    ASSERT_EQ(iter->key().compare(Key(key2)), 0);
    ASSERT_EQ(num_reseek,
              TestGetTickerCount(options, NUMBER_OF_RESEEKS_IN_ITERATION));
  };
  // No reseek
  assert_reseek_count(100, 0);
S
sdong 已提交
5241

Y
Yi Wu 已提交
5242 5243 5244 5245 5246 5247 5248 5249 5250 5251 5252 5253
  ASSERT_OK(dbfull()->SetOptions({{"max_sequential_skip_in_iterations", "4"}}));
  // Clear memtable and make new option effective
  dbfull()->TEST_FlushMemTable(true);
  // Trigger reseek
  assert_reseek_count(200, 1);

  ASSERT_OK(
      dbfull()->SetOptions({{"max_sequential_skip_in_iterations", "16"}}));
  // Clear memtable and make new option effective
  dbfull()->TEST_FlushMemTable(true);
  // No reseek
  assert_reseek_count(300, 1);
A
Aaron Gao 已提交
5254 5255 5256 5257 5258 5259 5260 5261 5262 5263 5264 5265 5266 5267 5268 5269 5270 5271

  MutableCFOptions mutable_cf_options;
  CreateAndReopenWithCF({"pikachu"}, options);
  // Test soft_pending_compaction_bytes_limit,
  // hard_pending_compaction_bytes_limit
  ASSERT_OK(dbfull()->SetOptions(
      handles_[1], {{"soft_pending_compaction_bytes_limit", "200"},
                    {"hard_pending_compaction_bytes_limit", "300"}}));
  ASSERT_OK(dbfull()->TEST_GetLatestMutableCFOptions(handles_[1],
                                                     &mutable_cf_options));
  ASSERT_EQ(200, mutable_cf_options.soft_pending_compaction_bytes_limit);
  ASSERT_EQ(300, mutable_cf_options.hard_pending_compaction_bytes_limit);
  // Test report_bg_io_stats
  ASSERT_OK(
      dbfull()->SetOptions(handles_[1], {{"report_bg_io_stats", "true"}}));
  // sanity check
  ASSERT_OK(dbfull()->TEST_GetLatestMutableCFOptions(handles_[1],
                                                     &mutable_cf_options));
S
Siying Dong 已提交
5272
  ASSERT_TRUE(mutable_cf_options.report_bg_io_stats);
A
Aaron Gao 已提交
5273 5274 5275 5276 5277 5278
  // Test compression
  // sanity check
  ASSERT_OK(dbfull()->SetOptions({{"compression", "kNoCompression"}}));
  ASSERT_OK(dbfull()->TEST_GetLatestMutableCFOptions(handles_[0],
                                                     &mutable_cf_options));
  ASSERT_EQ(CompressionType::kNoCompression, mutable_cf_options.compression);
5279 5280 5281 5282 5283 5284 5285 5286 5287

  if (Snappy_Supported()) {
    ASSERT_OK(dbfull()->SetOptions({{"compression", "kSnappyCompression"}}));
    ASSERT_OK(dbfull()->TEST_GetLatestMutableCFOptions(handles_[0],
                                                       &mutable_cf_options));
    ASSERT_EQ(CompressionType::kSnappyCompression,
              mutable_cf_options.compression);
  }

A
Aaron Gao 已提交
5288 5289 5290 5291 5292
  // Test paranoid_file_checks already done in db_block_cache_test
  ASSERT_OK(
      dbfull()->SetOptions(handles_[1], {{"paranoid_file_checks", "true"}}));
  ASSERT_OK(dbfull()->TEST_GetLatestMutableCFOptions(handles_[1],
                                                     &mutable_cf_options));
S
Siying Dong 已提交
5293
  ASSERT_TRUE(mutable_cf_options.report_bg_io_stats);
5294 5295 5296 5297 5298 5299 5300
  ASSERT_TRUE(mutable_cf_options.check_flush_compaction_key_order);

  ASSERT_OK(dbfull()->SetOptions(
      handles_[1], {{"check_flush_compaction_key_order", "false"}}));
  ASSERT_OK(dbfull()->TEST_GetLatestMutableCFOptions(handles_[1],
                                                     &mutable_cf_options));
  ASSERT_FALSE(mutable_cf_options.check_flush_compaction_key_order);
S
sdong 已提交
5301
}
Y
Yi Wu 已提交
5302 5303 5304 5305 5306 5307 5308 5309 5310 5311 5312 5313 5314
#endif  // ROCKSDB_LITE

TEST_F(DBTest, L0L1L2AndUpHitCounter) {
  Options options = CurrentOptions();
  options.write_buffer_size = 32 * 1024;
  options.target_file_size_base = 32 * 1024;
  options.level0_file_num_compaction_trigger = 2;
  options.level0_slowdown_writes_trigger = 2;
  options.level0_stop_writes_trigger = 4;
  options.max_bytes_for_level_base = 64 * 1024;
  options.max_write_buffer_number = 2;
  options.max_background_compactions = 8;
  options.max_background_flushes = 8;
5315
  options.statistics = ROCKSDB_NAMESPACE::CreateDBStatistics();
Y
Yi Wu 已提交
5316
  CreateAndReopenWithCF({"mypikachu"}, options);
5317

Y
Yi Wu 已提交
5318 5319 5320 5321 5322 5323 5324
  int numkeys = 20000;
  for (int i = 0; i < numkeys; i++) {
    ASSERT_OK(Put(1, Key(i), "val"));
  }
  ASSERT_EQ(0, TestGetTickerCount(options, GET_HIT_L0));
  ASSERT_EQ(0, TestGetTickerCount(options, GET_HIT_L1));
  ASSERT_EQ(0, TestGetTickerCount(options, GET_HIT_L2_AND_UP));
5325

Y
Yi Wu 已提交
5326 5327
  ASSERT_OK(Flush(1));
  dbfull()->TEST_WaitForCompact();
5328

Y
Yi Wu 已提交
5329 5330
  for (int i = 0; i < numkeys; i++) {
    ASSERT_EQ(Get(1, Key(i)), "val");
5331 5332
  }

Y
Yi Wu 已提交
5333 5334 5335 5336 5337 5338 5339
  ASSERT_GT(TestGetTickerCount(options, GET_HIT_L0), 100);
  ASSERT_GT(TestGetTickerCount(options, GET_HIT_L1), 100);
  ASSERT_GT(TestGetTickerCount(options, GET_HIT_L2_AND_UP), 100);

  ASSERT_EQ(numkeys, TestGetTickerCount(options, GET_HIT_L0) +
                         TestGetTickerCount(options, GET_HIT_L1) +
                         TestGetTickerCount(options, GET_HIT_L2_AND_UP));
5340
}
S
sdong 已提交
5341

Y
Yi Wu 已提交
5342 5343 5344 5345 5346
TEST_F(DBTest, EncodeDecompressedBlockSizeTest) {
  // iter 0 -- zlib
  // iter 1 -- bzip2
  // iter 2 -- lz4
  // iter 3 -- lz4HC
5347
  // iter 4 -- xpress
5348
  CompressionType compressions[] = {kZlibCompression, kBZip2Compression,
A
Aaron Gao 已提交
5349
                                    kLZ4Compression, kLZ4HCCompression,
5350 5351 5352
                                    kXpressCompression};
  for (auto comp : compressions) {
    if (!CompressionTypeSupported(comp)) {
Y
Yi Wu 已提交
5353 5354 5355 5356 5357 5358 5359 5360 5361 5362 5363
      continue;
    }
    // first_table_version 1 -- generate with table_version == 1, read with
    // table_version == 2
    // first_table_version 2 -- generate with table_version == 2, read with
    // table_version == 1
    for (int first_table_version = 1; first_table_version <= 2;
         ++first_table_version) {
      BlockBasedTableOptions table_options;
      table_options.format_version = first_table_version;
      table_options.filter_policy.reset(NewBloomFilterPolicy(10));
5364
      Options options = CurrentOptions();
Y
Yi Wu 已提交
5365 5366
      options.table_factory.reset(NewBlockBasedTableFactory(table_options));
      options.create_if_missing = true;
5367
      options.compression = comp;
Y
Yi Wu 已提交
5368 5369
      DestroyAndReopen(options);

5370
      int kNumKeysWritten = 1000;
Y
Yi Wu 已提交
5371 5372 5373 5374

      Random rnd(301);
      for (int i = 0; i < kNumKeysWritten; ++i) {
        // compressible string
M
mrambacher 已提交
5375
        ASSERT_OK(Put(Key(i), rnd.RandomString(128) + std::string(128, 'a')));
Y
Yi Wu 已提交
5376 5377 5378 5379 5380 5381 5382 5383 5384
      }

      table_options.format_version = first_table_version == 1 ? 2 : 1;
      options.table_factory.reset(NewBlockBasedTableFactory(table_options));
      Reopen(options);
      for (int i = 0; i < kNumKeysWritten; ++i) {
        auto r = Get(Key(i));
        ASSERT_EQ(r.substr(128), std::string(128, 'a'));
      }
5385 5386 5387 5388
    }
  }
}

Y
Yi Wu 已提交
5389
TEST_F(DBTest, CloseSpeedup) {
5390
  Options options = CurrentOptions();
Y
Yi Wu 已提交
5391 5392 5393 5394 5395 5396 5397
  options.compaction_style = kCompactionStyleLevel;
  options.write_buffer_size = 110 << 10;  // 110KB
  options.arena_block_size = 4 << 10;
  options.level0_file_num_compaction_trigger = 2;
  options.num_levels = 4;
  options.max_bytes_for_level_base = 400 * 1024;
  options.max_write_buffer_number = 16;
5398

Y
Yi Wu 已提交
5399 5400 5401 5402 5403 5404 5405 5406 5407
  // Block background threads
  env_->SetBackgroundThreads(1, Env::LOW);
  env_->SetBackgroundThreads(1, Env::HIGH);
  test::SleepingBackgroundTask sleeping_task_low;
  env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask, &sleeping_task_low,
                 Env::Priority::LOW);
  test::SleepingBackgroundTask sleeping_task_high;
  env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask,
                 &sleeping_task_high, Env::Priority::HIGH);
5408

Y
Yi Wu 已提交
5409 5410 5411 5412 5413 5414 5415
  std::vector<std::string> filenames;
  env_->GetChildren(dbname_, &filenames);
  // Delete archival files.
  for (size_t i = 0; i < filenames.size(); ++i) {
    env_->DeleteFile(dbname_ + "/" + filenames[i]);
  }
  env_->DeleteDir(dbname_);
5416 5417
  DestroyAndReopen(options);

5418
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
Y
Yi Wu 已提交
5419 5420
  env_->SetBackgroundThreads(1, Env::LOW);
  env_->SetBackgroundThreads(1, Env::HIGH);
5421
  Random rnd(301);
Y
Yi Wu 已提交
5422
  int key_idx = 0;
5423

Y
Yi Wu 已提交
5424 5425 5426 5427 5428
  // First three 110KB files are not going to level 2
  // After that, (100K, 200K)
  for (int num = 0; num < 5; num++) {
    GenerateNewFile(&rnd, &key_idx, true);
  }
5429

Y
Yi Wu 已提交
5430
  ASSERT_EQ(0, GetSstFileCount(dbname_));
5431 5432

  Close();
Y
Yi Wu 已提交
5433
  ASSERT_EQ(0, GetSstFileCount(dbname_));
5434

Y
Yi Wu 已提交
5435 5436 5437 5438 5439
  // Unblock background threads
  sleeping_task_high.WakeUp();
  sleeping_task_high.WaitUntilDone();
  sleeping_task_low.WakeUp();
  sleeping_task_low.WaitUntilDone();
5440

Y
Yi Wu 已提交
5441
  Destroy(options);
5442 5443
}

Y
Yi Wu 已提交
5444 5445 5446
class DelayedMergeOperator : public MergeOperator {
 private:
  DBTest* db_test_;
I
Islam AbdelRahman 已提交
5447

Y
Yi Wu 已提交
5448 5449
 public:
  explicit DelayedMergeOperator(DBTest* d) : db_test_(d) {}
5450

5451
  bool FullMergeV2(const MergeOperationInput& merge_in,
5452
                   MergeOperationOutput* merge_out) const override {
5453 5454
    db_test_->env_->MockSleepForMicroseconds(1000 *
                                             merge_in.operand_list.size());
5455
    merge_out->new_value = "";
Y
Yi Wu 已提交
5456
    return true;
5457 5458
  }

5459
  const char* Name() const override { return "DelayedMergeOperator"; }
Y
Yi Wu 已提交
5460
};
I
Islam AbdelRahman 已提交
5461

Y
Yi Wu 已提交
5462 5463 5464 5465 5466
TEST_F(DBTest, MergeTestTime) {
  std::string one, two, three;
  PutFixed64(&one, 1);
  PutFixed64(&two, 2);
  PutFixed64(&three, 3);
I
Islam AbdelRahman 已提交
5467

Y
Yi Wu 已提交
5468 5469
  // Enable time profiling
  SetPerfLevel(kEnableTime);
I
Islam AbdelRahman 已提交
5470
  Options options = CurrentOptions();
5471
  options.statistics = ROCKSDB_NAMESPACE::CreateDBStatistics();
Y
Yi Wu 已提交
5472
  options.merge_operator.reset(new DelayedMergeOperator(this));
5473
  SetTimeElapseOnlySleepOnReopen(&options);
I
Islam AbdelRahman 已提交
5474 5475
  DestroyAndReopen(options);

5476 5477
  // NOTE: Presumed unnecessary and removed: resetting mock time in env

Y
Yi Wu 已提交
5478 5479 5480 5481 5482 5483 5484
  ASSERT_EQ(TestGetTickerCount(options, MERGE_OPERATION_TOTAL_TIME), 0);
  db_->Put(WriteOptions(), "foo", one);
  ASSERT_OK(Flush());
  ASSERT_OK(db_->Merge(WriteOptions(), "foo", two));
  ASSERT_OK(Flush());
  ASSERT_OK(db_->Merge(WriteOptions(), "foo", three));
  ASSERT_OK(Flush());
I
Islam AbdelRahman 已提交
5485

Y
Yi Wu 已提交
5486 5487 5488 5489 5490
  ReadOptions opt;
  opt.verify_checksums = true;
  opt.snapshot = nullptr;
  std::string result;
  db_->Get(opt, "foo", &result);
I
Islam AbdelRahman 已提交
5491

5492
  ASSERT_EQ(2000000, TestGetTickerCount(options, MERGE_OPERATION_TOTAL_TIME));
I
Islam AbdelRahman 已提交
5493

Y
Yi Wu 已提交
5494 5495 5496 5497 5498 5499 5500
  ReadOptions read_options;
  std::unique_ptr<Iterator> iter(db_->NewIterator(read_options));
  int count = 0;
  for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
    ASSERT_OK(iter->status());
    ++count;
  }
I
Islam AbdelRahman 已提交
5501

Y
Yi Wu 已提交
5502
  ASSERT_EQ(1, count);
5503
  ASSERT_EQ(4000000, TestGetTickerCount(options, MERGE_OPERATION_TOTAL_TIME));
D
Daniel Black 已提交
5504
#ifdef ROCKSDB_USING_THREAD_STATUS
Y
Yi Wu 已提交
5505 5506
  ASSERT_GT(TestGetTickerCount(options, FLUSH_WRITE_BYTES), 0);
#endif  // ROCKSDB_USING_THREAD_STATUS
I
Islam AbdelRahman 已提交
5507 5508
}

Y
Yi Wu 已提交
5509 5510 5511
#ifndef ROCKSDB_LITE
TEST_P(DBTestWithParam, MergeCompactionTimeTest) {
  SetPerfLevel(kEnableTime);
5512
  Options options = CurrentOptions();
Y
Yi Wu 已提交
5513
  options.compaction_filter_factory = std::make_shared<KeepFilterFactory>();
5514
  options.statistics = ROCKSDB_NAMESPACE::CreateDBStatistics();
Y
Yi Wu 已提交
5515
  options.merge_operator.reset(new DelayedMergeOperator(this));
5516
  options.disable_auto_compactions = true;
Y
Yi Wu 已提交
5517
  options.max_subcompactions = max_subcompactions_;
5518
  SetTimeElapseOnlySleepOnReopen(&options);
5519 5520
  DestroyAndReopen(options);

5521 5522
  constexpr unsigned n = 1000;
  for (unsigned i = 0; i < n; i++) {
Y
Yi Wu 已提交
5523
    ASSERT_OK(db_->Merge(WriteOptions(), "foo", "TEST"));
5524 5525
    ASSERT_OK(Flush());
  }
Y
Yi Wu 已提交
5526
  dbfull()->TEST_WaitForFlushMemTable();
5527

5528 5529 5530 5531 5532 5533
  CompactRangeOptions cro;
  cro.exclusive_manual_compaction = exclusive_manual_compaction_;
  ASSERT_OK(db_->CompactRange(cro, nullptr, nullptr));

  ASSERT_EQ(uint64_t{n} * 1000000U,
            TestGetTickerCount(options, MERGE_OPERATION_TOTAL_TIME));
5534
}
5535

Y
Yi Wu 已提交
5536
TEST_P(DBTestWithParam, FilterCompactionTimeTest) {
5537
  Options options = CurrentOptions();
Y
Yi Wu 已提交
5538 5539
  options.compaction_filter_factory =
      std::make_shared<DelayFilterFactory>(this);
5540
  options.disable_auto_compactions = true;
Y
Yi Wu 已提交
5541
  options.create_if_missing = true;
5542
  options.statistics = ROCKSDB_NAMESPACE::CreateDBStatistics();
5543
  options.statistics->set_stats_level(kExceptTimeForMutex);
Y
Yi Wu 已提交
5544
  options.max_subcompactions = max_subcompactions_;
5545
  SetTimeElapseOnlySleepOnReopen(&options);
5546 5547
  DestroyAndReopen(options);

5548
  unsigned n = 0;
Y
Yi Wu 已提交
5549 5550 5551 5552
  // put some data
  for (int table = 0; table < 4; ++table) {
    for (int i = 0; i < 10 + table; ++i) {
      Put(ToString(table * 100 + i), "val");
5553
      ++n;
Y
Yi Wu 已提交
5554 5555
    }
    Flush();
5556 5557
  }

Y
Yi Wu 已提交
5558 5559 5560 5561
  CompactRangeOptions cro;
  cro.exclusive_manual_compaction = exclusive_manual_compaction_;
  ASSERT_OK(db_->CompactRange(cro, nullptr, nullptr));
  ASSERT_EQ(0U, CountLiveFiles());
5562

Y
Yi Wu 已提交
5563
  Reopen(options);
5564

Y
Yi Wu 已提交
5565 5566
  Iterator* itr = db_->NewIterator(ReadOptions());
  itr->SeekToFirst();
5567 5568
  ASSERT_EQ(uint64_t{n} * 1000000U,
            TestGetTickerCount(options, FILTER_OPERATION_TOTAL_TIME));
Y
Yi Wu 已提交
5569
  delete itr;
5570
}
Y
Yi Wu 已提交
5571
#endif  // ROCKSDB_LITE
5572

Y
Yi Wu 已提交
5573 5574 5575 5576 5577 5578
TEST_F(DBTest, TestLogCleanup) {
  Options options = CurrentOptions();
  options.write_buffer_size = 64 * 1024;  // very small
  // only two memtables allowed ==> only two log files
  options.max_write_buffer_number = 2;
  Reopen(options);
5579

Y
Yi Wu 已提交
5580 5581 5582 5583 5584
  for (int i = 0; i < 100000; ++i) {
    Put(Key(i), "val");
    // only 2 memtables will be alive, so logs_to_free needs to always be below
    // 2
    ASSERT_LT(dbfull()->TEST_LogsToFreeSize(), static_cast<size_t>(3));
5585 5586 5587
  }
}

Y
Yi Wu 已提交
5588 5589 5590 5591 5592 5593 5594
#ifndef ROCKSDB_LITE
TEST_F(DBTest, EmptyCompactedDB) {
  Options options = CurrentOptions();
  options.max_open_files = -1;
  Close();
  ASSERT_OK(ReadOnlyReopen(options));
  Status s = Put("new", "value");
5595
  ASSERT_TRUE(s.IsNotSupported());
Y
Yi Wu 已提交
5596
  Close();
5597
}
Y
Yi Wu 已提交
5598
#endif  // ROCKSDB_LITE
5599

I
Islam AbdelRahman 已提交
5600
#ifndef ROCKSDB_LITE
5601
TEST_F(DBTest, DISABLED_SuggestCompactRangeTest) {
Y
Yi Wu 已提交
5602 5603
  class CompactionFilterFactoryGetContext : public CompactionFilterFactory {
   public:
5604
    std::unique_ptr<CompactionFilter> CreateCompactionFilter(
Y
Yi Wu 已提交
5605 5606 5607 5608
        const CompactionFilter::Context& context) override {
      saved_context = context;
      std::unique_ptr<CompactionFilter> empty_filter;
      return empty_filter;
5609
    }
Y
Yi Wu 已提交
5610 5611
    const char* Name() const override {
      return "CompactionFilterFactoryGetContext";
5612
    }
Y
Yi Wu 已提交
5613 5614 5615 5616
    static bool IsManual(CompactionFilterFactory* compaction_filter_factory) {
      return reinterpret_cast<CompactionFilterFactoryGetContext*>(
                 compaction_filter_factory)
          ->saved_context.is_manual_compaction;
5617
    }
Y
Yi Wu 已提交
5618 5619
    CompactionFilter::Context saved_context;
  };
5620

5621
  Options options = CurrentOptions();
Y
Yi Wu 已提交
5622 5623 5624 5625 5626 5627 5628 5629 5630
  options.memtable_factory.reset(
      new SpecialSkipListFactory(DBTestBase::kNumKeysByGenerateNewRandomFile));
  options.compaction_style = kCompactionStyleLevel;
  options.compaction_filter_factory.reset(
      new CompactionFilterFactoryGetContext());
  options.write_buffer_size = 200 << 10;
  options.arena_block_size = 4 << 10;
  options.level0_file_num_compaction_trigger = 4;
  options.num_levels = 4;
5631
  options.compression = kNoCompression;
Y
Yi Wu 已提交
5632 5633
  options.max_bytes_for_level_base = 450 << 10;
  options.target_file_size_base = 98 << 10;
5634
  options.max_compaction_bytes = static_cast<uint64_t>(1) << 60;  // inf
5635

Y
Yi Wu 已提交
5636
  Reopen(options);
5637

Y
Yi Wu 已提交
5638
  Random rnd(301);
5639

Y
Yi Wu 已提交
5640 5641
  for (int num = 0; num < 3; num++) {
    GenerateNewRandomFile(&rnd);
5642 5643
  }

Y
Yi Wu 已提交
5644 5645 5646 5647
  GenerateNewRandomFile(&rnd);
  ASSERT_EQ("0,4", FilesPerLevel(0));
  ASSERT_TRUE(!CompactionFilterFactoryGetContext::IsManual(
      options.compaction_filter_factory.get()));
5648

Y
Yi Wu 已提交
5649 5650
  GenerateNewRandomFile(&rnd);
  ASSERT_EQ("1,4", FilesPerLevel(0));
5651

Y
Yi Wu 已提交
5652 5653
  GenerateNewRandomFile(&rnd);
  ASSERT_EQ("2,4", FilesPerLevel(0));
5654

Y
Yi Wu 已提交
5655 5656 5657 5658 5659
  GenerateNewRandomFile(&rnd);
  ASSERT_EQ("3,4", FilesPerLevel(0));

  GenerateNewRandomFile(&rnd);
  ASSERT_EQ("0,4,4", FilesPerLevel(0));
5660

Y
Yi Wu 已提交
5661 5662
  GenerateNewRandomFile(&rnd);
  ASSERT_EQ("1,4,4", FilesPerLevel(0));
5663

Y
Yi Wu 已提交
5664 5665
  GenerateNewRandomFile(&rnd);
  ASSERT_EQ("2,4,4", FilesPerLevel(0));
5666

Y
Yi Wu 已提交
5667 5668
  GenerateNewRandomFile(&rnd);
  ASSERT_EQ("3,4,4", FilesPerLevel(0));
5669

Y
Yi Wu 已提交
5670 5671
  GenerateNewRandomFile(&rnd);
  ASSERT_EQ("0,4,8", FilesPerLevel(0));
5672

Y
Yi Wu 已提交
5673 5674
  GenerateNewRandomFile(&rnd);
  ASSERT_EQ("1,4,8", FilesPerLevel(0));
5675

Y
Yi Wu 已提交
5676 5677 5678 5679
  // compact it three times
  for (int i = 0; i < 3; ++i) {
    ASSERT_OK(experimental::SuggestCompactRange(db_, nullptr, nullptr));
    dbfull()->TEST_WaitForCompact();
5680 5681
  }

Y
Yi Wu 已提交
5682 5683 5684
  // All files are compacted
  ASSERT_EQ(0, NumTableFilesAtLevel(0));
  ASSERT_EQ(0, NumTableFilesAtLevel(1));
5685

Y
Yi Wu 已提交
5686 5687
  GenerateNewRandomFile(&rnd);
  ASSERT_EQ(1, NumTableFilesAtLevel(0));
5688

Y
Yi Wu 已提交
5689 5690 5691 5692
  // nonoverlapping with the file on level 0
  Slice start("a"), end("b");
  ASSERT_OK(experimental::SuggestCompactRange(db_, &start, &end));
  dbfull()->TEST_WaitForCompact();
5693

Y
Yi Wu 已提交
5694 5695
  // should not compact the level 0 file
  ASSERT_EQ(1, NumTableFilesAtLevel(0));
5696

Y
Yi Wu 已提交
5697 5698 5699 5700 5701 5702
  start = Slice("j");
  end = Slice("m");
  ASSERT_OK(experimental::SuggestCompactRange(db_, &start, &end));
  dbfull()->TEST_WaitForCompact();
  ASSERT_TRUE(CompactionFilterFactoryGetContext::IsManual(
      options.compaction_filter_factory.get()));
5703

Y
Yi Wu 已提交
5704 5705 5706
  // now it should compact the level 0 file
  ASSERT_EQ(0, NumTableFilesAtLevel(0));
  ASSERT_EQ(1, NumTableFilesAtLevel(1));
5707 5708
}

5709

Y
Yi Wu 已提交
5710 5711 5712 5713 5714
TEST_F(DBTest, PromoteL0) {
  Options options = CurrentOptions();
  options.disable_auto_compactions = true;
  options.write_buffer_size = 10 * 1024 * 1024;
  DestroyAndReopen(options);
5715

Y
Yi Wu 已提交
5716 5717 5718
  // non overlapping ranges
  std::vector<std::pair<int32_t, int32_t>> ranges = {
      {81, 160}, {0, 80}, {161, 240}, {241, 320}};
5719

Y
Yi Wu 已提交
5720
  int32_t value_size = 10 * 1024;  // 10 KB
5721

Y
Yi Wu 已提交
5722 5723 5724 5725
  Random rnd(301);
  std::map<int32_t, std::string> values;
  for (const auto& range : ranges) {
    for (int32_t j = range.first; j < range.second; j++) {
M
mrambacher 已提交
5726
      values[j] = rnd.RandomString(value_size);
Y
Yi Wu 已提交
5727
      ASSERT_OK(Put(Key(j), values[j]));
5728
    }
Y
Yi Wu 已提交
5729 5730
    ASSERT_OK(Flush());
  }
5731

Y
Yi Wu 已提交
5732 5733 5734
  int32_t level0_files = NumTableFilesAtLevel(0, 0);
  ASSERT_EQ(level0_files, ranges.size());
  ASSERT_EQ(NumTableFilesAtLevel(1, 0), 0);  // No files in L1
5735

Y
Yi Wu 已提交
5736 5737 5738 5739 5740
  // Promote L0 level to L2.
  ASSERT_OK(experimental::PromoteL0(db_, db_->DefaultColumnFamily(), 2));
  // We expect that all the files were trivially moved from L0 to L2
  ASSERT_EQ(NumTableFilesAtLevel(0, 0), 0);
  ASSERT_EQ(NumTableFilesAtLevel(2, 0), level0_files);
5741

Y
Yi Wu 已提交
5742 5743 5744 5745
  for (const auto& kv : values) {
    ASSERT_EQ(Get(Key(kv.first)), kv.second);
  }
}
5746

Y
Yi Wu 已提交
5747 5748 5749 5750 5751
TEST_F(DBTest, PromoteL0Failure) {
  Options options = CurrentOptions();
  options.disable_auto_compactions = true;
  options.write_buffer_size = 10 * 1024 * 1024;
  DestroyAndReopen(options);
5752

Y
Yi Wu 已提交
5753 5754 5755 5756 5757 5758
  // Produce two L0 files with overlapping ranges.
  ASSERT_OK(Put(Key(0), ""));
  ASSERT_OK(Put(Key(3), ""));
  ASSERT_OK(Flush());
  ASSERT_OK(Put(Key(1), ""));
  ASSERT_OK(Flush());
5759

Y
Yi Wu 已提交
5760 5761 5762 5763
  Status status;
  // Fails because L0 has overlapping files.
  status = experimental::PromoteL0(db_, db_->DefaultColumnFamily());
  ASSERT_TRUE(status.IsInvalidArgument());
5764

Y
Yi Wu 已提交
5765 5766 5767
  ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));
  // Now there is a file in L1.
  ASSERT_GE(NumTableFilesAtLevel(1, 0), 1);
5768

Y
Yi Wu 已提交
5769 5770 5771 5772 5773
  ASSERT_OK(Put(Key(5), ""));
  ASSERT_OK(Flush());
  // Fails because L1 is non-empty.
  status = experimental::PromoteL0(db_, db_->DefaultColumnFamily());
  ASSERT_TRUE(status.IsInvalidArgument());
5774 5775
}

Y
Yi Wu 已提交
5776
// Github issue #596
5777 5778 5779
TEST_F(DBTest, CompactRangeWithEmptyBottomLevel) {
  const int kNumLevels = 2;
  const int kNumL0Files = 2;
5780
  Options options = CurrentOptions();
5781 5782
  options.disable_auto_compactions = true;
  options.num_levels = kNumLevels;
Y
Yi Wu 已提交
5783
  DestroyAndReopen(options);
5784

Y
Yi Wu 已提交
5785
  Random rnd(301);
5786
  for (int i = 0; i < kNumL0Files; ++i) {
M
mrambacher 已提交
5787
    ASSERT_OK(Put(Key(0), rnd.RandomString(1024)));
5788
    Flush();
5789
  }
5790 5791
  ASSERT_EQ(NumTableFilesAtLevel(0), kNumL0Files);
  ASSERT_EQ(NumTableFilesAtLevel(1), 0);
5792

Y
Yi Wu 已提交
5793
  ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));
5794 5795
  ASSERT_EQ(NumTableFilesAtLevel(0), 0);
  ASSERT_EQ(NumTableFilesAtLevel(1), kNumL0Files);
Y
Yi Wu 已提交
5796
}
Y
Yi Wu 已提交
5797
#endif  // ROCKSDB_LITE
5798

Y
Yi Wu 已提交
5799
TEST_F(DBTest, AutomaticConflictsWithManualCompaction) {
5800
  const int kNumL0Files = 50;
Y
Yi Wu 已提交
5801
  Options options = CurrentOptions();
5802 5803 5804 5805
  options.level0_file_num_compaction_trigger = 4;
  // never slowdown / stop
  options.level0_slowdown_writes_trigger = 999999;
  options.level0_stop_writes_trigger = 999999;
Y
Yi Wu 已提交
5806
  options.max_background_compactions = 10;
5807 5808
  DestroyAndReopen(options);

5809 5810
  // schedule automatic compactions after the manual one starts, but before it
  // finishes to ensure conflict.
5811
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->LoadDependency(
5812 5813 5814 5815
      {{"DBImpl::BackgroundCompaction:Start",
        "DBTest::AutomaticConflictsWithManualCompaction:PrePuts"},
       {"DBTest::AutomaticConflictsWithManualCompaction:PostPuts",
        "DBImpl::BackgroundCompaction:NonTrivial:AfterRun"}});
Y
Yi Wu 已提交
5816
  std::atomic<int> callback_count(0);
5817
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
5818
      "DBImpl::MaybeScheduleFlushOrCompaction:Conflict",
5819
      [&](void* /*arg*/) { callback_count.fetch_add(1); });
5820
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
5821 5822 5823 5824 5825

  Random rnd(301);
  for (int i = 0; i < 2; ++i) {
    // put two keys to ensure no trivial move
    for (int j = 0; j < 2; ++j) {
M
mrambacher 已提交
5826
      ASSERT_OK(Put(Key(j), rnd.RandomString(1024)));
5827 5828 5829
    }
    ASSERT_OK(Flush());
  }
D
Dmitri Smirnov 已提交
5830
  port::Thread manual_compaction_thread([this]() {
5831 5832 5833 5834
    CompactRangeOptions croptions;
    croptions.exclusive_manual_compaction = true;
    ASSERT_OK(db_->CompactRange(croptions, nullptr, nullptr));
  });
5835 5836 5837 5838 5839

  TEST_SYNC_POINT("DBTest::AutomaticConflictsWithManualCompaction:PrePuts");
  for (int i = 0; i < kNumL0Files; ++i) {
    // put two keys to ensure no trivial move
    for (int j = 0; j < 2; ++j) {
M
mrambacher 已提交
5840
      ASSERT_OK(Put(Key(j), rnd.RandomString(1024)));
5841 5842 5843 5844 5845
    }
    ASSERT_OK(Flush());
  }
  TEST_SYNC_POINT("DBTest::AutomaticConflictsWithManualCompaction:PostPuts");

Y
Yi Wu 已提交
5846
  ASSERT_GE(callback_count.load(), 1);
5847
  for (int i = 0; i < 2; ++i) {
Y
Yi Wu 已提交
5848 5849
    ASSERT_NE("NOT_FOUND", Get(Key(i)));
  }
5850
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
5851 5852
  manual_compaction_thread.join();
  dbfull()->TEST_WaitForCompact();
5853 5854
}

5855
#ifndef ROCKSDB_LITE
5856 5857 5858 5859 5860 5861 5862 5863 5864 5865 5866 5867 5868
TEST_F(DBTest, CompactFilesShouldTriggerAutoCompaction) {
  Options options = CurrentOptions();
  options.max_background_compactions = 1;
  options.level0_file_num_compaction_trigger = 4;
  options.level0_slowdown_writes_trigger = 36;
  options.level0_stop_writes_trigger = 36;
  DestroyAndReopen(options);

  // generate files for manual compaction
  Random rnd(301);
  for (int i = 0; i < 2; ++i) {
    // put two keys to ensure no trivial move
    for (int j = 0; j < 2; ++j) {
M
mrambacher 已提交
5869
      ASSERT_OK(Put(Key(j), rnd.RandomString(1024)));
5870 5871 5872 5873
    }
    ASSERT_OK(Flush());
  }

5874
  ROCKSDB_NAMESPACE::ColumnFamilyMetaData cf_meta_data;
5875 5876 5877 5878 5879 5880 5881 5882 5883 5884 5885 5886 5887 5888 5889 5890 5891 5892 5893 5894 5895 5896 5897 5898
  db_->GetColumnFamilyMetaData(db_->DefaultColumnFamily(), &cf_meta_data);

  std::vector<std::string> input_files;
  input_files.push_back(cf_meta_data.levels[0].files[0].name);

  SyncPoint::GetInstance()->LoadDependency({
      {"CompactFilesImpl:0",
       "DBTest::CompactFilesShouldTriggerAutoCompaction:Begin"},
      {"DBTest::CompactFilesShouldTriggerAutoCompaction:End",
       "CompactFilesImpl:1"},
  });

  SyncPoint::GetInstance()->EnableProcessing();

  port::Thread manual_compaction_thread([&]() {
      auto s = db_->CompactFiles(CompactionOptions(),
          db_->DefaultColumnFamily(), input_files, 0);
  });

  TEST_SYNC_POINT(
          "DBTest::CompactFilesShouldTriggerAutoCompaction:Begin");
  // generate enough files to trigger compaction
  for (int i = 0; i < 20; ++i) {
    for (int j = 0; j < 2; ++j) {
M
mrambacher 已提交
5899
      ASSERT_OK(Put(Key(j), rnd.RandomString(1024)));
5900 5901 5902 5903 5904 5905 5906 5907 5908 5909 5910 5911 5912 5913 5914 5915
    }
    ASSERT_OK(Flush());
  }
  db_->GetColumnFamilyMetaData(db_->DefaultColumnFamily(), &cf_meta_data);
  ASSERT_GT(cf_meta_data.levels[0].files.size(),
      options.level0_file_num_compaction_trigger);
  TEST_SYNC_POINT(
          "DBTest::CompactFilesShouldTriggerAutoCompaction:End");

  manual_compaction_thread.join();
  dbfull()->TEST_WaitForCompact();

  db_->GetColumnFamilyMetaData(db_->DefaultColumnFamily(), &cf_meta_data);
  ASSERT_LE(cf_meta_data.levels[0].files.size(),
      options.level0_file_num_compaction_trigger);
}
5916
#endif  // ROCKSDB_LITE
5917

Y
Yi Wu 已提交
5918 5919 5920
// Github issue #595
// Large write batch with column families
TEST_F(DBTest, LargeBatchWithColumnFamilies) {
5921 5922
  Options options = CurrentOptions();
  options.env = env_;
Y
Yi Wu 已提交
5923 5924 5925 5926 5927 5928 5929 5930 5931 5932 5933 5934 5935 5936 5937 5938
  options.write_buffer_size = 100000;  // Small write buffer
  CreateAndReopenWithCF({"pikachu"}, options);
  int64_t j = 0;
  for (int i = 0; i < 5; i++) {
    for (int pass = 1; pass <= 3; pass++) {
      WriteBatch batch;
      size_t write_size = 1024 * 1024 * (5 + i);
      fprintf(stderr, "prepare: %" ROCKSDB_PRIszt " MB, pass:%d\n",
              (write_size / 1024 / 1024), pass);
      for (;;) {
        std::string data(3000, j++ % 127 + 20);
        data += ToString(j);
        batch.Put(handles_[0], Slice(data), Slice(data));
        if (batch.GetDataSize() > write_size) {
          break;
        }
5939
      }
Y
Yi Wu 已提交
5940 5941 5942 5943
      fprintf(stderr, "write: %" ROCKSDB_PRIszt " MB\n",
              (batch.GetDataSize() / 1024 / 1024));
      ASSERT_OK(dbfull()->Write(WriteOptions(), &batch));
      fprintf(stderr, "done\n");
5944
    }
Y
Yi Wu 已提交
5945 5946 5947 5948
  }
  // make sure we can re-open it.
  ASSERT_OK(TryReopenWithColumnFamilies({"default", "pikachu"}, options));
}
5949

Y
Yi Wu 已提交
5950 5951 5952 5953 5954 5955 5956 5957 5958 5959 5960
// Make sure that Flushes can proceed in parallel with CompactRange()
TEST_F(DBTest, FlushesInParallelWithCompactRange) {
  // iter == 0 -- leveled
  // iter == 1 -- leveled, but throw in a flush between two levels compacting
  // iter == 2 -- universal
  for (int iter = 0; iter < 3; ++iter) {
    Options options = CurrentOptions();
    if (iter < 2) {
      options.compaction_style = kCompactionStyleLevel;
    } else {
      options.compaction_style = kCompactionStyleUniversal;
5961
    }
Y
Yi Wu 已提交
5962 5963 5964 5965 5966 5967 5968
    options.write_buffer_size = 110 << 10;
    options.level0_file_num_compaction_trigger = 4;
    options.num_levels = 4;
    options.compression = kNoCompression;
    options.max_bytes_for_level_base = 450 << 10;
    options.target_file_size_base = 98 << 10;
    options.max_write_buffer_number = 2;
5969 5970 5971

    DestroyAndReopen(options);

Y
Yi Wu 已提交
5972 5973 5974
    Random rnd(301);
    for (int num = 0; num < 14; num++) {
      GenerateNewRandomFile(&rnd);
5975 5976
    }

Y
Yi Wu 已提交
5977
    if (iter == 1) {
5978
      ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->LoadDependency(
Y
Yi Wu 已提交
5979 5980 5981 5982 5983
          {{"DBImpl::RunManualCompaction()::1",
            "DBTest::FlushesInParallelWithCompactRange:1"},
           {"DBTest::FlushesInParallelWithCompactRange:2",
            "DBImpl::RunManualCompaction()::2"}});
    } else {
5984
      ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->LoadDependency(
Y
Yi Wu 已提交
5985 5986 5987 5988
          {{"CompactionJob::Run():Start",
            "DBTest::FlushesInParallelWithCompactRange:1"},
           {"DBTest::FlushesInParallelWithCompactRange:2",
            "CompactionJob::Run():End"}});
5989
    }
5990
    ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
5991

D
Dmitri Smirnov 已提交
5992
    std::vector<port::Thread> threads;
Y
Yi Wu 已提交
5993
    threads.emplace_back([&]() { Compact("a", "z"); });
5994

Y
Yi Wu 已提交
5995 5996 5997 5998 5999 6000 6001
    TEST_SYNC_POINT("DBTest::FlushesInParallelWithCompactRange:1");

    // this has to start a flush. if flushes are blocked, this will try to
    // create
    // 3 memtables, and that will fail because max_write_buffer_number is 2
    for (int num = 0; num < 3; num++) {
      GenerateNewRandomFile(&rnd, /* nowait */ true);
6002 6003
    }

Y
Yi Wu 已提交
6004
    TEST_SYNC_POINT("DBTest::FlushesInParallelWithCompactRange:2");
6005

Y
Yi Wu 已提交
6006 6007
    for (auto& t : threads) {
      t.join();
6008
    }
6009
    ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
Y
Yi Wu 已提交
6010
  }
6011 6012
}

Y
Yi Wu 已提交
6013 6014
TEST_F(DBTest, DelayedWriteRate) {
  const int kEntriesPerMemTable = 100;
S
Siying Dong 已提交
6015
  const int kTotalFlushes = 12;
6016

D
dyniusz 已提交
6017
  Options options = CurrentOptions();
Y
Yi Wu 已提交
6018 6019 6020 6021 6022 6023 6024 6025 6026 6027 6028
  env_->SetBackgroundThreads(1, Env::LOW);
  options.env = env_;
  options.write_buffer_size = 100000000;
  options.max_write_buffer_number = 256;
  options.max_background_compactions = 1;
  options.level0_file_num_compaction_trigger = 3;
  options.level0_slowdown_writes_trigger = 3;
  options.level0_stop_writes_trigger = 999999;
  options.delayed_write_rate = 20000000;  // Start with 200MB/s
  options.memtable_factory.reset(
      new SpecialSkipListFactory(kEntriesPerMemTable));
D
dyniusz 已提交
6029

6030
  SetTimeElapseOnlySleepOnReopen(&options);
Y
Yi Wu 已提交
6031 6032 6033 6034 6035 6036 6037 6038 6039 6040
  CreateAndReopenWithCF({"pikachu"}, options);

  // Block compactions
  test::SleepingBackgroundTask sleeping_task_low;
  env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask, &sleeping_task_low,
                 Env::Priority::LOW);

  for (int i = 0; i < 3; i++) {
    Put(Key(i), std::string(10000, 'x'));
    Flush();
D
dyniusz 已提交
6041 6042
  }

Y
Yi Wu 已提交
6043 6044 6045 6046 6047 6048 6049 6050 6051 6052 6053 6054 6055 6056 6057 6058 6059 6060
  // These writes will be slowed down to 1KB/s
  uint64_t estimated_sleep_time = 0;
  Random rnd(301);
  Put("", "");
  uint64_t cur_rate = options.delayed_write_rate;
  for (int i = 0; i < kTotalFlushes; i++) {
    uint64_t size_memtable = 0;
    for (int j = 0; j < kEntriesPerMemTable; j++) {
      auto rand_num = rnd.Uniform(20);
      // Spread the size range to more.
      size_t entry_size = rand_num * rand_num * rand_num;
      WriteOptions wo;
      Put(Key(i), std::string(entry_size, 'x'), wo);
      size_memtable += entry_size + 18;
      // Occasionally sleep a while
      if (rnd.Uniform(20) == 6) {
        env_->SleepForMicroseconds(2666);
      }
D
dyniusz 已提交
6061
    }
Y
Yi Wu 已提交
6062 6063 6064
    dbfull()->TEST_WaitForFlushMemTable();
    estimated_sleep_time += size_memtable * 1000000u / cur_rate;
    // Slow down twice. One for memtable switch and one for flush finishes.
S
Siying Dong 已提交
6065 6066
    cur_rate = static_cast<uint64_t>(static_cast<double>(cur_rate) *
                                     kIncSlowdownRatio * kIncSlowdownRatio);
D
dyniusz 已提交
6067
  }
Y
Yi Wu 已提交
6068
  // Estimate the total sleep time fall into the rough range.
6069 6070
  ASSERT_GT(env_->NowMicros(), estimated_sleep_time / 2);
  ASSERT_LT(env_->NowMicros(), estimated_sleep_time * 2);
D
dyniusz 已提交
6071

6072
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
Y
Yi Wu 已提交
6073 6074
  sleeping_task_low.WakeUp();
  sleeping_task_low.WaitUntilDone();
D
dyniusz 已提交
6075 6076
}

Y
Yi Wu 已提交
6077 6078 6079 6080 6081 6082 6083 6084 6085 6086 6087 6088 6089 6090 6091
TEST_F(DBTest, HardLimit) {
  Options options = CurrentOptions();
  options.env = env_;
  env_->SetBackgroundThreads(1, Env::LOW);
  options.max_write_buffer_number = 256;
  options.write_buffer_size = 110 << 10;  // 110KB
  options.arena_block_size = 4 * 1024;
  options.level0_file_num_compaction_trigger = 4;
  options.level0_slowdown_writes_trigger = 999999;
  options.level0_stop_writes_trigger = 999999;
  options.hard_pending_compaction_bytes_limit = 800 << 10;
  options.max_bytes_for_level_base = 10000000000u;
  options.max_background_compactions = 1;
  options.memtable_factory.reset(
      new SpecialSkipListFactory(KNumKeysByGenerateNewFile - 1));
6092

Y
Yi Wu 已提交
6093 6094 6095 6096
  env_->SetBackgroundThreads(1, Env::LOW);
  test::SleepingBackgroundTask sleeping_task_low;
  env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask, &sleeping_task_low,
                 Env::Priority::LOW);
6097

Y
Yi Wu 已提交
6098
  CreateAndReopenWithCF({"pikachu"}, options);
6099

Y
Yi Wu 已提交
6100
  std::atomic<int> callback_count(0);
6101 6102 6103 6104 6105 6106
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
      "DBImpl::DelayWrite:Wait", [&](void* /*arg*/) {
        callback_count.fetch_add(1);
        sleeping_task_low.WakeUp();
      });
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
6107

Y
Yi Wu 已提交
6108 6109 6110 6111 6112 6113
  Random rnd(301);
  int key_idx = 0;
  for (int num = 0; num < 5; num++) {
    GenerateNewFile(&rnd, &key_idx, true);
    dbfull()->TEST_WaitForFlushMemTable();
  }
6114

Y
Yi Wu 已提交
6115
  ASSERT_EQ(0, callback_count.load());
6116

Y
Yi Wu 已提交
6117 6118 6119 6120 6121
  for (int num = 0; num < 5; num++) {
    GenerateNewFile(&rnd, &key_idx, true);
    dbfull()->TEST_WaitForFlushMemTable();
  }
  ASSERT_GE(callback_count.load(), 1);
6122

6123
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
Y
Yi Wu 已提交
6124 6125
  sleeping_task_low.WaitUntilDone();
}
6126

6127
#if !defined(ROCKSDB_LITE) && !defined(ROCKSDB_DISABLE_STALL_NOTIFICATION)
6128 6129
class WriteStallListener : public EventListener {
 public:
Y
Yi Wu 已提交
6130
  WriteStallListener() : condition_(WriteStallCondition::kNormal) {}
6131
  void OnStallConditionsChanged(const WriteStallInfo& info) override {
Y
Yi Wu 已提交
6132
    MutexLock l(&mutex_);
6133 6134 6135
    condition_ = info.condition.cur;
  }
  bool CheckCondition(WriteStallCondition expected) {
Y
Yi Wu 已提交
6136
    MutexLock l(&mutex_);
Y
Yi Wu 已提交
6137
    return expected == condition_;
6138 6139
  }
 private:
D
Dmitri Smirnov 已提交
6140
  port::Mutex   mutex_;
6141 6142 6143
  WriteStallCondition condition_;
};

Y
Yi Wu 已提交
6144 6145 6146 6147 6148 6149 6150 6151 6152
TEST_F(DBTest, SoftLimit) {
  Options options = CurrentOptions();
  options.env = env_;
  options.write_buffer_size = 100000;  // Small write buffer
  options.max_write_buffer_number = 256;
  options.level0_file_num_compaction_trigger = 1;
  options.level0_slowdown_writes_trigger = 3;
  options.level0_stop_writes_trigger = 999999;
  options.delayed_write_rate = 20000;  // About 200KB/s limited rate
6153
  options.soft_pending_compaction_bytes_limit = 160000;
Y
Yi Wu 已提交
6154 6155 6156 6157 6158
  options.target_file_size_base = 99999999;  // All into one file
  options.max_bytes_for_level_base = 50000;
  options.max_bytes_for_level_multiplier = 10;
  options.max_background_compactions = 1;
  options.compression = kNoCompression;
6159 6160
  WriteStallListener* listener = new WriteStallListener();
  options.listeners.emplace_back(listener);
6161

Y
Yi Wu 已提交
6162 6163 6164 6165 6166 6167 6168 6169 6170 6171 6172 6173 6174 6175 6176 6177 6178 6179 6180 6181 6182 6183 6184 6185 6186 6187 6188 6189 6190 6191 6192 6193 6194
  // FlushMemtable with opt.wait=true does not wait for
  // `OnStallConditionsChanged` being called. The event listener is triggered
  // on `JobContext::Clean`, which happens after flush result is installed.
  // We use sync point to create a custom WaitForFlush that waits for
  // context cleanup.
  port::Mutex flush_mutex;
  port::CondVar flush_cv(&flush_mutex);
  bool flush_finished = false;
  auto InstallFlushCallback = [&]() {
    {
      MutexLock l(&flush_mutex);
      flush_finished = false;
    }
    SyncPoint::GetInstance()->SetCallBack(
        "DBImpl::BackgroundCallFlush:ContextCleanedUp", [&](void*) {
          {
            MutexLock l(&flush_mutex);
            flush_finished = true;
          }
          flush_cv.SignalAll();
        });
  };
  auto WaitForFlush = [&]() {
    {
      MutexLock l(&flush_mutex);
      while (!flush_finished) {
        flush_cv.Wait();
      }
    }
    SyncPoint::GetInstance()->ClearCallBack(
        "DBImpl::BackgroundCallFlush:ContextCleanedUp");
  };

6195
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
Y
Yi Wu 已提交
6196

Y
Yi Wu 已提交
6197
  Reopen(options);
6198 6199 6200 6201 6202

  // Generating 360KB in Level 3
  for (int i = 0; i < 72; i++) {
    Put(Key(i), std::string(5000, 'x'));
    if (i % 10 == 0) {
6203
      dbfull()->TEST_FlushMemTable(true, true);
6204 6205 6206 6207 6208 6209 6210 6211 6212
    }
  }
  dbfull()->TEST_WaitForCompact();
  MoveFilesToLevel(3);

  // Generating 360KB in Level 2
  for (int i = 0; i < 72; i++) {
    Put(Key(i), std::string(5000, 'x'));
    if (i % 10 == 0) {
6213
      dbfull()->TEST_FlushMemTable(true, true);
6214 6215 6216 6217 6218
    }
  }
  dbfull()->TEST_WaitForCompact();
  MoveFilesToLevel(2);

Y
Yi Wu 已提交
6219
  Put(Key(0), "");
6220

Y
Yi Wu 已提交
6221 6222 6223 6224 6225
  test::SleepingBackgroundTask sleeping_task_low;
  // Block compactions
  env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask, &sleeping_task_low,
                 Env::Priority::LOW);
  sleeping_task_low.WaitUntilSleeping();
6226

Y
Yi Wu 已提交
6227 6228 6229 6230 6231
  // Create 3 L0 files, making score of L0 to be 3.
  for (int i = 0; i < 3; i++) {
    Put(Key(i), std::string(5000, 'x'));
    Put(Key(100 - i), std::string(5000, 'x'));
    // Flush the file. File size is around 30KB.
Y
Yi Wu 已提交
6232
    InstallFlushCallback();
6233
    dbfull()->TEST_FlushMemTable(true, true);
Y
Yi Wu 已提交
6234
    WaitForFlush();
Y
Yi Wu 已提交
6235 6236
  }
  ASSERT_TRUE(dbfull()->TEST_write_controler().NeedsDelay());
6237
  ASSERT_TRUE(listener->CheckCondition(WriteStallCondition::kDelayed));
6238

Y
Yi Wu 已提交
6239 6240 6241 6242
  sleeping_task_low.WakeUp();
  sleeping_task_low.WaitUntilDone();
  sleeping_task_low.Reset();
  dbfull()->TEST_WaitForCompact();
6243

Y
Yi Wu 已提交
6244 6245 6246 6247
  // Now there is one L1 file but doesn't trigger soft_rate_limit
  // The L1 file size is around 30KB.
  ASSERT_EQ(NumTableFilesAtLevel(1), 1);
  ASSERT_TRUE(!dbfull()->TEST_write_controler().NeedsDelay());
6248
  ASSERT_TRUE(listener->CheckCondition(WriteStallCondition::kNormal));
6249

Y
Yi Wu 已提交
6250
  // Only allow one compactin going through.
6251
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
6252
      "BackgroundCallCompaction:0", [&](void* /*arg*/) {
Y
Yi Wu 已提交
6253 6254 6255 6256 6257
        // Schedule a sleeping task.
        sleeping_task_low.Reset();
        env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask,
                       &sleeping_task_low, Env::Priority::LOW);
      });
6258

Y
Yi Wu 已提交
6259 6260 6261 6262 6263 6264 6265 6266
  env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask, &sleeping_task_low,
                 Env::Priority::LOW);
  sleeping_task_low.WaitUntilSleeping();
  // Create 3 L0 files, making score of L0 to be 3
  for (int i = 0; i < 3; i++) {
    Put(Key(10 + i), std::string(5000, 'x'));
    Put(Key(90 - i), std::string(5000, 'x'));
    // Flush the file. File size is around 30KB.
Y
Yi Wu 已提交
6267
    InstallFlushCallback();
6268
    dbfull()->TEST_FlushMemTable(true, true);
Y
Yi Wu 已提交
6269
    WaitForFlush();
6270 6271
  }

Y
Yi Wu 已提交
6272 6273 6274 6275 6276
  // Wake up sleep task to enable compaction to run and waits
  // for it to go to sleep state again to make sure one compaction
  // goes through.
  sleeping_task_low.WakeUp();
  sleeping_task_low.WaitUntilSleeping();
6277

Y
Yi Wu 已提交
6278 6279 6280 6281 6282
  // Now there is one L1 file (around 60KB) which exceeds 50KB base by 10KB
  // Given level multiplier 10, estimated pending compaction is around 100KB
  // doesn't trigger soft_pending_compaction_bytes_limit
  ASSERT_EQ(NumTableFilesAtLevel(1), 1);
  ASSERT_TRUE(!dbfull()->TEST_write_controler().NeedsDelay());
6283
  ASSERT_TRUE(listener->CheckCondition(WriteStallCondition::kNormal));
6284

Y
Yi Wu 已提交
6285 6286 6287 6288 6289
  // Create 3 L0 files, making score of L0 to be 3, higher than L0.
  for (int i = 0; i < 3; i++) {
    Put(Key(20 + i), std::string(5000, 'x'));
    Put(Key(80 - i), std::string(5000, 'x'));
    // Flush the file. File size is around 30KB.
Y
Yi Wu 已提交
6290
    InstallFlushCallback();
6291
    dbfull()->TEST_FlushMemTable(true, true);
Y
Yi Wu 已提交
6292
    WaitForFlush();
6293
  }
Y
Yi Wu 已提交
6294 6295 6296 6297 6298 6299 6300
  // Wake up sleep task to enable compaction to run and waits
  // for it to go to sleep state again to make sure one compaction
  // goes through.
  sleeping_task_low.WakeUp();
  sleeping_task_low.WaitUntilSleeping();

  // Now there is one L1 file (around 90KB) which exceeds 50KB base by 40KB
6301 6302
  // L2 size is 360KB, so the estimated level fanout 4, estimated pending
  // compaction is around 200KB
Y
Yi Wu 已提交
6303 6304 6305
  // triggerring soft_pending_compaction_bytes_limit
  ASSERT_EQ(NumTableFilesAtLevel(1), 1);
  ASSERT_TRUE(dbfull()->TEST_write_controler().NeedsDelay());
6306
  ASSERT_TRUE(listener->CheckCondition(WriteStallCondition::kDelayed));
6307

Y
Yi Wu 已提交
6308 6309
  sleeping_task_low.WakeUp();
  sleeping_task_low.WaitUntilSleeping();
6310

Y
Yi Wu 已提交
6311
  ASSERT_TRUE(!dbfull()->TEST_write_controler().NeedsDelay());
6312
  ASSERT_TRUE(listener->CheckCondition(WriteStallCondition::kNormal));
6313

Y
Yi Wu 已提交
6314 6315 6316 6317
  // shrink level base so L2 will hit soft limit easier.
  ASSERT_OK(dbfull()->SetOptions({
      {"max_bytes_for_level_base", "5000"},
  }));
6318

Y
Yi Wu 已提交
6319 6320 6321
  Put("", "");
  Flush();
  ASSERT_TRUE(dbfull()->TEST_write_controler().NeedsDelay());
6322
  ASSERT_TRUE(listener->CheckCondition(WriteStallCondition::kDelayed));
6323

Y
Yi Wu 已提交
6324
  sleeping_task_low.WaitUntilSleeping();
6325
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
Y
Yi Wu 已提交
6326 6327
  sleeping_task_low.WakeUp();
  sleeping_task_low.WaitUntilDone();
6328 6329
}

Y
Yi Wu 已提交
6330
TEST_F(DBTest, LastWriteBufferDelay) {
6331
  Options options = CurrentOptions();
Y
Yi Wu 已提交
6332 6333 6334 6335 6336 6337 6338 6339 6340
  options.env = env_;
  options.write_buffer_size = 100000;
  options.max_write_buffer_number = 4;
  options.delayed_write_rate = 20000;
  options.compression = kNoCompression;
  options.disable_auto_compactions = true;
  int kNumKeysPerMemtable = 3;
  options.memtable_factory.reset(
      new SpecialSkipListFactory(kNumKeysPerMemtable));
6341

Y
Yi Wu 已提交
6342 6343 6344 6345 6346 6347
  Reopen(options);
  test::SleepingBackgroundTask sleeping_task;
  // Block flushes
  env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask, &sleeping_task,
                 Env::Priority::HIGH);
  sleeping_task.WaitUntilSleeping();
6348

Y
Yi Wu 已提交
6349 6350 6351 6352 6353 6354 6355
  // Create 3 L0 files, making score of L0 to be 3.
  for (int i = 0; i < 3; i++) {
    // Fill one mem table
    for (int j = 0; j < kNumKeysPerMemtable; j++) {
      Put(Key(j), "");
    }
    ASSERT_TRUE(!dbfull()->TEST_write_controler().NeedsDelay());
6356
  }
Y
Yi Wu 已提交
6357 6358 6359
  // Inserting a new entry would create a new mem table, triggering slow down.
  Put(Key(0), "");
  ASSERT_TRUE(dbfull()->TEST_write_controler().NeedsDelay());
6360

Y
Yi Wu 已提交
6361 6362 6363
  sleeping_task.WakeUp();
  sleeping_task.WaitUntilDone();
}
6364
#endif  // !defined(ROCKSDB_LITE) && !defined(ROCKSDB_DISABLE_STALL_NOTIFICATION)
6365

Y
Yi Wu 已提交
6366 6367
TEST_F(DBTest, FailWhenCompressionNotSupportedTest) {
  CompressionType compressions[] = {kZlibCompression, kBZip2Compression,
A
Aaron Gao 已提交
6368
                                    kLZ4Compression, kLZ4HCCompression,
6369 6370 6371
                                    kXpressCompression};
  for (auto comp : compressions) {
    if (!CompressionTypeSupported(comp)) {
Y
Yi Wu 已提交
6372 6373
      // not supported, we should fail the Open()
      Options options = CurrentOptions();
6374
      options.compression = comp;
Y
Yi Wu 已提交
6375 6376 6377 6378 6379
      ASSERT_TRUE(!TryReopen(options).ok());
      // Try if CreateColumnFamily also fails
      options.compression = kNoCompression;
      ASSERT_OK(TryReopen(options));
      ColumnFamilyOptions cf_options(options);
6380
      cf_options.compression = comp;
Y
Yi Wu 已提交
6381 6382
      ColumnFamilyHandle* handle;
      ASSERT_TRUE(!db_->CreateColumnFamily(cf_options, "name", &handle).ok());
6383 6384 6385 6386
    }
  }
}

6387 6388 6389 6390 6391 6392 6393 6394 6395 6396 6397 6398 6399
TEST_F(DBTest, CreateColumnFamilyShouldFailOnIncompatibleOptions) {
  Options options = CurrentOptions();
  options.max_open_files = 100;
  Reopen(options);

  ColumnFamilyOptions cf_options(options);
  // ttl is now supported when max_open_files is -1.
  cf_options.ttl = 3600;
  ColumnFamilyHandle* handle;
  ASSERT_OK(db_->CreateColumnFamily(cf_options, "pikachu", &handle));
  delete handle;
}

Y
Yi Wu 已提交
6400 6401
#ifndef ROCKSDB_LITE
TEST_F(DBTest, RowCache) {
6402
  Options options = CurrentOptions();
6403
  options.statistics = ROCKSDB_NAMESPACE::CreateDBStatistics();
Y
Yi Wu 已提交
6404
  options.row_cache = NewLRUCache(8192);
6405 6406
  DestroyAndReopen(options);

Y
Yi Wu 已提交
6407 6408
  ASSERT_OK(Put("foo", "bar"));
  ASSERT_OK(Flush());
6409

Y
Yi Wu 已提交
6410 6411 6412 6413 6414 6415 6416 6417 6418
  ASSERT_EQ(TestGetTickerCount(options, ROW_CACHE_HIT), 0);
  ASSERT_EQ(TestGetTickerCount(options, ROW_CACHE_MISS), 0);
  ASSERT_EQ(Get("foo"), "bar");
  ASSERT_EQ(TestGetTickerCount(options, ROW_CACHE_HIT), 0);
  ASSERT_EQ(TestGetTickerCount(options, ROW_CACHE_MISS), 1);
  ASSERT_EQ(Get("foo"), "bar");
  ASSERT_EQ(TestGetTickerCount(options, ROW_CACHE_HIT), 1);
  ASSERT_EQ(TestGetTickerCount(options, ROW_CACHE_MISS), 1);
}
6419 6420 6421

TEST_F(DBTest, PinnableSliceAndRowCache) {
  Options options = CurrentOptions();
6422
  options.statistics = ROCKSDB_NAMESPACE::CreateDBStatistics();
6423 6424 6425 6426 6427 6428 6429 6430 6431 6432 6433 6434 6435 6436 6437 6438 6439 6440 6441 6442 6443 6444 6445 6446 6447 6448
  options.row_cache = NewLRUCache(8192);
  DestroyAndReopen(options);

  ASSERT_OK(Put("foo", "bar"));
  ASSERT_OK(Flush());

  ASSERT_EQ(Get("foo"), "bar");
  ASSERT_EQ(
      reinterpret_cast<LRUCache*>(options.row_cache.get())->TEST_GetLRUSize(),
      1);

  {
    PinnableSlice pin_slice;
    ASSERT_EQ(Get("foo", &pin_slice), Status::OK());
    ASSERT_EQ(pin_slice.ToString(), "bar");
    // Entry is already in cache, lookup will remove the element from lru
    ASSERT_EQ(
        reinterpret_cast<LRUCache*>(options.row_cache.get())->TEST_GetLRUSize(),
        0);
  }
  // After PinnableSlice destruction element is added back in LRU
  ASSERT_EQ(
      reinterpret_cast<LRUCache*>(options.row_cache.get())->TEST_GetLRUSize(),
      1);
}

Y
Yi Wu 已提交
6449
#endif  // ROCKSDB_LITE
6450

Y
Yi Wu 已提交
6451
TEST_F(DBTest, DeletingOldWalAfterDrop) {
6452
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->LoadDependency(
Y
Yi Wu 已提交
6453 6454
      {{"Test:AllowFlushes", "DBImpl::BGWorkFlush"},
       {"DBImpl::BGWorkFlush:done", "Test:WaitForFlush"}});
6455
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->ClearTrace();
6456

6457
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
Y
Yi Wu 已提交
6458 6459 6460 6461 6462 6463 6464 6465 6466
  Options options = CurrentOptions();
  options.max_total_wal_size = 8192;
  options.compression = kNoCompression;
  options.write_buffer_size = 1 << 20;
  options.level0_file_num_compaction_trigger = (1 << 30);
  options.level0_slowdown_writes_trigger = (1 << 30);
  options.level0_stop_writes_trigger = (1 << 30);
  options.disable_auto_compactions = true;
  DestroyAndReopen(options);
6467
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
6468

Y
Yi Wu 已提交
6469 6470 6471 6472 6473 6474 6475 6476 6477 6478 6479 6480 6481 6482 6483
  CreateColumnFamilies({"cf1", "cf2"}, options);
  ASSERT_OK(Put(0, "key1", DummyString(8192)));
  ASSERT_OK(Put(0, "key2", DummyString(8192)));
  // the oldest wal should now be getting_flushed
  ASSERT_OK(db_->DropColumnFamily(handles_[0]));
  // all flushes should now do nothing because their CF is dropped
  TEST_SYNC_POINT("Test:AllowFlushes");
  TEST_SYNC_POINT("Test:WaitForFlush");
  uint64_t lognum1 = dbfull()->TEST_LogfileNumber();
  ASSERT_OK(Put(1, "key3", DummyString(8192)));
  ASSERT_OK(Put(1, "key4", DummyString(8192)));
  // new wal should have been created
  uint64_t lognum2 = dbfull()->TEST_LogfileNumber();
  EXPECT_GT(lognum2, lognum1);
}
6484

Y
Yi Wu 已提交
6485 6486 6487 6488 6489
TEST_F(DBTest, UnsupportedManualSync) {
  DestroyAndReopen(CurrentOptions());
  env_->is_wal_sync_thread_safe_.store(false);
  Status s = db_->SyncWAL();
  ASSERT_TRUE(s.IsNotSupported());
6490 6491
}

6492 6493 6494
INSTANTIATE_TEST_CASE_P(DBTestWithParam, DBTestWithParam,
                        ::testing::Combine(::testing::Values(1, 4),
                                           ::testing::Bool()));
6495

6496
TEST_F(DBTest, PauseBackgroundWorkTest) {
S
sdong 已提交
6497
  Options options = CurrentOptions();
6498 6499 6500
  options.write_buffer_size = 100000;  // Small write buffer
  Reopen(options);

D
Dmitri Smirnov 已提交
6501
  std::vector<port::Thread> threads;
6502
  std::atomic<bool> done(false);
6503 6504 6505 6506
  db_->PauseBackgroundWork();
  threads.emplace_back([&]() {
    Random rnd(301);
    for (int i = 0; i < 10000; ++i) {
M
mrambacher 已提交
6507
      Put(rnd.RandomString(10), rnd.RandomString(10));
6508 6509 6510 6511 6512
    }
    done.store(true);
  });
  env_->SleepForMicroseconds(200000);
  // make sure the thread is not done
A
Andrew Kryczka 已提交
6513
  ASSERT_FALSE(done.load());
6514 6515 6516 6517 6518
  db_->ContinueBackgroundWork();
  for (auto& t : threads) {
    t.join();
  }
  // now it's done
S
Siying Dong 已提交
6519
  ASSERT_TRUE(done.load());
6520
}
6521 6522 6523 6524 6525 6526 6527 6528 6529 6530 6531

// Keep spawning short-living threads that create an iterator and quit.
// Meanwhile in another thread keep flushing memtables.
// This used to cause a deadlock.
TEST_F(DBTest, ThreadLocalPtrDeadlock) {
  std::atomic<int> flushes_done{0};
  std::atomic<int> threads_destroyed{0};
  auto done = [&] {
    return flushes_done.load() > 10;
  };

D
Dmitri Smirnov 已提交
6532
  port::Thread flushing_thread([&] {
6533 6534 6535 6536 6537 6538 6539 6540 6541
    for (int i = 0; !done(); ++i) {
      ASSERT_OK(db_->Put(WriteOptions(), Slice("hi"),
                         Slice(std::to_string(i).c_str())));
      ASSERT_OK(db_->Flush(FlushOptions()));
      int cnt = ++flushes_done;
      fprintf(stderr, "Flushed %d times\n", cnt);
    }
  });

D
Dmitri Smirnov 已提交
6542
  std::vector<port::Thread> thread_spawning_threads(10);
6543
  for (auto& t: thread_spawning_threads) {
D
Dmitri Smirnov 已提交
6544
    t = port::Thread([&] {
6545 6546
      while (!done()) {
        {
D
Dmitri Smirnov 已提交
6547
          port::Thread tmp_thread([&] {
6548 6549 6550 6551 6552 6553 6554 6555 6556 6557 6558 6559 6560 6561 6562 6563 6564
            auto it = db_->NewIterator(ReadOptions());
            delete it;
          });
          tmp_thread.join();
        }
        ++threads_destroyed;
      }
    });
  }

  for (auto& t: thread_spawning_threads) {
    t.join();
  }
  flushing_thread.join();
  fprintf(stderr, "Done. Flushed %d times, destroyed %d threads\n",
          flushes_done.load(), threads_destroyed.load());
}
6565 6566 6567 6568 6569 6570

TEST_F(DBTest, LargeBlockSizeTest) {
  Options options = CurrentOptions();
  CreateAndReopenWithCF({"pikachu"}, options);
  ASSERT_OK(Put(0, "foo", "bar"));
  BlockBasedTableOptions table_options;
6571
  table_options.block_size = 8LL * 1024 * 1024 * 1024LL;
6572 6573 6574 6575
  options.table_factory.reset(NewBlockBasedTableFactory(table_options));
  ASSERT_NOK(TryReopenWithColumnFamilies({"default", "pikachu"}, options));
}

V
Vijay Nadimpalli 已提交
6576 6577
#ifndef ROCKSDB_LITE

6578 6579 6580 6581 6582 6583 6584
TEST_F(DBTest, CreationTimeOfOldestFile) {
  const int kNumKeysPerFile = 32;
  const int kNumLevelFiles = 2;
  const int kValueSize = 100;

  Options options = CurrentOptions();
  options.max_open_files = -1;
6585
  env_->SetMockSleep();
6586 6587
  options.env = env_;

6588 6589
  // NOTE: Presumed unnecessary and removed: resetting mock time in env

6590 6591 6592 6593 6594 6595 6596 6597 6598 6599
  DestroyAndReopen(options);

  bool set_file_creation_time_to_zero = true;
  int idx = 0;

  int64_t time_1 = 0;
  env_->GetCurrentTime(&time_1);
  const uint64_t uint_time_1 = static_cast<uint64_t>(time_1);

  // Add 50 hours
6600
  env_->MockSleepForSeconds(50 * 60 * 60);
6601 6602 6603 6604 6605

  int64_t time_2 = 0;
  env_->GetCurrentTime(&time_2);
  const uint64_t uint_time_2 = static_cast<uint64_t>(time_2);

6606
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
6607 6608 6609 6610 6611 6612 6613 6614 6615 6616 6617 6618 6619 6620 6621 6622 6623 6624 6625
      "PropertyBlockBuilder::AddTableProperty:Start", [&](void* arg) {
        TableProperties* props = reinterpret_cast<TableProperties*>(arg);
        if (set_file_creation_time_to_zero) {
          if (idx == 0) {
            props->file_creation_time = 0;
            idx++;
          } else if (idx == 1) {
            props->file_creation_time = uint_time_1;
            idx = 0;
          }
        } else {
          if (idx == 0) {
            props->file_creation_time = uint_time_1;
            idx++;
          } else if (idx == 1) {
            props->file_creation_time = uint_time_2;
          }
        }
      });
6626
  // Set file creation time in manifest all to 0.
6627
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
6628 6629 6630 6631
      "FileMetaData::FileMetaData", [&](void* arg) {
        FileMetaData* meta = static_cast<FileMetaData*>(arg);
        meta->file_creation_time = 0;
      });
6632
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
6633 6634 6635 6636 6637

  Random rnd(301);
  for (int i = 0; i < kNumLevelFiles; ++i) {
    for (int j = 0; j < kNumKeysPerFile; ++j) {
      ASSERT_OK(
M
mrambacher 已提交
6638
          Put(Key(i * kNumKeysPerFile + j), rnd.RandomString(kValueSize)));
6639 6640 6641 6642
    }
    Flush();
  }

6643
  // At this point there should be 2 files, one with file_creation_time = 0 and
6644 6645 6646 6647 6648 6649 6650 6651 6652 6653 6654 6655
  // the other non-zero. GetCreationTimeOfOldestFile API should return 0.
  uint64_t creation_time;
  Status s1 = dbfull()->GetCreationTimeOfOldestFile(&creation_time);
  ASSERT_EQ(0, creation_time);
  ASSERT_EQ(s1, Status::OK());

  // Testing with non-zero file creation time.
  set_file_creation_time_to_zero = false;
  options = CurrentOptions();
  options.max_open_files = -1;
  options.env = env_;

6656 6657
  // NOTE: Presumed unnecessary and removed: resetting mock time in env

6658 6659 6660 6661 6662
  DestroyAndReopen(options);

  for (int i = 0; i < kNumLevelFiles; ++i) {
    for (int j = 0; j < kNumKeysPerFile; ++j) {
      ASSERT_OK(
M
mrambacher 已提交
6663
          Put(Key(i * kNumKeysPerFile + j), rnd.RandomString(kValueSize)));
6664 6665 6666 6667 6668 6669 6670 6671 6672 6673 6674 6675 6676 6677 6678 6679 6680 6681
    }
    Flush();
  }

  // At this point there should be 2 files with non-zero file creation time.
  // GetCreationTimeOfOldestFile API should return non-zero value.
  uint64_t ctime;
  Status s2 = dbfull()->GetCreationTimeOfOldestFile(&ctime);
  ASSERT_EQ(uint_time_1, ctime);
  ASSERT_EQ(s2, Status::OK());

  // Testing with max_open_files != -1
  options = CurrentOptions();
  options.max_open_files = 10;
  DestroyAndReopen(options);
  Status s3 = dbfull()->GetCreationTimeOfOldestFile(&ctime);
  ASSERT_EQ(s3, Status::NotSupported());

6682
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
6683 6684
}

6685 6686 6687 6688 6689 6690 6691 6692 6693 6694 6695 6696 6697 6698 6699 6700 6701 6702 6703 6704 6705 6706 6707 6708 6709 6710 6711 6712 6713 6714 6715 6716 6717 6718 6719 6720 6721 6722 6723 6724
TEST_F(DBTest, MemoryUsageWithMaxWriteBufferSizeToMaintain) {
  Options options = CurrentOptions();
  options.max_write_buffer_size_to_maintain = 10000;
  options.write_buffer_size = 160000;
  Reopen(options);
  Random rnd(301);
  bool memory_limit_exceeded = false;
  uint64_t size_all_mem_table = 0;
  uint64_t cur_active_mem = 0;
  for (int i = 0; i < 1000; i++) {
    std::string value = rnd.RandomString(1000);
    ASSERT_OK(Put("keykey_" + std::to_string(i), value));

    dbfull()->TEST_WaitForFlushMemTable();

    ASSERT_TRUE(db_->GetIntProperty(db_->DefaultColumnFamily(),
                                    DB::Properties::kSizeAllMemTables,
                                    &size_all_mem_table));
    ASSERT_TRUE(db_->GetIntProperty(db_->DefaultColumnFamily(),
                                    DB::Properties::kCurSizeActiveMemTable,
                                    &cur_active_mem));

    // Errors out if memory usage keeps on increasing beyond the limit.
    // Once memory limit exceeds,  memory_limit_exceeded  is set and if
    // size_all_mem_table doesn't drop out in the next write then it errors out
    // (not expected behaviour). If memory usage drops then
    // memory_limit_exceeded is set to false.
    if ((size_all_mem_table > cur_active_mem) &&
        (cur_active_mem >=
         static_cast<uint64_t>(options.max_write_buffer_size_to_maintain)) &&
        (size_all_mem_table > options.max_write_buffer_size_to_maintain +
                                  options.write_buffer_size)) {
      ASSERT_FALSE(memory_limit_exceeded);
      memory_limit_exceeded = true;
    } else {
      memory_limit_exceeded = false;
    }
  }
}

V
Vijay Nadimpalli 已提交
6725 6726
#endif

6727
}  // namespace ROCKSDB_NAMESPACE
J
jorlow@chromium.org 已提交
6728

6729 6730 6731 6732 6733 6734 6735 6736
#ifdef ROCKSDB_UNITTESTS_WITH_CUSTOM_OBJECTS_FROM_STATIC_LIBS
extern "C" {
void RegisterCustomObjects(int argc, char** argv);
}
#else
void RegisterCustomObjects(int /*argc*/, char** /*argv*/) {}
#endif  // !ROCKSDB_UNITTESTS_WITH_CUSTOM_OBJECTS_FROM_STATIC_LIBS

J
jorlow@chromium.org 已提交
6737
int main(int argc, char** argv) {
6738
  ROCKSDB_NAMESPACE::port::InstallStackTraceHandler();
I
Igor Sugak 已提交
6739
  ::testing::InitGoogleTest(&argc, argv);
6740
  RegisterCustomObjects(argc, argv);
I
Igor Sugak 已提交
6741
  return RUN_ALL_TESTS();
J
jorlow@chromium.org 已提交
6742
}